summaryrefslogtreecommitdiffstats
path: root/meta-moblin
diff options
context:
space:
mode:
authorJoshua Lock <josh@linux.intel.com>2010-05-18 14:51:13 +0100
committerJoshua Lock <josh@linux.intel.com>2010-05-19 12:20:16 +0100
commit5e8c7c54a9b297dae0081dd19a7bb94e23040a3d (patch)
tree948e3642c1bf426870b83c72c68c997dce66766c /meta-moblin
parent5e07bc91281969d54896dd0a13e3d6134e432027 (diff)
downloadast2050-yocto-poky-5e8c7c54a9b297dae0081dd19a7bb94e23040a3d.zip
ast2050-yocto-poky-5e8c7c54a9b297dae0081dd19a7bb94e23040a3d.tar.gz
linux-moblin: add 2.6.33.2 kernel from MeeGo 1.0
Signed-off-by: Joshua Lock <josh@linux.intel.com>
Diffstat (limited to 'meta-moblin')
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/defconfig-menlow124
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/defconfig-netbook73
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6-build-nonintconfig.patch147
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6-driver-level-usb-autosuspend.patch60
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6-usb-bt-autosuspend.patch11
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6-usb-uvc-autosuspend.patch19
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-dont-wait-for-mouse.patch47
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-kms-after-sata.patch50
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-kms-edid-cache.patch58
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-kms-run-async.patch137
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-silence-acer-message.patch22
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-sreadahead.patch96
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.31-silence-wacom.patch14
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-ahci-alpm-accounting.patch284
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-ahci-fix-oops-on-dummy-port.patch53
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-i2c-workaround-for-aava-koski-touchscreen.patch41
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-rc8-timberdale.patch10621
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-rt2860-1-2.patch48
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-rt2860-2-2.patch24
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-timberdale-audio-fix.patch23
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-vfs-tracepoints.patch120
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-CVE-tipc-Fix-oops-on-send-prior-to-entering-networked-mode.patch218
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-USB-gadget-introduce-g_nokia-gadget-driver.patch320
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-USB-otg-add-notifier-support.patch85
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-cando-dual-touch-driver.patch366
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-cypress-touch-driver.patch870
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-drm-i915-Ignore-LVDS-EDID-when-it-is-unavailabe-or-invalid.patch67
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-enable-hid-dg-contact-count-stantum-and-cando-touch-drivers.patch76
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-fix-marvell-firmware-path.patch58
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-hack-to-fix-aava-camera-sensor-issue.patch30
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-input-synaptics-clickpad-support.patch142
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-aava-specific-changes-no-audio.patch3342
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-analog-accelerometer-driver.patch462
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-1-8.patch1433
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-2-8.patch900
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-3-8.patch2532
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-4-8.patch1285
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-5-8.patch1690
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-6-8.patch2861
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-7-8.patch2391
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-8-8.patch85
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-ericsson-mbm-driver.patch465
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-fix-hw-qh-prefetch-bug.patch25
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-gpe-fix-for-sensor.patch85
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-graphics-changes-for-aava-koski-dv1-hardware.patch1859
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-gtm501l-driver-1.2.patch2395
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-ifxgps-driver.patch1648
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-img-graphics-driver-5.3.0.0007.patch106773
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-ipc-host-driver.patch268
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-keypad-driver.patch839
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-langwell-dma-driver-3.0.patch2469
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-mmc-driver-1.0.patch1367
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-nand-driver-2.0.patch11841
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-only-enable-mrst-pciquirks-on-mrst.patch35
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-platform-enabling.patch13580
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-pmic-battery-driver.patch849
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-rar-handler-driver-3.1.patch2531
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-sensor-driver-1.1.patch1836
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-spi-slave-controller-driver-1.1.patch2230
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-thermal-emc1403-driver.patch285
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-touchscreen-driver.patch996
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-usb-otg-and-still-image-driver.patch8395
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-multi-touch-input-driver-for-event-devices.patch398
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-1-7.patch48
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-2-7.patch78
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-3-7.patch62
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-4-7.patch89
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-5-7.patch63
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-6-7.patch122
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-7-7.patch114
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-can.patch10765
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-dma.patch4133
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-gbe.patch8889
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-gpio.patch2700
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-i2c.patch3435
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-ieee1588.patch7945
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-pcieqos.patch2083
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-spi.patch4377
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-uart.patch1589
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-usbdev.patch7018
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-rt2860-no-debug.patch36
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-stantum-and-mosart-multitouch-drivers.patch714
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-DSS2-Add-ACX565AKM-Panel-Driver.patch813
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-DSS2-Add-Kconfig-option-for-DPI-display-type.patch107
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-DSS2-Use-vdds_sdi-regulator-supply-in-SDI.patch118
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-RX51-Add-LCD-Panel-support.patch207
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-RX51-Add-Touch-Controller-in-SPI-board-info.patch64
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-RX51-Add-vdds_sdi-supply-voltage-for-SDI.patch54
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-input-touchscreen-introduce-tsc2005-driver.patch804
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-moorestown-camera-driver-10.0-1-3.patch10167
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-moorestown-camera-driver-10.0-2-3.patch9779
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-moorestown-camera-driver-10.0-3-3.patch8290
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-omap-rx-51-enable-tsc2005.patch122
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-phylib-Add-module-table-to-all-existing-phy-drivers.patch247
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-phylib-Support-phy-module-autoloading.patch150
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/rtl8192_carrier_off.patch52
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/rtl8192_no_WAP_unassoc.patch40
-rw-r--r--meta-moblin/packages/linux/linux-moblin-2.6.33.2/rtl8192_no_autoconnect.patch41
-rw-r--r--meta-moblin/packages/linux/linux-moblin_2.6.33.2.bb108
99 files changed, 277877 insertions, 0 deletions
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/defconfig-menlow b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/defconfig-menlow
new file mode 100644
index 0000000..96b4142
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/defconfig-menlow
@@ -0,0 +1,124 @@
+#
+# This file is an overlay over config-generic.
+# Only options different from config-generic
+# belong here!!!
+#
+
+CONFIG_LOCALVERSION="-menlow"
+
+CONFIG_INTEL_MENLOW=y
+# CONFIG_DRM_MRST_AAVA is not set
+CONFIG_DRM_MRST_CDK=y
+
+CONFIG_IMG_DOES_NOT_SUPPORT_MENLOW=y
+
+CONFIG_AGP=y
+CONFIG_DRM=y
+
+# MRST Poulsbo gfx driver
+CONFIG_DRM_MRST=y
+CONFIG_PVR_RELEASE="release"
+CONFIG_PVR_SERVICES4=y
+CONFIG_PVR_XOPEN_SOURCE=600
+CONFIG_PVR2D_VALIDATE_INPUT_PARAMS=y
+CONFIG_PVR_DISPLAY_CONTROLLER="mrstlfb"
+CONFIG_PVR_SGX_CORE_REV=121
+CONFIG_PVR_SUPPORT_SVRINIT=y
+CONFIG_PVR_SUPPORT_SGX=y
+CONFIG_PVR_SUPPORT_PERCONTEXT_PB=y
+CONFIG_PVR_SUPPORT_LINUX_X86_WRITECOMBINE=y
+CONFIG_PVR_TRANSFER_QUEUE=y
+CONFIG_PVR_SUPPORT_DRI_DRM=y
+CONFIG_PVR_SYS_USING_INTERRUPTS=y
+CONFIG_PVR_SUPPORT_HW_RECOVERY=y
+CONFIG_PVR_SUPPORT_POWER_MANAGEMENT=y
+CONFIG_PVR_SECURE_HANDLES=y
+CONFIG_PVR_USE_PTHREADS=y
+CONFIG_PVR_SUPPORT_SGX_EVENT_OBJECT=y
+CONFIG_PVR_SUPPORT_SGX_HWPERF=y
+CONFIG_PVR_SUPPORT_SGX_LOW_LATENCY_SCHEDULING=y
+CONFIG_PVR_SUPPORT_LINUX_X86_PAT=y
+CONFIG_PVR_PROC_USE_SEQ_FILE=y
+CONFIG_PVR_SUPPORT_SGX535=y
+# CONFIG_PVR_SUPPORT_CACHEFLUSH_ON_ALLOC is not set
+# CONFIG_PVR_SUPPORT_MEMINFO_IDS is not set
+CONFIG_PVR_SUPPORT_CACHE_LINE_FLUSH=y
+CONFIG_PVR_SUPPORT_CPU_CACHED_BUFFERS=y
+CONFIG_PVR_DEBUG_MESA_OGL_TRACE=y
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+# CONFIG_FB_DDC is not set
+CONFIG_FB_BOOT_VESA_SUPPORT=y
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+CONFIG_FB_MODE_HELPERS=y
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ARC is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_VGA16 is not set
+CONFIG_FB_VESA=y
+# CONFIG_FB_N411 is not set
+# CONFIG_FB_HGA is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_LE80578 is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_VIA is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_GEODE is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_LCD_ILI9320 is not set
+CONFIG_LCD_PLATFORM=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
+# CONFIG_BACKLIGHT_PROGEAR is not set
+# CONFIG_BACKLIGHT_MBP_NVIDIA is not set
+# CONFIG_BACKLIGHT_SAHARA is not set
+
+#
+# Display device support
+#
+CONFIG_DISPLAY_SUPPORT=y
+
+# CONFIG_DRM_I915 is not set
+# CONFIG_DRM_I915_KMS is not set
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/defconfig-netbook b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/defconfig-netbook
new file mode 100644
index 0000000..2481788
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/defconfig-netbook
@@ -0,0 +1,73 @@
+CONFIG_LOCALVERSION="-netbook"
+
+CONFIG_ACER_WMI=y
+
+CONFIG_EEEPC_LAPTOP=m
+
+CONFIG_SAMSUNG_LAPTOP=m
+
+CONFIG_R8169=y
+# CONFIG_R8169_VLAN is not set
+
+CONFIG_ATL1E=y
+
+CONFIG_ATL1C=m
+
+CONFIG_ATH5K=y
+# CONFIG_ATH5K_DEBUG is not set
+
+CONFIG_RT2860=m
+
+CONFIG_RTL8187SE=m
+
+CONFIG_INTEL_MENLOW=y
+CONFIG_DRM_MGA=m
+
+
+
+
+CONFIG_DRM_I915_KMS=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_6x11=y
+CONFIG_FONT_7x14=y
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+# CONFIG_FONT_MINI_4x6 is not set
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_SUN12x22 is not set
+CONFIG_FONT_10x18=y
+
+
+#
+# Enable KVM
+#
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
+CONFIG_KVM_INTEL=m
+# CONFIG_KVM_AMD is not set
+# CONFIG_KVM_TRACE is not set
+# CONFIG_VIRTIO_PCI is not set
+# CONFIG_VIRTIO_BALLOON is not set
+
+#
+# For VMWARE support
+#
+CONFIG_FUSION_SPI=y
+CONFIG_SND_ENS1371=m
+
+
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_PCIE=m
+CONFIG_HOTPLUG_PCI_ACPI=m
+CONFIG_HOTPLUG_PCI_ACPI_IBM=m
+
+#
+# Enable eCryptfs
+#
+CONFIG_KEYS=y
+# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
+CONFIG_ECRYPT_FS=m
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6-build-nonintconfig.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6-build-nonintconfig.patch
new file mode 100644
index 0000000..d55dc02
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6-build-nonintconfig.patch
@@ -0,0 +1,147 @@
+From 8642463d981c93d188814b11fdc81e17719aab05 Mon Sep 17 00:00:00 2001
+From: Yong Wang <yong.y.wang@intel.com>
+Date: Fri, 5 Feb 2010 10:07:51 +0800
+Subject: [PATCH] kbuild: add a "nonintconfig" option to the Makefile
+
+Add a "nonintconfig" option to the Makefile needed for
+unattended builds.
+
+This patch is from Arjan ven de Ven <arjan@linux.intel.com>
+
+Signed-off-by: Yong Wang <yong.y.wang@intel.com>
+---
+ scripts/kconfig/Makefile | 5 +++++
+ scripts/kconfig/conf.c | 36 ++++++++++++++++++++++++++++++++----
+ 2 files changed, 37 insertions(+), 4 deletions(-)
+
+diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
+index 999e8a7..fb3cdee 100644
+--- a/scripts/kconfig/Makefile
++++ b/scripts/kconfig/Makefile
+@@ -30,6 +30,11 @@ silentoldconfig: $(obj)/conf
+ $(Q)mkdir -p include/generated
+ $< -s $(Kconfig)
+
++nonint_oldconfig: $(obj)/conf
++ $< -b $(Kconfig)
++loose_nonint_oldconfig: $(obj)/conf
++ $< -B $(Kconfig)
++
+ localmodconfig: $(obj)/streamline_config.pl $(obj)/conf
+ $(Q)perl $< $(srctree) $(Kconfig) > .tmp.config
+ $(Q)if [ -f .config ]; then \
+diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
+index 9960d1c..bf6a528 100644
+--- a/scripts/kconfig/conf.c
++++ b/scripts/kconfig/conf.c
+@@ -23,6 +23,8 @@ enum {
+ ask_all,
+ ask_new,
+ ask_silent,
++ dont_ask,
++ dont_ask_dont_tell,
+ set_default,
+ set_yes,
+ set_mod,
+@@ -38,6 +40,8 @@ static int conf_cnt;
+ static char line[128];
+ static struct menu *rootEntry;
+
++static int return_value = 0;
++
+ static void print_help(struct menu *menu)
+ {
+ struct gstr help = str_new();
+@@ -360,7 +364,10 @@ static void conf(struct menu *menu)
+
+ switch (prop->type) {
+ case P_MENU:
+- if (input_mode == ask_silent && rootEntry != menu) {
++ if ((input_mode == ask_silent ||
++ input_mode == dont_ask ||
++ input_mode == dont_ask_dont_tell) &&
++ rootEntry != menu) {
+ check_conf(menu);
+ return;
+ }
+@@ -418,12 +425,21 @@ static void check_conf(struct menu *menu)
+ if (sym && !sym_has_value(sym)) {
+ if (sym_is_changable(sym) ||
+ (sym_is_choice(sym) && sym_get_tristate_value(sym) == yes)) {
++ if (input_mode == dont_ask ||
++ input_mode == dont_ask_dont_tell) {
++ if (input_mode == dont_ask &&
++ sym->name && !sym_is_choice_value(sym)) {
++ fprintf(stderr,"CONFIG_%s\n",sym->name);
++ ++return_value;
++ }
++ } else {
+ if (!conf_cnt++)
+ printf(_("*\n* Restart config...\n*\n"));
+ rootEntry = menu_get_parent_menu(menu);
+ conf(rootEntry);
+ }
+ }
++ }
+
+ for (child = menu->list; child; child = child->next)
+ check_conf(child);
+@@ -439,7 +455,7 @@ int main(int ac, char **av)
+ bindtextdomain(PACKAGE, LOCALEDIR);
+ textdomain(PACKAGE);
+
+- while ((opt = getopt(ac, av, "osdD:nmyrh")) != -1) {
++ while ((opt = getopt(ac, av, "osbBdD:nmyrh")) != -1) {
+ switch (opt) {
+ case 'o':
+ input_mode = ask_silent;
+@@ -448,6 +464,12 @@ int main(int ac, char **av)
+ input_mode = ask_silent;
+ sync_kconfig = 1;
+ break;
++ case 'b':
++ input_mode = dont_ask;
++ break;
++ case 'B':
++ input_mode = dont_ask_dont_tell;
++ break;
+ case 'd':
+ input_mode = set_default;
+ break;
+@@ -525,6 +547,8 @@ int main(int ac, char **av)
+ case ask_silent:
+ case ask_all:
+ case ask_new:
++ case dont_ask:
++ case dont_ask_dont_tell:
+ conf_read(NULL);
+ break;
+ case set_no:
+@@ -586,12 +610,16 @@ int main(int ac, char **av)
+ conf(&rootmenu);
+ input_mode = ask_silent;
+ /* fall through */
++ case dont_ask:
++ case dont_ask_dont_tell:
+ case ask_silent:
+ /* Update until a loop caused no more changes */
+ do {
+ conf_cnt = 0;
+ check_conf(&rootmenu);
+- } while (conf_cnt);
++ } while (conf_cnt &&
++ (input_mode != dont_ask &&
++ input_mode != dont_ask_dont_tell));
+ break;
+ }
+
+@@ -613,5 +641,5 @@ int main(int ac, char **av)
+ exit(1);
+ }
+ }
+- return 0;
++ return return_value;
+ }
+--
+1.5.5.1
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6-driver-level-usb-autosuspend.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6-driver-level-usb-autosuspend.patch
new file mode 100644
index 0000000..fc0fd7e
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6-driver-level-usb-autosuspend.patch
@@ -0,0 +1,60 @@
+commit 0f592e33934bf6108e33e34f00b425f98ee833ef
+Author: Matthew Garrett <mjg@redhat.com>
+Date: Wed Jul 8 19:04:23 2009 +0100
+
+ usb: Allow drivers to enable USB autosuspend on a per-device basis
+
+ USB autosuspend is currently only enabled by default for hubs. On other
+ hardware the decision is made by userspace. This is unnecessary in cases
+ where we know that the hardware supports autosuspend, so this patch adds
+ a function to allow drivers to enable it at probe time.
+
+ Signed-off-by: Matthew Garrett <mjg@redhat.com>
+
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index 60a45f1..06d24df 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -1648,6 +1648,20 @@ void usb_autopm_put_interface_async(struct usb_interface *intf)
+ EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async);
+
+ /**
++ * usb_device_autosuspend_enable - enable autosuspend on a device
++ * @udev: the usb_device to be autosuspended
++ *
++ * This routine should be called by an interface driver when it knows that
++ * the device in question supports USB autosuspend.
++ *
++ */
++void usb_device_autosuspend_enable(struct usb_device *udev)
++{
++ udev->autosuspend_disabled = 0;
++}
++EXPORT_SYMBOL_GPL(usb_device_autosuspend_enable);
++
++/**
+ * usb_autopm_get_interface - increment a USB interface's PM-usage counter
+ * @intf: the usb_interface whose counter should be incremented
+ *
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index e101a2d..dd47590 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -540,6 +540,7 @@ extern struct usb_device *usb_find_device(u16 vendor_id, u16 product_id);
+
+ /* USB autosuspend and autoresume */
+ #ifdef CONFIG_USB_SUSPEND
++extern void usb_device_autosuspend_enable(struct usb_device *udev);
+ extern int usb_autopm_get_interface(struct usb_interface *intf);
+ extern void usb_autopm_put_interface(struct usb_interface *intf);
+ extern int usb_autopm_get_interface_async(struct usb_interface *intf);
+@@ -563,6 +564,9 @@ static inline void usb_mark_last_busy(struct usb_device *udev)
+
+ #else
+
++static inline void usb_device_autosuspend_enable(struct usb_device *udev)
++{ }
++
+ static inline int usb_autopm_get_interface(struct usb_interface *intf)
+ { return 0; }
+ static inline int usb_autopm_get_interface_async(struct usb_interface *intf)
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6-usb-bt-autosuspend.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6-usb-bt-autosuspend.patch
new file mode 100644
index 0000000..4610910
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6-usb-bt-autosuspend.patch
@@ -0,0 +1,11 @@
+--- linux-2.6.33/drivers/bluetooth/btusb.c~ 2010-02-24 13:52:17.000000000 -0500
++++ linux-2.6.33/drivers/bluetooth/btusb.c 2010-03-23 14:36:48.301179380 -0400
+@@ -1020,6 +1020,8 @@
+ return err;
+ }
+
++ usb_device_autosuspend_enable(data->udev);
++
+ usb_set_intfdata(intf, data);
+
+ return 0;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6-usb-uvc-autosuspend.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6-usb-uvc-autosuspend.patch
new file mode 100644
index 0000000..b7c7f6e
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6-usb-uvc-autosuspend.patch
@@ -0,0 +1,19 @@
+commit 9d4c919bcfa794c054cc33155c7e3c53ac2c5684
+Author: Matthew Garrett <mjg@redhat.com>
+Date: Sun Jul 19 02:24:49 2009 +0100
+
+ Enable autosuspend on UVC by default
+
+diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
+index 89927b7..8de516b 100644
+--- a/drivers/media/video/uvc/uvc_driver.c
++++ b/drivers/media/video/uvc/uvc_driver.c
+@@ -1647,6 +1647,8 @@ static int uvc_probe(struct usb_interface *intf,
+ "supported.\n", ret);
+ }
+
++ usb_device_autosuspend_enable(udev);
++
+ uvc_trace(UVC_TRACE_PROBE, "UVC device initialized.\n");
+ return 0;
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-dont-wait-for-mouse.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-dont-wait-for-mouse.patch
new file mode 100644
index 0000000..6b2d54f
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-dont-wait-for-mouse.patch
@@ -0,0 +1,47 @@
+From dce8113d033975f56630cf6d2a6a908cfb66059d Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Sun, 20 Jul 2008 13:12:16 -0700
+Subject: [PATCH] fastboot: remove "wait for all devices before mounting root" delay
+
+In the non-initrd case, we wait for all devices to finish their
+probing before we try to mount the rootfs.
+In practice, this means that we end up waiting 2 extra seconds for
+the PS/2 mouse probing even though the root holding device has been
+ready since a long time.
+
+The previous two patches in this series made the RAID autodetect code
+do it's own "wait for probing to be done" code, and added
+"wait and retry" functionality in case the root device isn't actually
+available.
+
+These two changes should make it safe to remove the delay itself,
+and this patch does this. On my test laptop, this reduces the boot time
+by 2 seconds (kernel time goes from 3.9 to 1.9 seconds).
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+---
+---
+ init/do_mounts.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+Index: linux-2.6.29/init/do_mounts.c
+===================================================================
+--- linux-2.6.29.orig/init/do_mounts.c
++++ linux-2.6.29/init/do_mounts.c
+@@ -370,6 +370,7 @@ void __init prepare_namespace(void)
+ ssleep(root_delay);
+ }
+
++#if 0
+ /*
+ * wait for the known devices to complete their probing
+ *
+@@ -378,6 +379,8 @@ void __init prepare_namespace(void)
+ * for the touchpad of a laptop to initialize.
+ */
+ wait_for_device_probe();
++#endif
++ async_synchronize_full();
+
+ md_run_setup();
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-kms-after-sata.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-kms-after-sata.patch
new file mode 100644
index 0000000..3d7ce2b
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-kms-after-sata.patch
@@ -0,0 +1,50 @@
+From 5872557a54f440d8cf046714508898b173885399 Mon Sep 17 00:00:00 2001
+From: Yong Wang <yong.y.wang@intel.com>
+Date: Fri, 5 Feb 2010 11:19:36 +0800
+Subject: [PATCH] linux-2.6.29-kms-after-sata.patch
+
+This patch is from Arjan ven de Ven <arjan@linux.intel.com>
+
+Signed-off-by: Yong Wang <yong.y.wang@intel.com>
+---
+ drivers/Makefile | 15 ++++++++-------
+ 1 files changed, 8 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/Makefile b/drivers/Makefile
+index 6ee53c7..5dc6dd3 100644
+--- a/drivers/Makefile
++++ b/drivers/Makefile
+@@ -26,15 +26,8 @@ obj-$(CONFIG_REGULATOR) += regulator/
+ # default.
+ obj-y += char/
+
+-# gpu/ comes after char for AGP vs DRM startup
+-obj-y += gpu/
+-
+ obj-$(CONFIG_CONNECTOR) += connector/
+
+-# i810fb and intelfb depend on char/agp/
+-obj-$(CONFIG_FB_I810) += video/i810/
+-obj-$(CONFIG_FB_INTEL) += video/intelfb/
+-
+ obj-y += serial/
+ obj-$(CONFIG_PARPORT) += parport/
+ obj-y += base/ block/ misc/ mfd/
+@@ -46,6 +39,14 @@ obj-$(CONFIG_ATA) += ata/
+ obj-$(CONFIG_MTD) += mtd/
+ obj-$(CONFIG_SPI) += spi/
+ obj-y += net/
++
++# gpu/ comes after char for AGP vs DRM startup
++obj-y += gpu/
++
++# i810fb and intelfb depend on char/agp/
++obj-$(CONFIG_FB_I810) += video/i810/
++obj-$(CONFIG_FB_INTEL) += video/intelfb/
++
+ obj-$(CONFIG_ATM) += atm/
+ obj-$(CONFIG_FUSION) += message/
+ obj-$(CONFIG_FIREWIRE) += firewire/
+--
+1.5.5.1
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-kms-edid-cache.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-kms-edid-cache.patch
new file mode 100644
index 0000000..1fda401
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-kms-edid-cache.patch
@@ -0,0 +1,58 @@
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index a51573d..3dcf5cc 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -102,6 +102,7 @@ struct intel_output {
+ int type;
+ struct i2c_adapter *i2c_bus;
+ struct i2c_adapter *ddc_bus;
++ struct edid *edid;
+ bool load_detect_temp;
+ bool needs_tv_clock;
+ void *dev_priv;
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 3118ce2..fa0299e 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -716,6 +716,7 @@ static void intel_lvds_destroy(struct drm_connector *connector)
+ acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
++ kfree(intel_output->edid);
+ kfree(connector);
+ }
+
+@@ -1189,5 +1190,6 @@ failed:
+ intel_i2c_destroy(intel_output->ddc_bus);
+ drm_connector_cleanup(connector);
+ drm_encoder_cleanup(encoder);
++ kfree(intel_output->edid);
+ kfree(intel_output);
+ }
+diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
+index 67e2f46..5ac537f 100644
+--- a/drivers/gpu/drm/i915/intel_modes.c
++++ b/drivers/gpu/drm/i915/intel_modes.c
+@@ -74,6 +74,10 @@ int intel_ddc_get_modes(struct intel_output *intel_output)
+ int ret = 0;
+
+ intel_i2c_quirk_set(intel_output->base.dev, true);
++ if (intel_output->edid && intel_output->type == INTEL_OUTPUT_LVDS) {
++ printk(KERN_INFO "Skipping EDID probe due to cached edid\n");
++ return ret;
++ }
+ edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus);
+ intel_i2c_quirk_set(intel_output->base.dev, false);
+ if (edid) {
+@@ -81,7 +85,10 @@ int intel_ddc_get_modes(struct intel_output *intel_output)
+ edid);
+ ret = drm_add_edid_modes(&intel_output->base, edid);
+ intel_output->base.display_info.raw_edid = NULL;
+- kfree(edid);
++ if (intel_output->type == INTEL_OUTPUT_LVDS)
++ intel_output->edid = edid;
++ else
++ kfree(edid);
+ }
+
+ return ret;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-kms-run-async.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-kms-run-async.patch
new file mode 100644
index 0000000..0c4fe26
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-kms-run-async.patch
@@ -0,0 +1,137 @@
+From 5da92dfa0047f40221e96162c768cd12e063fa0c Mon Sep 17 00:00:00 2001
+From: Yong Wang <yong.y.wang@intel.com>
+Date: Fri, 5 Feb 2010 10:22:01 +0800
+Subject: [PATCH] linux-2.6.29-kms-run-async.patch
+
+This patch is from Arjan van de Ven <arjan@linux.intel.com>
+
+Signed-off-by: Yong Wang <yong.y.wang@intel.com>
+---
+ drivers/gpu/drm/drm_crtc_helper.c | 22 ++++++++++++++++++++--
+ drivers/gpu/drm/drm_drv.c | 4 ++++
+ drivers/gpu/drm/i915/i915_dma.c | 2 +-
+ include/drm/drmP.h | 1 +
+ include/drm/drm_crtc_helper.h | 1 +
+ 5 files changed, 27 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
+index 7d0f00a..1f814b4 100644
+--- a/drivers/gpu/drm/drm_crtc_helper.c
++++ b/drivers/gpu/drm/drm_crtc_helper.c
+@@ -29,6 +29,8 @@
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
++#include <linux/async.h>
++
+ #include "drmP.h"
+ #include "drm_crtc.h"
+ #include "drm_crtc_helper.h"
+@@ -54,6 +56,8 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
+ return;
+ }
+
++LIST_HEAD(drm_async_list);
++
+ /**
+ * drm_helper_probe_connector_modes - get complete set of display modes
+ * @dev: DRM device
+@@ -1002,6 +1006,7 @@ bool drm_helper_plugged_event(struct drm_device *dev)
+ /* FIXME: send hotplug event */
+ return true;
+ }
++
+ /**
+ * drm_initial_config - setup a sane initial connector configuration
+ * @dev: DRM device
+@@ -1037,13 +1042,26 @@ bool drm_helper_initial_config(struct drm_device *dev)
+
+ drm_setup_crtcs(dev);
+
+- /* alert the driver fb layer */
+ dev->mode_config.funcs->fb_changed(dev);
+-
+ return 0;
+ }
+ EXPORT_SYMBOL(drm_helper_initial_config);
+
++static void drm_helper_initial_config_helper(void *ptr, async_cookie_t cookie)
++{
++ struct drm_device *dev = ptr;
++ drm_helper_initial_config(dev);
++}
++
++void drm_helper_initial_config_async(struct drm_device *dev)
++{
++ async_schedule_domain(drm_helper_initial_config_helper,
++ dev, &drm_async_list);
++}
++EXPORT_SYMBOL(drm_helper_initial_config_async);
++
++
++
+ static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
+ {
+ int dpms = DRM_MODE_DPMS_OFF;
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index 766c468..1a0bf76 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -49,6 +49,7 @@
+ #include <linux/debugfs.h>
+ #include "drmP.h"
+ #include "drm_core.h"
++#include <linux/async.h>
+
+
+ static int drm_version(struct drm_device *dev, void *data,
+@@ -292,6 +293,9 @@ void drm_exit(struct drm_driver *driver)
+ struct drm_device *dev, *tmp;
+ DRM_DEBUG("\n");
+
++ /* make sure all async DRM operations are finished */
++ async_synchronize_full_domain(&drm_async_list);
++
+ if (driver->driver_features & DRIVER_MODESET) {
+ pci_unregister_driver(&driver->pci_driver);
+ } else {
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index e660ac0..3ffce27 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -1274,7 +1274,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
+
+ I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
+
+- drm_helper_initial_config(dev);
++ drm_helper_initial_config_async(dev);
+
+ return 0;
+
+diff --git a/include/drm/drmP.h b/include/drm/drmP.h
+index ffac157..4bbd7b5 100644
+--- a/include/drm/drmP.h
++++ b/include/drm/drmP.h
+@@ -323,6 +323,7 @@ struct drm_vma_entry {
+ pid_t pid;
+ };
+
++extern struct list_head drm_async_list;
+ /**
+ * DMA buffer.
+ */
+diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
+index b29e201..38ed420 100644
+--- a/include/drm/drm_crtc_helper.h
++++ b/include/drm/drm_crtc_helper.h
+@@ -98,6 +98,7 @@ extern int drm_helper_probe_single_connector_modes(struct drm_connector *connect
+ extern void drm_helper_disable_unused_functions(struct drm_device *dev);
+ extern int drm_helper_hotplug_stage_two(struct drm_device *dev);
+ extern bool drm_helper_initial_config(struct drm_device *dev);
++extern void drm_helper_initial_config_async(struct drm_device *dev);
+ extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
+ extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+--
+1.5.5.1
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-silence-acer-message.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-silence-acer-message.patch
new file mode 100644
index 0000000..ff76f09
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-silence-acer-message.patch
@@ -0,0 +1,22 @@
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Fri, 23 Jan 2009
+
+Small fix changing error msg to info msg in acer wmi driver
+---
+---
+ drivers/platform/x86/acer-wmi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-2.6.29/drivers/platform/x86/acer-wmi.c
+===================================================================
+--- linux-2.6.29.orig/drivers/platform/x86/acer-wmi.c
++++ linux-2.6.29/drivers/platform/x86/acer-wmi.c
+@@ -1290,7 +1290,7 @@ static int __init acer_wmi_init(void)
+ AMW0_find_mailled();
+
+ if (!interface) {
+- printk(ACER_ERR "No or unsupported WMI interface, unable to "
++ printk(ACER_INFO "No or unsupported WMI interface, unable to "
+ "load\n");
+ return -ENODEV;
+ }
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-sreadahead.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-sreadahead.patch
new file mode 100644
index 0000000..a6764a2
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.29-sreadahead.patch
@@ -0,0 +1,96 @@
+From 4d690855d6bdc15b753ac3c21bf507ad94d46aac Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Sun, 21 Sep 2008 11:58:27 -0700
+Subject: [PATCH] superreadahead patch
+
+---
+ fs/ext3/ioctl.c | 3 +++
+ fs/ext3/super.c | 1 +
+ include/linux/ext3_fs.h | 1 +
+ include/linux/fs.h | 2 ++
+ 4 files changed, 7 insertions(+), 0 deletions(-)
+
+diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
+index 8897481..08f4854 100644
+--- a/fs/ext3/ioctl.c
++++ b/fs/ext3/ioctl.c
+@@ -276,6 +276,9 @@ group_add_out:
+ mnt_drop_write(filp->f_path.mnt);
+ return err;
+ }
++ case EXT3_IOC_INODE_JIFFIES: {
++ return inode->created_when;
++ }
+
+
+ default:
+diff --git a/fs/ext3/super.c b/fs/ext3/super.c
+index 524b349..e6e8514 100644
+--- a/fs/ext3/super.c
++++ b/fs/ext3/super.c
+@@ -466,6 +466,7 @@ static struct inode *ext3_alloc_inode(struct super_block *sb)
+ return NULL;
+ ei->i_block_alloc_info = NULL;
+ ei->vfs_inode.i_version = 1;
++ ei->vfs_inode.created_when = jiffies;
+ atomic_set(&ei->i_datasync_tid, 0);
+ atomic_set(&ei->i_sync_tid, 0);
+ return &ei->vfs_inode;
+diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
+index 634a5e5..84d5394 100644
+--- a/include/linux/ext3_fs.h
++++ b/include/linux/ext3_fs.h
+@@ -250,6 +250,7 @@ struct ext3_new_group_data {
+ #endif
+ #define EXT3_IOC_GETRSVSZ _IOR('f', 5, long)
+ #define EXT3_IOC_SETRSVSZ _IOW('f', 6, long)
++#define EXT3_IOC_INODE_JIFFIES _IOR('f', 19, long)
+
+ /*
+ * ioctl commands in 32 bit emulation
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 0872372..078e3fd 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -781,6 +781,8 @@ struct inode {
+ struct posix_acl *i_default_acl;
+ #endif
+ void *i_private; /* fs or device private pointer */
++
++ unsigned long created_when; /* jiffies of creation time */
+ };
+
+ /*
+--
+1.6.0.6
+
+--- vanilla-2.6.32-rc7/fs/btrfs/inode.c~ 2009-11-13 11:15:48.000000000 -0800
++++ vanilla-2.6.32-rc7/fs/btrfs/inode.c 2009-11-13 11:15:48.000000000 -0800
+@@ -5181,6 +5181,7 @@
+ ei->outstanding_extents = 0;
+ ei->reserved_extents = 0;
+ ei->root = NULL;
++ ei->vfs_inode.created_when = jiffies;
+ spin_lock_init(&ei->accounting_lock);
+ btrfs_ordered_inode_tree_init(&ei->ordered_tree);
+ INIT_LIST_HEAD(&ei->i_orphan);
+--- vanilla-2.6.32-rc7/fs/btrfs/ioctl.c~ 2009-11-13 11:16:58.000000000 -0800
++++ vanilla-2.6.32-rc7/fs/btrfs/ioctl.c 2009-11-13 11:16:58.000000000 -0800
+@@ -1298,6 +1298,8 @@
+ return 0;
+ }
+
++#define EXT3_IOC_INODE_JIFFIES _IOR('f', 19, long)
++
+ long btrfs_ioctl(struct file *file, unsigned int
+ cmd, unsigned long arg)
+ {
+@@ -1337,6 +1338,8 @@
+ case BTRFS_IOC_SYNC:
+ btrfs_sync_fs(file->f_dentry->d_sb, 1);
+ return 0;
++ case EXT3_IOC_INODE_JIFFIES:
++ return fdentry(file)->d_inode->created_when;
+ }
+
+ return -ENOTTY;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.31-silence-wacom.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.31-silence-wacom.patch
new file mode 100644
index 0000000..635709e
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.31-silence-wacom.patch
@@ -0,0 +1,14 @@
+KERN_ERR is not appropriate for a printk level of a successful operation
+
+
+--- linux-2.6.30/drivers/hid/hid-wacom.c~ 2009-09-04 10:37:20.000000000 -0700
++++ linux-2.6.30/drivers/hid/hid-wacom.c 2009-09-04 10:37:20.000000000 -0700
+@@ -244,7 +244,7 @@
+ ret = hid_register_driver(&wacom_driver);
+ if (ret)
+ printk(KERN_ERR "can't register wacom driver\n");
+- printk(KERN_ERR "wacom driver registered\n");
++ printk(KERN_INFO "wacom driver registered\n");
+ return ret;
+ }
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-ahci-alpm-accounting.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-ahci-alpm-accounting.patch
new file mode 100644
index 0000000..800e10a
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-ahci-alpm-accounting.patch
@@ -0,0 +1,284 @@
+From f62ff8c98080b4a9e66f82f793145b863b4e183a Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Fri, 13 Nov 2009 16:54:37 -0800
+Subject: [PATCH] libata: Add ALPM power state accounting to the AHCI driver
+
+PowerTOP wants to be able to show the user how effective the ALPM link
+power management is for the user. ALPM is worth around 0.5W on a quiet
+link; PowerTOP wants to be able to find cases where the "quiet link" isn't
+actually quiet.
+
+This patch adds state accounting functionality to the AHCI driver for
+PowerTOP to use.
+The parts of the patch are
+1) the sysfs logic of exposing the stats for each state in sysfs
+2) the basic accounting logic that gets update on link change interrupts
+ (or when the user accesses the info from sysfs)
+3) a "accounting enable" flag; in order to get the accounting to work,
+ the driver needs to get phyrdy interrupts on link status changes.
+ Normally and currently this is disabled by the driver when ALPM is
+ on (to reduce overhead); when PowerTOP is running this will need
+ to be on to get usable statistics... hence the sysfs tunable.
+
+The PowerTOP output currently looks like this:
+
+Recent SATA AHCI link activity statistics
+Active Partial Slumber Device name
+ 0.5% 99.5% 0.0% host0
+
+(work to resolve "host0" to a more human readable name is in progress)
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+---
+ drivers/ata/ahci.c | 175 +++++++++++++++++++++++++++++++++++++++++++++++++++-
+ 1 files changed, 173 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index a3241a1..448d684 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -72,6 +72,21 @@ MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ig
+ static int ahci_enable_alpm(struct ata_port *ap,
+ enum link_pm policy);
+ static void ahci_disable_alpm(struct ata_port *ap);
++
++static ssize_t ahci_alpm_show_active(struct device *dev,
++ struct device_attribute *attr, char *buf);
++static ssize_t ahci_alpm_show_slumber(struct device *dev,
++ struct device_attribute *attr, char *buf);
++static ssize_t ahci_alpm_show_partial(struct device *dev,
++ struct device_attribute *attr, char *buf);
++
++static ssize_t ahci_alpm_show_accounting(struct device *dev,
++ struct device_attribute *attr, char *buf);
++
++static ssize_t ahci_alpm_set_accounting(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count);
++
+ static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
+ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
+ size_t size);
+@@ -289,6 +304,13 @@ struct ahci_host_priv {
+ u32 em_loc; /* enclosure management location */
+ };
+
++enum ahci_port_states {
++ AHCI_PORT_NOLINK = 0,
++ AHCI_PORT_ACTIVE = 1,
++ AHCI_PORT_PARTIAL = 2,
++ AHCI_PORT_SLUMBER = 3
++};
++
+ struct ahci_port_priv {
+ struct ata_link *active_link;
+ struct ahci_cmd_hdr *cmd_slot;
+@@ -304,6 +326,14 @@ struct ahci_port_priv {
+ u32 intr_mask; /* interrupts to enable */
+ /* enclosure management info per PM slot */
+ struct ahci_em_priv em_priv[EM_MAX_SLOTS];
++
++ /* ALPM accounting state and stats */
++ unsigned int accounting_active:1;
++ u64 active_jiffies;
++ u64 partial_jiffies;
++ u64 slumber_jiffies;
++ int previous_state;
++ int previous_jiffies;
+ };
+
+ static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+@@ -359,6 +389,12 @@ DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
+ DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
+ DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
+
++DEVICE_ATTR(ahci_alpm_active, S_IRUGO, ahci_alpm_show_active, NULL);
++DEVICE_ATTR(ahci_alpm_partial, S_IRUGO, ahci_alpm_show_partial, NULL);
++DEVICE_ATTR(ahci_alpm_slumber, S_IRUGO, ahci_alpm_show_slumber, NULL);
++DEVICE_ATTR(ahci_alpm_accounting, S_IRUGO | S_IWUSR,
++ ahci_alpm_show_accounting, ahci_alpm_set_accounting);
++
+ static struct device_attribute *ahci_shost_attrs[] = {
+ &dev_attr_link_power_management_policy,
+ &dev_attr_em_message_type,
+@@ -367,6 +403,10 @@ static struct device_attribute *ahci_shost_attrs[] = {
+ &dev_attr_ahci_host_cap2,
+ &dev_attr_ahci_host_version,
+ &dev_attr_ahci_port_cmd,
++ &dev_attr_ahci_alpm_active,
++ &dev_attr_ahci_alpm_partial,
++ &dev_attr_ahci_alpm_slumber,
++ &dev_attr_ahci_alpm_accounting,
+ NULL
+ };
+
+@@ -1165,9 +1205,14 @@ static int ahci_enable_alpm(struct ata_port *ap,
+ * getting woken up due to spurious phy ready interrupts
+ * TBD - Hot plug should be done via polling now, is
+ * that even supported?
++ *
++ * However, when accounting_active is set, we do want
++ * the interrupts for accounting purposes.
+ */
+- pp->intr_mask &= ~PORT_IRQ_PHYRDY;
+- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
++ if (!pp->accounting_active) {
++ pp->intr_mask &= ~PORT_IRQ_PHYRDY;
++ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
++ }
+
+ /*
+ * Set a flag to indicate that we should ignore all PhyRdy
+@@ -2157,6 +2202,141 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
+ ata_port_abort(ap);
+ }
+
++static int get_current_alpm_state(struct ata_port *ap)
++{
++ u32 status = 0;
++
++ ahci_scr_read(&ap->link, SCR_STATUS, &status);
++
++ /* link status is in bits 11-8 */
++ status = status >> 8;
++ status = status & 0x7;
++
++ if (status == 6)
++ return AHCI_PORT_SLUMBER;
++ if (status == 2)
++ return AHCI_PORT_PARTIAL;
++ if (status == 1)
++ return AHCI_PORT_ACTIVE;
++ return AHCI_PORT_NOLINK;
++}
++
++static void account_alpm_stats(struct ata_port *ap)
++{
++ struct ahci_port_priv *pp;
++
++ int new_state;
++ u64 new_jiffies, jiffies_delta;
++
++ if (ap == NULL)
++ return;
++ pp = ap->private_data;
++ if (!pp) return;
++
++ new_state = get_current_alpm_state(ap);
++ new_jiffies = jiffies;
++
++ jiffies_delta = new_jiffies - pp->previous_jiffies;
++
++ switch (pp->previous_state) {
++ case AHCI_PORT_NOLINK:
++ pp->active_jiffies = 0;
++ pp->partial_jiffies = 0;
++ pp->slumber_jiffies = 0;
++ break;
++ case AHCI_PORT_ACTIVE:
++ pp->active_jiffies += jiffies_delta;
++ break;
++ case AHCI_PORT_PARTIAL:
++ pp->partial_jiffies += jiffies_delta;
++ break;
++ case AHCI_PORT_SLUMBER:
++ pp->slumber_jiffies += jiffies_delta;
++ break;
++ default:
++ break;
++ }
++ pp->previous_state = new_state;
++ pp->previous_jiffies = new_jiffies;
++}
++
++static ssize_t ahci_alpm_show_active(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct Scsi_Host *shost = class_to_shost(dev);
++ struct ata_port *ap = ata_shost_to_port(shost);
++ struct ahci_port_priv *pp;
++
++ if (!ap)
++ return;
++ pp = ap->private_data;
++ account_alpm_stats(ap);
++
++ return sprintf(buf, "%u\n", jiffies_to_msecs(pp->active_jiffies));
++}
++
++static ssize_t ahci_alpm_show_partial(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct Scsi_Host *shost = class_to_shost(dev);
++ struct ata_port *ap = ata_shost_to_port(shost);
++ struct ahci_port_priv *pp = ap->private_data;
++
++ account_alpm_stats(ap);
++
++ return sprintf(buf, "%u\n", jiffies_to_msecs(pp->partial_jiffies));
++}
++
++static ssize_t ahci_alpm_show_slumber(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct Scsi_Host *shost = class_to_shost(dev);
++ struct ata_port *ap = ata_shost_to_port(shost);
++ struct ahci_port_priv *pp = ap->private_data;
++
++ account_alpm_stats(ap);
++
++ return sprintf(buf, "%u\n", jiffies_to_msecs(pp->slumber_jiffies));
++}
++
++
++static ssize_t ahci_alpm_show_accounting(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct Scsi_Host *shost = class_to_shost(dev);
++ struct ata_port *ap = ata_shost_to_port(shost);
++ struct ahci_port_priv *pp = ap->private_data;
++
++ return sprintf(buf, "%u\n", pp->accounting_active);
++}
++
++static ssize_t ahci_alpm_set_accounting(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ unsigned long flags;
++ struct Scsi_Host *shost = class_to_shost(dev);
++ struct ata_port *ap = ata_shost_to_port(shost);
++ struct ahci_port_priv *pp = ap->private_data;
++ void __iomem *port_mmio = ahci_port_base(ap);
++
++ if (!pp)
++ return 1;
++ if (buf[0] == '0')
++ pp->accounting_active = 0;
++ if (buf[0] == '1')
++ pp->accounting_active = 1;
++
++ /* we need to enable the PHYRDY interrupt when we want accounting */
++ if (pp->accounting_active) {
++ spin_lock_irqsave(ap->lock, flags);
++ pp->intr_mask |= PORT_IRQ_PHYRDY;
++ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
++ spin_unlock_irqrestore(ap->lock, flags);
++ }
++ return count;
++}
++
+ static void ahci_port_intr(struct ata_port *ap)
+ {
+ void __iomem *port_mmio = ahci_port_base(ap);
+@@ -2182,6 +2352,7 @@ static void ahci_port_intr(struct ata_port *ap)
+ if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
+ (status & PORT_IRQ_PHYRDY)) {
+ status &= ~PORT_IRQ_PHYRDY;
++ account_alpm_stats(ap);
+ ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
+ }
+
+--
+1.6.0.6
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-ahci-fix-oops-on-dummy-port.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-ahci-fix-oops-on-dummy-port.patch
new file mode 100644
index 0000000..103c7f6
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-ahci-fix-oops-on-dummy-port.patch
@@ -0,0 +1,53 @@
+From: Yong Wang <yong.y.wang@intel.com>
+Date: Thu Mar 11 15:31:50 2010 +0800
+Subject: [PATCH] ahci: Fix accounting oops on dummy port
+
+Not all ports are implemented in AHCI mode on some machines.
+If this is the case, it results in NULL pointer dereference
+when doing alpm accounting. Skip such dummy ports.
+
+Signed-off-by: Yong Wang <yong.y.wang@intel.com>
+---
+ drivers/ata/ahci.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -2283,8 +2283,8 @@ static ssize_t ahci_alpm_show_active(str
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_port_priv *pp;
+
+- if (!ap)
+- return;
++ if (!ap || ata_port_is_dummy(ap))
++ return 0;
+ pp = ap->private_data;
+ account_alpm_stats(ap);
+
+@@ -2298,6 +2298,8 @@ static ssize_t ahci_alpm_show_partial(st
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_port_priv *pp = ap->private_data;
+
++ if (!ap || ata_port_is_dummy(ap))
++ return 0;
+ account_alpm_stats(ap);
+
+ return sprintf(buf, "%u\n", jiffies_to_msecs(pp->partial_jiffies));
+@@ -2310,6 +2312,8 @@ static ssize_t ahci_alpm_show_slumber(st
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_port_priv *pp = ap->private_data;
+
++ if (!ap || ata_port_is_dummy(ap))
++ return 0;
+ account_alpm_stats(ap);
+
+ return sprintf(buf, "%u\n", jiffies_to_msecs(pp->slumber_jiffies));
+@@ -2323,6 +2327,8 @@ static ssize_t ahci_alpm_show_accounting
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_port_priv *pp = ap->private_data;
+
++ if (!ap || ata_port_is_dummy(ap))
++ return 0;
+ return sprintf(buf, "%u\n", pp->accounting_active);
+ }
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-i2c-workaround-for-aava-koski-touchscreen.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-i2c-workaround-for-aava-koski-touchscreen.patch
new file mode 100644
index 0000000..fd267df
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-i2c-workaround-for-aava-koski-touchscreen.patch
@@ -0,0 +1,41 @@
+From b766d59f044858ed3d149e97619a0bf5cc2873f3 Mon Sep 17 00:00:00 2001
+From: Priya Vijayan <priya.vijayan@intel.com>
+Date: Tue, 27 Apr 2010 13:23:28 -0700
+Subject: [PATCH] workaround for aava KOSKI
+
+Signed-off-by: Priya Vijayan <priya.vijayan@intel.com>
+---
+ drivers/i2c/busses/i2c-mrst.c | 3 +++
+ 1 files changed, 3 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/i2c/busses/i2c-mrst.c b/drivers/i2c/busses/i2c-mrst.c
+index e591a90..0737f42 100644
+--- a/drivers/i2c/busses/i2c-mrst.c
++++ b/drivers/i2c/busses/i2c-mrst.c
+@@ -217,6 +217,7 @@ static int mrst_i2c_disable(struct i2c_adapter *adap)
+ dev_dbg(&adap->dev, "i2c is busy, count is %d\n", count);
+ if (count++ > 10000)
+ return -EBUSY;
++ udelay(10);
+ }
+
+ /* Set IC_ENABLE to 0 */
+@@ -468,6 +469,7 @@ static int xfer_read(struct i2c_adapter *adap, unsigned char *buf, int length)
+ reg_val = mrst_i2c_read(i2c->base + IC_STATUS);
+ if (count++ > MAX_T_POLL_COUNT)
+ goto read_loop;
++ udelay(10);
+ }
+
+ reg_val = mrst_i2c_read(i2c->base + IC_DATA_CMD);
+@@ -522,6 +524,7 @@ static int xfer_write(struct i2c_adapter *adap,
+ while ((reg_val & bit_get) == 0) {
+ if (count++ > MAX_T_POLL_COUNT)
+ goto write_loop;
++ udelay(10);
+ reg_val = mrst_i2c_read(i2c->base + IC_STATUS);
+ }
+
+--
+1.6.2.2
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-rc8-timberdale.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-rc8-timberdale.patch
new file mode 100644
index 0000000..32879a2
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-rc8-timberdale.patch
@@ -0,0 +1,10621 @@
+diff --git a/arch/x86/include/asm/gpio.h b/arch/x86/include/asm/gpio.h
+index 49dbfdf..9da6f01 100644
+--- a/arch/x86/include/asm/gpio.h
++++ b/arch/x86/include/asm/gpio.h
+@@ -43,7 +43,7 @@ static inline int gpio_cansleep(unsigned int gpio)
+ */
+ static inline int gpio_to_irq(unsigned int gpio)
+ {
+- return -ENOSYS;
++ return __gpio_to_irq(gpio);
+ }
+
+ static inline int irq_to_gpio(unsigned int irq)
+diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
+index e02d74b..ab2dcd3 100644
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -126,6 +126,13 @@ config AMCC_PPC440SPE_ADMA
+ help
+ Enable support for the AMCC PPC440SPe RAID engines.
+
++config TIMB_DMA
++ tristate "Timberdale FPGA DMA support"
++ depends on MFD_TIMBERDALE || HAS_IOMEM
++ select DMA_ENGINE
++ help
++ Enable support for the Timberdale FPGA DMA engine.
++
+ config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+ bool
+
+diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
+index 807053d..33a94ec 100644
+--- a/drivers/dma/Makefile
++++ b/drivers/dma/Makefile
+@@ -12,3 +12,4 @@ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
+ obj-$(CONFIG_SH_DMAE) += shdma.o
+ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
+ obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
++obj-$(CONFIG_TIMB_DMA) += timb_dma.o
+diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
+new file mode 100644
+index 0000000..74b37aa
+--- /dev/null
++++ b/drivers/dma/timb_dma.c
+@@ -0,0 +1,875 @@
++/*
++ * timb_dma.c timberdale FPGA DMA driver
++ * Copyright (c) 2010 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA DMA engine
++ */
++
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++
++#include <linux/timb_dma.h>
++
++#define DRIVER_NAME "timb-dma"
++
++/* Global DMA registers */
++#define TIMBDMA_ACR 0x34
++#define TIMBDMA_32BIT_ADDR 0x01
++
++#define TIMBDMA_ISR 0x080000
++#define TIMBDMA_IPR 0x080004
++#define TIMBDMA_IER 0x080008
++
++/* Channel specific registers */
++/* RX instances base addresses are 0x00, 0x40, 0x80 ...
++ * TX instances base addresses are 0x18, 0x58, 0x98 ...
++ */
++#define TIMBDMA_INSTANCE_OFFSET 0x40
++#define TIMBDMA_INSTANCE_TX_OFFSET 0x18
++
++/* RX registers, relative the instance base */
++#define TIMBDMA_OFFS_RX_DHAR 0x00
++#define TIMBDMA_OFFS_RX_DLAR 0x04
++#define TIMBDMA_OFFS_RX_LR 0x0C
++#define TIMBDMA_OFFS_RX_BLR 0x10
++#define TIMBDMA_OFFS_RX_ER 0x14
++#define TIMBDMA_RX_EN 0x01
++/* bytes per Row, video specific register
++ * which is placed after the TX registers...
++ */
++#define TIMBDMA_OFFS_RX_BPRR 0x30
++
++/* TX registers, relative the instance base */
++#define TIMBDMA_OFFS_TX_DHAR 0x00
++#define TIMBDMA_OFFS_TX_DLAR 0x04
++#define TIMBDMA_OFFS_TX_BLR 0x0C
++#define TIMBDMA_OFFS_TX_LR 0x14
++
++
++#define TIMB_DMA_DESC_SIZE 8
++
++struct timb_dma_desc {
++ struct list_head desc_node;
++ struct dma_async_tx_descriptor txd;
++ u8 *desc_list;
++ unsigned int desc_list_len;
++ bool interrupt;
++};
++
++struct timb_dma_chan {
++ struct dma_chan chan;
++ void __iomem *membase;
++ spinlock_t lock; /* Used for mutual exclusion */
++ dma_cookie_t last_completed_cookie;
++ bool ongoing;
++ struct list_head active_list;
++ struct list_head queue;
++ struct list_head free_list;
++ unsigned int bytes_per_line;
++ enum dma_data_direction direction;
++ unsigned int descs; /* Descriptors to allocate */
++ unsigned int desc_elems; /* number of elems per descriptor */
++};
++
++struct timb_dma {
++ struct dma_device dma;
++ void __iomem *membase;
++ struct tasklet_struct tasklet;
++ struct timb_dma_chan channels[0];
++};
++
++static struct device *chan2dev(struct dma_chan *chan)
++{
++ return &chan->dev->device;
++}
++static struct device *chan2dmadev(struct dma_chan *chan)
++{
++ return chan2dev(chan)->parent->parent;
++}
++
++static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
++{
++ int id = td_chan->chan.chan_id;
++ return (struct timb_dma *)((u8 *)td_chan -
++ id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
++}
++
++/* Must be called with the spinlock held */
++static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
++{
++ int id = td_chan->chan.chan_id;
++ struct timb_dma *td = tdchantotd(td_chan);
++ u32 ier;
++
++ /* enable interrupt for this channel */
++ ier = ioread32(td->membase + TIMBDMA_IER);
++ ier |= 1 << id;
++ dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
++ ier);
++ iowrite32(ier, td->membase + TIMBDMA_IER);
++}
++
++/* Should be called with the spinlock held */
++static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
++{
++ int id = td_chan->chan.chan_id;
++ struct timb_dma *td = (struct timb_dma *)((u8 *)td_chan -
++ id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
++ u32 isr;
++ bool done = false;
++
++ dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);
++
++ isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
++ if (isr) {
++ iowrite32(isr, td->membase + TIMBDMA_ISR);
++ done = true;
++ }
++
++ return done;
++}
++
++static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
++ bool single)
++{
++ dma_addr_t addr;
++ int len;
++
++ addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) |
++ dma_desc[4];
++
++ len = (dma_desc[3] << 8) | dma_desc[2];
++
++ if (single)
++ dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
++ td_chan->direction);
++ else
++ dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
++ td_chan->direction);
++}
++
++static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
++{
++ struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan,
++ struct timb_dma_chan, chan);
++ u8 *descs;
++
++ for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) {
++ __td_unmap_desc(td_chan, descs, single);
++ if (descs[0] & 0x02)
++ break;
++ }
++}
++
++static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
++ struct scatterlist *sg, bool last)
++{
++ if (sg_dma_len(sg) > USHORT_MAX) {
++ dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
++ return -EINVAL;
++ }
++
++ /* length must be word aligned */
++ if (sg_dma_len(sg) % sizeof(u32)) {
++ dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
++ sg_dma_len(sg));
++ return -EINVAL;
++ }
++
++ dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: %p\n",
++ dma_desc, (void *)(int)sg_dma_address(sg));
++
++ dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
++ dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
++ dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
++ dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;
++
++ dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
++ dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;
++
++ dma_desc[1] = 0x00;
++ dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */
++
++ return 0;
++}
++
++/* Must be called with the spinlock held */
++static void __td_start_dma(struct timb_dma_chan *td_chan)
++{
++ struct timb_dma_desc *td_desc;
++
++ if (td_chan->ongoing) {
++ dev_err(chan2dev(&td_chan->chan),
++ "Transfer already ongoing\n");
++ return;
++ }
++
++ td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
++ desc_node);
++
++ dev_dbg(chan2dev(&td_chan->chan),
++ "td_chan: %p, chan: %d, membase: %p\n",
++ td_chan, td_chan->chan.chan_id, td_chan->membase);
++
++ dev_dbg(chan2dev(&td_chan->chan), "Desc: 0x%02x 0x%02x 0x%02x 0x%02x "
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ td_desc->desc_list[0], td_desc->desc_list[1],
++ td_desc->desc_list[2], td_desc->desc_list[3],
++ td_desc->desc_list[4], td_desc->desc_list[5],
++ td_desc->desc_list[6], td_desc->desc_list[7]);
++
++ dev_dbg(chan2dev(&td_chan->chan),
++ "Desc: phys: %p\n", (void *)(int)td_desc->txd.phys);
++
++ if (td_chan->direction == DMA_FROM_DEVICE) {
++
++ /* descriptor address */
++ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
++ iowrite32(td_desc->txd.phys, td_chan->membase +
++ TIMBDMA_OFFS_RX_DLAR);
++ /* Bytes per line */
++ iowrite32(td_chan->bytes_per_line, td_chan->membase +
++ TIMBDMA_OFFS_RX_BPRR);
++ /* enable RX */
++ iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
++ } else {
++ /* address high */
++ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
++ iowrite32(td_desc->txd.phys, td_chan->membase +
++ TIMBDMA_OFFS_TX_DLAR);
++ }
++
++ td_chan->ongoing = true;
++
++ if (td_desc->interrupt)
++ __td_enable_chan_irq(td_chan);
++}
++
++static void __td_finish(struct timb_dma_chan *td_chan)
++{
++ dma_async_tx_callback callback;
++ void *param;
++ struct dma_async_tx_descriptor *txd;
++ struct timb_dma_desc *td_desc;
++
++ /* can happen if the descriptor is canceled */
++ if (list_empty(&td_chan->active_list))
++ return;
++
++ td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
++ desc_node);
++ txd = &td_desc->txd;
++
++ dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
++ txd->cookie);
++
++ /* make sure to stop the transfer */
++ if (td_chan->direction == DMA_FROM_DEVICE)
++ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
++/* Currently no support for stopping DMA transfers
++ else
++ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
++*/
++ td_chan->last_completed_cookie = txd->cookie;
++ td_chan->ongoing = false;
++
++ callback = txd->callback;
++ param = txd->callback_param;
++
++ list_move(&td_desc->desc_node, &td_chan->free_list);
++
++ if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
++ __td_unmap_descs(td_desc,
++ txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);
++
++ /*
++ * The API requires that no submissions are done from a
++ * callback, so we don't need to drop the lock here
++ */
++ if (callback)
++ callback(param);
++}
++
++static u32 __td_ier_mask(struct timb_dma *td)
++{
++ int i;
++ u32 ret = 0;
++
++ for (i = 0; i < td->dma.chancnt; i++) {
++ struct timb_dma_chan *td_chan = td->channels + i;
++ if (td_chan->ongoing) {
++ struct timb_dma_desc *td_desc =
++ list_entry(td_chan->active_list.next,
++ struct timb_dma_desc, desc_node);
++ if (td_desc->interrupt)
++ ret |= 1 << i;
++ }
++ }
++
++ return ret;
++}
++
++static void __td_start_next(struct timb_dma_chan *td_chan)
++{
++ struct timb_dma_desc *td_desc;
++
++ BUG_ON(list_empty(&td_chan->queue));
++ BUG_ON(td_chan->ongoing);
++
++ td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
++ desc_node);
++
++ dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
++ __func__, td_desc->txd.cookie);
++
++ list_move(&td_desc->desc_node, &td_chan->active_list);
++ __td_start_dma(td_chan);
++}
++
++static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
++{
++ struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
++ txd);
++ struct timb_dma_chan *td_chan = container_of(txd->chan,
++ struct timb_dma_chan, chan);
++ dma_cookie_t cookie;
++
++ spin_lock_bh(&td_chan->lock);
++
++ cookie = txd->chan->cookie;
++ if (++cookie < 0)
++ cookie = 1;
++ txd->chan->cookie = cookie;
++ txd->cookie = cookie;
++
++ if (list_empty(&td_chan->active_list)) {
++ dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
++ txd->cookie);
++ list_add_tail(&td_desc->desc_node, &td_chan->active_list);
++ __td_start_dma(td_chan);
++ } else {
++ dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
++ txd->cookie);
++
++ list_add_tail(&td_desc->desc_node, &td_chan->queue);
++ }
++
++ spin_unlock_bh(&td_chan->lock);
++
++ return cookie;
++}
++
++static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
++{
++ struct dma_chan *chan = &td_chan->chan;
++ struct timb_dma_desc *td_desc;
++ int err;
++
++ dev_dbg(chan2dev(chan), "Allocating descriptor\n");
++
++ td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
++ if (!td_desc) {
++ dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
++ goto err;
++ }
++
++ td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;
++
++ dev_dbg(chan2dev(chan), "Allocating descriptor list\n");
++
++ td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
++ if (!td_desc->desc_list) {
++ dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
++ goto err;
++ }
++
++ dev_dbg(chan2dev(chan), "Initialising txd\n");
++
++ dma_async_tx_descriptor_init(&td_desc->txd, chan);
++ td_desc->txd.tx_submit = td_tx_submit;
++ td_desc->txd.flags = DMA_CTRL_ACK;
++
++ dev_dbg(chan2dev(chan), "Mapping up decriptor list\n");
++
++ dev_dbg(chan2dev(chan), "parent: %p, list: %p, len: %d\n",
++ chan2dmadev(chan), td_desc->desc_list, td_desc->desc_list_len);
++ td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
++ td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);
++ dev_dbg(chan2dev(chan), "Mapping done, phys: %p\n",
++ (void *)(int)td_desc->txd.phys);
++
++ err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
++ if (err) {
++ dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
++ goto err;
++ }
++
++ dev_dbg(chan2dev(chan), "Allocated desc: %p\n", td_desc);
++ return td_desc;
++err:
++ kfree(td_desc->desc_list);
++ kfree(td_desc);
++
++ return NULL;
++
++}
++
++static void td_free_desc(struct timb_dma_desc *td_desc)
++{
++ dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
++ dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
++ td_desc->desc_list_len, DMA_TO_DEVICE);
++
++ kfree(td_desc->desc_list);
++ kfree(td_desc);
++}
++
++static void td_desc_put(struct timb_dma_chan *td_chan,
++ struct timb_dma_desc *td_desc)
++{
++ dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);
++
++ spin_lock_bh(&td_chan->lock);
++ list_add(&td_desc->desc_node, &td_chan->free_list);
++ spin_unlock_bh(&td_chan->lock);
++}
++
++static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
++{
++ struct timb_dma_desc *td_desc, *_td_desc;
++ struct timb_dma_desc *ret = NULL;
++
++ spin_lock_bh(&td_chan->lock);
++ list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
++ desc_node) {
++ if (async_tx_test_ack(&td_desc->txd)) {
++ list_del(&td_desc->desc_node);
++ ret = td_desc;
++ break;
++ }
++ dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
++ td_desc);
++ }
++ spin_unlock_bh(&td_chan->lock);
++
++ return ret;
++}
++
++static int td_alloc_chan_resources(struct dma_chan *chan)
++{
++ struct timb_dma_chan *td_chan =
++ container_of(chan, struct timb_dma_chan, chan);
++ int i;
++
++ dev_dbg(chan2dev(chan), "%s: entry\n", __func__);
++
++ BUG_ON(!list_empty(&td_chan->free_list));
++ for (i = 0; i < td_chan->descs; i++) {
++ struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
++ if (!td_desc) {
++ if (i)
++ break;
++ else {
++ dev_err(chan2dev(chan),
++ "Couldnt allocate any descriptors\n");
++ return -ENOMEM;
++ }
++ }
++
++ td_desc_put(td_chan, td_desc);
++ }
++
++ spin_lock_bh(&td_chan->lock);
++ td_chan->last_completed_cookie = 1;
++ chan->cookie = 1;
++ spin_unlock_bh(&td_chan->lock);
++
++ return 0;
++}
++
++static void td_free_chan_resources(struct dma_chan *chan)
++{
++ struct timb_dma_chan *td_chan =
++ container_of(chan, struct timb_dma_chan, chan);
++ struct timb_dma_desc *td_desc, *_td_desc;
++ LIST_HEAD(list);
++
++ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
++
++ /* check that all descriptors are free */
++ BUG_ON(!list_empty(&td_chan->active_list));
++ BUG_ON(!list_empty(&td_chan->queue));
++
++ spin_lock_bh(&td_chan->lock);
++ list_splice_init(&td_chan->free_list, &list);
++ spin_unlock_bh(&td_chan->lock);
++
++ list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
++ dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
++ td_desc);
++ td_free_desc(td_desc);
++ }
++}
++
++static enum dma_status td_is_tx_complete(struct dma_chan *chan,
++ dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
++{
++ struct timb_dma_chan *td_chan =
++ container_of(chan, struct timb_dma_chan, chan);
++ dma_cookie_t last_used;
++ dma_cookie_t last_complete;
++ int ret;
++
++ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
++
++ last_complete = td_chan->last_completed_cookie;
++ last_used = chan->cookie;
++
++ ret = dma_async_is_complete(cookie, last_complete, last_used);
++
++ if (done)
++ *done = last_complete;
++ if (used)
++ *used = last_used;
++
++ dev_dbg(chan2dev(chan),
++ "%s: exit, ret: %d, last_complete: %d, last_used: %d\n",
++ __func__, ret, last_complete, last_used);
++
++ return ret;
++}
++
++static void td_issue_pending(struct dma_chan *chan)
++{
++ struct timb_dma_chan *td_chan =
++ container_of(chan, struct timb_dma_chan, chan);
++
++ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
++ spin_lock_bh(&td_chan->lock);
++
++ if (!list_empty(&td_chan->active_list))
++ /* transfer ongoing */
++ if (__td_dma_done_ack(td_chan))
++ __td_finish(td_chan);
++
++ if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
++ __td_start_next(td_chan);
++
++ spin_unlock_bh(&td_chan->lock);
++}
++
++static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
++ struct scatterlist *sgl, unsigned int sg_len,
++ enum dma_data_direction direction, unsigned long flags)
++{
++ struct timb_dma_chan *td_chan =
++ container_of(chan, struct timb_dma_chan, chan);
++ struct timb_dma_desc *td_desc;
++ struct scatterlist *sg;
++ unsigned int i;
++ unsigned int desc_usage = 0;
++
++ if (!sgl || !sg_len) {
++ dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
++ return NULL;
++ }
++
++ /* even channels are for RX, odd for TX */
++ if (td_chan->direction != direction) {
++ dev_err(chan2dev(chan),
++ "Requesting channel in wrong direction\n");
++ return NULL;
++ }
++
++ td_desc = td_desc_get(td_chan);
++ if (!td_desc) {
++ dev_err(chan2dev(chan), "Not enough descriptors available\n");
++ return NULL;
++ }
++
++ td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
++
++ for_each_sg(sgl, sg, sg_len, i) {
++ int err;
++ if (desc_usage > td_desc->desc_list_len) {
++ dev_err(chan2dev(chan), "No descriptor space\n");
++ return NULL;
++ }
++
++ err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
++ i == (sg_len - 1));
++ if (err) {
++ dev_err(chan2dev(chan), "Failed to update desc: %d\n",
++ err);
++ td_desc_put(td_chan, td_desc);
++ return NULL;
++ }
++ desc_usage += TIMB_DMA_DESC_SIZE;
++ }
++
++ dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
++ td_desc->desc_list_len, DMA_TO_DEVICE);
++
++ return &td_desc->txd;
++}
++
++static void td_terminate_all(struct dma_chan *chan)
++{
++ struct timb_dma_chan *td_chan =
++ container_of(chan, struct timb_dma_chan, chan);
++ struct timb_dma_desc *td_desc, *_td_desc;
++
++ dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
++
++ /* first the easy part, put the queue into the free list */
++ spin_lock_bh(&td_chan->lock);
++ list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
++ desc_node)
++ list_move(&td_desc->desc_node, &td_chan->free_list);
++
++ /* now tear down the running */
++ __td_finish(td_chan);
++ spin_unlock_bh(&td_chan->lock);
++}
++
++static void td_tasklet(unsigned long data)
++{
++ struct timb_dma *td = (struct timb_dma *)data;
++ u32 isr;
++ u32 ipr;
++ u32 ier;
++ int i;
++
++ isr = ioread32(td->membase + TIMBDMA_ISR);
++ ipr = isr & __td_ier_mask(td);
++
++ /* ack the interrupts */
++ iowrite32(ipr, td->membase + TIMBDMA_ISR);
++
++ for (i = 0; i < td->dma.chancnt; i++)
++ if (ipr & (1 << i)) {
++ struct timb_dma_chan *td_chan = td->channels + i;
++ spin_lock(&td_chan->lock);
++ __td_finish(td_chan);
++ if (!list_empty(&td_chan->queue))
++ __td_start_next(td_chan);
++ spin_unlock(&td_chan->lock);
++ }
++
++ ier = __td_ier_mask(td);
++ iowrite32(ier, td->membase + TIMBDMA_IER);
++}
++
++
++static irqreturn_t td_irq(int irq, void *devid)
++{
++ struct timb_dma *td = devid;
++ u32 ipr = ioread32(td->membase + TIMBDMA_IPR);
++
++ if (ipr) {
++ /* disable interrupts, will be re-enabled in tasklet */
++ iowrite32(0, td->membase + TIMBDMA_IER);
++
++ tasklet_schedule(&td->tasklet);
++
++ return IRQ_HANDLED;
++ } else
++ return IRQ_NONE;
++}
++
++
++static int __devinit td_probe(struct platform_device *pdev)
++{
++ struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
++ struct timb_dma *td;
++ struct resource *iomem;
++ int irq;
++ int err;
++ int i;
++
++ if (!pdata) {
++ dev_err(&pdev->dev, "No platform data\n");
++ return -EINVAL;
++ }
++
++ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!iomem)
++ return -EINVAL;
++
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0)
++ return irq;
++
++ if (!request_mem_region(iomem->start, resource_size(iomem),
++ DRIVER_NAME))
++ return -EBUSY;
++
++ td = kzalloc(sizeof(struct timb_dma) +
++ sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
++ if (!td) {
++ err = -ENOMEM;
++ goto err_release_region;
++ }
++
++ dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);
++
++ td->membase = ioremap(iomem->start, resource_size(iomem));
++ if (!td->membase) {
++ dev_err(&pdev->dev, "Failed to remap I/O memory\n");
++ err = -ENOMEM;
++ goto err_free_mem;
++ }
++
++ /* 32bit addressing */
++ iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);
++
++ /* disable and clear any interrupts */
++ iowrite32(0x0, td->membase + TIMBDMA_IER);
++ iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);
++
++ tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td);
++
++ err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
++ if (err) {
++ dev_err(&pdev->dev, "Failed to request IRQ\n");
++ goto err_tasklet_kill;
++ }
++
++ td->dma.device_alloc_chan_resources = td_alloc_chan_resources;
++ td->dma.device_free_chan_resources = td_free_chan_resources;
++ td->dma.device_is_tx_complete = td_is_tx_complete;
++ td->dma.device_issue_pending = td_issue_pending;
++
++ dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
++ td->dma.device_prep_slave_sg = td_prep_slave_sg;
++ td->dma.device_terminate_all = td_terminate_all;
++
++ td->dma.dev = &pdev->dev;
++
++ INIT_LIST_HEAD(&td->dma.channels);
++
++ for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) {
++ struct timb_dma_chan *td_chan = &td->channels[i];
++ struct timb_dma_platform_data_channel *pchan =
++ pdata->channels + i;
++
++ /* even channels are RX, odd are TX */
++ if (((i % 2) && pchan->rx) || (!(i % 2) && !pchan->rx)) {
++ dev_err(&pdev->dev, "Wrong channel configuration\n");
++ err = -EINVAL;
++ goto err_tasklet_kill;
++ }
++
++ td_chan->chan.device = &td->dma;
++ td_chan->chan.cookie = 1;
++ td_chan->chan.chan_id = i;
++ spin_lock_init(&td_chan->lock);
++ INIT_LIST_HEAD(&td_chan->active_list);
++ INIT_LIST_HEAD(&td_chan->queue);
++ INIT_LIST_HEAD(&td_chan->free_list);
++
++ td_chan->descs = pchan->descriptors;
++ td_chan->desc_elems = pchan->descriptor_elements;
++ td_chan->bytes_per_line = pchan->bytes_per_line;
++ td_chan->direction = pchan->rx ? DMA_FROM_DEVICE :
++ DMA_TO_DEVICE;
++
++ td_chan->membase = td->membase +
++ (i / 2) * TIMBDMA_INSTANCE_OFFSET +
++ (pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);
++
++ dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
++ i, td_chan->membase);
++
++ list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
++ }
++
++ err = dma_async_device_register(&td->dma);
++ if (err) {
++ dev_err(&pdev->dev, "Failed to register async device\n");
++ goto err_free_irq;
++ }
++
++ platform_set_drvdata(pdev, td);
++
++ dev_dbg(&pdev->dev, "Probe result: %d\n", err);
++ return err;
++
++err_free_irq:
++ free_irq(irq, td);
++err_tasklet_kill:
++ tasklet_kill(&td->tasklet);
++ iounmap(td->membase);
++err_free_mem:
++ kfree(td);
++err_release_region:
++ release_mem_region(iomem->start, resource_size(iomem));
++
++ return err;
++
++}
++
++static int __devexit td_remove(struct platform_device *pdev)
++{
++ struct timb_dma *td = platform_get_drvdata(pdev);
++ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ int irq = platform_get_irq(pdev, 0);
++
++ dma_async_device_unregister(&td->dma);
++ free_irq(irq, td);
++ tasklet_kill(&td->tasklet);
++ iounmap(td->membase);
++ kfree(td);
++ release_mem_region(iomem->start, resource_size(iomem));
++
++ platform_set_drvdata(pdev, NULL);
++
++ dev_dbg(&pdev->dev, "Removed...\n");
++ return 0;
++}
++
++static struct platform_driver td_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = td_probe,
++ .remove = __exit_p(td_remove),
++};
++
++static int __init td_init(void)
++{
++ return platform_driver_register(&td_driver);
++}
++module_init(td_init);
++
++static void __exit td_exit(void)
++{
++ platform_driver_unregister(&td_driver);
++}
++module_exit(td_exit);
++
++MODULE_LICENSE("GPL v2");
++MODULE_DESCRIPTION("Timberdale DMA controller driver");
++MODULE_AUTHOR("Richard Röjfors <richard.rojfors@pelagicore.com>");
++MODULE_ALIAS("platform:"DRIVER_NAME);
+diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
+index 1f1d88a..b34485f 100644
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -217,7 +217,8 @@ config GPIO_LANGWELL
+
+ config GPIO_TIMBERDALE
+ bool "Support for timberdale GPIO IP"
+- depends on MFD_TIMBERDALE && GPIOLIB && HAS_IOMEM
++ default n
++ depends on HAS_IOMEM
+ ---help---
+ Add support for the GPIO IP in the timberdale FPGA.
+
+diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c
+index a4d344b..1abc486 100644
+--- a/drivers/gpio/timbgpio.c
++++ b/drivers/gpio/timbgpio.c
+@@ -37,6 +37,8 @@
+ #define TGPIO_ICR 0x14
+ #define TGPIO_FLR 0x18
+ #define TGPIO_LVR 0x1c
++#define TGPIO_VER 0x20
++#define TGPIO_BFLR 0x24
+
+ struct timbgpio {
+ void __iomem *membase;
+@@ -125,17 +127,24 @@ static int timbgpio_irq_type(unsigned irq, unsigned trigger)
+ struct timbgpio *tgpio = get_irq_chip_data(irq);
+ int offset = irq - tgpio->irq_base;
+ unsigned long flags;
+- u32 lvr, flr;
++ u32 lvr, flr, bflr = 0;
++ u32 ver;
+
+ if (offset < 0 || offset > tgpio->gpio.ngpio)
+ return -EINVAL;
+
++ ver = ioread32(tgpio->membase + TGPIO_VER);
++
+ spin_lock_irqsave(&tgpio->lock, flags);
+
++
+ lvr = ioread32(tgpio->membase + TGPIO_LVR);
+ flr = ioread32(tgpio->membase + TGPIO_FLR);
++ if (ver > 2)
++ bflr = ioread32(tgpio->membase + TGPIO_BFLR);
+
+ if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
++ bflr &= ~(1 << offset);
+ flr &= ~(1 << offset);
+ if (trigger & IRQ_TYPE_LEVEL_HIGH)
+ lvr |= 1 << offset;
+@@ -143,21 +152,27 @@ static int timbgpio_irq_type(unsigned irq, unsigned trigger)
+ lvr &= ~(1 << offset);
+ }
+
+- if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
+- return -EINVAL;
+- else {
++ if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
++ if (ver < 3)
++ return -EINVAL;
++ else {
++ flr |= 1 << offset;
++ bflr |= 1 << offset;
++ }
++ } else {
++ bflr &= ~(1 << offset);
+ flr |= 1 << offset;
+- /* opposite compared to the datasheet, but it mirrors the
+- * reality
+- */
+ if (trigger & IRQ_TYPE_EDGE_FALLING)
+- lvr |= 1 << offset;
+- else
+ lvr &= ~(1 << offset);
++ else
++ lvr |= 1 << offset;
+ }
+
+ iowrite32(lvr, tgpio->membase + TGPIO_LVR);
+ iowrite32(flr, tgpio->membase + TGPIO_FLR);
++ if (ver > 2)
++ iowrite32(bflr, tgpio->membase + TGPIO_BFLR);
++
+ iowrite32(1 << offset, tgpio->membase + TGPIO_ICR);
+ spin_unlock_irqrestore(&tgpio->lock, flags);
+
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index 5f318ce..44ff5c8 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -564,6 +564,16 @@ config I2C_VERSATILE
+ This driver can also be built as a module. If so, the module
+ will be called i2c-versatile.
+
++config I2C_XILINX
++ tristate "Xilinx I2C Controller"
++ depends on EXPERIMENTAL && HAS_IOMEM
++ help
++ If you say yes to this option, support will be included for the
++ Xilinx I2C controller.
++
++ This driver can also be built as a module. If so, the module
++ will be called xilinx_i2c.
++
+ comment "External I2C/SMBus adapter drivers"
+
+ config I2C_PARPORT
+diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
+index 302c551..168f302 100644
+--- a/drivers/i2c/busses/Makefile
++++ b/drivers/i2c/busses/Makefile
+@@ -54,6 +54,7 @@ obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o
+ obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o
+ obj-$(CONFIG_I2C_STU300) += i2c-stu300.o
+ obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
++obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
+
+ # External I2C/SMBus adapter drivers
+ obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
+diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
+new file mode 100644
+index 0000000..6946a09
+--- /dev/null
++++ b/drivers/i2c/busses/i2c-xiic.c
+@@ -0,0 +1,824 @@
++/*
++ * i2c-xiic.c
++ * Copyright (c) 2002-2007 Xilinx Inc.
++ * Copyright (c) 2009-2010 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ *
++ *
++ * This code was implemented by Mocean Laboratories AB when porting linux
++ * to the automotive development board Russellville. The copyright holder
++ * as seen in the header is Intel corporation.
++ * Mocean Laboratories forked off the GNU/Linux platform work into a
++ * separate company called Pelagicore AB, which committed the code to the
++ * kernel.
++ */
++
++/* Supports:
++ * Xilinx IIC
++ */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/platform_device.h>
++#include <linux/i2c.h>
++#include <linux/interrupt.h>
++#include <linux/wait.h>
++#include <linux/i2c-xiic.h>
++#include <linux/io.h>
++
++#define DRIVER_NAME "xiic-i2c"
++
++enum xilinx_i2c_state {
++ STATE_DONE,
++ STATE_ERROR,
++ STATE_START
++};
++
++/**
++ * struct xiic_i2c - Internal representation of the XIIC I2C bus
++ * @base: Memory base of the HW registers
++ * @wait: Wait queue for callers
++ * @adap: Kernel adapter representation
++ * @tx_msg: Messages from above to be sent
++ * @lock: Mutual exclusion
++ * @tx_pos: Current pos in TX message
++ * @nmsgs: Number of messages in tx_msg
++ * @state: See STATE_
++ * @rx_msg: Current RX message
++ * @rx_pos: Position within current RX message
++ */
++struct xiic_i2c {
++ void __iomem *base;
++ wait_queue_head_t wait;
++ struct i2c_adapter adap;
++ struct i2c_msg *tx_msg;
++ spinlock_t lock;
++ unsigned int tx_pos;
++ unsigned int nmsgs;
++ enum xilinx_i2c_state state;
++ struct i2c_msg *rx_msg;
++ int rx_pos;
++};
++
++
++#define XIIC_MSB_OFFSET 0
++#define XIIC_REG_OFFSET (0x100+XIIC_MSB_OFFSET)
++
++/*
++ * Register offsets in bytes from RegisterBase. Three is added to the
++ * base offset to access LSB (IBM style) of the word
++ */
++#define XIIC_CR_REG_OFFSET (0x00+XIIC_REG_OFFSET) /* Control Register */
++#define XIIC_SR_REG_OFFSET (0x04+XIIC_REG_OFFSET) /* Status Register */
++#define XIIC_DTR_REG_OFFSET (0x08+XIIC_REG_OFFSET) /* Data Tx Register */
++#define XIIC_DRR_REG_OFFSET (0x0C+XIIC_REG_OFFSET) /* Data Rx Register */
++#define XIIC_ADR_REG_OFFSET (0x10+XIIC_REG_OFFSET) /* Address Register */
++#define XIIC_TFO_REG_OFFSET (0x14+XIIC_REG_OFFSET) /* Tx FIFO Occupancy */
++#define XIIC_RFO_REG_OFFSET (0x18+XIIC_REG_OFFSET) /* Rx FIFO Occupancy */
++#define XIIC_TBA_REG_OFFSET (0x1C+XIIC_REG_OFFSET) /* 10 Bit Address reg */
++#define XIIC_RFD_REG_OFFSET (0x20+XIIC_REG_OFFSET) /* Rx FIFO Depth reg */
++#define XIIC_GPO_REG_OFFSET (0x24+XIIC_REG_OFFSET) /* Output Register */
++
++/* Control Register masks */
++#define XIIC_CR_ENABLE_DEVICE_MASK 0x01 /* Device enable = 1 */
++#define XIIC_CR_TX_FIFO_RESET_MASK 0x02 /* Transmit FIFO reset=1 */
++#define XIIC_CR_MSMS_MASK 0x04 /* Master starts Txing=1 */
++#define XIIC_CR_DIR_IS_TX_MASK 0x08 /* Dir of tx. Txing=1 */
++#define XIIC_CR_NO_ACK_MASK 0x10 /* Tx Ack. NO ack = 1 */
++#define XIIC_CR_REPEATED_START_MASK 0x20 /* Repeated start = 1 */
++#define XIIC_CR_GENERAL_CALL_MASK 0x40 /* Gen Call enabled = 1 */
++
++/* Status Register masks */
++#define XIIC_SR_GEN_CALL_MASK 0x01 /* 1=a mstr issued a GC */
++#define XIIC_SR_ADDR_AS_SLAVE_MASK 0x02 /* 1=when addr as slave */
++#define XIIC_SR_BUS_BUSY_MASK 0x04 /* 1 = bus is busy */
++#define XIIC_SR_MSTR_RDING_SLAVE_MASK 0x08 /* 1=Dir: mstr <-- slave */
++#define XIIC_SR_TX_FIFO_FULL_MASK 0x10 /* 1 = Tx FIFO full */
++#define XIIC_SR_RX_FIFO_FULL_MASK 0x20 /* 1 = Rx FIFO full */
++#define XIIC_SR_RX_FIFO_EMPTY_MASK 0x40 /* 1 = Rx FIFO empty */
++#define XIIC_SR_TX_FIFO_EMPTY_MASK 0x80 /* 1 = Tx FIFO empty */
++
++/* Interrupt Status Register masks Interrupt occurs when... */
++#define XIIC_INTR_ARB_LOST_MASK 0x01 /* 1 = arbitration lost */
++#define XIIC_INTR_TX_ERROR_MASK 0x02 /* 1=Tx error/msg complete */
++#define XIIC_INTR_TX_EMPTY_MASK 0x04 /* 1 = Tx FIFO/reg empty */
++#define XIIC_INTR_RX_FULL_MASK 0x08 /* 1=Rx FIFO/reg=OCY level */
++#define XIIC_INTR_BNB_MASK 0x10 /* 1 = Bus not busy */
++#define XIIC_INTR_AAS_MASK 0x20 /* 1 = when addr as slave */
++#define XIIC_INTR_NAAS_MASK 0x40 /* 1 = not addr as slave */
++#define XIIC_INTR_TX_HALF_MASK 0x80 /* 1 = TX FIFO half empty */
++
++/* The following constants specify the depth of the FIFOs */
++#define IIC_RX_FIFO_DEPTH 16 /* Rx fifo capacity */
++#define IIC_TX_FIFO_DEPTH 16 /* Tx fifo capacity */
++
++/* The following constants specify groups of interrupts that are typically
++ * enabled or disables at the same time
++ */
++#define XIIC_TX_INTERRUPTS \
++(XIIC_INTR_TX_ERROR_MASK | XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)
++
++#define XIIC_TX_RX_INTERRUPTS (XIIC_INTR_RX_FULL_MASK | XIIC_TX_INTERRUPTS)
++
++/* The following constants are used with the following macros to specify the
++ * operation, a read or write operation.
++ */
++#define XIIC_READ_OPERATION 1
++#define XIIC_WRITE_OPERATION 0
++
++/*
++ * Tx Fifo upper bit masks.
++ */
++#define XIIC_TX_DYN_START_MASK 0x0100 /* 1 = Set dynamic start */
++#define XIIC_TX_DYN_STOP_MASK 0x0200 /* 1 = Set dynamic stop */
++
++/*
++ * The following constants define the register offsets for the Interrupt
++ * registers. There are some holes in the memory map for reserved addresses
++ * to allow other registers to be added and still match the memory map of the
++ * interrupt controller registers
++ */
++#define XIIC_DGIER_OFFSET 0x1C /* Device Global Interrupt Enable Register */
++#define XIIC_IISR_OFFSET 0x20 /* Interrupt Status Register */
++#define XIIC_IIER_OFFSET 0x28 /* Interrupt Enable Register */
++#define XIIC_RESETR_OFFSET 0x40 /* Reset Register */
++
++#define XIIC_RESET_MASK 0xAUL
++
++/*
++ * The following constant is used for the device global interrupt enable
++ * register, to enable all interrupts for the device, this is the only bit
++ * in the register
++ */
++#define XIIC_GINTR_ENABLE_MASK 0x80000000UL
++
++#define xiic_tx_space(i2c) ((i2c)->tx_msg->len - (i2c)->tx_pos)
++#define xiic_rx_space(i2c) ((i2c)->rx_msg->len - (i2c)->rx_pos)
++
++static void xiic_start_xfer(struct xiic_i2c *i2c);
++static void __xiic_start_xfer(struct xiic_i2c *i2c);
++
++static inline void xiic_setreg8(struct xiic_i2c *i2c, int reg, u8 value)
++{
++ iowrite8(value, i2c->base + reg);
++}
++
++static inline u8 xiic_getreg8(struct xiic_i2c *i2c, int reg)
++{
++ return ioread8(i2c->base + reg);
++}
++
++static inline void xiic_setreg16(struct xiic_i2c *i2c, int reg, u16 value)
++{
++ iowrite16(value, i2c->base + reg);
++}
++
++static inline void xiic_setreg32(struct xiic_i2c *i2c, int reg, int value)
++{
++ iowrite32(value, i2c->base + reg);
++}
++
++static inline int xiic_getreg32(struct xiic_i2c *i2c, int reg)
++{
++ return ioread32(i2c->base + reg);
++}
++
++static inline void xiic_irq_dis(struct xiic_i2c *i2c, u32 mask)
++{
++ u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
++ xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier & ~mask);
++}
++
++static inline void xiic_irq_en(struct xiic_i2c *i2c, u32 mask)
++{
++ u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
++ xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier | mask);
++}
++
++static inline void xiic_irq_clr(struct xiic_i2c *i2c, u32 mask)
++{
++ u32 isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
++ xiic_setreg32(i2c, XIIC_IISR_OFFSET, isr & mask);
++}
++
++static inline void xiic_irq_clr_en(struct xiic_i2c *i2c, u32 mask)
++{
++ xiic_irq_clr(i2c, mask);
++ xiic_irq_en(i2c, mask);
++}
++
++static void xiic_clear_rx_fifo(struct xiic_i2c *i2c)
++{
++ u8 sr;
++ for (sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET);
++ !(sr & XIIC_SR_RX_FIFO_EMPTY_MASK);
++ sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET))
++ xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);
++}
++
++static void xiic_reinit(struct xiic_i2c *i2c)
++{
++ xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK);
++
++ /* Set receive Fifo depth to maximum (zero based). */
++ xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, IIC_RX_FIFO_DEPTH - 1);
++
++ /* Reset Tx Fifo. */
++ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_TX_FIFO_RESET_MASK);
++
++ /* Enable IIC Device, remove Tx Fifo reset & disable general call. */
++ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_ENABLE_DEVICE_MASK);
++
++ /* make sure RX fifo is empty */
++ xiic_clear_rx_fifo(i2c);
++
++ /* Enable interrupts */
++ xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
++
++ xiic_irq_clr_en(i2c, XIIC_INTR_AAS_MASK | XIIC_INTR_ARB_LOST_MASK);
++}
++
++static void xiic_deinit(struct xiic_i2c *i2c)
++{
++ u8 cr;
++
++ xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK);
++
++ /* Disable IIC Device. */
++ cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
++ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr & ~XIIC_CR_ENABLE_DEVICE_MASK);
++}
++
++static void xiic_read_rx(struct xiic_i2c *i2c)
++{
++ u8 bytes_in_fifo;
++ int i;
++
++ bytes_in_fifo = xiic_getreg8(i2c, XIIC_RFO_REG_OFFSET) + 1;
++
++ dev_dbg(i2c->adap.dev.parent, "%s entry, bytes in fifo: %d, msg: %d"
++ ", SR: 0x%x, CR: 0x%x\n",
++ __func__, bytes_in_fifo, xiic_rx_space(i2c),
++ xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
++ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
++
++ if (bytes_in_fifo > xiic_rx_space(i2c))
++ bytes_in_fifo = xiic_rx_space(i2c);
++
++ for (i = 0; i < bytes_in_fifo; i++)
++ i2c->rx_msg->buf[i2c->rx_pos++] =
++ xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);
++
++ xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET,
++ (xiic_rx_space(i2c) > IIC_RX_FIFO_DEPTH) ?
++ IIC_RX_FIFO_DEPTH - 1 : xiic_rx_space(i2c) - 1);
++}
++
++static int xiic_tx_fifo_space(struct xiic_i2c *i2c)
++{
++ /* return the actual space left in the FIFO */
++ return IIC_TX_FIFO_DEPTH - xiic_getreg8(i2c, XIIC_TFO_REG_OFFSET) - 1;
++}
++
++static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
++{
++ u8 fifo_space = xiic_tx_fifo_space(i2c);
++ int len = xiic_tx_space(i2c);
++
++ len = (len > fifo_space) ? fifo_space : len;
++
++ dev_dbg(i2c->adap.dev.parent, "%s entry, len: %d, fifo space: %d\n",
++ __func__, len, fifo_space);
++
++ while (len--) {
++ u16 data = i2c->tx_msg->buf[i2c->tx_pos++];
++ if ((xiic_tx_space(i2c) == 0) && (i2c->nmsgs == 1)) {
++ /* last message in transfer -> STOP */
++ data |= XIIC_TX_DYN_STOP_MASK;
++ dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__);
++
++ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
++ } else
++ xiic_setreg8(i2c, XIIC_DTR_REG_OFFSET, data);
++ }
++}
++
++static void xiic_wakeup(struct xiic_i2c *i2c, int code)
++{
++ i2c->tx_msg = NULL;
++ i2c->rx_msg = NULL;
++ i2c->nmsgs = 0;
++ i2c->state = code;
++ wake_up(&i2c->wait);
++}
++
++static void xiic_process(struct xiic_i2c *i2c)
++{
++ u32 pend, isr, ier;
++ u32 clr = 0;
++
++ /* Get the interrupt Status from the IPIF. There is no clearing of
++ * interrupts in the IPIF. Interrupts must be cleared at the source.
++ * To find which interrupts are pending; AND interrupts pending with
++ * interrupts masked.
++ */
++ isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
++ ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
++ pend = isr & ier;
++
++ dev_dbg(i2c->adap.dev.parent, "%s entry, IER: 0x%x, ISR: 0x%x, "
++ "pend: 0x%x, SR: 0x%x, msg: %p, nmsgs: %d\n",
++ __func__, ier, isr, pend, xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
++ i2c->tx_msg, i2c->nmsgs);
++
++ /* Do not process a device's interrupts if the device has no
++ * interrupts pending
++ */
++ if (!pend)
++ return;
++
++ /* Service requesting interrupt */
++ if ((pend & XIIC_INTR_ARB_LOST_MASK) ||
++ ((pend & XIIC_INTR_TX_ERROR_MASK) &&
++ !(pend & XIIC_INTR_RX_FULL_MASK))) {
++		/* bus arbitration lost, or...
++ * Transmit error _OR_ RX completed
++ * if this happens when RX_FULL is not set
++ * this is probably a TX error
++ */
++
++ dev_dbg(i2c->adap.dev.parent, "%s error\n", __func__);
++
++		/* dynamic mode seems to suffer from problems if we just flush
++ * fifos and the next message is a TX with len 0 (only addr)
++ * reset the IP instead of just flush fifos
++ */
++ xiic_reinit(i2c);
++
++ if (i2c->tx_msg)
++ xiic_wakeup(i2c, STATE_ERROR);
++
++ } else if (pend & XIIC_INTR_RX_FULL_MASK) {
++ /* Receive register/FIFO is full */
++
++ clr = XIIC_INTR_RX_FULL_MASK;
++ if (!i2c->rx_msg) {
++ dev_dbg(i2c->adap.dev.parent,
++ "%s unexpexted RX IRQ\n", __func__);
++ xiic_clear_rx_fifo(i2c);
++ goto out;
++ }
++
++ xiic_read_rx(i2c);
++ if (xiic_rx_space(i2c) == 0) {
++ /* this is the last part of the message */
++ i2c->rx_msg = NULL;
++
++ /* also clear TX error if there (RX complete) */
++ clr |= (isr & XIIC_INTR_TX_ERROR_MASK);
++
++ dev_dbg(i2c->adap.dev.parent,
++ "%s end of message, nmsgs: %d\n",
++ __func__, i2c->nmsgs);
++
++ /* send next message if this wasn't the last,
++			 * otherwise the transfer will be finalised when
++ * receiving the bus not busy interrupt
++ */
++ if (i2c->nmsgs > 1) {
++ i2c->nmsgs--;
++ i2c->tx_msg++;
++ dev_dbg(i2c->adap.dev.parent,
++ "%s will start next...\n", __func__);
++
++ __xiic_start_xfer(i2c);
++ }
++ }
++ } else if (pend & XIIC_INTR_BNB_MASK) {
++ /* IIC bus has transitioned to not busy */
++ clr = XIIC_INTR_BNB_MASK;
++
++ /* The bus is not busy, disable BusNotBusy interrupt */
++ xiic_irq_dis(i2c, XIIC_INTR_BNB_MASK);
++
++ if (!i2c->tx_msg)
++ goto out;
++
++ if ((i2c->nmsgs == 1) && !i2c->rx_msg &&
++ xiic_tx_space(i2c) == 0)
++ xiic_wakeup(i2c, STATE_DONE);
++ else
++ xiic_wakeup(i2c, STATE_ERROR);
++
++ } else if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) {
++		/* Transmit register/FIFO is empty or half empty */
++
++ clr = pend &
++ (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK);
++
++ if (!i2c->tx_msg) {
++ dev_dbg(i2c->adap.dev.parent,
++ "%s unexpexted TX IRQ\n", __func__);
++ goto out;
++ }
++
++ xiic_fill_tx_fifo(i2c);
++
++ /* current message sent and there is space in the fifo */
++ if (!xiic_tx_space(i2c) && xiic_tx_fifo_space(i2c) >= 2) {
++ dev_dbg(i2c->adap.dev.parent,
++ "%s end of message sent, nmsgs: %d\n",
++ __func__, i2c->nmsgs);
++ if (i2c->nmsgs > 1) {
++ i2c->nmsgs--;
++ i2c->tx_msg++;
++ __xiic_start_xfer(i2c);
++ } else {
++ xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);
++
++ dev_dbg(i2c->adap.dev.parent,
++ "%s Got TX IRQ but no more to do...\n",
++ __func__);
++ }
++ } else if (!xiic_tx_space(i2c) && (i2c->nmsgs == 1))
++ /* current frame is sent and is last,
++ * make sure to disable tx half
++ */
++ xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);
++ } else {
++ /* got IRQ which is not acked */
++ dev_err(i2c->adap.dev.parent, "%s Got unexpected IRQ\n",
++ __func__);
++ clr = pend;
++ }
++out:
++ dev_dbg(i2c->adap.dev.parent, "%s clr: 0x%x\n", __func__, clr);
++
++ xiic_setreg32(i2c, XIIC_IISR_OFFSET, clr);
++}
++
++static int xiic_bus_busy(struct xiic_i2c *i2c)
++{
++ u8 sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET);
++
++ return (sr & XIIC_SR_BUS_BUSY_MASK) ? -EBUSY : 0;
++}
++
++static int xiic_busy(struct xiic_i2c *i2c)
++{
++ int tries = 3;
++ int err;
++
++ if (i2c->tx_msg)
++ return -EBUSY;
++
++ /* for instance if previous transfer was terminated due to TX error
++	 * it might be that the bus is on its way to becoming available
++ * give it at most 3 ms to wake
++ */
++ err = xiic_bus_busy(i2c);
++ while (err && tries--) {
++ mdelay(1);
++ err = xiic_bus_busy(i2c);
++ }
++
++ return err;
++}
++
++static void xiic_start_recv(struct xiic_i2c *i2c)
++{
++ u8 rx_watermark;
++ struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
++
++ /* Clear and enable Rx full interrupt. */
++ xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);
++
++ /* we want to get all but last byte, because the TX_ERROR IRQ is used
++	 * to indicate error ACK on the address, and negative ack on the last
++ * received byte, so to not mix them receive all but last.
++ * In the case where there is only one byte to receive
++ * we can check if ERROR and RX full is set at the same time
++ */
++ rx_watermark = msg->len;
++ if (rx_watermark > IIC_RX_FIFO_DEPTH)
++ rx_watermark = IIC_RX_FIFO_DEPTH;
++ xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);
++
++ if (!(msg->flags & I2C_M_NOSTART))
++ /* write the address */
++ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
++ (msg->addr << 1) | XIIC_READ_OPERATION |
++ XIIC_TX_DYN_START_MASK);
++
++ xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
++
++ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
++ msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
++ if (i2c->nmsgs == 1)
++ /* very last, enable bus not busy as well */
++ xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
++
++ /* the message is tx:ed */
++ i2c->tx_pos = msg->len;
++}
++
++static void xiic_start_send(struct xiic_i2c *i2c)
++{
++ struct i2c_msg *msg = i2c->tx_msg;
++
++ xiic_irq_clr(i2c, XIIC_INTR_TX_ERROR_MASK);
++
++ dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, len: %d, "
++ "ISR: 0x%x, CR: 0x%x\n",
++ __func__, msg, msg->len, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
++ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
++
++ if (!(msg->flags & I2C_M_NOSTART)) {
++ /* write the address */
++ u16 data = ((msg->addr << 1) & 0xfe) | XIIC_WRITE_OPERATION |
++ XIIC_TX_DYN_START_MASK;
++ if ((i2c->nmsgs == 1) && msg->len == 0)
++ /* no data and last message -> add STOP */
++ data |= XIIC_TX_DYN_STOP_MASK;
++
++ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
++ }
++
++ xiic_fill_tx_fifo(i2c);
++
++ /* Clear any pending Tx empty, Tx Error and then enable them. */
++ xiic_irq_clr_en(i2c, XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_ERROR_MASK |
++ XIIC_INTR_BNB_MASK);
++}
++
++static irqreturn_t xiic_isr(int irq, void *dev_id)
++{
++ struct xiic_i2c *i2c = dev_id;
++
++ spin_lock(&i2c->lock);
++ /* disable interrupts globally */
++ xiic_setreg32(i2c, XIIC_DGIER_OFFSET, 0);
++
++ dev_dbg(i2c->adap.dev.parent, "%s entry\n", __func__);
++
++ xiic_process(i2c);
++
++ xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
++ spin_unlock(&i2c->lock);
++
++ return IRQ_HANDLED;
++}
++
++static void __xiic_start_xfer(struct xiic_i2c *i2c)
++{
++ int first = 1;
++ int fifo_space = xiic_tx_fifo_space(i2c);
++ dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, fifos space: %d\n",
++ __func__, i2c->tx_msg, fifo_space);
++
++ if (!i2c->tx_msg)
++ return;
++
++ i2c->rx_pos = 0;
++ i2c->tx_pos = 0;
++ i2c->state = STATE_START;
++ while ((fifo_space >= 2) && (first || (i2c->nmsgs > 1))) {
++ if (!first) {
++ i2c->nmsgs--;
++ i2c->tx_msg++;
++ i2c->tx_pos = 0;
++ } else
++ first = 0;
++
++ if (i2c->tx_msg->flags & I2C_M_RD) {
++			/* we don't dare putting several reads in the FIFO */
++ xiic_start_recv(i2c);
++ return;
++ } else {
++ xiic_start_send(i2c);
++ if (xiic_tx_space(i2c) != 0) {
++ /* the message could not be completely sent */
++ break;
++ }
++ }
++
++ fifo_space = xiic_tx_fifo_space(i2c);
++ }
++
++ /* there are more messages or the current one could not be completely
++ * put into the FIFO, also enable the half empty interrupt
++ */
++ if (i2c->nmsgs > 1 || xiic_tx_space(i2c))
++ xiic_irq_clr_en(i2c, XIIC_INTR_TX_HALF_MASK);
++
++}
++
++static void xiic_start_xfer(struct xiic_i2c *i2c)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&i2c->lock, flags);
++ xiic_reinit(i2c);
++ /* disable interrupts globally */
++ xiic_setreg32(i2c, XIIC_DGIER_OFFSET, 0);
++ spin_unlock_irqrestore(&i2c->lock, flags);
++
++ __xiic_start_xfer(i2c);
++ xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
++}
++
++static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
++{
++ struct xiic_i2c *i2c = i2c_get_adapdata(adap);
++ int err;
++
++ dev_dbg(adap->dev.parent, "%s entry SR: 0x%x\n", __func__,
++ xiic_getreg8(i2c, XIIC_SR_REG_OFFSET));
++
++ err = xiic_busy(i2c);
++ if (err)
++ return err;
++
++ i2c->tx_msg = msgs;
++ i2c->nmsgs = num;
++
++ xiic_start_xfer(i2c);
++
++ if (wait_event_timeout(i2c->wait, (i2c->state == STATE_ERROR) ||
++ (i2c->state == STATE_DONE), HZ))
++ return (i2c->state == STATE_DONE) ? num : -EIO;
++ else {
++ i2c->tx_msg = NULL;
++ i2c->rx_msg = NULL;
++ i2c->nmsgs = 0;
++ return -ETIMEDOUT;
++ }
++}
++
++static u32 xiic_func(struct i2c_adapter *adap)
++{
++ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
++}
++
++static const struct i2c_algorithm xiic_algorithm = {
++ .master_xfer = xiic_xfer,
++ .functionality = xiic_func,
++};
++
++static struct i2c_adapter xiic_adapter = {
++ .owner = THIS_MODULE,
++ .name = DRIVER_NAME,
++ .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
++ .algo = &xiic_algorithm,
++};
++
++
++static int __devinit xiic_i2c_probe(struct platform_device *pdev)
++{
++ struct xiic_i2c *i2c;
++ struct xiic_i2c_platform_data *pdata;
++ struct resource *res;
++ int ret, irq;
++ u8 i;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res)
++ goto resource_missing;
++
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0)
++ goto resource_missing;
++
++ pdata = (struct xiic_i2c_platform_data *) pdev->dev.platform_data;
++ if (!pdata)
++ return -EINVAL;
++
++ i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
++ if (!i2c)
++ return -ENOMEM;
++
++ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
++ dev_err(&pdev->dev, "Memory region busy\n");
++ ret = -EBUSY;
++ goto request_mem_failed;
++ }
++
++ i2c->base = ioremap(res->start, resource_size(res));
++ if (!i2c->base) {
++ dev_err(&pdev->dev, "Unable to map registers\n");
++ ret = -EIO;
++ goto map_failed;
++ }
++
++ /* hook up driver to tree */
++ platform_set_drvdata(pdev, i2c);
++ i2c->adap = xiic_adapter;
++ i2c_set_adapdata(&i2c->adap, i2c);
++ i2c->adap.dev.parent = &pdev->dev;
++
++ xiic_reinit(i2c);
++
++ spin_lock_init(&i2c->lock);
++ init_waitqueue_head(&i2c->wait);
++ ret = request_irq(irq, xiic_isr, 0, pdev->name, i2c);
++ if (ret) {
++ dev_err(&pdev->dev, "Cannot claim IRQ\n");
++ goto request_irq_failed;
++ }
++
++ /* add i2c adapter to i2c tree */
++ ret = i2c_add_adapter(&i2c->adap);
++ if (ret) {
++ dev_err(&pdev->dev, "Failed to add adapter\n");
++ goto add_adapter_failed;
++ }
++
++ /* add in known devices to the bus */
++ for (i = 0; i < pdata->num_devices; i++)
++ i2c_new_device(&i2c->adap, pdata->devices + i);
++
++ return 0;
++
++add_adapter_failed:
++ free_irq(irq, i2c);
++request_irq_failed:
++ xiic_deinit(i2c);
++ iounmap(i2c->base);
++map_failed:
++ release_mem_region(res->start, resource_size(res));
++request_mem_failed:
++ kfree(i2c);
++
++ return ret;
++resource_missing:
++ dev_err(&pdev->dev, "IRQ or Memory resource is missing\n");
++ return -ENOENT;
++}
++
++static int __devexit xiic_i2c_remove(struct platform_device* pdev)
++{
++ struct xiic_i2c *i2c = platform_get_drvdata(pdev);
++ struct resource *res;
++
++ /* remove adapter & data */
++ i2c_del_adapter(&i2c->adap);
++
++ xiic_deinit(i2c);
++
++ platform_set_drvdata(pdev, NULL);
++
++ free_irq(platform_get_irq(pdev, 0), i2c);
++
++ iounmap(i2c->base);
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (res)
++ release_mem_region(res->start, resource_size(res));
++
++ kfree(i2c);
++
++ return 0;
++}
++
++
++/* work with hotplug and coldplug */
++MODULE_ALIAS("platform:"DRIVER_NAME);
++
++static struct platform_driver xiic_i2c_driver = {
++ .probe = xiic_i2c_probe,
++ .remove = __devexit_p(xiic_i2c_remove),
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = DRIVER_NAME,
++ },
++};
++
++static int __init xiic_i2c_init(void)
++{
++ return platform_driver_register(&xiic_i2c_driver);
++}
++
++static void __exit xiic_i2c_exit(void)
++{
++ platform_driver_unregister(&xiic_i2c_driver);
++}
++
++module_init(xiic_i2c_init);
++module_exit(xiic_i2c_exit);
++
++MODULE_AUTHOR("info@mocean-labs.com");
++MODULE_DESCRIPTION("Xilinx I2C bus driver");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
+index 7ef0d14..62a1696 100644
+--- a/drivers/input/touchscreen/tsc2007.c
++++ b/drivers/input/touchscreen/tsc2007.c
+@@ -29,6 +29,7 @@
+
+ #define TS_POLL_DELAY 1 /* ms delay between samples */
+ #define TS_POLL_PERIOD 1 /* ms delay between samples */
++#define TS_SLOW_POLL_PERIOD 20 /* ms delay between pen down check */
+
+ #define TSC2007_MEASURE_TEMP0 (0x0 << 4)
+ #define TSC2007_MEASURE_AUX (0x2 << 4)
+@@ -77,6 +78,7 @@ struct tsc2007 {
+ u16 x_plate_ohms;
+
+ bool pendown;
++ bool ignore_next_irq;
+ int irq;
+
+ int (*get_pendown_state)(void);
+@@ -228,14 +230,39 @@ static void tsc2007_work(struct work_struct *work)
+ if (ts->pendown)
+ schedule_delayed_work(&ts->work,
+ msecs_to_jiffies(TS_POLL_PERIOD));
+- else
+- enable_irq(ts->irq);
++ else {
++ /* if we don't have the get pen down state callback we
++ * ignore the next IRQ because it is provoked when we checked
++ * the touch pressure.
++ * If the user really touches the screen we will get a new
++ * interrupt anyway so it is safe to ignore it
++ *
++ * This is basically implementing this part of the manual:
++ * "In both cases previously listed, it is recommended that
++ * whenever the host writes to the TSC2007, the master
++ * processor masks the interrupt associated to PENIRQ.
++ * This masking prevents false triggering of interrupts when
++ * the PENIRQ line is disabled in the cases previously listed."
++ */
++ if (!ts->get_pendown_state)
++ ts->ignore_next_irq = true;
++ if (ts->irq)
++ enable_irq(ts->irq);
++ else
++ schedule_delayed_work(&ts->work,
++ msecs_to_jiffies(TS_SLOW_POLL_PERIOD));
++ }
+ }
+
+ static irqreturn_t tsc2007_irq(int irq, void *handle)
+ {
+ struct tsc2007 *ts = handle;
+
++ if (ts->ignore_next_irq) {
++ ts->ignore_next_irq = false;
++ return IRQ_HANDLED;
++ }
++
+ if (!ts->get_pendown_state || likely(ts->get_pendown_state())) {
+ disable_irq_nosync(ts->irq);
+ schedule_delayed_work(&ts->work,
+@@ -250,15 +277,18 @@ static irqreturn_t tsc2007_irq(int irq, void *handle)
+
+ static void tsc2007_free_irq(struct tsc2007 *ts)
+ {
+- free_irq(ts->irq, ts);
+- if (cancel_delayed_work_sync(&ts->work)) {
+- /*
+- * Work was pending, therefore we need to enable
+- * IRQ here to balance the disable_irq() done in the
+- * interrupt handler.
+- */
+- enable_irq(ts->irq);
+- }
++ if (ts->irq) {
++ free_irq(ts->irq, ts);
++ if (cancel_delayed_work_sync(&ts->work)) {
++ /*
++ * Work was pending, therefore we need to enable
++ * IRQ here to balance the disable_irq() done in the
++ * interrupt handler.
++ */
++ enable_irq(ts->irq);
++ }
++ } else
++ cancel_delayed_work_sync(&ts->work);
+ }
+
+ static int __devinit tsc2007_probe(struct i2c_client *client,
+@@ -312,12 +342,16 @@ static int __devinit tsc2007_probe(struct i2c_client *client,
+ if (pdata->init_platform_hw)
+ pdata->init_platform_hw();
+
+- err = request_irq(ts->irq, tsc2007_irq, 0,
+- client->dev.driver->name, ts);
+- if (err < 0) {
+- dev_err(&client->dev, "irq %d busy?\n", ts->irq);
+- goto err_free_mem;
+- }
++
++ if (ts->irq) {
++ err = request_irq(ts->irq, tsc2007_irq, 0,
++ client->dev.driver->name, ts);
++ if (err < 0) {
++ dev_err(&client->dev, "irq %d busy?\n", ts->irq);
++ goto err_free_mem;
++ }
++ } else
++ schedule_delayed_work(&ts->work, TS_SLOW_POLL_PERIOD);
+
+ /* Prepare for touch readings - power down ADC and enable PENIRQ */
+ err = tsc2007_xfer(ts, PWRDOWN);
+diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
+index 3f40f37..c542897 100644
+--- a/drivers/media/radio/Kconfig
++++ b/drivers/media/radio/Kconfig
+@@ -417,6 +417,18 @@ config RADIO_TEA5764_XTAL
+ Say Y here if TEA5764 have a 32768 Hz crystal in circuit, say N
+ here if TEA5764 reference frequency is connected in FREQIN.
+
++config RADIO_SAA7706H
++ tristate "SAA7706H Car Radio DSP"
++ depends on I2C && VIDEO_V4L2
++ ---help---
++ Say Y here if you want to use the SAA7706H Car radio Digital
++ Signal Processor, found for instance on the Russellville development
++	  board. On the Russellville the device is connected to the internal
++	  Timberdale I2C bus.
++
++ To compile this driver as a module, choose M here: the
++	  module will be called saa7706h.
++
+ config RADIO_TEF6862
+ tristate "TEF6862 Car Radio Enhanced Selectivity Tuner"
+ depends on I2C && VIDEO_V4L2
+@@ -429,4 +441,14 @@ config RADIO_TEF6862
+ To compile this driver as a module, choose M here: the
+ module will be called TEF6862.
+
++config RADIO_TIMBERDALE
++ tristate "Enable the Timberdale radio driver"
++ depends on MFD_TIMBERDALE && VIDEO_V4L2 && HAS_IOMEM && I2C
++ select RADIO_TEF6862
++ select RADIO_SAA7706H
++ ---help---
++ This is a kind of umbrella driver for the Radio Tuner and DSP
++ found behind the Timberdale FPGA on the Russellville board.
++ Enabling this driver will automatically select the DSP and tuner.
++
+ endif # RADIO_ADAPTERS
+diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
+index 01922ad..f615583 100644
+--- a/drivers/media/radio/Makefile
++++ b/drivers/media/radio/Makefile
+@@ -23,6 +23,8 @@ obj-$(CONFIG_USB_DSBR) += dsbr100.o
+ obj-$(CONFIG_RADIO_SI470X) += si470x/
+ obj-$(CONFIG_USB_MR800) += radio-mr800.o
+ obj-$(CONFIG_RADIO_TEA5764) += radio-tea5764.o
++obj-$(CONFIG_RADIO_SAA7706H) += saa7706h.o
+ obj-$(CONFIG_RADIO_TEF6862) += tef6862.o
++obj-$(CONFIG_RADIO_TIMBERDALE) += radio-timb.o
+
+ EXTRA_CFLAGS += -Isound
+diff --git a/drivers/media/radio/radio-timb.c b/drivers/media/radio/radio-timb.c
+new file mode 100644
+index 0000000..ee8618a
+--- /dev/null
++++ b/drivers/media/radio/radio-timb.c
+@@ -0,0 +1,464 @@
++/*
++ * radio-timb.c Timberdale FPGA Radio driver
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/version.h>
++#include <linux/io.h>
++#include <media/v4l2-ioctl.h>
++#include <media/v4l2-device.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/i2c.h>
++#include <media/timb_radio.h>
++
++#define DRIVER_NAME "timb-radio"
++
++#define RDS_BLOCK_SIZE 4
++#define RDS_BUFFER_SIZE (RDS_BLOCK_SIZE * 100)
++
++struct timbradio {
++ struct mutex lock; /* for mutual exclusion */
++ void __iomem *membase;
++ struct timb_radio_platform_data pdata;
++ struct v4l2_subdev *sd_tuner;
++ struct v4l2_subdev *sd_dsp;
++ struct video_device *video_dev;
++ struct v4l2_device v4l2_dev;
++ /* RDS related */
++ int open_count;
++ int rds_irq;
++ wait_queue_head_t read_queue;
++ unsigned char buffer[RDS_BUFFER_SIZE];
++ unsigned int rd_index;
++ unsigned int wr_index;
++};
++
++
++static int timbradio_vidioc_querycap(struct file *file, void *priv,
++ struct v4l2_capability *v)
++{
++ strlcpy(v->driver, DRIVER_NAME, sizeof(v->driver));
++ strlcpy(v->card, "Timberdale Radio", sizeof(v->card));
++ snprintf(v->bus_info, sizeof(v->bus_info), "platform:"DRIVER_NAME);
++ v->version = KERNEL_VERSION(0, 0, 1);
++ v->capabilities =
++ V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE;
++ return 0;
++}
++
++static int timbradio_vidioc_g_tuner(struct file *file, void *priv,
++ struct v4l2_tuner *v)
++{
++ struct timbradio *tr = video_drvdata(file);
++ return v4l2_subdev_call(tr->sd_tuner, tuner, g_tuner, v);
++}
++
++static int timbradio_vidioc_s_tuner(struct file *file, void *priv,
++ struct v4l2_tuner *v)
++{
++ struct timbradio *tr = video_drvdata(file);
++ return v4l2_subdev_call(tr->sd_tuner, tuner, s_tuner, v);
++}
++
++static int timbradio_vidioc_g_input(struct file *filp, void *priv,
++ unsigned int *i)
++{
++ *i = 0;
++ return 0;
++}
++
++static int timbradio_vidioc_s_input(struct file *filp, void *priv,
++ unsigned int i)
++{
++ return i ? -EINVAL : 0;
++}
++
++static int timbradio_vidioc_g_audio(struct file *file, void *priv,
++ struct v4l2_audio *a)
++{
++ a->index = 0;
++ strlcpy(a->name, "Radio", sizeof(a->name));
++ a->capability = V4L2_AUDCAP_STEREO;
++ return 0;
++}
++
++
++static int timbradio_vidioc_s_audio(struct file *file, void *priv,
++ struct v4l2_audio *a)
++{
++ return a->index ? -EINVAL : 0;
++}
++
++static int timbradio_vidioc_s_frequency(struct file *file, void *priv,
++ struct v4l2_frequency *f)
++{
++ struct timbradio *tr = video_drvdata(file);
++ return v4l2_subdev_call(tr->sd_tuner, tuner, s_frequency, f);
++}
++
++static int timbradio_vidioc_g_frequency(struct file *file, void *priv,
++ struct v4l2_frequency *f)
++{
++ struct timbradio *tr = video_drvdata(file);
++ return v4l2_subdev_call(tr->sd_tuner, tuner, g_frequency, f);
++}
++
++static int timbradio_vidioc_queryctrl(struct file *file, void *priv,
++ struct v4l2_queryctrl *qc)
++{
++ struct timbradio *tr = video_drvdata(file);
++ return v4l2_subdev_call(tr->sd_dsp, core, queryctrl, qc);
++}
++
++static int timbradio_vidioc_g_ctrl(struct file *file, void *priv,
++ struct v4l2_control *ctrl)
++{
++ struct timbradio *tr = video_drvdata(file);
++ return v4l2_subdev_call(tr->sd_dsp, core, g_ctrl, ctrl);
++}
++
++static int timbradio_vidioc_s_ctrl(struct file *file, void *priv,
++ struct v4l2_control *ctrl)
++{
++ struct timbradio *tr = video_drvdata(file);
++ return v4l2_subdev_call(tr->sd_dsp, core, s_ctrl, ctrl);
++}
++
++static const struct v4l2_ioctl_ops timbradio_ioctl_ops = {
++ .vidioc_querycap = timbradio_vidioc_querycap,
++ .vidioc_g_tuner = timbradio_vidioc_g_tuner,
++ .vidioc_s_tuner = timbradio_vidioc_s_tuner,
++ .vidioc_g_frequency = timbradio_vidioc_g_frequency,
++ .vidioc_s_frequency = timbradio_vidioc_s_frequency,
++ .vidioc_g_input = timbradio_vidioc_g_input,
++ .vidioc_s_input = timbradio_vidioc_s_input,
++ .vidioc_g_audio = timbradio_vidioc_g_audio,
++ .vidioc_s_audio = timbradio_vidioc_s_audio,
++ .vidioc_queryctrl = timbradio_vidioc_queryctrl,
++ .vidioc_g_ctrl = timbradio_vidioc_g_ctrl,
++ .vidioc_s_ctrl = timbradio_vidioc_s_ctrl
++};
++
++static irqreturn_t timbradio_irq(int irq, void *devid)
++{
++ struct timbradio *tr = devid;
++ u32 data = ioread32(tr->membase);
++
++ tr->buffer[tr->wr_index++] = data >> 24;
++ tr->buffer[tr->wr_index++] = data >> 16;
++ tr->buffer[tr->wr_index++] = data >> 8;
++ tr->buffer[tr->wr_index++] = data;
++ tr->wr_index %= RDS_BUFFER_SIZE;
++
++ wake_up(&tr->read_queue);
++
++ /* new RDS data received, read it */
++ return IRQ_HANDLED;
++}
++
++/**************************************************************************
++ * File Operations Interface
++ **************************************************************************/
++
++static ssize_t timbradio_rds_fops_read(struct file *file, char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ struct timbradio *tr = video_drvdata(file);
++ unsigned int outblocks = 0;
++
++ if (count < sizeof(struct v4l2_rds_data))
++ return -EINVAL;
++
++ /* block if no new data available */
++ while (tr->wr_index == tr->rd_index) {
++ if (file->f_flags & O_NONBLOCK)
++ return -EWOULDBLOCK;
++
++ if (wait_event_interruptible(tr->read_queue,
++ tr->wr_index != tr->rd_index))
++ return -EINTR;
++ }
++
++ mutex_lock(&tr->lock);
++ count /= sizeof(struct v4l2_rds_data);
++
++ while (outblocks < count) {
++ struct v4l2_rds_data rds_data;
++
++ rds_data.msb = tr->buffer[tr->rd_index++];
++ rds_data.lsb = tr->buffer[tr->rd_index++];
++ tr->rd_index %= RDS_BUFFER_SIZE;
++
++ rds_data.block = V4L2_RDS_BLOCK_A;
++
++ if (copy_to_user(buf + outblocks * sizeof(rds_data), &rds_data,
++ sizeof(rds_data))) {
++ mutex_unlock(&tr->lock);
++ return -EFAULT;
++ }
++
++ outblocks++;
++
++ if (tr->rd_index == tr->wr_index)
++ break;
++ }
++ mutex_unlock(&tr->lock);
++
++ return outblocks * sizeof(struct v4l2_rds_data);
++}
++
++static unsigned int timbradio_rds_fops_poll(struct file *file,
++ struct poll_table_struct *pts)
++{
++ struct timbradio *tr = video_drvdata(file);
++
++ poll_wait(file, &tr->read_queue, pts);
++
++ if (tr->rd_index != tr->wr_index)
++ return POLLIN | POLLRDNORM;
++
++ return 0;
++}
++
++static int timbradio_rds_fops_open(struct file *file)
++{
++ struct timbradio *tr = video_drvdata(file);
++ struct i2c_adapter *adapt;
++ int err = 0;
++
++ mutex_lock(&tr->lock);
++ if (tr->open_count)
++ goto out;
++
++	/* device currently not open, check if the DSP and tuner are not
++	 * yet found; in that case, find them
++ */
++
++ /* find the I2C bus */
++ adapt = i2c_get_adapter(tr->pdata.i2c_adapter);
++ if (!adapt) {
++ printk(KERN_ERR DRIVER_NAME": No I2C bus\n");
++ err = -ENODEV;
++ goto out;
++ }
++
++ /* now find the tuner and dsp */
++ if (!tr->sd_dsp)
++ tr->sd_dsp = v4l2_i2c_new_subdev_board(&tr->v4l2_dev, adapt,
++ tr->pdata.dsp.module_name, tr->pdata.dsp.info, NULL);
++
++ if (!tr->sd_tuner)
++ tr->sd_tuner = v4l2_i2c_new_subdev_board(&tr->v4l2_dev, adapt,
++ tr->pdata.tuner.module_name, tr->pdata.tuner.info,
++ NULL);
++
++ i2c_put_adapter(adapt);
++
++ if (!tr->sd_tuner || !tr->sd_dsp) {
++ printk(KERN_ERR DRIVER_NAME
++ ": Failed to get tuner or DSP\n");
++ err = -ENODEV;
++ goto out;
++ }
++
++ /* enable the IRQ for receiving RDS data */
++ err = request_irq(tr->rds_irq, timbradio_irq, 0, DRIVER_NAME, tr);
++out:
++ if (!err)
++ tr->open_count++;
++ mutex_unlock(&tr->lock);
++ return err;
++}
++
++static int timbradio_rds_fops_release(struct file *file)
++{
++ struct timbradio *tr = video_drvdata(file);
++
++ mutex_lock(&tr->lock);
++ tr->open_count--;
++ if (!tr->open_count) {
++ free_irq(tr->rds_irq, tr);
++
++ tr->wr_index = 0;
++ tr->rd_index = 0;
++
++ /* cancel read processes */
++ wake_up_interruptible(&tr->read_queue);
++ }
++ mutex_unlock(&tr->lock);
++
++ return 0;
++}
++
++
++static const struct v4l2_file_operations timbradio_fops = {
++ .owner = THIS_MODULE,
++ .ioctl = video_ioctl2,
++ .read = timbradio_rds_fops_read,
++ .poll = timbradio_rds_fops_poll,
++ .open = timbradio_rds_fops_open,
++ .release = timbradio_rds_fops_release,
++};
++
++static const struct video_device timbradio_template = {
++ .name = "Timberdale Radio",
++ .fops = &timbradio_fops,
++ .ioctl_ops = &timbradio_ioctl_ops,
++ .release = video_device_release_empty,
++ .minor = -1
++};
++
++
++static int __devinit timbradio_probe(struct platform_device *pdev)
++{
++ struct timb_radio_platform_data *pdata = pdev->dev.platform_data;
++ struct timbradio *tr;
++ struct resource *iomem;
++ int irq;
++ int err;
++
++ if (!pdata) {
++ printk(KERN_ERR DRIVER_NAME": Platform data missing\n");
++ err = -EINVAL;
++ goto err;
++ }
++
++ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!iomem) {
++ err = -ENODEV;
++ goto err;
++ }
++
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0) {
++ err = -ENODEV;
++ goto err;
++ }
++
++ if (!request_mem_region(iomem->start, resource_size(iomem),
++ DRIVER_NAME)) {
++ err = -EBUSY;
++ goto err;
++ }
++
++ tr = kzalloc(sizeof(*tr), GFP_KERNEL);
++ if (!tr) {
++ err = -ENOMEM;
++ goto err_alloc;
++ }
++ mutex_init(&tr->lock);
++
++ tr->membase = ioremap(iomem->start, resource_size(iomem));
++ if (!tr->membase) {
++ err = -ENOMEM;
++ goto err_ioremap;
++ }
++
++ tr->pdata = *pdata;
++
++ tr->video_dev = video_device_alloc();
++ if (!tr->video_dev) {
++ err = -ENOMEM;
++ goto err_video_req;
++ }
++ *tr->video_dev = timbradio_template;
++ tr->rds_irq = irq;
++ init_waitqueue_head(&tr->read_queue);
++
++ strlcpy(tr->v4l2_dev.name, DRIVER_NAME, sizeof(tr->v4l2_dev.name));
++ err = v4l2_device_register(NULL, &tr->v4l2_dev);
++ if (err)
++ goto err_v4l2_dev;
++
++ tr->video_dev->v4l2_dev = &tr->v4l2_dev;
++
++ err = video_register_device(tr->video_dev, VFL_TYPE_RADIO, -1);
++ if (err) {
++ printk(KERN_ALERT DRIVER_NAME": Error reg video\n");
++ goto err_video_req;
++ }
++
++ video_set_drvdata(tr->video_dev, tr);
++
++ platform_set_drvdata(pdev, tr);
++ return 0;
++
++err_video_req:
++ v4l2_device_unregister(&tr->v4l2_dev);
++err_v4l2_dev:
++ if (tr->video_dev->minor != -1)
++ video_unregister_device(tr->video_dev);
++ else
++ video_device_release(tr->video_dev);
++ iounmap(tr->membase);
++err_ioremap:
++ kfree(tr);
++err_alloc:
++ release_mem_region(iomem->start, resource_size(iomem));
++err:
++ printk(KERN_ERR DRIVER_NAME ": Failed to register: %d\n", err);
++
++ return err;
++}
++
++static int __devexit timbradio_remove(struct platform_device *pdev)
++{
++ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ struct timbradio *tr = platform_get_drvdata(pdev);
++
++ if (tr->video_dev->minor != -1)
++ video_unregister_device(tr->video_dev);
++ else
++ video_device_release(tr->video_dev);
++
++ v4l2_device_unregister(&tr->v4l2_dev);
++
++ iounmap(tr->membase);
++ release_mem_region(iomem->start, resource_size(iomem));
++ kfree(tr);
++
++ return 0;
++}
++
++static struct platform_driver timbradio_platform_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = timbradio_probe,
++ .remove = timbradio_remove,
++};
++
++/*--------------------------------------------------------------------------*/
++
++static int __init timbradio_init(void)
++{
++ return platform_driver_register(&timbradio_platform_driver);
++}
++
++static void __exit timbradio_exit(void)
++{
++ platform_driver_unregister(&timbradio_platform_driver);
++}
++
++module_init(timbradio_init);
++module_exit(timbradio_exit);
++
++MODULE_DESCRIPTION("Timberdale Radio driver");
++MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("platform:"DRIVER_NAME);
+diff --git a/drivers/media/radio/saa7706h.c b/drivers/media/radio/saa7706h.c
+new file mode 100644
+index 0000000..8bd6725
+--- /dev/null
++++ b/drivers/media/radio/saa7706h.c
+@@ -0,0 +1,451 @@
++/*
++ * saa7706.c Philips SAA7706H Car Radio DSP driver
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/kernel.h>
++#include <linux/interrupt.h>
++#include <linux/i2c.h>
++#include <media/v4l2-device.h>
++#include <media/v4l2-chip-ident.h>
++
++#define DRIVER_NAME "saa7706h"
++
++/* the I2C memory map looks like this
++
++ $1C00 - $FFFF Not Used
++ $2200 - $3FFF Reserved YRAM (DSP2) space
++ $2000 - $21FF YRAM (DSP2)
++ $1FF0 - $1FFF Hardware Registers
++ $1280 - $1FEF Reserved XRAM (DSP2) space
++ $1000 - $127F XRAM (DSP2)
++ $0FFF DSP CONTROL
++ $0A00 - $0FFE Reserved
++ $0980 - $09FF Reserved YRAM (DSP1) space
++ $0800 - $097F YRAM (DSP1)
++ $0200 - $07FF Not Used
++ $0180 - $01FF Reserved XRAM (DSP1) space
++ $0000 - $017F XRAM (DSP1)
++*/
++
++#define SAA7706H_REG_CTRL 0x0fff
++#define SAA7706H_CTRL_BYP_PLL 0x0001
++#define SAA7706H_CTRL_PLL_DIV_MASK 0x003e
++#define SAA7706H_CTRL_PLL3_62975MHZ 0x003e
++#define SAA7706H_CTRL_DSP_TURBO 0x0040
++#define SAA7706H_CTRL_PC_RESET_DSP1 0x0080
++#define SAA7706H_CTRL_PC_RESET_DSP2 0x0100
++#define SAA7706H_CTRL_DSP1_ROM_EN_MASK 0x0600
++#define SAA7706H_CTRL_DSP1_FUNC_PROM 0x0000
++#define SAA7706H_CTRL_DSP2_ROM_EN_MASK 0x1800
++#define SAA7706H_CTRL_DSP2_FUNC_PROM 0x0000
++#define SAA7706H_CTRL_DIG_SIL_INTERPOL 0x8000
++
++#define SAA7706H_REG_EVALUATION 0x1ff0
++#define SAA7706H_EVAL_DISABLE_CHARGE_PUMP 0x000001
++#define SAA7706H_EVAL_DCS_CLOCK 0x000002
++#define SAA7706H_EVAL_GNDRC1_ENABLE 0x000004
++#define SAA7706H_EVAL_GNDRC2_ENABLE 0x000008
++
++#define SAA7706H_REG_CL_GEN1 0x1ff3
++#define SAA7706H_CL_GEN1_MIN_LOOPGAIN_MASK 0x00000f
++#define SAA7706H_CL_GEN1_LOOPGAIN_MASK 0x0000f0
++#define SAA7706H_CL_GEN1_COARSE_RATION 0xffff00
++
++#define SAA7706H_REG_CL_GEN2 0x1ff4
++#define SAA7706H_CL_GEN2_WSEDGE_FALLING 0x000001
++#define SAA7706H_CL_GEN2_STOP_VCO 0x000002
++#define SAA7706H_CL_GEN2_FRERUN 0x000004
++#define SAA7706H_CL_GEN2_ADAPTIVE 0x000008
++#define SAA7706H_CL_GEN2_FINE_RATIO_MASK 0x0ffff0
++
++#define SAA7706H_REG_CL_GEN4 0x1ff6
++#define SAA7706H_CL_GEN4_BYPASS_PLL1 0x001000
++#define SAA7706H_CL_GEN4_PLL1_DIV_MASK 0x03e000
++#define SAA7706H_CL_GEN4_DSP1_TURBO 0x040000
++
++#define SAA7706H_REG_SEL 0x1ff7
++#define SAA7706H_SEL_DSP2_SRCA_MASK 0x000007
++#define SAA7706H_SEL_DSP2_FMTA_MASK 0x000031
++#define SAA7706H_SEL_DSP2_SRCB_MASK 0x0001c0
++#define SAA7706H_SEL_DSP2_FMTB_MASK 0x000e00
++#define SAA7706H_SEL_DSP1_SRC_MASK 0x003000
++#define SAA7706H_SEL_DSP1_FMT_MASK 0x01c003
++#define SAA7706H_SEL_SPDIF2 0x020000
++#define SAA7706H_SEL_HOST_IO_FMT_MASK 0x1c0000
++#define SAA7706H_SEL_EN_HOST_IO 0x200000
++
++#define SAA7706H_REG_IAC 0x1ff8
++#define SAA7706H_REG_CLK_SET 0x1ff9
++#define SAA7706H_REG_CLK_COEFF 0x1ffa
++#define SAA7706H_REG_INPUT_SENS 0x1ffb
++#define SAA7706H_INPUT_SENS_RDS_VOL_MASK 0x0003f
++#define SAA7706H_INPUT_SENS_FM_VOL_MASK 0x00fc0
++#define SAA7706H_INPUT_SENS_FM_MPX 0x01000
++#define SAA7706H_INPUT_SENS_OFF_FILTER_A_EN 0x02000
++#define SAA7706H_INPUT_SENS_OFF_FILTER_B_EN 0x04000
++#define SAA7706H_REG_PHONE_NAV_AUDIO 0x1ffc
++#define SAA7706H_REG_IO_CONF_DSP2 0x1ffd
++#define SAA7706H_REG_STATUS_DSP2 0x1ffe
++#define SAA7706H_REG_PC_DSP2 0x1fff
++
++#define SAA7706H_DSP1_MOD0 0x0800
++#define SAA7706H_DSP1_ROM_VER 0x097f
++#define SAA7706H_DSP2_MPTR0 0x1000
++
++#define SAA7706H_DSP1_MODPNTR 0x0000
++
++#define SAA7706H_DSP2_XMEM_CONTLLCW 0x113e
++#define SAA7706H_DSP2_XMEM_BUSAMP 0x114a
++#define SAA7706H_DSP2_XMEM_FDACPNTR 0x11f9
++#define SAA7706H_DSP2_XMEM_IIS1PNTR 0x11fb
++
++#define SAA7706H_DSP2_YMEM_PVGA 0x212a
++#define SAA7706H_DSP2_YMEM_PVAT1 0x212b
++#define SAA7706H_DSP2_YMEM_PVAT 0x212c
++#define SAA7706H_DSP2_YMEM_ROM_VER 0x21ff
++
++#define SUPPORTED_DSP1_ROM_VER 0x667
++
++struct saa7706h_state {
++ struct v4l2_subdev sd;
++ unsigned muted;
++};
++
++static inline struct saa7706h_state *to_state(struct v4l2_subdev *sd)
++{
++ return container_of(sd, struct saa7706h_state, sd);
++}
++
++static int saa7706h_i2c_send(struct i2c_client *client, const u8 *data, int len)
++{
++ int err = i2c_master_send(client, data, len);
++ if (err == len)
++ return 0;
++ return err > 0 ? -EIO : err;
++}
++
++static int saa7706h_i2c_transfer(struct i2c_client *client,
++ struct i2c_msg *msgs, int num)
++{
++ int err = i2c_transfer(client->adapter, msgs, num);
++ if (err == num)
++ return 0;
++ return err > 0 ? -EIO : err;
++}
++
++static int saa7706h_set_reg24(struct v4l2_subdev *sd, u16 reg, u32 val)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ u8 buf[5];
++ int pos = 0;
++
++ buf[pos++] = reg >> 8;
++ buf[pos++] = reg;
++ buf[pos++] = val >> 16;
++ buf[pos++] = val >> 8;
++ buf[pos++] = val;
++
++ return saa7706h_i2c_send(client, buf, pos);
++}
++
++static int saa7706h_set_reg24_err(struct v4l2_subdev *sd, u16 reg, u32 val,
++ int *err)
++{
++ return *err ? *err : saa7706h_set_reg24(sd, reg, val);
++}
++
++static int saa7706h_set_reg16(struct v4l2_subdev *sd, u16 reg, u16 val)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ u8 buf[4];
++ int pos = 0;
++
++ buf[pos++] = reg >> 8;
++ buf[pos++] = reg;
++ buf[pos++] = val >> 8;
++ buf[pos++] = val;
++
++ return saa7706h_i2c_send(client, buf, pos);
++}
++
++static int saa7706h_set_reg16_err(struct v4l2_subdev *sd, u16 reg, u16 val,
++ int *err)
++{
++ return *err ? *err : saa7706h_set_reg16(sd, reg, val);
++}
++
++static int saa7706h_get_reg16(struct v4l2_subdev *sd, u16 reg)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ u8 buf[2];
++ int err;
++ u8 regaddr[] = {reg >> 8, reg};
++ struct i2c_msg msg[] = { {client->addr, 0, sizeof(regaddr), regaddr},
++ {client->addr, I2C_M_RD, sizeof(buf), buf} };
++
++ err = saa7706h_i2c_transfer(client, msg, ARRAY_SIZE(msg));
++ if (err)
++ return err;
++
++ return buf[0] << 8 | buf[1];
++}
++
++static int saa7706h_unmute(struct v4l2_subdev *sd)
++{
++ struct saa7706h_state *state = to_state(sd);
++ int err = 0;
++
++ err = saa7706h_set_reg16_err(sd, SAA7706H_REG_CTRL,
++ SAA7706H_CTRL_PLL3_62975MHZ | SAA7706H_CTRL_PC_RESET_DSP1 |
++ SAA7706H_CTRL_PC_RESET_DSP2, &err);
++
++ /* newer versions of the chip requires a small sleep after reset */
++ msleep(1);
++
++ err = saa7706h_set_reg16_err(sd, SAA7706H_REG_CTRL,
++ SAA7706H_CTRL_PLL3_62975MHZ, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_EVALUATION, 0, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_CL_GEN1, 0x040022, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_CL_GEN2,
++ SAA7706H_CL_GEN2_WSEDGE_FALLING, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_CL_GEN4, 0x024080, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_SEL, 0x200080, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_IAC, 0xf4caed, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_CLK_SET, 0x124334, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_CLK_COEFF, 0x004a1a,
++ &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_INPUT_SENS, 0x0071c7,
++ &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_PHONE_NAV_AUDIO,
++ 0x0e22ff, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_IO_CONF_DSP2, 0x001ff8,
++ &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_STATUS_DSP2, 0x080003,
++ &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_REG_PC_DSP2, 0x000004, &err);
++
++ err = saa7706h_set_reg16_err(sd, SAA7706H_DSP1_MOD0, 0x0c6c, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_DSP2_MPTR0, 0x000b4b, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_DSP1_MODPNTR, 0x000600, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_DSP1_MODPNTR, 0x0000c0, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_DSP2_XMEM_CONTLLCW, 0x000819,
++ &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_DSP2_XMEM_CONTLLCW, 0x00085a,
++ &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_DSP2_XMEM_BUSAMP, 0x7fffff,
++ &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_DSP2_XMEM_FDACPNTR, 0x2000cb,
++ &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_DSP2_XMEM_IIS1PNTR, 0x2000cb,
++ &err);
++
++ err = saa7706h_set_reg16_err(sd, SAA7706H_DSP2_YMEM_PVGA, 0x0f80, &err);
++
++ err = saa7706h_set_reg16_err(sd, SAA7706H_DSP2_YMEM_PVAT1, 0x0800,
++ &err);
++
++ err = saa7706h_set_reg16_err(sd, SAA7706H_DSP2_YMEM_PVAT, 0x0800, &err);
++
++ err = saa7706h_set_reg24_err(sd, SAA7706H_DSP2_XMEM_CONTLLCW, 0x000905,
++ &err);
++ if (!err)
++ state->muted = 0;
++ return err;
++}
++
++static int saa7706h_mute(struct v4l2_subdev *sd)
++{
++ struct saa7706h_state *state = to_state(sd);
++ int err;
++
++ err = saa7706h_set_reg16(sd, SAA7706H_REG_CTRL,
++ SAA7706H_CTRL_PLL3_62975MHZ | SAA7706H_CTRL_PC_RESET_DSP1 |
++ SAA7706H_CTRL_PC_RESET_DSP2);
++ if (!err)
++ state->muted = 1;
++ return err;
++}
++
++static int saa7706h_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
++{
++ switch (qc->id) {
++ case V4L2_CID_AUDIO_MUTE:
++ return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
++ }
++ return -EINVAL;
++}
++
++static int saa7706h_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ struct saa7706h_state *state = to_state(sd);
++
++ switch (ctrl->id) {
++ case V4L2_CID_AUDIO_MUTE:
++ ctrl->value = state->muted;
++ return 0;
++ }
++ return -EINVAL;
++}
++
++static int saa7706h_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ switch (ctrl->id) {
++ case V4L2_CID_AUDIO_MUTE:
++ if (ctrl->value)
++ return saa7706h_mute(sd);
++ return saa7706h_unmute(sd);
++ }
++ return -EINVAL;
++}
++
++static int saa7706h_g_chip_ident(struct v4l2_subdev *sd,
++ struct v4l2_dbg_chip_ident *chip)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_SAA7706H, 0);
++}
++
++static const struct v4l2_subdev_core_ops saa7706h_core_ops = {
++ .g_chip_ident = saa7706h_g_chip_ident,
++ .queryctrl = saa7706h_queryctrl,
++ .g_ctrl = saa7706h_g_ctrl,
++ .s_ctrl = saa7706h_s_ctrl,
++};
++
++static const struct v4l2_subdev_ops saa7706h_ops = {
++ .core = &saa7706h_core_ops,
++};
++
++/*
++ * Generic i2c probe
++ * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
++ */
++
++static int __devinit saa7706h_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct saa7706h_state *state;
++ struct v4l2_subdev *sd;
++ int err;
++
++ /* Check if the adapter supports the needed features */
++ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
++ return -EIO;
++
++ v4l_info(client, "chip found @ 0x%02x (%s)\n",
++ client->addr << 1, client->adapter->name);
++
++ state = kmalloc(sizeof(struct saa7706h_state), GFP_KERNEL);
++ if (state == NULL)
++ return -ENOMEM;
++ sd = &state->sd;
++ v4l2_i2c_subdev_init(sd, client, &saa7706h_ops);
++
++ /* check the rom versions */
++ err = saa7706h_get_reg16(sd, SAA7706H_DSP1_ROM_VER);
++ if (err < 0)
++ goto err;
++ if (err != SUPPORTED_DSP1_ROM_VER)
++ v4l2_warn(sd, "Unknown DSP1 ROM code version: 0x%x\n", err);
++
++ state->muted = 1;
++
++ /* startup in a muted state */
++ err = saa7706h_mute(sd);
++ if (err)
++ goto err;
++
++ return 0;
++
++err:
++ v4l2_device_unregister_subdev(sd);
++ kfree(to_state(sd));
++
++ printk(KERN_ERR DRIVER_NAME ": Failed to probe: %d\n", err);
++
++ return err;
++}
++
++static int __devexit saa7706h_remove(struct i2c_client *client)
++{
++ struct v4l2_subdev *sd = i2c_get_clientdata(client);
++
++ saa7706h_mute(sd);
++ v4l2_device_unregister_subdev(sd);
++ kfree(to_state(sd));
++ return 0;
++}
++
++static const struct i2c_device_id saa7706h_id[] = {
++ {DRIVER_NAME, 0},
++ {},
++};
++
++MODULE_DEVICE_TABLE(i2c, saa7706h_id);
++
++static struct i2c_driver saa7706h_driver = {
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = DRIVER_NAME,
++ },
++ .probe = saa7706h_probe,
++ .remove = saa7706h_remove,
++ .id_table = saa7706h_id,
++};
++
++static __init int saa7706h_init(void)
++{
++ return i2c_add_driver(&saa7706h_driver);
++}
++
++static __exit void saa7706h_exit(void)
++{
++ i2c_del_driver(&saa7706h_driver);
++}
++
++module_init(saa7706h_init);
++module_exit(saa7706h_exit);
++
++MODULE_DESCRIPTION("SAA7706H Car Radio DSP driver");
++MODULE_AUTHOR("Mocean Laboratories");
++MODULE_LICENSE("GPL v2");
++
+diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
+index 2f83be7..51aa8f5 100644
+--- a/drivers/media/video/Kconfig
++++ b/drivers/media/video/Kconfig
+@@ -923,6 +923,14 @@ config VIDEO_OMAP2
+ ---help---
+ This is a v4l2 driver for the TI OMAP2 camera capture interface
+
++config VIDEO_TIMBERDALE
++ tristate "Support for timberdale Video In/LogiWIN"
++ depends on VIDEO_V4L2 && I2C
++ select TIMB_DMA
++ select VIDEO_ADV7180
++ ---help---
++	  Add support for the Video In peripheral of the timberdale FPGA.
++
+ #
+ # USB Multimedia device configuration
+ #
+diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
+index 2af68ee..58ece5c 100644
+--- a/drivers/media/video/Makefile
++++ b/drivers/media/video/Makefile
+@@ -162,6 +162,8 @@ obj-$(CONFIG_ARCH_DAVINCI) += davinci/
+
+ obj-$(CONFIG_VIDEO_AU0828) += au0828/
+
++obj-$(CONFIG_VIDEO_TIMBERDALE) += timblogiw.o
++
+ obj-$(CONFIG_USB_VIDEO_CLASS) += uvc/
+ obj-$(CONFIG_VIDEO_SAA7164) += saa7164/
+
+diff --git a/drivers/media/video/adv7180.c b/drivers/media/video/adv7180.c
+index 0826f0d..eb8e32c 100644
+--- a/drivers/media/video/adv7180.c
++++ b/drivers/media/video/adv7180.c
+@@ -90,6 +90,9 @@
+ #define ADV7180_IMR3_ADI 0x4C
+ #define ADV7180_IMR4_ADI 0x50
+
++#define ADV7180_NTSC_V_BIT_END_REG 0xE6
++#define ADV7180_NTSC_V_BIT_END_MANUAL_NVEND 0x4F
++
+ struct adv7180_state {
+ struct v4l2_subdev sd;
+ struct work_struct work;
+@@ -348,6 +351,14 @@ static __devinit int adv7180_probe(struct i2c_client *client,
+ if (ret < 0)
+ goto err_unreg_subdev;
+
++
++ /* Manually set V bit end position in NTSC mode */
++ ret = i2c_smbus_write_byte_data(client,
++ ADV7180_NTSC_V_BIT_END_REG,
++ ADV7180_NTSC_V_BIT_END_MANUAL_NVEND);
++ if (ret < 0)
++ goto err_unreg_subdev;
++
+ /* read current norm */
+ __adv7180_status(client, NULL, &state->curr_norm);
+
+diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
+new file mode 100644
+index 0000000..b232f61
+--- /dev/null
++++ b/drivers/media/video/timblogiw.c
+@@ -0,0 +1,1038 @@
++/*
++ * timblogiw.c timberdale FPGA LogiWin Video In driver
++ * Copyright (c) 2009-2010 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA LogiWin Video In
++ */
++
++#include <linux/list.h>
++#include <linux/version.h>
++#include <linux/dma-mapping.h>
++#include <media/v4l2-ioctl.h>
++#include <media/v4l2-device.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include "timblogiw.h"
++#include <linux/i2c.h>
++
++#define DRIVER_NAME "timb-video"
++
++#define TIMBLOGIW_CTRL 0x40
++
++#define TIMBLOGIW_H_SCALE 0x20
++#define TIMBLOGIW_V_SCALE 0x28
++
++#define TIMBLOGIW_X_CROP 0x58
++#define TIMBLOGIW_Y_CROP 0x60
++
++#define TIMBLOGIW_W_CROP 0x00
++#define TIMBLOGIW_H_CROP 0x08
++
++#define TIMBLOGIW_VERSION_CODE 0x03
++
++#define TIMBLOGIW_BUF 0x04
++#define TIMBLOGIW_TBI 0x2c
++#define TIMBLOGIW_BPL 0x30
++
++#define BYTES_PER_LINE (720 * 2)
++
++#define DMA_BUFFER_SIZE (BYTES_PER_LINE * 576)
++
++#define TIMBLOGIW_VIDEO_FORMAT V4L2_PIX_FMT_UYVY
++
++#define TIMBLOGIW_HAS_DECODER(lw) (lw->pdata.encoder.module_name)
++
++static void timblogiw_release_buffers(struct timblogiw *lw);
++
++const struct timblogiw_tvnorm timblogiw_tvnorms[] = {
++ {
++ .std = V4L2_STD_PAL,
++ .width = 720,
++ .height = 576
++ },
++ {
++ .std = V4L2_STD_NTSC,
++ .width = 720,
++ .height = 480
++ }
++};
++
++static int timblogiw_bytes_per_line(const struct timblogiw_tvnorm *norm)
++{
++ return norm->width * 2;
++}
++
++
++static int timblogiw_frame_size(const struct timblogiw_tvnorm *norm)
++{
++ return norm->height * timblogiw_bytes_per_line(norm);
++}
++
++static const struct timblogiw_tvnorm *timblogiw_get_norm(const v4l2_std_id std)
++{
++ int i;
++ for (i = 0; i < ARRAY_SIZE(timblogiw_tvnorms); i++)
++ if (timblogiw_tvnorms[i].std & std)
++ return timblogiw_tvnorms + i;
++
++ /* default to first element */
++ return timblogiw_tvnorms;
++}
++
++static void timblogiw_dma_cb(void *data)
++{
++ struct timblogiw *lw = (struct timblogiw *)data;
++
++ dev_dbg(&lw->video_dev.dev, "%s: frame RX\n", __func__);
++ tasklet_schedule(&lw->tasklet);
++}
++
++static int __timblogiw_start_dma(struct timblogiw *lw)
++{
++ struct timbdma_transfer *transfer = lw->dma.transfer + lw->dma.curr;
++ struct dma_async_tx_descriptor *desc;
++ int sg_elems;
++ int bytes_per_desc =
++ TIMBLOGIW_LINES_PER_DESC *
++ timblogiw_bytes_per_line(lw->cur_norm);
++
++ sg_elems = timblogiw_frame_size(lw->cur_norm) / bytes_per_desc;
++ sg_elems +=
++ (timblogiw_frame_size(lw->cur_norm) % bytes_per_desc) ? 1 : 0;
++
++ dev_dbg(&lw->video_dev.dev, "Preparing DMA descriptor, elems: %d\n",
++ sg_elems);
++
++ desc = lw->chan->device->device_prep_slave_sg(lw->chan,
++ transfer->sg, sg_elems, DMA_FROM_DEVICE,
++ DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
++ if (!desc)
++ return -ENOMEM;
++
++ desc->callback_param = lw;
++ desc->callback = timblogiw_dma_cb;
++ dev_dbg(&lw->video_dev.dev, "Submitting DMA descriptor\n");
++ transfer->cookie = desc->tx_submit(desc);
++
++ return 0;
++}
++
++static void timblogiw_handleframe(unsigned long arg)
++{
++ struct timblogiw_frame *f;
++ struct timblogiw *lw = (struct timblogiw *)arg;
++
++ if (lw->stream == STREAM_OFF)
++ return;
++
++ spin_lock(&lw->queue_lock);
++
++ if (!lw->dma.filled) {
++ /* Got a frame, store it, and flip to next DMA buffer */
++ lw->dma.filled = lw->dma.transfer + lw->dma.curr;
++ lw->dma.curr = !lw->dma.curr;
++ } else if (lw->dma.filled)
++ dev_dbg(&lw->video_dev.dev, "No free frame\n");
++
++ __timblogiw_start_dma(lw);
++
++
++ if (lw->dma.filled && !list_empty(&lw->inqueue)) {
++ /* put the entry in the outqueue */
++ f = list_entry(lw->inqueue.next, struct timblogiw_frame, frame);
++
++ /* sync memory and unmap */
++ dma_sync_single_for_cpu(lw->dev,
++ sg_dma_address(lw->dma.filled->sg),
++ timblogiw_frame_size(lw->cur_norm), DMA_FROM_DEVICE);
++
++ /* copy data from the DMA buffer */
++ memcpy(f->bufmem, lw->dma.filled->buf, f->buf.length);
++ /* buffer consumed */
++ lw->dma.filled = NULL;
++
++ do_gettimeofday(&f->buf.timestamp);
++ f->buf.sequence = ++lw->frame_count;
++ f->buf.field = V4L2_FIELD_NONE;
++ f->state = F_DONE;
++ f->buf.bytesused = f->buf.length;
++ list_move_tail(&f->frame, &lw->outqueue);
++ /* wake up any waiter */
++ wake_up(&lw->wait_frame);
++ } else {
++ /* No user buffer available, consume buffer anyway
++ * who wants an old video frame?
++ */
++ lw->dma.filled = NULL;
++ }
++ spin_unlock(&lw->queue_lock);
++}
++
++
++static void timblogiw_empty_framequeues(struct timblogiw *lw)
++{
++ u32 i;
++
++ dev_dbg(&lw->video_dev.dev, "%s entry\n", __func__);
++
++ INIT_LIST_HEAD(&lw->inqueue);
++ INIT_LIST_HEAD(&lw->outqueue);
++
++ for (i = 0; i < lw->num_frames; i++) {
++ lw->frame[i].state = F_UNUSED;
++ lw->frame[i].buf.bytesused = 0;
++ }
++}
++
++static bool timblogiw_dma_filter_fn(struct dma_chan *chan, void *filter_param)
++{
++ return chan->chan_id == (int)filter_param;
++}
++
++static u32 timblogiw_request_buffers(struct timblogiw *lw, u32 count)
++{
++ /* needs to be page aligned cause the */
++ /* buffers can be mapped individually! */
++ const size_t imagesize = PAGE_ALIGN(timblogiw_frame_size(lw->cur_norm));
++ struct timbdma_transfer *t0 = lw->dma.transfer;
++ struct timbdma_transfer *t1 = lw->dma.transfer + 1;
++ int bytes_per_desc = TIMBLOGIW_LINES_PER_DESC * BYTES_PER_LINE;
++ dma_cap_mask_t mask;
++ void *buff = NULL;
++ dma_addr_t addr;
++ u32 size;
++ int ret;
++ int i;
++
++ dma_cap_zero(mask);
++ dma_cap_set(DMA_SLAVE, mask);
++
++ dev_dbg(&lw->video_dev.dev, "%s - request of %i buffers of size %zi\n",
++ __func__, count, imagesize);
++
++ t0->buf = kzalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
++ if (!t0->buf)
++ goto err;
++
++ t1->buf = kzalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
++ if (!t1->buf)
++ goto err;
++
++ sg_init_table(t0->sg, ARRAY_SIZE(t0->sg));
++ sg_init_table(t1->sg, ARRAY_SIZE(t1->sg));
++
++ /* map up the DMA buffers */
++ addr = dma_map_single(lw->dev, t0->buf, DMA_BUFFER_SIZE,
++ DMA_FROM_DEVICE);
++ ret = dma_mapping_error(lw->dev, addr);
++ if (ret)
++ goto err;
++
++ for (i = 0, size = 0; size < DMA_BUFFER_SIZE; i++) {
++ sg_dma_address(t0->sg + i) = addr + size;
++ size += bytes_per_desc;
++ sg_dma_len(t0->sg + i) = (size > DMA_BUFFER_SIZE) ?
++ (bytes_per_desc - (size - DMA_BUFFER_SIZE)) :
++ bytes_per_desc;
++ }
++
++ addr = dma_map_single(lw->dev, t1->buf, DMA_BUFFER_SIZE,
++ DMA_FROM_DEVICE);
++ ret = dma_mapping_error(lw->dev, addr);
++ if (ret)
++ goto err;
++
++ for (i = 0, size = 0; size < DMA_BUFFER_SIZE; i++) {
++ sg_dma_address(t1->sg + i) = addr + size;
++ size += bytes_per_desc;
++ sg_dma_len(t1->sg + i) = (size > DMA_BUFFER_SIZE) ?
++ (bytes_per_desc - (size - DMA_BUFFER_SIZE)) :
++ bytes_per_desc;
++ }
++
++ if (count > TIMBLOGIW_NUM_FRAMES)
++ count = TIMBLOGIW_NUM_FRAMES;
++
++ lw->num_frames = count;
++ while (lw->num_frames > 0) {
++ buff = vmalloc_32(lw->num_frames * imagesize);
++ if (buff) {
++ memset(buff, 0, lw->num_frames * imagesize);
++ break;
++ }
++ lw->num_frames--;
++ }
++
++ for (i = 0; i < lw->num_frames; i++) {
++ lw->frame[i].bufmem = buff + i * imagesize;
++ lw->frame[i].buf.index = i;
++ lw->frame[i].buf.m.offset = i * imagesize;
++ lw->frame[i].buf.length = timblogiw_frame_size(lw->cur_norm);
++ lw->frame[i].buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ lw->frame[i].buf.sequence = 0;
++ lw->frame[i].buf.field = V4L2_FIELD_NONE;
++ lw->frame[i].buf.memory = V4L2_MEMORY_MMAP;
++ lw->frame[i].buf.flags = 0;
++ }
++
++ lw->dma.curr = 0;
++ lw->dma.filled = NULL;
++
++ /* find the DMA channel */
++ lw->chan = dma_request_channel(mask, timblogiw_dma_filter_fn,
++ (void *)lw->pdata.dma_channel);
++ if (!lw->chan) {
++ dev_err(&lw->video_dev.dev, "Failed to get DMA channel\n");
++ goto err;
++ }
++
++ return lw->num_frames;
++err:
++ timblogiw_release_buffers(lw);
++
++ return 0;
++}
++
++static void timblogiw_release_buffers(struct timblogiw *lw)
++{
++ struct timbdma_transfer *t0 = lw->dma.transfer;
++ struct timbdma_transfer *t1 = lw->dma.transfer + 1;
++
++ dev_dbg(&lw->video_dev.dev, "%s entry\n", __func__);
++
++ if (lw->chan)
++ dma_release_channel(lw->chan);
++ lw->chan = NULL;
++
++
++ if (lw->frame[0].bufmem != NULL) {
++ vfree(lw->frame[0].bufmem);
++ lw->frame[0].bufmem = NULL;
++ }
++
++ if (sg_dma_address(t0->sg))
++ dma_unmap_single(lw->dev, sg_dma_address(t0->sg),
++ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
++ sg_dma_address(t0->sg) = 0;
++
++ if (sg_dma_address(t1->sg))
++ dma_unmap_single(lw->dev, sg_dma_address(t1->sg),
++ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
++ sg_dma_address(t1->sg) = 0;
++
++ if (t0->buf != NULL)
++ kfree(t0->buf);
++ t0->buf = NULL;
++
++ if (t1->buf != NULL)
++ kfree(t1->buf);
++ t1->buf = NULL;
++
++ t0->cookie = -1;
++ t1->cookie = -1;
++
++ lw->num_frames = TIMBLOGIW_NUM_FRAMES;
++}
++
++/* IOCTL functions */
++
++static int timblogiw_g_fmt(struct file *file, void *priv,
++ struct v4l2_format *format)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++
++ dev_dbg(&vdev->dev, "%s entry\n", __func__);
++
++ if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ return -EINVAL;
++
++ format->fmt.pix.width = lw->cur_norm->width;
++ format->fmt.pix.height = lw->cur_norm->height;
++ format->fmt.pix.pixelformat = TIMBLOGIW_VIDEO_FORMAT;
++ format->fmt.pix.bytesperline = timblogiw_bytes_per_line(lw->cur_norm);
++ format->fmt.pix.sizeimage = timblogiw_frame_size(lw->cur_norm);
++ format->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
++ format->fmt.pix.field = V4L2_FIELD_NONE;
++ return 0;
++}
++
++static int timblogiw_try_fmt(struct file *file, void *priv,
++ struct v4l2_format *format)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++ struct v4l2_pix_format *pix = &format->fmt.pix;
++
++ dev_dbg(&vdev->dev,
++ "%s - width=%d, height=%d, pixelformat=%d, field=%d\n"
++ "bytes per line %d, size image: %d, colorspace: %d\n",
++ __func__,
++ pix->width, pix->height, pix->pixelformat, pix->field,
++ pix->bytesperline, pix->sizeimage, pix->colorspace);
++
++ if (format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ return -EINVAL;
++
++ if (pix->field != V4L2_FIELD_NONE)
++ return -EINVAL;
++
++ if (pix->pixelformat != TIMBLOGIW_VIDEO_FORMAT)
++ return -EINVAL;
++
++ if ((lw->cur_norm->height != pix->height) ||
++ (lw->cur_norm->width != pix->width)) {
++ pix->width = lw->cur_norm->width;
++ pix->height = lw->cur_norm->height;
++ }
++
++ return 0;
++}
++
++static int timblogiw_querycap(struct file *file, void *priv,
++ struct v4l2_capability *cap)
++{
++ struct video_device *vdev = video_devdata(file);
++
++ dev_dbg(&vdev->dev, "%s: Entry\n", __func__);
++ memset(cap, 0, sizeof(*cap));
++ strncpy(cap->card, "Timberdale Video", sizeof(cap->card)-1);
++	strncpy(cap->driver, "Timblogiw", sizeof(cap->driver)-1);
++ cap->version = TIMBLOGIW_VERSION_CODE;
++ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
++ V4L2_CAP_STREAMING;
++
++ return 0;
++}
++
++static int timblogiw_enum_fmt(struct file *file, void *priv,
++ struct v4l2_fmtdesc *fmt)
++{
++ struct video_device *vdev = video_devdata(file);
++
++ dev_dbg(&vdev->dev, "%s, index: %d\n", __func__, fmt->index);
++
++ if (fmt->index != 0)
++ return -EINVAL;
++ memset(fmt, 0, sizeof(*fmt));
++ fmt->index = 0;
++ fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++	strncpy(fmt->description, "4:2:2, packed, UYVY",
++ sizeof(fmt->description)-1);
++ fmt->pixelformat = TIMBLOGIW_VIDEO_FORMAT;
++ memset(fmt->reserved, 0, sizeof(fmt->reserved));
++
++ return 0;
++}
++
++static int timblogiw_reqbufs(struct file *file, void *priv,
++ struct v4l2_requestbuffers *rb)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++
++ dev_dbg(&vdev->dev, "%s: entry\n", __func__);
++
++ if (rb->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
++ rb->memory != V4L2_MEMORY_MMAP)
++ return -EINVAL;
++
++ timblogiw_empty_framequeues(lw);
++
++ timblogiw_release_buffers(lw);
++ if (rb->count)
++ rb->count = timblogiw_request_buffers(lw, rb->count);
++
++ dev_dbg(&vdev->dev, "%s: io method is mmap. num bufs %i\n",
++ __func__, rb->count);
++
++ return 0;
++}
++
++static int timblogiw_querybuf(struct file *file, void *priv,
++ struct v4l2_buffer *b)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++
++ dev_dbg(&vdev->dev, "%s: entry\n", __func__);
++
++ if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
++ b->index >= lw->num_frames)
++ return -EINVAL;
++
++ memcpy(b, &lw->frame[b->index].buf, sizeof(*b));
++
++ if (lw->frame[b->index].vma_use_count)
++ b->flags |= V4L2_BUF_FLAG_MAPPED;
++
++ if (lw->frame[b->index].state == F_DONE)
++ b->flags |= V4L2_BUF_FLAG_DONE;
++ else if (lw->frame[b->index].state != F_UNUSED)
++ b->flags |= V4L2_BUF_FLAG_QUEUED;
++
++ return 0;
++}
++
++static int timblogiw_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++
++ if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
++ b->index >= lw->num_frames)
++ return -EINVAL;
++
++ if (lw->frame[b->index].state != F_UNUSED)
++ return -EAGAIN;
++
++ if (!lw->frame[b->index].bufmem)
++ return -EINVAL;
++
++ if (b->memory != V4L2_MEMORY_MMAP)
++ return -EINVAL;
++
++ lw->frame[b->index].state = F_QUEUED;
++
++ spin_lock_bh(&lw->queue_lock);
++ list_add_tail(&lw->frame[b->index].frame, &lw->inqueue);
++ spin_unlock_bh(&lw->queue_lock);
++
++ return 0;
++}
++
++static int timblogiw_dqbuf(struct file *file, void *priv,
++ struct v4l2_buffer *b)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++ struct timblogiw_frame *f;
++ int ret = 0;
++
++ if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ dev_dbg(&vdev->dev, "%s: illegal buf type!\n", __func__);
++ return -EINVAL;
++ }
++
++ if (list_empty(&lw->outqueue)) {
++ if (file->f_flags & O_NONBLOCK)
++ return -EAGAIN;
++
++ ret = wait_event_interruptible(lw->wait_frame,
++ !list_empty(&lw->outqueue));
++ if (ret)
++ return ret;
++ }
++
++ spin_lock_bh(&lw->queue_lock);
++ f = list_entry(lw->outqueue.next,
++ struct timblogiw_frame, frame);
++ list_del(lw->outqueue.next);
++ spin_unlock_bh(&lw->queue_lock);
++
++ f->state = F_UNUSED;
++ memcpy(b, &f->buf, sizeof(*b));
++
++ if (f->vma_use_count)
++ b->flags |= V4L2_BUF_FLAG_MAPPED;
++
++ return 0;
++}
++
++static int timblogiw_g_std(struct file *file, void *priv, v4l2_std_id *std)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++
++ dev_dbg(&vdev->dev, "%s: entry\n", __func__);
++
++ *std = lw->cur_norm->std;
++ return 0;
++}
++
++static int timblogiw_s_std(struct file *file, void *priv, v4l2_std_id *std)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++ int err = 0;
++
++ dev_dbg(&vdev->dev, "%s: entry\n", __func__);
++
++ if (TIMBLOGIW_HAS_DECODER(lw))
++ err = v4l2_subdev_call(lw->sd_enc, core, s_std, *std);
++
++ if (!err)
++ lw->cur_norm = timblogiw_get_norm(*std);
++
++ return err;
++}
++
++static int timblogiw_enuminput(struct file *file, void *priv,
++ struct v4l2_input *inp)
++{
++ struct video_device *vdev = video_devdata(file);
++
++ dev_dbg(&vdev->dev, "%s: Entry\n", __func__);
++
++ if (inp->index != 0)
++ return -EINVAL;
++
++ memset(inp, 0, sizeof(*inp));
++ inp->index = 0;
++
++ strncpy(inp->name, "Timb input 1", sizeof(inp->name) - 1);
++ inp->type = V4L2_INPUT_TYPE_CAMERA;
++ inp->std = V4L2_STD_ALL;
++
++ return 0;
++}
++
++static int timblogiw_g_input(struct file *file, void *priv,
++ unsigned int *input)
++{
++ struct video_device *vdev = video_devdata(file);
++
++ dev_dbg(&vdev->dev, "%s: Entry\n", __func__);
++
++ *input = 0;
++
++ return 0;
++}
++
++static int timblogiw_s_input(struct file *file, void *priv, unsigned int input)
++{
++ struct video_device *vdev = video_devdata(file);
++
++ dev_dbg(&vdev->dev, "%s: Entry\n", __func__);
++
++ if (input != 0)
++ return -EINVAL;
++ return 0;
++}
++
++static int timblogiw_streamon(struct file *file, void *priv, unsigned int type)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++ struct timblogiw_frame *f;
++
++ dev_dbg(&vdev->dev, "%s: entry\n", __func__);
++
++ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ dev_dbg(&vdev->dev, "%s - No capture device\n", __func__);
++ return -EINVAL;
++ }
++
++ if (list_empty(&lw->inqueue)) {
++ dev_dbg(&vdev->dev, "%s - inqueue is empty\n", __func__);
++ return -EINVAL;
++ }
++
++ if (lw->stream == STREAM_ON)
++ return 0;
++
++ lw->stream = STREAM_ON;
++
++ f = list_entry(lw->inqueue.next,
++ struct timblogiw_frame, frame);
++
++ dev_dbg(&vdev->dev, "%s - f size: %d, bpr: %d, dma addr: %x\n",
++ __func__, timblogiw_frame_size(lw->cur_norm),
++ timblogiw_bytes_per_line(lw->cur_norm),
++ (int)sg_dma_address(lw->dma.transfer[lw->dma.curr].sg));
++
++ __timblogiw_start_dma(lw);
++
++ return 0;
++}
++
++static void timblogiw_stopstream(struct timblogiw *lw)
++{
++ if (lw->stream == STREAM_ON) {
++ /* The FPGA might be busy copying the current frame, we have
++ * to wait for the frame to finish
++ */
++ spin_lock_bh(&lw->queue_lock);
++ lw->stream = STREAM_OFF;
++ spin_unlock_bh(&lw->queue_lock);
++
++ dma_sync_wait(lw->chan,
++ (lw->dma.transfer + lw->dma.curr)->cookie);
++ }
++}
++
++static int timblogiw_streamoff(struct file *file, void *priv,
++ unsigned int type)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++
++ dev_dbg(&vdev->dev, "%s entry\n", __func__);
++
++ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ return -EINVAL;
++
++ timblogiw_stopstream(lw);
++
++ timblogiw_empty_framequeues(lw);
++
++ return 0;
++}
++
++static int timblogiw_querystd(struct file *file, void *priv, v4l2_std_id *std)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++
++ dev_dbg(&vdev->dev, "%s entry\n", __func__);
++
++ if (TIMBLOGIW_HAS_DECODER(lw))
++ return v4l2_subdev_call(lw->sd_enc, video, querystd, std);
++ else {
++ *std = lw->cur_norm->std;
++ return 0;
++ }
++}
++
++static int timblogiw_enum_framesizes(struct file *file, void *priv,
++ struct v4l2_frmsizeenum *fsize)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++
++ dev_dbg(&vdev->dev, "%s - index: %d, format: %d\n", __func__,
++ fsize->index, fsize->pixel_format);
++
++ if ((fsize->index != 0) ||
++ (fsize->pixel_format != TIMBLOGIW_VIDEO_FORMAT))
++ return -EINVAL;
++
++ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
++ fsize->discrete.width = lw->cur_norm->width;
++ fsize->discrete.height = lw->cur_norm->height;
++
++ return 0;
++}
++
++
++/*******************************
++ * Device Operations functions *
++ *******************************/
++
++static int timblogiw_open(struct file *file)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++ v4l2_std_id std = V4L2_STD_UNKNOWN;
++ int err = 0;
++
++ dev_dbg(&vdev->dev, "%s: entry\n", __func__);
++
++ spin_lock_init(&lw->queue_lock);
++ init_waitqueue_head(&lw->wait_frame);
++
++ mutex_lock(&lw->lock);
++
++ if (TIMBLOGIW_HAS_DECODER(lw) && !lw->sd_enc) {
++ struct i2c_adapter *adapt;
++
++ /* find the video decoder */
++ adapt = i2c_get_adapter(lw->pdata.i2c_adapter);
++ if (!adapt) {
++ dev_err(&vdev->dev, "No I2C bus #%d\n",
++ lw->pdata.i2c_adapter);
++ err = -ENODEV;
++ goto out;
++ }
++
++ /* now find the encoder */
++ lw->sd_enc = v4l2_i2c_new_subdev_board(&lw->v4l2_dev, adapt,
++ lw->pdata.encoder.module_name, lw->pdata.encoder.info,
++ NULL);
++
++ i2c_put_adapter(adapt);
++
++ if (!lw->sd_enc) {
++ dev_err(&vdev->dev, "Failed to get encoder: %s\n",
++ lw->pdata.encoder.module_name);
++ err = -ENODEV;
++ goto out;
++ }
++ }
++
++ timblogiw_querystd(file, NULL, &std);
++ lw->cur_norm = timblogiw_get_norm(std);
++
++ lw->stream = STREAM_OFF;
++ lw->num_frames = TIMBLOGIW_NUM_FRAMES;
++
++ timblogiw_empty_framequeues(lw);
++
++out:
++ mutex_unlock(&lw->lock);
++
++ return err;
++}
++
++static int timblogiw_close(struct file *file)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++
++ dev_dbg(&vdev->dev, "%s: Entry\n", __func__);
++
++ mutex_lock(&lw->lock);
++
++ timblogiw_stopstream(lw);
++
++ timblogiw_release_buffers(lw);
++
++ mutex_unlock(&lw->lock);
++ return 0;
++}
++
++static ssize_t timblogiw_read(struct file *file, char __user *data,
++ size_t count, loff_t *ppos)
++{
++ struct video_device *vdev = video_devdata(file);
++
++ dev_dbg(&vdev->dev, "%s - read request\n", __func__);
++
++ return -EINVAL;
++}
++
++static void timblogiw_vm_open(struct vm_area_struct *vma)
++{
++ struct timblogiw_frame *f = vma->vm_private_data;
++ f->vma_use_count++;
++}
++
++static void timblogiw_vm_close(struct vm_area_struct *vma)
++{
++ struct timblogiw_frame *f = vma->vm_private_data;
++ f->vma_use_count--;
++}
++
++static struct vm_operations_struct timblogiw_vm_ops = {
++ .open = timblogiw_vm_open,
++ .close = timblogiw_vm_close,
++};
++
++static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct timblogiw *lw = video_get_drvdata(vdev);
++ unsigned long size = vma->vm_end - vma->vm_start, start = vma->vm_start;
++ void *pos;
++ u32 i;
++ int ret = -EINVAL;
++
++ dev_dbg(&vdev->dev, "%s: entry\n", __func__);
++
++ if (mutex_lock_interruptible(&lw->lock))
++ return -ERESTARTSYS;
++
++ if (!(vma->vm_flags & VM_WRITE) ||
++ size != PAGE_ALIGN(lw->frame[0].buf.length))
++ goto error_unlock;
++
++ for (i = 0; i < lw->num_frames; i++)
++ if ((lw->frame[i].buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
++ break;
++
++ if (i == lw->num_frames) {
++ dev_dbg(&vdev->dev,
++ "%s - user supplied mapping address is out of range\n",
++ __func__);
++ goto error_unlock;
++ }
++
++ vma->vm_flags |= VM_IO;
++ vma->vm_flags |= VM_RESERVED; /* Do not swap out this VMA */
++
++ pos = lw->frame[i].bufmem;
++ while (size > 0) { /* size is page-aligned */
++ if (vm_insert_page(vma, start, vmalloc_to_page(pos))) {
++ dev_dbg(&vdev->dev, "%s - vm_insert_page failed\n",
++ __func__);
++ ret = -EAGAIN;
++ goto error_unlock;
++ }
++ start += PAGE_SIZE;
++ pos += PAGE_SIZE;
++ size -= PAGE_SIZE;
++ }
++
++ vma->vm_ops = &timblogiw_vm_ops;
++ vma->vm_private_data = &lw->frame[i];
++ timblogiw_vm_open(vma);
++ ret = 0;
++
++error_unlock:
++ mutex_unlock(&lw->lock);
++ return ret;
++}
++
++static const __devinitdata struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
++ .vidioc_querycap = timblogiw_querycap,
++ .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
++ .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
++ .vidioc_try_fmt_vid_cap = timblogiw_try_fmt,
++ .vidioc_s_fmt_vid_cap = timblogiw_try_fmt,
++ .vidioc_reqbufs = timblogiw_reqbufs,
++ .vidioc_querybuf = timblogiw_querybuf,
++ .vidioc_qbuf = timblogiw_qbuf,
++ .vidioc_dqbuf = timblogiw_dqbuf,
++ .vidioc_g_std = timblogiw_g_std,
++ .vidioc_s_std = timblogiw_s_std,
++ .vidioc_enum_input = timblogiw_enuminput,
++ .vidioc_g_input = timblogiw_g_input,
++ .vidioc_s_input = timblogiw_s_input,
++ .vidioc_streamon = timblogiw_streamon,
++ .vidioc_streamoff = timblogiw_streamoff,
++ .vidioc_querystd = timblogiw_querystd,
++ .vidioc_enum_framesizes = timblogiw_enum_framesizes,
++};
++
++static const __devinitdata struct v4l2_file_operations timblogiw_fops = {
++ .owner = THIS_MODULE,
++ .open = timblogiw_open,
++ .release = timblogiw_close,
++ .ioctl = video_ioctl2, /* V4L2 ioctl handler */
++ .mmap = timblogiw_mmap,
++ .read = timblogiw_read,
++};
++
++static const __devinitdata struct video_device timblogiw_template = {
++ .name = TIMBLOGIWIN_NAME,
++ .fops = &timblogiw_fops,
++ .ioctl_ops = &timblogiw_ioctl_ops,
++ .release = video_device_release_empty,
++ .minor = -1,
++ .tvnorms = V4L2_STD_PAL | V4L2_STD_NTSC
++};
++
++static int __devinit timblogiw_probe(struct platform_device *pdev)
++{
++ int err;
++ struct timblogiw *lw = NULL;
++ struct timb_video_platform_data *pdata = pdev->dev.platform_data;
++
++ if (!pdata) {
++ dev_err(&pdev->dev, "No platform data\n");
++ err = -EINVAL;
++ goto err;
++ }
++
++ if (!pdata->encoder.module_name)
++ dev_info(&pdev->dev, "Running without decoder\n");
++
++ lw = kzalloc(sizeof(*lw), GFP_KERNEL);
++ if (!lw) {
++ err = -ENOMEM;
++ goto err;
++ }
++
++ lw->cur_norm = timblogiw_tvnorms;
++
++ if (pdev->dev.parent)
++ lw->dev = pdev->dev.parent;
++ else
++ lw->dev = &pdev->dev;
++
++ memcpy(&lw->pdata, pdata, sizeof(lw->pdata));
++
++ mutex_init(&lw->lock);
++
++ lw->video_dev = timblogiw_template;
++
++ strlcpy(lw->v4l2_dev.name, DRIVER_NAME, sizeof(lw->v4l2_dev.name));
++ err = v4l2_device_register(NULL, &lw->v4l2_dev);
++ if (err)
++ goto err_register;
++
++ lw->video_dev.v4l2_dev = &lw->v4l2_dev;
++
++ err = video_register_device(&lw->video_dev, VFL_TYPE_GRABBER, 0);
++ if (err) {
++ dev_err(&pdev->dev, "Error reg video: %d\n", err);
++ goto err_request;
++ }
++
++ tasklet_init(&lw->tasklet, timblogiw_handleframe, (unsigned long)lw);
++
++ platform_set_drvdata(pdev, lw);
++ video_set_drvdata(&lw->video_dev, lw);
++
++ return 0;
++
++err_request:
++ v4l2_device_unregister(&lw->v4l2_dev);
++err_register:
++ kfree(lw);
++err:
++ dev_err(&pdev->dev, "Failed to register: %d\n", err);
++
++ return err;
++}
++
++static int timblogiw_remove(struct platform_device *pdev)
++{
++ struct timblogiw *lw = platform_get_drvdata(pdev);
++
++ video_unregister_device(&lw->video_dev);
++
++ v4l2_device_unregister(&lw->v4l2_dev);
++
++ tasklet_kill(&lw->tasklet);
++ kfree(lw);
++
++ platform_set_drvdata(pdev, NULL);
++
++ return 0;
++}
++
++static struct platform_driver timblogiw_platform_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = timblogiw_probe,
++ .remove = timblogiw_remove,
++};
++
++/*--------------------------------------------------------------------------*/
++
++static int __init timblogiw_init(void)
++{
++ return platform_driver_register(&timblogiw_platform_driver);
++}
++
++static void __exit timblogiw_exit(void)
++{
++ platform_driver_unregister(&timblogiw_platform_driver);
++}
++
++module_init(timblogiw_init);
++module_exit(timblogiw_exit);
++
++MODULE_DESCRIPTION("Timberdale Video In driver");
++MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("platform:"DRIVER_NAME);
++
+diff --git a/drivers/media/video/timblogiw.h b/drivers/media/video/timblogiw.h
+new file mode 100644
+index 0000000..56931e3
+--- /dev/null
++++ b/drivers/media/video/timblogiw.h
+@@ -0,0 +1,94 @@
++/*
++ * timblogiw.h timberdale FPGA LogiWin Video In driver defines
++ * Copyright (c) 2009-2010 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA LogiWin Video In
++ */
++
++#ifndef _TIMBLOGIW_H
++#define _TIMBLOGIW_H
++
++#include <linux/interrupt.h>
++#include <media/timb_video.h>
++#include <linux/dmaengine.h>
++#include <linux/scatterlist.h>
++
++#define TIMBLOGIWIN_NAME "Timberdale Video-In"
++
++#define TIMBLOGIW_NUM_FRAMES 10
++
++#define TIMBLOGIW_LINES_PER_DESC 45
++
++enum timblogiw_stream_state {
++ STREAM_OFF,
++ STREAM_ON,
++};
++
++enum timblogiw_frame_state {
++ F_UNUSED = 0,
++ F_QUEUED,
++ F_DONE,
++};
++
++struct timblogiw_frame {
++ void *bufmem;
++ struct v4l2_buffer buf;
++ enum timblogiw_frame_state state;
++ struct list_head frame;
++ unsigned long vma_use_count;
++};
++
++struct timblogiw_tvnorm {
++ v4l2_std_id std;
++ u16 width;
++ u16 height;
++};
++
++struct timbdma_transfer {
++ void *buf;
++ struct scatterlist sg[16];
++ dma_cookie_t cookie;
++};
++
++struct timblogiw_dma_control {
++ struct timbdma_transfer transfer[2];
++ struct timbdma_transfer *filled;
++ int curr;
++};
++
++struct timblogiw {
++ struct timblogiw_frame frame[TIMBLOGIW_NUM_FRAMES];
++ int num_frames;
++ unsigned int frame_count;
++ struct list_head inqueue, outqueue;
++ spinlock_t queue_lock; /* mutual exclusion */
++ enum timblogiw_stream_state stream;
++ struct video_device video_dev;
++ struct v4l2_device v4l2_dev;
++ struct mutex lock; /* mutual exclusion */
++ wait_queue_head_t wait_frame;
++ struct timblogiw_tvnorm const *cur_norm;
++ struct device *dev;
++ struct timblogiw_dma_control dma;
++ struct tasklet_struct tasklet;
++ struct timb_video_platform_data pdata;
++ struct v4l2_subdev *sd_enc; /* encoder */
++ struct dma_chan *chan;
++};
++
++#endif /* _TIMBLOGIW_H */
+diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
+index 8782978..b0e7fbb 100644
+--- a/drivers/mfd/Kconfig
++++ b/drivers/mfd/Kconfig
+@@ -348,6 +348,17 @@ config AB4500_CORE
+ read/write functions for the devices to get access to this chip.
+ This chip embeds various other multimedia funtionalities as well.
+
++config MFD_TIMBERDALE
++ tristate "Support for the Timberdale FPGA"
++ select MFD_CORE
++ depends on PCI
++ ---help---
++	This is the core driver for the timberdale FPGA. This device is a
++	multifunctional device which may provide numerous interfaces.
++
++	The timberdale FPGA can be found on the Intel Atom development board
++	for automotive in-vehicle infotainment board called Russellville.
++
+ endmenu
+
+ menu "Multimedia Capabilities Port drivers"
+diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
+index e09eb48..53375ac 100644
+--- a/drivers/mfd/Makefile
++++ b/drivers/mfd/Makefile
+@@ -55,4 +55,6 @@ obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
+ obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
+ obj-$(CONFIG_AB4500_CORE) += ab4500-core.o
+ obj-$(CONFIG_MFD_88PM8607) += 88pm8607.o
+-obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
+\ No newline at end of file
++obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
++
++obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
+diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
+new file mode 100644
+index 0000000..06d2523
+--- /dev/null
++++ b/drivers/mfd/timberdale.c
+@@ -0,0 +1,1008 @@
++/*
++ * timberdale.c timberdale FPGA mfd shim driver
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/msi.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/mfd/core.h>
++#include <linux/irq.h>
++
++#include <linux/timb_gpio.h>
++
++#include <linux/i2c.h>
++#include <linux/i2c-ocores.h>
++#include <linux/i2c-xiic.h>
++#include <linux/i2c/tsc2007.h>
++#include <linux/can/platform/ascb.h>
++
++#include <linux/spi/spi.h>
++#include <linux/spi/xilinx_spi.h>
++#include <linux/spi/max7301.h>
++#include <linux/spi/mc33880.h>
++
++#include <media/timb_video.h>
++#include <media/timb_radio.h>
++#include <linux/most/timbmlb.h>
++
++#include <linux/timb_dma.h>
++
++#include <sound/timbi2s.h>
++
++#include <linux/ks8842.h>
++
++#include "timberdale.h"
++
++#define DRIVER_NAME "timberdale"
++
++struct timberdale_device {
++ resource_size_t ctl_mapbase;
++ unsigned char __iomem *ctl_membase;
++ struct {
++ u32 major;
++ u32 minor;
++ u32 config;
++ } fw;
++};
++
++/*--------------------------------------------------------------------------*/
++
++static struct tsc2007_platform_data timberdale_tsc2007_platform_data = {
++ .model = 2003,
++ .x_plate_ohms = 100
++};
++
++static struct ascb_platform_data timberdale_ascb_platform_data = {
++ .gpio_pin = GPIO_PIN_ASCB
++};
++
++static struct i2c_board_info timberdale_i2c_board_info[] = {
++ {
++ I2C_BOARD_INFO("tsc2007", 0x48),
++ .platform_data = &timberdale_tsc2007_platform_data,
++ .irq = IRQ_TIMBERDALE_TSC_INT
++ },
++ {
++ I2C_BOARD_INFO("ascb-can", 0x18),
++ .platform_data = &timberdale_ascb_platform_data,
++ }
++};
++
++static __devinitdata struct xiic_i2c_platform_data
++timberdale_xiic_platform_data = {
++ .devices = timberdale_i2c_board_info,
++ .num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
++};
++
++static __devinitdata struct ocores_i2c_platform_data
++timberdale_ocores_platform_data = {
++ .regstep = 4,
++ .clock_khz = 62500,
++ .devices = timberdale_i2c_board_info,
++ .num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
++};
++
++const static __devinitconst struct resource timberdale_xiic_resources[] = {
++ {
++ .start = XIICOFFSET,
++ .end = XIICEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_I2C,
++ .end = IRQ_TIMBERDALE_I2C,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++const static __devinitconst struct resource timberdale_ocores_resources[] = {
++ {
++ .start = OCORESOFFSET,
++ .end = OCORESEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_I2C,
++ .end = IRQ_TIMBERDALE_I2C,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++const struct max7301_platform_data timberdale_max7301_platform_data = {
++ .base = 200
++};
++
++const struct mc33880_platform_data timberdale_mc33880_platform_data = {
++ .base = 100
++};
++
++static struct spi_board_info timberdale_spi_16bit_board_info[] = {
++ {
++ .modalias = "max7301",
++ .max_speed_hz = 26000,
++ .chip_select = 2,
++ .mode = SPI_MODE_0,
++ .platform_data = &timberdale_max7301_platform_data
++ },
++};
++
++static struct spi_board_info timberdale_spi_8bit_board_info[] = {
++ {
++ .modalias = "mc33880",
++ .max_speed_hz = 4000,
++ .chip_select = 1,
++ .mode = SPI_MODE_1,
++ .platform_data = &timberdale_mc33880_platform_data
++ },
++};
++
++static __devinitdata struct xspi_platform_data timberdale_xspi_platform_data = {
++ /* Current(2009-03-06) revision of
++ * Timberdale we can handle 3 chip selects
++ */
++ .num_chipselect = 3,
++ .little_endian = true,
++ /* bits per word and devices will be filled in runtime depending
++ * on the HW config
++ */
++};
++
++const static __devinitconst struct resource timberdale_spi_resources[] = {
++ {
++ .start = SPIOFFSET,
++ .end = SPIEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_SPI,
++ .end = IRQ_TIMBERDALE_SPI,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static __devinitdata struct ks8842_platform_data
++ timberdale_ks8842_platform_data = {
++ .rx_dma_channel = DMA_ETH_RX,
++ .tx_dma_channel = DMA_ETH_TX
++};
++
++const static __devinitconst struct resource timberdale_eth_resources[] = {
++ {
++ .start = ETHOFFSET,
++ .end = ETHEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_ETHSW_IF,
++ .end = IRQ_TIMBERDALE_ETHSW_IF,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static __devinitdata struct timbgpio_platform_data
++ timberdale_gpio_platform_data = {
++ .gpio_base = 0,
++ .nr_pins = GPIO_NR_PINS,
++ .irq_base = 200,
++};
++
++const static __devinitconst struct resource timberdale_gpio_resources[] = {
++ {
++ .start = GPIOOFFSET,
++ .end = GPIOEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_GPIO,
++ .end = IRQ_TIMBERDALE_GPIO,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static __devinitdata struct timbmlb_platform_data
++ timberdale_mlb_platform_data = {
++ .reset_pin = GPIO_PIN_INIC_RST,
++ .rx_dma_channel = DMA_MLB_RX,
++ .tx_dma_channel = DMA_MLB_TX
++};
++
++const static __devinitconst struct resource timberdale_most_resources[] = {
++ {
++ .start = MOSTOFFSET,
++ .end = MOSTEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_MLB,
++ .end = IRQ_TIMBERDALE_MLB,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++const static __devinitconst struct resource timberdale_mlogicore_resources[] = {
++ {
++ .start = MLCOREOFFSET,
++ .end = MLCOREEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_MLCORE,
++ .end = IRQ_TIMBERDALE_MLCORE,
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .start = IRQ_TIMBERDALE_MLCORE_BUF,
++ .end = IRQ_TIMBERDALE_MLCORE_BUF,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++const static __devinitconst struct resource timberdale_uart_resources[] = {
++ {
++ .start = UARTOFFSET,
++ .end = UARTEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_UART,
++ .end = IRQ_TIMBERDALE_UART,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++const static __devinitconst struct resource timberdale_uartlite_resources[] = {
++ {
++ .start = UARTLITEOFFSET,
++ .end = UARTLITEEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_UARTLITE,
++ .end = IRQ_TIMBERDALE_UARTLITE,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static __devinitdata struct timbi2s_bus_data timbi2s_bus_data[] = {
++ {
++ .rx = 0,
++ .sample_rate = 8000,
++ },
++ {
++ .rx = 1,
++ .sample_rate = 8000,
++ },
++ {
++ .rx = 1,
++ .sample_rate = 44100,
++ },
++};
++
++static __devinitdata struct timbi2s_platform_data timbi2s_platform_data = {
++ .busses = timbi2s_bus_data,
++ .num_busses = ARRAY_SIZE(timbi2s_bus_data),
++ .main_clk = 62500000,
++};
++
++const static __devinitconst struct resource timberdale_i2s_resources[] = {
++ {
++ .start = I2SOFFSET,
++ .end = I2SEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_I2S,
++ .end = IRQ_TIMBERDALE_I2S,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static __devinitdata struct i2c_board_info timberdale_adv7180_i2c_board_info = {
++ /* Requires jumper JP9 to be off */
++ I2C_BOARD_INFO("adv7180", 0x42 >> 1),
++ .irq = IRQ_TIMBERDALE_ADV7180
++};
++
++static __devinitdata struct timb_video_platform_data
++ timberdale_video_platform_data = {
++ .dma_channel = DMA_VIDEO_RX,
++ .i2c_adapter = 0,
++ .encoder = {
++ .module_name = "adv7180",
++ .info = &timberdale_adv7180_i2c_board_info
++ }
++};
++
++const static __devinitconst struct resource timberdale_radio_resources[] = {
++ {
++ .start = RDSOFFSET,
++ .end = RDSEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_RDS,
++ .end = IRQ_TIMBERDALE_RDS,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static __devinitdata struct i2c_board_info timberdale_tef6868_i2c_board_info = {
++ I2C_BOARD_INFO("tef6862", 0x60)
++};
++
++static __devinitdata struct i2c_board_info timberdale_saa7706_i2c_board_info = {
++ I2C_BOARD_INFO("saa7706h", 0x1C)
++};
++
++static __devinitdata struct timb_radio_platform_data
++ timberdale_radio_platform_data = {
++ .i2c_adapter = 0,
++ .tuner = {
++ .module_name = "tef6862",
++ .info = &timberdale_tef6868_i2c_board_info
++ },
++ .dsp = {
++ .module_name = "saa7706h",
++ .info = &timberdale_saa7706_i2c_board_info
++ }
++};
++
++const static __devinitconst struct resource timberdale_video_resources[] = {
++ {
++ .start = LOGIWOFFSET,
++ .end = LOGIWEND,
++ .flags = IORESOURCE_MEM,
++ },
++ /*
++ note that the "frame buffer" is located in DMA area
++ starting at 0x1200000
++ */
++};
++
++static __devinitdata struct timb_dma_platform_data timb_dma_platform_data = {
++ .nr_channels = 10,
++ .channels = {
++ {
++ /* UART RX */
++ .rx = true,
++ .descriptors = 2,
++ .descriptor_elements = 1
++ },
++ {
++ /* UART TX */
++ .rx = false,
++ .descriptors = 2,
++ .descriptor_elements = 1
++ },
++ {
++ /* MLB RX */
++ .rx = true,
++ .descriptors = 2,
++ .descriptor_elements = 1
++ },
++ {
++ /* MLB TX */
++ .rx = false,
++ .descriptors = 2,
++ .descriptor_elements = 1
++ },
++ {
++ /* Video RX */
++ .rx = true,
++ .bytes_per_line = 1440,
++ .descriptors = 2,
++ .descriptor_elements = 16
++ },
++ {
++ /* Video framedrop */
++ },
++ {
++ /* SDHCI RX */
++ .rx = true,
++ },
++ {
++ /* SDHCI TX */
++ },
++ {
++ /* ETH RX */
++ .rx = true,
++ .descriptors = 2,
++ .descriptor_elements = 1
++ },
++ {
++ /* ETH TX */
++ .rx = false,
++ .descriptors = 2,
++ .descriptor_elements = 1
++ },
++ }
++};
++
++const static __devinitconst struct resource timberdale_dma_resources[] = {
++ {
++ .start = DMAOFFSET,
++ .end = DMAEND,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_DMA,
++ .end = IRQ_TIMBERDALE_DMA,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
++ {
++ .name = "timb-dma",
++ .num_resources = ARRAY_SIZE(timberdale_dma_resources),
++ .resources = timberdale_dma_resources,
++ .platform_data = &timb_dma_platform_data,
++ .data_size = sizeof(timb_dma_platform_data),
++ },
++ {
++ .name = "timb-uart",
++ .num_resources = ARRAY_SIZE(timberdale_uart_resources),
++ .resources = timberdale_uart_resources,
++ },
++ {
++ .name = "xiic-i2c",
++ .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
++ .resources = timberdale_xiic_resources,
++ .platform_data = &timberdale_xiic_platform_data,
++ .data_size = sizeof(timberdale_xiic_platform_data),
++ },
++ {
++ .name = "timb-gpio",
++ .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
++ .resources = timberdale_gpio_resources,
++ .platform_data = &timberdale_gpio_platform_data,
++ .data_size = sizeof(timberdale_gpio_platform_data),
++ },
++ {
++ .name = "timb-i2s",
++ .num_resources = ARRAY_SIZE(timberdale_i2s_resources),
++ .resources = timberdale_i2s_resources,
++ .platform_data = &timbi2s_platform_data,
++ .data_size = sizeof(timbi2s_platform_data),
++ },
++ {
++ .name = "timb-most",
++ .num_resources = ARRAY_SIZE(timberdale_most_resources),
++ .resources = timberdale_most_resources,
++ .platform_data = &timberdale_mlb_platform_data,
++ .data_size = sizeof(timberdale_mlb_platform_data),
++ },
++ {
++ .name = "timb-video",
++ .num_resources = ARRAY_SIZE(timberdale_video_resources),
++ .resources = timberdale_video_resources,
++ .platform_data = &timberdale_video_platform_data,
++ .data_size = sizeof(timberdale_video_platform_data),
++ },
++ {
++ .name = "timb-radio",
++ .num_resources = ARRAY_SIZE(timberdale_radio_resources),
++ .resources = timberdale_radio_resources,
++ .platform_data = &timberdale_radio_platform_data,
++ .data_size = sizeof(timberdale_radio_platform_data),
++ },
++ {
++ .name = "xilinx_spi",
++ .num_resources = ARRAY_SIZE(timberdale_spi_resources),
++ .resources = timberdale_spi_resources,
++ .platform_data = &timberdale_xspi_platform_data,
++ .data_size = sizeof(timberdale_xspi_platform_data),
++ },
++ {
++ .name = "ks8842",
++ .num_resources = ARRAY_SIZE(timberdale_eth_resources),
++ .resources = timberdale_eth_resources,
++ .platform_data = &timberdale_ks8842_platform_data,
++ .data_size = sizeof(timberdale_ks8842_platform_data)
++ },
++};
++
++static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
++ {
++ .name = "timb-dma",
++ .num_resources = ARRAY_SIZE(timberdale_dma_resources),
++ .resources = timberdale_dma_resources,
++ .platform_data = &timb_dma_platform_data,
++ .data_size = sizeof(timb_dma_platform_data),
++ },
++ {
++ .name = "timb-uart",
++ .num_resources = ARRAY_SIZE(timberdale_uart_resources),
++ .resources = timberdale_uart_resources,
++ },
++ {
++ .name = "uartlite",
++ .num_resources = ARRAY_SIZE(timberdale_uartlite_resources),
++ .resources = timberdale_uartlite_resources,
++ },
++ {
++ .name = "xiic-i2c",
++ .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
++ .resources = timberdale_xiic_resources,
++ .platform_data = &timberdale_xiic_platform_data,
++ .data_size = sizeof(timberdale_xiic_platform_data),
++ },
++ {
++ .name = "timb-gpio",
++ .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
++ .resources = timberdale_gpio_resources,
++ .platform_data = &timberdale_gpio_platform_data,
++ .data_size = sizeof(timberdale_gpio_platform_data),
++ },
++ {
++ .name = "timb-mlogicore",
++ .num_resources = ARRAY_SIZE(timberdale_mlogicore_resources),
++ .resources = timberdale_mlogicore_resources,
++ },
++ {
++ .name = "timb-video",
++ .num_resources = ARRAY_SIZE(timberdale_video_resources),
++ .resources = timberdale_video_resources,
++ .platform_data = &timberdale_video_platform_data,
++ .data_size = sizeof(timberdale_video_platform_data),
++ },
++ {
++ .name = "timb-radio",
++ .num_resources = ARRAY_SIZE(timberdale_radio_resources),
++ .resources = timberdale_radio_resources,
++ .platform_data = &timberdale_radio_platform_data,
++ .data_size = sizeof(timberdale_radio_platform_data),
++ },
++ {
++ .name = "xilinx_spi",
++ .num_resources = ARRAY_SIZE(timberdale_spi_resources),
++ .resources = timberdale_spi_resources,
++ .platform_data = &timberdale_xspi_platform_data,
++ .data_size = sizeof(timberdale_xspi_platform_data),
++ },
++ {
++ .name = "ks8842",
++ .num_resources = ARRAY_SIZE(timberdale_eth_resources),
++ .resources = timberdale_eth_resources,
++ .platform_data = &timberdale_ks8842_platform_data,
++ .data_size = sizeof(timberdale_ks8842_platform_data)
++ },
++};
++
++static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
++ {
++ .name = "timb-dma",
++ .num_resources = ARRAY_SIZE(timberdale_dma_resources),
++ .resources = timberdale_dma_resources,
++ .platform_data = &timb_dma_platform_data,
++ .data_size = sizeof(timb_dma_platform_data),
++ },
++ {
++ .name = "timb-uart",
++ .num_resources = ARRAY_SIZE(timberdale_uart_resources),
++ .resources = timberdale_uart_resources,
++ },
++ {
++ .name = "xiic-i2c",
++ .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
++ .resources = timberdale_xiic_resources,
++ .platform_data = &timberdale_xiic_platform_data,
++ .data_size = sizeof(timberdale_xiic_platform_data),
++ },
++ {
++ .name = "timb-gpio",
++ .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
++ .resources = timberdale_gpio_resources,
++ .platform_data = &timberdale_gpio_platform_data,
++ .data_size = sizeof(timberdale_gpio_platform_data),
++ },
++ {
++ .name = "timb-video",
++ .num_resources = ARRAY_SIZE(timberdale_video_resources),
++ .resources = timberdale_video_resources,
++ .platform_data = &timberdale_video_platform_data,
++ .data_size = sizeof(timberdale_video_platform_data),
++ },
++ {
++ .name = "timb-radio",
++ .num_resources = ARRAY_SIZE(timberdale_radio_resources),
++ .resources = timberdale_radio_resources,
++ .platform_data = &timberdale_radio_platform_data,
++ .data_size = sizeof(timberdale_radio_platform_data),
++ },
++ {
++ .name = "xilinx_spi",
++ .num_resources = ARRAY_SIZE(timberdale_spi_resources),
++ .resources = timberdale_spi_resources,
++ .platform_data = &timberdale_xspi_platform_data,
++ .data_size = sizeof(timberdale_xspi_platform_data),
++ },
++};
++
++static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
++ {
++ .name = "timb-dma",
++ .num_resources = ARRAY_SIZE(timberdale_dma_resources),
++ .resources = timberdale_dma_resources,
++ .platform_data = &timb_dma_platform_data,
++ .data_size = sizeof(timb_dma_platform_data),
++ },
++ {
++ .name = "timb-uart",
++ .num_resources = ARRAY_SIZE(timberdale_uart_resources),
++ .resources = timberdale_uart_resources,
++ },
++ {
++ .name = "ocores-i2c",
++ .num_resources = ARRAY_SIZE(timberdale_ocores_resources),
++ .resources = timberdale_ocores_resources,
++ .platform_data = &timberdale_ocores_platform_data,
++ .data_size = sizeof(timberdale_ocores_platform_data),
++ },
++ {
++ .name = "timb-gpio",
++ .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
++ .resources = timberdale_gpio_resources,
++ .platform_data = &timberdale_gpio_platform_data,
++ .data_size = sizeof(timberdale_gpio_platform_data),
++ },
++ {
++ .name = "timb-i2s",
++ .num_resources = ARRAY_SIZE(timberdale_i2s_resources),
++ .resources = timberdale_i2s_resources,
++ .platform_data = &timbi2s_platform_data,
++ .data_size = sizeof(timbi2s_platform_data),
++ },
++ {
++ .name = "timb-most",
++ .num_resources = ARRAY_SIZE(timberdale_most_resources),
++ .resources = timberdale_most_resources,
++ .platform_data = &timberdale_mlb_platform_data,
++ .data_size = sizeof(timberdale_mlb_platform_data),
++ },
++ {
++ .name = "timb-video",
++ .num_resources = ARRAY_SIZE(timberdale_video_resources),
++ .resources = timberdale_video_resources,
++ .platform_data = &timberdale_video_platform_data,
++ .data_size = sizeof(timberdale_video_platform_data),
++ },
++ {
++ .name = "timb-radio",
++ .num_resources = ARRAY_SIZE(timberdale_radio_resources),
++ .resources = timberdale_radio_resources,
++ .platform_data = &timberdale_radio_platform_data,
++ .data_size = sizeof(timberdale_radio_platform_data),
++ },
++ {
++ .name = "xilinx_spi",
++ .num_resources = ARRAY_SIZE(timberdale_spi_resources),
++ .resources = timberdale_spi_resources,
++ .platform_data = &timberdale_xspi_platform_data,
++ .data_size = sizeof(timberdale_xspi_platform_data),
++ },
++ {
++ .name = "ks8842",
++ .num_resources = ARRAY_SIZE(timberdale_eth_resources),
++ .resources = timberdale_eth_resources,
++ .platform_data = &timberdale_ks8842_platform_data,
++ .data_size = sizeof(timberdale_ks8842_platform_data)
++ },
++};
++
++static const __devinitconst struct resource timberdale_sdhc_resources[] = {
++ /* located in bar 1 and bar 2 */
++ {
++ .start = SDHC0OFFSET,
++ .end = SDHC0END,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = IRQ_TIMBERDALE_SDHC,
++ .end = IRQ_TIMBERDALE_SDHC,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static __devinitdata struct mfd_cell timberdale_cells_bar1[] = {
++ {
++ .name = "sdhci",
++ .num_resources = ARRAY_SIZE(timberdale_sdhc_resources),
++ .resources = timberdale_sdhc_resources,
++ },
++};
++
++static __devinitdata struct mfd_cell timberdale_cells_bar2[] = {
++ {
++ .name = "sdhci",
++ .num_resources = ARRAY_SIZE(timberdale_sdhc_resources),
++ .resources = timberdale_sdhc_resources,
++ },
++};
++
++static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct pci_dev *pdev = to_pci_dev(dev);
++ struct timberdale_device *priv = pci_get_drvdata(pdev);
++
++ return sprintf(buf, "%d.%d.%d\n", priv->fw.major, priv->fw.minor,
++ priv->fw.config);
++}
++
++static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
++
++/*--------------------------------------------------------------------------*/
++
++static int __devinit timb_probe(struct pci_dev *dev,
++ const struct pci_device_id *id)
++{
++ struct timberdale_device *priv;
++ int err, i;
++ resource_size_t mapbase;
++ struct msix_entry *msix_entries = NULL;
++ u8 ip_setup;
++
++ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++ if (!priv)
++ return -ENOMEM;
++
++ pci_set_drvdata(dev, priv);
++
++ err = pci_enable_device(dev);
++ if (err)
++ goto err_enable;
++
++ mapbase = pci_resource_start(dev, 0);
++ if (!mapbase) {
++ printk(KERN_ERR DRIVER_NAME ": No resource\n");
++ goto err_start;
++ }
++
++ /* create a resource for the PCI master register */
++ priv->ctl_mapbase = mapbase + CHIPCTLOFFSET;
++ if (!request_mem_region(priv->ctl_mapbase, CHIPCTLSIZE, "timb-ctl")) {
++ printk(KERN_ERR DRIVER_NAME ": Failed to request ctl mem\n");
++ goto err_request;
++ }
++
++ priv->ctl_membase = ioremap(priv->ctl_mapbase, CHIPCTLSIZE);
++ if (!priv->ctl_membase) {
++ printk(KERN_ALERT DRIVER_NAME": Map error, ctl\n");
++ goto err_ioremap;
++ }
++
++ /* read the HW config */
++ priv->fw.major = ioread32(priv->ctl_membase + TIMB_REV_MAJOR);
++ priv->fw.minor = ioread32(priv->ctl_membase + TIMB_REV_MINOR);
++ priv->fw.config = ioread32(priv->ctl_membase + TIMB_HW_CONFIG);
++
++ if (priv->fw.major > TIMB_SUPPORTED_MAJOR) {
++ printk(KERN_ERR DRIVER_NAME": The driver supports an older "
++ "version of the FPGA, please update the driver to "
++ "support %d.%d\n", priv->fw.major, priv->fw.minor);
++ goto err_ioremap;
++ }
++ if (priv->fw.major < TIMB_SUPPORTED_MAJOR ||
++ priv->fw.minor < TIMB_REQUIRED_MINOR) {
++ printk(KERN_ERR DRIVER_NAME
++ ": The FPGA image is too old (%d.%d), "
++ "please upgrade the FPGA to at least: %d.%d\n",
++ priv->fw.major, priv->fw.minor,
++ TIMB_SUPPORTED_MAJOR, TIMB_REQUIRED_MINOR);
++ goto err_ioremap;
++ }
++
++ msix_entries = kzalloc(TIMBERDALE_NR_IRQS * sizeof(*msix_entries),
++ GFP_KERNEL);
++ if (!msix_entries)
++ goto err_ioremap;
++
++ for (i = 0; i < TIMBERDALE_NR_IRQS; i++)
++ msix_entries[i].entry = i;
++
++ err = pci_enable_msix(dev, msix_entries, TIMBERDALE_NR_IRQS);
++ if (err) {
++ printk(KERN_WARNING DRIVER_NAME
++ ": MSI-X init failed: %d, expected entries: %d\n",
++ err, TIMBERDALE_NR_IRQS);
++ goto err_msix;
++ }
++
++ err = device_create_file(&dev->dev, &dev_attr_fw_ver);
++ if (err)
++ goto err_create_file;
++
++ /* Reset all FPGA PLB peripherals */
++ iowrite32(0x1, priv->ctl_membase + TIMB_SW_RST);
++
++ /* update IRQ offsets in I2C board info */
++ for (i = 0; i < ARRAY_SIZE(timberdale_i2c_board_info); i++)
++ timberdale_i2c_board_info[i].irq =
++ msix_entries[timberdale_i2c_board_info[i].irq].vector;
++
++ /* Update the SPI configuration depending on the HW (8 or 16 bit) */
++ if (priv->fw.config & TIMB_HW_CONFIG_SPI_8BIT) {
++ timberdale_xspi_platform_data.bits_per_word = 8;
++ timberdale_xspi_platform_data.devices =
++ timberdale_spi_8bit_board_info;
++ timberdale_xspi_platform_data.num_devices =
++ ARRAY_SIZE(timberdale_spi_8bit_board_info);
++ } else {
++ timberdale_xspi_platform_data.bits_per_word = 16;
++ timberdale_xspi_platform_data.devices =
++ timberdale_spi_16bit_board_info;
++ timberdale_xspi_platform_data.num_devices =
++ ARRAY_SIZE(timberdale_spi_16bit_board_info);
++ }
++
++ ip_setup = priv->fw.config & TIMB_HW_VER_MASK;
++ if (ip_setup == TIMB_HW_VER0)
++ err = mfd_add_devices(&dev->dev, -1,
++ timberdale_cells_bar0_cfg0,
++ ARRAY_SIZE(timberdale_cells_bar0_cfg0),
++ &dev->resource[0], msix_entries[0].vector);
++ else if (ip_setup == TIMB_HW_VER1)
++ err = mfd_add_devices(&dev->dev, -1,
++ timberdale_cells_bar0_cfg1,
++ ARRAY_SIZE(timberdale_cells_bar0_cfg1),
++ &dev->resource[0], msix_entries[0].vector);
++ else if (ip_setup == TIMB_HW_VER2)
++ err = mfd_add_devices(&dev->dev, -1,
++ timberdale_cells_bar0_cfg2,
++ ARRAY_SIZE(timberdale_cells_bar0_cfg2),
++ &dev->resource[0], msix_entries[0].vector);
++ else if (ip_setup == TIMB_HW_VER3)
++ err = mfd_add_devices(&dev->dev, -1,
++ timberdale_cells_bar0_cfg3,
++ ARRAY_SIZE(timberdale_cells_bar0_cfg3),
++ &dev->resource[0], msix_entries[0].vector);
++ else {
++ /* unknown version */
++ printk(KERN_ERR "Unknown IP setup: %d.%d.%d\n",
++ priv->fw.major, priv->fw.minor, ip_setup);
++ err = -ENODEV;
++ goto err_mfd;
++ }
++
++ if (err) {
++ printk(KERN_WARNING DRIVER_NAME
++ ": mfd_add_devices failed: %d\n", err);
++ goto err_mfd;
++ }
++
++ err = mfd_add_devices(&dev->dev, 0,
++ timberdale_cells_bar1, ARRAY_SIZE(timberdale_cells_bar1),
++ &dev->resource[1], msix_entries[0].vector);
++ if (err) {
++ printk(KERN_WARNING DRIVER_NAME
++ ": mfd_add_devices failed: %d\n", err);
++ goto err_mfd2;
++ }
++
++ /* only version 0 and 3 have the iNand routed to SDHCI */
++ if (((priv->fw.config & TIMB_HW_VER_MASK) == TIMB_HW_VER0) ||
++ ((priv->fw.config & TIMB_HW_VER_MASK) == TIMB_HW_VER3)) {
++ err = mfd_add_devices(&dev->dev, 1, timberdale_cells_bar2,
++ ARRAY_SIZE(timberdale_cells_bar2),
++ &dev->resource[2], msix_entries[0].vector);
++ if (err) {
++ printk(KERN_WARNING DRIVER_NAME
++ ": mfd_add_devices failed: %d\n", err);
++ goto err_mfd2;
++ }
++ }
++
++ kfree(msix_entries);
++
++ printk(KERN_INFO
++ "Found Timberdale Card. Rev: %d.%d, HW config: 0x%02x\n",
++ priv->fw.major, priv->fw.minor, priv->fw.config);
++
++ return 0;
++
++err_mfd2:
++ mfd_remove_devices(&dev->dev);
++err_mfd:
++ device_remove_file(&dev->dev, &dev_attr_fw_ver);
++err_create_file:
++ pci_disable_msix(dev);
++err_msix:
++ iounmap(priv->ctl_membase);
++err_ioremap:
++ release_mem_region(priv->ctl_mapbase, CHIPCTLSIZE);
++err_request:
++ pci_set_drvdata(dev, NULL);
++err_start:
++ pci_disable_device(dev);
++err_enable:
++ kfree(msix_entries);
++ kfree(priv);
++ pci_set_drvdata(dev, NULL);
++ return -ENODEV;
++}
++
++static void __devexit timb_remove(struct pci_dev *dev)
++{
++ struct timberdale_device *priv = pci_get_drvdata(dev);
++
++ mfd_remove_devices(&dev->dev);
++
++ device_remove_file(&dev->dev, &dev_attr_fw_ver);
++
++ iounmap(priv->ctl_membase);
++ release_mem_region(priv->ctl_mapbase, CHIPCTLSIZE);
++
++ pci_disable_msix(dev);
++ pci_disable_device(dev);
++ pci_set_drvdata(dev, NULL);
++ kfree(priv);
++}
++
++static struct pci_device_id timberdale_pci_tbl[] = {
++ { PCI_DEVICE(PCI_VENDOR_ID_TIMB, PCI_DEVICE_ID_TIMB) },
++ { 0 }
++};
++MODULE_DEVICE_TABLE(pci, timberdale_pci_tbl);
++
++static struct pci_driver timberdale_pci_driver = {
++ .name = DRIVER_NAME,
++ .id_table = timberdale_pci_tbl,
++ .probe = timb_probe,
++ .remove = __devexit_p(timb_remove),
++};
++
++static int __init timberdale_init(void)
++{
++ int err;
++
++ err = pci_register_driver(&timberdale_pci_driver);
++ if (err < 0) {
++ printk(KERN_ERR
++ "Failed to register PCI driver for %s device.\n",
++ timberdale_pci_driver.name);
++ return -ENODEV;
++ }
++
++ printk(KERN_INFO "Driver for %s has been successfully registered.\n",
++ timberdale_pci_driver.name);
++
++ return 0;
++}
++
++static void __exit timberdale_exit(void)
++{
++ pci_unregister_driver(&timberdale_pci_driver);
++
++ printk(KERN_INFO "Driver for %s has been successfully unregistered.\n",
++ timberdale_pci_driver.name);
++}
++
++module_init(timberdale_init);
++module_exit(timberdale_exit);
++
++MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
++MODULE_VERSION(DRV_VERSION);
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/mfd/timberdale.h b/drivers/mfd/timberdale.h
+new file mode 100644
+index 0000000..e18fcea
+--- /dev/null
++++ b/drivers/mfd/timberdale.h
+@@ -0,0 +1,164 @@
++/*
++ * timberdale.h timberdale FPGA mfd shim driver defines
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA
++ */
++
++#ifndef MFD_TIMBERDALE_H
++#define MFD_TIMBERDALE_H
++
++#define DRV_VERSION "1.0"
++
++/* This driver only supports versions >= 3.8 and < 4.0 */
++#define TIMB_SUPPORTED_MAJOR 3
++
++/* This driver only supports minor >= 8 */
++#define TIMB_REQUIRED_MINOR 8
++
++/* Registers of the interrupt controller */
++#define ISR 0x00
++#define IPR 0x04
++#define IER 0x08
++#define IAR 0x0c
++#define SIE 0x10
++#define CIE 0x14
++#define MER 0x1c
++
++/* Registers of the control area */
++#define TIMB_REV_MAJOR 0x00
++#define TIMB_REV_MINOR 0x04
++#define TIMB_HW_CONFIG 0x08
++#define TIMB_SW_RST 0x40
++
++/* bits in the TIMB_HW_CONFIG register */
++#define TIMB_HW_CONFIG_SPI_8BIT 0x80
++
++#define TIMB_HW_VER_MASK 0x0f
++#define TIMB_HW_VER0 0x00
++#define TIMB_HW_VER1 0x01
++#define TIMB_HW_VER2 0x02
++#define TIMB_HW_VER3 0x03
++
++#define OCORESOFFSET 0x0
++#define OCORESEND 0x1f
++
++#define SPIOFFSET 0x80
++#define SPIEND 0xff
++
++#define UARTLITEOFFSET 0x100
++#define UARTLITEEND 0x10f
++
++#define RDSOFFSET 0x180
++#define RDSEND 0x183
++
++#define ETHOFFSET 0x300
++#define ETHEND 0x3ff
++
++#define GPIOOFFSET 0x400
++#define GPIOEND 0x7ff
++
++#define CHIPCTLOFFSET 0x800
++#define CHIPCTLEND 0x8ff
++#define CHIPCTLSIZE (CHIPCTLEND - CHIPCTLOFFSET)
++
++#define INTCOFFSET 0xc00
++#define INTCEND 0xfff
++#define INTCSIZE (INTCEND - INTCOFFSET)
++
++#define MOSTOFFSET 0x1000
++#define MOSTEND 0x13ff
++
++#define UARTOFFSET 0x1400
++#define UARTEND 0x17ff
++
++#define XIICOFFSET 0x1800
++#define XIICEND 0x19ff
++
++#define I2SOFFSET 0x1C00
++#define I2SEND 0x1fff
++
++#define LOGIWOFFSET 0x30000
++#define LOGIWEND 0x37fff
++
++#define MLCOREOFFSET 0x40000
++#define MLCOREEND 0x43fff
++
++#define DMAOFFSET 0x01000000
++#define DMAEND 0x013fffff
++
++/* SDHC0 is placed in PCI bar 1 */
++#define SDHC0OFFSET 0x00
++#define SDHC0END 0xff
++
++/* SDHC1 is placed in PCI bar 2 */
++#define SDHC1OFFSET 0x00
++#define SDHC1END 0xff
++
++#define PCI_VENDOR_ID_TIMB 0x10ee
++#define PCI_DEVICE_ID_TIMB 0xa123
++
++#define IRQ_TIMBERDALE_INIC 0
++#define IRQ_TIMBERDALE_MLB 1
++#define IRQ_TIMBERDALE_GPIO 2
++#define IRQ_TIMBERDALE_I2C 3
++#define IRQ_TIMBERDALE_UART 4
++#define IRQ_TIMBERDALE_DMA 5
++#define IRQ_TIMBERDALE_I2S 6
++#define IRQ_TIMBERDALE_TSC_INT 7
++#define IRQ_TIMBERDALE_SDHC 8
++#define IRQ_TIMBERDALE_ADV7180 9
++#define IRQ_TIMBERDALE_ETHSW_IF 10
++#define IRQ_TIMBERDALE_SPI 11
++#define IRQ_TIMBERDALE_UARTLITE 12
++#define IRQ_TIMBERDALE_MLCORE 13
++#define IRQ_TIMBERDALE_MLCORE_BUF 14
++#define IRQ_TIMBERDALE_RDS 15
++
++#define TIMBERDALE_NR_IRQS 16
++
++/* Some of the interrupts are level triggered, some are edge triggered */
++#define IRQ_TIMBERDALE_EDGE_MASK ((1 << IRQ_TIMBERDALE_ADV7180) | \
++ (1 << IRQ_TIMBERDALE_TSC_INT) | \
++ (1 << IRQ_TIMBERDALE_MLB) | (1 << IRQ_TIMBERDALE_INIC))
++
++#define IRQ_TIMBERDALE_LEVEL_MASK ((1 << IRQ_TIMBERDALE_SPI) | \
++ (1 << IRQ_TIMBERDALE_ETHSW_IF) | (1 << IRQ_TIMBERDALE_SDHC) | \
++ (1 << IRQ_TIMBERDALE_I2S) | (1 << IRQ_TIMBERDALE_UART) | \
++ (1 << IRQ_TIMBERDALE_I2C) | (1 << IRQ_TIMBERDALE_GPIO) | \
++ (1 << IRQ_TIMBERDALE_DMA))
++
++#define GPIO_PIN_ASCB 8
++#define GPIO_PIN_INIC_RST 14
++#define GPIO_PIN_BT_RST 15
++#define GPIO_NR_PINS 16
++
++/* DMA Channels */
++#define DMA_UART_RX 0
++#define DMA_UART_TX 1
++#define DMA_MLB_RX 2
++#define DMA_MLB_TX 3
++#define DMA_VIDEO_RX 4
++#define DMA_VIDEO_DROP 5
++#define DMA_SDHCI_RX 6
++#define DMA_SDHCI_TX 7
++#define DMA_ETH_RX 8
++#define DMA_ETH_TX 9
++
++#endif
++
+diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
+index dd9a09c..06ec723 100644
+--- a/drivers/net/Kconfig
++++ b/drivers/net/Kconfig
+@@ -1730,6 +1730,16 @@ config KS8842
+ This platform driver is for Micrel KSZ8842 / KS8842
+ 2-port ethernet switch chip (managed, VLAN, QoS).
+
++config KS8842_TIMB_DMA
++ bool "Use Timberdale specific DMA engine"
++ depends on KS8842 && MFD_TIMBERDALE
++ select TIMB_DMA
++ help
++ This option enables usage of the timberdale specific DMA engine
++ for the KS8842 driver. Rather than using PIO which results in
++ single accesses over PCIe, the DMA block of the timberdale FPGA
++ will burst data to and from the KS8842.
++
+ config KS8851
+ tristate "Micrel KS8851 SPI"
+ depends on SPI
+diff --git a/drivers/net/Makefile b/drivers/net/Makefile
+index ad1346d..a99b3b8 100644
+--- a/drivers/net/Makefile
++++ b/drivers/net/Makefile
+@@ -20,6 +20,7 @@ obj-$(CONFIG_CHELSIO_T1) += chelsio/
+ obj-$(CONFIG_CHELSIO_T3) += cxgb3/
+ obj-$(CONFIG_EHEA) += ehea/
+ obj-$(CONFIG_CAN) += can/
++obj-$(CONFIG_MOST) += most/
+ obj-$(CONFIG_BONDING) += bonding/
+ obj-$(CONFIG_ATL1) += atlx/
+ obj-$(CONFIG_ATL2) += atlx/
+diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
+index 5c45cb5..bc93d65 100644
+--- a/drivers/net/ks8842.c
++++ b/drivers/net/ks8842.c
+@@ -1,5 +1,5 @@
+ /*
+- * ks8842_main.c timberdale KS8842 ethernet driver
++ * ks8842.c timberdale KS8842 ethernet driver
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+@@ -26,11 +26,22 @@
+ #include <linux/netdevice.h>
+ #include <linux/etherdevice.h>
+ #include <linux/ethtool.h>
++#ifdef CONFIG_KS8842_TIMB_DMA
++#include <linux/ks8842.h>
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
++#include <linux/scatterlist.h>
++#endif
+
+ #define DRV_NAME "ks8842"
+
+ /* Timberdale specific Registers */
+-#define REG_TIMB_RST 0x1c
++#define REG_TIMB_RST 0x1c
++#define REG_TIMB_FIFO 0x20
++#define REG_TIMB_ISR 0x24
++#define REG_TIMB_IER 0x28
++#define REG_TIMB_IAR 0x2C
++#define REQ_TIMB_DMA_RESUME 0x30
+
+ /* KS8842 registers */
+
+@@ -73,6 +84,11 @@
+ #define IRQ_RX_ERROR 0x0080
+ #define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \
+ IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
++#ifdef CONFIG_KS8842_TIMB_DMA
++ #define ENABLED_IRQS_IP (IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \
++ IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
++ #define ENABLED_IRQS_DMA (ENABLED_IRQS_IP | IRQ_RX)
++#endif
+ #define REG_ISR 0x02
+ #define REG_RXSR 0x04
+ #define RXSR_VALID 0x8000
+@@ -111,14 +127,62 @@
+ #define REG_P1CR4 0x02
+ #define REG_P1SR 0x04
+
++#ifdef CONFIG_KS8842_TIMB_DMA
++#define DMA_BUFFER_SIZE 2048
++
++#define DMA_DEV(a) ((a->dev->parent) ? a->dev->parent : a->dev)
++
++#define DMA_ONGOING(a) (a->dma_tx.ongoing | a->dma_rx.ongoing)
++
++struct ks8842_dma_ctl {
++ struct dma_chan *chan;
++ struct dma_async_tx_descriptor *adesc;
++ void *buf;
++ struct scatterlist sg;
++ int channel;
++};
++
++struct ks8842_rx_dma_ctl {
++ struct dma_chan *chan;
++ struct dma_async_tx_descriptor *adesc;
++ struct sk_buff *skb;
++ struct scatterlist sg;
++ struct tasklet_struct tasklet;
++ int channel;
++};
++
++#define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \
++ ((adapter)->dma_rx.channel != -1))
++
++#endif
++
+ struct ks8842_adapter {
+ void __iomem *hw_addr;
+ int irq;
+ struct tasklet_struct tasklet;
+ spinlock_t lock; /* spinlock to be interrupt safe */
+- struct platform_device *pdev;
++ struct device *dev;
++ struct work_struct timeout_work;
++ struct net_device *netdev;
++#ifdef CONFIG_KS8842_TIMB_DMA
++ struct ks8842_dma_ctl dma_tx;
++ struct ks8842_rx_dma_ctl dma_rx;
++#endif
+ };
+
++static u8 macaddr[ETH_ALEN];
++
++#ifdef CONFIG_KS8842_TIMB_DMA
++static void ks8842_dma_rx_cb(void *data);
++static void ks8842_dma_tx_cb(void *data);
++
++
++static inline void ks8842_resume_dma(struct ks8842_adapter *adapter)
++{
++ iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME);
++}
++#endif
++
+ static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank)
+ {
+ iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK);
+@@ -195,7 +259,6 @@ static void ks8842_reset(struct ks8842_adapter *adapter)
+ msleep(10);
+ iowrite16(0, adapter->hw_addr + REG_GRR);
+ */
+- iowrite16(32, adapter->hw_addr + REG_SELECT_BANK);
+ iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST);
+ msleep(20);
+ }
+@@ -203,8 +266,10 @@ static void ks8842_reset(struct ks8842_adapter *adapter)
+ static void ks8842_update_link_status(struct net_device *netdev,
+ struct ks8842_adapter *adapter)
+ {
++ u16 p1mbsr = ks8842_read16(adapter, 45, REG_P1MBSR);
++
+ /* check the status of the link */
+- if (ks8842_read16(adapter, 45, REG_P1MBSR) & 0x4) {
++ if (p1mbsr & 0x4) {
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ } else {
+@@ -241,10 +306,8 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
+ /* Enable QMU Transmit flow control / transmit padding / Transmit CRC */
+ ks8842_write16(adapter, 16, 0x000E, REG_TXCR);
+
+- /* enable the receiver, uni + multi + broadcast + flow ctrl
+- + crc strip */
+- ks8842_write16(adapter, 16, 0x8 | 0x20 | 0x40 | 0x80 | 0x400,
+- REG_RXCR);
++ /* enable the receiver, uni + multi + broadcast + crc strip */
++ ks8842_write16(adapter, 16, 0x8 | 0x20 | 0x40 | 0x80, REG_RXCR);
+
+ /* TX frame pointer autoincrement */
+ ks8842_write16(adapter, 17, 0x4000, REG_TXFDPR);
+@@ -261,13 +324,11 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
+ /* enable no excessive collison drop */
+ ks8842_enable_bits(adapter, 32, 1 << 3, REG_SGCR2);
+
+- /* Enable port 1 force flow control / back pressure / transmit / recv */
+- ks8842_write16(adapter, 48, 0x1E07, REG_P1CR2);
++ /* Enable port 1 / back pressure / transmit / recv */
++ ks8842_write16(adapter, 48, 0xE07, REG_P1CR2);
+
+ /* restart port auto-negotiation */
+ ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4);
+- /* only advertise 10Mbps */
+- ks8842_clear_bits(adapter, 49, 3 << 2, REG_P1CR4);
+
+ /* Enable the transmitter */
+ ks8842_enable_tx(adapter);
+@@ -279,7 +340,17 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
+ ks8842_write16(adapter, 18, 0xffff, REG_ISR);
+
+ /* enable interrupts */
+- ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
++#ifdef CONFIG_KS8842_TIMB_DMA
++ if (KS8842_USE_DMA(adapter)) {
++ iowrite16(ENABLED_IRQS_IP, adapter->hw_addr + REG_TIMB_IER);
++ ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
++ } else {
++#endif
++ ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
++ iowrite16(ENABLED_IRQS, adapter->hw_addr + REG_TIMB_IER);
++#ifdef CONFIG_KS8842_TIMB_DMA
++ }
++#endif
+
+ /* enable the switch */
+ ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE);
+@@ -302,11 +373,74 @@ static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
+ ks8842_write16(adapter, 39, mac, REG_MACAR3);
+ }
+
++static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
++{
++ unsigned long flags;
++ unsigned i;
++
++ spin_lock_irqsave(&adapter->lock, flags);
++ for (i = 0; i < ETH_ALEN; i++) {
++ ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
++ ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
++ REG_MACAR1 + i);
++ }
++ spin_unlock_irqrestore(&adapter->lock, flags);
++}
++
+ static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
+ {
+ return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
+ }
+
++#ifdef CONFIG_KS8842_TIMB_DMA
++static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
++{
++ struct ks8842_adapter *adapter = netdev_priv(netdev);
++ struct ks8842_dma_ctl *ctl = &adapter->dma_tx;
++ u8 *buf = ctl->buf;
++
++ if (ctl->adesc) {
++ dev_dbg(adapter->dev, "%s: TX ongoing\n", __func__);
++ /* transfer ongoing */
++ return NETDEV_TX_BUSY;
++ }
++
++ sg_dma_len(&ctl->sg) = skb->len + sizeof(u32);
++
++ /* copy data to the TX buffer */
++ /* the control word, enable IRQ, port 1 and the length */
++ *buf++ = 0x00;
++ *buf++ = 0x01; /* Port 1 */
++ *buf++ = skb->len & 0xff;
++ *buf++ = (skb->len >> 8) & 0xff;
++ skb_copy_from_linear_data(skb, buf, skb->len);
++
++ dma_sync_single_range_for_device(DMA_DEV(adapter),
++ sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg),
++ DMA_TO_DEVICE);
++
++ /* make sure the length is a multiple of 4 */
++ if (sg_dma_len(&ctl->sg) % 4)
++ sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
++
++ ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
++ &ctl->sg, 1, DMA_TO_DEVICE,
++ DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
++ if (!ctl->adesc)
++ return NETDEV_TX_BUSY;
++
++ ctl->adesc->callback_param = netdev;
++ ctl->adesc->callback = ks8842_dma_tx_cb;
++ ctl->adesc->tx_submit(ctl->adesc);
++
++ netdev->stats.tx_bytes += skb->len;
++
++ dev_kfree_skb(skb);
++
++ return NETDEV_TX_OK;
++}
++#endif
++
+ static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
+ {
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+@@ -314,7 +448,7 @@ static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
+ u32 *ptr = (u32 *)skb->data;
+ u32 ctrl;
+
+- dev_dbg(&adapter->pdev->dev,
++ dev_dbg(adapter->dev,
+ "%s: len %u head %p data %p tail %p end %p\n",
+ __func__, skb->len, skb->head, skb->data,
+ skb_tail_pointer(skb), skb_end_pointer(skb));
+@@ -344,6 +478,116 @@ static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
+ return NETDEV_TX_OK;
+ }
+
++#ifdef CONFIG_KS8842_TIMB_DMA
++static int __ks8842_start_new_rx_dma(struct net_device *netdev,
++ struct ks8842_adapter *adapter)
++{
++ struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
++ struct scatterlist *sg = &ctl->sg;
++ int err;
++
++ ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE);
++ if (ctl->skb) {
++ sg_init_table(sg, 1);
++ sg_dma_address(sg) = dma_map_single(DMA_DEV(adapter),
++ ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
++ err = dma_mapping_error(DMA_DEV(adapter), sg_dma_address(sg));
++ if (unlikely(err)) {
++ sg_dma_address(sg) = 0;
++ goto out;
++ }
++
++ sg_dma_len(sg) = DMA_BUFFER_SIZE;
++
++ ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
++ sg, 1, DMA_FROM_DEVICE,
++ DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
++
++ if (!ctl->adesc)
++ goto out;
++
++ ctl->adesc->callback_param = netdev;
++ ctl->adesc->callback = ks8842_dma_rx_cb;
++ ctl->adesc->tx_submit(ctl->adesc);
++ } else {
++ err = -ENOMEM;
++ sg_dma_address(sg) = 0;
++ goto out;
++ }
++
++ return err;
++out:
++ if (sg_dma_address(sg))
++ dma_unmap_single(DMA_DEV(adapter), sg_dma_address(sg),
++ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
++ sg_dma_address(sg) = 0;
++ if (ctl->skb)
++ dev_kfree_skb(ctl->skb);
++
++ ctl->skb = NULL;
++
++ printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err);
++ return err;
++}
++
++static void ks8842_rx_frame_dma_tasklet(unsigned long arg)
++{
++ struct net_device *netdev = (struct net_device *)arg;
++ struct ks8842_adapter *adapter = netdev_priv(netdev);
++ struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
++ struct sk_buff *skb = ctl->skb;
++ dma_addr_t addr = sg_dma_address(&ctl->sg);
++ u32 status;
++
++ ctl->adesc = NULL;
++
++ /* kick next transfer going */
++ __ks8842_start_new_rx_dma(netdev, adapter);
++
++ /* now handle the data we got */
++ dma_unmap_single(DMA_DEV(adapter), addr, DMA_BUFFER_SIZE,
++ DMA_FROM_DEVICE);
++
++ status = *((u32 *)skb->data);
++
++ dev_dbg(adapter->dev, "%s - rx_data: status: %x\n",
++ __func__, status & 0xffff);
++
++ /* check the status */
++ if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
++ int len = (status >> 16) & 0x7ff;
++
++ dev_dbg(adapter->dev, "%s, got package, len: %d, skb: %p\n",
++ __func__, len, skb);
++
++ netdev->stats.rx_packets++;
++ netdev->stats.rx_bytes += len;
++ if (status & RXSR_MULTICAST)
++ netdev->stats.multicast++;
++
++ /* we are not nice to the stack, we want to be nice
++ * to our DMA engine instead, reserve 4 bytes
++ * which is the status word
++ */
++ skb_reserve(skb, 4);
++ skb_put(skb, len);
++
++ skb->protocol = eth_type_trans(skb, netdev);
++ netif_rx(skb);
++ } else {
++ dev_dbg(adapter->dev, "RX error, status: %x\n", status);
++ netdev->stats.rx_errors++;
++ if (status & RXSR_TOO_LONG)
++ netdev->stats.rx_length_errors++;
++ if (status & RXSR_CRC_ERROR)
++ netdev->stats.rx_crc_errors++;
++ if (status & RXSR_RUNT)
++ netdev->stats.rx_frame_errors++;
++ dev_kfree_skb(skb);
++ }
++}
++#endif
++
+ static void ks8842_rx_frame(struct net_device *netdev,
+ struct ks8842_adapter *adapter)
+ {
+@@ -352,14 +596,14 @@ static void ks8842_rx_frame(struct net_device *netdev,
+
+ status &= 0xffff;
+
+- dev_dbg(&adapter->pdev->dev, "%s - rx_data: status: %x\n",
++ dev_dbg(adapter->dev, "%s - rx_data: status: %x\n",
+ __func__, status);
+
+ /* check the status */
+ if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
+ struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);
+
+- dev_dbg(&adapter->pdev->dev, "%s, got package, len: %d\n",
++ dev_dbg(adapter->dev, "%s, got package, len: %d\n",
+ __func__, len);
+ if (skb) {
+ u32 *data;
+@@ -383,7 +627,7 @@ static void ks8842_rx_frame(struct net_device *netdev,
+ } else
+ netdev->stats.rx_dropped++;
+ } else {
+- dev_dbg(&adapter->pdev->dev, "RX error, status: %x\n", status);
++ dev_dbg(adapter->dev, "RX error, status: %x\n", status);
+ netdev->stats.rx_errors++;
+ if (status & RXSR_TOO_LONG)
+ netdev->stats.rx_length_errors++;
+@@ -406,7 +650,7 @@ static void ks8842_rx_frame(struct net_device *netdev,
+ void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
+ {
+ u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
+- dev_dbg(&adapter->pdev->dev, "%s Entry - rx_data: %d\n",
++ dev_dbg(adapter->dev, "%s Entry - rx_data: %d\n",
+ __func__, rx_data);
+ while (rx_data) {
+ ks8842_rx_frame(netdev, adapter);
+@@ -417,7 +661,7 @@ void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
+ void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
+ {
+ u16 sr = ks8842_read16(adapter, 16, REG_TXSR);
+- dev_dbg(&adapter->pdev->dev, "%s - entry, sr: %x\n", __func__, sr);
++ dev_dbg(adapter->dev, "%s - entry, sr: %x\n", __func__, sr);
+ netdev->stats.tx_packets++;
+ if (netif_queue_stopped(netdev))
+ netif_wake_queue(netdev);
+@@ -426,7 +670,7 @@ void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
+ void ks8842_handle_rx_overrun(struct net_device *netdev,
+ struct ks8842_adapter *adapter)
+ {
+- dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
++ dev_dbg(adapter->dev, "%s: entry\n", __func__);
+ netdev->stats.rx_errors++;
+ netdev->stats.rx_fifo_errors++;
+ }
+@@ -445,20 +689,33 @@ void ks8842_tasklet(unsigned long arg)
+ spin_unlock_irqrestore(&adapter->lock, flags);
+
+ isr = ks8842_read16(adapter, 18, REG_ISR);
+- dev_dbg(&adapter->pdev->dev, "%s - ISR: 0x%x\n", __func__, isr);
++ dev_dbg(adapter->dev, "%s - ISR: 0x%x\n", __func__, isr);
++
++#ifdef CONFIG_KS8842_TIMB_DMA
++ if (KS8842_USE_DMA(adapter))
++ isr &= ~IRQ_RX;
++#endif
+
+ /* Ack */
+ ks8842_write16(adapter, 18, isr, REG_ISR);
+
++ /* Ack in the timberdale IP as well */
++ iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR);
++
+ if (!netif_running(netdev))
+ return;
+
+ if (isr & IRQ_LINK_CHANGE)
+ ks8842_update_link_status(netdev, adapter);
+
++ /* should not get IRQ_RX when in DMA mode */
+ if (isr & (IRQ_RX | IRQ_RX_ERROR))
+- ks8842_handle_rx(netdev, adapter);
++#ifdef CONFIG_KS8842_TIMB_DMA
++ if (!KS8842_USE_DMA(adapter))
++#endif
++ ks8842_handle_rx(netdev, adapter);
+
++ /* should only happen when not doing DMA */
+ if (isr & IRQ_TX)
+ ks8842_handle_tx(netdev, adapter);
+
+@@ -477,8 +734,18 @@ void ks8842_tasklet(unsigned long arg)
+
+ /* re-enable interrupts, put back the bank selection register */
+ spin_lock_irqsave(&adapter->lock, flags);
+- ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
++#ifdef CONFIG_KS8842_TIMB_DMA
++ if (KS8842_USE_DMA(adapter))
++ ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
++ else
++#endif
++ ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
++
+ iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
++#ifdef CONFIG_KS8842_TIMB_DMA
++ /* resume DMA operations */
++ ks8842_resume_dma(adapter);
++#endif
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ }
+
+@@ -490,11 +757,17 @@ static irqreturn_t ks8842_irq(int irq, void *devid)
+ irqreturn_t ret = IRQ_NONE;
+
+ isr = ks8842_read16(adapter, 18, REG_ISR);
+- dev_dbg(&adapter->pdev->dev, "%s - ISR: 0x%x\n", __func__, isr);
++ dev_dbg(adapter->dev, "%s - ISR: 0x%x\n", __func__, isr);
+
+ if (isr) {
+- /* disable IRQ */
+- ks8842_write16(adapter, 18, 0x00, REG_IER);
++#ifdef CONFIG_KS8842_TIMB_DMA
++ if (KS8842_USE_DMA(adapter))
++ /* disable all but RX IRQ, since the FPGA relies on it*/
++ ks8842_write16(adapter, 18, IRQ_RX, REG_IER);
++ else
++#endif
++ /* disable IRQ */
++ ks8842_write16(adapter, 18, 0x00, REG_IER);
+
+ /* schedule tasklet */
+ tasklet_schedule(&adapter->tasklet);
+@@ -503,23 +776,159 @@ static irqreturn_t ks8842_irq(int irq, void *devid)
+ }
+
+ iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
+-
++#ifdef CONFIG_KS8842_TIMB_DMA
++ ks8842_resume_dma(adapter);
++#endif
+ return ret;
+ }
+
++#ifdef CONFIG_KS8842_TIMB_DMA
++static void ks8842_dma_rx_cb(void *data)
++{
++ struct net_device *netdev = data;
++ struct ks8842_adapter *adapter = netdev_priv(netdev);
++
++ dev_dbg(adapter->dev, "RX DMA finished\n");
++ /* schedule tasklet */
++ if (adapter->dma_rx.adesc)
++ tasklet_schedule(&adapter->dma_rx.tasklet);
++}
++
++static void ks8842_dma_tx_cb(void *data)
++{
++ struct net_device *netdev = data;
++ struct ks8842_adapter *adapter = netdev_priv(netdev);
++ struct ks8842_dma_ctl *ctl = &adapter->dma_tx;
++
++ dev_dbg(adapter->dev, "TX DMA finished\n");
++
++ if (!ctl->adesc)
++ return;
++
++ netdev->stats.tx_packets++;
++ ctl->adesc = NULL;
++
++ if (netif_queue_stopped(netdev))
++ netif_wake_queue(netdev);
++}
++
++static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter)
++{
++ struct ks8842_dma_ctl *tx_ctl = &adapter->dma_tx;
++ struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
++
++ tx_ctl->adesc = NULL;
++ if (tx_ctl->chan) {
++ tx_ctl->chan->device->device_terminate_all(tx_ctl->chan);
++ dma_release_channel(tx_ctl->chan);
++ }
++ tx_ctl->chan = NULL;
++
++ rx_ctl->adesc = NULL;
++ if (rx_ctl->chan) {
++ rx_ctl->chan->device->device_terminate_all(rx_ctl->chan);
++ dma_release_channel(rx_ctl->chan);
++ }
++ rx_ctl->chan = NULL;
++
++ tasklet_kill(&rx_ctl->tasklet);
++
++ if (sg_dma_address(&rx_ctl->sg))
++ dma_unmap_single(DMA_DEV(adapter), sg_dma_address(&rx_ctl->sg),
++ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
++ sg_dma_address(&rx_ctl->sg) = 0;
++
++ if (sg_dma_address(&tx_ctl->sg))
++ dma_unmap_single(DMA_DEV(adapter), sg_dma_address(&tx_ctl->sg),
++ DMA_BUFFER_SIZE, DMA_TO_DEVICE);
++ sg_dma_address(&tx_ctl->sg) = 0;
++
++ dev_kfree_skb(rx_ctl->skb);
++ rx_ctl->skb = NULL;
++ kfree(tx_ctl->buf);
++ tx_ctl->buf = NULL;
++}
++#endif
++
++
++#ifdef CONFIG_KS8842_TIMB_DMA
++static bool ks8842_dma_filter_fn(struct dma_chan *chan, void *filter_param)
++{
++ return chan->chan_id == (int)filter_param;
++}
++#endif
+
+ /* Netdevice operations */
+
+ static int ks8842_open(struct net_device *netdev)
+ {
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
++#ifdef CONFIG_KS8842_TIMB_DMA
++ struct ks8842_dma_ctl *tx_ctl = &adapter->dma_tx;
++ struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
++ bool use_dma = false;
++#endif
+ int err;
+
+- dev_dbg(&adapter->pdev->dev, "%s - entry\n", __func__);
++ dev_dbg(adapter->dev, "%s - entry\n", __func__);
++
++#ifdef CONFIG_KS8842_TIMB_DMA
++ if (KS8842_USE_DMA(adapter)) {
++ dma_cap_mask_t mask;
++
++ dma_cap_zero(mask);
++ dma_cap_set(DMA_SLAVE, mask);
++
++ sg_init_table(&tx_ctl->sg, 1);
++
++ tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
++ (void *)tx_ctl->channel);
++ if (!tx_ctl->chan)
++ goto no_dma;
++
++ /* allocate DMA buffer */
++ tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
++ if (!tx_ctl->buf)
++ goto no_dma;
++ sg_dma_address(&tx_ctl->sg) = dma_map_single(DMA_DEV(adapter),
++ tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
++ err = dma_mapping_error(DMA_DEV(adapter),
++ sg_dma_address(&tx_ctl->sg));
++ if (err) {
++ sg_dma_address(&tx_ctl->sg) = 0;
++ goto no_dma;
++ }
++
++ rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
++ (void *)rx_ctl->channel);
++ if (!rx_ctl->chan)
++ goto no_dma;
++
++ tasklet_init(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet,
++ (unsigned long)netdev);
++
++ /* start RX dma */
++ err = __ks8842_start_new_rx_dma(netdev, adapter);
++ if (err)
++ goto no_dma;
++
++ use_dma = true;
++ }
++no_dma:
++ if (!use_dma) {
++ printk(KERN_WARNING DRV_NAME
++ ": Failed to initiate DMA, falling back to PIO\n");
++ ks8842_dealloc_dma_bufs(adapter);
++ adapter->dma_rx.channel = -1;
++ adapter->dma_tx.channel = -1;
++ }
++#endif
+
+ /* reset the HW */
+ ks8842_reset_hw(adapter);
+
++ ks8842_write_mac_addr(adapter, netdev->dev_addr);
++
+ ks8842_update_link_status(netdev, adapter);
+
+ err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
+@@ -533,11 +942,19 @@ static int ks8842_open(struct net_device *netdev)
+ return 0;
+ }
+
++
+ static int ks8842_close(struct net_device *netdev)
+ {
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+
+- dev_dbg(&adapter->pdev->dev, "%s - entry\n", __func__);
++ dev_dbg(adapter->dev, "%s - entry\n", __func__);
++
++ cancel_work_sync(&adapter->timeout_work);
++
++#ifdef CONFIG_KS8842_TIMB_DMA
++ if (KS8842_USE_DMA(adapter))
++ ks8842_dealloc_dma_bufs(adapter);
++#endif
+
+ /* free the irq */
+ free_irq(adapter->irq, adapter);
+@@ -554,8 +971,20 @@ static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb,
+ int ret;
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+
+- dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
+-
++ dev_dbg(adapter->dev, "%s: entry\n", __func__);
++
++#ifdef CONFIG_KS8842_TIMB_DMA
++ if (KS8842_USE_DMA(adapter)) {
++ unsigned long flags;
++ ret = ks8842_tx_frame_dma(skb, netdev);
++ /* for now only allow one transfer at the time */
++ spin_lock_irqsave(&adapter->lock, flags);
++ if (adapter->dma_tx.adesc)
++ netif_stop_queue(netdev);
++ spin_unlock_irqrestore(&adapter->lock, flags);
++ return ret;
++ }
++#endif
+ ret = ks8842_tx_frame(skb, netdev);
+
+ if (ks8842_tx_fifo_space(adapter) < netdev->mtu + 8)
+@@ -567,44 +996,77 @@ static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb,
+ static int ks8842_set_mac(struct net_device *netdev, void *p)
+ {
+ struct ks8842_adapter *adapter = netdev_priv(netdev);
+- unsigned long flags;
+ struct sockaddr *addr = p;
+ char *mac = (u8 *)addr->sa_data;
+- int i;
+
+- dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
++ dev_dbg(adapter->dev, "%s: entry\n", __func__);
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, mac, netdev->addr_len);
+
+- spin_lock_irqsave(&adapter->lock, flags);
+- for (i = 0; i < ETH_ALEN; i++) {
+- ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
+- ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
+- REG_MACAR1 + i);
+- }
+- spin_unlock_irqrestore(&adapter->lock, flags);
++ ks8842_write_mac_addr(adapter, mac);
+ return 0;
+ }
+
+-static void ks8842_tx_timeout(struct net_device *netdev)
++static void ks8842_tx_timeout_work(struct work_struct *work)
+ {
+- struct ks8842_adapter *adapter = netdev_priv(netdev);
++ struct ks8842_adapter *adapter =
++ container_of(work, struct ks8842_adapter, timeout_work);
++ struct net_device *netdev = adapter->netdev;
+ unsigned long flags;
+
+- dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
++ dev_dbg(adapter->dev, "%s: entry\n", __func__);
+
+ spin_lock_irqsave(&adapter->lock, flags);
++#ifdef CONFIG_KS8842_TIMB_DMA
++ if (KS8842_USE_DMA(adapter)) {
++ struct ks8842_dma_ctl *tx_ctl = &adapter->dma_tx;
++ struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
++
++ tx_ctl->adesc = NULL;
++ tx_ctl->chan->device->device_terminate_all(tx_ctl->chan);
++
++ rx_ctl->adesc = NULL;
++ rx_ctl->chan->device->device_terminate_all(rx_ctl->chan);
++
++ dma_unmap_single(DMA_DEV(adapter), sg_dma_address(&rx_ctl->sg),
++ DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
++ sg_dma_address(&rx_ctl->sg) = 0;
++
++ dev_kfree_skb(rx_ctl->skb);
++ rx_ctl->skb = NULL;
++ }
++#endif
++
+ /* disable interrupts */
+ ks8842_write16(adapter, 18, 0, REG_IER);
+ ks8842_write16(adapter, 18, 0xFFFF, REG_ISR);
++
++ netif_stop_queue(netdev);
++
+ spin_unlock_irqrestore(&adapter->lock, flags);
+
+ ks8842_reset_hw(adapter);
+
++ ks8842_write_mac_addr(adapter, netdev->dev_addr);
++
+ ks8842_update_link_status(netdev, adapter);
++
++#ifdef CONFIG_KS8842_TIMB_DMA
++ if (KS8842_USE_DMA(adapter))
++ __ks8842_start_new_rx_dma(netdev, adapter);
++#endif
++}
++
++static void ks8842_tx_timeout(struct net_device *netdev)
++{
++ struct ks8842_adapter *adapter = netdev_priv(netdev);
++
++ dev_dbg(adapter->dev, "%s: entry\n", __func__);
++
++ schedule_work(&adapter->timeout_work);
+ }
+
+ static const struct net_device_ops ks8842_netdev_ops = {
+@@ -626,7 +1088,11 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
+ struct resource *iomem;
+ struct net_device *netdev;
+ struct ks8842_adapter *adapter;
++#ifdef CONFIG_KS8842_TIMB_DMA
++ struct ks8842_platform_data *pdata = pdev->dev.platform_data;
++#endif
+ u16 id;
++ unsigned i;
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME))
+@@ -639,6 +1105,8 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
++ adapter->netdev = netdev;
++ INIT_WORK(&adapter->timeout_work, ks8842_tx_timeout_work);
+ adapter->hw_addr = ioremap(iomem->start, resource_size(iomem));
+ if (!adapter->hw_addr)
+ goto err_ioremap;
+@@ -649,15 +1117,37 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
+ goto err_get_irq;
+ }
+
+- adapter->pdev = pdev;
+-
++ adapter->dev = &pdev->dev;
++#ifdef CONFIG_KS8842_TIMB_DMA
++ if (pdata && (pdata->tx_dma_channel != -1) &&
++ (pdata->rx_dma_channel != -1)) {
++ adapter->dma_rx.channel = pdata->rx_dma_channel;
++ adapter->dma_tx.channel = pdata->tx_dma_channel;
++ } else {
++ adapter->dma_rx.channel = -1;
++ adapter->dma_tx.channel = -1;
++ }
++#endif
+ tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev);
+ spin_lock_init(&adapter->lock);
+
+ netdev->netdev_ops = &ks8842_netdev_ops;
+ netdev->ethtool_ops = &ks8842_ethtool_ops;
+
+- ks8842_read_mac_addr(adapter, netdev->dev_addr);
++ /* Check if a mac address was given */
++ for (i = 0; i < netdev->addr_len; i++)
++ if (macaddr[i] != 0)
++ break;
++
++ if (i < netdev->addr_len)
++ /* an address was passed, use it */
++ memcpy(netdev->dev_addr, macaddr, netdev->addr_len);
++ else {
++ ks8842_read_mac_addr(adapter, netdev->dev_addr);
++
++ if (!is_valid_ether_addr(netdev->dev_addr))
++ random_ether_addr(netdev->dev_addr);
++ }
+
+ id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE);
+
+@@ -723,6 +1213,10 @@ static void __exit ks8842_exit(void)
+ module_init(ks8842_init);
+ module_exit(ks8842_exit);
+
++/* accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
++module_param_array(macaddr, byte, NULL, 0);
++MODULE_PARM_DESC(macaddr, "KS8842 MAC address to set");
++
+ MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver");
+ MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/net/most/Kconfig b/drivers/net/most/Kconfig
+new file mode 100644
+index 0000000..fbf1214
+--- /dev/null
++++ b/drivers/net/most/Kconfig
+@@ -0,0 +1,14 @@
++menu "MOST Device Drivers"
++ depends on MOST
++
++config MOST_TIMB_MLB
++ tristate "The timberdale MOST block"
++ depends on MOST
++ depends on GENERIC_GPIO
++ depends on HAS_IOMEM
++ select TIMB_DMA
++ default N
++ ---help---
++ Adds support for MOST on the timberdale FPGA.
++
++endmenu
+diff --git a/drivers/net/most/Makefile b/drivers/net/most/Makefile
+new file mode 100644
+index 0000000..5879279
+--- /dev/null
++++ b/drivers/net/most/Makefile
+@@ -0,0 +1,6 @@
++#
++# Makefile for the Linux Media Oriented Systems Transport drivers.
++#
++
++obj-$(CONFIG_MOST_TIMB_MLB) += timbmlb.o
++
+diff --git a/drivers/net/most/timbmlb.c b/drivers/net/most/timbmlb.c
+new file mode 100644
+index 0000000..f23e52a
+--- /dev/null
++++ b/drivers/net/most/timbmlb.c
+@@ -0,0 +1,1171 @@
++/*
++ * timbmlb.c Driver for the timberdale MLB block
++ * Copyright (c) 2009-2010 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/dmaengine.h>
++#include <linux/spinlock.h>
++#include <net/most/most_core.h>
++#include <linux/gpio.h>
++#include <linux/most/timbmlb.h>
++
++#define DRIVER_NAME "timb-most"
++
++#define MLB_REG_CFG 0x00
++#define MLB_REG_CH_CTRL 0x04
++#define MLB_REG_ISR 0x08
++#define MLB_REG_IMR 0x0C
++#define MLB_REG_CH_CFG_1 0x10
++#define MLB_REG_CH_CFG_2 0x14
++#define MLB_REG_CH_CFG_3 0x18
++#define MLB_REG_CH_CFG_4 0x1C
++#define MLB_REG_CH_CFG_5 0x20
++#define MLB_REG_CH_CFG_6 0x24
++#define MLB_REG_CH_CFG_7 0x28
++#define MLB_REG_CTRL_RX 0x2C /* 8 bits */
++#define MLB_REG_CTRL_TX MLB_REG_CTRL_RX
++#define MLB_REG_ASYNC_RX 0x30 /* 32 bits */
++#define MLB_REG_ASYNC_TX MLB_REG_ASYNC_RX
++#define MLB_REG_SYNC_RX 0x34 /* 32 bits */
++#define MLB_REG_SYNC_TX MLB_REG_SYNC_RX
++#define MLB_REG_FIFO_RST 0x38
++
++#define MLB_WR_CFG_CTRL_RX_EMPTY 0x20000
++#define MLB_WR_CFG_ASYNC_RX_EMPTY 0x10000
++#define MLB_CFG_SYNC_TX_EN 0x00200
++#define MLB_CFG_SYNC_RX_EN 0x00100
++#define MLB_CFG_ASYNC_RX_EN 0x00080
++#define MLB_CFG_CTRL_RX_EN 0x00040
++
++#define MLB_CH_CTRL_ASYNC_TX_START 0x8000
++#define MLB_CH_CTRL_ASYNC_RX_BREAK 0x4000
++#define MLB_CH_CTRL_CTRL_TX_START 0x0800
++#define MLB_CH_CTRL_CTRL_RX_BREAK 0x0400
++
++#define MLB_WR_I_SYNC_RX_EMPTY 0x80000
++#define MLB_WR_I_SYNC_RX_ALMOST_FULL 0x40000
++#define MLB_WR_I_SYNC_TX_FULL 0x20000
++#define MLB_WR_I_SYNC_TX_ALMOST_EMPTY 0x10000
++#define MLB_I_ASYNC_TX_READY 0x08000
++#define MLB_I_ASYNC_TX_PROT_ERR 0x04000
++#define MLB_I_ASYNC_TX_RX_BREAK 0x02000
++#define MLB_I_ASYNC_TX_BUSY_BREAK 0x01000
++#define MLB_I_ASYNC_RX_READY 0x00800
++#define MLB_I_ASYNC_RX_PROT_ERR 0x00400
++#define MLB_I_ASYNC_RX_CMD_BREAK 0x00200
++#define MLB_I_SYNC_LOCK 0x00100
++#define MLB_I_CTRL_TX_READY 0x00080
++#define MLB_I_CTRL_TX_PROT_ERR 0x00040
++#define MLB_I_CTRL_TX_RX_BREAK 0x00020
++#define MLB_I_CTRL_TX_BUSY_BREAK 0x00010
++#define MLB_I_CTRL_RX_READY 0x00008
++#define MLB_I_CTRL_RX_PROT_ERR 0x00004
++#define MLB_I_CTRL_RX_CMD_BREAK 0x00002
++#define MLB_I_SYNC_RX_PROT_ERR 0x00001
++
++#define MLB_CH_CFG_NOT_ALLOCATED 0x0000
++#define MLB_CH_CFG_SYNC_TX 0x0001
++#define MLB_CH_CFG_SYNC_RX 0x0002
++#define MLB_CH_CFG_ASYNC_TX 0x0003
++#define MLB_CH_CFG_ASYNC_RX 0x0004
++#define MLB_CH_CFG_CTRL_TX 0x0005
++#define MLB_CH_CFG_CTRL_RX 0x0006
++
++#define MLB_FIFO_RST_CTRL_TX 0x010000
++#define MLB_FIFO_RST_CTRL_RX 0x020000
++#define MLB_FIFO_RST_ASYNC_TX 0x040000
++#define MLB_FIFO_RST_ASYNC_RX 0x080000
++#define MLB_FIFO_RST_SYNC_TX 0x100000
++#define MLB_FIFO_RST_SYNC_RX 0x200000
++#define MLB_FIFO_RST_MLB 0x400000
++#define MLB_FIFO_RST_ALL (MLB_FIFO_RST_CTRL_TX | \
++ MLB_FIFO_RST_CTRL_RX | \
++ MLB_FIFO_RST_ASYNC_TX | \
++ MLB_FIFO_RST_ASYNC_RX | \
++ MLB_FIFO_RST_SYNC_TX | \
++ MLB_FIFO_RST_SYNC_RX | \
++ MLB_FIFO_RST_MLB)
++
++#define ASYNC_SKB_SIZE 1024
++#define SYNC_SKB_SIZE 32
++
++#define SYNC_MAX_DMA_SIZE 4096
++
++#define RX_CHAN 0
++#define TX_CHAN 1
++#define CHANNELS 2
++
++#define SYNC_STATE_DOWN 0
++#define SYNC_STATE_UP 1
++
++#define DMA_DEV(s) ((s->mdev->parent->parent) ? \
++ s->mdev->parent->parent : s->mdev->parent)
++
++struct timbmost {
++ void __iomem *membase;
++ struct most_dev *mdev;
++ int irq;
++ int reset_pin;
++ spinlock_t lock; /* mutual exclusion */
++
++ /* one queue per channel (type) */
++ struct sk_buff_head ctl_q;
++ struct sk_buff_head async_q;
++ struct sk_buff_head sync_q;
++
++ /* The SKB currently written/read into by the DMA engine
++ * only used for the synchronous channel
++ */
++ struct sk_buff *sync_read_skb;
++ dma_addr_t sync_read_handle;
++ struct scatterlist sync_rx_sg;
++ struct sk_buff *sync_write_skb;
++ int sync_write_next_map;
++
++ u8 sync_rx_state;
++ u8 sync_tx_state;
++ int sync_tx_chan_id;
++ int sync_rx_chan_id;
++ struct dma_chan *sync_tx_chan;
++ struct dma_chan *sync_rx_chan;
++ dma_cookie_t sync_tx_cookie;
++ dma_cookie_t sync_rx_cookie;
++ struct tasklet_struct sync_tx_tasklet;
++ struct tasklet_struct sync_rx_tasklet;
++
++ /* channel numbers */
++ u8 ctl_channels[CHANNELS];
++ u8 sync_channels[CHANNELS];
++ u8 async_channels[CHANNELS];
++};
++
++static void timbmost_ctl_write_wake(struct timbmost *self);
++static void timbmost_async_write_wake(struct timbmost *self);
++
++static int skb_dma_map(struct device *dev, struct sk_buff *skb,
++ enum dma_data_direction dir)
++{
++ struct skb_shared_info *sp = skb_shinfo(skb);
++ dma_addr_t map;
++ int i;
++
++ map = dma_map_single(dev, skb->data, skb_headlen(skb), dir);
++ if (dma_mapping_error(dev, map))
++ goto out_err;
++
++ sp->dma_head = map;
++ for (i = 0; i < sp->nr_frags; i++) {
++ skb_frag_t *fp = &sp->frags[i];
++
++ map = dma_map_page(dev, fp->page, fp->page_offset,
++ fp->size, dir);
++ if (dma_mapping_error(dev, map))
++ goto unwind;
++ sp->dma_maps[i] = map;
++ }
++
++ return 0;
++
++unwind:
++ while (--i >= 0) {
++ skb_frag_t *fp = &sp->frags[i];
++
++ dma_unmap_page(dev, sp->dma_maps[i], fp->size, dir);
++ }
++ dma_unmap_single(dev, sp->dma_head, skb_headlen(skb), dir);
++out_err:
++ return -ENOMEM;
++}
++
++static void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
++ enum dma_data_direction dir)
++{
++ struct skb_shared_info *sp = skb_shinfo(skb);
++ int i;
++
++ dma_unmap_single(dev, sp->dma_head, skb_headlen(skb), dir);
++ for (i = 0; i < sp->nr_frags; i++) {
++ skb_frag_t *fp = &sp->frags[i];
++
++ dma_unmap_page(dev, sp->dma_maps[i], fp->size, dir);
++ }
++}
++
++static void __timbmost_dump_regs(struct timbmost *self, const char *caption)
++{
++ dev_dbg(self->mdev->parent, "%s\nMLB_CFG:\t%x\tCH_CTRL:\t%x\n",
++ caption,
++ ioread32(self->membase + MLB_REG_CFG),
++ ioread32(self->membase + MLB_REG_CH_CTRL));
++
++ dev_dbg(self->mdev->parent, "ISTAT:\t%x\tIMASK:\t%x\n",
++ ioread32(self->membase + MLB_REG_ISR),
++ ioread32(self->membase + MLB_REG_IMR));
++
++ dev_dbg(self->mdev->parent, "CH_CFG1:\t%x\tCH_CFG2:\t%x\n",
++ ioread32(self->membase + MLB_REG_CH_CFG_1),
++ ioread32(self->membase + MLB_REG_CH_CFG_2));
++
++ dev_dbg(self->mdev->parent, "CH_CFG3:\t%x\tCH_CFG4:\t%x\n",
++ ioread32(self->membase + MLB_REG_CH_CFG_3),
++ ioread32(self->membase + MLB_REG_CH_CFG_4));
++
++ dev_dbg(self->mdev->parent, "CH_CFG5:\t%x\tCH_CFG6:\t%x\n",
++ ioread32(self->membase + MLB_REG_CH_CFG_5),
++ ioread32(self->membase + MLB_REG_CH_CFG_6));
++
++ dev_dbg(self->mdev->parent, "CH_CFG7:\t%x\n",
++ ioread32(self->membase + MLB_REG_CH_CFG_7));
++}
++
++static void __timbmost_hw_reset(struct timbmost *self)
++{
++ /* disable all interrupts */
++ iowrite32(0, self->membase + MLB_REG_IMR);
++ iowrite32(0, self->membase + MLB_REG_ISR);
++
++ /* disable RX and TX */
++ iowrite32(0, self->membase + MLB_REG_CFG);
++ iowrite32(0, self->membase + MLB_REG_CH_CTRL);
++
++ /* make sure the channels are not allocated */
++ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_1);
++ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_2);
++ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_3);
++ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_4);
++ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_5);
++ iowrite32(MLB_CH_CFG_NOT_ALLOCATED, self->membase + MLB_REG_CH_CFG_6);
++
++ /* reset */
++ iowrite32(MLB_FIFO_RST_ALL, self->membase + MLB_REG_FIFO_RST);
++
++ /* reset the INIC */
++ gpio_direction_output(self->reset_pin, 0);
++ msleep(10);
++ gpio_set_value(self->reset_pin, 1);
++}
++
++/* function called in interrupt context by the DMA engine when transfer finishes
++ */
++static void timbmost_dma_tx_cb(void *data)
++{
++ struct timbmost *self = data;
++
++ tasklet_schedule(&self->sync_tx_tasklet);
++}
++
++static void timbmost_dma_rx_cb(void *data)
++{
++ struct timbmost *self = data;
++
++ tasklet_schedule(&self->sync_rx_tasklet);
++}
++
++static void __timbmost_ctl_rx(struct timbmost *self)
++{
++ u32 cfg;
++ do {
++ struct sk_buff *skb =
++ most_skb_alloc(CTL_FRAME_SIZE, GFP_ATOMIC);
++ if (!skb)
++ return;
++
++ do {
++ u32 word = ioread32(self->membase + MLB_REG_CTRL_RX);
++ int i;
++
++ for (i = 0; i < 4; i++)
++ *skb_put(skb, 1) = (word >> (i * 8)) & 0xff;
++
++ cfg = ioread32(self->membase + MLB_REG_CFG);
++ } while ((skb->len < CTL_FRAME_SIZE) &&
++ !(cfg & MLB_WR_CFG_CTRL_RX_EMPTY));
++
++ /* deliver SKB upstreams */
++ skb->dev = (void *)self->mdev;
++ most_cb(skb)->channel_type = CHAN_CTL;
++ /* only one channel is supported... */
++ most_cb(skb)->channel = self->ctl_channels[RX_CHAN];
++
++ most_recv_frame(skb);
++ } while (!(cfg & MLB_WR_CFG_CTRL_RX_EMPTY));
++}
++
++static void __timbmost_async_rx(struct timbmost *self)
++{
++ /* TODO: The FIFO is 32bit not 8bit */
++ u32 cfg;
++
++ __timbmost_dump_regs(self, "Before read");
++
++ do {
++ struct sk_buff *skb =
++ most_skb_alloc(ASYNC_SKB_SIZE, GFP_ATOMIC);
++ if (!skb)
++ return;
++
++ do {
++ *skb_put(skb, 1) =
++ ioread32(self->membase + MLB_REG_ASYNC_RX);
++ cfg = ioread32(self->membase + MLB_REG_CFG);
++ } while ((skb->len < ASYNC_SKB_SIZE) &&
++ !(cfg & MLB_WR_CFG_ASYNC_RX_EMPTY));
++
++ /* deliver SKB upstreams */
++ skb->dev = (void *)self->mdev;
++ most_cb(skb)->channel_type = CHAN_ASYNC;
++ /* only one channel is supported... */
++ most_cb(skb)->channel = self->async_channels[RX_CHAN];
++
++ most_recv_frame(skb);
++ } while (!(cfg & MLB_WR_CFG_ASYNC_RX_EMPTY));
++}
++
++static void __timbmost_sync_read_wake(struct timbmost *self)
++{
++ struct sk_buff *skb = self->sync_read_skb;
++ struct dma_async_tx_descriptor *desc;
++ struct scatterlist *sg = &self->sync_rx_sg;
++
++ if (skb)
++ return;
++
++ skb = most_skb_alloc(SYNC_SKB_SIZE, GFP_ATOMIC);
++ if (!skb)
++ return;
++
++ sg_init_table(sg, 1);
++ sg_dma_len(sg) = SYNC_SKB_SIZE;
++
++ /* send next fragment */
++ sg_dma_address(sg) = dma_map_single(DMA_DEV(self), skb->data,
++ SYNC_SKB_SIZE, DMA_FROM_DEVICE);
++ if (dma_mapping_error(DMA_DEV(self), sg_dma_address(sg)))
++ goto map_failed;
++
++ desc = self->sync_rx_chan->device->device_prep_slave_sg(
++ self->sync_rx_chan, sg, 1, DMA_FROM_DEVICE,
++ DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
++
++ if (desc) {
++ self->sync_read_skb = skb;
++ desc->callback_param = self;
++ desc->callback = timbmost_dma_rx_cb;
++ self->sync_rx_cookie = desc->tx_submit(desc);
++
++ return;
++ }
++
++ dma_unmap_single(DMA_DEV(self), sg_dma_address(sg), SYNC_SKB_SIZE,
++ DMA_FROM_DEVICE);
++map_failed:
++ dev_kfree_skb(skb);
++}
++
++static void timbmost_sync_rx_tasklet(unsigned long arg)
++{
++ struct timbmost *self = (struct timbmost *)arg;
++ struct sk_buff *skb = self->sync_read_skb;
++
++ BUG_ON(!skb);
++
++ /* unmap DMA */
++ dma_unmap_single(DMA_DEV(self), self->sync_read_handle, SYNC_SKB_SIZE,
++ DMA_FROM_DEVICE);
++
++ if (self->sync_rx_state == SYNC_STATE_DOWN) {
++ dev_kfree_skb(skb);
++ self->sync_read_skb = NULL;
++ return;
++ }
++
++ /* set the length */
++ skb_put(skb, SYNC_SKB_SIZE);
++ /* send the SKB upwards */
++ skb->dev = (void *)self->mdev;
++ most_cb(skb)->channel_type = CHAN_SYNC;
++ /* only one channel is supported... */
++ most_cb(skb)->channel = self->sync_channels[RX_CHAN];
++ most_recv_frame(skb);
++ self->sync_read_skb = NULL;
++
++ __timbmost_sync_read_wake(self);
++}
++
++static void __timbmost_sync_write_wake(struct timbmost *self)
++{
++ struct sk_buff *skb = self->sync_write_skb;
++ struct dma_async_tx_descriptor *desc;
++ struct scatterlist sg;
++
++ dev_dbg(self->mdev->parent, "%s entry\n", __func__);
++
++ if (!skb) {
++ /* check for next SKB */
++ skb = skb_dequeue(&self->sync_q);
++ if (!skb)
++ return;
++
++ if (skb_dma_map(DMA_DEV(self), skb, DMA_TO_DEVICE)) {
++ /* failed to dma map? */
++ dev_kfree_skb(skb);
++ return;
++ }
++ /* next dma map to write is the first ... */
++ self->sync_write_next_map = -1;
++ self->sync_write_skb = skb;
++ dev_dbg(self->mdev->parent, "%s: New skb: fragments: %d\n",
++ __func__, skb_shinfo(skb)->nr_frags);
++ }
++
++ sg_init_table(&sg, 1);
++
++ /* send next fragment */
++ if (self->sync_write_next_map < 0) {
++ sg_dma_len(&sg) = skb_headlen(skb);
++ sg_dma_address(&sg) = skb_shinfo(skb)->dma_head;
++ } else {
++ sg_dma_len(&sg) =
++ skb_shinfo(skb)->frags[self->sync_write_next_map].size;
++ sg_dma_address(&sg) =
++ skb_shinfo(skb)->dma_maps[self->sync_write_next_map];
++ }
++ self->sync_write_next_map++;
++ dev_dbg(self->mdev->parent, "%s: Will send %x, len: %d\n",
++ __func__, (uint32_t)sg_dma_address(&sg), sg_dma_len(&sg));
++
++ desc = self->sync_tx_chan->device->device_prep_slave_sg(
++ self->sync_tx_chan, &sg, 1, DMA_TO_DEVICE,
++ DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
++
++ desc->callback_param = self;
++ desc->callback = timbmost_dma_tx_cb;
++ self->sync_tx_cookie = desc->tx_submit(desc);
++}
++
++static void timbmost_sync_tx_tasklet(unsigned long arg)
++{
++ struct timbmost *self = (struct timbmost *)arg;
++ struct sk_buff *skb = self->sync_write_skb;
++
++ /* TX done, free current SKB, and check for next */
++ BUG_ON(!skb);
++
++ /* check if this was the last DMA map */
++ if (self->sync_tx_state == SYNC_STATE_DOWN ||
++ self->sync_write_next_map >= skb_shinfo(skb)->nr_frags) {
++
++ /* it was the last... */
++ skb_dma_unmap(DMA_DEV(self), skb, DMA_TO_DEVICE);
++ dev_kfree_skb(skb);
++ self->sync_write_skb = NULL;
++ }
++
++ if (self->sync_tx_state != SYNC_STATE_DOWN)
++ __timbmost_sync_write_wake(self);
++}
++
++static void timbmost_sync_start_write(struct timbmost *self)
++{
++ unsigned long flags;
++ struct sk_buff *skb;
++
++ spin_lock_irqsave(&self->lock, flags);
++ skb = self->sync_write_skb;
++ spin_unlock_irqrestore(&self->lock, flags);
++
++ /* transfer is ongoing */
++ if (skb)
++ return;
++
++ __timbmost_sync_write_wake(self);
++}
++
++static irqreturn_t timbmost_irq(int irq, void *devid)
++{
++ struct timbmost *self = (struct timbmost *)devid;
++ u32 isr, imr;
++
++ isr = ioread32(self->membase + MLB_REG_ISR);
++ imr = ioread32(self->membase + MLB_REG_IMR);
++
++ dev_dbg(self->mdev->parent, "%s: entry, isr: %x, imr: %x\n", __func__,
++ isr, imr);
++
++ /* mask out only enabled interrupts */
++ isr &= imr;
++
++ /* ack */
++ iowrite32(isr, self->membase + MLB_REG_ISR);
++
++ if (isr & MLB_I_ASYNC_TX_READY) {
++ /* disable TX interrupts */
++ imr &= ~(MLB_I_ASYNC_TX_READY | MLB_I_ASYNC_TX_PROT_ERR);
++ iowrite32(imr, self->membase + MLB_REG_IMR);
++ /* schedule to send next package */
++ timbmost_async_write_wake(self);
++ }
++
++ if (isr & MLB_I_ASYNC_RX_READY)
++ /* pass data upstreams */
++ __timbmost_async_rx(self);
++
++ if (isr & MLB_I_CTRL_TX_READY) {
++ /* disable TX interrupts */
++ imr &= ~(MLB_I_CTRL_TX_READY | MLB_I_CTRL_TX_PROT_ERR);
++ iowrite32(imr, self->membase + MLB_REG_IMR);
++ /* schedule to send next package */
++ timbmost_ctl_write_wake(self);
++ }
++
++ if (isr & MLB_I_CTRL_RX_READY)
++ /* pass data upstreams */
++ __timbmost_ctl_rx(self);
++
++ if (isr)
++ return IRQ_HANDLED;
++ else
++ return IRQ_NONE;
++}
++
++static bool timbmost_dma_filter_fn(struct dma_chan *chan, void *filter_param)
++{
++ return chan->chan_id == (int)filter_param;
++}
++
++static int timbmost_open(struct most_dev *mdev)
++{
++ struct timbmost *self = (struct timbmost *)mdev->driver_data;
++ int err;
++ dma_cap_mask_t mask;
++
++ dma_cap_zero(mask);
++ dma_cap_set(DMA_SLAVE, mask);
++
++ dev_dbg(mdev->parent, "%s\n", __func__);
++
++ skb_queue_head_init(&self->ctl_q);
++ skb_queue_head_init(&self->sync_q);
++ skb_queue_head_init(&self->async_q);
++
++ spin_lock_init(&self->lock);
++
++ /* request the GPIO reset pin */
++ err = gpio_request(self->reset_pin, DRIVER_NAME);
++ if (err) {
++ printk(KERN_ERR DRIVER_NAME
++ " Failed to request reset pin: %d, err: %d\n",
++ self->reset_pin, err);
++ return err;
++ }
++
++ __timbmost_hw_reset(self);
++
++ self->sync_tx_cookie = -1;
++ self->sync_rx_cookie = -1;
++
++ self->sync_tx_chan = dma_request_channel(mask, timbmost_dma_filter_fn,
++ (void *)self->sync_tx_chan_id);
++ if (!self->sync_tx_chan) {
++ err = -ENODEV;
++ goto err_tx_chan;
++ }
++
++ self->sync_rx_chan = dma_request_channel(mask, timbmost_dma_filter_fn,
++ (void *)self->sync_rx_chan_id);
++ if (!self->sync_rx_chan) {
++ err = -ENODEV;
++ goto err_rx_chan;
++ }
++
++ /* request IRQ */
++ err = request_irq(self->irq, timbmost_irq, IRQF_SHARED, "timb-most",
++ self);
++ if (err)
++ goto err_req_irq;
++
++ return 0;
++
++err_req_irq:
++ dma_release_channel(self->sync_rx_chan);
++err_rx_chan:
++ dma_release_channel(self->sync_tx_chan);
++err_tx_chan:
++ gpio_free(self->reset_pin);
++ return err;
++}
++
++static void timbmost_stop_sync_dma(struct timbmost *self)
++{
++ if (self->sync_read_skb) {
++ most_dbg("Stopping RX\n");
++
++ self->sync_rx_chan->device->device_terminate_all(
++ self->sync_rx_chan);
++ }
++
++ if (self->sync_write_skb) {
++ /* just let it complete */
++ dma_sync_wait(self->sync_tx_chan, self->sync_rx_cookie);
++
++ most_dbg("Stopping TX\n");
++ }
++}
++
++static int timbmost_close(struct most_dev *mdev)
++{
++ struct timbmost *self = (struct timbmost *)mdev->driver_data;
++
++ dev_dbg(mdev->parent, "%s\n", __func__);
++
++ /* free IRQ */
++ free_irq(self->irq, self);
++
++ __timbmost_hw_reset(self);
++
++ /* free GPIO */
++ gpio_free(self->reset_pin);
++
++ /* empty all queues */
++ skb_queue_purge(&self->ctl_q);
++ skb_queue_purge(&self->sync_q);
++ skb_queue_purge(&self->async_q);
++
++ dma_release_channel(self->sync_rx_chan);
++ dma_release_channel(self->sync_tx_chan);
++
++ return 0;
++}
++
++static int __timbmost_conf_channel(struct timbmost *self, u8 channel,
++ u8 channel_mask)
++{
++ int register_offset;
++ int shift;
++ u32 ch_cfg;
++
++ /* only even channel numbers are allowed */
++ if (channel % 2 || channel > 0x3e || channel == 0) {
++ printk(KERN_WARNING DRIVER_NAME": Invalid channel: %d\n",
++ channel);
++ return -EINVAL;
++ }
++
++ channel = (channel / 2) - 1;
++ /* the channel conf is spread out over the 7 channel config registers
++ * each register configures 5 channels, each reg is 32bit
++ */
++ register_offset = MLB_REG_CH_CFG_1 + (channel / 5) * 4;
++
++ /* each register configures 5 channels, 3 bit per channel
++ * lowest bits configures highest channel
++ */
++ shift = (4 - (channel % 5)) * 3;
++
++ ch_cfg = ioread32(self->membase + register_offset);
++ ch_cfg &= ~(0x7 << shift);
++ ch_cfg |= (channel_mask & 0x7) << shift;
++ iowrite32(ch_cfg, self->membase + register_offset);
++ return 0;
++}
++
++static int timbmost_conf_channel(struct most_dev *mdev,
++ enum most_chan_type type, u8 channel, u8 flags)
++{
++ struct timbmost *self = (struct timbmost *)mdev->driver_data;
++ unsigned long irq_flags;
++ u32 imr, cfg;
++ int err = -EINVAL;
++ int chan_idx = (flags & MOST_CONF_FLAG_TX) ? TX_CHAN : RX_CHAN;
++
++ dev_dbg(mdev->parent, "%s: channel: %d, flags: %x\n",
++ __func__, channel, flags);
++
++ if (flags & MOST_CONF_FLAG_UP) {
++ switch (type) {
++ case CHAN_CTL:
++ spin_lock_irqsave(&self->lock, irq_flags);
++ /* we only support one channel at the time */
++ if (self->ctl_channels[chan_idx])
++ goto error;
++
++ /* reset the FIFO */
++ iowrite32((chan_idx == TX_CHAN) ? MLB_FIFO_RST_CTRL_TX :
++ MLB_FIFO_RST_CTRL_RX,
++ self->membase + MLB_REG_FIFO_RST);
++
++ err = __timbmost_conf_channel(self, channel,
++ (chan_idx == TX_CHAN) ? MLB_CH_CFG_CTRL_TX :
++ MLB_CH_CFG_CTRL_RX);
++ if (err)
++ goto error;
++
++ if (chan_idx == RX_CHAN) {
++ /* enable the receiver */
++ cfg = ioread32(self->membase + MLB_REG_CFG);
++ cfg |= MLB_CFG_CTRL_RX_EN;
++ iowrite32(cfg, self->membase + MLB_REG_CFG);
++
++ /* enable RX interrupts */
++ imr = ioread32(self->membase + MLB_REG_IMR);
++ imr |= (MLB_I_CTRL_RX_READY |
++ MLB_I_CTRL_RX_PROT_ERR |
++ MLB_I_CTRL_RX_CMD_BREAK);
++ iowrite32(imr, self->membase + MLB_REG_IMR);
++ }
++ self->ctl_channels[chan_idx] = channel;
++ spin_unlock_irqrestore(&self->lock, irq_flags);
++ break;
++ case CHAN_SYNC:
++ spin_lock_irqsave(&self->lock, irq_flags);
++ /* we only support one channel at the time */
++ if (self->sync_channels[chan_idx])
++ goto error;
++
++ /* reset the FIFO */
++ iowrite32((chan_idx == TX_CHAN) ? MLB_FIFO_RST_SYNC_TX :
++ MLB_FIFO_RST_SYNC_RX,
++ self->membase + MLB_REG_FIFO_RST);
++
++ err = __timbmost_conf_channel(self, channel,
++ (chan_idx == TX_CHAN) ? MLB_CH_CFG_SYNC_TX :
++ MLB_CH_CFG_SYNC_RX);
++ if (err)
++ goto error;
++
++ if (chan_idx == RX_CHAN) {
++ /* enable the receiver */
++ cfg = ioread32(self->membase + MLB_REG_CFG);
++ cfg |= MLB_CFG_SYNC_RX_EN;
++ iowrite32(cfg, self->membase + MLB_REG_CFG);
++
++ /* enable prot error interrupts */
++ imr = ioread32(self->membase + MLB_REG_IMR);
++ imr |= MLB_I_SYNC_RX_PROT_ERR;
++ iowrite32(imr, self->membase + MLB_REG_IMR);
++ /* start RX DMA */
++ self->sync_rx_state = SYNC_STATE_UP;
++ __timbmost_sync_read_wake(self);
++ } else {
++ /* enable the transmitter */
++ cfg = ioread32(self->membase + MLB_REG_CFG);
++ cfg |= MLB_CFG_SYNC_TX_EN;
++ iowrite32(cfg, self->membase + MLB_REG_CFG);
++ self->sync_tx_state = SYNC_STATE_UP;
++ }
++
++ self->sync_channels[chan_idx] = channel;
++ spin_unlock_irqrestore(&self->lock, irq_flags);
++
++ break;
++ case CHAN_ASYNC:
++ spin_lock_irqsave(&self->lock, irq_flags);
++ /* we only support one channel at the time */
++ if (self->async_channels[chan_idx])
++ goto error;
++
++ /* reset the FIFO */
++ iowrite32((chan_idx == TX_CHAN) ?
++ MLB_FIFO_RST_ASYNC_TX : MLB_FIFO_RST_ASYNC_RX,
++ self->membase + MLB_REG_FIFO_RST);
++
++ err = __timbmost_conf_channel(self, channel,
++ (chan_idx == TX_CHAN) ? MLB_CH_CFG_ASYNC_TX :
++ MLB_CH_CFG_ASYNC_RX);
++ if (err)
++ goto error;
++
++ if (chan_idx == RX_CHAN) {
++ /* enable the receiver */
++ cfg = ioread32(self->membase + MLB_REG_CFG);
++ cfg |= MLB_CFG_ASYNC_RX_EN;
++ iowrite32(cfg, self->membase + MLB_REG_CFG);
++
++ /* enable RX interrupts */
++ imr = ioread32(self->membase + MLB_REG_IMR);
++ imr |= (MLB_I_ASYNC_RX_READY |
++ MLB_I_ASYNC_RX_PROT_ERR |
++ MLB_I_ASYNC_RX_CMD_BREAK);
++ iowrite32(imr, self->membase + MLB_REG_IMR);
++ }
++ self->async_channels[chan_idx] = channel;
++ spin_unlock_irqrestore(&self->lock, irq_flags);
++ break;
++ default:
++ printk(KERN_WARNING "timbmlb: Unknown channel type\n");
++ return -EINVAL;
++ }
++ } else {
++ switch (type) {
++ case CHAN_CTL:
++ /* stop any ongoing transfer */
++ spin_lock_irqsave(&self->lock, irq_flags);
++ if (self->ctl_channels[chan_idx] != channel)
++ goto error;
++
++ imr = ioread32(self->membase + MLB_REG_IMR);
++ imr &= ~(MLB_I_CTRL_TX_READY |
++ MLB_I_CTRL_TX_PROT_ERR |
++ MLB_I_CTRL_TX_RX_BREAK |
++ MLB_I_CTRL_TX_BUSY_BREAK |
++ MLB_I_CTRL_RX_READY |
++ MLB_I_CTRL_RX_PROT_ERR |
++ MLB_I_CTRL_RX_CMD_BREAK);
++ iowrite32(imr, self->membase + MLB_REG_IMR);
++
++ /* disable CTL RX */
++ cfg = ioread32(self->membase + MLB_REG_CFG);
++ cfg &= ~MLB_CFG_CTRL_RX_EN;
++ iowrite32(cfg, self->membase + MLB_REG_CFG);
++
++ err = __timbmost_conf_channel(self, channel,
++ MLB_CH_CFG_NOT_ALLOCATED);
++ spin_unlock_irqrestore(&self->lock, irq_flags);
++ skb_queue_purge(&self->ctl_q);
++ self->ctl_channels[chan_idx] = 0;
++ return err;
++ case CHAN_SYNC:
++ spin_lock_irqsave(&self->lock, irq_flags);
++
++ /* TODO: Separate RX and TX */
++ self->sync_tx_state = SYNC_STATE_DOWN;
++ self->sync_rx_state = SYNC_STATE_DOWN;
++
++ /* stop any ongoing transfer */
++ if (self->sync_channels[chan_idx] != channel)
++ goto error;
++
++ spin_unlock_irqrestore(&self->lock, irq_flags);
++ /* stop DMA */
++ timbmost_stop_sync_dma(self);
++ spin_lock_irqsave(&self->lock, irq_flags);
++ imr = ioread32(self->membase + MLB_REG_IMR);
++ imr &= ~MLB_I_SYNC_RX_PROT_ERR;
++ iowrite32(imr, self->membase + MLB_REG_IMR);
++
++ /* disable SYNC TX/RX */
++ cfg = ioread32(self->membase + MLB_REG_CFG);
++ cfg &= ~(MLB_CFG_SYNC_TX_EN |
++ MLB_CFG_SYNC_RX_EN);
++ iowrite32(cfg, self->membase + MLB_REG_CFG);
++
++ err = __timbmost_conf_channel(self, channel,
++ MLB_CH_CFG_NOT_ALLOCATED);
++ self->sync_write_skb = NULL;
++ spin_unlock_irqrestore(&self->lock, irq_flags);
++ skb_queue_purge(&self->sync_q);
++ self->sync_channels[chan_idx] = 0;
++ return err;
++ case CHAN_ASYNC:
++ /* stop any ongoing transfer */
++ spin_lock_irqsave(&self->lock, irq_flags);
++ if (self->async_channels[chan_idx] != channel)
++ goto error;
++ imr = ioread32(self->membase + MLB_REG_IMR);
++ imr &= ~(MLB_I_ASYNC_TX_READY |
++ MLB_I_ASYNC_TX_PROT_ERR |
++ MLB_I_ASYNC_TX_RX_BREAK |
++ MLB_I_ASYNC_TX_BUSY_BREAK |
++ MLB_I_ASYNC_RX_READY |
++ MLB_I_ASYNC_RX_PROT_ERR |
++ MLB_I_ASYNC_RX_CMD_BREAK);
++ iowrite32(imr, self->membase + MLB_REG_IMR);
++
++ /* disable ASYNC RX */
++ cfg = ioread32(self->membase + MLB_REG_CFG);
++ cfg &= ~MLB_CFG_ASYNC_RX_EN;
++ iowrite32(cfg, self->membase + MLB_REG_CFG);
++
++ err = __timbmost_conf_channel(self, channel,
++ MLB_CH_CFG_NOT_ALLOCATED);
++ spin_unlock_irqrestore(&self->lock, irq_flags);
++ skb_queue_purge(&self->async_q);
++ self->async_channels[chan_idx] = 0;
++ return err;
++ default:
++ return -EINVAL;
++ }
++ }
++ return 0;
++
++error:
++ spin_unlock_irqrestore(&self->lock, irq_flags);
++ return err;
++}
++
++static void timbmost_ctl_write_wake(struct timbmost *self)
++{
++ unsigned long flags;
++ u32 imr;
++ u32 isr;
++ struct sk_buff *skb;
++ int i;
++
++ dev_dbg(self->mdev->parent, "%s entry\n", __func__);
++ __timbmost_dump_regs(self, "Before write");
++
++ spin_lock_irqsave(&self->lock, flags);
++ imr = ioread32(self->membase + MLB_REG_IMR);
++ isr = ioread32(self->membase + MLB_REG_ISR);
++ spin_unlock_irqrestore(&self->lock, flags);
++
++ /* check if the hardware is currently writing
++ check with isr to workaround FPGA interrupt bug */
++ if (imr & ~isr & MLB_I_CTRL_TX_READY)
++ return;
++
++ /* check if we have sync */
++ if (!(isr & MLB_I_SYNC_LOCK))
++ return;
++
++ skb = skb_dequeue(&self->ctl_q);
++ if (!skb)
++ return;
++
++ /* now write to the FIFO */
++ for (i = 0; i < skb->len;) {
++ u32 word = 0;
++ int j;
++
++ for (j = 0; j < 4 && i < skb->len; j++, i++)
++ word |= ((u8 *)skb->data)[i] << j * 8;
++
++ iowrite32(word, self->membase + MLB_REG_CTRL_TX);
++ }
++
++ /* data is in the FIFO, enable proper interrupts */
++ spin_lock_irqsave(&self->lock, flags);
++ imr = ioread32(self->membase + MLB_REG_IMR) | MLB_I_CTRL_TX_READY |
++ MLB_I_CTRL_TX_PROT_ERR;
++ iowrite32(imr, self->membase + MLB_REG_IMR);
++ /* start TX */
++ iowrite32(MLB_CH_CTRL_CTRL_TX_START, self->membase + MLB_REG_CH_CTRL);
++ spin_unlock_irqrestore(&self->lock, flags);
++
++ kfree_skb(skb);
++}
++
++static void timbmost_async_write_wake(struct timbmost *self)
++{
++ unsigned long flags;
++ u32 imr;
++ u32 isr;
++ struct sk_buff *skb;
++ int i;
++
++ spin_lock_irqsave(&self->lock, flags);
++ imr = ioread32(self->membase + MLB_REG_IMR);
++ isr = ioread32(self->membase + MLB_REG_ISR);
++ spin_unlock_irqrestore(&self->lock, flags);
++
++ /* check if the hardware is currently writing */
++ if (imr & MLB_I_ASYNC_TX_READY)
++ return;
++
++ /* check if we have sync */
++ if (!(isr & MLB_I_SYNC_LOCK))
++ return;
++
++ skb = skb_dequeue(&self->async_q);
++ if (!skb)
++ return;
++
++ /* TODO: The FIFO is 32bit not 8bit */
++ /* now write to the FIFO */
++ for (i = 0; i < skb->len; i++)
++ iowrite32(skb->data[i], self->membase + MLB_REG_ASYNC_TX);
++
++ /* data is in the FIFO, enable proper interrupts */
++ spin_lock_irqsave(&self->lock, flags);
++ imr = ioread32(self->membase + MLB_REG_IMR) | MLB_I_ASYNC_TX_READY |
++ MLB_I_ASYNC_TX_PROT_ERR;
++ iowrite32(imr, self->membase + MLB_REG_IMR);
++ /* start TX */
++ iowrite32(MLB_CH_CTRL_ASYNC_TX_START, self->membase + MLB_REG_CH_CTRL);
++ spin_unlock_irqrestore(&self->lock, flags);
++
++ kfree_skb(skb);
++}
++
++static int timbmost_send(struct sk_buff *skb)
++{
++ struct most_dev *mdev = (struct most_dev *)skb->dev;
++ struct timbmost *self = (struct timbmost *)mdev->driver_data;
++
++ dev_dbg(mdev->parent, "%s, type: %d\n",
++ __func__, most_cb(skb)->channel_type);
++
++ switch (most_cb(skb)->channel_type) {
++ case CHAN_CTL:
++ skb_queue_tail(&self->ctl_q, skb);
++ timbmost_ctl_write_wake(self);
++ break;
++ case CHAN_ASYNC:
++ skb_queue_tail(&self->async_q, skb);
++ timbmost_async_write_wake(self);
++ break;
++ case CHAN_SYNC:
++ skb_queue_tail(&self->sync_q, skb);
++ timbmost_sync_start_write(self);
++ break;
++ default:
++ printk(KERN_WARNING "%s: Got unsupported channel type: %d\n",
++ __func__, most_cb(skb)->channel_type);
++ kfree_skb(skb);
++ break;
++ }
++
++ return 0;
++}
++
++static int timbmost_probe(struct platform_device *dev)
++{
++ int err;
++ struct timbmost *self = NULL;
++ struct resource *iomem;
++ struct timbmlb_platform_data *pdata = dev->dev.platform_data;
++
++ if (!pdata) {
++ printk(KERN_ERR DRIVER_NAME "No platform data supplied\n");
++ err = -EINVAL;
++ goto err_mem;
++ }
++
++ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
++ if (!iomem) {
++ err = -EINVAL;
++ goto err_mem;
++ }
++
++ self = kzalloc(sizeof(*self), GFP_KERNEL);
++ if (!self) {
++ err = -ENOMEM;
++ goto err_mem;
++ }
++
++ self->sync_rx_chan_id = pdata->rx_dma_channel;
++ self->sync_tx_chan_id = pdata->tx_dma_channel;
++ tasklet_init(&self->sync_rx_tasklet, timbmost_sync_rx_tasklet,
++ (unsigned long)self);
++ tasklet_init(&self->sync_tx_tasklet, timbmost_sync_tx_tasklet,
++ (unsigned long)self);
++
++ self->mdev = most_alloc_dev();
++ if (!self->mdev) {
++ err = -ENOMEM;
++ goto err_mem;
++ }
++
++ self->mdev->owner = THIS_MODULE;
++ self->mdev->driver_data = self;
++ self->mdev->parent = &dev->dev;
++ self->mdev->open = timbmost_open;
++ self->mdev->close = timbmost_close;
++ self->mdev->send = timbmost_send;
++ self->mdev->conf_channel = timbmost_conf_channel;
++
++ if (!request_mem_region(iomem->start,
++ resource_size(iomem), "timb-most")) {
++ err = -EBUSY;
++ goto err_mem;
++ }
++
++ self->membase = ioremap(iomem->start, resource_size(iomem));
++ if (!self->membase) {
++ printk(KERN_ERR "timbmost: Failed to remap I/O memory\n");
++ err = -ENOMEM;
++ goto err_ioremap;
++ }
++
++ self->reset_pin = pdata->reset_pin;
++
++ /* find interrupt */
++ self->irq = platform_get_irq(dev, 0);
++ if (self->irq < 0) {
++ err = self->irq;
++ goto err_get_irq;
++ }
++
++ /* register to the MOST layer */
++ err = most_register_dev(self->mdev);
++ if (err)
++ goto err_register;
++
++
++ platform_set_drvdata(dev, self);
++
++ return 0;
++
++err_get_irq:
++err_register:
++ iounmap(self->membase);
++err_ioremap:
++ release_mem_region(iomem->start, resource_size(iomem));
++err_mem:
++ if (self && self->mdev)
++ most_free_dev(self->mdev);
++
++ kfree(self);
++
++ printk(KERN_ERR "timb-most: Failed to register: %d\n", err);
++
++ return err;
++}
++
++static int timbmost_remove(struct platform_device *dev)
++{
++ struct timbmost *self = platform_get_drvdata(dev);
++ struct resource *iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
++
++ most_unregister_dev(self->mdev);
++ iounmap(self->membase);
++ release_mem_region(iomem->start, resource_size(iomem));
++ most_free_dev(self->mdev);
++ kfree(self);
++ return 0;
++}
++
++static struct platform_driver timbmost_platform_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = timbmost_probe,
++ .remove = timbmost_remove,
++};
++
++/*--------------------------------------------------------------------------*/
++
++static int __init timbmost_init(void)
++{
++ return platform_driver_register(&timbmost_platform_driver);
++}
++
++static void __exit timbmost_exit(void)
++{
++ platform_driver_unregister(&timbmost_platform_driver);
++}
++
++module_init(timbmost_init);
++module_exit(timbmost_exit);
++
++MODULE_DESCRIPTION("Timberdale MLB driver");
++MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("platform:timb-most");
+diff --git a/drivers/serial/timbuart.c b/drivers/serial/timbuart.c
+index 34b31da..cd19a35 100644
+--- a/drivers/serial/timbuart.c
++++ b/drivers/serial/timbuart.c
+@@ -31,6 +31,7 @@
+
+ struct timbuart_port {
+ struct uart_port port;
++ struct uart_driver uart_driver;
+ struct tasklet_struct tasklet;
+ int usedma;
+ u32 last_ier;
+@@ -410,7 +411,7 @@ static struct uart_ops timbuart_ops = {
+ .verify_port = timbuart_verify_port
+ };
+
+-static struct uart_driver timbuart_driver = {
++static const __devinitconst struct uart_driver timbuart_driver_template = {
+ .owner = THIS_MODULE,
+ .driver_name = "timberdale_uart",
+ .dev_name = "ttyTU",
+@@ -419,7 +420,7 @@ static struct uart_driver timbuart_driver = {
+ .nr = 1
+ };
+
+-static int timbuart_probe(struct platform_device *dev)
++static int __devinit timbuart_probe(struct platform_device *dev)
+ {
+ int err;
+ struct timbuart_port *uart;
+@@ -433,6 +434,8 @@ static int timbuart_probe(struct platform_device *dev)
+ goto err_mem;
+ }
+
++ uart->uart_driver = timbuart_driver_template;
++
+ uart->usedma = 0;
+
+ uart->port.uartclk = 3250000 * 16;
+@@ -461,11 +464,11 @@ static int timbuart_probe(struct platform_device *dev)
+
+ tasklet_init(&uart->tasklet, timbuart_tasklet, (unsigned long)uart);
+
+- err = uart_register_driver(&timbuart_driver);
++ err = uart_register_driver(&uart->uart_driver);
+ if (err)
+ goto err_register;
+
+- err = uart_add_one_port(&timbuart_driver, &uart->port);
++ err = uart_add_one_port(&uart->uart_driver, &uart->port);
+ if (err)
+ goto err_add_port;
+
+@@ -474,7 +477,7 @@ static int timbuart_probe(struct platform_device *dev)
+ return 0;
+
+ err_add_port:
+- uart_unregister_driver(&timbuart_driver);
++ uart_unregister_driver(&uart->uart_driver);
+ err_register:
+ kfree(uart);
+ err_mem:
+@@ -484,13 +487,13 @@ err_mem:
+ return err;
+ }
+
+-static int timbuart_remove(struct platform_device *dev)
++static int __devexit timbuart_remove(struct platform_device *dev)
+ {
+ struct timbuart_port *uart = platform_get_drvdata(dev);
+
+ tasklet_kill(&uart->tasklet);
+- uart_remove_one_port(&timbuart_driver, &uart->port);
+- uart_unregister_driver(&timbuart_driver);
++ uart_remove_one_port(&uart->uart_driver, &uart->port);
++ uart_unregister_driver(&uart->uart_driver);
+ kfree(uart);
+
+ return 0;
+diff --git a/include/linux/can/platform/ascb.h b/include/linux/can/platform/ascb.h
+new file mode 100644
+index 0000000..817162b
+--- /dev/null
++++ b/include/linux/can/platform/ascb.h
+@@ -0,0 +1,8 @@
++#ifndef _CAN_PLATFORM_ASCB_H_
++#define _CAN_PLATFORM_ASCB_H_
++
++struct ascb_platform_data {
++ int gpio_pin;
++};
++
++#endif
+diff --git a/include/linux/i2c-xiic.h b/include/linux/i2c-xiic.h
+new file mode 100644
+index 0000000..4f9f225
+--- /dev/null
++++ b/include/linux/i2c-xiic.h
+@@ -0,0 +1,43 @@
++/*
++ * i2c-xiic.h
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Xilinx IIC
++ */
++
++#ifndef _LINUX_I2C_XIIC_H
++#define _LINUX_I2C_XIIC_H
++
++/**
++ * struct xiic_i2c_platform_data - Platform data of the Xilinx I2C driver
++ * @num_devices: Number of devices that shall be added when the driver
++ * is probed.
++ * @devices: The actual devices to add.
++ *
++ * The purpose of this platform data struct is to provide a number
++ * of devices that should be added to the I2C bus. The reason is that sometimes
++ * the I2C board info is not enough, a new PCI board can for instance be
++ * plugged into a standard PC, and the bus number might be unknown at
++ * early init time.
++ */
++struct xiic_i2c_platform_data {
++ u8 num_devices;
++ struct i2c_board_info const *devices;
++};
++
++#endif /* _LINUX_I2C_XIIC_H */
+diff --git a/include/linux/ks8842.h b/include/linux/ks8842.h
+new file mode 100644
+index 0000000..1d59c45
+--- /dev/null
++++ b/include/linux/ks8842.h
+@@ -0,0 +1,34 @@
++/*
++ * ks8842.h KS8842 platform data struct definition
++ * Copyright (c) 2010 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#ifndef _LINUX_KS8842_H
++#define _LINUX_KS8842_H
++
++/**
++ * struct ks8842_platform_data - Platform data of the KS8842 network driver
++ * @rx_dma_channel: The DMA channel to use for RX, -1 for none.
++ * @tx_dma_channel: The DMA channel to use for TX, -1 for none.
++ *
++ * If no pointer is provided as platform data no DMA will be used
++ */
++struct ks8842_platform_data {
++ int rx_dma_channel;
++ int tx_dma_channel;
++};
++
++#endif
+diff --git a/include/linux/most/timbmlb.h b/include/linux/most/timbmlb.h
+new file mode 100644
+index 0000000..a3b8c76
+--- /dev/null
++++ b/include/linux/most/timbmlb.h
+@@ -0,0 +1,11 @@
++#ifndef __LINUX_MOST_TIMBMLB_H
++#define __LINUX_MOST_TIMBMLB_H
++
++/* Timberdale MLB IP */
++struct timbmlb_platform_data {
++ int reset_pin; /* pin used for reset of the INIC */
++ int rx_dma_channel;
++ int tx_dma_channel;
++};
++
++#endif
+diff --git a/include/linux/socket.h b/include/linux/socket.h
+index 7b3aae2..3334368 100644
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -189,7 +189,8 @@ struct ucred {
+ #define AF_ISDN 34 /* mISDN sockets */
+ #define AF_PHONET 35 /* Phonet sockets */
+ #define AF_IEEE802154 36 /* IEEE802154 sockets */
+-#define AF_MAX 37 /* For now.. */
++#define AF_MOST 37 /* Media Oriented Systems Transport */
++#define AF_MAX 38 /* For now.. */
+
+ /* Protocol families, same as address families. */
+ #define PF_UNSPEC AF_UNSPEC
+@@ -229,6 +230,7 @@ struct ucred {
+ #define PF_ISDN AF_ISDN
+ #define PF_PHONET AF_PHONET
+ #define PF_IEEE802154 AF_IEEE802154
++#define PF_MOST AF_MOST
+ #define PF_MAX AF_MAX
+
+ /* Maximum queue length specifiable by listen. */
+diff --git a/include/linux/timb_dma.h b/include/linux/timb_dma.h
+new file mode 100644
+index 0000000..bb043e9
+--- /dev/null
++++ b/include/linux/timb_dma.h
+@@ -0,0 +1,55 @@
++/*
++ * timb_dma.h timberdale FPGA DMA driver defines
++ * Copyright (c) 2010 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA DMA engine
++ */
++
++#ifndef _LINUX_TIMB_DMA_H
++#define _LINUX_TIMB_DMA_H
++
++/**
++ * struct timb_dma_platform_data_channel - Description of each individual
++ * DMA channel for the timberdale DMA driver
++ * @rx: true if this channel handles data in the direction to
++ * the CPU.
++ * @bytes_per_line: Number of bytes per line, this is specific for channels
++ * handling video data. For other channels this shall be left to 0.
++ * @descriptors: Number of descriptors to allocate for this channel.
++ * @descriptor_elements: Number of elements in each descriptor.
++ *
++ */
++struct timb_dma_platform_data_channel {
++ bool rx;
++ unsigned int bytes_per_line;
++ unsigned int descriptors;
++ unsigned int descriptor_elements;
++};
++
++/**
++ * struct timb_dma_platform_data - Platform data of the timberdale DMA driver
++ * @nr_channels: Number of defined channels in the channels array.
++ * @channels: Definition of the each channel.
++ *
++ */
++struct timb_dma_platform_data {
++ unsigned nr_channels;
++ struct timb_dma_platform_data_channel channels[32];
++};
++
++#endif
+diff --git a/include/media/timb_radio.h b/include/media/timb_radio.h
+new file mode 100644
+index 0000000..fcd32a3
+--- /dev/null
++++ b/include/media/timb_radio.h
+@@ -0,0 +1,36 @@
++/*
++ * timb_radio.h Platform struct for the Timberdale radio driver
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#ifndef _TIMB_RADIO_
++#define _TIMB_RADIO_ 1
++
++#include <linux/i2c.h>
++
++struct timb_radio_platform_data {
++ int i2c_adapter; /* I2C adapter where the tuner and dsp are attached */
++ struct {
++ const char *module_name;
++ struct i2c_board_info *info;
++ } tuner;
++ struct {
++ const char *module_name;
++ struct i2c_board_info *info;
++ } dsp;
++};
++
++#endif
+diff --git a/include/media/timb_video.h b/include/media/timb_video.h
+new file mode 100644
+index 0000000..55334ad
+--- /dev/null
++++ b/include/media/timb_video.h
+@@ -0,0 +1,34 @@
++/*
++ * timb_video.h Platform struct for the Timberdale video driver
++ * Copyright (c) 2009-2010 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#ifndef _TIMB_VIDEO_
++#define _TIMB_VIDEO_ 1
++
++#include <linux/i2c.h>
++
++struct timb_video_platform_data {
++ int dma_channel;
++ int i2c_adapter; /* The I2C adapter where the encoder is attached */
++ struct {
++ const char *module_name;
++ struct i2c_board_info *info;
++ } encoder;
++};
++
++#endif
++
+diff --git a/include/media/v4l2-chip-ident.h b/include/media/v4l2-chip-ident.h
+index 6cc107d..5341e3d 100644
+--- a/include/media/v4l2-chip-ident.h
++++ b/include/media/v4l2-chip-ident.h
+@@ -155,6 +155,9 @@ enum {
+ /* module adv7343: just ident 7343 */
+ V4L2_IDENT_ADV7343 = 7343,
+
++ /* module saa7706h: just ident 7706 */
++ V4L2_IDENT_SAA7706H = 7706,
++
+ /* module wm8739: just ident 8739 */
+ V4L2_IDENT_WM8739 = 8739,
+
+diff --git a/include/net/most/async.h b/include/net/most/async.h
+new file mode 100644
+index 0000000..5a4d49d
+--- /dev/null
++++ b/include/net/most/async.h
+@@ -0,0 +1,12 @@
++#ifndef __ASYNC_H
++#define __ASYNC_H
++
++struct sockaddr_mostasync {
++ sa_family_t most_family;
++ unsigned short most_dev;
++ unsigned char rx_channel;
++ unsigned char tx_channel;
++};
++
++#endif
++
+diff --git a/include/net/most/ctl.h b/include/net/most/ctl.h
+new file mode 100644
+index 0000000..d34726c
+--- /dev/null
++++ b/include/net/most/ctl.h
+@@ -0,0 +1,12 @@
++#ifndef __CTL_H
++#define __CTL_H
++
++struct sockaddr_mostctl {
++ sa_family_t most_family;
++ unsigned short most_dev;
++ unsigned char rx_channel;
++ unsigned char tx_channel;
++};
++
++#endif
++
+diff --git a/include/net/most/dev.h b/include/net/most/dev.h
+new file mode 100644
+index 0000000..bd6dc48
+--- /dev/null
++++ b/include/net/most/dev.h
+@@ -0,0 +1,27 @@
++#ifndef __DEV_H
++#define __DEV_H
++
++struct sockaddr_mostdev {
++ sa_family_t most_family;
++ unsigned short most_dev;
++};
++
++
++/* MOST Dev ioctl defines */
++#define MOSTDEVUP _IOW('M', 201, int)
++#define MOSTDEVDOWN _IOW('M', 202, int)
++
++#define MOSTGETDEVLIST _IOR('M', 210, int)
++
++struct most_dev_req {
++ uint16_t dev_id;
++};
++
++struct most_dev_list_req {
++ uint16_t dev_num;
++ struct most_dev_req dev_req[0];
++};
++
++
++#endif
++
+diff --git a/include/net/most/most.h b/include/net/most/most.h
+new file mode 100644
+index 0000000..8ce75e2
+--- /dev/null
++++ b/include/net/most/most.h
+@@ -0,0 +1,110 @@
++#ifndef __MOST_H
++#define __MOST_H
++
++#include <net/sock.h>
++
++#ifndef AF_MOST
++#define AF_MOST 37
++#define PF_MOST AF_MOST
++#endif
++
++/* Reserve for core and drivers use */
++#define MOST_SKB_RESERVE 8
++
++#define CTL_FRAME_SIZE 32
++
++#define MOSTPROTO_DEV 0
++#define MOSTPROTO_CTL 1
++#define MOSTPROTO_SYNC 2
++#define MOSTPROTO_ASYNC 3
++
++#define MOST_NO_CHANNEL 0xFE
++
++enum {
++ MOST_CONNECTED = 1, /* Equal to TCP_ESTABLISHED makes net code happy */
++ MOST_OPEN,
++ MOST_BOUND,
++};
++
++
++struct most_skb_cb {
++ __u8 channel_type;
++ __u8 channel;
++};
++#define most_cb(skb) ((struct most_skb_cb *)(skb->cb))
++
++struct most_sock {
++ struct sock sk;
++ u8 channel_type;
++ u8 rx_channel;
++ u8 tx_channel;
++ int dev_id;
++ struct most_dev *mdev;
++};
++#define most_sk(sk) ((struct most_sock *)sk)
++
++static inline struct sock *most_sk_alloc(struct net *net,
++ struct proto *pops, u8 channel_type)
++{
++ struct sock *sk = sk_alloc(net, PF_MOST, GFP_ATOMIC, pops);
++ if (sk) {
++ most_sk(sk)->channel_type = channel_type;
++ most_sk(sk)->dev_id = -1;
++ }
++
++ return sk;
++}
++static inline struct sk_buff *most_skb_alloc(unsigned int len, gfp_t how)
++{
++ struct sk_buff *skb = alloc_skb(len + MOST_SKB_RESERVE, how);
++
++ if (skb)
++ skb_reserve(skb, MOST_SKB_RESERVE);
++
++ return skb;
++}
++
++static inline struct sk_buff *most_skb_send_alloc(struct sock *sk,
++ unsigned long len, int nb, int *err)
++{
++ struct sk_buff *skb =
++ sock_alloc_send_skb(sk, len + MOST_SKB_RESERVE, nb, err);
++
++ if (skb)
++ skb_reserve(skb, MOST_SKB_RESERVE);
++
++ return skb;
++}
++
++struct most_sock_list {
++ struct hlist_head head;
++ rwlock_t lock;
++};
++
++struct most_dev *most_dev_get(int index);
++
++int most_sock_register(int proto, struct net_proto_family *ops);
++int most_sock_unregister(int proto);
++void most_sock_link(struct sock *s);
++void most_sock_unlink(struct sock *sk);
++
++int most_send_to_sock(int dev_id, struct sk_buff *skb);
++
++/* default implementation of socket operations */
++int most_sock_release(struct socket *sock);
++int most_sock_bind(struct socket *sock, int dev_id, u8 rx_chan, u8 tx_chan);
++int most_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
++int most_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
++ struct msghdr *msg, size_t len, int flags);
++int most_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
++ struct msghdr *msg, size_t len);
++int most_sock_setsockopt(struct socket *sock, int level, int optname,
++ char __user *optval, unsigned int optlen);
++int most_sock_getsockopt(struct socket *sock, int level, int optname,
++ char __user *optval, int __user *optlen);
++
++extern int dev_sock_init(void);
++extern void dev_sock_cleanup(void);
++
++#endif /* __MOST_H */
++
+diff --git a/include/net/most/most_core.h b/include/net/most/most_core.h
+new file mode 100644
+index 0000000..9373d89
+--- /dev/null
++++ b/include/net/most/most_core.h
+@@ -0,0 +1,137 @@
++#ifndef __MOST_CORE_H
++#define __MOST_CORE_H
++
++#include <net/most/most.h>
++
++enum most_chan_type {
++ CHAN_CTL = 0,
++ CHAN_SYNC,
++ CHAN_ASYNC,
++ CHAN_DEV
++};
++
++#define MOST_CONF_FLAG_UP 0x01
++#define MOST_CONF_FLAG_TX 0x02
++
++enum most_dev_state {
++ MOST_DEV_DOWN = 0,
++ MOST_DEV_UP
++};
++
++struct most_dev {
++
++ struct list_head list;
++ atomic_t refcnt;
++
++ char name[8];
++
++ __u16 id;
++ enum most_dev_state state;
++
++ struct module *owner;
++
++ struct tasklet_struct rx_task;
++ struct tasklet_struct tx_task;
++
++ struct sk_buff_head rx_q;
++ struct sk_buff_head ctl_q;
++ struct sk_buff_head async_q;
++ struct sk_buff_head sync_q;
++
++ /* set by the driver */
++
++ void *driver_data;
++ struct device *parent;
++
++ int (*open)(struct most_dev *mdev);
++ int (*close)(struct most_dev *mdev);
++ int (*conf_channel)(struct most_dev *mdev, enum most_chan_type type,
++ u8 channel, u8 flags);
++ int (*send)(struct sk_buff *skb);
++ int (*can_send)(struct sk_buff *skb);
++};
++
++#ifdef DEBUG
++#define most_dbg(...) printk(__VA_ARGS__)
++#else
++#define most_dbg(...)
++#endif
++
++static inline struct most_dev *most_dev_hold(struct most_dev *d)
++{
++ if (try_module_get(d->owner))
++ return d;
++ return NULL;
++}
++
++static inline void most_dev_put(struct most_dev *d)
++{
++ module_put(d->owner);
++}
++
++static inline void most_sched_tx(struct most_dev *mdev)
++{
++ tasklet_schedule(&mdev->tx_task);
++}
++
++static inline void most_sched_rx(struct most_dev *mdev)
++{
++ tasklet_schedule(&mdev->rx_task);
++}
++
++static inline int most_recv_frame(struct sk_buff *skb)
++{
++ struct most_dev *mdev = (struct most_dev *) skb->dev;
++
++ /* Time stamp */
++ __net_timestamp(skb);
++
++ /* Queue frame for rx task */
++ skb_queue_tail(&mdev->rx_q, skb);
++ most_sched_rx(mdev);
++ return 0;
++}
++
++static inline int __most_configure_channel(struct most_dev *mdev,
++ u8 channel_type, u8 channel, u8 up)
++{
++ if (mdev->state != MOST_DEV_UP)
++ return -ENETDOWN;
++
++ if (mdev->conf_channel)
++ if (channel != MOST_NO_CHANNEL)
++ return mdev->conf_channel(mdev, channel_type, channel,
++ up);
++ return 0;
++}
++
++static inline int most_configure_channels(struct most_dev *mdev,
++ struct most_sock *sk, u8 up)
++{
++ int err;
++ u8 flags = (up) ? MOST_CONF_FLAG_UP : 0;
++
++ err = __most_configure_channel(mdev, sk->channel_type, sk->rx_channel,
++ flags);
++ if (err)
++ return err;
++
++ err = __most_configure_channel(mdev, sk->channel_type, sk->tx_channel,
++ flags | MOST_CONF_FLAG_TX);
++ if (err)
++ __most_configure_channel(mdev, sk->channel_type, sk->rx_channel,
++ (up) ? 0 : MOST_CONF_FLAG_UP);
++ return err;
++}
++
++struct most_dev *most_alloc_dev(void);
++void most_free_dev(struct most_dev *mdev);
++int most_register_dev(struct most_dev *mdev);
++int most_unregister_dev(struct most_dev *mdev);
++
++int most_get_dev_list(void __user *arg);
++int most_open_dev(u16 dev_id);
++int most_close_dev(u16 dev_id);
++
++#endif
++
+diff --git a/include/net/most/sync.h b/include/net/most/sync.h
+new file mode 100644
+index 0000000..aa89d16
+--- /dev/null
++++ b/include/net/most/sync.h
+@@ -0,0 +1,12 @@
++#ifndef __SYNC_H
++#define __SYNC_H
++
++struct sockaddr_mostsync {
++ sa_family_t most_family;
++ unsigned short most_dev;
++ unsigned char rx_channel;
++ unsigned char tx_channel;
++};
++
++#endif
++
+diff --git a/include/sound/timbi2s.h b/include/sound/timbi2s.h
+new file mode 100644
+index 0000000..ebfecfe
+--- /dev/null
++++ b/include/sound/timbi2s.h
+@@ -0,0 +1,33 @@
++/*
++ * timbi2s.h timberdale FPGA I2S platform data
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++#ifndef __INCLUDE_SOUND_TIMBI2S_H
++#define __INCLUDE_SOUND_TIMBI2S_H
++
++struct timbi2s_bus_data {
++ u8 rx;
++ u16 sample_rate;
++ const char *name;
++};
++
++struct timbi2s_platform_data {
++ const struct timbi2s_bus_data *busses;
++ int num_busses;
++ u32 main_clk;
++};
++
++#endif
+diff --git a/net/Kconfig b/net/Kconfig
+index 041c35e..063b84a 100644
+--- a/net/Kconfig
++++ b/net/Kconfig
+@@ -255,6 +255,7 @@ source "net/can/Kconfig"
+ source "net/irda/Kconfig"
+ source "net/bluetooth/Kconfig"
+ source "net/rxrpc/Kconfig"
++source "net/most/Kconfig"
+
+ config FIB_RULES
+ bool
+diff --git a/net/Makefile b/net/Makefile
+index 1542e72..3e4fe8f 100644
+--- a/net/Makefile
++++ b/net/Makefile
+@@ -43,6 +43,7 @@ obj-$(CONFIG_ATM) += atm/
+ obj-$(CONFIG_DECNET) += decnet/
+ obj-$(CONFIG_ECONET) += econet/
+ obj-$(CONFIG_PHONET) += phonet/
++obj-$(CONFIG_MOST) += most/
+ ifneq ($(CONFIG_VLAN_8021Q),)
+ obj-y += 8021q/
+ endif
+diff --git a/net/most/Kconfig b/net/most/Kconfig
+new file mode 100644
+index 0000000..a9fd7f2
+--- /dev/null
++++ b/net/most/Kconfig
+@@ -0,0 +1,38 @@
++#
++# Media Oriented Systems Transport (MOST) network layer core configuration
++#
++
++menuconfig MOST
++ depends on NET
++ tristate "MOST bus subsystem support"
++ ---help---
++ Media Oriented Systems Transport (MOST) is a multimedia
++ communications protocol in the automotive industry.
++
++ If you want MOST support you should say Y here.
++
++config MOST_CTL
++ tristate "Support for Control data over MOST"
++ depends on MOST
++ default N
++ ---help---
++ Support for the control channel of the MOST bus.
++
++config MOST_ASYNC
++ tristate "Support for Asynchronous data over MOST"
++ depends on MOST
++ default N
++ ---help---
++ Support for the asyncronous channel of the MOST bus. Normally
++ used for software download od file transfers.
++
++config MOST_SYNC
++ tristate "Support for Synchronous data over MOST"
++ depends on MOST
++ default N
++ ---help---
++ Support for synchronous channles of the MOST bus. Normally used
++ for streaming media such as audio and video.
++
++
++source "drivers/net/most/Kconfig"
+diff --git a/net/most/Makefile b/net/most/Makefile
+new file mode 100644
+index 0000000..2d81b3d
+--- /dev/null
++++ b/net/most/Makefile
+@@ -0,0 +1,15 @@
++#
++# Makefile for the Linux Media Oriented Systems Transport core.
++#
++
++obj-$(CONFIG_MOST) += most.o
++most-objs := af_most.o most_core.o most_sock.o dev_sock.o
++
++obj-$(CONFIG_MOST_CTL) += ctl.o
++ctl-objs := ctl_sock.o
++
++obj-$(CONFIG_MOST_SYNC) += sync.o
++sync-objs := sync_sock.o
++
++obj-$(CONFIG_MOST_ASYNC) += async.o
++async-objs := async_sock.o
+diff --git a/net/most/af_most.c b/net/most/af_most.c
+new file mode 100644
+index 0000000..157df9f
+--- /dev/null
++++ b/net/most/af_most.c
+@@ -0,0 +1,170 @@
++/*
++ * af_most.c Support for the MOST address family
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <net/most/most.h>
++
++#define MOST_MAX_PROTO 4
++static struct net_proto_family *most_proto[MOST_MAX_PROTO];
++static DEFINE_RWLOCK(most_proto_lock);
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++static struct lock_class_key most_lock_key[MOST_MAX_PROTO];
++static const char *most_key_strings[MOST_MAX_PROTO] = {
++ "sk_lock-AF_MOST-MOSTPROTO_DEV",
++ "sk_lock-AF_MOST-MOSTPROTO_CTL",
++ "sk_lock-AF_MOST-MOSTPROTO_SYNC",
++ "sk_lock-AF_MOST-MOSTPROTO_ASYNC",
++};
++
++static struct lock_class_key most_slock_key[MOST_MAX_PROTO];
++static const char *most_slock_key_strings[MOST_MAX_PROTO] = {
++ "slock-AF_MOST-MOSTPROTO_DEV",
++ "slock-AF_MOST-MOSTPROTO_CTL",
++ "slock-AF_MOST-MOSTPROTO_SYNC",
++ "slock-AF_MOST-MOSTPROTO_ASYNC",
++};
++
++static inline void most_sock_reclassify_lock(struct socket *sock, int proto)
++{
++ struct sock *sk = sock->sk;
++
++ if (!sk)
++ return;
++
++ BUG_ON(sock_owned_by_user(sk));
++
++ sock_lock_init_class_and_name(sk,
++ most_slock_key_strings[proto], &most_slock_key[proto],
++ most_key_strings[proto], &most_lock_key[proto]);
++}
++#else
++static inline void most_sock_reclassify_lock(struct socket *sock, int proto)
++{
++}
++#endif
++
++
++int most_sock_register(int proto, struct net_proto_family *ops)
++{
++ int err = 0;
++
++ if (proto < 0 || proto >= MOST_MAX_PROTO)
++ return -EINVAL;
++
++ write_lock(&most_proto_lock);
++
++ if (most_proto[proto])
++ err = -EEXIST;
++ else
++ most_proto[proto] = ops;
++
++ write_unlock(&most_proto_lock);
++
++ return err;
++}
++EXPORT_SYMBOL(most_sock_register);
++
++int most_sock_unregister(int proto)
++{
++ int err = 0;
++
++ if (proto < 0 || proto >= MOST_MAX_PROTO)
++ return -EINVAL;
++
++ write_lock(&most_proto_lock);
++
++ if (!most_proto[proto])
++ err = -ENOENT;
++ else
++ most_proto[proto] = NULL;
++
++ write_unlock(&most_proto_lock);
++
++ return err;
++}
++EXPORT_SYMBOL(most_sock_unregister);
++
++static int most_sock_create(struct net *net, struct socket *sock, int proto,
++ int kern)
++{
++ int err;
++
++ if (net != &init_net)
++ return -EAFNOSUPPORT;
++
++ if (proto < 0 || proto >= MOST_MAX_PROTO)
++ return -EINVAL;
++
++ if (!most_proto[proto])
++ request_module("most-proto-%d", proto);
++
++ err = -EPROTONOSUPPORT;
++
++ read_lock(&most_proto_lock);
++
++ if (most_proto[proto] && try_module_get(most_proto[proto]->owner)) {
++ err = most_proto[proto]->create(net, sock, proto, kern);
++ most_sock_reclassify_lock(sock, proto);
++ module_put(most_proto[proto]->owner);
++ }
++
++ read_unlock(&most_proto_lock);
++
++ return err;
++}
++
++static struct net_proto_family most_sock_family_ops = {
++ .owner = THIS_MODULE,
++ .family = PF_MOST,
++ .create = most_sock_create,
++};
++
++static int __init most_init(void)
++{
++ int err;
++
++ err = sock_register(&most_sock_family_ops);
++ if (err < 0)
++ return err;
++
++ err = dev_sock_init();
++ if (err < 0) {
++ sock_unregister(PF_MOST);
++ return err;
++ }
++
++ printk(KERN_INFO "MOST is initialized\n");
++
++ return 0;
++}
++
++static void __exit most_exit(void)
++{
++ dev_sock_cleanup();
++
++ sock_unregister(PF_MOST);
++}
++
++subsys_initcall(most_init);
++module_exit(most_exit);
++
++MODULE_DESCRIPTION("MOST Core");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS_NETPROTO(PF_MOST);
++
+diff --git a/net/most/async_sock.c b/net/most/async_sock.c
+new file mode 100644
+index 0000000..3a11b9b
+--- /dev/null
++++ b/net/most/async_sock.c
+@@ -0,0 +1,155 @@
++/*
++ * async_sock.c MOST asyncronous socket support
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Support for MOST asynchronous sockets
++ */
++
++#include <linux/module.h>
++#include <net/most/most.h>
++#include <net/most/most_core.h>
++#include <net/most/async.h>
++
++static int async_sock_bind(struct socket *sock, struct sockaddr *addr,
++ int addr_len)
++{
++ struct sockaddr_mostasync *aaddr = (struct sockaddr_mostasync *)addr;
++
++ if (!aaddr || aaddr->most_family != AF_MOST)
++ return -EINVAL;
++
++ return most_sock_bind(sock, aaddr->most_dev, aaddr->rx_channel,
++ aaddr->tx_channel);
++}
++
++static int async_sock_getname(struct socket *sock, struct sockaddr *addr,
++ int *addr_len, int peer)
++{
++ struct sockaddr_mostasync *aaddr = (struct sockaddr_mostasync *)addr;
++ struct sock *sk = sock->sk;
++ struct most_dev *mdev = most_sk(sk)->mdev;
++
++ if (!mdev)
++ return -EBADFD;
++
++ lock_sock(sk);
++
++ *addr_len = sizeof(*aaddr);
++ aaddr->most_family = AF_MOST;
++ aaddr->most_dev = mdev->id;
++ aaddr->rx_channel = most_sk(sk)->rx_channel;
++ aaddr->tx_channel = most_sk(sk)->tx_channel;
++
++ release_sock(sk);
++ return 0;
++}
++
++
++static const struct proto_ops async_sock_ops = {
++ .family = PF_MOST,
++ .owner = THIS_MODULE,
++ .release = most_sock_release,
++ .bind = async_sock_bind,
++ .getname = async_sock_getname,
++ .sendmsg = most_sock_sendmsg,
++ .recvmsg = most_sock_recvmsg,
++ .ioctl = most_sock_ioctl,
++ .poll = datagram_poll,
++ .listen = sock_no_listen,
++ .shutdown = sock_no_shutdown,
++ .setsockopt = most_sock_setsockopt,
++ .getsockopt = most_sock_getsockopt,
++ .connect = sock_no_connect,
++ .socketpair = sock_no_socketpair,
++ .accept = sock_no_accept,
++ .mmap = sock_no_mmap
++};
++static struct proto async_sk_proto = {
++ .name = "ASYNC",
++ .owner = THIS_MODULE,
++ .obj_size = sizeof(struct most_sock)
++};
++
++static int async_sock_create(struct net *net, struct socket *sock, int protocol,
++ int kern)
++{
++ struct sock *sk;
++
++ if (sock->type != SOCK_DGRAM)
++ return -ESOCKTNOSUPPORT;
++
++ sock->ops = &async_sock_ops;
++
++ sk = most_sk_alloc(net, &async_sk_proto, CHAN_ASYNC);
++ if (!sk)
++ return -ENOMEM;
++
++ sock_init_data(sock, sk);
++
++ sock_reset_flag(sk, SOCK_ZAPPED);
++
++ sk->sk_protocol = protocol;
++
++ sock->state = SS_UNCONNECTED;
++ sk->sk_state = MOST_OPEN;
++
++ most_sock_link(sk);
++ return 0;
++}
++
++static struct net_proto_family async_sock_family_ops = {
++ .family = PF_MOST,
++ .owner = THIS_MODULE,
++ .create = async_sock_create,
++};
++
++
++static int __init async_init(void)
++{
++ int err;
++
++ err = proto_register(&async_sk_proto, 0);
++ if (err < 0)
++ return err;
++
++ err = most_sock_register(MOSTPROTO_ASYNC, &async_sock_family_ops);
++ if (err < 0) {
++ printk(KERN_ERR "MOST socket registration failed\n");
++ return err;
++ }
++
++ printk(KERN_INFO "MOST asynchronous socket layer initialized\n");
++
++ return 0;
++}
++
++static void __exit async_exit(void)
++{
++ if (most_sock_unregister(MOSTPROTO_ASYNC) < 0)
++ printk(KERN_ERR "ASYNC socket unregistration failed\n");
++
++ proto_unregister(&async_sk_proto);
++}
++
++module_init(async_init);
++module_exit(async_exit);
++
++MODULE_DESCRIPTION("Most Asyncronous");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("most-proto-3");
++
+diff --git a/net/most/ctl_sock.c b/net/most/ctl_sock.c
+new file mode 100644
+index 0000000..c10cb32
+--- /dev/null
++++ b/net/most/ctl_sock.c
+@@ -0,0 +1,160 @@
++/*
++ * ctl_sock.c Support for MOST control sockets
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <net/most/most.h>
++#include <net/most/most_core.h>
++#include <net/most/ctl.h>
++
++
++static int ctl_sock_bind(struct socket *sock, struct sockaddr *addr,
++ int addr_len)
++{
++ struct sockaddr_mostctl *caddr = (struct sockaddr_mostctl *) addr;
++
++ if (!caddr || caddr->most_family != AF_MOST)
++ return -EINVAL;
++
++ return most_sock_bind(sock, caddr->most_dev, caddr->rx_channel,
++ caddr->tx_channel);
++}
++
++static int ctl_sock_getname(struct socket *sock, struct sockaddr *addr,
++ int *addr_len, int peer)
++{
++ struct sockaddr_mostctl *caddr = (struct sockaddr_mostctl *) addr;
++ struct sock *sk = sock->sk;
++ struct most_dev *mdev = most_sk(sk)->mdev;
++
++ if (!mdev)
++ return -EBADFD;
++
++ lock_sock(sk);
++
++ *addr_len = sizeof(*caddr);
++ caddr->most_family = AF_MOST;
++ caddr->most_dev = mdev->id;
++ caddr->rx_channel = most_sk(sk)->rx_channel;
++ caddr->tx_channel = most_sk(sk)->tx_channel;
++
++ release_sock(sk);
++ return 0;
++}
++
++int ctl_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
++ struct msghdr *msg, size_t len)
++{
++ if (len != CTL_FRAME_SIZE)
++ return -EINVAL;
++
++ return most_sock_sendmsg(iocb, sock, msg, len);
++}
++
++static const struct proto_ops ctl_sock_ops = {
++ .family = PF_MOST,
++ .owner = THIS_MODULE,
++ .release = most_sock_release,
++ .bind = ctl_sock_bind,
++ .getname = ctl_sock_getname,
++ .sendmsg = most_sock_sendmsg,
++ .recvmsg = most_sock_recvmsg,
++ .ioctl = most_sock_ioctl,
++ .poll = datagram_poll,
++ .listen = sock_no_listen,
++ .shutdown = sock_no_shutdown,
++ .setsockopt = most_sock_setsockopt,
++ .getsockopt = most_sock_getsockopt,
++ .connect = sock_no_connect,
++ .socketpair = sock_no_socketpair,
++ .accept = sock_no_accept,
++ .mmap = sock_no_mmap
++};
++static struct proto ctl_sk_proto = {
++ .name = "CTL",
++ .owner = THIS_MODULE,
++ .obj_size = sizeof(struct most_sock)
++};
++
++static int ctl_sock_create(struct net *net, struct socket *sock, int protocol,
++ int kern)
++{
++ struct sock *sk;
++
++ if (sock->type != SOCK_RAW)
++ return -ESOCKTNOSUPPORT;
++
++ sock->ops = &ctl_sock_ops;
++
++ sk = most_sk_alloc(net, &ctl_sk_proto, CHAN_CTL);
++ if (!sk)
++ return -ENOMEM;
++
++ sock_init_data(sock, sk);
++
++ sock_reset_flag(sk, SOCK_ZAPPED);
++
++ sk->sk_protocol = protocol;
++
++ sock->state = SS_UNCONNECTED;
++ sk->sk_state = MOST_OPEN;
++
++ most_sock_link(sk);
++ return 0;
++}
++
++static struct net_proto_family ctl_sock_family_ops = {
++ .family = PF_MOST,
++ .owner = THIS_MODULE,
++ .create = ctl_sock_create,
++};
++
++
++static int __init ctl_init(void)
++{
++ int err;
++
++ err = proto_register(&ctl_sk_proto, 0);
++ if (err < 0)
++ return err;
++
++ err = most_sock_register(MOSTPROTO_CTL, &ctl_sock_family_ops);
++ if (err < 0) {
++ printk(KERN_ERR "MOST socket registration failed\n");
++ return err;
++ }
++
++ printk(KERN_INFO "MOST control socket layer initialized\n");
++
++ return 0;
++}
++
++static void __exit ctl_exit(void)
++{
++ if (most_sock_unregister(MOSTPROTO_CTL) < 0)
++ printk(KERN_ERR "Control socket unregistration failed\n");
++
++ proto_unregister(&ctl_sk_proto);
++}
++
++module_init(ctl_init);
++module_exit(ctl_exit);
++
++MODULE_DESCRIPTION("Most Control");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("most-proto-1");
++
+diff --git a/net/most/dev_sock.c b/net/most/dev_sock.c
+new file mode 100644
+index 0000000..43b980b
+--- /dev/null
++++ b/net/most/dev_sock.c
+@@ -0,0 +1,171 @@
++/*
++ * dev_sock.c Device MOST sockets, to control the underlaying devices
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <net/most/most.h>
++#include <net/most/most_core.h>
++#include <net/most/dev.h>
++
++/* Ioctls that require bound socket */
++static inline int dev_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
++ unsigned long arg)
++{
++ return -ENOSYS;
++}
++
++static int dev_sock_ioctl(struct socket *sock, unsigned int cmd,
++ unsigned long arg)
++{
++ void __user *argp = (void __user *) arg;
++
++ switch (cmd) {
++ case MOSTDEVUP:
++ return most_open_dev(arg & 0xffff);
++ case MOSTDEVDOWN:
++ return most_close_dev(arg & 0xffff);
++ case MOSTGETDEVLIST:
++ return most_get_dev_list(argp);
++ default:
++ return -EINVAL;
++ }
++}
++
++static int dev_sock_bind(struct socket *sock, struct sockaddr *addr,
++ int addr_len)
++{
++ return -ENOSYS;
++}
++
++static int dev_sock_getname(struct socket *sock, struct sockaddr *addr,
++ int *addr_len, int peer)
++{
++ struct sockaddr_mostdev *daddr = (struct sockaddr_mostdev *) addr;
++ struct sock *sk = sock->sk;
++ struct most_dev *mdev = most_sk(sk)->mdev;
++
++ if (!mdev)
++ return -EBADFD;
++
++ lock_sock(sk);
++
++ *addr_len = sizeof(*daddr);
++ daddr->most_family = AF_MOST;
++ daddr->most_dev = mdev->id;
++
++ release_sock(sk);
++ return 0;
++}
++
++static int dev_sock_setsockopt(struct socket *sock, int level, int optname,
++ char __user *optval, unsigned int optlen)
++{
++ return -ENOSYS;
++}
++
++static int dev_sock_getsockopt(struct socket *sock, int level, int optname,
++ char __user *optval, int __user *optlen)
++{
++ return -ENOSYS;
++}
++
++static const struct proto_ops dev_sock_ops = {
++ .family = PF_MOST,
++ .owner = THIS_MODULE,
++ .release = most_sock_release,
++ .bind = dev_sock_bind,
++ .getname = dev_sock_getname,
++ .sendmsg = sock_no_sendmsg,
++ .recvmsg = sock_no_recvmsg,
++ .ioctl = dev_sock_ioctl,
++ .poll = sock_no_poll,
++ .listen = sock_no_listen,
++ .shutdown = sock_no_shutdown,
++ .setsockopt = dev_sock_setsockopt,
++ .getsockopt = dev_sock_getsockopt,
++ .connect = sock_no_connect,
++ .socketpair = sock_no_socketpair,
++ .accept = sock_no_accept,
++ .mmap = sock_no_mmap
++};
++static struct proto dev_sk_proto = {
++ .name = "DEV",
++ .owner = THIS_MODULE,
++ .obj_size = sizeof(struct most_sock)
++};
++
++static int dev_sock_create(struct net *net, struct socket *sock, int protocol,
++ int kern)
++{
++ struct sock *sk;
++
++ if (sock->type != SOCK_RAW)
++ return -ESOCKTNOSUPPORT;
++
++ sock->ops = &dev_sock_ops;
++
++ sk = most_sk_alloc(net, &dev_sk_proto, CHAN_DEV);
++ if (!sk)
++ return -ENOMEM;
++
++ sock_init_data(sock, sk);
++
++ sock_reset_flag(sk, SOCK_ZAPPED);
++
++ sk->sk_protocol = protocol;
++
++ sock->state = SS_UNCONNECTED;
++ sk->sk_state = MOST_OPEN;
++
++ most_sock_link(sk);
++ return 0;
++}
++
++static struct net_proto_family dev_sock_family_ops = {
++ .family = PF_MOST,
++ .owner = THIS_MODULE,
++ .create = dev_sock_create,
++};
++
++
++int __init dev_sock_init(void)
++{
++ int err;
++
++ err = proto_register(&dev_sk_proto, 0);
++ if (err < 0)
++ return err;
++
++ err = most_sock_register(MOSTPROTO_DEV, &dev_sock_family_ops);
++ if (err < 0) {
++ printk(KERN_ERR "MOST socket registration failed\n");
++ return err;
++ }
++
++ printk(KERN_INFO "MOST device socket layer initialized\n");
++
++ return 0;
++}
++
++void __exit dev_sock_cleanup(void)
++{
++ if (most_sock_unregister(MOSTPROTO_DEV) < 0)
++ printk(KERN_ERR "Device socket unregistration failed\n");
++
++ proto_unregister(&dev_sk_proto);
++}
++
+diff --git a/net/most/most_core.c b/net/most/most_core.c
+new file mode 100644
+index 0000000..e01aa68
+--- /dev/null
++++ b/net/most/most_core.c
+@@ -0,0 +1,287 @@
++/*
++ * most_core.c The MOST core functions
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++
++#include <net/most/most_core.h>
++#include <net/most/dev.h>
++
++/* MOST device list */
++LIST_HEAD(most_dev_list);
++DEFINE_RWLOCK(most_dev_list_lock);
++
++
++int most_open_dev(u16 dev_id)
++{
++ struct most_dev *mdev = most_dev_get(dev_id);
++ int err = 0;
++
++ if (!mdev)
++ return -ENODEV;
++
++ most_dbg("%s: %s, state: %d\n", __func__, mdev->name, mdev->state);
++
++ if (mdev->state == MOST_DEV_UP)
++ err = -EALREADY;
++
++ if (!err)
++ err = mdev->open(mdev);
++ if (!err)
++ mdev->state = MOST_DEV_UP;
++
++ most_dev_put(mdev);
++ most_dbg("%s: %s, state: %d, err: %d\n", __func__,
++ mdev->name, mdev->state, err);
++ return err;
++}
++
++static int __most_close_dev(struct most_dev *mdev)
++{
++ int err = 0;
++
++ most_dbg("%s: %s, state: %d\n", __func__, mdev ? mdev->name : "nil",
++ mdev ? mdev->state : -1);
++
++ if (!mdev)
++ return -ENODEV;
++
++ if (mdev->state == MOST_DEV_DOWN)
++ err = -EALREADY;
++
++ if (!err)
++ err = mdev->close(mdev);
++ if (!err)
++ mdev->state = MOST_DEV_DOWN;
++
++ most_dev_put(mdev);
++ most_dbg("%s: %s, state: %d, err: %d\n", __func__,
++ mdev->name, mdev->state, err);
++ return err;
++}
++
++int most_close_dev(u16 dev_id)
++{
++ return __most_close_dev(most_dev_get(dev_id));
++}
++
++int most_get_dev_list(void __user *arg)
++{
++ struct most_dev_list_req *dl;
++ struct most_dev_req *dr;
++ struct list_head *p;
++ int n = 0, size, err;
++ u16 dev_num;
++
++ if (get_user(dev_num, (u16 __user *) arg))
++ return -EFAULT;
++
++ if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
++ return -EINVAL;
++
++ size = sizeof(*dl) + dev_num * sizeof(*dr);
++
++ dl = kzalloc(size, GFP_KERNEL);
++ if (!dl)
++ return -ENOMEM;
++
++ dr = dl->dev_req;
++
++ read_lock_bh(&most_dev_list_lock);
++ list_for_each(p, &most_dev_list) {
++ struct most_dev *mdev;
++ mdev = list_entry(p, struct most_dev, list);
++ (dr + n)->dev_id = mdev->id;
++ if (++n >= dev_num)
++ break;
++ }
++ read_unlock_bh(&most_dev_list_lock);
++
++ dl->dev_num = n;
++ size = sizeof(*dl) + n * sizeof(*dr);
++
++ err = copy_to_user(arg, dl, size);
++ kfree(dl);
++
++ return err ? -EFAULT : 0;
++}
++
++static int most_send_frame(struct sk_buff *skb)
++{
++ struct most_dev *mdev = (struct most_dev *) skb->dev;
++
++ if (!mdev) {
++ kfree_skb(skb);
++ return -ENODEV;
++ }
++
++ most_dbg("%s: %s type %d len %d\n", __func__, mdev->name,
++ most_cb(skb)->channel_type, skb->len);
++
++ /* Get rid of skb owner, prior to sending to the driver. */
++ skb_orphan(skb);
++
++ return mdev->send(skb);
++}
++
++static void most_send_queue(struct sk_buff_head *q)
++{
++ struct sk_buff *skb;
++
++ while ((skb = skb_dequeue(q))) {
++ struct most_dev *mdev = (struct most_dev *)skb->dev;
++
++ most_dbg("%s: skb %p len %d\n", __func__, skb, skb->len);
++
++ if (!mdev->can_send || mdev->can_send(skb))
++ most_send_frame(skb);
++ else {
++ most_dbg("%s, could not send frame, requeueing\n",
++ __func__);
++ skb_queue_tail(q, skb);
++ break;
++ }
++ }
++}
++
++static void most_tx_task(unsigned long arg)
++{
++ struct most_dev *mdev = (struct most_dev *) arg;
++
++ most_dbg("%s: %s\n", __func__, mdev->name);
++
++ most_send_queue(&mdev->ctl_q);
++ most_send_queue(&mdev->sync_q);
++ most_send_queue(&mdev->async_q);
++}
++
++static void most_rx_task(unsigned long arg)
++{
++ struct most_dev *mdev = (struct most_dev *) arg;
++ struct sk_buff *skb = skb_dequeue(&mdev->rx_q);
++
++ most_dbg("%s: %s\n", __func__, mdev->name);
++
++ while (skb) {
++ /* Send to the sockets */
++ most_send_to_sock(mdev->id, skb);
++ kfree_skb(skb);
++ skb = skb_dequeue(&mdev->rx_q);
++ }
++}
++
++
++/* Get MOST device by index.
++ * Device is held on return. */
++struct most_dev *most_dev_get(int index)
++{
++ struct most_dev *mdev = NULL;
++ struct list_head *p;
++
++ if (index < 0)
++ return NULL;
++
++ read_lock(&most_dev_list_lock);
++ list_for_each(p, &most_dev_list) {
++ struct most_dev *d = list_entry(p, struct most_dev, list);
++ if (d->id == index) {
++ mdev = most_dev_hold(d);
++ break;
++ }
++ }
++ read_unlock(&most_dev_list_lock);
++ return mdev;
++}
++EXPORT_SYMBOL(most_dev_get);
++
++
++/* Alloc MOST device */
++struct most_dev *most_alloc_dev(void)
++{
++ struct most_dev *mdev;
++
++ mdev = kzalloc(sizeof(struct most_dev), GFP_KERNEL);
++ if (!mdev)
++ return NULL;
++
++ mdev->state = MOST_DEV_DOWN;
++
++ return mdev;
++}
++EXPORT_SYMBOL(most_alloc_dev);
++
++
++void most_free_dev(struct most_dev *mdev)
++{
++ kfree(mdev);
++}
++EXPORT_SYMBOL(most_free_dev);
++
++
++/* Register MOST device */
++int most_register_dev(struct most_dev *mdev)
++{
++ struct list_head *head = &most_dev_list, *p;
++ int id = 0;
++
++ if (!mdev->open || !mdev->close || !mdev->send || !mdev->owner)
++ return -EINVAL;
++
++ write_lock_bh(&most_dev_list_lock);
++
++ /* Find first available device id */
++ list_for_each(p, &most_dev_list) {
++ if (list_entry(p, struct most_dev, list)->id != id)
++ break;
++ head = p; id++;
++ }
++
++ sprintf(mdev->name, "most%d", id);
++ mdev->id = id;
++ list_add(&mdev->list, head);
++
++ tasklet_init(&mdev->rx_task, most_rx_task, (unsigned long) mdev);
++ tasklet_init(&mdev->tx_task, most_tx_task, (unsigned long) mdev);
++
++ skb_queue_head_init(&mdev->rx_q);
++ skb_queue_head_init(&mdev->ctl_q);
++ skb_queue_head_init(&mdev->sync_q);
++ skb_queue_head_init(&mdev->async_q);
++
++ write_unlock_bh(&most_dev_list_lock);
++ return 0;
++}
++EXPORT_SYMBOL(most_register_dev);
++
++int most_unregister_dev(struct most_dev *mdev)
++{
++ int ret = 0;
++ most_dbg("%s: %s: state: %d\n", __func__, mdev->name, mdev->state);
++
++ if (mdev->state != MOST_DEV_DOWN)
++ ret = __most_close_dev(mdev);
++
++ write_lock_bh(&most_dev_list_lock);
++ list_del(&mdev->list);
++ write_unlock_bh(&most_dev_list_lock);
++
++ return ret;
++}
++EXPORT_SYMBOL(most_unregister_dev);
++
+diff --git a/net/most/most_sock.c b/net/most/most_sock.c
+new file mode 100644
+index 0000000..ff07383
+--- /dev/null
++++ b/net/most/most_sock.c
+@@ -0,0 +1,315 @@
++/*
++ * most_sock.c Generic functions for MOST sockets
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <net/most/most_core.h>
++
++static struct most_sock_list most_sk_list = {
++ .lock = __RW_LOCK_UNLOCKED(ctl_sk_list.lock)
++};
++
++void most_sock_link(struct sock *sk)
++{
++ write_lock_bh(&most_sk_list.lock);
++ sk_add_node(sk, &most_sk_list.head);
++ write_unlock_bh(&most_sk_list.lock);
++}
++EXPORT_SYMBOL(most_sock_link);
++
++void most_sock_unlink(struct sock *sk)
++{
++ write_lock_bh(&most_sk_list.lock);
++ sk_del_node_init(sk);
++ write_unlock_bh(&most_sk_list.lock);
++}
++EXPORT_SYMBOL(most_sock_unlink);
++
++static int channel_in_use(int dev_id, u8 channel)
++{
++ struct sock *sk;
++ struct hlist_node *node;
++
++ read_lock_bh(&most_sk_list.lock);
++
++ sk_for_each(sk, node, &most_sk_list.head)
++ if (most_sk(sk)->dev_id == dev_id &&
++ sk->sk_state == MOST_BOUND &&
++ (most_sk(sk)->rx_channel == channel ||
++ most_sk(sk)->tx_channel == channel))
++ goto found;
++
++ sk = NULL;
++found:
++ read_unlock_bh(&most_sk_list.lock);
++
++ return sk != NULL;
++}
++
++int most_send_to_sock(int dev_id, struct sk_buff *skb)
++{
++ struct sock *sk;
++ struct hlist_node *node;
++
++ read_lock(&most_sk_list.lock);
++ sk_for_each(sk, node, &most_sk_list.head) {
++ if (most_sk(sk)->dev_id == dev_id &&
++ most_sk(sk)->channel_type == most_cb(skb)->channel_type
++ && most_sk(sk)->rx_channel == most_cb(skb)->channel &&
++ sk->sk_state == MOST_BOUND) {
++
++ struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
++ if (nskb)
++ if (sock_queue_rcv_skb(sk, nskb))
++ kfree_skb(nskb);
++ }
++
++ }
++ read_unlock(&most_sk_list.lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(most_send_to_sock);
++
++int most_sock_release(struct socket *sock)
++{
++ struct sock *sk = sock->sk;
++ struct most_dev *mdev;
++
++ most_dbg("%s: sock %p sk %p\n", __func__, sock, sk);
++
++ if (!sk)
++ return 0;
++
++ mdev = most_sk(sk)->mdev;
++
++ most_sock_unlink(sk);
++
++ if (mdev) {
++ if (sk->sk_state == MOST_BOUND)
++ most_configure_channels(mdev, most_sk(sk), 0);
++
++ most_dev_put(mdev);
++ }
++
++ sock_orphan(sk);
++ sock_put(sk);
++ return 0;
++}
++EXPORT_SYMBOL(most_sock_release);
++
++int most_sock_bind(struct socket *sock, int dev_id, u8 rx_chan, u8 tx_chan)
++{
++ struct sock *sk = sock->sk;
++ struct most_dev *mdev = NULL;
++ int err = 0;
++
++ most_dbg("%s: sock %p sk %p, rx: %d, tx: %d\n",
++ __func__, sock, sk, rx_chan, tx_chan);
++
++ lock_sock(sk);
++
++ if (sk->sk_state != MOST_OPEN) {
++ err = -EBADFD;
++ goto done;
++ }
++
++ if (most_sk(sk)->mdev) {
++ err = -EALREADY;
++ goto done;
++ }
++
++ if (channel_in_use(dev_id, rx_chan) ||
++ channel_in_use(dev_id, tx_chan)) {
++ err = -EADDRINUSE;
++ goto done;
++ } else {
++ most_sk(sk)->rx_channel = rx_chan;
++ most_sk(sk)->tx_channel = tx_chan;
++ }
++
++ mdev = most_dev_get(dev_id);
++ if (!mdev) {
++ err = -ENODEV;
++ goto done;
++ }
++
++ err = most_configure_channels(mdev, most_sk(sk), 1);
++ if (err) {
++ most_dev_put(mdev);
++ goto done;
++ }
++
++ most_sk(sk)->mdev = mdev;
++ most_sk(sk)->dev_id = mdev->id;
++
++ sk->sk_state = MOST_BOUND;
++
++done:
++ release_sock(sk);
++ return err;
++}
++EXPORT_SYMBOL(most_sock_bind);
++
++
++int most_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
++{
++ most_dbg("%s\n", __func__);
++ return -EINVAL;
++}
++EXPORT_SYMBOL(most_sock_ioctl);
++
++int most_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
++ struct msghdr *msg, size_t len, int flags)
++{
++ int noblock = flags & MSG_DONTWAIT;
++ struct sock *sk = sock->sk;
++ struct sk_buff *skb;
++ int copied, err;
++
++ most_dbg("%s\n", __func__);
++
++ if (most_sk(sk)->rx_channel == MOST_NO_CHANNEL)
++ return -EOPNOTSUPP;
++
++ if (flags & (MSG_OOB))
++ return -EOPNOTSUPP;
++
++ if (sk->sk_state != MOST_BOUND)
++ return 0;
++
++ skb = skb_recv_datagram(sk, flags, noblock, &err);
++ if (!skb)
++ return err;
++
++ msg->msg_namelen = 0;
++
++ copied = skb->len;
++ if (len < copied) {
++ msg->msg_flags |= MSG_TRUNC;
++ copied = len;
++ }
++
++ skb_reset_transport_header(skb);
++ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
++
++ skb_free_datagram(sk, skb);
++
++ return err ? : copied;
++}
++EXPORT_SYMBOL(most_sock_recvmsg);
++
++int most_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
++ struct msghdr *msg, size_t len)
++{
++ struct sock *sk = sock->sk;
++ struct most_dev *mdev;
++ struct sk_buff *skb;
++ int err;
++
++ most_dbg("%s: sock %p sk %p, channeltype: %d\n",
++ __func__, sock, sk, most_sk(sk)->channel_type);
++
++ if (most_sk(sk)->tx_channel == MOST_NO_CHANNEL)
++ return -EOPNOTSUPP;
++
++ if (msg->msg_flags & MSG_OOB)
++ return -EOPNOTSUPP;
++
++ if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
++ return -EINVAL;
++
++ lock_sock(sk);
++
++ mdev = most_sk(sk)->mdev;
++ if (!mdev) {
++ err = -EBADFD;
++ goto done;
++ }
++
++ skb = sock_alloc_send_skb(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
++ if (!skb)
++ goto done;
++
++ most_cb(skb)->channel = most_sk(sk)->tx_channel;
++ most_cb(skb)->channel_type = most_sk(sk)->channel_type;
++
++ if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
++ err = -EFAULT;
++ goto drop;
++ }
++
++ skb->dev = (void *) mdev;
++
++ skb_queue_tail(&mdev->ctl_q, skb);
++ most_sched_tx(mdev);
++
++ err = len;
++
++done:
++ release_sock(sk);
++ return err;
++
++drop:
++ kfree_skb(skb);
++ goto done;
++}
++EXPORT_SYMBOL(most_sock_sendmsg);
++
++int most_sock_setsockopt(struct socket *sock, int level, int optname,
++ char __user *optval, unsigned int optlen)
++{
++ struct sock *sk = sock->sk;
++ int err = 0;
++
++ most_dbg("%s: sk %p", __func__, sk);
++
++ lock_sock(sk);
++
++ switch (optname) {
++ default:
++ err = -ENOPROTOOPT;
++ break;
++ }
++
++ release_sock(sk);
++ return err;
++}
++EXPORT_SYMBOL(most_sock_setsockopt);
++
++
++int most_sock_getsockopt(struct socket *sock, int level, int optname,
++ char __user *optval, int __user *optlen)
++{
++ struct sock *sk = sock->sk;
++ int err = 0;
++
++ most_dbg("%s: sk %p", __func__, sk);
++
++ lock_sock(sk);
++
++ switch (optname) {
++ default:
++ err = -ENOPROTOOPT;
++ break;
++ }
++
++ release_sock(sk);
++ return err;
++}
++EXPORT_SYMBOL(most_sock_getsockopt);
++
+diff --git a/net/most/sync_sock.c b/net/most/sync_sock.c
+new file mode 100644
+index 0000000..77342f2
+--- /dev/null
++++ b/net/most/sync_sock.c
+@@ -0,0 +1,151 @@
++/*
++ * sync_sock.c Support for MOST synchronous sockets
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <net/most/most.h>
++#include <net/most/most_core.h>
++#include <net/most/sync.h>
++
++static int sync_sock_bind(struct socket *sock, struct sockaddr *addr,
++ int addr_len)
++{
++ struct sockaddr_mostsync *saddr = (struct sockaddr_mostsync *)addr;
++
++ if (!saddr || saddr->most_family != AF_MOST)
++ return -EINVAL;
++
++ return most_sock_bind(sock, saddr->most_dev, saddr->rx_channel,
++ saddr->tx_channel);
++}
++
++static int sync_sock_getname(struct socket *sock, struct sockaddr *addr,
++ int *addr_len, int peer)
++{
++ struct sockaddr_mostsync *saddr = (struct sockaddr_mostsync *)addr;
++ struct sock *sk = sock->sk;
++ struct most_dev *mdev = most_sk(sk)->mdev;
++
++ if (!mdev)
++ return -EBADFD;
++
++ lock_sock(sk);
++
++ *addr_len = sizeof(*saddr);
++ saddr->most_family = AF_MOST;
++ saddr->most_dev = mdev->id;
++ saddr->rx_channel = most_sk(sk)->rx_channel;
++ saddr->tx_channel = most_sk(sk)->tx_channel;
++
++ release_sock(sk);
++ return 0;
++}
++
++
++static const struct proto_ops sync_sock_ops = {
++ .family = PF_MOST,
++ .owner = THIS_MODULE,
++ .release = most_sock_release,
++ .bind = sync_sock_bind,
++ .getname = sync_sock_getname,
++ .sendmsg = most_sock_sendmsg,
++ .recvmsg = most_sock_recvmsg,
++ .ioctl = most_sock_ioctl,
++ .poll = datagram_poll,
++ .listen = sock_no_listen,
++ .shutdown = sock_no_shutdown,
++ .setsockopt = most_sock_setsockopt,
++ .getsockopt = most_sock_getsockopt,
++ .connect = sock_no_connect,
++ .socketpair = sock_no_socketpair,
++ .accept = sock_no_accept,
++ .mmap = sock_no_mmap
++};
++static struct proto sync_sk_proto = {
++ .name = "SYNC",
++ .owner = THIS_MODULE,
++ .obj_size = sizeof(struct most_sock)
++};
++
++static int sync_sock_create(struct net *net, struct socket *sock, int protocol,
++ int kern)
++{
++ struct sock *sk;
++
++ if (sock->type != SOCK_STREAM)
++ return -ESOCKTNOSUPPORT;
++
++ sock->ops = &sync_sock_ops;
++
++ sk = most_sk_alloc(net, &sync_sk_proto, CHAN_SYNC);
++ if (!sk)
++ return -ENOMEM;
++
++ sock_init_data(sock, sk);
++
++ sock_reset_flag(sk, SOCK_ZAPPED);
++
++ sk->sk_protocol = protocol;
++
++ sock->state = SS_UNCONNECTED;
++ sk->sk_state = MOST_OPEN;
++
++ most_sock_link(sk);
++ return 0;
++}
++
++static struct net_proto_family sync_sock_family_ops = {
++ .family = PF_MOST,
++ .owner = THIS_MODULE,
++ .create = sync_sock_create,
++};
++
++
++static int __init sync_init(void)
++{
++ int err;
++
++ err = proto_register(&sync_sk_proto, 0);
++ if (err < 0)
++ return err;
++
++ err = most_sock_register(MOSTPROTO_SYNC, &sync_sock_family_ops);
++ if (err < 0) {
++ printk(KERN_ERR "MOST socket registration failed\n");
++ return err;
++ }
++
++ printk(KERN_INFO "MOST synchronous socket layer initialized\n");
++
++ return 0;
++}
++
++static void __exit sync_exit(void)
++{
++ if (most_sock_unregister(MOSTPROTO_SYNC) < 0)
++ printk(KERN_ERR "SYNC socket unregistration failed\n");
++
++ proto_unregister(&sync_sk_proto);
++}
++
++module_init(sync_init);
++module_exit(sync_exit);
++
++MODULE_DESCRIPTION("Most Syncronous");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("most-proto-2");
++
+diff --git a/sound/drivers/Kconfig b/sound/drivers/Kconfig
+index 84714a6..54ad4e7 100644
+--- a/sound/drivers/Kconfig
++++ b/sound/drivers/Kconfig
+@@ -182,4 +182,17 @@ config SND_AC97_POWER_SAVE_DEFAULT
+ The default time-out value in seconds for AC97 automatic
+ power-save mode. 0 means to disable the power-save mode.
+
++config SND_TIMBERDALE_I2S
++ tristate "The timberdale FPGA I2S driver"
++ depends on MFD_TIMBERDALE && HAS_IOMEM
++ default y
++ help
++ Say Y here to enable driver for the I2S block found within the
++ Timberdale FPGA.
++ There is support for up to 8 I2S channels, in either transmitter
++ or receiver mode.
++
++ To compile this driver as a module, choose M here: the module
++ will be called snd-timbi2s.
++
+ endif # SND_DRIVERS
+diff --git a/sound/drivers/Makefile b/sound/drivers/Makefile
+index d4a07f9..dea2eed 100644
+--- a/sound/drivers/Makefile
++++ b/sound/drivers/Makefile
+@@ -10,6 +10,7 @@ snd-portman2x4-objs := portman2x4.o
+ snd-serial-u16550-objs := serial-u16550.o
+ snd-virmidi-objs := virmidi.o
+ snd-ml403-ac97cr-objs := ml403-ac97cr.o pcm-indirect2.o
++snd-timbi2s-objs := timbi2s.o
+
+ # Toplevel Module Dependency
+ obj-$(CONFIG_SND_DUMMY) += snd-dummy.o
+@@ -19,5 +20,6 @@ obj-$(CONFIG_SND_MTPAV) += snd-mtpav.o
+ obj-$(CONFIG_SND_MTS64) += snd-mts64.o
+ obj-$(CONFIG_SND_PORTMAN2X4) += snd-portman2x4.o
+ obj-$(CONFIG_SND_ML403_AC97CR) += snd-ml403-ac97cr.o
++obj-$(CONFIG_SND_TIMBERDALE_I2S) += snd-timbi2s.o
+
+ obj-$(CONFIG_SND) += opl3/ opl4/ mpu401/ vx/ pcsp/
+diff --git a/sound/drivers/timbi2s.c b/sound/drivers/timbi2s.c
+new file mode 100644
+index 0000000..d1269fa
+--- /dev/null
++++ b/sound/drivers/timbi2s.c
+@@ -0,0 +1,759 @@
++/*
++ * timbi2s.c timberdale FPGA I2S driver
++ * Copyright (c) 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Timberdale FPGA I2S
++ *
++ */
++
++#include <linux/io.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <sound/core.h>
++#include <sound/pcm.h>
++#include <sound/pcm_params.h>
++#include <sound/initval.h>
++#include <sound/timbi2s.h>
++
++#define DRIVER_NAME "timb-i2s"
++
++#define MAX_BUSSES 10
++
++#define TIMBI2S_REG_VER 0x00
++#define TIMBI2S_REG_UIR 0x04
++
++#define TIMBI2S_BUS_PRESCALE 0x00
++#define TIMBI2S_BUS_ICLR 0x04
++#define TIMBI2S_BUS_IPR 0x08
++#define TIMBI2S_BUS_ISR 0x0c
++#define TIMBI2S_BUS_IER 0x10
++
++
++#define TIMBI2S_IRQ_TX_FULL 0x01
++#define TIMBI2S_IRQ_TX_ALMOST_FULL 0x02
++#define TIMBI2S_IRQ_TX_ALMOST_EMPTY 0x04
++#define TIMBI2S_IRQ_TX_EMPTY 0x08
++
++#define TIMBI2S_IRQ_RX_FULL 0x01
++#define TIMBI2S_IRQ_RX_ALMOST_FULL 0x02
++#define TIMBI2S_IRQ_RX_ALMOST_EMPTY 0x04
++#define TIMBI2S_IRQ_RX_NOT_EMPTY 0x08
++
++#define TIMBI2S_BUS_ICOR 0x14
++#define TIMBI2S_ICOR_TX_ENABLE 0x00000001
++#define TIMBI2S_ICOR_RX_ENABLE 0x00000002
++#define TIMBI2S_ICOR_LFIFO_RST 0x00000004
++#define TIMBI2S_ICOR_RFIFO_RST 0x00000008
++#define TIMBI2S_ICOR_FIFO_RST (TIMBI2S_ICOR_LFIFO_RST | TIMBI2S_ICOR_RFIFO_RST)
++#define TIMBI2S_ICOR_SOFT_RST 0x00000010
++#define TIMBI2S_ICOR_WORD_SEL_LEFT_SHIFT 8
++#define TIMBI2S_ICOR_WORD_SEL_LEFT_MASK (0xff << 8)
++#define TIMBI2S_ICOR_WORD_SEL_RIGHT_SHIFT 16
++#define TIMBI2S_ICOR_WORD_SEL_RIGHT_MASK (0xff << 16)
++#define TIMBI2S_ICOR_CLK_MASTER 0x10000000
++#define TIMBI2S_ICOR_RX_ID 0x20000000
++#define TIMBI2S_ICOR_TX_ID 0x40000000
++#define TIMBI2S_ICOR_WORD_SEL 0x80000000
++#define TIMBI2S_BUS_FIFO 0x18
++
++#define TIMBI2S_BUS_REG_AREA_SIZE (TIMBI2S_BUS_FIFO - \
++ TIMBI2S_BUS_PRESCALE + 4)
++#define TIMBI2S_FIRST_BUS_AREA_OFS 0x08
++
++struct timbi2s_bus {
++ u32 flags;
++ u32 prescale;
++ struct snd_pcm *pcm;
++ struct snd_card *card;
++ struct snd_pcm_substream *substream;
++ unsigned buf_pos;
++ spinlock_t lock; /* mutual exclusion */
++ u16 sample_rate;
++};
++
++#define BUS_RX 0x200
++#define BUS_MASTER 0x100
++#define BUS_INDEX_MASK 0xff
++#define BUS_INDEX(b) ((b)->flags & BUS_INDEX_MASK)
++#define BUS_IS_MASTER(b) ((b)->flags & BUS_MASTER)
++#define BUS_IS_RX(b) ((b)->flags & BUS_RX)
++
++#define SET_BUS_INDEX(b, id) ((b)->flags = ((b)->flags & ~BUS_INDEX_MASK) | id)
++#define SET_BUS_MASTER(b) ((b)->flags |= BUS_MASTER)
++#define SET_BUS_RX(b) ((b)->flags |= BUS_RX)
++
++#define TIMBI2S_BUS_OFFSET(bus) (TIMBI2S_FIRST_BUS_AREA_OFS + \
++ TIMBI2S_BUS_REG_AREA_SIZE * BUS_INDEX(bus))
++
++struct timbi2s {
++ void __iomem *membase;
++ int irq;
++ struct tasklet_struct tasklet;
++ u32 main_clk;
++ unsigned num_busses;
++ struct timbi2s_bus busses[0];
++};
++
++#define BITS_PER_CHANNEL 16
++#define NUM_CHANNELS 2
++
++#define SAMPLE_SIZE ((NUM_CHANNELS * BITS_PER_CHANNEL) / 8)
++#define NUM_PERIODS 32
++#define NUM_SAMPLES 256
++
++static struct snd_pcm_hardware timbi2s_rx_hw = {
++ .info = (SNDRV_PCM_INFO_MMAP
++ | SNDRV_PCM_INFO_MMAP_VALID
++ | SNDRV_PCM_INFO_INTERLEAVED),
++ .formats = SNDRV_PCM_FMTBIT_S16_LE,
++ .rates = SNDRV_PCM_RATE_44100,
++ .rate_min = 44100,
++ .rate_max = 44100,
++ .channels_min = 2, /* only stereo */
++ .channels_max = 2,
++ .buffer_bytes_max = NUM_PERIODS * SAMPLE_SIZE * NUM_SAMPLES,
++ .period_bytes_min = SAMPLE_SIZE * NUM_SAMPLES,
++ .period_bytes_max = SAMPLE_SIZE * NUM_SAMPLES,
++ .periods_min = NUM_PERIODS,
++ .periods_max = NUM_PERIODS,
++};
++
++static struct snd_pcm_hardware timbi2s_tx_hw = {
++ .info = (SNDRV_PCM_INFO_MMAP
++ | SNDRV_PCM_INFO_MMAP_VALID
++ | SNDRV_PCM_INFO_INTERLEAVED),
++ .formats = SNDRV_PCM_FMTBIT_S16_LE,
++ .rates = SNDRV_PCM_RATE_44100,
++ .rate_min = 44100,
++ .rate_max = 44100,
++ .channels_min = 2, /* only stereo */
++ .channels_max = 2,
++ .buffer_bytes_max = NUM_PERIODS * SAMPLE_SIZE * NUM_SAMPLES,
++ .period_bytes_min = SAMPLE_SIZE * NUM_SAMPLES,
++ .period_bytes_max = SAMPLE_SIZE * NUM_SAMPLES,
++ .periods_min = NUM_PERIODS,
++ .periods_max = NUM_PERIODS,
++};
++
++static inline void timbi2s_bus_write(struct timbi2s_bus *bus, u32 val, u32 reg)
++{
++ struct timbi2s *i2s = snd_pcm_chip(bus->card);
++
++ iowrite32(val, i2s->membase + TIMBI2S_BUS_OFFSET(bus) + reg);
++}
++
++static inline u32 timbi2s_bus_read(struct timbi2s_bus *bus, u32 reg)
++{
++ struct timbi2s *i2s = snd_pcm_chip(bus->card);
++
++ return ioread32(i2s->membase + TIMBI2S_BUS_OFFSET(bus) + reg);
++}
++
++static u32 timbi2s_calc_prescale(u32 main_clk, u32 sample_rate)
++{
++ u32 halfbit_rate = sample_rate * BITS_PER_CHANNEL * NUM_CHANNELS * 2;
++ return main_clk / halfbit_rate;
++}
++
++static int timbi2s_open(struct snd_pcm_substream *substream)
++{
++ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
++ struct snd_card *card = bus->card;
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Entry, substream: %p, bus: %d\n", __func__, substream,
++ BUS_INDEX(bus));
++
++ if (BUS_IS_RX(bus))
++ runtime->hw = timbi2s_rx_hw;
++ else
++ runtime->hw = timbi2s_tx_hw;
++
++ if (bus->sample_rate == 8000) {
++ runtime->hw.rates = SNDRV_PCM_RATE_8000;
++ runtime->hw.rate_min = 8000;
++ runtime->hw.rate_max = 8000;
++ }
++
++ bus->substream = substream;
++
++ return 0;
++}
++
++static int timbi2s_close(struct snd_pcm_substream *substream)
++{
++ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
++ struct snd_card *card = bus->card;
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Entry, substream: %p, bus: %d\n", __func__, substream,
++ BUS_INDEX(bus));
++
++ bus->substream = NULL;
++
++ return 0;
++}
++
++static int timbi2s_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *hw_params)
++{
++ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
++ struct snd_card *card = bus->card;
++ struct timbi2s *i2s = snd_pcm_chip(card);
++ int err;
++
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Entry, substream: %p, bus: %d\n", __func__,
++ substream, BUS_INDEX(bus));
++
++ bus->prescale = timbi2s_calc_prescale(i2s->main_clk,
++ params_rate(hw_params));
++
++ err = snd_pcm_lib_malloc_pages(substream,
++ params_buffer_bytes(hw_params));
++ if (err < 0)
++ return err;
++
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Rate: %d, format: %d\n", __func__, params_rate(hw_params),
++ params_format(hw_params));
++
++ return 0;
++}
++
++static int timbi2s_hw_free(struct snd_pcm_substream *substream)
++{
++ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
++ struct snd_card *card = bus->card;
++ unsigned long flags;
++
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Entry, substream: %p\n", __func__, substream);
++
++ spin_lock_irqsave(&bus->lock, flags);
++ /* disable interrupts */
++ timbi2s_bus_write(bus, 0, TIMBI2S_BUS_IER);
++ spin_unlock_irqrestore(&bus->lock, flags);
++
++ /* disable TX and RX */
++ timbi2s_bus_write(bus, TIMBI2S_ICOR_FIFO_RST | TIMBI2S_ICOR_SOFT_RST,
++ TIMBI2S_BUS_ICOR);
++
++ return snd_pcm_lib_free_pages(substream);
++}
++
++static int timbi2s_prepare(struct snd_pcm_substream *substream)
++{
++ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
++ struct snd_card *card = bus->card;
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ u32 data;
++
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Entry, substream: %p, bus: %d, buffer: %d, period: %d\n",
++ __func__, substream,
++ BUS_INDEX(bus), (int)snd_pcm_lib_buffer_bytes(substream),
++ (int)snd_pcm_lib_period_bytes(substream));
++
++ if (runtime->dma_addr & 3 || runtime->buffer_size & 3) {
++ dev_err(snd_card_get_device_link(card),
++ "%s: Only word aligned data allowed\n", __func__);
++ return -EINVAL;
++ }
++
++ if (runtime->channels != NUM_CHANNELS) {
++ dev_err(snd_card_get_device_link(card),
++ "%s: Number of channels unsupported %d\n", __func__,
++ runtime->channels);
++ return -EINVAL;
++ }
++
++ /* reset */
++ timbi2s_bus_write(bus, TIMBI2S_ICOR_FIFO_RST | TIMBI2S_ICOR_SOFT_RST,
++ TIMBI2S_BUS_ICOR);
++
++ /* only masters have prescaling, don't write if not needed */
++ if (BUS_IS_MASTER(bus))
++ timbi2s_bus_write(bus, bus->prescale, TIMBI2S_BUS_PRESCALE);
++
++ /* write word select */
++ data = ((BITS_PER_CHANNEL << TIMBI2S_ICOR_WORD_SEL_LEFT_SHIFT) &
++ TIMBI2S_ICOR_WORD_SEL_LEFT_MASK) |
++ ((BITS_PER_CHANNEL << TIMBI2S_ICOR_WORD_SEL_RIGHT_SHIFT) &
++ TIMBI2S_ICOR_WORD_SEL_RIGHT_MASK);
++ timbi2s_bus_write(bus, data, TIMBI2S_BUS_ICOR);
++
++ bus->buf_pos = 0;
++
++ return 0;
++}
++
++static int
++timbi2s_playback_trigger(struct snd_pcm_substream *substream, int cmd)
++{
++ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
++ struct snd_card *card = bus->card;
++ unsigned long flags;
++ u32 data;
++
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Entry, substream: %p, bus: %d, cmd: %d\n", __func__,
++ substream, BUS_INDEX(bus), cmd);
++
++ switch (cmd) {
++ case SNDRV_PCM_TRIGGER_START:
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Got TRIGGER_START command\n", __func__);
++
++ /* start */
++ data = timbi2s_bus_read(bus, TIMBI2S_BUS_ICOR);
++ data |= TIMBI2S_ICOR_TX_ENABLE;
++ timbi2s_bus_write(bus, data, TIMBI2S_BUS_ICOR);
++
++ /* enable interrupts */
++ timbi2s_bus_write(bus, TIMBI2S_IRQ_TX_ALMOST_EMPTY,
++ TIMBI2S_BUS_IER);
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: ISR: %x, ICOR: %x\n", __func__,
++ timbi2s_bus_read(bus, TIMBI2S_BUS_ISR),
++ timbi2s_bus_read(bus, TIMBI2S_BUS_ICOR));
++ break;
++ case SNDRV_PCM_TRIGGER_STOP:
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Got TRIGGER_STOP command\n", __func__);
++
++ spin_lock_irqsave(&bus->lock, flags);
++ /* disable interrupts */
++ timbi2s_bus_write(bus, 0, TIMBI2S_BUS_IER);
++ spin_unlock_irqrestore(&bus->lock, flags);
++
++ /* reset */
++ data = timbi2s_bus_read(bus, TIMBI2S_BUS_ICOR);
++ data &= ~TIMBI2S_ICOR_TX_ENABLE;
++
++ timbi2s_bus_write(bus, data, TIMBI2S_BUS_ICOR);
++ break;
++ default:
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Got unsupported command\n", __func__);
++
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int
++timbi2s_capture_trigger(struct snd_pcm_substream *substream, int cmd)
++{
++ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
++ struct snd_card *card = bus->card;
++ unsigned long flags;
++
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Entry, substream: %p, bus: %d, cmd: %d\n", __func__,
++ substream, BUS_INDEX(bus), cmd);
++
++ switch (cmd) {
++ case SNDRV_PCM_TRIGGER_START:
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Got TRIGGER_START command\n", __func__);
++
++ timbi2s_bus_write(bus, TIMBI2S_ICOR_RX_ENABLE |
++ TIMBI2S_ICOR_FIFO_RST, TIMBI2S_BUS_ICOR);
++
++ timbi2s_bus_write(bus, TIMBI2S_IRQ_RX_ALMOST_FULL,
++ TIMBI2S_BUS_IER);
++ break;
++ case SNDRV_PCM_TRIGGER_STOP:
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Got TRIGGER_STOP command\n", __func__);
++ /* disable interrupts */
++ spin_lock_irqsave(&bus->lock, flags);
++ timbi2s_bus_write(bus, 0, TIMBI2S_BUS_IER);
++ spin_unlock_irqrestore(&bus->lock, flags);
++ /* Stop RX */
++ timbi2s_bus_write(bus, 0, TIMBI2S_BUS_ICOR);
++ break;
++ default:
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Got unsupported command\n", __func__);
++
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static snd_pcm_uframes_t
++timbi2s_pointer(struct snd_pcm_substream *substream)
++{
++ struct timbi2s_bus *bus = snd_pcm_substream_chip(substream);
++ struct snd_card *card = bus->card;
++ snd_pcm_uframes_t ret;
++
++ dev_dbg(snd_card_get_device_link(card),
++ "%s: Entry, substream: %p\n", __func__, substream);
++
++ ret = bytes_to_frames(substream->runtime, bus->buf_pos);
++ if (ret >= substream->runtime->buffer_size)
++ ret -= substream->runtime->buffer_size;
++
++ return ret;
++}
++
++static struct snd_pcm_ops timbi2s_playback_ops = {
++ .open = timbi2s_open,
++ .close = timbi2s_close,
++ .ioctl = snd_pcm_lib_ioctl,
++ .hw_params = timbi2s_hw_params,
++ .hw_free = timbi2s_hw_free,
++ .prepare = timbi2s_prepare,
++ .trigger = timbi2s_playback_trigger,
++ .pointer = timbi2s_pointer,
++};
++
++static struct snd_pcm_ops timbi2s_capture_ops = {
++ .open = timbi2s_open,
++ .close = timbi2s_close,
++ .ioctl = snd_pcm_lib_ioctl,
++ .hw_params = timbi2s_hw_params,
++ .hw_free = timbi2s_hw_free,
++ .prepare = timbi2s_prepare,
++ .trigger = timbi2s_capture_trigger,
++ .pointer = timbi2s_pointer,
++};
++
++static void timbi2s_irq_process_rx(struct timbi2s_bus *bus)
++{
++ struct snd_pcm_runtime *runtime = bus->substream->runtime;
++ u32 buffer_size = snd_pcm_lib_buffer_bytes(bus->substream);
++ u32 ipr = timbi2s_bus_read(bus, TIMBI2S_BUS_IPR);
++ int i;
++
++ dev_dbg(snd_card_get_device_link(bus->card),
++ "%s: Entry, bus: %d, IPR %x\n", __func__, BUS_INDEX(bus), ipr);
++
++ for (i = 0; i < NUM_SAMPLES; i++) {
++ *(u32 *)(runtime->dma_area + bus->buf_pos) =
++ timbi2s_bus_read(bus, TIMBI2S_BUS_FIFO);
++ bus->buf_pos += SAMPLE_SIZE;
++ bus->buf_pos %= buffer_size;
++ }
++
++ timbi2s_bus_write(bus, ipr, TIMBI2S_BUS_ICLR);
++
++ /* inform ALSA that a period was received */
++ snd_pcm_period_elapsed(bus->substream);
++}
++
++static void timbi2s_irq_process_tx(struct timbi2s_bus *bus)
++{
++ struct snd_pcm_runtime *runtime = bus->substream->runtime;
++ u32 buffer_size = snd_pcm_lib_buffer_bytes(bus->substream);
++ u32 ipr = timbi2s_bus_read(bus, TIMBI2S_BUS_IPR);
++ int i;
++
++ dev_dbg(snd_card_get_device_link(bus->card),
++ "%s: Entry, bus: %d, IPR %x\n", __func__, BUS_INDEX(bus), ipr);
++
++ for (i = 0; i < NUM_SAMPLES; i++) {
++ timbi2s_bus_write(bus,
++ *(u32 *)(runtime->dma_area + bus->buf_pos),
++ TIMBI2S_BUS_FIFO);
++ bus->buf_pos += SAMPLE_SIZE;
++ bus->buf_pos %= buffer_size;
++ }
++
++ dev_dbg(snd_card_get_device_link(bus->card), "%s: ISR: %x, ICOR: %x\n",
++ __func__, timbi2s_bus_read(bus, TIMBI2S_BUS_ISR),
++ timbi2s_bus_read(bus, TIMBI2S_BUS_ICOR));
++
++ timbi2s_bus_write(bus, ipr, TIMBI2S_BUS_ICLR);
++
++ /* inform ALSA that a period was received */
++ snd_pcm_period_elapsed(bus->substream);
++}
++
++static void timbi2s_tasklet(unsigned long arg)
++{
++ struct snd_card *card = (struct snd_card *)arg;
++ struct timbi2s *i2s = snd_pcm_chip(card);
++ u32 uir;
++ unsigned i;
++
++ dev_dbg(snd_card_get_device_link(card), "%s: Entry, UIR %x\n",
++ __func__, uir);
++
++ while ((uir = ioread32(i2s->membase + TIMBI2S_REG_UIR)) != 0) {
++ for (i = 0; i < i2s->num_busses; i++)
++ if (uir & (1 << i)) {
++ struct timbi2s_bus *bus = i2s->busses + i;
++ if (BUS_IS_RX(bus))
++ timbi2s_irq_process_rx(bus);
++ else
++ timbi2s_irq_process_tx(bus);
++ }
++ }
++
++ enable_irq(i2s->irq);
++}
++
++static irqreturn_t timbi2s_irq(int irq, void *devid)
++{
++ struct timbi2s *i2s = devid;
++
++ tasklet_schedule(&i2s->tasklet);
++ disable_irq_nosync(i2s->irq);
++
++ return IRQ_HANDLED;
++}
++
++static int timbi2s_setup_busses(struct snd_card *card,
++ struct platform_device *pdev)
++{
++ const struct timbi2s_platform_data *pdata = pdev->dev.platform_data;
++ unsigned i;
++
++ dev_dbg(&pdev->dev, "%s: Entry, no busses: %d, busses: %p\n", __func__,
++ pdata->num_busses, pdata->busses);
++
++ for (i = 0; i < pdata->num_busses; i++) {
++ const struct timbi2s_bus_data *bus_data = pdata->busses + i;
++ int capture = bus_data->rx;
++ int err;
++ u32 ctl;
++ struct timbi2s *i2s = snd_pcm_chip(card);
++ struct timbi2s_bus *bus = i2s->busses + i;
++
++ dev_dbg(&pdev->dev, "%s: Setting up bus: %d\n", __func__, i);
++
++ SET_BUS_INDEX(bus, i);
++ bus->sample_rate = bus_data->sample_rate;
++ bus->card = card;
++ /* prescaling only applies to master busses, we use the
++ * knowledge of that to identify the direction later
++ * eg, bus->prescale != 0 -> master bus
++ */
++ if (capture)
++ SET_BUS_RX(bus);
++
++ spin_lock_init(&bus->lock);
++
++ if (bus->sample_rate != 44100 && bus->sample_rate != 8000) {
++ dev_err(&pdev->dev,
++ "Unsupported bitrate: %d\n", bus->sample_rate);
++ return -EINVAL;
++ }
++
++ dev_dbg(&pdev->dev, "%s: Will check HW direction on bus: %d\n",
++ __func__, BUS_INDEX(bus));
++
++ /* check that the HW agrees with the direction */
++ ctl = timbi2s_bus_read(bus, TIMBI2S_BUS_ICOR);
++ if ((capture && !(ctl & TIMBI2S_ICOR_RX_ID)) ||
++ (!capture && !(ctl & TIMBI2S_ICOR_TX_ID))) {
++ dev_dbg(&pdev->dev,
++ "HW and platform data disagree on direction\n");
++ return -EINVAL;
++ }
++
++ dev_dbg(&pdev->dev, "%s: Will create PCM channel for bus: %d\n",
++ __func__, BUS_INDEX(bus));
++ err = snd_pcm_new(card, bus_data->name ? bus_data->name :
++ card->shortname, i, !capture, capture, &bus->pcm);
++ if (err) {
++ dev_dbg(&pdev->dev, "%s, Failed to create pcm: %d\n",
++ __func__, err);
++ return err;
++ }
++
++ if (capture)
++ snd_pcm_set_ops(bus->pcm, SNDRV_PCM_STREAM_CAPTURE,
++ &timbi2s_capture_ops);
++ if (!capture)
++ snd_pcm_set_ops(bus->pcm, SNDRV_PCM_STREAM_PLAYBACK,
++ &timbi2s_playback_ops);
++
++ dev_dbg(&pdev->dev, "%s: Will preallocate buffers to bus: %d\n",
++ __func__, BUS_INDEX(bus));
++
++ err = snd_pcm_lib_preallocate_pages_for_all(bus->pcm,
++ SNDRV_DMA_TYPE_CONTINUOUS,
++ snd_dma_continuous_data(GFP_KERNEL),
++ NUM_SAMPLES * NUM_PERIODS * SAMPLE_SIZE * 2,
++ NUM_SAMPLES * NUM_PERIODS * SAMPLE_SIZE * 2);
++ if (err) {
++ dev_dbg(&pdev->dev, "%s, Failed to create pcm: %d\n",
++ __func__, err);
++
++ return err;
++ }
++
++ bus->pcm->private_data = bus;
++ bus->pcm->info_flags = 0;
++ strcpy(bus->pcm->name, card->shortname);
++ i2s->num_busses++;
++ }
++
++ return 0;
++}
++
++static int __devinit timbi2s_probe(struct platform_device *pdev)
++{
++ int err;
++ int irq;
++ struct timbi2s *i2s;
++ struct resource *iomem;
++ const struct timbi2s_platform_data *pdata = pdev->dev.platform_data;
++ struct snd_card *card;
++ u32 ver;
++
++ if (!pdata) {
++ err = -ENODEV;
++ goto out;
++ }
++
++ if (pdata->num_busses > MAX_BUSSES) {
++ err = -EINVAL;
++ goto out;
++ }
++
++ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!iomem) {
++ err = -ENODEV;
++ goto out;
++ }
++
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0) {
++ err = -ENODEV;
++ goto out;
++ }
++
++ err = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
++ THIS_MODULE, sizeof(struct timbi2s) +
++ sizeof(struct timbi2s_bus) * pdata->num_busses, &card);
++ if (err)
++ goto out;
++
++ strcpy(card->driver, "Timberdale I2S");
++ strcpy(card->shortname, "Timberdale I2S");
++ sprintf(card->longname, "Timberdale I2S Driver");
++
++ snd_card_set_dev(card, &pdev->dev);
++
++ i2s = snd_pcm_chip(card);
++
++ if (!request_mem_region(iomem->start, resource_size(iomem),
++ DRIVER_NAME)) {
++ err = -EBUSY;
++ goto err_region;
++ }
++
++ i2s->membase = ioremap(iomem->start, resource_size(iomem));
++ if (!i2s->membase) {
++ err = -ENOMEM;
++ goto err_ioremap;
++ }
++
++ err = timbi2s_setup_busses(card, pdev);
++ if (err)
++ goto err_setup;
++
++ tasklet_init(&i2s->tasklet, timbi2s_tasklet, (unsigned long)card);
++ i2s->irq = irq;
++ i2s->main_clk = pdata->main_clk;
++
++ err = request_irq(irq, timbi2s_irq, 0, DRIVER_NAME, i2s);
++ if (err)
++ goto err_request_irq;
++
++ err = snd_card_register(card);
++ if (err)
++ goto err_register;
++
++ platform_set_drvdata(pdev, card);
++
++ ver = ioread32(i2s->membase + TIMBI2S_REG_VER);
++
++ printk(KERN_INFO
++ "Driver for Timberdale I2S (ver: %d.%d) successfully probed.\n",
++ ver >> 16 , ver & 0xffff);
++
++ return 0;
++
++err_register:
++ free_irq(irq, card);
++err_request_irq:
++err_setup:
++ iounmap(i2s->membase);
++err_ioremap:
++ release_mem_region(iomem->start, resource_size(iomem));
++err_region:
++ snd_card_free(card);
++out:
++ printk(KERN_ERR DRIVER_NAME": Failed to register: %d\n", err);
++
++ return err;
++}
++
++static int __devexit timbi2s_remove(struct platform_device *pdev)
++{
++ struct snd_card *card = platform_get_drvdata(pdev);
++ struct timbi2s *i2s = snd_pcm_chip(card);
++ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++ tasklet_kill(&i2s->tasklet);
++ free_irq(i2s->irq, i2s);
++
++ iounmap(i2s->membase);
++ release_mem_region(iomem->start, resource_size(iomem));
++ snd_card_free(card);
++
++ platform_set_drvdata(pdev, 0);
++ return 0;
++}
++
++static struct platform_driver timbi2s_platform_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
++ },
++ .probe = timbi2s_probe,
++ .remove = __devexit_p(timbi2s_remove),
++};
++
++/*--------------------------------------------------------------------------*/
++
++static int __init timbi2s_init(void)
++{
++ return platform_driver_register(&timbi2s_platform_driver);
++}
++
++static void __exit timbi2s_exit(void)
++{
++ platform_driver_unregister(&timbi2s_platform_driver);
++}
++
++module_init(timbi2s_init);
++module_exit(timbi2s_exit);
++
++MODULE_ALIAS("platform:"DRIVER_NAME);
++MODULE_DESCRIPTION("Timberdale I2S bus driver");
++MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
++MODULE_LICENSE("GPL v2");
+
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-rt2860-1-2.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-rt2860-1-2.patch
new file mode 100644
index 0000000..e67a826
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-rt2860-1-2.patch
@@ -0,0 +1,48 @@
+
+We should only send SIOCGIWAP event to notify wpa_supplicant about a lost
+link, not to tell it about our disassociation being done. If we send such
+event in both cases, this driver will drag wpa_supplicant into an infinite
+loop.
+
+Signed-off-by: Samuel Ortiz <sameo@linux.intel.com>
+---
+ drivers/staging/rt2860/sta/assoc.c | 6 ++++--
+ drivers/staging/rt2860/sta_ioctl.c | 6 ++++++
+ 2 files changed, 10 insertions(+), 2 deletions(-)
+
+--
+Index: b/drivers/staging/rt2860/sta/assoc.c
+===================================================================
+--- a/drivers/staging/rt2860/sta/assoc.c 2010-01-05 11:25:22.000000000 +0100
++++ b/drivers/staging/rt2860/sta/assoc.c 2010-01-05 18:09:34.000000000 +0100
+@@ -818,10 +818,11 @@ void MlmeDisassocReqAction(struct rt_rtm
+ COPY_MAC_ADDR(pAd->StaCfg.DisassocSta, pDisassocReq->Addr);
+
+ RTMPSetTimer(&pAd->MlmeAux.DisassocTimer, Timeout); /* in mSec */
+- pAd->Mlme.AssocMachine.CurrState = DISASSOC_WAIT_RSP;
+
+- RtmpOSWrielessEventSend(pAd, SIOCGIWAP, -1, NULL, NULL, 0);
++ pAd->Mlme.AssocMachine.CurrState = DISASSOC_WAIT_RSP;
+
++ if (INFRA_ON(pAd) || ADHOC_ON(pAd))
++ RtmpOSWrielessEventSend(pAd, SIOCGIWAP, -1, NULL, NULL, 0);
+ }
+
+ /*
+Index: b/drivers/staging/rt2860/sta_ioctl.c
+===================================================================
+--- a/drivers/staging/rt2860/sta_ioctl.c 2010-01-05 11:25:22.000000000 +0100
++++ b/drivers/staging/rt2860/sta_ioctl.c 2010-01-05 12:55:16.000000000 +0100
+@@ -602,6 +602,12 @@ int rt_ioctl_siwap(struct net_device *de
+ DBGPRINT(RT_DEBUG_TRACE,
+ ("MLME busy, reset MLME state machine!\n"));
+ }
++
++ /* No need to set our BSSID if it's not changing */
++ if (!memcmp(pAdapter->CommonCfg.Bssid, ap_addr->sa_data, ETH_ALEN) ||
++ !memcmp(pAdapter->MlmeAux.Bssid, ap_addr->sa_data, ETH_ALEN))
++ return 0;
++
+ /* tell CNTL state machine to call NdisMSetInformationComplete() after completing */
+ /* this request, because this request is initiated by NDIS. */
+ pAdapter->MlmeAux.CurrReqIsFromNdis = FALSE;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-rt2860-2-2.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-rt2860-2-2.patch
new file mode 100644
index 0000000..20402b8
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-rt2860-2-2.patch
@@ -0,0 +1,24 @@
+
+When no SSID is set, the reconnect decision should entirely be left to
+userspace. The driver should not decide which AP to associate with based on
+arbitrary policies.
+
+Signed-off-by: Samuel Ortiz <sameo@linux.intel.com>
+---
+ drivers/staging/rt2860/common/mlme.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+Index: b/drivers/staging/rt2860/common/mlme.c
+===================================================================
+--- a/drivers/staging/rt2860/common/mlme.c 2010-01-05 11:25:22.000000000 +0100
++++ b/drivers/staging/rt2860/common/mlme.c 2010-01-05 13:10:32.000000000 +0100
+@@ -1554,7 +1554,8 @@ void MlmeAutoReconnectLastSSID(struct rt
+ else if ((pAd->Mlme.CntlMachine.CurrState == CNTL_IDLE) &&
+ (MlmeValidateSSID
+ (pAd->MlmeAux.AutoReconnectSsid,
+- pAd->MlmeAux.AutoReconnectSsidLen) == TRUE)) {
++ pAd->MlmeAux.AutoReconnectSsidLen) == TRUE) &&
++ (pAd->MlmeAux.AutoReconnectSsidLen != 0)) {
+ struct rt_ndis_802_11_ssid OidSsid;
+ OidSsid.SsidLength = pAd->MlmeAux.AutoReconnectSsidLen;
+ NdisMoveMemory(OidSsid.Ssid, pAd->MlmeAux.AutoReconnectSsid,
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-timberdale-audio-fix.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-timberdale-audio-fix.patch
new file mode 100644
index 0000000..19fdf6d
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-timberdale-audio-fix.patch
@@ -0,0 +1,23 @@
+diff -uNr linux-2.6.32.orig/sound/drivers/timbi2s.c linux-2.6.32.new/sound/drivers/timbi2s.c
+--- linux-2.6.32.orig/sound/drivers/timbi2s.c 2010-02-10 10:48:58.000000000 -0800
++++ linux-2.6.32.new/sound/drivers/timbi2s.c 2010-02-10 10:51:46.000000000 -0800
+@@ -30,6 +30,10 @@
+ #include <sound/initval.h>
+ #include <sound/timbi2s.h>
+
++static int index = SNDRV_DEFAULT_IDX1;
++module_param(index, int, 0444);
++MODULE_PARM_DESC(index, "Index value for Timberdale I2S.");
++
+ #define DRIVER_NAME "timb-i2s"
+
+ #define MAX_BUSSES 10
+@@ -645,7 +649,7 @@
+ goto out;
+ }
+
+- err = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
++ err = snd_card_create(index, SNDRV_DEFAULT_STR1,
+ THIS_MODULE, sizeof(struct timbi2s) +
+ sizeof(struct timbi2s_bus) * pdata->num_busses, &card);
+ if (err)
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-vfs-tracepoints.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-vfs-tracepoints.patch
new file mode 100644
index 0000000..7bf425a
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.33-vfs-tracepoints.patch
@@ -0,0 +1,120 @@
+From f56c995174cf42d84fdad06beebacd56e700b05d Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Sun, 25 Oct 2009 15:37:04 -0700
+Subject: [PATCH] vfs: Add a trace point in the mark_inode_dirty function
+
+PowerTOP would like to be able to show who is keeping the disk
+busy by dirtying data. The most logical spot for this is in the vfs
+in the mark_inode_dirty() function, doing this on the block level
+is not possible because by the time the IO hits the block layer the
+guilty party can no longer be found ("kjournald" and "pdflush" are not
+useful answers to "who caused this file to be dirty").
+
+The trace point follows the same logic/style as the block_dump code
+and pretty much dumps the same data, just not to dmesg (and thus to
+/var/log/messages) but via the trace events streams.
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+---
+ fs/fs-writeback.c | 4 +++
+ fs/inode.c | 4 +++
+ include/trace/events/vfs.h | 53 ++++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 61 insertions(+), 0 deletions(-)
+ create mode 100644 include/trace/events/vfs.h
+
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 9d5360c..af31caf 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -25,6 +25,7 @@
+ #include <linux/blkdev.h>
+ #include <linux/backing-dev.h>
+ #include <linux/buffer_head.h>
++#include <trace/events/vfs.h>
+ #include "internal.h"
+
+ #define inode_to_bdi(inode) ((inode)->i_mapping->backing_dev_info)
+@@ -1061,6 +1062,9 @@ void __mark_inode_dirty(struct inode *inode, int flags)
+ sb->s_op->dirty_inode(inode);
+ }
+
++ if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES))
++ trace_dirty_inode(inode, current);
++
+ /*
+ * make sure that changes are seen by all cpus before we test i_state
+ * -- mikulas
+diff --git a/fs/inode.c b/fs/inode.c
+index 4d8e3be..a61e8ba 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -1624,3 +1624,7 @@ void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
+ inode->i_ino);
+ }
+ EXPORT_SYMBOL(init_special_inode);
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/vfs.h>
++
+diff --git a/include/trace/events/vfs.h b/include/trace/events/vfs.h
+new file mode 100644
+index 0000000..21cf9fb
+--- /dev/null
++++ b/include/trace/events/vfs.h
+@@ -0,0 +1,53 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM vfs
++
++#if !defined(_TRACE_VFS_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_VFS_H
++
++/*
++ * Tracepoint for dirtying an inode:
++ */
++TRACE_EVENT(dirty_inode,
++
++ TP_PROTO(struct inode *inode, struct task_struct *task),
++
++ TP_ARGS(inode, task),
++
++ TP_STRUCT__entry(
++ __array( char, comm, TASK_COMM_LEN )
++ __field( pid_t, pid )
++ __array( char, dev, 16 )
++ __array( char, file, 32 )
++ ),
++
++ TP_fast_assign(
++ if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
++ struct dentry *dentry;
++ const char *name = "?";
++
++ dentry = d_find_alias(inode);
++ if (dentry) {
++ spin_lock(&dentry->d_lock);
++ name = (const char *) dentry->d_name.name;
++ }
++
++ memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
++ __entry->pid = task->pid;
++ strlcpy(__entry->file, name, 32);
++ strlcpy(__entry->dev, inode->i_sb->s_id, 16);
++
++ if (dentry) {
++ spin_unlock(&dentry->d_lock);
++ dput(dentry);
++ }
++ }
++ ),
++
++ TP_printk("task=%i (%s) file=%s dev=%s",
++ __entry->pid, __entry->comm, __entry->file, __entry->dev)
++);
++
++#endif /* _TRACE_VFS_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--
+1.6.0.6
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-CVE-tipc-Fix-oops-on-send-prior-to-entering-networked-mode.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-CVE-tipc-Fix-oops-on-send-prior-to-entering-networked-mode.patch
new file mode 100644
index 0000000..06bed6f
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-CVE-tipc-Fix-oops-on-send-prior-to-entering-networked-mode.patch
@@ -0,0 +1,218 @@
+From d0021b252eaf65ca07ed14f0d66425dd9ccab9a6 Mon Sep 17 00:00:00 2001
+From: Neil Horman <nhorman@tuxdriver.com>
+Date: Wed, 3 Mar 2010 08:31:23 +0000
+Subject: [PATCH] tipc: Fix oops on send prior to entering networked mode (v3)
+Patch-mainline: 2.6.34
+
+Fix TIPC to disallow sending to remote addresses prior to entering NET_MODE
+
+user programs can oops the kernel by sending datagrams via AF_TIPC prior to
+entering networked mode. The following backtrace has been observed:
+
+ID: 13459 TASK: ffff810014640040 CPU: 0 COMMAND: "tipc-client"
+[exception RIP: tipc_node_select_next_hop+90]
+RIP: ffffffff8869d3c3 RSP: ffff81002d9a5ab8 RFLAGS: 00010202
+RAX: 0000000000000001 RBX: 0000000000000001 RCX: 0000000000000001
+RDX: 0000000000000000 RSI: 0000000000000001 RDI: 0000000001001001
+RBP: 0000000001001001 R8: 0074736575716552 R9: 0000000000000000
+R10: ffff81003fbd0680 R11: 00000000000000c8 R12: 0000000000000008
+R13: 0000000000000001 R14: 0000000000000001 R15: ffff810015c6ca00
+ORIG_RAX: ffffffffffffffff CS: 0010 SS: 0018
+RIP: 0000003cbd8d49a3 RSP: 00007fffc84e0be8 RFLAGS: 00010206
+RAX: 000000000000002c RBX: ffffffff8005d116 RCX: 0000000000000000
+RDX: 0000000000000008 RSI: 00007fffc84e0c00 RDI: 0000000000000003
+RBP: 0000000000000000 R8: 00007fffc84e0c10 R9: 0000000000000010
+R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+R13: 00007fffc84e0d10 R14: 0000000000000000 R15: 00007fffc84e0c30
+ORIG_RAX: 000000000000002c CS: 0033 SS: 002b
+
+What happens is that, when the tipc module in inserted it enters a standalone
+node mode in which communication to its own address is allowed <0.0.0> but not
+to other addresses, since the appropriate data structures have not been
+allocated yet (specifically the tipc_net pointer). There is nothing stopping a
+client from trying to send such a message however, and if that happens, we
+attempt to dereference tipc_net.zones while the pointer is still NULL, and
+explode. The fix is pretty straightforward. Since these oopses all arise from
+the dereference of global pointers prior to their assignment to allocated
+values, and since these allocations are small (about 2k total), lets convert
+these pointers to static arrays of the appropriate size. All the accesses to
+these bits consider 0/NULL to be a non match when searching, so all the lookups
+still work properly, and there is no longer a chance of a bad dereference
+anywhere. As a bonus, this lets us eliminate the setup/teardown routines for
+those pointers, and eliminates the need to perform any locking around them to
+prevent access while they are being allocated/freed.
+
+I've updated the tipc_net structure to behave this way to fix the exact reported
+problem, and also fixed up the tipc_bearers and media_list arrays to fix an
+obvious similar problem that arises from issuing tipc-config commands to
+manipulate bearers/links prior to entering networked mode
+
+I've tested this for a few hours by running the sanity tests and stress test
+with the tipcutils suite, and nothing has fallen over. There have been a few
+lockdep warnings, but those were there before, and can be addressed later, as
+they didn't actually result in any deadlock.
+
+Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
+CC: Allan Stephens <allan.stephens@windriver.com>
+CC: David S. Miller <davem@davemloft.net>
+CC: tipc-discussion@lists.sourceforge.net
+
+ bearer.c | 37 ++++++-------------------------------
+ bearer.h | 2 +-
+ net.c | 25 ++++---------------------
+ 3 files changed, 11 insertions(+), 53 deletions(-)
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Yong Wang <yong.y.wang@intel.com>
+---
+ net/tipc/bearer.c | 37 ++++++-------------------------------
+ net/tipc/bearer.h | 2 +-
+ net/tipc/net.c | 25 ++++---------------------
+ 3 files changed, 11 insertions(+), 53 deletions(-)
+
+diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
+index 327011f..7809137 100644
+--- a/net/tipc/bearer.c
++++ b/net/tipc/bearer.c
+@@ -45,10 +45,10 @@
+
+ #define MAX_ADDR_STR 32
+
+-static struct media *media_list = NULL;
++static struct media media_list[MAX_MEDIA];
+ static u32 media_count = 0;
+
+-struct bearer *tipc_bearers = NULL;
++struct bearer tipc_bearers[MAX_BEARERS];
+
+ /**
+ * media_name_valid - validate media name
+@@ -108,9 +108,11 @@ int tipc_register_media(u32 media_type,
+ int res = -EINVAL;
+
+ write_lock_bh(&tipc_net_lock);
+- if (!media_list)
+- goto exit;
+
++ if (tipc_mode != TIPC_NET_MODE) {
++ warn("Media <%s> rejected, not in networked mode yet\n", name);
++ goto exit;
++ }
+ if (!media_name_valid(name)) {
+ warn("Media <%s> rejected, illegal name\n", name);
+ goto exit;
+@@ -660,33 +662,10 @@ int tipc_disable_bearer(const char *name)
+
+
+
+-int tipc_bearer_init(void)
+-{
+- int res;
+-
+- write_lock_bh(&tipc_net_lock);
+- tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC);
+- media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC);
+- if (tipc_bearers && media_list) {
+- res = 0;
+- } else {
+- kfree(tipc_bearers);
+- kfree(media_list);
+- tipc_bearers = NULL;
+- media_list = NULL;
+- res = -ENOMEM;
+- }
+- write_unlock_bh(&tipc_net_lock);
+- return res;
+-}
+-
+ void tipc_bearer_stop(void)
+ {
+ u32 i;
+
+- if (!tipc_bearers)
+- return;
+-
+ for (i = 0; i < MAX_BEARERS; i++) {
+ if (tipc_bearers[i].active)
+ tipc_bearers[i].publ.blocked = 1;
+@@ -695,10 +674,6 @@ void tipc_bearer_stop(void)
+ if (tipc_bearers[i].active)
+ bearer_disable(tipc_bearers[i].publ.name);
+ }
+- kfree(tipc_bearers);
+- kfree(media_list);
+- tipc_bearers = NULL;
+- media_list = NULL;
+ media_count = 0;
+ }
+
+diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
+index ca57348..000228e 100644
+--- a/net/tipc/bearer.h
++++ b/net/tipc/bearer.h
+@@ -114,7 +114,7 @@ struct bearer_name {
+
+ struct link;
+
+-extern struct bearer *tipc_bearers;
++extern struct bearer tipc_bearers[];
+
+ void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
+ struct sk_buff *tipc_media_get_names(void);
+diff --git a/net/tipc/net.c b/net/tipc/net.c
+index 7906608..f25b1cd 100644
+--- a/net/tipc/net.c
++++ b/net/tipc/net.c
+@@ -116,7 +116,8 @@
+ */
+
+ DEFINE_RWLOCK(tipc_net_lock);
+-struct network tipc_net = { NULL };
++struct _zone *tipc_zones[256] = { NULL, };
++struct network tipc_net = { tipc_zones };
+
+ struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref)
+ {
+@@ -158,28 +159,12 @@ void tipc_net_send_external_routes(u32 dest)
+ }
+ }
+
+-static int net_init(void)
+-{
+- memset(&tipc_net, 0, sizeof(tipc_net));
+- tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC);
+- if (!tipc_net.zones) {
+- return -ENOMEM;
+- }
+- return 0;
+-}
+-
+ static void net_stop(void)
+ {
+ u32 z_num;
+
+- if (!tipc_net.zones)
+- return;
+-
+- for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
++ for (z_num = 1; z_num <= tipc_max_zones; z_num++)
+ tipc_zone_delete(tipc_net.zones[z_num]);
+- }
+- kfree(tipc_net.zones);
+- tipc_net.zones = NULL;
+ }
+
+ static void net_route_named_msg(struct sk_buff *buf)
+@@ -282,9 +267,7 @@ int tipc_net_start(u32 addr)
+ tipc_named_reinit();
+ tipc_port_reinit();
+
+- if ((res = tipc_bearer_init()) ||
+- (res = net_init()) ||
+- (res = tipc_cltr_init()) ||
++ if ((res = tipc_cltr_init()) ||
+ (res = tipc_bclink_init())) {
+ return res;
+ }
+--
+1.5.5.1
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-USB-gadget-introduce-g_nokia-gadget-driver.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-USB-gadget-introduce-g_nokia-gadget-driver.patch
new file mode 100644
index 0000000..14b4bfb
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-USB-gadget-introduce-g_nokia-gadget-driver.patch
@@ -0,0 +1,320 @@
+From d96b241bd2ad42b6c49d5f6435c69b23818f001e Mon Sep 17 00:00:00 2001
+From: Felipe Balbi <felipe.balbi@nokia.com>
+Date: Tue, 5 Jan 2010 16:10:13 +0200
+Subject: [PATCH 9/10] USB: gadget: introduce g_nokia gadget driver
+
+Patch-mainline: 2.6.34
+Git-commit: f358f5b40af67caf28b627889d007294614170b2
+
+g_nokia is the gadget driver implementing
+WMCDC Wireless Handset Control Model for the N900
+device.
+
+Signed-off-by: Felipe Balbi <felipe.balbi@nokia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ drivers/usb/gadget/Kconfig | 10 +
+ drivers/usb/gadget/Makefile | 2
+ drivers/usb/gadget/nokia.c | 259 ++++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 271 insertions(+)
+ create mode 100644 drivers/usb/gadget/nokia.c
+
+--- a/drivers/usb/gadget/Kconfig
++++ b/drivers/usb/gadget/Kconfig
+@@ -828,6 +828,16 @@
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module.
+
++config USB_G_NOKIA
++ tristate "Nokia composite gadget"
++ depends on PHONET
++ help
++ The Nokia composite gadget provides support for acm, obex
++ and phonet in only one composite gadget driver.
++
++ It's only really useful for N900 hardware. If you're building
++ a kernel for N900, say Y or M here. If unsure, say N.
++
+ config USB_G_MULTI
+ tristate "Multifunction Composite Gadget (EXPERIMENTAL)"
+ depends on BLOCK && NET
+--- a/drivers/usb/gadget/Makefile
++++ b/drivers/usb/gadget/Makefile
+@@ -44,6 +44,7 @@
+ g_cdc-objs := cdc2.o
+ g_multi-objs := multi.o
+ g_still_image-objs := still_image.o
++g_nokia-objs := nokia.o
+
+ obj-$(CONFIG_USB_ZERO) += g_zero.o
+ obj-$(CONFIG_USB_AUDIO) += g_audio.o
+@@ -57,6 +58,7 @@
+ obj-$(CONFIG_USB_CDC_COMPOSITE) += g_cdc.o
+ obj-$(CONFIG_USB_G_MULTI) += g_multi.o
+ obj-$(CONFIG_USB_STILL_IMAGE) += g_still_image.o
++obj-$(CONFIG_USB_G_NOKIA) += g_nokia.o
+
+ ifeq ($(CONFIG_USB_GADGET_DEBUG),y)
+ EXTRA_CFLAGS += -DDMA_PPB_MODE
+--- /dev/null
++++ b/drivers/usb/gadget/nokia.c
+@@ -0,0 +1,259 @@
++/*
++ * nokia.c -- Nokia Composite Gadget Driver
++ *
++ * Copyright (C) 2008-2010 Nokia Corporation
++ * Contact: Felipe Balbi <felipe.balbi@nokia.com>
++ *
++ * This gadget driver borrows from serial.c which is:
++ *
++ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
++ * Copyright (C) 2008 by David Brownell
++ * Copyright (C) 2008 by Nokia Corporation
++ *
++ * This software is distributed under the terms of the GNU General
++ * Public License ("GPL") as published by the Free Software Foundation,
++ * version 2 of that License.
++ */
++
++#include <linux/kernel.h>
++#include <linux/utsname.h>
++#include <linux/device.h>
++
++#include "u_serial.h"
++#include "u_ether.h"
++#include "u_phonet.h"
++#include "gadget_chips.h"
++
++/* Defines */
++
++#define NOKIA_VERSION_NUM 0x0211
++#define NOKIA_LONG_NAME "N900 (PC-Suite Mode)"
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * Kbuild is not very cooperative with respect to linking separately
++ * compiled library objects into one module. So for now we won't use
++ * separate compilation ... ensuring init/exit sections work to shrink
++ * the runtime footprint, and giving us at least some parts of what
++ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
++ */
++#include "composite.c"
++#include "usbstring.c"
++#include "config.c"
++#include "epautoconf.c"
++
++#include "u_serial.c"
++#include "f_acm.c"
++#include "f_ecm.c"
++#include "f_obex.c"
++#include "f_serial.c"
++#include "f_phonet.c"
++#include "u_ether.c"
++
++/*-------------------------------------------------------------------------*/
++
++#define NOKIA_VENDOR_ID 0x0421 /* Nokia */
++#define NOKIA_PRODUCT_ID 0x01c8 /* Nokia Gadget */
++
++/* string IDs are assigned dynamically */
++
++#define STRING_MANUFACTURER_IDX 0
++#define STRING_PRODUCT_IDX 1
++#define STRING_DESCRIPTION_IDX 2
++
++static char manufacturer_nokia[] = "Nokia";
++static const char product_nokia[] = NOKIA_LONG_NAME;
++static const char description_nokia[] = "PC-Suite Configuration";
++
++static struct usb_string strings_dev[] = {
++ [STRING_MANUFACTURER_IDX].s = manufacturer_nokia,
++ [STRING_PRODUCT_IDX].s = NOKIA_LONG_NAME,
++ [STRING_DESCRIPTION_IDX].s = description_nokia,
++ { } /* end of list */
++};
++
++static struct usb_gadget_strings stringtab_dev = {
++ .language = 0x0409, /* en-us */
++ .strings = strings_dev,
++};
++
++static struct usb_gadget_strings *dev_strings[] = {
++ &stringtab_dev,
++ NULL,
++};
++
++static struct usb_device_descriptor device_desc = {
++ .bLength = USB_DT_DEVICE_SIZE,
++ .bDescriptorType = USB_DT_DEVICE,
++ .bcdUSB = __constant_cpu_to_le16(0x0200),
++ .bDeviceClass = USB_CLASS_COMM,
++ .idVendor = __constant_cpu_to_le16(NOKIA_VENDOR_ID),
++ .idProduct = __constant_cpu_to_le16(NOKIA_PRODUCT_ID),
++ /* .iManufacturer = DYNAMIC */
++ /* .iProduct = DYNAMIC */
++ .bNumConfigurations = 1,
++};
++
++/*-------------------------------------------------------------------------*/
++
++/* Module */
++MODULE_DESCRIPTION("Nokia composite gadget driver for N900");
++MODULE_AUTHOR("Felipe Balbi");
++MODULE_LICENSE("GPL");
++
++/*-------------------------------------------------------------------------*/
++
++static u8 hostaddr[ETH_ALEN];
++
++static int __init nokia_bind_config(struct usb_configuration *c)
++{
++ int status = 0;
++
++ status = phonet_bind_config(c);
++ if (status)
++ printk(KERN_DEBUG "could not bind phonet config\n");
++
++ status = obex_bind_config(c, 0);
++ if (status)
++ printk(KERN_DEBUG "could not bind obex config %d\n", 0);
++
++ status = obex_bind_config(c, 1);
++ if (status)
++ printk(KERN_DEBUG "could not bind obex config %d\n", 0);
++
++ status = acm_bind_config(c, 2);
++ if (status)
++ printk(KERN_DEBUG "could not bind acm config\n");
++
++ status = ecm_bind_config(c, hostaddr);
++ if (status)
++ printk(KERN_DEBUG "could not bind ecm config\n");
++
++ return status;
++}
++
++static struct usb_configuration nokia_config_500ma_driver = {
++ .label = "Bus Powered",
++ .bind = nokia_bind_config,
++ .bConfigurationValue = 1,
++ /* .iConfiguration = DYNAMIC */
++ .bmAttributes = USB_CONFIG_ATT_ONE,
++ .bMaxPower = 250, /* 500mA */
++};
++
++static struct usb_configuration nokia_config_100ma_driver = {
++ .label = "Self Powered",
++ .bind = nokia_bind_config,
++ .bConfigurationValue = 2,
++ /* .iConfiguration = DYNAMIC */
++ .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
++ .bMaxPower = 50, /* 100 mA */
++};
++
++static int __init nokia_bind(struct usb_composite_dev *cdev)
++{
++ int gcnum;
++ struct usb_gadget *gadget = cdev->gadget;
++ int status;
++
++ status = gphonet_setup(cdev->gadget);
++ if (status < 0)
++ goto err_phonet;
++
++ status = gserial_setup(cdev->gadget, 3);
++ if (status < 0)
++ goto err_serial;
++
++ status = gether_setup(cdev->gadget, hostaddr);
++ if (status < 0)
++ goto err_ether;
++
++ status = usb_string_id(cdev);
++ if (status < 0)
++ goto err_usb;
++ strings_dev[STRING_MANUFACTURER_IDX].id = status;
++
++ device_desc.iManufacturer = status;
++
++ status = usb_string_id(cdev);
++ if (status < 0)
++ goto err_usb;
++ strings_dev[STRING_PRODUCT_IDX].id = status;
++
++ device_desc.iProduct = status;
++
++ /* config description */
++ status = usb_string_id(cdev);
++ if (status < 0)
++ goto err_usb;
++ strings_dev[STRING_DESCRIPTION_IDX].id = status;
++
++ nokia_config_500ma_driver.iConfiguration = status;
++ nokia_config_100ma_driver.iConfiguration = status;
++
++ /* set up other descriptors */
++ gcnum = usb_gadget_controller_number(gadget);
++ if (gcnum >= 0)
++ device_desc.bcdDevice = cpu_to_le16(NOKIA_VERSION_NUM);
++ else {
++ /* this should only work with hw that supports altsettings
++ * and several endpoints, anything else, panic.
++ */
++ pr_err("nokia_bind: controller '%s' not recognized\n",
++ gadget->name);
++ goto err_usb;
++ }
++
++ /* finaly register the configuration */
++ status = usb_add_config(cdev, &nokia_config_500ma_driver);
++ if (status < 0)
++ goto err_usb;
++
++ status = usb_add_config(cdev, &nokia_config_100ma_driver);
++ if (status < 0)
++ goto err_usb;
++
++ dev_info(&gadget->dev, "%s\n", NOKIA_LONG_NAME);
++
++ return 0;
++
++err_usb:
++ gether_cleanup();
++err_ether:
++ gserial_cleanup();
++err_serial:
++ gphonet_cleanup();
++err_phonet:
++ return status;
++}
++
++static int __exit nokia_unbind(struct usb_composite_dev *cdev)
++{
++ gphonet_cleanup();
++ gserial_cleanup();
++ gether_cleanup();
++
++ return 0;
++}
++
++static struct usb_composite_driver nokia_driver = {
++ .name = "g_nokia",
++ .dev = &device_desc,
++ .strings = dev_strings,
++ .bind = nokia_bind,
++ .unbind = __exit_p(nokia_unbind),
++};
++
++static int __init nokia_init(void)
++{
++ return usb_composite_register(&nokia_driver);
++}
++module_init(nokia_init);
++
++static void __exit nokia_cleanup(void)
++{
++ usb_composite_unregister(&nokia_driver);
++}
++module_exit(nokia_cleanup);
++
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-USB-otg-add-notifier-support.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-USB-otg-add-notifier-support.patch
new file mode 100644
index 0000000..3beca4f
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-USB-otg-add-notifier-support.patch
@@ -0,0 +1,85 @@
+From 76ca24d389a3f5eaf11d94efab15d5bef11a0a74 Mon Sep 17 00:00:00 2001
+From: Felipe Balbi <felipe.balbi@nokia.com>
+Date: Thu, 17 Dec 2009 13:01:36 +0200
+Subject: [PATCH 10/10] USB: otg: add notifier support
+
+Patch-mainline: 2.6.34
+Git-commit: e9a20171dfa0aa134d2211126d1310f2daea52cf
+
+The notifier will be used to communicate usb events
+to other drivers like the charger chip.
+
+This can be used as source of information to kick
+usb charger detection as described by the USB
+Battery Charging Specification 1.1 and/or to
+pass bMaxPower field of selected usb_configuration
+to charger chip in order to use that information
+as input current on the charging profile
+setup.
+
+Signed-off-by: Felipe Balbi <felipe.balbi@nokia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ include/linux/usb/otg.h | 25 +++++++++++++++++++++++++
+ 1 files changed, 25 insertions(+), 0 deletions(-)
+
+diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
+index 52bb917..6c0b676 100644
+--- a/include/linux/usb/otg.h
++++ b/include/linux/usb/otg.h
+@@ -9,6 +9,8 @@
+ #ifndef __LINUX_USB_OTG_H
+ #define __LINUX_USB_OTG_H
+
++#include <linux/notifier.h>
++
+ /* OTG defines lots of enumeration states before device reset */
+ enum usb_otg_state {
+ OTG_STATE_UNDEFINED = 0,
+@@ -33,6 +35,14 @@ enum usb_otg_state {
+ OTG_STATE_A_VBUS_ERR,
+ };
+
++enum usb_xceiv_events {
++ USB_EVENT_NONE, /* no events or cable disconnected */
++ USB_EVENT_VBUS, /* vbus valid event */
++ USB_EVENT_ID, /* id was grounded */
++ USB_EVENT_CHARGER, /* usb dedicated charger */
++ USB_EVENT_ENUMERATED, /* gadget driver enumerated */
++};
++
+ #define USB_OTG_PULLUP_ID (1 << 0)
+ #define USB_OTG_PULLDOWN_DP (1 << 1)
+ #define USB_OTG_PULLDOWN_DM (1 << 2)
+@@ -70,6 +80,9 @@ struct otg_transceiver {
+ struct otg_io_access_ops *io_ops;
+ void __iomem *io_priv;
+
++ /* for notification of usb_xceiv_events */
++ struct blocking_notifier_head notifier;
++
+ /* to pass extra port status to the root hub */
+ u16 port_status;
+ u16 port_change;
+@@ -203,6 +216,18 @@ otg_start_srp(struct otg_transceiver *otg)
+ return otg->start_srp(otg);
+ }
+
++/* notifiers */
++static inline int
++otg_register_notifier(struct otg_transceiver *otg, struct notifier_block *nb)
++{
++ return blocking_notifier_chain_register(&otg->notifier, nb);
++}
++
++static inline void
++otg_unregister_notifier(struct otg_transceiver *otg, struct notifier_block *nb)
++{
++ blocking_notifier_chain_unregister(&otg->notifier, nb);
++}
+
+ /* for OTG controller drivers (and maybe other stuff) */
+ extern int usb_bus_start_enum(struct usb_bus *bus, unsigned port_num);
+--
+1.6.0.4
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-cando-dual-touch-driver.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-cando-dual-touch-driver.patch
new file mode 100644
index 0000000..ec35072
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-cando-dual-touch-driver.patch
@@ -0,0 +1,366 @@
+From ff4cd0510defa74c5b9b3c1bd5c5a02b8af53fd1 Mon Sep 17 00:00:00 2001
+From: Priya Vijayan <priya.vijayan@intel.com>
+Date: Tue, 27 Apr 2010 14:43:04 -0700
+Subject: [PATCH] Touch driver and configs for Cando dual touch panels
+
+Added support for the Cando dual touch panels, found in the Lenovo S10-3t.
+
+Signed-off-by: Stephane Chatty <chatty@enac.fr>
+Tested-by: Priya Vijayan <priya.vijayan@intel.com>
+Tested-by: Florian Echtler <floe@butterbrot.org>
+Signed-off-by: Priya Vijayan <priya.vijayan@intel.com>
+---
+ drivers/hid/Kconfig | 6 +
+ drivers/hid/Makefile | 1 +
+ drivers/hid/hid-cando.c | 268 +++++++++++++++++++++++++++++++++++++++++++++++
+ drivers/hid/hid-core.c | 1 +
+ drivers/hid/hid-ids.h | 3 +
+ include/linux/hid.h | 2 +-
+ 6 files changed, 280 insertions(+), 1 deletions(-)
+ create mode 100644 drivers/hid/hid-cando.c
+
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index 24d90ea..37fb241 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -80,6 +80,12 @@ config HID_BELKIN
+ ---help---
+ Support for Belkin Flip KVM and Wireless keyboard.
+
++config HID_CANDO
++ tristate "Cando dual touch panel"
++ depends on USB_HID
++ ---help---
++ Support for Cando dual touch panel.
++
+ config HID_CHERRY
+ tristate "Cherry" if EMBEDDED
+ depends on USB_HID
+diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
+index 0de2dff..b05f921 100644
+--- a/drivers/hid/Makefile
++++ b/drivers/hid/Makefile
+@@ -22,6 +22,7 @@ endif
+ obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o
+ obj-$(CONFIG_HID_APPLE) += hid-apple.o
+ obj-$(CONFIG_HID_BELKIN) += hid-belkin.o
++obj-$(CONFIG_HID_CANDO) += hid-cando.o
+ obj-$(CONFIG_HID_CHERRY) += hid-cherry.o
+ obj-$(CONFIG_HID_CHICONY) += hid-chicony.o
+ obj-$(CONFIG_HID_CYPRESS) += hid-cypress.o
+diff --git a/drivers/hid/hid-cando.c b/drivers/hid/hid-cando.c
+new file mode 100644
+index 0000000..ed8c093
+--- /dev/null
++++ b/drivers/hid/hid-cando.c
+@@ -0,0 +1,268 @@
++/*
++ * HID driver for Cando dual-touch panels
++ *
++ * Copyright (c) 2010 Stephane Chatty <chatty@enac.fr>
++ *
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or (at your option)
++ * any later version.
++ */
++
++#include <linux/device.h>
++#include <linux/hid.h>
++#include <linux/module.h>
++
++MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
++MODULE_DESCRIPTION("Cando dual-touch panel");
++MODULE_LICENSE("GPL");
++
++#include "hid-ids.h"
++
++struct cando_data {
++ __u16 x, y;
++ __u8 id;
++ __s8 oldest; /* id of the oldest finger in previous frame */
++ bool valid; /* valid finger data, or just placeholder? */
++ bool first; /* is this the first finger in this frame? */
++ __s8 firstid; /* id of the first finger in the frame */
++ __u16 firstx, firsty; /* (x, y) of the first finger in the frame */
++};
++
++static int cando_input_mapping(struct hid_device *hdev, struct hid_input *hi,
++ struct hid_field *field, struct hid_usage *usage,
++ unsigned long **bit, int *max)
++{
++ switch (usage->hid & HID_USAGE_PAGE) {
++
++ case HID_UP_GENDESK:
++ switch (usage->hid) {
++ case HID_GD_X:
++ hid_map_usage(hi, usage, bit, max,
++ EV_ABS, ABS_MT_POSITION_X);
++ /* touchscreen emulation */
++ input_set_abs_params(hi->input, ABS_X,
++ field->logical_minimum,
++ field->logical_maximum, 0, 0);
++ return 1;
++ case HID_GD_Y:
++ hid_map_usage(hi, usage, bit, max,
++ EV_ABS, ABS_MT_POSITION_Y);
++ /* touchscreen emulation */
++ input_set_abs_params(hi->input, ABS_Y,
++ field->logical_minimum,
++ field->logical_maximum, 0, 0);
++ return 1;
++ }
++ return 0;
++
++ case HID_UP_DIGITIZER:
++ switch (usage->hid) {
++ case HID_DG_TIPSWITCH:
++ case HID_DG_CONTACTMAX:
++ return -1;
++ case HID_DG_INRANGE:
++ /* touchscreen emulation */
++ hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
++ return 1;
++ case HID_DG_CONTACTID:
++ hid_map_usage(hi, usage, bit, max,
++ EV_ABS, ABS_MT_TRACKING_ID);
++ return 1;
++ }
++ return 0;
++ }
++
++ return 0;
++}
++
++static int cando_input_mapped(struct hid_device *hdev, struct hid_input *hi,
++ struct hid_field *field, struct hid_usage *usage,
++ unsigned long **bit, int *max)
++{
++ if (usage->type == EV_KEY || usage->type == EV_ABS)
++ clear_bit(usage->code, *bit);
++
++ return 0;
++}
++
++/*
++ * this function is called when a whole finger has been parsed,
++ * so that it can decide what to send to the input layer.
++ */
++static void cando_filter_event(struct cando_data *td, struct input_dev *input)
++{
++ td->first = !td->first; /* touchscreen emulation */
++
++ if (!td->valid) {
++ /*
++ * touchscreen emulation: if this is the second finger and
++ * the first was valid, the first was the oldest; if the
++ * first was not valid and there was a valid finger in the
++ * previous frame, this is a release.
++ */
++ if (td->first) {
++ td->firstid = -1;
++ } else if (td->firstid >= 0) {
++ input_event(input, EV_ABS, ABS_X, td->firstx);
++ input_event(input, EV_ABS, ABS_Y, td->firsty);
++ td->oldest = td->firstid;
++ } else if (td->oldest >= 0) {
++ input_event(input, EV_KEY, BTN_TOUCH, 0);
++ td->oldest = -1;
++ }
++
++ return;
++ }
++
++ input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
++ input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
++ input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);
++
++ input_mt_sync(input);
++
++ /*
++ * touchscreen emulation: if there was no touching finger previously,
++ * emit touch event
++ */
++ if (td->oldest < 0) {
++ input_event(input, EV_KEY, BTN_TOUCH, 1);
++ td->oldest = td->id;
++ }
++
++ /*
++ * touchscreen emulation: if this is the first finger, wait for the
++ * second; the oldest is then the second if it was the oldest already
++ * or if there was no first, the first otherwise.
++ */
++ if (td->first) {
++ td->firstx = td->x;
++ td->firsty = td->y;
++ td->firstid = td->id;
++ } else {
++ int x, y, oldest;
++ if (td->id == td->oldest || td->firstid < 0) {
++ x = td->x;
++ y = td->y;
++ oldest = td->id;
++ } else {
++ x = td->firstx;
++ y = td->firsty;
++ oldest = td->firstid;
++ }
++ input_event(input, EV_ABS, ABS_X, x);
++ input_event(input, EV_ABS, ABS_Y, y);
++ td->oldest = oldest;
++ }
++}
++
++
++static int cando_event(struct hid_device *hid, struct hid_field *field,
++ struct hid_usage *usage, __s32 value)
++{
++ struct cando_data *td = hid_get_drvdata(hid);
++
++ if (hid->claimed & HID_CLAIMED_INPUT) {
++ struct input_dev *input = field->hidinput->input;
++
++ switch (usage->hid) {
++ case HID_DG_INRANGE:
++ td->valid = value;
++ break;
++ case HID_DG_CONTACTID:
++ td->id = value;
++ break;
++ case HID_GD_X:
++ td->x = value;
++ break;
++ case HID_GD_Y:
++ td->y = value;
++ cando_filter_event(td, input);
++ break;
++ case HID_DG_TIPSWITCH:
++ /* avoid interference from generic hidinput handling */
++ break;
++
++ default:
++ /* fallback to the generic hidinput handling */
++ return 0;
++ }
++ }
++
++ /* we have handled the hidinput part, now remains hiddev */
++ if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
++ hid->hiddev_hid_event(hid, field, usage, value);
++
++ return 1;
++}
++
++static int cando_probe(struct hid_device *hdev, const struct hid_device_id *id)
++{
++ int ret;
++ struct cando_data *td;
++
++ td = kmalloc(sizeof(struct cando_data), GFP_KERNEL);
++ if (!td) {
++ dev_err(&hdev->dev, "cannot allocate Cando Touch data\n");
++ return -ENOMEM;
++ }
++ hid_set_drvdata(hdev, td);
++ td->first = false;
++ td->oldest = -1;
++ td->valid = false;
++
++ ret = hid_parse(hdev);
++ if (!ret)
++ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
++
++ if (ret)
++ kfree(td);
++
++ return ret;
++}
++
++static void cando_remove(struct hid_device *hdev)
++{
++ hid_hw_stop(hdev);
++ kfree(hid_get_drvdata(hdev));
++ hid_set_drvdata(hdev, NULL);
++}
++
++static const struct hid_device_id cando_devices[] = {
++ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
++ USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
++ { }
++};
++MODULE_DEVICE_TABLE(hid, cando_devices);
++
++static const struct hid_usage_id cando_grabbed_usages[] = {
++ { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
++ { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
++};
++
++static struct hid_driver cando_driver = {
++ .name = "cando-touch",
++ .id_table = cando_devices,
++ .probe = cando_probe,
++ .remove = cando_remove,
++ .input_mapping = cando_input_mapping,
++ .input_mapped = cando_input_mapped,
++ .usage_table = cando_grabbed_usages,
++ .event = cando_event,
++};
++
++static int __init cando_init(void)
++{
++ return hid_register_driver(&cando_driver);
++}
++
++static void __exit cando_exit(void)
++{
++ hid_unregister_driver(&cando_driver);
++}
++
++module_init(cando_init);
++module_exit(cando_exit);
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 8455f3d..b126102 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1291,6 +1291,7 @@ static const struct hid_device_id hid_blacklist[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 793691f..6865ca2 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -116,6 +116,9 @@
+ #define USB_VENDOR_ID_BERKSHIRE 0x0c98
+ #define USB_DEVICE_ID_BERKSHIRE_PCWD 0x1140
+
++#define USB_VENDOR_ID_CANDO 0x2087
++#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01
++
+ #define USB_VENDOR_ID_CH 0x068e
+ #define USB_DEVICE_ID_CH_PRO_PEDALS 0x00f2
+ #define USB_DEVICE_ID_CH_COMBATSTICK 0x00f4
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 8709365..b978c1e 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -663,7 +663,7 @@ struct hid_ll_driver {
+
+ /* Applications from HID Usage Tables 4/8/99 Version 1.1 */
+ /* We ignore a few input applications that are not widely used */
+-#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002))
++#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || ((a >= 0x000d0002) && (a <= 0x000d0006)))
+
+ /* HID core API */
+
+--
+1.6.2.2
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-cypress-touch-driver.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-cypress-touch-driver.patch
new file mode 100644
index 0000000..2dae9ed
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-cypress-touch-driver.patch
@@ -0,0 +1,870 @@
+From 03e11a278286392dc20de57a24cadbc16d9aac3a Mon Sep 17 00:00:00 2001
+From: Priya Vijayan <priya.vijayan@intel.com>
+Date: Tue, 27 Apr 2010 11:23:00 -0700
+Subject: [PATCH] Touchscreen driver for Cypress panels
+
+This driver is from aava
+
+Signed-off-by: Priya Vijayan <priya.vijayan@intel.com>
+---
+ drivers/input/touchscreen/Kconfig | 8 +
+ drivers/input/touchscreen/Makefile | 1 +
+ drivers/input/touchscreen/cy8ctmg110_ts.c | 815 +++++++++++++++++++++++++++++
+ 3 files changed, 824 insertions(+), 0 deletions(-)
+ create mode 100644 drivers/input/touchscreen/cy8ctmg110_ts.c
+
+diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
+index 6dd2674..5ecf00d 100644
+--- a/drivers/input/touchscreen/Kconfig
++++ b/drivers/input/touchscreen/Kconfig
+@@ -103,6 +103,14 @@ config TOUCHSCREEN_CORGI
+ NOTE: this driver is deprecated, try enable SPI and generic
+ ADS7846-based touchscreen driver.
+
++config TOUCHSCREEN_CY8CTMG110
++ tristate "cy8ctmg110 touchscreen"
++ depends on I2C
++ default y
++ help
++ Say Y here if you have a cy8ctmg110 touchscreen capasitive touchscreen
++ If unsure, say N.
++
+ config TOUCHSCREEN_DA9034
+ tristate "Touchscreen support for Dialog Semiconductor DA9034"
+ depends on PMIC_DA903X
+diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
+index 15ad257..e5b5fae 100644
+--- a/drivers/input/touchscreen/Makefile
++++ b/drivers/input/touchscreen/Makefile
+@@ -12,6 +12,7 @@ obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
+ obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
+ obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o
+ obj-$(CONFIG_TOUCHSCREEN_CORGI) += corgi_ts.o
++obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o
+ obj-$(CONFIG_TOUCHSCREEN_DYNAPRO) += dynapro.o
+ obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o
+ obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o
+diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
+new file mode 100644
+index 0000000..5587385
+--- /dev/null
++++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
+@@ -0,0 +1,815 @@
++/*
++ * cy8ctmg110_ts.c Driver for cypress touch screen controller
++ * Copyright (c) 2009 Aava Mobile
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/input.h>
++#include <linux/interrupt.h>
++#include <asm/io.h>
++#include <linux/i2c.h>
++#include <linux/timer.h>
++#include <linux/gpio.h>
++#include <linux/hrtimer.h>
++
++#include <linux/platform_device.h>
++#include <linux/delay.h>
++#include <linux/fs.h>
++#include <asm/ioctl.h>
++#include <asm/ipc_defs.h>
++#include <asm/uaccess.h>
++#include <linux/device.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/delay.h>
++#include <linux/fs.h>
++#include <asm/ioctl.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/miscdevice.h>
++#include <linux/module.h>
++
++
++#define CY8CTMG110_DRIVER_NAME "cy8ctmg110"
++#define CY8CTMG110_DRIVER_NAME_EXT "cy8ctmg110 ext"
++/*#define MOORESTOWN_CDK*/
++/*#define CY8CTMG110_DEBUG_INFO*/
++/*#define POLL_TOUCH_EVENTS*/
++
++
++
++/*HW definations*/
++
++/*Main touch specific*/
++#define CY8CTMG110_I2C_ADDR 0x38
++#define CY8CTMG110_RESET_PIN_GPIO 43
++#define CY8CTMG110_IRQ_PIN_GPIO 59
++
++/*Extended specific*/
++#define CY8CTMG110_I2C_ADDR_EXT 0x39
++#define CY8CTMG110_RESET_PIN_GPIO_EXT 39
++#define CY8CTMG110_IRQ_PIN_GPIO_EXT 31
++
++
++#define CY8CTMG110_TOUCH_LENGHT 9787
++#define CY8CTMG110_SCREEN_LENGHT 8424
++
++
++/*Main Touch coordinates*/
++#define CY8CTMG110_X_MIN 0
++#define CY8CTMG110_Y_MIN 0
++#define CY8CTMG110_X_MAX 864
++#define CY8CTMG110_Y_MAX 480
++
++
++/*cy8ctmg110 registers defination*/
++#define CY8CTMG110_TOUCH_WAKEUP_TIME 0
++#define CY8CTMG110_TOUCH_SLEEP_TIME 2
++#define CY8CTMG110_TOUCH_X1 3
++#define CY8CTMG110_TOUCH_Y1 5
++#define CY8CTMG110_TOUCH_X2 7
++#define CY8CTMG110_TOUCH_Y2 9
++#define CY8CTMG110_FINGERS 11
++#define CY8CTMG110_GESTURE 12
++#define CY8CTMG110_VERSIONID 13 //not supported in touchpanel FW
++#define CY8CTMG110_REG_MAX 13
++
++#ifdef POLL_TOUCH_EVENTS
++ #define CY8CTMG110_POLL_TIMER_DELAY 1000*1000*100
++ #define TOUCH_MAX_I2C_FAILS 50
++#endif
++
++#define CY8CTMG110_POLL_TIMER_DELAY 1000*1000*100
++
++/* Scale factors for coordinates */
++#define X_SCALE_FACTOR 9387/8424
++#define Y_SCALE_FACTOR 97/100
++
++/* For tracing */
++static u16 g_y_trace_coord = 0;
++
++/*if soutcanyon*/
++static bool isSc = false;
++
++
++/*
++ * Touchtype
++ */
++enum touch_type {
++ TOUCH_KOSKI=1,
++ TOUCH_SC,
++ TOUCH_EXT,
++};
++
++/*
++ * The touch position structure.
++ */
++struct ts_event {
++ int x1;
++ int y1;
++ int x2;
++ int y2;
++ bool event_sended;
++};
++
++/*
++ * The touch driver structure.
++ */
++struct cy8ctmg110 {
++ struct input_dev *input;
++ char phys[32];
++ struct ts_event tc;
++ struct i2c_client *client;
++ bool pending;
++ spinlock_t lock;
++ bool initController;
++ bool sleepmode;
++ int irq_gpio;
++ int reset_gpio;
++ char driver_name[20];
++ struct delayed_work work;
++ enum touch_type version_id;
++#ifdef POLL_TOUCH_EVENTS
++ struct hrtimer timer;
++ int i2c_fail_count;
++#endif
++};
++
++/*
++ * cy8ctmg110_poweroff is the routine that is called when touch hardware
++ * will powered off
++ */
++static void cy8ctmg110_power(struct cy8ctmg110 *ts,bool poweron)
++{
++#ifdef CY8CTMG110_DEBUG_INFO
++ printk("%s power:%d\n",ts->driver_name,poweron);
++#endif
++ if (poweron)
++ gpio_direction_output(ts->reset_gpio, 0);
++ else
++ gpio_direction_output(ts->reset_gpio, 1);
++}
++/*
++ * cy8ctmg110_write_req write regs to the i2c devices
++ *
++ */
++static int cy8ctmg110_write_req(struct cy8ctmg110 *tsc,unsigned char reg,unsigned char len,unsigned char *value)
++{
++ struct i2c_client *client = tsc->client;
++ unsigned int ret;
++ unsigned char i2c_data[]={0,0,0,0,0,0};
++#ifdef CY8CTMG110_DEBUG_INFO
++ printk("cy8ctmg110_init_req:\n");
++#endif
++
++ i2c_data[0]=reg;
++ memcpy(i2c_data+1,value,len);
++
++ {
++ struct i2c_msg msg[] = {
++ { client->addr, 0, len+1, i2c_data },
++ };
++
++ ret = i2c_transfer(client->adapter, msg, 1);
++
++ if (ret != 1) {
++ printk("cy8ctmg110 touch : i2c write data cmd failed \n");
++ return ret;
++ }
++ }
++
++ return 0;
++}
++/*
++ * get_time
++ *
++ */
++#ifdef CY8CTMG110_DEBUG_INFO
++static inline long cy8ctmg110_get_time(void)
++{
++ struct timeval t;
++ do_gettimeofday(&t);
++ return t.tv_usec;
++}
++#endif
++/*
++ * cy8ctmg110_read_req read regs from i2c devise
++ *
++ */
++static int cy8ctmg110_read_req(struct cy8ctmg110 *tsc,unsigned char *i2c_data,unsigned char len ,unsigned char cmd)
++{
++ struct i2c_client *client = tsc->client;
++ unsigned int ret;
++ unsigned char regs_cmd[2]={0,0};
++#ifdef CY8CTMG110_DEBUG_INFO
++ long starttime = cy8ctmg110_get_time();
++#endif
++ regs_cmd[0]=cmd;
++
++
++ /* first write slave position to i2c devices*/
++ {
++ struct i2c_msg msg1[] = {
++ { client->addr, 0, 1, regs_cmd },
++ };
++
++ ret = i2c_transfer(client->adapter, msg1, 1);
++
++ if (ret != 1) {
++#ifdef POLL_TOUCH_EVENTS
++ tsc->i2c_fail_count++;
++#endif
++ return ret;
++ }
++ }
++
++
++ /* Second read data from position*/
++ {
++ struct i2c_msg msg2[] = {
++ { client->addr, I2C_M_RD, len, i2c_data },
++ };
++
++ ret = i2c_transfer(client->adapter, msg2, 1);
++
++
++ if (ret != 1) {
++#ifdef POLL_TOUCH_EVENTS
++ tsc->i2c_fail_count++;
++#endif
++ return ret;
++ }
++ }
++#ifdef CY8CTMG110_DEBUG_INFO
++ printk("%s time to get data bytes read:%d time:%d\n",tsc->driver_name,len,(cy8ctmg110_get_time()-starttime));
++#endif
++ return 0;
++}
++/*
++ * cy8ctmg110_send_event delevery touch event to the userpace
++ * function use normal input interface
++ */
++static void cy8ctmg110_send_event(void *tsc,int x,int y)
++{
++ struct cy8ctmg110 *ts = tsc;
++ struct input_dev *input = ts->input;
++ u16 x2, y2;
++#ifdef CY8CTMG110_DEBUG_INFO
++ printk("cy8ctmg110_send_event\n");
++#endif
++
++ if(ts->tc.event_sended == false){
++
++ if (ts->client->addr==CY8CTMG110_I2C_ADDR_EXT){
++ /*Extended touchpanel*/
++ input_report_key(input, BTN_TOUCH, 1);
++
++
++ if ( ts->pending == true){
++ input_report_rel(input, REL_Y, (ts->tc.x1-x)*2);
++ input_report_rel(input, REL_X, (y - ts->tc.y1)*3);
++ ts->tc.y1 = y;
++ ts->tc.x1 = x;
++ }
++ else{
++ ts->pending = true;
++ ts->tc.y1 = y;
++ ts->tc.x1 = x;
++ }
++
++
++ }
++ else{
++ /*Main touchpanel*/
++ ts->tc.y1 = y;
++ ts->tc.x1 = x;
++ ts->pending = true;
++ input_report_key(input, BTN_TOUCH, 1);
++
++ x2 = y;
++ y2 = x;
++
++ if (isSc == false){
++ /*Main touchpanel in koski*/
++ x2 = (u16)(y*X_SCALE_FACTOR);
++ y2 = (u16)(x*Y_SCALE_FACTOR);
++ }
++
++ input_report_abs(input, ABS_X, x2);
++ input_report_abs(input, ABS_Y, y2);
++ }
++
++ input_sync(input);
++ if(g_y_trace_coord)
++ printk("%s touch position X:%d (was = %d) Y:%d (was = %d)\n",ts->driver_name, x2, y, y2, x);
++ }
++
++}
++
++/*
++ * cy8ctmg110_touch_pos check touch position from i2c devices
++ *
++ */
++static int cy8ctmg110_touch_pos(struct cy8ctmg110 *tsc)
++{
++ unsigned char reg_p[CY8CTMG110_REG_MAX];
++
++ memset(reg_p,0,CY8CTMG110_REG_MAX);
++
++ /*Reading coordinates*/
++ if (cy8ctmg110_read_req(tsc,reg_p,1,CY8CTMG110_FINGERS)==0){
++
++ /*number of touch*/
++ if (reg_p[0]==0){
++ if (tsc->pending == true){
++ struct input_dev *input = tsc->input;
++
++ input_report_key(input, BTN_TOUCH, 0);
++
++ input_sync(input);
++ tsc->tc.event_sended = true;
++#ifdef CY8CTMG110_DEBUG_INFO
++ printk("cy8ctmg110_send_event ts->pending = true;\n");
++#endif
++ tsc->pending = false;
++ }
++ }
++ else {
++
++ if (cy8ctmg110_read_req(tsc,reg_p,4,CY8CTMG110_TOUCH_X1)==0){
++ int x = 0,y = 0;
++ y = reg_p[2]<<8 | reg_p[3];
++ x = reg_p[0]<<8 | reg_p[1];
++
++ if (tsc->tc.x1 != x || tsc->tc.y1 != y){
++ tsc->tc.event_sended = false;
++ cy8ctmg110_send_event(tsc,x,y);
++ }
++ }
++ }
++ }
++ else{
++#ifdef CY8CTMG110_DEBUG_INFO
++ printk("cy8ctmg110 i2c reading error\n");
++#endif
++ }
++
++ return 0;
++}
++/*
++ * cy8ctmg110_read_versionid delevery touch event to the userpace
++ * function use normal input interface
++ */
++static void cy8ctmg110_read_versionid(void *tsc)
++{
++ struct cy8ctmg110 *ts = tsc;
++ unsigned char reg_p[2];
++
++
++ if (cy8ctmg110_read_req(ts,reg_p,1,CY8CTMG110_VERSIONID)==0){
++ printk("%s id 0x%x\n",ts->driver_name,reg_p[0]);
++
++ /*Ugly hack solution if SC
++ */
++
++ if(ts->client->addr==CY8CTMG110_I2C_ADDR_EXT)
++ isSc = true;
++
++ switch (reg_p[0]){
++ case 0x01:
++ ts->version_id = TOUCH_EXT;
++ break;
++ case 0x02:
++ ts->version_id = TOUCH_SC;
++ break;
++ case 0x03:
++ ts->version_id = TOUCH_KOSKI;
++ break;
++ default:
++ ts->version_id = TOUCH_KOSKI;
++ break;
++ }
++ }
++}
++
++
++#ifdef POLL_TOUCH_EVENTS
++/*
++ * if interup is'n in use the touch positions can reads by polling
++ *
++ */
++static enum hrtimer_restart cy8ctmg110_timer(struct hrtimer *handle)
++{
++ struct cy8ctmg110 *ts = container_of(handle, struct cy8ctmg110, timer);
++ unsigned long flags;
++
++ spin_lock_irqsave(&ts->lock, flags);
++#ifdef CY8CTMG110_DEBUG_INFO
++ printk("cy8ctmg110_timer\n");
++#endif
++
++ cy8ctmg110_touch_pos(ts);
++
++ if (ts->i2c_fail_count<TOUCH_MAX_I2C_FAILS)
++ hrtimer_start(&ts->timer, ktime_set(0, CY8CTMG110_POLL_TIMER_DELAY),
++ HRTIMER_MODE_REL);
++
++ spin_unlock_irqrestore(&ts->lock, flags);
++
++ return HRTIMER_NORESTART;
++}
++#endif
++/*
++ * cy8ctmg110_init_controller set init value to touchcontroller
++ *
++ */
++static bool cy8ctmg110_set_sleepmode(struct cy8ctmg110 *ts)
++{
++ unsigned char reg_p[3];
++
++ if(ts->sleepmode==true){
++ reg_p[0] = 0x00; reg_p[1] =0xff; reg_p[2] =5;
++ }else{
++ reg_p[0] = 0x10;reg_p[1] =0xff;reg_p[2] =0;
++ }
++
++ if (cy8ctmg110_write_req(ts,CY8CTMG110_TOUCH_WAKEUP_TIME,3,reg_p)){
++ return false;
++ }
++ ts->initController = true;
++
++ return true;
++}
++
++
++
++static void cy8ctmg110_work(struct work_struct *work)
++{
++ struct cy8ctmg110 *ts =
++ container_of(to_delayed_work(work), struct cy8ctmg110, work);
++
++ cy8ctmg110_touch_pos(ts);
++}
++
++
++/*
++ * cy8ctmg110_irq_handler irq handling function
++ *
++ */
++static irqreturn_t cy8ctmg110_irq_handler(int irq, void *handle)
++{
++ struct cy8ctmg110 * tsc = (struct cy8ctmg110 *)handle;
++
++#ifdef CY8CTMG110_DEBUG_INFO
++ printk("%s cy8ctmg110_irq_handler\n",tsc->driver_name);
++#endif
++ if (tsc->initController == false){
++ if (cy8ctmg110_set_sleepmode(tsc) == true)
++ tsc->initController = true;
++ }
++ else
++ {
++ schedule_delayed_work(&tsc->work,
++ msecs_to_jiffies(1));
++ }
++
++#ifdef POLL_TOUCH_EVENTS
++ /*if interrupt supported in the touch controller
++ timer polling need to stop*/
++ tsc->i2c_fail_count = TOUCH_MAX_I2C_FAILS;
++#endif
++ return IRQ_HANDLED;
++}
++
++
++static int cy8ctmg110_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct cy8ctmg110 *ts;
++ struct input_dev *input_dev;
++ int err;
++
++ if (!i2c_check_functionality(client->adapter,
++ I2C_FUNC_SMBUS_READ_WORD_DATA))
++ return -EIO;
++
++ ts = kzalloc(sizeof(struct cy8ctmg110), GFP_KERNEL);
++ input_dev = input_allocate_device();
++
++ if (!ts || !input_dev) {
++ err = -ENOMEM;
++ goto err_free_mem;
++ }
++
++ ts->client = client;
++ i2c_set_clientdata(client, ts);
++
++ ts->input = input_dev;
++ ts->pending = false;
++ ts->sleepmode = false;
++
++
++ if(client->addr==CY8CTMG110_I2C_ADDR){
++ ts->reset_gpio = CY8CTMG110_RESET_PIN_GPIO;
++ input_dev->name = CY8CTMG110_DRIVER_NAME" Touchscreen";
++ snprintf(ts->driver_name, sizeof(ts->driver_name),"%s", CY8CTMG110_DRIVER_NAME);
++ }
++ else if (client->addr==CY8CTMG110_I2C_ADDR_EXT){
++ ts->reset_gpio = CY8CTMG110_RESET_PIN_GPIO_EXT;
++ input_dev->name = CY8CTMG110_DRIVER_NAME_EXT" Touchscreen";
++ snprintf(ts->driver_name, sizeof(ts->driver_name),"%s", CY8CTMG110_DRIVER_NAME_EXT);
++ }
++
++ snprintf(ts->phys, sizeof(ts->phys),
++ "%s/input0", dev_name(&client->dev));
++
++ INIT_DELAYED_WORK(&ts->work, cy8ctmg110_work);
++
++ input_dev->phys = ts->phys;
++ input_dev->id.bustype = BUS_I2C;
++
++ spin_lock_init(&ts->lock);
++
++ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) |
++ BIT_MASK(EV_REL) | BIT_MASK(EV_ABS);
++
++ input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
++
++ input_set_capability(input_dev, EV_KEY, KEY_F);
++
++
++ input_set_abs_params(input_dev, ABS_X, CY8CTMG110_X_MIN, CY8CTMG110_X_MAX, 0, 0);
++ input_set_abs_params(input_dev, ABS_Y, CY8CTMG110_Y_MIN, CY8CTMG110_Y_MAX, 0, 0);
++ input_dev->relbit[BIT_WORD(REL_X)] = BIT_MASK(REL_X)| BIT_MASK(REL_Y);
++
++ err = gpio_request(ts->reset_gpio, NULL);
++
++ if (err) {
++ printk("GPIO pin %d failed to request.\n", ts->reset_gpio);
++ goto err_free_thread;
++ }
++
++ cy8ctmg110_power(ts,true);
++
++ ts->initController = false;
++#ifdef POLL_TOUCH_EVENTS
++ ts->i2c_fail_count = 0;
++ hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ ts->timer.function = cy8ctmg110_timer;
++
++ hrtimer_start(&ts->timer, ktime_set(10, 0),
++ HRTIMER_MODE_REL);
++#endif
++ err = gpio_request(client->irq, "touch_irq_key");
++
++ if (err < 0) {
++ printk("%s gpio-keys: failed to request GPIO %d,"
++ " error %d\n",ts->driver_name,client->irq, err);
++ goto err_free_thread;
++ }
++
++ err= gpio_direction_input(client->irq);
++
++ if (err < 0) {
++ pr_err("%s gpio-keys: failed to configure input"
++ " direction for GPIO %d, error %d\n",ts->driver_name,client->irq, err);
++ gpio_free(client->irq);
++ goto err_free_thread;
++ }
++
++ ts->irq_gpio = gpio_to_irq(client->irq);
++
++ if (ts->irq_gpio < 0) {
++ err = ts->irq_gpio;
++ pr_err("cy8ctmg110 gpio-keys: Unable to get irq number"
++ " for GPIO %d, error %d\n",
++ ts->irq_gpio, err);
++ gpio_free(ts->irq_gpio);
++ goto err_free_thread;
++ }
++
++ if (client->addr!=CY8CTMG110_I2C_ADDR_EXT){
++ err = request_irq(ts->irq_gpio, cy8ctmg110_irq_handler,
++ IRQF_TRIGGER_RISING | IRQF_SHARED,
++ "touch_reset_key",
++ ts);
++ }
++
++ if (err < 0) {
++ dev_err(&client->dev, "cy8ctmg110 irq %d busy? error %d\n", ts->irq_gpio ,err);
++ goto err_free_thread;
++ }
++
++ err = input_register_device(input_dev);
++ cy8ctmg110_read_versionid(ts);
++
++ if (err)
++ goto err_free_irq;
++
++ return 0;
++
++ err_free_irq:
++ printk("%s err_free_irq\n",ts->driver_name);
++ free_irq(client->irq, ts);
++ err_free_thread:
++ printk("%s err_free_thread\n",ts->driver_name);
++ err_free_mem:
++ printk("%s err_free_mem\n",ts->driver_name);
++ input_free_device(input_dev);
++ kfree(ts);
++
++ return err;
++}
++/*
++ * cy8ctmg110_suspend
++ *
++ */
++static int cy8ctmg110_suspend(struct i2c_client *client, pm_message_t mesg)
++{
++ if (device_may_wakeup(&client->dev))
++ enable_irq_wake(client->irq);
++
++ return 0;
++}
++/*
++ * cy8ctmg110_resume
++ *
++ */
++static int cy8ctmg110_resume(struct i2c_client *client)
++{
++ if (device_may_wakeup(&client->dev))
++ disable_irq_wake(client->irq);
++
++ return 0;
++}
++/*
++ * cy8ctmg110_remove
++ *
++ */
++static int cy8ctmg110_remove(struct i2c_client *client)
++{
++ struct cy8ctmg110 *ts = i2c_get_clientdata(client);
++
++#ifdef CY8CTMG110_DEBUG_INFO
++ printk("cy8ctmg110_remove\n");
++#endif
++
++ cy8ctmg110_power(ts,false);
++#ifdef POLL_TOUCH_EVENTS
++ hrtimer_cancel(&ts->timer);
++#endif
++
++ free_irq(client->irq, ts);
++ input_unregister_device(ts->input);
++ kfree(ts);
++
++ return 0;
++}
++
++static struct i2c_device_id cy8ctmg110_idtable[] = {
++ { CY8CTMG110_DRIVER_NAME, 1 },
++ { CY8CTMG110_DRIVER_NAME_EXT, 1 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(i2c, cy8ctmg110_idtable);
++
++static struct i2c_driver cy8ctmg110_driver = {
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = CY8CTMG110_DRIVER_NAME,
++ .bus = &i2c_bus_type,
++ },
++ .id_table = cy8ctmg110_idtable,
++ .probe = cy8ctmg110_probe,
++ .remove = cy8ctmg110_remove,
++ .suspend = cy8ctmg110_suspend,
++ .resume = cy8ctmg110_resume,
++};
++
++
++static int __init cy8ctmg110_init(void)
++{
++ printk("cy8ctmg110_init\n");
++
++ return i2c_add_driver(&cy8ctmg110_driver);
++}
++
++static void __exit cy8ctmg110_exit(void)
++{
++#ifdef CY8CTMG110_DEBUG_INFO
++ printk("cy8ctmg110_exit\n");
++#endif
++ i2c_del_driver(&cy8ctmg110_driver);
++}
++
++module_init(cy8ctmg110_init);
++module_exit(cy8ctmg110_exit);
++
++
++struct i2c_board_info __initdata koski_i2c_board_info2[] = {
++ {
++ I2C_BOARD_INFO(CY8CTMG110_DRIVER_NAME, CY8CTMG110_I2C_ADDR),
++ .irq = CY8CTMG110_IRQ_PIN_GPIO
++ },
++ {
++ I2C_BOARD_INFO(CY8CTMG110_DRIVER_NAME_EXT, CY8CTMG110_I2C_ADDR_EXT),
++ .irq = CY8CTMG110_IRQ_PIN_GPIO_EXT
++ },
++};
++
++
++static int __init koski_i2c_init(void)
++{
++ printk("init koski board\n");
++
++#ifdef MOORESTOWN_CDK
++ /*init koski i2c*/
++ i2c_register_board_info(1, koski_i2c_board_info2,
++ ARRAY_SIZE(koski_i2c_board_info2));
++#else
++ /*init koski i2c*/
++ i2c_register_board_info(0, koski_i2c_board_info2,
++ ARRAY_SIZE(koski_i2c_board_info2));
++#endif
++ return 0;
++}
++
++module_init(koski_i2c_init);
++
++MODULE_AUTHOR("Samuli Konttila <samuli.konttila@aavamobile.com>");
++MODULE_DESCRIPTION("cy8ctmg110 TouchScreen Driver");
++MODULE_LICENSE("GPL v2");
++
++
++// Aava access from sysfs begin
++static ssize_t aava_query_fw_info_func(struct class *class, char *buf)
++{
++ ssize_t status;
++ int i = 0;
++ unsigned char mrst_fw_ver_info[16];
++
++ printk("!!! aava_query_fw_info_func() ENTER\n");
++
++ status = mrst_get_firmware_version(mrst_fw_ver_info);
++ for (i = 0; i < 16; i++){
++ printk("%x\n", mrst_fw_ver_info[i]);
++ buf[i] = mrst_fw_ver_info[i];
++ }
++
++ return 16;
++}
++
++static ssize_t aava_enable_touch_traces_func(struct class *class, \
++ const char *buf, size_t len)
++{
++ ssize_t status;
++ unsigned long value;
++
++ status = strict_strtoul(buf, 0, &value);
++ printk("!!! aava_enable_touch_traces_func() = %d\n", (int)value);
++
++ g_y_trace_coord = value;
++
++ return len;
++}
++
++static struct class_attribute aava_class_attrs[] = {
++ __ATTR(aava_query_fw_info, 0444, aava_query_fw_info_func, NULL),
++ __ATTR(aava_enable_touch_traces, 0200, NULL, aava_enable_touch_traces_func),
++ __ATTR_NULL,
++};
++
++static struct class aava_class = {
++ .name = "aava",
++ .owner = THIS_MODULE,
++
++ .class_attrs = aava_class_attrs,
++};
++
++static int __init aava_sysfs_init(void)
++{
++ int status;
++
++ status = class_register(&aava_class);
++ if (status < 0)
++ return status;
++
++ return status;
++}
++postcore_initcall(aava_sysfs_init);
++// Aava access from sysfs end
+--
+1.6.2.2
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-drm-i915-Ignore-LVDS-EDID-when-it-is-unavailabe-or-invalid.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-drm-i915-Ignore-LVDS-EDID-when-it-is-unavailabe-or-invalid.patch
new file mode 100644
index 0000000..f2ee811
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-drm-i915-Ignore-LVDS-EDID-when-it-is-unavailabe-or-invalid.patch
@@ -0,0 +1,67 @@
+Subject: [3/3] drm/i915: Ignore LVDS EDID when it is unavailabe or invalid
+Date: Thu, 04 Mar 2010 08:17:31 -0000
+From: Zhenyu Wang <zhenyuw@linux.intel.com>
+Patch-mainline: 2.6.34
+References: https://patchwork.kernel.org/patch/83556/, BMC# 331
+
+From: Zhao Yakui <yakui.zhao@intel.com>
+
+In course of probing the display mode for LVDS, we will firstly try to
+check the EDID for LVDS panel. But on some laptops the EDID is invalid for
+the LVDS panel. In such case it will complain the invalida EDID warning
+message and print the EDID raw data every time when probing the LVDS mode.
+
+https://bugs.freedesktop.org/show_bug.cgi?id=23099
+https://bugs.freedesktop.org/show_bug.cgi?id=26395
+
+Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
+Tested-by: Sitsofe Wheeler <sitsofe@yahoo.com>
+Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+Acked-by: Yin Kangkai <kangkai.yin@intel.com>
+
+---
+ drivers/gpu/drm/i915/i915_drv.h | 2 ++
+ drivers/gpu/drm/i915/intel_lvds.c | 13 +++++++++----
+ 2 files changed, 11 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -581,6 +581,8 @@ typedef struct drm_i915_private {
+ /* Reclocking support */
+ bool render_reclock_avail;
+ bool lvds_downclock_avail;
++ /* indicate whether the LVDS EDID is OK */
++ bool lvds_edid_ok;
+ /* indicates the reduced downclock for LVDS*/
+ int lvds_downclock;
+ struct work_struct idle_work;
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -680,10 +680,13 @@ static int intel_lvds_get_modes(struct d
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = 0;
+
+- ret = intel_ddc_get_modes(intel_output);
++ if (dev_priv->lvds_edid_ok) {
++ ret = intel_ddc_get_modes(intel_output);
++ if (ret)
++ return ret;
+
+- if (ret)
+- return ret;
++ dev_priv->lvds_edid_ok = false;
++ }
+
+ /* Didn't get an EDID, so
+ * Set wide sync ranges so we get all modes
+@@ -1097,7 +1100,9 @@ void intel_lvds_init(struct drm_device *
+ * Attempt to get the fixed panel mode from DDC. Assume that the
+ * preferred mode is the right one.
+ */
+- intel_ddc_get_modes(intel_output);
++ dev_priv->lvds_edid_ok = true;
++ if (!intel_ddc_get_modes(intel_output))
++ dev_priv->lvds_edid_ok = false;
+
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ mutex_lock(&dev->mode_config.mutex);
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-enable-hid-dg-contact-count-stantum-and-cando-touch-drivers.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-enable-hid-dg-contact-count-stantum-and-cando-touch-drivers.patch
new file mode 100644
index 0000000..a58990e
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-enable-hid-dg-contact-count-stantum-and-cando-touch-drivers.patch
@@ -0,0 +1,76 @@
+From 3589c3e0ec88c19c330b88f7d37c8092987866e6 Mon Sep 17 00:00:00 2001
+From: Priya Vijayan <priya.vijayan@intel.com>
+Date: Fri, 30 Apr 2010 11:11:10 -0700
+Subject: [PATCH] Enable hid-dg-contactcount in stantum and cando touch drivers
+
+Enable hid-dg-contact count in stantum and cando touch drivers to be able to use with mtdev driver
+
+Patch-mainline: 2.6.34
+
+Signed-off-by: Priya Vijayan <priya.vijayan@intel.com>
+---
+ drivers/hid/hid-cando.c | 8 ++++++++
+ drivers/hid/hid-stantum.c | 11 ++++++++++-
+ 2 files changed, 18 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/hid/hid-cando.c b/drivers/hid/hid-cando.c
+index ed8c093..42b9980 100644
+--- a/drivers/hid/hid-cando.c
++++ b/drivers/hid/hid-cando.c
+@@ -64,6 +64,10 @@ static int cando_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ case HID_DG_TIPSWITCH:
+ case HID_DG_CONTACTMAX:
+ return -1;
++ case HID_DG_CONTACTCOUNT:
++ hid_map_usage(hi, usage, bit, max,
++ EV_ABS, ABS_MT_CONTACT_COUNT);
++ return 1;
+ case HID_DG_INRANGE:
+ /* touchscreen emulation */
+ hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+@@ -169,6 +173,10 @@ static int cando_event(struct hid_device *hid, struct hid_field *field,
+ struct input_dev *input = field->hidinput->input;
+
+ switch (usage->hid) {
++ case HID_DG_CONTACTCOUNT:
++ input_event(input,EV_ABS,ABS_MT_CONTACT_COUNT,value);
++ //input_mt_sync(input);
++ break;
+ case HID_DG_INRANGE:
+ td->valid = value;
+ break;
+diff --git a/drivers/hid/hid-stantum.c b/drivers/hid/hid-stantum.c
+index bb4430f..ac3df05 100644
+--- a/drivers/hid/hid-stantum.c
++++ b/drivers/hid/hid-stantum.c
+@@ -64,10 +64,15 @@ static int stantum_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ case HID_DG_CONFIDENCE:
+ case HID_DG_INPUTMODE:
+ case HID_DG_DEVICEINDEX:
+- case HID_DG_CONTACTCOUNT:
++ //case HID_DG_CONTACTCOUNT:
+ case HID_DG_CONTACTMAX:
+ return -1;
+
++ case HID_DG_CONTACTCOUNT:
++ hid_map_usage(hi, usage, bit, max,
++ EV_ABS, ABS_MT_CONTACT_COUNT);
++ return 1;
++
+ case HID_DG_TIPSWITCH:
+ /* touchscreen emulation */
+ hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+@@ -171,6 +176,10 @@ static int stantum_event(struct hid_device *hid, struct hid_field *field,
+ struct input_dev *input = field->hidinput->input;
+
+ switch (usage->hid) {
++ case HID_DG_CONTACTCOUNT:
++ input_event(input,EV_ABS,ABS_MT_CONTACT_COUNT,value);
++ //input_mt_sync(input);
++ break;
+ case HID_DG_INRANGE:
+ /* this is the last field in a finger */
+ stantum_filter_event(sd, input);
+--
+1.6.2.2
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-fix-marvell-firmware-path.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-fix-marvell-firmware-path.patch
new file mode 100644
index 0000000..20a479c
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-fix-marvell-firmware-path.patch
@@ -0,0 +1,58 @@
+From dd75ba1aee79abfa6948cd3b82a7a7eb97599c91 Mon Sep 17 00:00:00 2001
+From: Prajwal Mohan <prajwal.karur.mohan@intel.com>
+Date: Fri, 9 Apr 2010 18:18:34 -0700
+Subject: [PATCH 106/106] Fixing path for marvell firmware
+
+Patch-mainline: Friday, 9 Apr 2010 18:18:34
+---
+ drivers/net/wireless/libertas/if_sdio.c | 24 ++++++++++++------------
+ 1 files changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
+index 7a73f62..055a581 100644
+--- a/drivers/net/wireless/libertas/if_sdio.c
++++ b/drivers/net/wireless/libertas/if_sdio.c
+@@ -83,28 +83,28 @@ static struct if_sdio_model if_sdio_models[] = {
+ {
+ /* 8385 */
+ .model = IF_SDIO_MODEL_8385,
+- .helper = "sd8385_helper.bin",
+- .firmware = "sd8385.bin",
++ .helper = "libertas/sd8385_helper.bin",
++ .firmware = "libertas/sd8385.bin",
+ },
+ {
+ /* 8686 */
+ .model = IF_SDIO_MODEL_8686,
+- .helper = "sd8686_helper.bin",
+- .firmware = "sd8686.bin",
++ .helper = "libertas/sd8686_helper.bin",
++ .firmware = "libertas/sd8686.bin",
+ },
+ {
+ /* 8688 */
+ .model = IF_SDIO_MODEL_8688,
+- .helper = "sd8688_helper.bin",
+- .firmware = "sd8688.bin",
++ .helper = "libertas/sd8688_helper.bin",
++ .firmware = "libertas/sd8688.bin",
+ },
+ };
+-MODULE_FIRMWARE("sd8385_helper.bin");
+-MODULE_FIRMWARE("sd8385.bin");
+-MODULE_FIRMWARE("sd8686_helper.bin");
+-MODULE_FIRMWARE("sd8686.bin");
+-MODULE_FIRMWARE("sd8688_helper.bin");
+-MODULE_FIRMWARE("sd8688.bin");
++MODULE_FIRMWARE("libertas/sd8385_helper.bin");
++MODULE_FIRMWARE("libertas/sd8385.bin");
++MODULE_FIRMWARE("libertas/sd8686_helper.bin");
++MODULE_FIRMWARE("libertas/sd8686.bin");
++MODULE_FIRMWARE("libertas/sd8688_helper.bin");
++MODULE_FIRMWARE("libertas/sd8688.bin");
+
+ struct if_sdio_packet {
+ struct if_sdio_packet *next;
+--
+1.6.2.5
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-hack-to-fix-aava-camera-sensor-issue.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-hack-to-fix-aava-camera-sensor-issue.patch
new file mode 100644
index 0000000..c7e1a53
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-hack-to-fix-aava-camera-sensor-issue.patch
@@ -0,0 +1,30 @@
+From 67b3a2d6716bfa9d308b26729a2cdeeffb6e8218 Mon Sep 17 00:00:00 2001
+From: Prajwal Mohan <prajwal.karur.mohan@intel.com>
+Date: Thu, 13 May 2010 16:39:43 -0700
+Subject: [PATCH] dirty hack to fix aava camera sensor issue
+
+Signed-off-by: Prajwal Mohan <prajwal.karur.mohan@intel.com>
+Patch-mainline: 2.6.34
+---
+ drivers/media/video/mrstci/mrstov2650/mrstov2650.c | 4 ++++
+ 1 files changed, 4 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/media/video/mrstci/mrstov2650/mrstov2650.c b/drivers/media/video/mrstci/mrstov2650/mrstov2650.c
+index 7f0d478..ce24139 100644
+--- a/drivers/media/video/mrstci/mrstov2650/mrstov2650.c
++++ b/drivers/media/video/mrstci/mrstov2650/mrstov2650.c
+@@ -1111,7 +1111,11 @@ static int ov2650_detect(struct i2c_client *client)
+ ov2650_wakeup();
+
+ ov2650_read(client, OV2650_PID_L, &value);
++ #ifdef CONFIG_MRST_DRM_AAVA
++ if (value != 0x66)
++ #else
+ if (value != 0x52)
++ #endif
+ return -ENODEV;
+
+ return 0;
+--
+1.6.2.5
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-input-synaptics-clickpad-support.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-input-synaptics-clickpad-support.patch
new file mode 100644
index 0000000..92bde9f
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-input-synaptics-clickpad-support.patch
@@ -0,0 +1,142 @@
+From 5f57d67da87332a9a1ba8fa7a33bf0680e1c76e7 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Mon, 19 Apr 2010 10:37:21 -0700
+Subject: [PATCH 1/2] Input: Add support of Synaptics Clickpad device
+Patch-mainline: 2.6.34
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input.git
+Git-commit: 5f57d67da87332a9a1ba8fa7a33bf0680e1c76e7
+References: BMC#99
+
+The new type of touchpads can be detected via a new query command
+0x0c. The clickpad flags are in cap[0]:4 and cap[1]:0 bits.
+
+When the device is detected, the driver now reports only the left
+button as the supported buttons so that X11 driver can detect that
+the device is Clickpad. A Clickpad device gives the button events
+only as the middle button. The kernel driver morphs to the left
+button. The real handling of Clickpad is done rather in X driver
+side.
+
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Dmitry Torokhov <dtor@mail.ru>
+Acked-by: Jian-feng Ding <jian-feng.ding@intel.com>
+---
+ drivers/input/mouse/synaptics.c | 35 ++++++++++++++++++++++++++++++-----
+ drivers/input/mouse/synaptics.h | 4 ++++
+ 2 files changed, 34 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index d3f5243..9ab9ff0 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -136,7 +136,8 @@ static int synaptics_capability(struct psmouse *psmouse)
+ if (synaptics_send_cmd(psmouse, SYN_QUE_CAPABILITIES, cap))
+ return -1;
+ priv->capabilities = (cap[0] << 16) | (cap[1] << 8) | cap[2];
+- priv->ext_cap = 0;
++ priv->ext_cap = priv->ext_cap_0c = 0;
++
+ if (!SYN_CAP_VALID(priv->capabilities))
+ return -1;
+
+@@ -149,7 +150,7 @@ static int synaptics_capability(struct psmouse *psmouse)
+ if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 1) {
+ if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_CAPAB, cap)) {
+ printk(KERN_ERR "Synaptics claims to have extended capabilities,"
+- " but I'm not able to read them.");
++ " but I'm not able to read them.\n");
+ } else {
+ priv->ext_cap = (cap[0] << 16) | (cap[1] << 8) | cap[2];
+
+@@ -161,6 +162,16 @@ static int synaptics_capability(struct psmouse *psmouse)
+ priv->ext_cap &= 0xff0fff;
+ }
+ }
++
++ if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 4) {
++ if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_CAPAB_0C, cap)) {
++ printk(KERN_ERR "Synaptics claims to have extended capability 0x0c,"
++ " but I'm not able to read it.\n");
++ } else {
++ priv->ext_cap_0c = (cap[0] << 16) | (cap[1] << 8) | cap[2];
++ }
++ }
++
+ return 0;
+ }
+
+@@ -347,7 +358,15 @@ static void synaptics_parse_hw_state(unsigned char buf[], struct synaptics_data
+ hw->left = (buf[0] & 0x01) ? 1 : 0;
+ hw->right = (buf[0] & 0x02) ? 1 : 0;
+
+- if (SYN_CAP_MIDDLE_BUTTON(priv->capabilities)) {
++ if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
++ /*
++ * Clickpad's button is transmitted as middle button,
++ * however, since it is primary button, we will report
++ * it as BTN_LEFT.
++ */
++ hw->left = ((buf[0] ^ buf[3]) & 0x01) ? 1 : 0;
++
++ } else if (SYN_CAP_MIDDLE_BUTTON(priv->capabilities)) {
+ hw->middle = ((buf[0] ^ buf[3]) & 0x01) ? 1 : 0;
+ if (hw->w == 2)
+ hw->scroll = (signed char)(buf[1]);
+@@ -592,6 +611,12 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
+
+ dev->absres[ABS_X] = priv->x_res;
+ dev->absres[ABS_Y] = priv->y_res;
++
++ if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
++ /* Clickpads report only left button */
++ __clear_bit(BTN_RIGHT, dev->keybit);
++ __clear_bit(BTN_MIDDLE, dev->keybit);
++ }
+ }
+
+ static void synaptics_disconnect(struct psmouse *psmouse)
+@@ -696,10 +721,10 @@ int synaptics_init(struct psmouse *psmouse)
+
+ priv->pkt_type = SYN_MODEL_NEWABS(priv->model_id) ? SYN_NEWABS : SYN_OLDABS;
+
+- printk(KERN_INFO "Synaptics Touchpad, model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx\n",
++ printk(KERN_INFO "Synaptics Touchpad, model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx\n",
+ SYN_ID_MODEL(priv->identity),
+ SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity),
+- priv->model_id, priv->capabilities, priv->ext_cap);
++ priv->model_id, priv->capabilities, priv->ext_cap, priv->ext_cap_0c);
+
+ set_input_params(psmouse->dev, priv);
+
+diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
+index f0f40a3..ae37c5d 100644
+--- a/drivers/input/mouse/synaptics.h
++++ b/drivers/input/mouse/synaptics.h
+@@ -18,6 +18,7 @@
+ #define SYN_QUE_SERIAL_NUMBER_SUFFIX 0x07
+ #define SYN_QUE_RESOLUTION 0x08
+ #define SYN_QUE_EXT_CAPAB 0x09
++#define SYN_QUE_EXT_CAPAB_0C 0x0c
+
+ /* synatics modes */
+ #define SYN_BIT_ABSOLUTE_MODE (1 << 7)
+@@ -48,6 +49,8 @@
+ #define SYN_CAP_VALID(c) ((((c) & 0x00ff00) >> 8) == 0x47)
+ #define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20)
+ #define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12)
++#define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16)
++#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100100)
+
+ /* synaptics modes query bits */
+ #define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7))
+@@ -96,6 +99,7 @@ struct synaptics_data {
+ unsigned long int model_id; /* Model-ID */
+ unsigned long int capabilities; /* Capabilities */
+ unsigned long int ext_cap; /* Extended Capabilities */
++ unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */
+ unsigned long int identity; /* Identification */
+ int x_res; /* X resolution in units/mm */
+ int y_res; /* Y resolution in units/mm */
+--
+1.6.3.3
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-aava-specific-changes-no-audio.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-aava-specific-changes-no-audio.patch
new file mode 100644
index 0000000..7336ae6
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-aava-specific-changes-no-audio.patch
@@ -0,0 +1,3342 @@
+From: Prajwal Mohan <prajwal.karur.mohan@intel.com>
+Date: Tue, 27 Apr 2010 11:23:00 -0700
+Subject: [PATCH] Aava specific patches
+
+This driver is from aava
+
+Signed-off-by: Prajwal Mohan <prajwal.karur.mohan@intel.com>
+Patch-mainline: 2.6.34
+---
+Index: linux-2.6.33/drivers/misc/mrst_test_ipc/ipc_module.c
+===================================================================
+--- linux-2.6.33.orig/drivers/misc/mrst_test_ipc/ipc_module.c
++++ linux-2.6.33/drivers/misc/mrst_test_ipc/ipc_module.c
+@@ -44,8 +44,13 @@
+ #include <linux/uaccess.h>
+ #include <linux/time.h>
+
++
++
+ #include <asm/ipc_defs.h>
+
++#include <linux/device.h>
++#include <linux/ipc_module.h>
++
+ static u32 major;
+ #define MAX_FW_SIZE 264192
+
+@@ -53,9 +58,11 @@ int init_ipc_driver(void);
+ int ipc_ioctl(struct inode *inode, struct file *filp, u32 cmd,
+ unsigned long arg);
+ const struct file_operations ipc_fops = {
++owner:THIS_MODULE,
+ ioctl:ipc_ioctl,
+ };
+
++static struct class *mid_ipc_class;
+
+ int ipc_ioctl(struct inode *inode, struct file *filp, u32 cmd,
+ unsigned long arg)
+@@ -71,6 +78,18 @@ int ipc_ioctl(struct inode *inode, struc
+ u8 *fw_buf = NULL ;
+
+ switch (cmd) {
++ case IPC_IOC_PMIC_REG_READ:
++ cmd = IPC_PMIC_REGISTER_READ;
++ break;
++ case IPC_IOC_PMIC_REG_WRITE:
++ cmd = IPC_PMIC_REGISTER_WRITE;
++ break;
++ default:
++ printk(KERN_INFO "ioctl <UNRECOGNIZED> received\n");
++ break;
++ }
++
++ switch (cmd) {
+ case IPC_PMIC_REGISTER_READ:
+ {
+ printk(KERN_INFO
+@@ -169,6 +188,8 @@ int ipc_ioctl(struct inode *inode, struc
+
+ static int __init ipc_module_init(void)
+ {
++ struct device *dev;
++
+ printk(KERN_INFO "Init ipc_module\n");
+
+ major = register_chrdev(0, "mid_ipc", &ipc_fops);
+@@ -177,6 +198,23 @@ static int __init ipc_module_init(void)
+ return major;
+ }
+
++ mid_ipc_class = class_create(THIS_MODULE, "mid_ipc");
++ if (IS_ERR(mid_ipc_class)) {
++ unregister_chrdev(major, "mid_ipc");
++ return PTR_ERR(mid_ipc_class);
++ }
++
++ dev = device_create(mid_ipc_class,
++ NULL,
++ MKDEV(major, 0),
++ NULL,
++ "mid_ipc" );
++ if (IS_ERR(dev)) {
++ class_destroy(mid_ipc_class);
++ unregister_chrdev(major, "mid_ipc");
++ return PTR_ERR(dev);
++ }
++
+ init_ipc_driver ( ) ;
+ return SUCCESS;
+
+@@ -184,6 +222,8 @@ static int __init ipc_module_init(void)
+
+ static void __exit ipc_module_exit(void)
+ {
++ device_destroy(mid_ipc_class, MKDEV(major, 0));
++ class_destroy(mid_ipc_class);
+ unregister_chrdev(major, "mid_ipc");
+ }
+
+Index: linux-2.6.33/include/linux/Kbuild
+===================================================================
+--- linux-2.6.33.orig/include/linux/Kbuild
++++ linux-2.6.33/include/linux/Kbuild
+@@ -385,3 +385,5 @@ unifdef-y += xfrm.h
+ objhdr-y += version.h
+ header-y += wimax.h
+ header-y += wimax/
++header-y += ipc_module.h
++
+Index: linux-2.6.33/drivers/gpio/gpiolib.c
+===================================================================
+--- linux-2.6.33.orig/drivers/gpio/gpiolib.c
++++ linux-2.6.33/drivers/gpio/gpiolib.c
+@@ -228,11 +228,14 @@ static ssize_t gpio_direction_show(struc
+
+ if (!test_bit(FLAG_EXPORT, &desc->flags))
+ status = -EIO;
+- else
+- status = sprintf(buf, "%s\n",
+- test_bit(FLAG_IS_OUT, &desc->flags)
+- ? "out" : "in");
+-
++ else {
++ status = sprintf(buf,
++ "%s\n",
++ gpio_get_direction( (desc - gpio_desc) ) ==
++ DIRECTION_OUT ?
++ "out" :
++ "in");
++ }
+ mutex_unlock(&sysfs_lock);
+ return status;
+ }
+@@ -1507,6 +1510,29 @@ void gpio_set_value_cansleep(unsigned gp
+ }
+ EXPORT_SYMBOL_GPL(gpio_set_value_cansleep);
+
++enum gpio_direction gpio_get_direction(unsigned gpio)
++{
++ struct gpio_chip *chip;
++ struct gpio_desc *desc = &gpio_desc[gpio];
++
++ chip = gpio_to_chip(gpio);
++ might_sleep_if(extra_checks && chip->can_sleep);
++
++ if (chip->get_direction) {
++ if (chip->get_direction(chip, gpio - chip->base) ==
++ DIRECTION_IN) {
++ clear_bit(FLAG_IS_OUT, &desc->flags);
++ return DIRECTION_IN;
++ } else {
++ set_bit(FLAG_IS_OUT, &desc->flags);
++ return DIRECTION_OUT;
++ }
++ }
++ return test_bit(FLAG_IS_OUT, &desc->flags) ?
++ DIRECTION_OUT :
++ DIRECTION_IN;
++}
++EXPORT_SYMBOL_GPL(gpio_get_direction);
+
+ #ifdef CONFIG_DEBUG_FS
+
+Index: linux-2.6.33/drivers/gpio/langwell_gpio.c
+===================================================================
+--- linux-2.6.33.orig/drivers/gpio/langwell_gpio.c
++++ linux-2.6.33/drivers/gpio/langwell_gpio.c
+@@ -107,6 +107,19 @@ static int lnw_gpio_direction_output(str
+ return 0;
+ }
+
++static enum gpio_direction lnw_gpio_get_direction(struct gpio_chip *chip,
++ unsigned offset)
++{
++ struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
++ u8 reg = offset / 32;
++ void __iomem *gpdr;
++
++ gpdr = (void __iomem *)(&lnw->reg_base->GPDR[reg]);
++ if (readl(gpdr) & BIT(offset % 32))
++ return DIRECTION_OUT;
++ return DIRECTION_IN;
++}
++
+ static int lnw_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+ {
+ struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
+@@ -240,6 +253,7 @@ static int __devinit lnw_gpio_probe(stru
+ lnw->chip.label = dev_name(&pdev->dev);
+ lnw->chip.direction_input = lnw_gpio_direction_input;
+ lnw->chip.direction_output = lnw_gpio_direction_output;
++ lnw->chip.get_direction = lnw_gpio_get_direction;
+ lnw->chip.get = lnw_gpio_get;
+ lnw->chip.set = lnw_gpio_set;
+ lnw->chip.to_irq = lnw_gpio_to_irq;
+Index: linux-2.6.33/drivers/gpio/langwell_pmic_gpio.c
+===================================================================
+--- linux-2.6.33.orig/drivers/gpio/langwell_pmic_gpio.c
++++ linux-2.6.33/drivers/gpio/langwell_pmic_gpio.c
+@@ -165,15 +165,33 @@ static int pmic_gpio_direction_output(st
+ return rc;
+ }
+
+-static int pmic_gpio_get(struct gpio_chip *chip, unsigned offset)
++static enum gpio_direction pmic_gpio_get_direction(struct gpio_chip *chip,
++ unsigned offset)
+ {
+- /* we only have 8 GPIO can use as input */
+ if (offset > 8) {
+- printk(KERN_ERR
+- "%s: only pin 0-7 support input\n", __func__);
+- return -1;
++ /* GPOWSs and GPOs are always outputs */
++ return DIRECTION_OUT;
+ }
+- return ipc_read_char(GPIO0 + offset) & GPIO_DIN;
++ if (ipc_read_char(GPIO0 + offset) & GPIO_DIR)
++ return DIRECTION_IN;
++ return DIRECTION_OUT;
++}
++
++static int pmic_gpio_get(struct gpio_chip *chip, unsigned offset)
++{
++ if (offset < 8) {
++ /* GPIOSW: Get state according to direction */
++ if (pmic_gpio_get_direction( chip, offset ) == DIRECTION_IN)
++ return (ipc_read_char(GPIO0 + offset) & GPIO_DIN);
++ return (ipc_read_char(GPIO0 + offset) & GPIO_DOU);
++ } else if (offset < 16) {
++ /* GPOSW */
++ return (ipc_read_char(GPOSWCTL0 + offset - 8) & GPOSW_DOU);
++ } else if (offset < 24) {
++ /* GPO */
++ return (ipc_read_char(GPO) & (1 << (offset - 16)));
++ }
++ return 0;
+ }
+
+ static void pmic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+@@ -284,6 +302,7 @@ static int __devinit pmic_gpio_probe(str
+ pg->chip.label = "langwell_pmic";
+ pg->chip.direction_input = pmic_gpio_direction_input;
+ pg->chip.direction_output = pmic_gpio_direction_output;
++ pg->chip.get_direction = pmic_gpio_get_direction;
+ pg->chip.get = pmic_gpio_get;
+ pg->chip.set = pmic_gpio_set;
+ pg->chip.to_irq = pmic_gpio_to_irq;
+Index: linux-2.6.33/drivers/gpio/pca953x.c
+===================================================================
+--- linux-2.6.33.orig/drivers/gpio/pca953x.c
++++ linux-2.6.33/drivers/gpio/pca953x.c
+@@ -144,6 +144,24 @@ static int pca953x_gpio_direction_output
+ return 0;
+ }
+
++static enum gpio_direction pca953x_gpio_get_direction(struct gpio_chip *gc,
++ unsigned off)
++{
++ struct pca953x_chip *chip;
++ uint16_t reg_val;
++ int ret;
++
++ chip = container_of(gc, struct pca953x_chip, gpio_chip);
++
++ ret = pca953x_read_reg(chip, PCA953X_DIRECTION, &reg_val);
++ if (ret == 0) {
++ if ( reg_val & (1u << off) )
++ return DIRECTION_IN;
++ return DIRECTION_OUT;
++ }
++ return DIRECTION_IN;
++}
++
+ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
+ {
+ struct pca953x_chip *chip;
+@@ -199,6 +217,7 @@ static void pca953x_setup_gpio(struct pc
+
+ gc->direction_input = pca953x_gpio_direction_input;
+ gc->direction_output = pca953x_gpio_direction_output;
++ gc->get_direction = pca953x_gpio_get_direction;
+ gc->get = pca953x_gpio_get_value;
+ gc->set = pca953x_gpio_set_value;
+ gc->to_irq = pca953x_gpio_to_irq;
+Index: linux-2.6.33/include/asm-generic/gpio.h
+===================================================================
+--- linux-2.6.33.orig/include/asm-generic/gpio.h
++++ linux-2.6.33/include/asm-generic/gpio.h
+@@ -101,6 +101,8 @@ struct gpio_chip {
+ char **names;
+ unsigned can_sleep:1;
+ unsigned exported:1;
++ enum gpio_direction (*get_direction)(struct gpio_chip *chip,
++ unsigned offset);
+ };
+
+ extern const char *gpiochip_is_requested(struct gpio_chip *chip,
+@@ -120,6 +122,7 @@ extern void gpio_free(unsigned gpio);
+
+ extern int gpio_direction_input(unsigned gpio);
+ extern int gpio_direction_output(unsigned gpio, int value);
++extern enum gpio_direction gpio_get_direction(unsigned gpio);
+
+ extern int gpio_get_value_cansleep(unsigned gpio);
+ extern void gpio_set_value_cansleep(unsigned gpio, int value);
+Index: linux-2.6.33/include/linux/gpio.h
+===================================================================
+--- linux-2.6.33.orig/include/linux/gpio.h
++++ linux-2.6.33/include/linux/gpio.h
+@@ -3,6 +3,11 @@
+
+ /* see Documentation/gpio.txt */
+
++enum gpio_direction {
++ DIRECTION_IN = 0,
++ DIRECTION_OUT = 1,
++};
++
+ #ifdef CONFIG_GENERIC_GPIO
+ #include <asm/gpio.h>
+
+@@ -126,6 +131,13 @@ static inline int irq_to_gpio(unsigned i
+ return -EINVAL;
+ }
+
++static inline enum gpio_direction gpio_get_direction(unsigned gpio)
++{
++ /* GPIO can never have been requested or set as {in,out}put */
++ WARN_ON(1);
++ return DIRECTION_IN;
++}
++
+ #endif
+
+ #endif /* __LINUX_GPIO_H */
+Index: linux-2.6.33/include/linux/ipc_module.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/include/linux/ipc_module.h
+@@ -0,0 +1,60 @@
++/*
++ * include/linux/ipc_module.h
++ *
++ * Copyright (C) 2009 Aava Mobile Oy
++ * Written by Mikko Kovanen <mikko.kovanen@aavamobile.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#ifndef IPC_MODULE_H
++#define IPC_MODULE_H
++
++#include <linux/types.h>
++
++#ifndef __IPC_DEFS_H__
++#define E_INVALID_CMD -249
++#define E_READ_USER_CMD -250
++#define E_READ_USER_DATA -251
++#define E_WRITE_USER_DATA -252
++#define E_PMIC_MALLOC -253
++
++#define MAX_PMICREGS 5
++#define MAX_PMIC_MOD_REGS 4
++
++struct pmicreg {
++ __u16 register_address;
++ __u8 value;
++};
++
++struct ipc_pmic_reg_data {
++ _Bool ioc;
++ struct pmicreg pmic_reg_data[MAX_PMICREGS];
++ __u8 num_entries;
++};
++#endif /* __IPC_DEFS_H__ */
++
++#define IPC_IOC_MAGIC 'a'
++
++
++#define IPC_IOC_PMIC_REG_READ _IOR(IPC_IOC_MAGIC, \
++ 0, \
++ struct ipc_pmic_reg_data)
++
++#define IPC_IOC_PMIC_REG_WRITE _IOW(IPC_IOC_MAGIC, \
++ 1, \
++ struct ipc_pmic_reg_data)
++
++#endif /* IPC_MODULE_H */
++
+Index: linux-2.6.33/drivers/input/keyboard/mrst_keypad.c
+===================================================================
+--- linux-2.6.33.orig/drivers/input/keyboard/mrst_keypad.c
++++ linux-2.6.33/drivers/input/keyboard/mrst_keypad.c
+@@ -40,6 +40,9 @@
+ #include <linux/device.h>
+ #include <linux/err.h>
+ #include <linux/gpio.h>
++/*jhuot start*/
++#include <asm/ipc_defs.h>
++/*jhuot end*/
+
+ /*
+ * Keypad Controller registers
+@@ -116,10 +119,10 @@ MODULE_DEVICE_TABLE(pci, keypad_pci_tbl)
+ #define keypad_writel(off, v) writel((v), keypad->mmio_base + (off))
+
+ #define MAX_MATRIX_KEY_NUM (8 * 8)
+-#define MAX_DIRECT_KEY_NUM (4)
++#define MAX_DIRECT_KEY_NUM (2)
+
+-#define MAX_MATRIX_KEY_ROWS (8)
+-#define MAX_MATRIX_KEY_COLS (8)
++#define MAX_MATRIX_KEY_ROWS (7)
++#define MAX_MATRIX_KEY_COLS (7)
+ #define DEBOUNCE_INTERVAL 100
+
+ #define KEY_HALFSHUTTER KEY_PROG1
+@@ -167,7 +170,7 @@ static unsigned int mrst_keycode_fn[MAX_
+
+ /* direct key map */
+ static unsigned int mrst_direct_keycode[MAX_DIRECT_KEY_NUM] = {
+- KEY_VOLUMEUP, KEY_VOLUMEDOWN, KEY_HALFSHUTTER, KEY_FULLSHUTTER,
++ KEY_VOLUMEUP, KEY_VOLUMEDOWN, //KEY_HALFSHUTTER, KEY_FULLSHUTTER,
+ };
+
+ struct mrst_keypad {
+@@ -430,6 +433,8 @@ scan:
+ if ((bits_changed & (1 << row)) == 0)
+ continue;
+
++ printk(KERN_INFO "BUTTONS: "
++ "report key row %d, col %d\n", row, col);
+ input_report_key(keypad->input_dev,
+ lookup_matrix_keycode(keypad, row, col),
+ new_state[col] & (1 << row));
+@@ -513,6 +518,8 @@ static void mrst_keypad_scan_direct(stru
+
+ for (i = 0; i < keypad->direct_key_num; i++) {
+ if (bits_changed & (1 << i)) {
++ printk(KERN_INFO "BUTTONS: "
++ "scan_direct %d\n", keypad->direct_key_map[i]);
+ input_report_key(keypad->input_dev,
+ keypad->direct_key_map[i],
+ (new_state & (1 << i)));
+@@ -528,10 +535,13 @@ static irqreturn_t mrst_keypad_irq_handl
+ struct mrst_keypad *keypad = dev_id;
+ unsigned long kpc = keypad_readl(KPC);
+
++ printk(KERN_INFO "BUTTONS: irq_handler, kpc %lu\n", kpc);
+ if (kpc & KPC_DI)
++ printk(KERN_INFO "BUTTONS: mrst_keypad_scan_direct\n");
+ mrst_keypad_scan_direct(keypad);
+
+ if (kpc & KPC_MI)
++ printk(KERN_INFO "BUTTONS: mrst_keypad_scan_matrix\n");
+ mrst_keypad_scan_matrix(keypad);
+
+ return IRQ_HANDLED;
+@@ -544,13 +554,47 @@ static int mrst_keypad_gpio_init(void)
+ MAX_MATRIX_KEY_COLS + MAX_DIRECT_KEY_NUM;
+
+ /* explicitely tell which pins have been occupied... */
++/*
+ for (i = KEYPAD_MATRIX_GPIO_IN_PIN; i < pins; i++, cnt++) {
+ err = gpio_request(i, NULL);
+ if (err) {
+ printk(KERN_ERR "GPIO pin %d failed to request.\n", i);
+ goto err_request;
+ }
+- }
++ }*/
++
++ for (i = 0; i < MAX_MATRIX_KEY_ROWS; i++){
++ err = gpio_request(KEYPAD_MATRIX_GPIO_IN_PIN + i,NULL);
++
++ if (err) {
++ printk(KERN_ERR "GPIO pin %d failed to request.\n", i);
++ goto err_request;
++ }
++
++ }
++
++ for (i = 0; i < MAX_MATRIX_KEY_COLS; i++)
++ {
++ err = gpio_request(KEYPAD_MATRIX_GPIO_OUT_PIN + i, NULL);
++ if (err) {
++ printk(KERN_ERR "GPIO pin %d failed to request.\n", i);
++ goto err_request;
++ }
++
++ }
++
++ for (i = 0; i < MAX_DIRECT_KEY_NUM; i++)
++ {
++ err = gpio_request(KEYPAD_DIRECT_GPIO_IN_PIN + i,NULL);
++
++ if (err) {
++ printk(KERN_ERR "GPIO pin %d failed to request.\n", i);
++ goto err_request;
++ }
++
++
++ }
++
+
+ for (i = 0; i < MAX_MATRIX_KEY_ROWS; i++)
+ gpio_direction_input(KEYPAD_MATRIX_GPIO_IN_PIN + i);
+@@ -642,6 +686,9 @@ static int __devinit mrst_keypad_probe(s
+ struct mrst_keypad *keypad;
+ struct input_dev *input_dev;
+ int error;
++/* jhuot start */
++ struct ipc_io_bus_master_regs *p_reg_data;
++/* jhuot end */
+
+ #ifndef MODULE
+ printk(KERN_INFO MRST_KEYPAD_DRIVER_NAME "\n");
+@@ -711,6 +758,18 @@ static int __devinit mrst_keypad_probe(s
+ goto failed_free_dev;
+ }
+
++
++/* jhuot start */
++ /* Enable 75 kOhm internal pull-ups for KBD_DKIN0 and KBD_DKIN1 */
++ /*bus: 0x4h, address: 0x20h, bits 0...3 */
++ p_reg_data = kzalloc(sizeof(struct ipc_io_bus_master_regs), GFP_KERNEL);
++ /*01 = W, 04 = bus, 20 = address*/
++ p_reg_data->ctrl_reg_addr = 0x01040020;
++ /*b3-b0 = 1010 (75kOhm pull-ups) = 0xAh*/
++ p_reg_data->ctrl_reg_data = 0xA;
++ ipc_program_io_bus_master(p_reg_data);
++/* jhuot end */
++
+ /* Register the input device */
+ error = input_register_device(input_dev);
+ if (error) {
+Index: linux-2.6.33/drivers/gpu/drm/mrst/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/gpu/drm/mrst/Kconfig
++++ linux-2.6.33/drivers/gpu/drm/mrst/Kconfig
+@@ -23,6 +23,20 @@ config IMG_DOES_NOT_SUPPORT_MENLOW
+ help
+ Choose Menlow
+
++config DRM_MRST_AAVA
++ bool "Aava platform specific MIPI display"
++ depends on DRM_MRST
++ default n
++ help
++ Choose Aava platform MIPI display, temp option
++
++config DRM_MRST_CDK
++ bool "Aava platform specific MIPI display"
++ depends on DRM_MRST && !DRM_MRST_AAVA
++ default y
++ help
++ Choose CDK
++
+ config PVR_RELEASE
+ string "Build IMG kernel services as release"
+ depends on DRM_MRST
+Index: linux-2.6.33/drivers/misc/Makefile
+===================================================================
+--- linux-2.6.33.orig/drivers/misc/Makefile
++++ linux-2.6.33/drivers/misc/Makefile
+@@ -21,7 +21,7 @@ obj-$(CONFIG_SGI_XP) += sgi-xp/
+ obj-$(CONFIG_SGI_GRU) += sgi-gru/
+ obj-$(CONFIG_CS5535_MFGPT) += cs5535-mfgpt.o
+ obj-$(CONFIG_HP_ILO) += hpilo.o
+-obj-$(CONFIG_MRST) += intel_mrst.o
++obj-$(CONFIG_X86_MRST) += intel_mrst.o
+ obj-$(CONFIG_ISL29003) += isl29003.o
+ obj-$(CONFIG_MRST_RAR_HANDLER) += memrar.o
+ memrar-y := memrar_allocator.o memrar_handler.o
+Index: linux-2.6.33/drivers/misc/intel_mrst.c
+===================================================================
+--- linux-2.6.33.orig/drivers/misc/intel_mrst.c
++++ linux-2.6.33/drivers/misc/intel_mrst.c
+@@ -112,6 +112,48 @@ static int intel_mrst_sdio_EVP_power_dow
+
+ static int intel_mrst_sdio_8688_power_up(void)
+ {
++/*ouljhuot start*/
++/*WLAN / BT power-up sequence:*/
++/*1. power (GPO4) & reset (GPO3) low*/
++/*2. power (GPO4) high*/
++/*3. reset (GPO3) high*/
++
++/*GPS power-up sequence:*/
++/*1. power (GPO1) & reset (GPO2) low*/
++/*2. VDD_IO and VDD_LP_PLLREG_IN high*/
++/*VDD_IO & VDD_LP_PLLREG_IN == VPMIC_1V8*/
++/*3. usleep(1) (tvddio_nreset min. 500ns)*/
++/*4. reset (GPO2) high*/
++/*5. VDD_COREREG_IN and VDD_RFREG_IN high*/
++ /*VDD_COREREG_IN == VWLAN_GPS_1V8 (GYMXIO)*/
++ /*VDD_RFREG_IN == VGPS_ANA_3V3 (GYMX33)*/
++/*6. power (GPO1) high*/
++/*7. msleep(1);*/
++ unsigned int temp = 0;
++
++ /* Register 0xf4 has 4 GPIO lines connected to the MRVL 8688 * IFX GPS:
++ * bit 4: WiFi PDn
++ * bit 3: WiFi RESETn
++ * bit 2: GPS RESET_N
++ * bit 1: GPS PD_N*/
++
++ /*WLAN POWER and RESET low*/
++ intel_mrst_pmic_read(0xf4, &temp);
++ temp &= ~0x18;
++ intel_mrst_pmic_write(0xf4, temp);
++/* msleep(1);*/
++
++ /*GPS RESET low & POWER low*/
++ intel_mrst_pmic_read(0xf4, &temp);
++ temp &= ~0x6;
++ intel_mrst_pmic_write(0xf4, temp);
++/* usleep(1);*/
++
++ msleep(1);
++ /*GPS RESET high*/
++ temp |= 0x4;
++ intel_mrst_pmic_write(0xf4, temp);
++/*ouljhuot end*/
+ intel_mrst_pmic_write(0x37, 0x3f); /* Set VDDQ for Marvell 8688 */
+ intel_mrst_pmic_write(0x4a, 0x3f); /* Set GYMXIOCNT for Marvell 8688 */
+ intel_mrst_pmic_write(0x4e, 0x3f); /* Set GYMX33CNT for Marvell 8688 */
+@@ -124,6 +166,22 @@ static int intel_mrst_sdio_8688_power_up
+ intel_mrst_pmic_write(0x4c, 0x27); /* Enable V1p8_VWYMXARF for
+ MRVL8688 */
+
++
++/*ouljhuot start*/
++ /*WLAN POWER high*/
++ temp |= 0x10;
++ intel_mrst_pmic_write(0xf4, temp);
++
++ /*WLAN RESET high*/
++ temp |= 0x8;
++ intel_mrst_pmic_write(0xf4, temp);
++
++ /*GPS POWER high*/
++ temp |= 0x2;
++ intel_mrst_pmic_write(0xf4, temp);
++/* msleep(16);*/
++/*ouljhuot end*/
++
+ return 0;
+ }
+
+@@ -153,6 +211,35 @@ static int intel_mrst_bringup_8688_sdio2
+ return 0;
+ }
+
++
++
++
++ /*ouljhuot start*/
++static int intel_mrst_sdio_gps_power_up(void)
++ {
++ unsigned int temp = 0;
++
++ /*GPS RESET low & POWER low*/
++ intel_mrst_pmic_read(0xf4, &temp);
++ temp &= ~0x6;
++ intel_mrst_pmic_write(0xf4, temp);
++ msleep(1);
++ /*GPS RESET high*/
++ temp |= 0x4;
++ intel_mrst_pmic_write(0xf4, temp);
++
++ intel_mrst_pmic_write(0x4a, 0x3f); /* Ensure GYMXIOCNT */
++ intel_mrst_pmic_write(0x4e, 0x3f); /* Ensure GYMX33CNT */
++
++ /*GPS POWER high*/
++ temp |= 0x2;
++ intel_mrst_pmic_write(0xf4, temp);
++ /* Wait to settle */
++ msleep(16);
++
++ return 0;
++ }
++
+ static int intel_mrst_bringup_EVP_sdio2_Option_spi(void)
+ {
+ unsigned int temp = 0;
+@@ -199,7 +286,10 @@ static int __init intel_mrst_module_init
+
+ printk(KERN_INFO "intel_mrst_module_init: bringing up power for "
+ "8688 WLAN on SDIO2 & IFX GPS over SPI...\n");
+- ret = intel_mrst_bringup_8688_sdio2();
++/*ouljhuot start*/
++ ret = intel_mrst_sdio_8688_power_up();
++/* ret = intel_mrst_sdio_gps_power_up();*/
++/*ouljhuot end*/
+
+ #endif /* CONFIG_8688_RC */
+
+Index: linux-2.6.33/drivers/hwmon/lis331dl.c
+===================================================================
+--- linux-2.6.33.orig/drivers/hwmon/lis331dl.c
++++ linux-2.6.33/drivers/hwmon/lis331dl.c
+@@ -45,6 +45,8 @@ MODULE_LICENSE("GPL v2");
+ #define ACCEL_NORMAL_MODE 0
+ #define ACCEL_MEMORY_REBOOT 1
+
++#define POS_READ_MAX_RETRY (5)
++
+ /* internal return values */
+
+ struct acclero_data {
+@@ -93,9 +95,24 @@ static ssize_t x_pos_show(struct device
+ {
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret_val;
++ int retry = 0;
+
++x_retry:
+ ret_val = i2c_smbus_read_byte_data(client, 0x29);
+- return sprintf(buf, "%d\n", ret_val);
++ if (ret_val == -ETIMEDOUT) {
++ dev_dbg(dev, "x pos read timed out, retry %d\n", retry);
++ retry++;
++ if (retry <= POS_READ_MAX_RETRY) {
++ msleep(10);
++ goto x_retry;
++ } else {
++ ret_val = 0;
++ dev_err(dev, "x pos read failed %d retries\n", retry);
++ }
++ }
++ /* ouljkorh, 09.11.2009, change start */
++ return sprintf(buf, "%d\n", (signed char)ret_val);
++ /* ouljkorh, 09.11.2009, change end */
+ }
+
+ static ssize_t y_pos_show(struct device *dev,
+@@ -103,9 +120,24 @@ static ssize_t y_pos_show(struct device
+ {
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret_val;
++ int retry = 0;
+
++y_retry:
+ ret_val = i2c_smbus_read_byte_data(client, 0x2B);
+- return sprintf(buf, "%d\n", ret_val);
++ if (ret_val == -ETIMEDOUT) {
++ dev_dbg(dev, "y pos read timed out, retry %d\n", retry);
++ retry++;
++ if (retry <= POS_READ_MAX_RETRY) {
++ msleep(10);
++ goto y_retry;
++ } else {
++ ret_val = 0;
++ dev_err(dev, "y pos read failed %d retries\n", retry);
++ }
++ }
++ /* ouljkorh, 09.11.2009, change start */
++ return sprintf(buf, "%d\n", (signed char)ret_val);
++ /* ouljkorh, 09.11.2009, change end */
+ }
+
+ static ssize_t z_pos_show(struct device *dev,
+@@ -113,9 +145,24 @@ static ssize_t z_pos_show(struct device
+ {
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret_val;
++ int retry = 0;
+
++z_retry:
+ ret_val = i2c_smbus_read_byte_data(client, 0x2D);
+- return sprintf(buf, "%d\n", ret_val);
++ if (ret_val == -ETIMEDOUT) {
++ dev_dbg(dev, "z pos read timed out, retry %d\n", retry);
++ retry++;
++ if (retry <= POS_READ_MAX_RETRY) {
++ msleep(10);
++ goto z_retry;
++ } else {
++ ret_val = 0;
++ dev_err(dev, "z pos read failed %d retries\n", retry);
++ }
++ }
++ /* ouljkorh, 09.11.2009, change start */
++ return sprintf(buf, "%d\n", (signed char)ret_val);
++ /* ouljkorh, 09.11.2009, change end */
+ }
+
+ static ssize_t xyz_pos_show(struct device *dev,
+@@ -123,11 +170,38 @@ static ssize_t xyz_pos_show(struct devic
+ {
+ int x, y, z;
+ struct i2c_client *client = to_i2c_client(dev);
++ int retry = 0;
+
++xyz_retry:
++ if (retry > POS_READ_MAX_RETRY) {
++ dev_err(dev, "xyz read retry failed\n");
++ x = y = z = 0;
++ return sprintf(buf, "(%d,%d,%d)\n", (signed char)x,
++ (signed char)y, (signed char)z);
++ }
++ retry++;
+ x = i2c_smbus_read_byte_data(client, 0x29);
++ if (x == -ETIMEDOUT) {
++ msleep(100);
++ goto xyz_retry;
++ }
++ msleep(100);
+ y = i2c_smbus_read_byte_data(client, 0x2B);
++ if (y == -ETIMEDOUT) {
++ msleep(100);
++ goto xyz_retry;
++ }
++ msleep(100);
+ z = i2c_smbus_read_byte_data(client, 0x2D);
+- return sprintf(buf, "(%d,%d,%d)\n", x, y, z);
++ if (z == -ETIMEDOUT) {
++ msleep(100);
++ goto xyz_retry;
++ }
++
++ /* ouljkorh, 09.11.2009, change start */
++ return sprintf(buf, "(%d,%d,%d)\n", (signed char)x,
++ (signed char)y, (signed char)z);
++ /* ouljkorh, 09.11.2009, change end */
+ }
+
+ static ssize_t data_rate_store(struct device *dev,
+Index: linux-2.6.33/drivers/usb/gadget/u_serial.c
+===================================================================
+--- linux-2.6.33.orig/drivers/usb/gadget/u_serial.c
++++ linux-2.6.33/drivers/usb/gadget/u_serial.c
+@@ -783,11 +783,6 @@ static int gs_open(struct tty_struct *tt
+ port->open_count = 1;
+ port->openclose = false;
+
+- /* low_latency means ldiscs work in tasklet context, without
+- * needing a workqueue schedule ... easier to keep up.
+- */
+- tty->low_latency = 1;
+-
+ /* if connected, start the I/O stream */
+ if (port->port_usb) {
+ struct gserial *gser = port->port_usb;
+Index: linux-2.6.33/drivers/i2c/busses/i2c-mrst.c
+===================================================================
+--- linux-2.6.33.orig/drivers/i2c/busses/i2c-mrst.c
++++ linux-2.6.33/drivers/i2c/busses/i2c-mrst.c
+@@ -37,7 +37,7 @@
+
+ #include "i2c-mrst.h"
+
+-#define MAX_T_POLL_COUNT 4000 /* FIXME */
++#define MAX_T_POLL_COUNT 8000 /* FIXME */
+ #define DEF_BAR 0
+ #define VERSION "Version 0.5"
+
+Index: linux-2.6.33/arch/x86/kernel/mrst.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/mrst.c
++++ linux-2.6.33/arch/x86/kernel/mrst.c
+@@ -23,6 +23,9 @@
+ #include <linux/input.h>
+ #include <linux/platform_device.h>
+ #include <linux/irq.h>
++/*jhuot, added for MAX3107 data*/
++#include <linux/spi/mrst_spi.h>
++
+
+ #include <asm/string.h>
+ #include <asm/setup.h>
+@@ -267,6 +270,27 @@ void __init x86_mrst_early_setup(void)
+ #define MRST_SPI2_CS_START 4
+ static struct langwell_pmic_gpio_platform_data pmic_gpio_pdata;
+
++#ifdef CONFIG_SERIAL_MAX3107
++static struct mrst_spi_chip spi_slave0 = {
++ .poll_mode = 1,
++ .enable_dma = 0,
++ .type = SPI_FRF_SPI,
++};
++
++static struct spi_board_info mrst_spi_board_info[] __initdata = {
++ {
++ /* the modalias must be the same as spi device driver name */
++ .modalias = "max3107", /* spi_driver name driving device */
++ .max_speed_hz = 3125000,/* default value */
++ .bus_num = 0, /* SPI0 */
++ .chip_select = 0, /* Framework chip select. */
++ .platform_data = NULL, /* fill later */
++ .controller_data = &spi_slave0,
++ .irq = 0x13d,
++ },
++};
++#endif
++
+ static int __init sfi_parse_spib(struct sfi_table_header *table)
+ {
+ struct sfi_table_simple *sb;
+@@ -290,31 +314,48 @@ static int __init sfi_parse_spib(struct
+ pr_info("Moorestown SPI devices info:\n");
+
+ for (i = 0, j = 0; i < num; i++, pentry++) {
+- strncpy(info[j].modalias, pentry->name, 16);
+- info[j].irq = pentry->irq_info;
+- info[j].bus_num = pentry->host_num;
+- info[j].chip_select = pentry->cs;
+- info[j].max_speed_hz = 3125000; /* hard coded */
+- if (info[i].chip_select >= MRST_SPI2_CS_START) {
+- /* these SPI2 devices are not exposed to system as PCI
+- * devices, but they have separate RTE entry in IOAPIC
+- * so we have to enable them one by one here
+- */
+- ioapic = mp_find_ioapic(info[j].irq);
+- irq_attr.ioapic = ioapic;
+- irq_attr.ioapic_pin = info[j].irq;
+- irq_attr.trigger = 1;
+- irq_attr.polarity = 1;
+- io_apic_set_pci_routing(NULL, info[j].irq,
++#ifdef CONFIG_SERIAL_MAX3107
++ if (j != 1) { /*other devices info*/
++#endif
++ strncpy(info[j].modalias, pentry->name, 16);
++ info[j].irq = pentry->irq_info;
++ info[j].bus_num = pentry->host_num;
++ info[j].chip_select = pentry->cs;
++ info[j].max_speed_hz = 3125000; /* hard coded */
++ if (info[i].chip_select >= MRST_SPI2_CS_START) {
++ /* these SPI2 devices are not exposed to system as PCI
++ * devices, but they have separate RTE entry in IOAPIC
++ * so we have to enable them one by one here
++ */
++ ioapic = mp_find_ioapic(info[j].irq);
++ irq_attr.ioapic = ioapic;
++ irq_attr.ioapic_pin = info[j].irq;
++ irq_attr.trigger = 1;
++ irq_attr.polarity = 1;
++ io_apic_set_pci_routing(NULL, info[j].irq,
+ &irq_attr);
+- }
+- info[j].platform_data = pentry->dev_info;
++ }
+
+- if (!strcmp(pentry->name, "pmic_gpio")) {
+- memcpy(&pmic_gpio_pdata, pentry->dev_info, 8);
+- pmic_gpio_pdata.gpiointr = 0xffffeff8;
+- info[j].platform_data = &pmic_gpio_pdata;
++ info[j].platform_data = pentry->dev_info;
++
++ if (!strcmp(pentry->name, "pmic_gpio")) {
++ memcpy(&pmic_gpio_pdata, pentry->dev_info, 8);
++ pmic_gpio_pdata.gpiointr = 0xffffeff8;
++ info[j].platform_data = &pmic_gpio_pdata;
++ }
++#ifdef CONFIG_SERIAL_MAX3107
++ } else { /*MAX3107 info*/
++ info[j] = mrst_spi_board_info[0];
++ }
++
++#endif
++ /* jhuot edit start: change GPS chip select from 2 to 3 */
++ if (info[j].bus_num == 0 && info[j].chip_select == 2) {
++ info[j].chip_select = 3;
++ } else if (info[j].bus_num == 0 && info[j].chip_select == 3) {
++ info[j].chip_select = 2;
+ }
++ /* jhuot edit end */
+ pr_info("info[%d]: name = %16s, irq = 0x%04x, bus = %d, "
+ "cs = %d\n", j, info[j].modalias, info[j].irq,
+ info[j].bus_num, info[j].chip_select);
+Index: linux-2.6.33/drivers/serial/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/serial/Kconfig
++++ linux-2.6.33/drivers/serial/Kconfig
+@@ -540,6 +540,21 @@ config SERIAL_S5PC100
+ help
+ Serial port support for the Samsung S5PC100 SoCs
+
++config SERIAL_MAX3107
++ tristate "MAX3107 support"
++ depends on SPI
++ select SERIAL_CORE
++ help
++ MAX3107 chip support
++
++config MAX3107_LOW_POWER
++ boolean "Enable very low power consumption scheme for Max3107"
++ default n
++ depends on SERIAL_MAX3107
++ help
++ Adds hardware suspend for MAX3107 instead of sleep/auto-sleep,
++ but causes longer latency in wake-up (re-initialization of the chip).
++
+ config SERIAL_MAX3100
+ tristate "MAX3100 support"
+ depends on SPI
+Index: linux-2.6.33/drivers/serial/Makefile
+===================================================================
+--- linux-2.6.33.orig/drivers/serial/Makefile
++++ linux-2.6.33/drivers/serial/Makefile
+@@ -46,6 +46,7 @@ obj-$(CONFIG_SERIAL_S3C24A0) += s3c24a0.
+ obj-$(CONFIG_SERIAL_S3C6400) += s3c6400.o
+ obj-$(CONFIG_SERIAL_S5PC100) += s3c6400.o
+ obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
++obj-$(CONFIG_SERIAL_MAX3107) += max3107.o
+ obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o
+ obj-$(CONFIG_SERIAL_MUX) += mux.o
+ obj-$(CONFIG_SERIAL_68328) += 68328serial.o
+Index: linux-2.6.33/drivers/serial/max3107.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/serial/max3107.c
+@@ -0,0 +1,1484 @@
++/*
++ * max3107.c - spi uart protocol driver for Maxim 3107
++ * Based on max3100.c
++ * by Christian Pellegrin <chripell@evolware.org>
++ * and max3110.c
++ * by Feng Tang <feng.tang@intel.com>
++ *
++ * Copyright (C) Aavamobile 2009
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++
++#include <linux/delay.h>
++#include <linux/device.h>
++#include <linux/serial_core.h>
++#include <linux/serial.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/mrst_spi.h>
++#include <linux/freezer.h>
++#include <linux/platform_device.h>
++#include <linux/gpio.h>
++#include <linux/serial_max3107.h>
++
++/* Debug trace definitions */
++#define DBG_LEVEL 0
++
++#if (DBG_LEVEL > 0)
++#define DBG_TRACE(format,args...) printk(KERN_ERR "%s: " format, \
++ __FUNCTION__ , ## args)
++#else
++#define DBG_TRACE(format,args...)
++#endif
++
++#if (DBG_LEVEL > 1)
++#define DBG_TRACE_SPI_DATA
++#endif
++
++struct max3107_port {
++ /* UART port structure */
++ struct uart_port port;
++
++ /* SPI device structure */
++ struct spi_device *spi;
++
++ /* GPIO chip stucture */
++ struct gpio_chip chip;
++
++ /* Workqueue that does all the magic */
++ struct workqueue_struct *workqueue;
++ struct work_struct work;
++
++ /* Lock for shared data */
++ spinlock_t data_lock;
++
++ /* Device configuration */
++ int ext_clk; /* 1 if external clock used */
++ int loopback; /* Current loopback mode state */
++ int baud; /* Current baud rate */
++
++ /* State flags */
++ int suspended; /* Indicates suspend mode */
++ int tx_fifo_empty; /* Flag for TX FIFO state */
++ int rx_enabled; /* Flag for receiver state */
++ int tx_enabled; /* Flag for transmitter state */
++
++ /* Shared data */
++ u16 irqen_reg; /* Current IRQ enable register value */
++ u16 mode1_reg; /* Current mode1 register value*/
++ int mode1_commit; /* Flag for setting new mode1 register value */
++ u16 lcr_reg; /* Current LCR register value */
++ int lcr_commit; /* Flag for setting new LCR register value */
++ u32 brg_cfg; /* Current Baud rate generator config */
++ int brg_commit; /* Flag for setting new baud rate generator
++ * config
++ */
++
++ int handle_irq; /* Indicates that IRQ should be handled */
++};
++
++/* Platform data structure */
++struct max3107_plat {
++ /* Loopback mode enable */
++ int loopback;
++ /* External clock enable */
++ int ext_clk;
++ /* HW suspend function */
++ void (*max3107_hw_suspend) (struct max3107_port *s, int suspend);
++ /* Polling mode enable */
++ int polled_mode;
++ /* Polling period if polling mode enabled */
++ int poll_time;
++};
++
++
++/* Perform SPI transfer for write/read of device register(s) */
++static int max3107_rw(struct max3107_port *s, u8 *tx, u8 *rx, int len)
++{
++ struct spi_message spi_msg;
++ struct spi_transfer spi_xfer;
++
++ DBG_TRACE("enter\n");
++
++ /* Initialize SPI ,message */
++ spi_message_init(&spi_msg);
++
++ /* Initialize SPI transfer */
++ memset(&spi_xfer, 0, sizeof spi_xfer);
++ spi_xfer.len = len;
++ spi_xfer.tx_buf = tx;
++ spi_xfer.rx_buf = rx;
++ spi_xfer.speed_hz = MAX3107_SPI_SPEED;
++
++ /* Add SPI transfer to SPI message */
++ spi_message_add_tail(&spi_xfer, &spi_msg);
++
++#ifdef DBG_TRACE_SPI_DATA
++ {
++ int i;
++ printk("tx len %d:\n", spi_xfer.len);
++ for (i = 0 ; i < spi_xfer.len && i < 32 ; i++) {
++ printk(" %x", ((u8*)spi_xfer.tx_buf)[i]);
++ }
++ printk("\n");
++ }
++#endif
++
++ /* Perform synchronous SPI transfer */
++ if (spi_sync(s->spi, &spi_msg)) {
++ dev_err(&s->spi->dev, "spi_sync failure\n");
++ return -EIO;
++ }
++
++#ifdef DBG_TRACE_SPI_DATA
++ if (spi_xfer.rx_buf) {
++ int i;
++ printk("rx len %d:\n", spi_xfer.len);
++ for (i = 0 ; i < spi_xfer.len && i < 32 ; i++) {
++ printk(" %x", ((u8*)spi_xfer.rx_buf)[i]);
++ }
++ printk("\n");
++ }
++#endif
++ return 0;
++}
++
++/* Puts received data to circular buffer */
++static void put_data_to_circ_buf(struct max3107_port *s, unsigned char *data,
++ int len)
++{
++ struct uart_port *port = &s->port;
++ struct tty_struct *tty;
++
++ DBG_TRACE("enter\n");
++
++ if (!port->state) {
++ /* UART is not open */
++ dev_warn(&s->spi->dev, "UART is closed\n");
++ return;
++ }
++
++ tty = port->state->port.tty;
++ if (!tty) {
++ /* TTY is not open */
++ dev_warn(&s->spi->dev, "TTY is closed\n");
++ return;
++ }
++
++ /* Insert received data */
++ tty_insert_flip_string(tty, data, len);
++ /* Update RX counter */
++ port->icount.rx += len;
++}
++
++/* Handle data receiving */
++static void max3107_handlerx(struct max3107_port *s, u16 rxlvl)
++{
++ int i;
++ int j;
++ int len; /* SPI transfer buffer length */
++ u16 buf[MAX3107_RX_FIFO_SIZE+2]; /* SPI transfer buffer
++ * +2 for RX FIFO interrupt
++ * disabling and RX level query
++ */
++ u8 valid_str[MAX3107_RX_FIFO_SIZE];
++
++ DBG_TRACE("enter\n");
++
++ if (!s->rx_enabled) {
++ /* RX is disabled */
++ return;
++ }
++
++ if (rxlvl == 0) {
++ /* RX fifo is empty */
++ return;
++ } else if (rxlvl >= MAX3107_RX_FIFO_SIZE) {
++ dev_warn(&s->spi->dev, "Possible RX FIFO overrun %d\n", rxlvl);
++ /* Ensure sanity of RX level */
++ rxlvl = MAX3107_RX_FIFO_SIZE;
++ }
++
++ while (rxlvl) {
++ DBG_TRACE("rxlvl %d\n", rxlvl);
++ /* Clear buffer */
++ memset(buf, 0, sizeof(buf));
++ len = 0;
++ if (s->irqen_reg & MAX3107_IRQ_RXFIFO_BIT) {
++ /* First disable RX FIFO interrupt */
++ DBG_TRACE("Disabling RX INT\n");
++ buf[0] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG);
++ spin_lock(&s->data_lock);
++ s->irqen_reg &= ~MAX3107_IRQ_RXFIFO_BIT;
++ buf[0] |= s->irqen_reg;
++ spin_unlock(&s->data_lock);
++ len++;
++ }
++ /* Just increase the length by amount of words in FIFO since
++ * buffer was zeroed and SPI transfer of 0x0000 means reading
++ * from RX FIFO
++ */
++ len += rxlvl;
++ /* Append RX level query */
++ buf[len] = MAX3107_RXFIFOLVL_REG;
++ len++;
++
++ /* Perform the SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, len*2)) {
++ dev_err(&s->spi->dev,
++ "SPI transfer for RX handling failed\n");
++ return;
++ }
++
++ /* Skip RX FIFO interrupt disabling word if it was added */
++ j = ((len-1)-rxlvl);
++ /* Read received words */
++ for (i = 0; i < rxlvl; i++, j++) {
++ valid_str[i] = (u8)buf[j];
++ }
++ put_data_to_circ_buf(s, valid_str, rxlvl);
++ /* Get new RX level */
++ rxlvl = (buf[len-1] & MAX3107_SPI_RX_DATA_MASK);
++ }
++
++ if (s->rx_enabled) {
++ /* RX still enabled, re-enable RX FIFO interrupt */
++ DBG_TRACE("Enabling RX INT\n");
++ buf[0] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG);
++ spin_lock(&s->data_lock);
++ s->irqen_reg |= MAX3107_IRQ_RXFIFO_BIT;
++ buf[0] |= s->irqen_reg;
++ spin_unlock(&s->data_lock);
++ if (max3107_rw(s, (u8 *)buf, NULL, 2)) {
++ dev_err(&s->spi->dev,
++ "RX FIFO interrupt enabling failed\n");
++ }
++ }
++
++ /* Push the received data to receivers */
++ tty_flip_buffer_push(s->port.state->port.tty);
++}
++
++
++/* Handle data sending */
++static void max3107_handletx(struct max3107_port *s)
++{
++ struct circ_buf *xmit = &s->port.state->xmit;
++ int i;
++ int len; /* SPI transfer buffer length */
++ u16 buf[MAX3107_TX_FIFO_SIZE+3]; /* SPI transfer buffer
++ * +3 for TX FIFO empty
++ * interrupt disabling and
++ * enabling and TX enabling
++ */
++
++ DBG_TRACE("enter\n");
++
++ if (!s->tx_fifo_empty) {
++ /* Don't send more data before previous data is sent */
++ return;
++ }
++
++ if (uart_circ_empty(xmit) || uart_tx_stopped(&s->port)) {
++ /* No data to send or TX is stopped */
++ return;
++ }
++
++ /* Get length of data pending in circular buffer */
++ len = uart_circ_chars_pending(xmit);
++ if (len) {
++ /* Limit to size of TX FIFO */
++ if (len > MAX3107_TX_FIFO_SIZE)
++ len = MAX3107_TX_FIFO_SIZE;
++
++ DBG_TRACE("txlen %d\n", len);
++
++ /* Update TX counter */
++ s->port.icount.tx += len;
++
++ /* TX FIFO will no longer be empty */
++ s->tx_fifo_empty = 0;
++
++ i = 0;
++ if (s->irqen_reg & MAX3107_IRQ_TXEMPTY_BIT) {
++ /* First disable TX empty interrupt */
++ DBG_TRACE("Disabling TE INT\n");
++ buf[i] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG);
++ spin_lock(&s->data_lock);
++ s->irqen_reg &= ~MAX3107_IRQ_TXEMPTY_BIT;
++ buf[i] |= s->irqen_reg;
++ spin_unlock(&s->data_lock);
++ i++;
++ len++;
++ }
++
++ /* Add data to send */
++ for ( ; i < len ; i++ ) {
++ buf[i] = (MAX3107_WRITE_BIT | MAX3107_THR_REG);
++ buf[i] |= ((u16)xmit->buf[xmit->tail] &
++ MAX3107_SPI_TX_DATA_MASK);
++ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
++ }
++
++ if (!(s->irqen_reg & MAX3107_IRQ_TXEMPTY_BIT)) {
++ /* Enable TX empty interrupt */
++ DBG_TRACE("Enabling TE INT\n");
++ buf[i] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG);
++ spin_lock(&s->data_lock);
++ s->irqen_reg |= MAX3107_IRQ_TXEMPTY_BIT;
++ buf[i] |= s->irqen_reg;
++ spin_unlock(&s->data_lock);
++ i++;
++ len++;
++ }
++ if (!s->tx_enabled) {
++ /* Enable TX */
++ DBG_TRACE("Enable TX\n");
++ buf[i] = (MAX3107_WRITE_BIT | MAX3107_MODE1_REG);
++ spin_lock(&s->data_lock);
++ s->mode1_reg &= ~MAX3107_MODE1_TXDIS_BIT;
++ buf[i] |= s->mode1_reg;
++ spin_unlock(&s->data_lock);
++ s->tx_enabled = 1;
++ i++;
++ len++;
++ }
++
++ /* Perform the SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, NULL, len*2)) {
++ dev_err(&s->spi->dev,
++ "SPI transfer for TX handling failed\n");
++ return;
++ }
++ }
++
++ /* Indicate wake up if circular buffer is getting low on data */
++ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
++ uart_write_wakeup(&s->port);
++
++}
++
++/* Handle interrupts
++ * Also reads and returns current RX FIFO level
++ */
++static u16 handle_interrupt(struct max3107_port *s)
++{
++ u16 buf[4]; /* Buffer for SPI transfers */
++ u8 irq_status;
++ u16 rx_level;
++
++ DBG_TRACE("enter\n");
++
++ /* Read IRQ status register */
++ buf[0] = MAX3107_IRQSTS_REG;
++ /* Read status IRQ status register */
++ buf[1] = MAX3107_STS_IRQSTS_REG;
++ /* Read LSR IRQ status register */
++ buf[2] = MAX3107_LSR_IRQSTS_REG;
++ /* Query RX level */
++ buf[3] = MAX3107_RXFIFOLVL_REG;
++
++ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 8)) {
++ dev_err(&s->spi->dev,
++ "SPI transfer for interrupt handling failed\n");
++ return 0;
++ }
++
++ irq_status = (u8)buf[0];
++ DBG_TRACE("IRQSTS %x\n", irq_status);
++ rx_level = (buf[3] & MAX3107_SPI_RX_DATA_MASK);
++
++ if (irq_status & MAX3107_IRQ_LSR_BIT) {
++ /* LSR interrupt */
++ if ( buf[2] & MAX3107_LSR_RXTO_BIT ) {
++ /* RX timeout interrupt,
++ * handled by normal RX handling
++ */
++ DBG_TRACE("RX TO INT\n");
++ }
++ }
++
++ if (irq_status & MAX3107_IRQ_TXEMPTY_BIT) {
++ /* Tx empty interrupt,
++ * disable TX and set tx_fifo_empty flag
++ */
++ DBG_TRACE("TE INT, disabling TX\n");
++ buf[0] = (MAX3107_WRITE_BIT | MAX3107_MODE1_REG);
++ spin_lock(&s->data_lock);
++ s->mode1_reg |= MAX3107_MODE1_TXDIS_BIT;
++ buf[0] |= s->mode1_reg;
++ spin_unlock(&s->data_lock);
++ if (max3107_rw(s, (u8 *)buf, NULL, 2))
++ dev_err(&s->spi->dev,
++ "SPI transfer for TX disabling failed\n");
++ s->tx_enabled = 0;
++ s->tx_fifo_empty = 1;
++ }
++
++ if (irq_status & MAX3107_IRQ_RXFIFO_BIT) {
++ /* RX FIFO interrupt,
++ * handled by normal RX handling
++ */
++ DBG_TRACE("RFIFO INT\n");
++ }
++
++ /* Return RX level */
++ return rx_level;
++}
++
++/* Trigger work thread*/
++static void max3107_dowork(struct max3107_port *s)
++{
++ if (!work_pending(&s->work) && !freezing(current) && !s->suspended)
++ queue_work(s->workqueue, &s->work);
++}
++
++/* Work thread */
++static void max3107_work(struct work_struct *w)
++{
++ struct max3107_port *s = container_of(w, struct max3107_port, work);
++ u16 rxlvl = 0;
++ int len; /* SPI transfer buffer length */
++ u16 buf[5]; /* Buffer for SPI transfers */
++
++ DBG_TRACE("enter\n");
++
++ /* Start by reading current RX FIFO level */
++ buf[0] = MAX3107_RXFIFOLVL_REG;
++ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) {
++ dev_err(&s->spi->dev,
++ "SPI transfer for RX level query failed\n");
++ rxlvl = 0;
++ } else {
++ rxlvl = (buf[0] & MAX3107_SPI_RX_DATA_MASK);
++ }
++
++ do {
++ DBG_TRACE("rxlvl %d\n", rxlvl);
++
++ /* Handle RX */
++ max3107_handlerx(s, rxlvl);
++ rxlvl = 0;
++
++ if (s->handle_irq) {
++ /* Handle pending interrupts
++ * We also get new RX FIFO level since new data may
++ * have been received while pushing received data to
++ * receivers
++ */
++ s->handle_irq = 0;
++ rxlvl = handle_interrupt(s);
++ }
++
++ /* Handle TX */
++ max3107_handletx(s);
++
++ /* Handle configuration changes */
++ len = 0;
++ spin_lock(&s->data_lock);
++ if (s->mode1_commit) {
++ DBG_TRACE("mode1_commit\n");
++ buf[len] = (MAX3107_WRITE_BIT | MAX3107_MODE1_REG);
++ buf[len++] |= s->mode1_reg;
++ s->mode1_commit = 0;
++ }
++ if (s->lcr_commit) {
++ DBG_TRACE("lcr_commit\n");
++ buf[len] = (MAX3107_WRITE_BIT | MAX3107_LCR_REG);
++ buf[len++] |= s->lcr_reg;
++ s->lcr_commit = 0;
++ }
++ if (s->brg_commit) {
++ DBG_TRACE("brg_commit\n");
++ buf[len] = (MAX3107_WRITE_BIT | MAX3107_BRGDIVMSB_REG);
++ buf[len++] |= ((s->brg_cfg >> 16) &
++ MAX3107_SPI_TX_DATA_MASK);
++ buf[len] = (MAX3107_WRITE_BIT | MAX3107_BRGDIVLSB_REG);
++ buf[len++] |= ((s->brg_cfg >> 8) &
++ MAX3107_SPI_TX_DATA_MASK);
++ buf[len] = (MAX3107_WRITE_BIT | MAX3107_BRGCFG_REG);
++ buf[len++] |= ((s->brg_cfg) & 0xff);
++ s->brg_commit = 0;
++ }
++ spin_unlock(&s->data_lock);
++
++ if (len > 0) {
++ if (max3107_rw(s, (u8 *)buf, NULL, len*2))
++ dev_err(&s->spi->dev,
++ "SPI transfer for config failed\n");
++ }
++
++ /* Reloop if interrupt handling indicated data in RX FIFO */
++ } while (rxlvl);
++
++}
++
++/* Set sleep mode */
++static void max3107_set_sleep(struct max3107_port *s, int mode)
++{
++ u16 buf[1]; /* Buffer for SPI transfer */
++
++ DBG_TRACE("enter, mode %d\n", mode);
++
++ buf[0] = (MAX3107_WRITE_BIT | MAX3107_MODE1_REG);
++ spin_lock(&s->data_lock);
++ switch (mode) {
++ case MAX3107_DISABLE_FORCED_SLEEP:
++ s->mode1_reg &= ~MAX3107_MODE1_FORCESLEEP_BIT;
++ break;
++ case MAX3107_ENABLE_FORCED_SLEEP:
++ s->mode1_reg |= MAX3107_MODE1_FORCESLEEP_BIT;
++ break;
++ case MAX3107_DISABLE_AUTOSLEEP:
++ s->mode1_reg &= ~MAX3107_MODE1_AUTOSLEEP_BIT;
++ break;
++ case MAX3107_ENABLE_AUTOSLEEP:
++ s->mode1_reg |= MAX3107_MODE1_AUTOSLEEP_BIT;
++ break;
++ default:
++ spin_unlock(&s->data_lock);
++ dev_warn(&s->spi->dev, "invalid sleep mode\n");
++ return;
++ }
++ buf[0] |= s->mode1_reg;
++ spin_unlock(&s->data_lock);
++
++ if (max3107_rw(s, (u8 *)buf, NULL, 2))
++ dev_err(&s->spi->dev, "SPI transfer for sleep mode failed\n");
++
++ if (mode == MAX3107_DISABLE_AUTOSLEEP ||
++ mode == MAX3107_DISABLE_FORCED_SLEEP ) {
++ msleep(MAX3107_WAKEUP_DELAY);
++ }
++}
++
++/* Perform full register initialization */
++static void max3107_register_init(struct max3107_port *s)
++{
++ int len = 0; /* SPI transfer buffer length */
++ u16 buf[11]; /* Buffer for SPI transfers */
++
++ DBG_TRACE("enter\n");
++
++ /* 1. Configure baud rate, 9600 as default */
++ s->baud = 9600;
++ if (s->ext_clk)
++ s->brg_cfg = MAX3107_BRG_B9600;
++ else
++ s->brg_cfg = MAX3107_BRG_IB9600;
++ buf[len] = (MAX3107_WRITE_BIT | MAX3107_BRGDIVMSB_REG);
++ buf[len++] |= ((s->brg_cfg >> 16) & MAX3107_SPI_TX_DATA_MASK);
++ buf[len] = (MAX3107_WRITE_BIT | MAX3107_BRGDIVLSB_REG);
++ buf[len++] |= ((s->brg_cfg >> 8) & MAX3107_SPI_TX_DATA_MASK);
++ buf[len] = (MAX3107_WRITE_BIT | MAX3107_BRGCFG_REG);
++ buf[len++] |= ((s->brg_cfg) & 0xff);
++
++ /* 2. Configure LCR register, 8N1 mode by default */
++ s->lcr_reg = MAX3107_LCR_WORD_LEN_8;
++ buf[len] = (MAX3107_WRITE_BIT | MAX3107_LCR_REG);
++ buf[len++] |= s->lcr_reg;
++
++ /* 3. Configure MODE 1 register */
++ s->mode1_reg = 0;
++ /* Enable IRQ pin */
++ s->mode1_reg |= MAX3107_MODE1_IRQSEL_BIT;
++ /* Disable TX */
++ s->mode1_reg |= MAX3107_MODE1_TXDIS_BIT;
++ s->tx_enabled = 0;
++ /* RX is enabled */
++ s->rx_enabled = 1;
++ buf[len] = (MAX3107_WRITE_BIT | MAX3107_MODE1_REG);
++ buf[len++] |= s->mode1_reg;
++
++ /* 4. Configure MODE 2 register */
++ buf[len] = (MAX3107_WRITE_BIT | MAX3107_MODE2_REG);
++ if (s->loopback) {
++ /* Enable loopback */
++ buf[len] |= MAX3107_MODE2_LOOPBACK_BIT;
++ }
++ /* Reset FIFOs */
++ buf[len++] |= MAX3107_MODE2_FIFORST_BIT;
++ s->tx_fifo_empty = 1;
++
++ /* 5. Configure FIFO trigger level register */
++ buf[len] = (MAX3107_WRITE_BIT | MAX3107_FIFOTRIGLVL_REG);
++ /* RX FIFO trigger for 16 words, TX FIFO trigger not used */
++ buf[len++] |= (MAX3107_FIFOTRIGLVL_RX(16) | MAX3107_FIFOTRIGLVL_TX(0));
++
++ /* 6. Configure flow control levels */
++ buf[len] = (MAX3107_WRITE_BIT | MAX3107_FLOWLVL_REG);
++ /* Flow control halt level 96, resume level 48 */
++ buf[len++] |= (MAX3107_FLOWLVL_RES(48) | MAX3107_FLOWLVL_HALT(96));
++
++ /* 7. Configure flow control */
++ buf[len] = (MAX3107_WRITE_BIT | MAX3107_FLOWCTRL_REG);
++ /* Enable auto CTS and auto RTS flow control */
++ buf[len++] |= (MAX3107_FLOWCTRL_AUTOCTS_BIT |
++ MAX3107_FLOWCTRL_AUTORTS_BIT);
++
++ /* 8. Configure RX timeout register */
++ buf[len] = (MAX3107_WRITE_BIT | MAX3107_RXTO_REG);
++ /* Timeout after 48 character intervals */
++ buf[len++] |= 0x0030;
++
++ /* 9. Configure LSR interrupt enable register */
++ buf[len] = (MAX3107_WRITE_BIT | MAX3107_LSR_IRQEN_REG);
++ /* Enable RX timeout interrupt */
++ buf[len++] |= MAX3107_LSR_RXTO_BIT;
++
++ /* Perform SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, NULL, len*2))
++ dev_err(&s->spi->dev, "SPI transfer for init failed\n");
++
++ len = 0;
++ /* 10. Clear IRQ status register by reading it */
++ buf[len++] = MAX3107_IRQSTS_REG;
++
++ /* 11. Configure interrupt enable register */
++ /* Enable LSR interrupt */
++ s->irqen_reg = MAX3107_IRQ_LSR_BIT;
++ /* Enable RX FIFO interrupt */
++ s->irqen_reg |= MAX3107_IRQ_RXFIFO_BIT;
++ buf[len] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG);
++ buf[len++] |= s->irqen_reg;
++
++ /* 12. Clear FIFO reset that was set in step 6 */
++ buf[len] = (MAX3107_WRITE_BIT | MAX3107_MODE2_REG);
++ if (s->loopback) {
++ /* Keep loopback enabled */
++ buf[len] |= MAX3107_MODE2_LOOPBACK_BIT;
++ }
++ buf[len++] |= 0x0000;
++
++ /* Perform SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, len*2))
++ dev_err(&s->spi->dev, "SPI transfer for init failed\n");
++
++}
++
++/* IRQ handler */
++static irqreturn_t max3107_irq(int irqno, void *dev_id)
++{
++ struct max3107_port *s = dev_id;
++
++ if (irqno != s->spi->irq) {
++ /* Unexpected IRQ */
++ return IRQ_NONE;
++ }
++
++ /* Indicate irq */
++ s->handle_irq = 1;
++
++ /* Trigger work thread */
++ max3107_dowork(s);
++
++ return IRQ_HANDLED;
++}
++
++/* HW suspension function
++ *
++ * Currently autosleep is used to decrease current consumption, alternative
++ * approach would be to set the chip to reset mode if UART is not being
++ * used but that would mess the GPIOs
++ *
++ */
++static void max3107_hw_susp(struct max3107_port *s, int suspend)
++{
++ DBG_TRACE("enter, suspend %d\n", suspend);
++
++ if (suspend) {
++ /* Suspend requested,
++ * enable autosleep to decrease current consumption
++ */
++ s->suspended = 1;
++ max3107_set_sleep(s, MAX3107_ENABLE_AUTOSLEEP);
++ } else {
++ /* Resume requested,
++ * disable autosleep
++ */
++ s->suspended = 0;
++ max3107_set_sleep(s, MAX3107_DISABLE_AUTOSLEEP);
++ }
++}
++
++/* Modem status IRQ enabling */
++static void max3107_enable_ms(struct uart_port *port)
++{
++ /* Modem status not supported */
++}
++
++/* Data send function */
++static void max3107_start_tx(struct uart_port *port)
++{
++ struct max3107_port *s = container_of(port, struct max3107_port, port);
++
++ DBG_TRACE("enter\n");
++
++ /* Trigger work thread for sending data */
++ max3107_dowork(s);
++}
++
++/* Function for checking that there is no pending transfers */
++static unsigned int max3107_tx_empty(struct uart_port *port)
++{
++ struct max3107_port *s = container_of(port, struct max3107_port, port);
++
++ DBG_TRACE("returning %d\n",
++ (s->tx_fifo_empty && uart_circ_empty(&s->port.state->xmit)));
++ return (s->tx_fifo_empty && uart_circ_empty(&s->port.state->xmit));
++}
++
++/* Function for stopping RX */
++static void max3107_stop_rx(struct uart_port *port)
++{
++ struct max3107_port *s = container_of(port, struct max3107_port, port);
++
++ DBG_TRACE("enter\n");
++
++ /* Set RX disabled in MODE 1 register */
++ spin_lock(&s->data_lock);
++ s->mode1_reg |= MAX3107_MODE1_RXDIS_BIT;
++ s->mode1_commit = 1;
++ spin_unlock(&s->data_lock);
++ /* Set RX disabled */
++ s->rx_enabled = 0;
++ /* Trigger work thread for doing the actual configuration change */
++ max3107_dowork(s);
++}
++
++/* Function for returning control pin states */
++static unsigned int max3107_get_mctrl(struct uart_port *port)
++{
++ /* DCD and DSR are not wired and CTS/RTS is handled automatically
++ * so just indicate DSR and CAR asserted
++ */
++ return (TIOCM_DSR | TIOCM_CAR);
++}
++
++/* Function for setting control pin states */
++static void max3107_set_mctrl(struct uart_port *port, unsigned int mctrl)
++{
++ /* DCD and DSR are not wired and CTS/RTS is hadnled automatically
++ * so do nothing
++ */
++}
++
++/* Function for configuring UART parameters */
++static void max3107_set_termios(struct uart_port *port,
++ struct ktermios *termios,
++ struct ktermios *old)
++{
++ struct max3107_port *s = container_of(port, struct max3107_port, port);
++ struct tty_struct *tty;
++ int baud;
++ u16 new_lcr = 0;
++ u32 new_brg = 0;
++
++ DBG_TRACE("enter\n");
++
++ if (!port->state) {
++ /* UART is not open */
++ dev_warn(&s->spi->dev, "UART is closed\n");
++ return;
++ }
++
++ tty = port->state->port.tty;
++ if (!tty) {
++ /* TTY is not open */
++ dev_warn(&s->spi->dev, "TTY is closed\n");
++ return;
++ }
++
++ if (old) {
++ if ((termios->c_cflag == old->c_cflag) &&
++ (RELEVANT_IFLAG(termios->c_iflag) ==
++ RELEVANT_IFLAG(old->c_iflag))) {
++ /* Nothing relevant is changing */
++ return;
++ }
++ }
++
++ /* Get new LCR register values */
++ /* Word size */
++ if ((termios->c_cflag & CSIZE) == CS7)
++ new_lcr |= MAX3107_LCR_WORD_LEN_7;
++ else
++ new_lcr |= MAX3107_LCR_WORD_LEN_8;
++
++ /* Parity */
++ if (termios->c_cflag & PARENB) {
++ new_lcr |= MAX3107_LCR_PARITY_BIT;
++ if (!(termios->c_cflag & PARODD))
++ new_lcr |= MAX3107_LCR_EVENPARITY_BIT;
++ }
++
++ /* Stop bits */
++ if (termios->c_cflag & CSTOPB) {
++ /* 2 stop bits */
++ new_lcr |= MAX3107_LCR_STOPLEN_BIT;
++ }
++
++ /* Mask termios capabilities we don't support */
++ termios->c_cflag &= ~CMSPAR;
++
++ /* Set status ignore mask */
++ s->port.ignore_status_mask = 0;
++ if (termios->c_iflag & IGNPAR)
++ s->port.ignore_status_mask |= MAX3107_ALL_ERRORS;
++
++ /* Set low latency to immediately handle pushed data */
++ s->port.state->port.tty->low_latency = 1;
++
++ /* Get new baud rate generator configuration */
++ baud = tty_get_baud_rate(tty);
++ switch (baud) {
++ case 300:
++ new_brg = s->ext_clk ? MAX3107_BRG_B300 : MAX3107_BRG_IB300;
++ break;
++ case 600:
++ new_brg = s->ext_clk ? MAX3107_BRG_B600 : MAX3107_BRG_IB600;
++ break;
++ case 1200:
++ new_brg = s->ext_clk ? MAX3107_BRG_B1200 : MAX3107_BRG_IB1200;
++ break;
++ case 2400:
++ new_brg = s->ext_clk ? MAX3107_BRG_B2400 : MAX3107_BRG_IB2400;
++ break;
++ case 4800:
++ new_brg = s->ext_clk ? MAX3107_BRG_B4800 : MAX3107_BRG_IB4800;
++ break;
++ case 9600:
++ new_brg = s->ext_clk ? MAX3107_BRG_B9600 : MAX3107_BRG_IB9600;
++ break;
++ case 19200:
++ new_brg = s->ext_clk ? MAX3107_BRG_B19200 : MAX3107_BRG_IB19200;
++ break;
++ case 38400:
++ new_brg = s->ext_clk ? MAX3107_BRG_B38400 : MAX3107_BRG_IB38400;
++ break;
++ case 57600:
++ new_brg = s->ext_clk ? MAX3107_BRG_B57600 : MAX3107_BRG_IB57600;
++ break;
++ case 115200:
++ new_brg = s->ext_clk ? MAX3107_BRG_B115200 : MAX3107_BRG_IB115200;
++ break;
++ case 230400:
++ new_brg = s->ext_clk ? MAX3107_BRG_B230400 : MAX3107_BRG_IB230400;
++ break;
++ case 460800:
++ new_brg = s->ext_clk ? MAX3107_BRG_B460800 : MAX3107_BRG_IB460800;
++ break;
++ case 921600:
++ new_brg = s->ext_clk ? MAX3107_BRG_B921600 : MAX3107_BRG_IB921600;
++ break;
++ default:
++ /* Use previous */
++ baud = s->baud;
++ new_brg = s->brg_cfg;
++ tty_termios_encode_baud_rate(termios, baud, baud);
++ }
++ s->baud = baud;
++
++ /* Update timeout according to new baud rate */
++ uart_update_timeout(port, termios->c_cflag, baud);
++
++ spin_lock(&s->data_lock);
++ if (s->lcr_reg != new_lcr) {
++ s->lcr_reg = new_lcr;
++ s->lcr_commit = 1;
++ }
++ if (s->brg_cfg != new_brg) {
++ s->brg_cfg = new_brg;
++ s->brg_commit = 1;
++ }
++ spin_unlock(&s->data_lock);
++
++ /* Trigger work thread for doing the actual configuration change */
++ max3107_dowork(s);
++}
++
++/* Port shutdown function */
++static void max3107_shutdown(struct uart_port *port)
++{
++ struct max3107_port *s = container_of(port, struct max3107_port, port);
++
++ DBG_TRACE("enter\n");
++
++ if (s->suspended) {
++ /* Resume HW */
++ max3107_hw_susp(s, 0);
++ }
++
++ /* Free the interrupt */
++ free_irq(s->spi->irq, s);
++
++ if (s->workqueue) {
++ /* Flush and destroy work queue */
++ flush_workqueue(s->workqueue);
++ destroy_workqueue(s->workqueue);
++ s->workqueue = NULL;
++ }
++
++ /* Suspend HW */
++ max3107_hw_susp(s, 1);
++}
++
++/* Port startup function */
++static int max3107_startup(struct uart_port *port)
++{
++ struct max3107_port *s = container_of(port, struct max3107_port, port);
++
++ DBG_TRACE("enter\n");
++
++ /* Initialize work queue */
++ s->workqueue = create_freezeable_workqueue("max3107");
++ if (!s->workqueue) {
++ dev_err(&s->spi->dev, "Workqueue creation failed\n");
++ return -EBUSY;
++ }
++ INIT_WORK(&s->work, max3107_work);
++
++ /* Setup IRQ */
++ if (request_irq(s->spi->irq, max3107_irq, IRQF_TRIGGER_FALLING,
++ "max3107", s)) {
++		dev_err(&s->spi->dev, "IRQ request failed\n");
++ destroy_workqueue(s->workqueue);
++ s->workqueue = NULL;
++ return -EBUSY;
++ }
++
++ /* Resume HW */
++ max3107_hw_susp(s, 0);
++
++ /* Init registers */
++ max3107_register_init(s);
++
++ return 0;
++}
++
++/* Port type function */
++static const char *max3107_type(struct uart_port *port)
++{
++ struct max3107_port *s = container_of(port, struct max3107_port, port);
++ return s->spi->modalias;
++}
++
++/* Port release function */
++static void max3107_release_port(struct uart_port *port)
++{
++ /* Do nothing */
++}
++
++/* Port request function */
++static int max3107_request_port(struct uart_port *port)
++{
++ /* Do nothing */
++ return 0;
++}
++
++/* Port config function */
++static void max3107_config_port(struct uart_port *port, int flags)
++{
++ struct max3107_port *s = container_of(port, struct max3107_port, port);
++
++	/* Use PORT_MAX3100 since we are at least in the same series */
++ s->port.type = PORT_MAX3100;
++}
++
++/* Port verify function */
++static int max3107_verify_port(struct uart_port *port,
++ struct serial_struct *ser)
++{
++ if (ser->type == PORT_UNKNOWN || ser->type == PORT_MAX3100)
++ return 0;
++
++ return -EINVAL;
++}
++
++/* Port stop TX function */
++static void max3107_stop_tx(struct uart_port *port)
++{
++ /* Do nothing */
++}
++
++/* Port break control function */
++static void max3107_break_ctl(struct uart_port *port, int break_state)
++{
++ /* We don't support break control, do nothing */
++}
++
++/* GPIO direction query function */
++static enum gpio_direction max3107_gpio_get_direction(struct gpio_chip *chip,
++ unsigned offset)
++{
++ struct max3107_port *s = container_of(chip, struct max3107_port, chip);
++ u16 buf[1]; /* Buffer for SPI transfer */
++
++ DBG_TRACE("enter\n");
++
++ if (offset >= MAX3107_GPIO_COUNT) {
++ dev_err(&s->spi->dev, "Invalid GPIO\n");
++ return -EINVAL;
++ }
++
++ /* Read current GPIO configuration register */
++ buf[0] = MAX3107_GPIOCFG_REG;
++ /* Perform SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) {
++ dev_err(&s->spi->dev,
++ "SPI transfer for GPIO config read failed\n");
++ return -EIO;
++ }
++ buf[0] &= MAX3107_SPI_RX_DATA_MASK;
++
++ /* Check the direction bit */
++ if (buf[0] & (0x0001 << offset))
++ return DIRECTION_OUT;
++ return DIRECTION_IN;
++}
++
++/* GPIO direction to input function */
++static int max3107_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
++{
++ struct max3107_port *s = container_of(chip, struct max3107_port, chip);
++ u16 buf[1]; /* Buffer for SPI transfer */
++
++ DBG_TRACE("enter\n");
++
++ if (offset >= MAX3107_GPIO_COUNT) {
++ dev_err(&s->spi->dev, "Invalid GPIO\n");
++ return -EINVAL;
++ }
++
++ /* Read current GPIO configuration register */
++ buf[0] = MAX3107_GPIOCFG_REG;
++ /* Perform SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) {
++ dev_err(&s->spi->dev,
++ "SPI transfer for GPIO config read failed\n");
++ return -EIO;
++ }
++ buf[0] &= MAX3107_SPI_RX_DATA_MASK;
++
++ /* Set GPIO to input */
++ buf[0] &= ~(0x0001 << offset);
++
++ /* Write new GPIO configuration register value */
++ buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIOCFG_REG);
++ /* Perform SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, NULL, 2)) {
++ dev_err(&s->spi->dev,
++ "SPI transfer for GPIO config write failed\n");
++ return -EIO;
++ }
++ return 0;
++}
++
++/* GPIO direction to output function */
++static int max3107_gpio_direction_out(struct gpio_chip *chip, unsigned offset,
++ int value)
++{
++ struct max3107_port *s = container_of(chip, struct max3107_port, chip);
++ u16 buf[2]; /* Buffer for SPI transfers */
++
++ DBG_TRACE("enter\n");
++
++ if (offset >= MAX3107_GPIO_COUNT) {
++ dev_err(&s->spi->dev, "Invalid GPIO\n");
++ return -EINVAL;
++ }
++
++ /* Read current GPIO configuration and data registers */
++ buf[0] = MAX3107_GPIOCFG_REG;
++ buf[1] = MAX3107_GPIODATA_REG;
++ /* Perform SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 4)) {
++ dev_err(&s->spi->dev,
++ "SPI transfer for GPIO config and data read failed\n");
++ return -EIO;
++ }
++ buf[0] &= MAX3107_SPI_RX_DATA_MASK;
++ buf[1] &= MAX3107_SPI_RX_DATA_MASK;
++
++ /* Set GPIO to output */
++ buf[0] |= (0x0001 << offset);
++ /* Set value */
++ if (value)
++ buf[1] |= (0x0001 << offset);
++ else
++ buf[1] &= ~(0x0001 << offset);
++
++ /* Write new GPIO configuration and data register values */
++ buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIOCFG_REG);
++ buf[1] |= (MAX3107_WRITE_BIT | MAX3107_GPIODATA_REG);
++ /* Perform SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, NULL, 4)) {
++ dev_err(&s->spi->dev,
++ "SPI transfer for GPIO config and data write failed\n");
++ return -EIO;
++ }
++ return 0;
++}
++
++/* GPIO value query function */
++static int max3107_gpio_get(struct gpio_chip *chip, unsigned offset)
++{
++ struct max3107_port *s = container_of(chip, struct max3107_port, chip);
++ u16 buf[1]; /* Buffer for SPI transfer */
++
++ DBG_TRACE("enter\n");
++
++ if (offset >= MAX3107_GPIO_COUNT) {
++ dev_err(&s->spi->dev, "Invalid GPIO\n");
++ return -EINVAL;
++ }
++
++ /* Read current GPIO data register */
++ buf[0] = MAX3107_GPIODATA_REG;
++ /* Perform SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) {
++ dev_err(&s->spi->dev,
++ "SPI transfer for GPIO data read failed\n");
++ return -EIO;
++ }
++ buf[0] &= MAX3107_SPI_RX_DATA_MASK;
++
++ /* Return value */
++ return buf[0] & (0x0001 << offset);
++}
++
++/* GPIO value set function */
++static void max3107_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
++{
++ struct max3107_port *s = container_of(chip, struct max3107_port, chip);
++ u16 buf[2]; /* Buffer for SPI transfers */
++
++ DBG_TRACE("enter\n");
++
++ if (offset >= MAX3107_GPIO_COUNT) {
++ dev_err(&s->spi->dev, "Invalid GPIO\n");
++ return;
++ }
++
++ /* Read current GPIO configuration registers*/
++ buf[0] = MAX3107_GPIODATA_REG;
++ buf[1] = MAX3107_GPIOCFG_REG;
++ /* Perform SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 4)) {
++ dev_err(&s->spi->dev,
++ "SPI transfer for GPIO data and config read failed\n");
++ return;
++ }
++ buf[0] &= MAX3107_SPI_RX_DATA_MASK;
++ buf[1] &= MAX3107_SPI_RX_DATA_MASK;
++
++ if (!(buf[1] & (0x0001 << offset))) {
++ /* Configured as input, can't set value */
++ dev_warn(&s->spi->dev,
++ "Trying to set value for input GPIO\n");
++ return;
++ }
++
++ /* Set value */
++ if (value)
++ buf[0] |= (0x0001 << offset);
++ else
++ buf[0] &= ~(0x0001 << offset);
++
++ /* Write new GPIO data register value */
++ buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIODATA_REG);
++ /* Perform SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, NULL, 2)) {
++ dev_err(&s->spi->dev,
++ "SPI transfer for GPIO data write failed\n");
++ }
++}
++
++/* Platform data */
++static struct max3107_plat max3107_plat_data = {
++ .loopback = 0,
++ .ext_clk = 1,
++#ifdef CONFIG_MAX3107_LOW_POWER
++ .max3107_hw_suspend = &max3107_hw_susp,
++#endif /* CONFIG_MAX3107_LOW_POWER */
++ .polled_mode = 0,
++ .poll_time = 0,
++};
++
++/* Port functions */
++static struct uart_ops max3107_ops = {
++ .tx_empty = max3107_tx_empty,
++ .set_mctrl = max3107_set_mctrl,
++ .get_mctrl = max3107_get_mctrl,
++ .stop_tx = max3107_stop_tx,
++ .start_tx = max3107_start_tx,
++ .stop_rx = max3107_stop_rx,
++ .enable_ms = max3107_enable_ms,
++ .break_ctl = max3107_break_ctl,
++ .startup = max3107_startup,
++ .shutdown = max3107_shutdown,
++ .set_termios = max3107_set_termios,
++ .type = max3107_type,
++ .release_port = max3107_release_port,
++ .request_port = max3107_request_port,
++ .config_port = max3107_config_port,
++ .verify_port = max3107_verify_port,
++};
++
++/* UART driver data */
++static struct uart_driver max3107_uart_driver = {
++ .owner = THIS_MODULE,
++ .driver_name = "ttyMAX",
++ .dev_name = "ttyMAX",
++ .major = MAX3107_MAJOR,
++ .minor = MAX3107_MINOR,
++ .nr = 1,
++};
++
++/* GPIO chip data */
++static struct gpio_chip max3107_gpio_chip = {
++ .owner = THIS_MODULE,
++ .get_direction = max3107_gpio_get_direction,
++ .direction_input = max3107_gpio_direction_in,
++ .direction_output = max3107_gpio_direction_out,
++ .get = max3107_gpio_get,
++ .set = max3107_gpio_set,
++ .can_sleep = 1,
++ .base = MAX3107_GPIO_BASE,
++ .ngpio = MAX3107_GPIO_COUNT,
++};
++
++/* Device probe function */
++static int __devinit max3107_probe(struct spi_device *spi)
++{
++ struct max3107_port *s;
++ struct max3107_plat *pdata = &max3107_plat_data;
++ u16 buf[2]; /* Buffer for SPI transfers */
++ int retval;
++
++ DBG_TRACE("enter\n");
++
++ /* Reset the chip */
++ if (gpio_request(MAX3107_RESET_GPIO, "max3107")) {
++ printk(KERN_ERR "Requesting RESET GPIO failed\n");
++ return -EIO;
++ }
++ if (gpio_direction_output(MAX3107_RESET_GPIO, 0)) {
++ printk(KERN_ERR "Setting RESET GPIO to 0 failed\n");
++ gpio_free(MAX3107_RESET_GPIO);
++ return -EIO;
++ }
++ msleep(MAX3107_RESET_DELAY);
++ if (gpio_direction_output(MAX3107_RESET_GPIO, 1)) {
++ printk(KERN_ERR "Setting RESET GPIO to 1 failed\n");
++ gpio_free(MAX3107_RESET_GPIO);
++ return -EIO;
++ }
++ gpio_free(MAX3107_RESET_GPIO);
++ msleep(MAX3107_WAKEUP_DELAY);
++
++ /* Allocate port structure */
++ s = kzalloc(sizeof(*s), GFP_KERNEL);
++ if (!s) {
++ printk(KERN_ERR "Allocating port structure failed\n");
++ return -ENOMEM;
++ }
++
++ /* Initialize shared data lock */
++ spin_lock_init(&s->data_lock);
++
++	/* SPI initializations */
++ dev_set_drvdata(&spi->dev, s);
++ spi->mode = SPI_MODE_0;
++ spi->dev.platform_data = pdata;
++ spi->bits_per_word = 16;
++ s->ext_clk = pdata->ext_clk;
++ s->loopback = pdata->loopback;
++ spi_setup(spi);
++ s->spi = spi;
++
++ /* Check REV ID to ensure we are talking to what we expect */
++ buf[0] = MAX3107_REVID_REG;
++ if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) {
++ dev_err(&s->spi->dev, "SPI transfer for REVID read failed\n");
++ return -EIO;
++ }
++ if ((buf[0] & MAX3107_SPI_RX_DATA_MASK) != MAX3107_REVID1 &&
++ (buf[0] & MAX3107_SPI_RX_DATA_MASK) != MAX3107_REVID2) {
++ dev_err(&s->spi->dev, "REVID %x does not match\n",
++ (buf[0] & MAX3107_SPI_RX_DATA_MASK) );
++ return -ENODEV;
++ }
++
++ /* Disable all interrupts */
++ buf[0] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG | 0x0000);
++ buf[0] |= 0x0000;
++
++ /* Configure clock source */
++ buf[1] = (MAX3107_WRITE_BIT | MAX3107_CLKSRC_REG);
++ if (s->ext_clk) {
++ /* External clock */
++ buf[1] |= MAX3107_CLKSRC_EXTCLK_BIT;
++ }
++ /* PLL bypass */
++ buf[1] |= MAX3107_CLKSRC_PLLBYP_BIT;
++
++ /* Perform SPI transfer */
++ if (max3107_rw(s, (u8 *)buf, NULL, 4)) {
++ dev_err(&s->spi->dev, "SPI transfer for init failed\n");
++ return -EIO;
++ }
++
++
++ /* Register UART driver */
++ retval = uart_register_driver(&max3107_uart_driver);
++ if (retval) {
++ dev_err(&s->spi->dev, "Registering UART driver failed\n");
++ return retval;
++ }
++
++ /* Initialize UART port data */
++ s->port.fifosize = 128;
++ s->port.ops = &max3107_ops;
++ s->port.line = 0;
++ s->port.dev = &spi->dev;
++ s->port.uartclk = 9600;
++ s->port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF;
++ s->port.irq = s->spi->irq;
++	/* Use PORT_MAX3100 since we are at least in the same series */
++ s->port.type = PORT_MAX3100;
++
++ /* Add UART port */
++ retval = uart_add_one_port(&max3107_uart_driver, &s->port);
++ if (retval < 0) {
++ dev_err(&s->spi->dev, "Adding UART port failed\n");
++ return retval;
++ }
++
++ /* Initialize GPIO chip data */
++ s->chip = max3107_gpio_chip;
++ s->chip.label = spi->modalias;
++ s->chip.dev = &spi->dev;
++
++ /* Add GPIO chip */
++ retval = gpiochip_add(&s->chip);
++ if (retval) {
++ dev_err(&s->spi->dev, "Adding GPIO chip failed\n");
++ return retval;
++ }
++
++ /* Go to suspend mode */
++ max3107_hw_susp(s, 1);
++
++ return 0;
++}
++
++/* Driver remove function */
++static int __devexit max3107_remove(struct spi_device *spi)
++{
++ struct max3107_port *s = dev_get_drvdata(&spi->dev);
++
++ DBG_TRACE("enter\n");
++
++ /* Remove GPIO chip */
++ if (gpiochip_remove(&s->chip))
++ dev_warn(&s->spi->dev, "Removing GPIO chip failed\n");
++
++ /* Remove port */
++ if (uart_remove_one_port(&max3107_uart_driver, &s->port))
++ dev_warn(&s->spi->dev, "Removing UART port failed\n");
++
++ /* Unregister UART driver */
++ uart_unregister_driver(&max3107_uart_driver);
++
++ /* Free port structure */
++ kfree(s);
++
++ return 0;
++}
++
++/* Driver suspend function */
++static int max3107_suspend(struct spi_device *spi, pm_message_t state)
++{
++#ifdef CONFIG_PM
++ struct max3107_port *s = dev_get_drvdata(&spi->dev);
++
++ DBG_TRACE("enter\n");
++
++ /* Suspend UART port */
++ uart_suspend_port(&max3107_uart_driver, &s->port);
++
++ /* Go to suspend mode */
++ max3107_hw_susp(s, 1);
++#endif /* CONFIG_PM */
++ return 0;
++}
++
++/* Driver resume function */
++static int max3107_resume(struct spi_device *spi)
++{
++#ifdef CONFIG_PM
++ struct max3107_port *s = dev_get_drvdata(&spi->dev);
++
++ DBG_TRACE("enter\n");
++
++ /* Resume from suspend */
++ max3107_hw_susp(s, 0);
++
++ /* Resume UART port */
++ uart_resume_port(&max3107_uart_driver, &s->port);
++#endif /* CONFIG_PM */
++ return 0;
++}
++
++/* Spi driver data */
++static struct spi_driver max3107_driver = {
++ .driver = {
++ .name = "max3107",
++ .bus = &spi_bus_type,
++ .owner = THIS_MODULE,
++ },
++ .probe = max3107_probe,
++ .remove = __devexit_p(max3107_remove),
++ .suspend = max3107_suspend,
++ .resume = max3107_resume,
++};
++
++/* Driver init function */
++static int __init max3107_init(void)
++{
++ DBG_TRACE("enter\n");
++ return spi_register_driver(&max3107_driver);
++}
++
++/* Driver exit function */
++static void __exit max3107_exit(void)
++{
++ DBG_TRACE("enter\n");
++ spi_unregister_driver(&max3107_driver);
++}
++
++module_init(max3107_init);
++module_exit(max3107_exit);
++
++MODULE_DESCRIPTION("MAX3107 driver");
++MODULE_AUTHOR("Aavamobile");
++MODULE_ALIAS("max3107-spi-uart");
++MODULE_LICENSE("GPL v2");
+Index: linux-2.6.33/drivers/spi/mrst_spi.c
+===================================================================
+--- linux-2.6.33.orig/drivers/spi/mrst_spi.c
++++ linux-2.6.33/drivers/spi/mrst_spi.c
+@@ -1364,8 +1364,16 @@ static struct pci_driver mrst_spi_driver
+ .resume = mrst_spi_resume,
+ };
+
++/*
++ * spi_register_master will call scan board info, and MRST
++ * should only have one board_info registered
++ */
+ static int __init mrst_spi_init(void)
+ {
++/*#ifdef CONFIG_SERIAL_MAX3107*/
++/* spi_register_board_info(mrst_spi_board_info,*/
++/* ARRAY_SIZE(mrst_spi_board_info));*/
++/*#endif*/
+ return pci_register_driver(&mrst_spi_driver);
+ }
+
+Index: linux-2.6.33/include/linux/serial_max3107.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/include/linux/serial_max3107.h
+@@ -0,0 +1,352 @@
++/*
++ * max3107.h - spi uart protocol driver header for Maxim 3107
++ *
++ * Copyright (C) Aavamobile 2009
++ * Based on serial_max3100.h by Christian Pellegrin
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#ifndef _LINUX_SERIAL_MAX3107_H
++#define _LINUX_SERIAL_MAX3107_H
++
++/* Serial definitions */
++#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
++
++/* Serial error status definitions */
++#define MAX3107_PARITY_ERROR 1
++#define MAX3107_FRAME_ERROR 2
++#define MAX3107_OVERRUN_ERROR 4
++#define MAX3107_ALL_ERRORS (MAX3107_PARITY_ERROR | \
++ MAX3107_FRAME_ERROR | \
++ MAX3107_OVERRUN_ERROR)
++
++
++/* TTY definitions */
++#define MAX3107_MAJOR TTY_MAJOR
++#define MAX3107_MINOR 65
++
++
++/* GPIO definitions */
++#define MAX3107_GPIO_BASE 88
++#define MAX3107_GPIO_COUNT 4
++
++
++/* GPIO connected to chip's reset pin */
++#define MAX3107_RESET_GPIO 87
++
++
++/* Chip reset delay */
++#define MAX3107_RESET_DELAY 10
++
++/* Chip wakeup delay */
++#define MAX3107_WAKEUP_DELAY 50
++
++
++/* Sleep mode definitions */
++#define MAX3107_DISABLE_FORCED_SLEEP 0
++#define MAX3107_ENABLE_FORCED_SLEEP 1
++#define MAX3107_DISABLE_AUTOSLEEP 2
++#define MAX3107_ENABLE_AUTOSLEEP 3
++
++
++/* Definitions for register access with SPI transfers
++ *
++ * SPI transfer format:
++ *
++ * Master to slave bits xzzzzzzzyyyyyyyy
++ * Slave to master bits aaaaaaaabbbbbbbb
++ *
++ * where:
++ * x = 0 for reads, 1 for writes
++ * z = register address
++ * y = new register value if write, 0 if read
++ * a = unspecified
++ * b = register value if read, unspecified if write
++ */
++
++/* SPI speed */
++#define MAX3107_SPI_SPEED (3125000 * 2)
++
++/* Write bit */
++#define MAX3107_WRITE_BIT (1 << 15)
++
++/* SPI RX data mask */
++#define MAX3107_SPI_RX_DATA_MASK (0x00ff)
++
++/* SPI TX data mask */
++#define MAX3107_SPI_TX_DATA_MASK (0x00ff)
++
++/* Register access masks */
++#define MAX3107_RHR_REG (0x0000) /* RX FIFO */
++#define MAX3107_THR_REG (0x0000) /* TX FIFO */
++#define MAX3107_IRQEN_REG (0x0100) /* IRQ enable */
++#define MAX3107_IRQSTS_REG (0x0200) /* IRQ status */
++#define MAX3107_LSR_IRQEN_REG (0x0300) /* LSR IRQ enable */
++#define MAX3107_LSR_IRQSTS_REG (0x0400) /* LSR IRQ status */
++#define MAX3107_SPCHR_IRQEN_REG (0x0500) /* Special char IRQ enable */
++#define MAX3107_SPCHR_IRQSTS_REG (0x0600) /* Special char IRQ status */
++#define MAX3107_STS_IRQEN_REG (0x0700) /* Status IRQ enable */
++#define MAX3107_STS_IRQSTS_REG (0x0800) /* Status IRQ status */
++#define MAX3107_MODE1_REG (0x0900) /* MODE1 */
++#define MAX3107_MODE2_REG (0x0a00) /* MODE2 */
++#define MAX3107_LCR_REG (0x0b00) /* LCR */
++#define MAX3107_RXTO_REG (0x0c00) /* RX timeout */
++#define MAX3107_HDPIXDELAY_REG (0x0d00) /* Auto transceiver delays */
++#define MAX3107_IRDA_REG (0x0e00) /* IRDA settings */
++#define MAX3107_FLOWLVL_REG (0x0f00) /* Flow control levels */
++#define MAX3107_FIFOTRIGLVL_REG (0x1000) /* FIFO IRQ trigger levels */
++#define MAX3107_TXFIFOLVL_REG (0x1100) /* TX FIFO level */
++#define MAX3107_RXFIFOLVL_REG (0x1200) /* RX FIFO level */
++#define MAX3107_FLOWCTRL_REG (0x1300) /* Flow control */
++#define MAX3107_XON1_REG (0x1400) /* XON1 character */
++#define MAX3107_XON2_REG (0x1500) /* XON2 character */
++#define MAX3107_XOFF1_REG (0x1600) /* XOFF1 character */
++#define MAX3107_XOFF2_REG (0x1700) /* XOFF2 character */
++#define MAX3107_GPIOCFG_REG (0x1800) /* GPIO config */
++#define MAX3107_GPIODATA_REG (0x1900) /* GPIO data */
++#define MAX3107_PLLCFG_REG (0x1a00) /* PLL config */
++#define MAX3107_BRGCFG_REG (0x1b00) /* Baud rate generator conf */
++#define MAX3107_BRGDIVLSB_REG (0x1c00) /* Baud rate divisor LSB */
++#define MAX3107_BRGDIVMSB_REG (0x1d00) /* Baud rate divisor MSB */
++#define MAX3107_CLKSRC_REG (0x1e00) /* Clock source */
++#define MAX3107_REVID_REG (0x1f00) /* Revision identification */
++
++/* IRQ register bits */
++#define MAX3107_IRQ_LSR_BIT (1 << 0) /* LSR interrupt */
++#define MAX3107_IRQ_SPCHR_BIT (1 << 1) /* Special char interrupt */
++#define MAX3107_IRQ_STS_BIT (1 << 2) /* Status interrupt */
++#define MAX3107_IRQ_RXFIFO_BIT (1 << 3) /* RX FIFO interrupt */
++#define MAX3107_IRQ_TXFIFO_BIT (1 << 4) /* TX FIFO interrupt */
++#define MAX3107_IRQ_TXEMPTY_BIT (1 << 5) /* TX FIFO empty interrupt */
++#define MAX3107_IRQ_RXEMPTY_BIT (1 << 6) /* RX FIFO empty interrupt */
++#define MAX3107_IRQ_CTS_BIT (1 << 7) /* CTS interrupt */
++
++/* LSR register bits */
++#define MAX3107_LSR_RXTO_BIT (1 << 0) /* RX timeout */
++#define MAX3107_LSR_RXOVR_BIT (1 << 1) /* RX overrun */
++#define MAX3107_LSR_RXPAR_BIT (1 << 2) /* RX parity error */
++#define MAX3107_LSR_FRERR_BIT (1 << 3) /* Frame error */
++#define MAX3107_LSR_RXBRK_BIT (1 << 4) /* RX break */
++#define MAX3107_LSR_RXNOISE_BIT (1 << 5) /* RX noise */
++#define MAX3107_LSR_UNDEF6_BIT (1 << 6) /* Undefined/not used */
++#define MAX3107_LSR_CTS_BIT (1 << 7) /* CTS pin state */
++
++/* Special character register bits */
++#define MAX3107_SPCHR_XON1_BIT (1 << 0) /* XON1 character */
++#define MAX3107_SPCHR_XON2_BIT (1 << 1) /* XON2 character */
++#define MAX3107_SPCHR_XOFF1_BIT (1 << 2) /* XOFF1 character */
++#define MAX3107_SPCHR_XOFF2_BIT (1 << 3) /* XOFF2 character */
++#define MAX3107_SPCHR_BREAK_BIT (1 << 4) /* RX break */
++#define MAX3107_SPCHR_MULTIDROP_BIT (1 << 5) /* 9-bit multidrop addr char */
++#define MAX3107_SPCHR_UNDEF6_BIT (1 << 6) /* Undefined/not used */
++#define MAX3107_SPCHR_UNDEF7_BIT (1 << 7) /* Undefined/not used */
++
++/* Status register bits */
++#define MAX3107_STS_GPIO0_BIT (1 << 0) /* GPIO 0 interrupt */
++#define MAX3107_STS_GPIO1_BIT (1 << 1) /* GPIO 1 interrupt */
++#define MAX3107_STS_GPIO2_BIT (1 << 2) /* GPIO 2 interrupt */
++#define MAX3107_STS_GPIO3_BIT (1 << 3) /* GPIO 3 interrupt */
++#define MAX3107_STS_UNDEF4_BIT (1 << 4) /* Undefined/not used */
++#define MAX3107_STS_CLKREADY_BIT (1 << 5) /* Clock ready */
++#define MAX3107_STS_SLEEP_BIT (1 << 6) /* Sleep interrupt */
++#define MAX3107_STS_UNDEF7_BIT (1 << 7) /* Undefined/not used */
++
++/* MODE1 register bits */
++#define MAX3107_MODE1_RXDIS_BIT (1 << 0) /* RX disable */
++#define MAX3107_MODE1_TXDIS_BIT (1 << 1) /* TX disable */
++#define MAX3107_MODE1_TXHIZ_BIT (1 << 2) /* TX pin three-state */
++#define MAX3107_MODE1_RTSHIZ_BIT (1 << 3) /* RTS pin three-state */
++#define MAX3107_MODE1_TRNSCVCTRL_BIT (1 << 4) /* Transceiver ctrl enable */
++#define MAX3107_MODE1_FORCESLEEP_BIT (1 << 5) /* Force sleep mode */
++#define MAX3107_MODE1_AUTOSLEEP_BIT (1 << 6) /* Auto sleep enable */
++#define MAX3107_MODE1_IRQSEL_BIT (1 << 7) /* IRQ pin enable */
++
++/* MODE2 register bits */
++#define MAX3107_MODE2_RST_BIT (1 << 0) /* Chip reset */
++#define MAX3107_MODE2_FIFORST_BIT (1 << 1) /* FIFO reset */
++#define MAX3107_MODE2_RXTRIGINV_BIT (1 << 2) /* RX FIFO INT invert */
++#define MAX3107_MODE2_RXEMPTINV_BIT (1 << 3) /* RX FIFO empty INT invert */
++#define MAX3107_MODE2_SPCHR_BIT (1 << 4) /* Special chr detect enable */
++#define MAX3107_MODE2_LOOPBACK_BIT (1 << 5) /* Internal loopback enable */
++#define MAX3107_MODE2_MULTIDROP_BIT (1 << 6) /* 9-bit multidrop enable */
++#define MAX3107_MODE2_ECHOSUPR_BIT (1 << 7) /* ECHO suppression enable */
++
++/* LCR register bits */
++#define MAX3107_LCR_LENGTH0_BIT (1 << 0) /* Word length bit 0 */
++#define MAX3107_LCR_LENGTH1_BIT (1 << 1) /* Word length bit 1
++ *
++ * Word length bits table:
++ * 00 -> 5 bit words
++ * 01 -> 6 bit words
++ * 10 -> 7 bit words
++ * 11 -> 8 bit words
++ */
++#define MAX3107_LCR_STOPLEN_BIT (1 << 2) /* STOP length bit
++ *
++ * STOP length bit table:
++ * 0 -> 1 stop bit
++ * 1 -> 1-1.5 stop bits if
++ * word length is 5,
++ * 2 stop bits otherwise
++ */
++#define MAX3107_LCR_PARITY_BIT (1 << 3) /* Parity bit enable */
++#define MAX3107_LCR_EVENPARITY_BIT (1 << 4) /* Even parity bit enable */
++#define MAX3107_LCR_FORCEPARITY_BIT (1 << 5) /* 9-bit multidrop parity */
++#define MAX3107_LCR_TXBREAK_BIT (1 << 6) /* TX break enable */
++#define MAX3107_LCR_RTS_BIT (1 << 7) /* RTS pin control */
++#define MAX3107_LCR_WORD_LEN_5 (0x0000)
++#define MAX3107_LCR_WORD_LEN_6 (0x0001)
++#define MAX3107_LCR_WORD_LEN_7 (0x0002)
++#define MAX3107_LCR_WORD_LEN_8 (0x0003)
++
++
++/* IRDA register bits */
++#define MAX3107_IRDA_IRDAEN_BIT (1 << 0) /* IRDA mode enable */
++#define MAX3107_IRDA_SIR_BIT (1 << 1) /* SIR mode enable */
++#define MAX3107_IRDA_SHORTIR_BIT (1 << 2) /* Short SIR mode enable */
++#define MAX3107_IRDA_MIR_BIT (1 << 3) /* MIR mode enable */
++#define MAX3107_IRDA_RXINV_BIT (1 << 4) /* RX logic inversion enable */
++#define MAX3107_IRDA_TXINV_BIT (1 << 5) /* TX logic inversion enable */
++#define MAX3107_IRDA_UNDEF6_BIT (1 << 6) /* Undefined/not used */
++#define MAX3107_IRDA_UNDEF7_BIT (1 << 7) /* Undefined/not used */
++
++/* Flow control trigger level register masks */
++#define MAX3107_FLOWLVL_HALT_MASK (0x000f) /* Flow control halt level */
++#define MAX3107_FLOWLVL_RES_MASK (0x00f0) /* Flow control resume level */
++#define MAX3107_FLOWLVL_HALT(words) ((words/8) & 0x000f)
++#define MAX3107_FLOWLVL_RES(words) (((words/8) & 0x000f) << 4)
++
++/* FIFO interrupt trigger level register masks */
++#define MAX3107_FIFOTRIGLVL_TX_MASK (0x000f) /* TX FIFO trigger level */
++#define MAX3107_FIFOTRIGLVL_RX_MASK (0x00f0) /* RX FIFO trigger level */
++#define MAX3107_FIFOTRIGLVL_TX(words) ((words/8) & 0x000f)
++#define MAX3107_FIFOTRIGLVL_RX(words) (((words/8) & 0x000f) << 4)
++
++/* Flow control register bits */
++#define MAX3107_FLOWCTRL_AUTORTS_BIT (1 << 0) /* Auto RTS flow ctrl enable */
++#define MAX3107_FLOWCTRL_AUTOCTS_BIT (1 << 1) /* Auto CTS flow ctrl enable */
++#define MAX3107_FLOWCTRL_GPIADDR_BIT (1 << 2) /* Enables that GPIO inputs
++ * are used in conjunction with
++ * XOFF2 for definition of
++ * special character */
++#define MAX3107_FLOWCTRL_SWFLOWEN_BIT (1 << 3) /* Auto SW flow ctrl enable */
++#define MAX3107_FLOWCTRL_SWFLOW0_BIT (1 << 4) /* SWFLOW bit 0 */
++#define MAX3107_FLOWCTRL_SWFLOW1_BIT (1 << 5) /* SWFLOW bit 1
++ *
++ * SWFLOW bits 1 & 0 table:
++ * 00 -> no transmitter flow
++ * control
++ * 01 -> receiver compares
++ * XON2 and XOFF2
++ * and controls
++ * transmitter
++ * 10 -> receiver compares
++ * XON1 and XOFF1
++ * and controls
++ * transmitter
++ * 11 -> receiver compares
++ * XON1, XON2, XOFF1 and
++ * XOFF2 and controls
++ * transmitter
++ */
++#define MAX3107_FLOWCTRL_SWFLOW2_BIT (1 << 6) /* SWFLOW bit 2 */
++#define MAX3107_FLOWCTRL_SWFLOW3_BIT (1 << 7) /* SWFLOW bit 3
++ *
++ * SWFLOW bits 3 & 2 table:
++ * 00 -> no received flow
++ * control
++ * 01 -> transmitter generates
++ * XON2 and XOFF2
++ * 10 -> transmitter generates
++ * XON1 and XOFF1
++ * 11 -> transmitter generates
++ * XON1, XON2, XOFF1 and
++ * XOFF2
++ */
++
++/* GPIO configuration register bits */
++#define MAX3107_GPIOCFG_GP0OUT_BIT (1 << 0) /* GPIO 0 output enable */
++#define MAX3107_GPIOCFG_GP1OUT_BIT (1 << 1) /* GPIO 1 output enable */
++#define MAX3107_GPIOCFG_GP2OUT_BIT (1 << 2) /* GPIO 2 output enable */
++#define MAX3107_GPIOCFG_GP3OUT_BIT (1 << 3) /* GPIO 3 output enable */
++#define MAX3107_GPIOCFG_GP0OD_BIT (1 << 4) /* GPIO 0 open-drain enable */
++#define MAX3107_GPIOCFG_GP1OD_BIT (1 << 5) /* GPIO 1 open-drain enable */
++#define MAX3107_GPIOCFG_GP2OD_BIT (1 << 6) /* GPIO 2 open-drain enable */
++#define MAX3107_GPIOCFG_GP3OD_BIT (1 << 7) /* GPIO 3 open-drain enable */
++
++/* GPIO DATA register bits */
++#define MAX3107_GPIODATA_GP0OUT_BIT (1 << 0) /* GPIO 0 output value */
++#define MAX3107_GPIODATA_GP1OUT_BIT (1 << 1) /* GPIO 1 output value */
++#define MAX3107_GPIODATA_GP2OUT_BIT (1 << 2) /* GPIO 2 output value */
++#define MAX3107_GPIODATA_GP3OUT_BIT (1 << 3) /* GPIO 3 output value */
++#define MAX3107_GPIODATA_GP0IN_BIT (1 << 4) /* GPIO 0 input value */
++#define MAX3107_GPIODATA_GP1IN_BIT (1 << 5) /* GPIO 1 input value */
++#define MAX3107_GPIODATA_GP2IN_BIT (1 << 6) /* GPIO 2 input value */
++#define MAX3107_GPIODATA_GP3IN_BIT (1 << 7) /* GPIO 3 input value */
++
++/* PLL configuration register masks */
++#define MAX3107_PLLCFG_PREDIV_MASK (0x003f) /* PLL predivision value */
++#define MAX3107_PLLCFG_PLLFACTOR_MASK (0x00c0) /* PLL multiplication factor */
++
++/* Baud rate generator configuration register masks and bits */
++#define MAX3107_BRGCFG_FRACT_MASK (0x000f) /* Fractional portion of
++ * Baud rate generator divisor
++ */
++#define MAX3107_BRGCFG_2XMODE_BIT (1 << 4) /* Double baud rate */
++#define MAX3107_BRGCFG_4XMODE_BIT (1 << 5) /* Quadruple baud rate */
++#define MAX3107_BRGCFG_UNDEF6_BIT (1 << 6) /* Undefined/not used */
++#define MAX3107_BRGCFG_UNDEF7_BIT (1 << 7) /* Undefined/not used */
++
++/* Clock source register bits */
++#define MAX3107_CLKSRC_INTOSC_BIT (1 << 0) /* Internal osc enable */
++#define MAX3107_CLKSRC_CRYST_BIT (1 << 1) /* Crystal osc enable */
++#define MAX3107_CLKSRC_PLL_BIT (1 << 2) /* PLL enable */
++#define MAX3107_CLKSRC_PLLBYP_BIT (1 << 3) /* PLL bypass */
++#define MAX3107_CLKSRC_EXTCLK_BIT (1 << 4) /* External clock enable */
++#define MAX3107_CLKSRC_UNDEF5_BIT (1 << 5) /* Undefined/not used */
++#define MAX3107_CLKSRC_UNDEF6_BIT (1 << 6) /* Undefined/not used */
++#define MAX3107_CLKSRC_CLK2RTS_BIT (1 << 7) /* Baud clk to RTS pin */
++
++
++/* HW definitions */
++#define MAX3107_RX_FIFO_SIZE 128
++#define MAX3107_TX_FIFO_SIZE 128
++#define MAX3107_REVID1 0x00a0
++#define MAX3107_REVID2 0x00a1
++
++
++/* Baud rate generator configuration values for external clock */
++#define MAX3107_BRG_B300 (0x0A9400 | 0x05)
++#define MAX3107_BRG_B600 (0x054A00 | 0x03)
++#define MAX3107_BRG_B1200 (0x02A500 | 0x01)
++#define MAX3107_BRG_B2400 (0x015200 | 0x09)
++#define MAX3107_BRG_B4800 (0x00A900 | 0x04)
++#define MAX3107_BRG_B9600 (0x005400 | 0x0A)
++#define MAX3107_BRG_B19200 (0x002A00 | 0x05)
++#define MAX3107_BRG_B38400 (0x001500 | 0x03)
++#define MAX3107_BRG_B57600 (0x000E00 | 0x02)
++#define MAX3107_BRG_B115200 (0x000700 | 0x01)
++#define MAX3107_BRG_B230400 (0x000300 | 0x08)
++#define MAX3107_BRG_B460800 (0x000100 | 0x0c)
++#define MAX3107_BRG_B921600 (0x000100 | 0x1c)
++
++/* Baud rate generator configuration values for internal clock */
++#define MAX3107_BRG_IB300 (0x008000 | 0x00)
++#define MAX3107_BRG_IB600 (0x004000 | 0x00)
++#define MAX3107_BRG_IB1200 (0x002000 | 0x00)
++#define MAX3107_BRG_IB2400 (0x001000 | 0x00)
++#define MAX3107_BRG_IB4800 (0x000800 | 0x00)
++#define MAX3107_BRG_IB9600 (0x000400 | 0x00)
++#define MAX3107_BRG_IB19200 (0x000200 | 0x00)
++#define MAX3107_BRG_IB38400 (0x000100 | 0x00)
++#define MAX3107_BRG_IB57600 (0x000000 | 0x0B)
++#define MAX3107_BRG_IB115200 (0x000000 | 0x05)
++#define MAX3107_BRG_IB230400 (0x000000 | 0x03)
++#define MAX3107_BRG_IB460800 (0x000000 | 0x00)
++#define MAX3107_BRG_IB921600 (0x000000 | 0x00)
++
++#endif /* _LINUX_SERIAL_MAX3107_H */
+Index: linux-2.6.33/include/drm/drm_mode.h
+===================================================================
+--- linux-2.6.33.orig/include/drm/drm_mode.h
++++ linux-2.6.33/include/drm/drm_mode.h
+@@ -160,9 +160,9 @@ struct drm_mode_get_encoder {
+ #define DRM_MODE_CONNECTOR_DisplayPort 10
+ #define DRM_MODE_CONNECTOR_HDMIA 11
+ #define DRM_MODE_CONNECTOR_HDMIB 12
+-#define DRM_MODE_CONNECTOR_TV 13
++#define DRM_MODE_CONNECTOR_TV 15
+ #define DRM_MODE_CONNECTOR_eDP 14
+-#define DRM_MODE_CONNECTOR_MIPI 15
++#define DRM_MODE_CONNECTOR_MIPI 13
+
+ struct drm_mode_get_connector {
+
+Index: linux-2.6.33/drivers/spi/hh2serial.c
+===================================================================
+--- linux-2.6.33.orig/drivers/spi/hh2serial.c
++++ linux-2.6.33/drivers/spi/hh2serial.c
+@@ -1,7 +1,17 @@
++/******************************************************************************
++
++ Copyright (c) 2009
++ Infineon Technologies AG
++ Am Campeon 1-12; 81726 Munich, Germany
++
++ For licensing information, see the file 'LICENSE' in the root folder of
++ this software module.
++
++******************************************************************************/
+ /*
+- * HH2 SPI Serial driver
++ * HH2 SPI Serial driver Version 0.2 Beta
+ *
+- * Copyright (C) 2009 Markus Burvall (Markus.Burvall@swedenconnectivity.com)
++ * Written by: 2009 Markus Burvall (Markus.Burvall@swedenconnectivity.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+@@ -10,15 +20,10 @@
+ */
+
+
+-#define DEBUG 1
+-
+-//#define HH2_TTY_ECHO
+-//#define HH2_TTY_SEND_POLL
+-//#define HH2_NO_SPI
+ #define HH2SERIAL_SPI_16BIT
+-//#define HH2SERIAL_ENABLE_DEBUG
+ #define HH2SERIAL_SPI_POLL
+
++/*#define HH2SERIAL_SHOW_ERRORS*/
+
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+@@ -66,6 +71,7 @@ struct hh2serial_dev {
+ atomic_t tty_need_read;
+ atomic_t spi_irq_pending;
+ int mthread_up;
++ int hhRxBufferBytes;
+ };
+
+ static const char driver_name[] = "hh2serial";
+@@ -89,13 +95,7 @@ static struct hh2serial_dev priv0;
+ #define GPSD_DREAD 0xC0 /* bit 7 and 6 */
+ #define GPSD_CRWRITE 0x00 /* All zero */
+
+-#ifdef HH2SERIAL_SPI_16BIT
+-/* HH2 DATA OPERATIONS */
+-#define GPSD_16BIT_SRREAD 0x8000 /* bit 7 */
+-#define GPSD_16BIT_DWRITE 0x4000 /* bit 6 */
+-#define GPSD_16BIT_DREAD 0xC000 /* bit 7 and 6 */
+-#define GPSD_16BIT_CRWRITE 0x0000 /* All zero */
+-#endif
++
+
+ /* HH2 STATUS REGISTER */
+ #define GPSS_TCNT 0x1F /* bits [4..0] */
+@@ -192,9 +192,7 @@ int hh2serial_spi_get_rx_len(struct hh2s
+ buf_ptr = x.rx_buf;
+
+ #ifdef HH2SERIAL_ENABLE_DEBUG
+- printk(KERN_INFO "hh2serial RD:%02X, %02X\n",
+- *buf_ptr,
+- buf_ptr[1]);
++ printk(KERN_INFO "hh2serial RD:%02X, %02X\n", *buf_ptr, buf_ptr[1]);
+ #endif
+
+ #ifndef HH2SERIAL_SPI_16BIT
+@@ -203,33 +201,56 @@ int hh2serial_spi_get_rx_len(struct hh2s
+ ret = *buf_ptr & GPSS_TCNT;
+
+ /* Check buffer overrun or underrun errors */
++#ifdef HH2SERIAL_SHOW_ERRORS
+ if (*buf_ptr & GPSS_TERR)
+ printk(KERN_INFO "hh2serial HH2 transmitter underrun!\n");
+
+ if (*buf_ptr & GPSS_RERR)
+ printk(KERN_INFO "hh2serial HH2 receiver overrun!\n");
+-
++#endif
++ if (*buf_ptr & GPSS_REMPTY)
++ {
++ hh2serial->hhRxBufferBytes = HH2SERIAL_SPI_MAX_BYTES;
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk(KERN_INFO "hh2serial HH2 rx empty!\n");
++#endif
++ }
+ #else
+ /* 16 bit second byte is status register */
+ /* Available bytes */
+ ret = buf_ptr[1] & GPSS_TCNT;
+
+ /* Check buffer overrun or underrun errors */
++#ifdef HH2SERIAL_SHOW_ERRORS
+ if (buf_ptr[1] & GPSS_TERR)
+ printk(KERN_INFO "hh2serial HH2 transmitter underrun!\n");
+
+ if (buf_ptr[1] & GPSS_RERR)
+ printk(KERN_INFO "hh2serial HH2 receiver overrun!\n");
+ #endif
++
++ if (buf_ptr[1] & GPSS_REMPTY)
++ {
++ hh2serial->hhRxBufferBytes = HH2SERIAL_SPI_MAX_BYTES;
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk(KERN_INFO "hh2serial HH2 rx empty!\n");
++#endif
++ }
++#endif
+ /* Take care of errors */
+ /* FIX ME */
+
+ #ifdef HH2SERIAL_ENABLE_DEBUG
+- printk(KERN_INFO "hh2serial SR:%02X, rx len %d\n",
+- buf_ptr[1],
+- ret);
++ printk(KERN_INFO "hh2serial SR:%02X, rx len %d\n", buf_ptr[1], ret);
+ #endif
+ }
++ else
++ {
++#ifdef HH2SERIAL_SHOW_ERRORS
++printk(KERN_INFO "hh2serial Rd_status, spi_sync failed: %d\n",ret);
++#endif
++ ret = 0;
++ }
+
+ kfree(local_buf);
+ return ret;
+@@ -332,11 +353,22 @@ int hh2serial_spi_read(struct hh2serial_
+ available_rd = *buf_ptr & GPSS_TCNT;
+
+ /* Check buffer overrun or underrun errors */
++#ifdef HH2SERIAL_SHOW_ERRORS
+ if (*buf_ptr & GPSS_TERR)
+ printk(KERN_INFO "hh2serial HH2 transmitter underrun!\n");
+
+ if (*buf_ptr & GPSS_RERR)
+ printk(KERN_INFO "hh2serial HH2 receiver overrun!\n");
++#endif
++
++ if (*buf_ptr & GPSS_REMPTY)
++ {
++ hh2serial->hhRxBufferBytes = HH2SERIAL_SPI_MAX_BYTES;
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk(KERN_INFO "hh2serial HH2 rx empty!\n");
++#endif
++
++ }
+ #else
+ /* 16 bit second byte is status register */
+ /* Every other byte is status register */
+@@ -345,6 +377,7 @@ int hh2serial_spi_read(struct hh2serial_
+ available_rd = (buf_ptr[len_inc_hdr-1] & GPSS_TCNT) - 1;
+
+ /* Check buffer overrun or underrun errors */
++#ifdef HH2SERIAL_SHOW_ERRORS
+ if (buf_ptr[len_inc_hdr-1] & GPSS_TERR)
+ printk(KERN_INFO "hh2serial HH2 transmitter underrun!\n");
+
+@@ -352,6 +385,14 @@ int hh2serial_spi_read(struct hh2serial_
+ printk(KERN_INFO "hh2serial HH2 receiver overrun!\n");
+ #endif
+
++ if (buf_ptr[len_inc_hdr-1] & GPSS_REMPTY)
++ {
++ hh2serial->hhRxBufferBytes = HH2SERIAL_SPI_MAX_BYTES;
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk(KERN_INFO "hh2serial HH2 rx empty!\n");
++#endif
++ }
++#endif
+
+ #ifdef HH2SERIAL_ENABLE_DEBUG
+ printk(KERN_INFO "hh2serial_spi_read len inc hdr wr:%d, avail rd %d, cs_change:%d\n",
+@@ -388,6 +429,13 @@ int hh2serial_spi_read(struct hh2serial_
+ #endif
+
+ }
++ else
++ {
++#ifdef HH2SERIAL_SHOW_ERRORS
++printk(KERN_INFO "hh2serial spi_read, spi_sync failed: %d\n",status);
++#endif
++
++ }
+
+ kfree(local_buf);
+ return status;
+@@ -435,8 +483,8 @@ int hh2serial_spi_write(struct hh2serial
+ x.len = len_inc_hdr;
+ spi_message_add_tail(&x, &message);
+
+- /* Allocate and make room for 1 byte header */
+- local_buf = kzalloc(HH2SERIAL_BUFSIZE+1, GFP_KERNEL);
++ /* Allocate and make room for 1 byte header(RX and TX) */
++ local_buf = kzalloc(HH2SERIAL_BUFSIZE+2, GFP_KERNEL);
+ if (!local_buf)
+ return -ENOMEM;
+
+@@ -453,7 +501,6 @@ int hh2serial_spi_write(struct hh2serial
+ int byte_index = 2;
+ while (byte_index < len_inc_hdr)
+ {
+-
+ local_buf[byte_index] = txbuf[byte_index];
+ local_buf[byte_index+1] = GPSD_DWRITE;
+ byte_index = byte_index + 2;
+@@ -495,24 +542,55 @@ int hh2serial_spi_write(struct hh2serial
+ available_rd = *buf_ptr & GPSS_TCNT;
+
+ /* Check buffer overrun or underrun errors */
++#ifdef HH2SERIAL_SHOW_ERRORS
+ if (*buf_ptr & GPSS_TERR)
+ printk(KERN_INFO "hh2serial HH2 transmitter underrun!\n");
+
+ if (*buf_ptr & GPSS_RERR)
+ printk(KERN_INFO "hh2serial HH2 receiver overrun!\n");
++#endif
++ if (*buf_ptr & GPSS_REMPTY)
++ {
++ /* Buffer was empty but len bytes has been written after that */
++ hh2serial->hhRxBufferBytes = HH2SERIAL_SPI_MAX_BYTES-len;
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk(KERN_INFO "hh2serial HH2 rx empty!\n");
++#endif
++ }
++ else
++ {
++ hh2serial->hhRxBufferBytes -= len;
++ }
+ #else
++ /* FIXME_Only last status is interesting or? */
++ /* Might have to check every status register to se if empty */
+ /* 16 bit second byte is status register */
+ /* Available bytes */
+- available_rd = buf_ptr[1] & GPSS_TCNT;
++ available_rd = buf_ptr[len_inc_hdr-1] & GPSS_TCNT;
+
+ /* Check buffer overrun or underrun errors */
+- if (buf_ptr[1] & GPSS_TERR)
++#ifdef HH2SERIAL_SHOW_ERRORS
++ if (buf_ptr[len_inc_hdr-1] & GPSS_TERR)
+ printk(KERN_INFO "hh2serial HH2 transmitter underrun!\n");
+
+- if (buf_ptr[1] & GPSS_RERR)
++ if (buf_ptr[len_inc_hdr-1] & GPSS_RERR)
+ printk(KERN_INFO "hh2serial HH2 receiver overrun!\n");
+ #endif
+
++ if (buf_ptr[len_inc_hdr-1] & GPSS_REMPTY)
++ {
++ /* Buffer was empty but one byte has been written after that */
++ hh2serial->hhRxBufferBytes = HH2SERIAL_SPI_MAX_BYTES-1;
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk(KERN_INFO "hh2serial HH2 rx empty!\n");
++#endif
++ }
++ else
++ {
++ /* Only 8 bit of every 16 is data */
++ hh2serial->hhRxBufferBytes -= (len/2);
++ }
++#endif
+
+ #ifdef HH2SERIAL_ENABLE_DEBUG
+ printk(KERN_INFO "hh2serial_spi_write:%02X, %02X\n",
+@@ -526,9 +604,14 @@ int hh2serial_spi_write(struct hh2serial
+
+ *spiAvailData = available_rd;
+
+-
+ }
++ else
++ {
++#ifdef HH2SERIAL_SHOW_ERRORS
++printk(KERN_INFO "hh2serial spi_write, spi_sync failed: %d\n",status);
++#endif
+
++ }
+
+
+ kfree(local_buf);
+@@ -616,61 +699,77 @@ static inline void hh2serial_write_circ_
+ #ifdef HH2SERIAL_ENABLE_DEBUG
+ printk(KERN_INFO "Bytes in circ buffer: %d\n", left);
+ #endif
+- while (left) {
+- /* MrB Change below to 1 and word length to 16 to write 16 bit
+- word by word */
+-#ifndef HH2SERIAL_SPI_16BIT
+- len = (left >= HH2SERIAL_SPI_MAX_BYTES) ? HH2SERIAL_SPI_MAX_BYTES : left;
+-#else
+- len = (left >= HH2SERIAL_SPI_MAX_BYTES) ? HH2SERIAL_SPI_MAX_BYTES : left;
+-#endif
+-
+- memset(obuf, 0, len);
+- memset(ibuf, 0, len);
+- for (i = 0; i < len; i++) {
++ while (left || (rxlen > 0)) {
+
+- obuf[i] = (u8)xmit->buf[xmit->tail];
+-
+- xmit->tail = (xmit->tail + 1) &
+- (UART_XMIT_SIZE - 1);
+- }
+-#ifndef HH2SERIAL_SPI_16BIT
++ if (left)
++ {
++ /* FIXME len = MIN(left , hhRxBufferBytes)
++ if len 0 is then only read status register and read */
++
++ len = (left >= priv->hhRxBufferBytes) ? priv->hhRxBufferBytes : left;
++
++
++ if (len > 0)
++ {
++ memset(obuf, 0, len);
++ memset(ibuf, 0, len);
++ for (i = 0; i < len; i++) {
++
++ obuf[i] = (u8)xmit->buf[xmit->tail];
++
++ xmit->tail = (xmit->tail + 1) &
++ (UART_XMIT_SIZE - 1);
++ }
++ #ifndef HH2SERIAL_SPI_16BIT
++
++ /* FIXME check status */
++ hh2serial_spi_write(priv, (u8 *)obuf,
++ &rxlen, len);
++
++ #else
++ /* len * 2 since 16 bits instead of 8 bits */
++ /* FIXME check status */
++ hh2serial_spi_write(priv, (u8 *)obuf,
++ &rxlen, len*2);
++
++ #endif
++ left -= len;
++ }
++ else /* Read rx len */
++ {
++ #ifdef HH2SERIAL_SHOW_ERRORS
++ printk(KERN_INFO "hh2serial wr buf2spi, rxBuf full?\n");
++ #endif
++ rxlen = hh2serial_spi_get_rx_len(priv);
+
+- hh2serial_spi_write(priv, (u8 *)obuf,
+- &rxlen, len);
+
+-#else
+- /* len * 2 since 16 bits instead of 8 bits */
+- hh2serial_spi_write(priv, (u8 *)obuf,
+- &rxlen, len*2);
+-
+-#endif
+- left -= len;
+- }
++ }
++ }
+ #ifdef HH2SERIAL_ENABLE_DEBUG
+ printk(KERN_INFO "hh2serial: Bytes avail to read: %d\n", rxlen);
+ #endif
+ /* Read if available bytes */
+ /* FIXME: Could add a maximum read loop here */
+- while (rxlen > 0)
+- {
+-
+- len = rxlen;
+-#ifndef HH2SERIAL_SPI_16BIT
+- hh2serial_spi_read(priv, (u8 *)ibuf, &rxlen, len);
+-#else
+- hh2serial_spi_read(priv, (u8 *)ibuf, &rxlen, len*2);
+-#endif
+-
+- for (i = 0, j = 0; i < len; i++) {
+- valid_str[j++] = (u8)(ibuf[i]);
+- }
++ if (rxlen > 0)
++ {
+
+- if (j)
+- hh2serial_write2tty(priv, valid_str, j);
+-
+- priv->port.icount.tx += len;
+- }
++ len = rxlen;
++ #ifndef HH2SERIAL_SPI_16BIT
++ hh2serial_spi_read(priv, (u8 *)ibuf, &rxlen, len);
++ #else
++ hh2serial_spi_read(priv, (u8 *)ibuf, &rxlen, len*2);
++ #endif
++
++ for (i = 0, j = 0; i < len; i++) {
++ valid_str[j++] = (u8)(ibuf[i]);
++ }
++
++ if (j)
++ hh2serial_write2tty(priv, valid_str, j);
++
++ priv->port.icount.tx += len;
++ }
++ }
+ }
+ }
+ #endif
+@@ -793,7 +892,7 @@ static int hh2serial_main_thread(void *_
+ /* Read from tty send to spi */
+ #ifdef HH2SERIAL_ENABLE_DEBUG
+ printk(KERN_INFO "hh2serial: Read from tty send to spi\n");
+-#endif
++#endif
+ /* Read from tty send to spi */
+ /* Receive data from spi send to UART */
+
+@@ -1153,11 +1252,13 @@ static int hh2serial_startup(struct uart
+ struct hh2serial_dev *priv = container_of(port, struct hh2serial_dev, port);
+ FUNC_ENTER();
+
++ /* Initialize RxBuffer to 0 */
++ priv->hhRxBufferBytes = 0;
+ #ifdef HH2SERIAL_SPI_POLL
+ priv->poll_thread = kthread_run(hh2serial_poll_thread,
+ priv, "hh2serial_poll");
+ if (IS_ERR(priv->poll_thread)) {
+- printk(KERN_INFO "hh2serial Failed to start poll thread: %ld",
++ printk(KERN_INFO "hh2serial Failed to start poll thread: %ld",
+ PTR_ERR(priv->poll_thread));
+ }
+ #endif
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-analog-accelerometer-driver.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-analog-accelerometer-driver.patch
new file mode 100644
index 0000000..c8264db
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-analog-accelerometer-driver.patch
@@ -0,0 +1,462 @@
+Index: linux-2.6.33/drivers/hwmon/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/hwmon/Kconfig
++++ linux-2.6.33/drivers/hwmon/Kconfig
+@@ -63,6 +63,13 @@ config SENSORS_EMC1403
+ Threshold values can be configured using sysfs.
+ Data from the different diode are accessible via sysfs.
+
++config SENSORS_MRST_ANALOG_ACCEL
++ tristate "Moorestown Analog Accelerometer"
++ depends on LNW_IPC
++ help
++ If you say yes here you get support for the Analog Accelerometer Devices
++ x y Z data can be accessed via sysfs.
++
+ config HWMON_DEBUG_CHIP
+ bool "Hardware Monitoring Chip debugging messages"
+ default n
+Index: linux-2.6.33/drivers/hwmon/Makefile
+===================================================================
+--- linux-2.6.33.orig/drivers/hwmon/Makefile
++++ linux-2.6.33/drivers/hwmon/Makefile
+@@ -103,6 +103,7 @@ obj-$(CONFIG_SENSORS_ISL29020) += isl290
+ obj-$(CONFIG_SENSORS_HMC6352) += hmc6352.o
+ obj-$(CONFIG_SENSORS_LIS331DL) += lis331dl.o
+ obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o
++obj-$(CONFIG_SENSORS_MRST_ANALOG_ACCEL) += mrst_analog_accel.o
+
+ ifeq ($(CONFIG_HWMON_DEBUG_CHIP),y)
+ EXTRA_CFLAGS += -DDEBUG
+Index: linux-2.6.33/drivers/hwmon/mrst_analog_accel.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/hwmon/mrst_analog_accel.c
+@@ -0,0 +1,381 @@
++/*
++ * mrst_analog_accel.c - Intel analog accelerometer driver for Moorestown
++ *
++ * Copyright (C) 2009 Intel Corp
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/i2c.h>
++#include <linux/hwmon.h>
++#include <linux/hwmon-sysfs.h>
++#include <linux/hwmon-vid.h>
++#include <linux/err.h>
++#include <linux/delay.h>
++#include <asm/ipc_defs.h>
++
++MODULE_AUTHOR("Ramesh Agarwal");
++MODULE_DESCRIPTION("Intel Moorestown Analog Accelerometer Driver");
++MODULE_LICENSE("GPL v2");
++
++/* PMIC ADC INTERRUPT REGISTERS */
++#define PMIC_ADC_ACC_REG_ADCINT 0x5F /*ADC interrupt register */
++#define PMIC_ADC_ACC_REG_MADCINT 0x60 /*ADC interrupt mask register */
++
++/* PMIC ADC CONTROL REGISTERS */
++#define PMIC_ADC_ACC_REG_ADCCNTL1 0x61 /*ADC control register */
++#define PMIC_ADC_ACC_REG_ADCCNTL2 0x62 /*ADC gain regs channel 10-17 */
++#define PMIC_ADC_ACC_REG_ADCCNTL3 0x63 /*ADC gain regs channel 18-21 */
++
++/* PMIC Data Register base */
++#define PMIC_ADC_DATA_REG_BASE 0x64
++
++/* PMIC Channel Mapping Register base */
++#define PMIC_ADC_MAPPING_BASE 0xA4
++
++/* Number of PMIC sample registers */
++#define PMIC_ADC_REG_MAX 32 /* Max no of available channel */
++
++#define PMIC_ADC_X_REG_HIGH(index) (PMIC_ADC_DATA_REG_BASE \
++ + (index * 2))
++#define PMIC_ADC_X_REG_LOW(index) (PMIC_ADC_DATA_REG_BASE \
++ + (index * 2) + 1)
++#define PMIC_ADC_Y_REG_HIGH(index) (PMIC_ADC_DATA_REG_BASE \
++ + (index * 2) + 2)
++#define PMIC_ADC_Y_REG_LOW(index) (PMIC_ADC_DATA_REG_BASE \
++ + (index * 2) + 3)
++#define PMIC_ADC_Z_REG_HIGH(index) (PMIC_ADC_DATA_REG_BASE \
++ + (index * 2) + 4)
++#define PMIC_ADC_Z_REG_LOW(index) (PMIC_ADC_DATA_REG_BASE \
++ + (index * 2) + 5)
++
++/* Number of registers to read at a time */
++#define REG_READ_PER_IPC 4 /* Read 4 at a time although the */
++ /* IPC will support max 5 */
++
++#define END_OF_CHANNEL_VALUE 0x1F /* Used to indicate the last */
++ /* channel being used */
++
++/* PMIC ADC channels for Accelero Meter */
++#define PMIC_ADC_ACC_ADC_ACC_CH14 0xE
++#define PMIC_ADC_ACC_ADC_ACC_CH15 0xF
++#define PMIC_ADC_ACC_ADC_ACC_CH16 0x10
++
++static unsigned int mrst_analog_reg_idx;
++
++/* Use IPC to read the value of the register and display
++ * X value
++ */
++static ssize_t
++mrst_analog_accel_x_axis_data_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ unsigned int ret_val;
++ struct ipc_pmic_reg_data ipc_data;
++
++ ipc_data.ioc = FALSE; /* No need to generate MSI */
++ ipc_data.num_entries = 2;
++ ipc_data.pmic_reg_data[0].register_address =
++ PMIC_ADC_X_REG_HIGH(mrst_analog_reg_idx); /* X Higher 8 bits */
++ ipc_data.pmic_reg_data[1].register_address =
++ PMIC_ADC_X_REG_LOW(mrst_analog_reg_idx); /* X lower 3 bits */
++ if (ipc_pmic_register_read(&ipc_data) != 0) {
++ printk(KERN_ALERT
++ "\nmrst_analog_accel:PMIC reg read using IPC failed\n");
++ return -1;
++ }
++ ret_val = ipc_data.pmic_reg_data[0].value << 3; /* X higher 8 bits */
++ /* lower 3 bits */
++ ret_val = ret_val | (ipc_data.pmic_reg_data[1].value & 0x07);
++ return sprintf(buf, "%d\n", ret_val);
++}
++
++/* Use IPC to read the value of the register and display
++ * Y value */
++static ssize_t
++mrst_analog_accel_y_axis_data_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ unsigned int ret_val;
++ struct ipc_pmic_reg_data ipc_data;
++
++ ipc_data.ioc = FALSE; /* No need to generate MSI */
++ ipc_data.num_entries = 2;
++ ipc_data.pmic_reg_data[0].register_address =
++ PMIC_ADC_Y_REG_HIGH(mrst_analog_reg_idx); /* Y higher 8 bits */
++ ipc_data.pmic_reg_data[1].register_address =
++ PMIC_ADC_Y_REG_LOW(mrst_analog_reg_idx); /* Y lower 3 bits */
++ if (ipc_pmic_register_read(&ipc_data) != 0) {
++ printk(KERN_ALERT
++ "\nmrst_analog_accel:PMIC reg read using IPC failed\n");
++ return -1;
++ }
++ ret_val = ipc_data.pmic_reg_data[0].value << 3; /* Y higher 8 bits */
++ /* Y lower 3 bits */
++ ret_val = ret_val | (ipc_data.pmic_reg_data[1].value & 0x07);
++ return sprintf(buf, "%d\n", ret_val);
++}
++
++/* Use IPC to read the value of the register and display
++ * Z value */
++static ssize_t
++mrst_analog_accel_z_axis_data_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ unsigned int ret_val;
++ struct ipc_pmic_reg_data ipc_data;
++
++ ipc_data.ioc = FALSE; /* No need to generate MSI */
++ ipc_data.num_entries = 2;
++ ipc_data.pmic_reg_data[0].register_address =
++ PMIC_ADC_Z_REG_HIGH(mrst_analog_reg_idx);
++ ipc_data.pmic_reg_data[1].register_address =
++ PMIC_ADC_Z_REG_LOW(mrst_analog_reg_idx); /* Z lower 3 bits */
++ if (ipc_pmic_register_read(&ipc_data) != 0) {
++ printk(KERN_ALERT
++ "\nmrst_analog_accel:PMIC reg read using IPC failed\n");
++ return -1;
++ }
++ ret_val = ipc_data.pmic_reg_data[0].value << 3; /* Z higher 8 bits */
++ /* Z lower 3 bits */
++ ret_val = ret_val | (ipc_data.pmic_reg_data[1].value & 0x07);
++ return sprintf(buf, "%d\n", ret_val);
++}
++
++
++static DEVICE_ATTR(acc_x_axis, S_IRUGO,
++ mrst_analog_accel_x_axis_data_show, NULL);
++static DEVICE_ATTR(acc_y_axis, S_IRUGO,
++ mrst_analog_accel_y_axis_data_show, NULL);
++static DEVICE_ATTR(acc_z_axis, S_IRUGO,
++ mrst_analog_accel_z_axis_data_show, NULL);
++
++static struct attribute *mid_att_acc[] = {
++ &dev_attr_acc_x_axis.attr,
++ &dev_attr_acc_y_axis.attr,
++ &dev_attr_acc_z_axis.attr,
++ NULL
++};
++
++static struct attribute_group m_analog_gr = {
++ .name = "mrst_analog_accel",
++ .attrs = mid_att_acc
++};
++
++static int
++mrst_analog_accel_initialize(void)
++{
++ struct ipc_pmic_mod_reg_data ipc_mod_data;
++ struct ipc_pmic_reg_data ipc_data;
++ u8 retval = 0;
++ u8 mad_cntrl = 0; /* MADCINT register value */
++ u8 adc_cntrl2 = 0; /* ADCCNTL2 register value */
++ int i, j;
++
++ /* Initialize the register index to use to be zero */
++ mrst_analog_reg_idx = 0;
++
++ /* check if the ADC is enabled or not
++ * Read ADCCNTL1 registers */
++ ipc_data.ioc = FALSE; /* No need to generate MSI */
++ ipc_data.num_entries = 1;
++ ipc_data.pmic_reg_data[0].register_address =
++ PMIC_ADC_ACC_REG_ADCCNTL1;
++ ipc_data.pmic_reg_data[0].value = 0;
++
++ retval = ipc_pmic_register_read(&ipc_data);
++ if (retval != 0) {
++ printk(KERN_ALERT
++ "\nmrst_analog_accel:PMIC register read failed\n");
++ return retval;
++ }
++
++ adc_cntrl2 = ipc_data.pmic_reg_data[0].value;
++
++ if ((adc_cntrl2 >> 7) & 0x1) {
++ /* If the ADC is enabled find the set of registers to use
++ ** Loop through the channel mapping register to find out the
++ ** first free one
++ */
++ for (i = 0;
++ (i < PMIC_ADC_REG_MAX) && (mrst_analog_reg_idx == 0);
++ i += REG_READ_PER_IPC) {
++
++ ipc_data.num_entries = REG_READ_PER_IPC;
++ ipc_data.ioc = FALSE; /* No need to generate MSI */
++
++ /* Reading 4 regs at a time instead of reading each
++ * reg one by one since IPC is an expensive operation
++ */
++ for (j = 0; j < REG_READ_PER_IPC; j++) {
++ ipc_data.pmic_reg_data[j].register_address =
++ PMIC_ADC_MAPPING_BASE + i + j;
++ ipc_data.pmic_reg_data[j].value = 0;
++ }
++ retval = ipc_pmic_register_read(&ipc_data);
++ if (retval != 0) {
++ printk(KERN_ALERT
++ "\nmrst_analog_accel:PMIC regs read failed\n");
++ return retval;
++ }
++ for (j = 0; j < REG_READ_PER_IPC; j++) {
++ if (ipc_data.pmic_reg_data[j].value
++ == END_OF_CHANNEL_VALUE) {
++ mrst_analog_reg_idx = i + j;
++ break;
++ }
++ }
++ }
++ }
++ /* Check to see if there are enough registers to map the channel */
++ if ((mrst_analog_reg_idx + 3) >= PMIC_ADC_REG_MAX) {
++ printk(KERN_ALERT
++ "\nmrst_analog_accel:Not enough regs to map the channels\n");
++ return -1;
++ }
++
++ /* Update the mapping registers for the accelerometer*/
++ ipc_data.num_entries = 4;
++ ipc_data.ioc = FALSE; /* No need to generate MSI */
++ ipc_data.pmic_reg_data[0].register_address =
++ PMIC_ADC_MAPPING_BASE + mrst_analog_reg_idx;
++ ipc_data.pmic_reg_data[0].value = PMIC_ADC_ACC_ADC_ACC_CH14;
++
++ ipc_data.pmic_reg_data[1].register_address =
++ PMIC_ADC_MAPPING_BASE + mrst_analog_reg_idx + 1;
++ ipc_data.pmic_reg_data[1].value = PMIC_ADC_ACC_ADC_ACC_CH15;
++
++ ipc_data.pmic_reg_data[2].register_address =
++ PMIC_ADC_MAPPING_BASE + mrst_analog_reg_idx + 2;
++ ipc_data.pmic_reg_data[2].value = PMIC_ADC_ACC_ADC_ACC_CH16;
++
++ ipc_data.pmic_reg_data[3].register_address =
++ PMIC_ADC_MAPPING_BASE + mrst_analog_reg_idx + 3 ;
++ ipc_data.pmic_reg_data[3].value = END_OF_CHANNEL_VALUE;
++
++ retval = ipc_pmic_register_write(&ipc_data, FALSE);
++ if (retval != 0) {
++ printk(KERN_ALERT
++ "\nmrst_analog_accel:PMIC reg write failed\n");
++ return retval;
++ }
++
++ /* If the ADC was not enabled, enable it now */
++ if (!(adc_cntrl2 >> 7) & 0x1) {
++ /* Mask the round robin completion interrupt */
++ ipc_mod_data.ioc = FALSE; /* No need to generate MSI */
++ ipc_mod_data.num_entries = 1;
++ mad_cntrl = 0x01;
++ ipc_mod_data.pmic_mod_reg_data[0].register_address =
++ PMIC_ADC_ACC_REG_MADCINT;
++ ipc_mod_data.pmic_mod_reg_data[0].value = mad_cntrl;
++ ipc_mod_data.pmic_mod_reg_data[0].bit_map = 0x01;
++
++ retval = ipc_pmic_register_read_modify(&ipc_mod_data);
++ if (retval != 0) {
++ printk(KERN_ALERT
++ "\nmrst_analog_accel:PMIC reg modify failed\n");
++ return retval;
++ }
++
++ adc_cntrl2 = 0xc6; /*27ms delay,start round robin,
++ enable full power */
++ ipc_data.ioc = FALSE; /* No need to generate MSI */
++ ipc_data.num_entries = 1;
++ ipc_data.pmic_reg_data[0].register_address =
++ PMIC_ADC_ACC_REG_ADCCNTL1;
++ ipc_data.pmic_reg_data[0].value = adc_cntrl2;
++ retval = ipc_pmic_register_write(&ipc_data, FALSE);
++ if (retval != 0)
++ return retval;
++ }
++ return retval;
++}
++
++static struct platform_device *mrst_analog_accel_pdev;
++static struct device *mrst_analog_accel_hwmon;
++
++static int
++mrst_analog_accel_unregister(void)
++{
++
++ printk(KERN_ALERT "\nStart Exit\n\n");
++ sysfs_remove_group(&mrst_analog_accel_hwmon->kobj, &m_analog_gr);
++ hwmon_device_unregister(mrst_analog_accel_hwmon);
++ platform_device_unregister(mrst_analog_accel_pdev);
++ printk(KERN_ALERT "\n\nEnd Exit\n");
++ return 0;
++}
++
++
++static int __init
++mrst_analog_accel_module_init(void)
++{
++ int retval = 0;
++
++ mrst_analog_accel_pdev =
++ platform_device_register_simple("mrst_analog_accel",
++ 0, NULL, 0);
++ if (IS_ERR(mrst_analog_accel_pdev)) {
++ retval = PTR_ERR(mrst_analog_accel_pdev);
++ printk(KERN_ALERT
++ "\nmrst_analog_accel:Registration with the platform failed\n");
++ goto accelero_reg_failed;
++ }
++ printk(KERN_ALERT
++ "\nmrst_analog_accel:Registered with the platform\n");
++
++ retval = mrst_analog_accel_initialize();
++ if (retval == 0) {
++ retval = sysfs_create_group(&mrst_analog_accel_pdev->dev.kobj,
++ &m_analog_gr);
++ if (retval) {
++ printk(KERN_ALERT
++ "\nmrst_analog_accel:device_create_file 1 failed\n");
++ goto accelero_reg_failed;
++ }
++ mrst_analog_accel_hwmon =
++ hwmon_device_register(&mrst_analog_accel_pdev->dev);
++ if (IS_ERR(mrst_analog_accel_hwmon)) {
++ retval = PTR_ERR(mrst_analog_accel_hwmon);
++ mrst_analog_accel_hwmon = NULL;
++ printk(KERN_ALERT
++ "\nmrst_analog_accel:Registration with hwmon failed\n");
++ }
++ } else {
++ printk(KERN_ALERT
++ "\nmrst_analog_accel:Initialization failed: %d\n", retval);
++ }
++
++accelero_reg_failed:
++ return retval;
++}
++
++static void __exit
++mrst_analog_accel_module_exit(void)
++{
++
++ mrst_analog_accel_unregister();
++}
++
++module_init(mrst_analog_accel_module_init);
++module_exit(mrst_analog_accel_module_exit);
+Index: linux-2.6.33/drivers/hwmon/lis331dl.c
+===================================================================
+--- linux-2.6.33.orig/drivers/hwmon/lis331dl.c
++++ linux-2.6.33/drivers/hwmon/lis331dl.c
+@@ -186,33 +186,10 @@ invarg:
+ return -EINVAL;
+ }
+
+-static ssize_t reboot_mem_store(struct device *dev,
+- struct device_attribute *attr, const char *buf, size_t count)
+-{
+- struct i2c_client *client = to_i2c_client(dev);
+- struct acclero_data *data = i2c_get_clientdata(client);
+- unsigned int ret_val, set_val;
+- unsigned long val;
+-
+- if (strict_strtoul(buf, 10, &val))
+- return -EINVAL;
+- ret_val = i2c_smbus_read_byte_data(client, 0x21);
+- if (val == ACCEL_MEMORY_REBOOT) {
+- mutex_lock(&data->update_lock);
+- set_val = (ret_val | (1 << 6)); /* setting the 6th bit */
+- i2c_write_current_data(client, 0x21, set_val);
+- mutex_unlock(&data->update_lock);
+- } else
+- return -EINVAL;
+- return count;
+-}
+-
+ static DEVICE_ATTR(data_rate, S_IRUGO | S_IWUSR,
+ data_rate_show, data_rate_store);
+ static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
+ power_mode_show, power_mode_store);
+-static DEVICE_ATTR(reboot_mem, S_IWUSR, NULL,
+- reboot_mem_store);
+ static DEVICE_ATTR(x, S_IRUGO, x_pos_show, NULL);
+ static DEVICE_ATTR(y, S_IRUGO, y_pos_show, NULL);
+ static DEVICE_ATTR(z, S_IRUGO, z_pos_show, NULL);
+@@ -221,7 +198,6 @@ static DEVICE_ATTR(curr_pos, S_IRUGO, xy
+ static struct attribute *mid_att_acclero[] = {
+ &dev_attr_data_rate.attr,
+ &dev_attr_power_state.attr,
+- &dev_attr_reboot_mem.attr,
+ &dev_attr_x.attr,
+ &dev_attr_y.attr,
+ &dev_attr_z.attr,
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-1-8.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-1-8.patch
new file mode 100644
index 0000000..795bf1a
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-1-8.patch
@@ -0,0 +1,1433 @@
+From c3cccfca9868aaf6e67a77c46a859a18d6384492 Mon Sep 17 00:00:00 2001
+From: R, Dharageswari <dharageswari.r@intel.com>
+Date: Thu, 29 Apr 2010 20:14:06 +0530
+Subject: [PATCH] ADR-Post-Beta-0.05.002.03-1/8-Adding Moorestown Audio Drivers - SST driver
+
+This patch is the first patch in the series of eight patches which add up SST
+driver and MAD driver. The SST driver is a driver for the SST DSP engine.This
+patch adds the SST driver main file intel_sst.c which contains the init, exit,
+probe, interrupt routine, as well as PCI suspend and resume implementations.
+intel_sst_dsp.c file implements the SST FW initialization as well as FW and
+library download steps.This patch also contains the intel_lpe.h header file
+which is placed in include folder for MAD driver (ALSA Sound card driver for
+Moorestown given in patch 7/8) to use. This file contains the definition of
+interfaces exposed by SST drivers along with definition of all the controls
+for the sound card to be used by MAD Driver
+
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+
+ new file: include/sound/intel_lpe.h
+ new file: sound/pci/sst/intel_sst.c
+ new file: sound/pci/sst/intel_sst_dsp.c
+Patch-mainline: 2.6.35?
+---
+ include/sound/intel_lpe.h | 148 +++++++++
+ sound/pci/sst/intel_sst.c | 527 ++++++++++++++++++++++++++++++
+ sound/pci/sst/intel_sst_dsp.c | 706 +++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 1381 insertions(+), 0 deletions(-)
+ create mode 100644 include/sound/intel_lpe.h
+ create mode 100644 sound/pci/sst/intel_sst.c
+ create mode 100644 sound/pci/sst/intel_sst_dsp.c
+
+diff --git a/include/sound/intel_lpe.h b/include/sound/intel_lpe.h
+new file mode 100644
+index 0000000..70e7a1f
+--- /dev/null
++++ b/include/sound/intel_lpe.h
+@@ -0,0 +1,148 @@
++#ifndef __INTEL_SST_H__
++#define __INTEL_SST_H__
++/*
++ * intel_lpe.h - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corporation
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This driver exposes the audio engine functionalities to the ALSA
++ * and middleware.
++ * This file is shared between the SST and MAD drivers
++ */
++
++#define SST_CARD_NAMES "intel_mid_card"
++
++/* control list Pmic & Lpe */
++/* Input controls */
++enum port_status {
++ ACTIVATE = 1,
++ DEACTIVATE,
++};
++
++/* Card states */
++enum sst_card_states {
++ SND_CARD_UN_INIT = 0,
++ SND_CARD_INIT_DONE,
++};
++
++enum sst_controls {
++ SST_SND_ALLOC = 0x1000,
++ SST_SND_PAUSE = 0x1001,
++ SST_SND_RESUME = 0x1002,
++ SST_SND_DROP = 0x1003,
++ SST_SND_FREE = 0x1004,
++ SST_SND_BUFFER_POINTER = 0x1005,
++ SST_SND_STREAM_INIT = 0x1006,
++ SST_SND_START = 0x1007,
++ SST_SND_STREAM_PROCESS = 0x1008,
++ SST_MAX_CONTROLS = 0x1008,
++ SST_CONTROL_BASE = 0x1000,
++ SST_ENABLE_RX_TIME_SLOT = 0x1009,
++
++};
++
++struct pcm_stream_info {
++ int str_id;
++ void *mad_substream;
++ void (*period_elapsed) (void *mad_substream);
++ unsigned long long buffer_ptr;
++ int sfreq;
++};
++
++struct stream_buffer {
++ unsigned long addr;
++ int length;
++};
++
++static inline long sst_get_time(void)
++{
++ struct timeval t;
++ do_gettimeofday(&t);
++ return t.tv_usec;
++}
++
++#ifndef CONFIG_SND_AUDIO_DBG_PRINT
++#define printk(format, arg...) do {} while (0);
++#endif
++
++struct snd_pmic_ops {
++ int card_status;
++ int num_channel;
++ int input_dev_id;
++ int mute_status;
++ int output_dev_id;
++ int (*set_input_dev) (u8 value);
++ int (*set_output_dev) (u8 value);
++
++ int (*set_mute) (int dev_id, u8 value);
++ int (*get_mute) (int dev_id, u8 *value);
++
++ int (*set_vol) (int dev_id, u8 value);
++ int (*get_vol) (int dev_id, u8 *value);
++
++ int (*init_card) (void);
++ int (*set_pcm_audio_params) (int sfreq, int word_size ,int num_channel);
++ int (*set_pcm_voice_params) (void);
++ int (*set_voice_port) (int status);
++ int (*set_audio_port) (int status);
++
++ int (*power_up_pmic_pb) (unsigned int port);
++ int (*power_up_pmic_cp) (unsigned int port);
++ int (*power_down_pmic_pb) (void);
++ int (*power_down_pmic_cp) (void);
++ int (*power_down_pmic) (void);
++};
++
++struct intel_sst_card_ops {
++ char *module_name;
++ int vendor_id;
++ int (*control_set) (int control_element, void *value);
++ int (*send_buffer) (int str_id, struct stream_buffer *mad_buf);
++ struct snd_pmic_ops *scard_ops;
++};
++
++/* periphral interrupt interface */
++enum lpe_periphral {
++ LPE_DMA = 1,
++ LPE_SSP0,
++ LPE_SSP1
++};
++
++/* modified for generic access */
++struct sc_reg_access {
++ u16 reg_addr;
++ u8 value;
++ u8 mask;
++};
++enum sc_reg_access_type {
++ PMIC_READ = 0,
++ PMIC_WRITE,
++ PMIC_READ_MODIFY,
++};
++
++int register_sst_card(struct intel_sst_card_ops *card);
++void unregister_sst_card(struct intel_sst_card_ops *card);
++int lpe_mask_periphral_intr(enum lpe_periphral device);
++int lpe_unmask_periphral_intr(enum lpe_periphral device);
++int lpe_periphral_intr_status(enum lpe_periphral device, int *status);
++#endif /* __INTEL_SST_H__ */
+diff --git a/sound/pci/sst/intel_sst.c b/sound/pci/sst/intel_sst.c
+new file mode 100644
+index 0000000..c6e68b8
+--- /dev/null
++++ b/sound/pci/sst/intel_sst.c
+@@ -0,0 +1,527 @@
++/*
++ * intel_sst.c - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This driver exposes the audio engine functionalities to the ALSA
++ * and middleware.
++ *
++ * This file contains all init functions
++ */
++
++#include <linux/cdev.h>
++#include <linux/pci.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/syscalls.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/interrupt.h>
++#include <linux/list.h>
++#include <linux/workqueue.h>
++#include <linux/firmware.h>
++#include <linux/mutex.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#ifdef CONFIG_MSTWN_POWER_MGMT
++#include <linux/intel_mid.h>
++#endif
++#include <sound/intel_lpe.h>
++#include <sound/intel_sst_ioctl.h>
++#include "intel_sst_fw_ipc.h"
++#include "intel_sst_common.h"
++
++
++MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
++MODULE_DESCRIPTION("Intel (R) Moorestown Audio Engine Driver");
++MODULE_LICENSE("GPL v2");
++MODULE_VERSION(SST_DRIVER_VERSION);
++
++struct intel_sst_drv *sst_drv_ctx;
++
++/* fops Routines */
++static const struct file_operations intel_sst_fops = {
++ .owner = THIS_MODULE,
++ .open = intel_sst_open,
++ .release = intel_sst_release,
++ .read = intel_sst_read,
++ .write = intel_sst_write,
++ .ioctl = intel_sst_ioctl,
++ .mmap = intel_sst_mmap,
++ .aio_read = intel_sst_aio_read,
++ .aio_write = intel_sst_aio_write,
++};
++
++spinlock_t pe_slock;
++unsigned int pe_inprogress = 0;
++
++/**
++* intel_sst_interrupt - Interrupt service routine for SST
++* @irq: irq number of interrupt
++* @dev_id: pointer to device structre
++*
++* This function is called by OS when SST device raises
++* an interrupt. This will be result of write in IPC register
++* Source can be busy or done interrupt
++*/
++static irqreturn_t intel_sst_interrupt(int irq, void *context)
++{
++ union interrupt_reg isr;
++ union ipc_header header;
++ union interrupt_reg imr;
++ struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
++ unsigned int size = 0;
++ int str_id;
++ struct stream_info *stream ;
++
++
++ /* Interrupt arrived, check src */
++ isr.full = readl(drv->shim + SST_ISRX);
++
++ if (isr.part.busy_interrupt) {
++ header.full = readl(drv->shim + SST_IPCD);
++ if (header.part.large)
++ size = header.part.data;
++ if (header.part.msg_id & REPLY_MSG) {
++ sst_drv_ctx->ipc_process_msg.header = header;
++
++ if (header.part.msg_id == IPC_SST_PERIOD_ELAPSED) {
++ sst_clear_interrupt();
++
++ /*spin_lock(&pe_slock);*/
++ if (pe_inprogress == 1) {
++ /*spin_unlock(&pe_slock);*/
++ return IRQ_HANDLED;
++ }
++
++ pe_inprogress = 1;
++ //spin_unlock(&pe_slock);
++
++ str_id = header.part.str_id;
++ stream = &sst_drv_ctx->streams[str_id];
++ if (stream->period_elapsed)
++ stream->period_elapsed(stream->pcm_substream);
++ //spin_lock(&pe_slock);
++ pe_inprogress = 0;
++ //spin_unlock(&pe_slock);
++ return IRQ_HANDLED;
++ } else {
++ memcpy_fromio(sst_drv_ctx->ipc_process_msg.mailbox,
++ drv->mailbox + SST_MAILBOX_RCV, size);
++ queue_work(sst_drv_ctx->process_msg_wq,
++ &sst_drv_ctx->ipc_process_msg.wq);
++ }
++ } else {
++ sst_drv_ctx->ipc_process_reply.header = header;
++ memcpy_fromio(sst_drv_ctx->ipc_process_reply.mailbox,
++ drv->mailbox + SST_MAILBOX_RCV, size);
++ queue_work(sst_drv_ctx->process_reply_wq,
++ &sst_drv_ctx->ipc_process_reply.wq);
++ }
++ /* mask busy inetrrupt */
++ imr.full = readl(drv->shim + SST_IMRX);
++ imr.part.busy_interrupt = 1;
++ /* dummy register for shim workaround */
++ writel(imr.full, sst_drv_ctx->shim + SST_ISRD);
++ writel(imr.full, drv->shim + SST_IMRX);
++ return IRQ_HANDLED;
++ } else if (isr.part.done_interrupt) {
++ /* Clear done bit */
++ header.full = readl(drv->shim + SST_IPCX);
++ header.part.done = 0;
++ /* dummy register for shim workaround */
++ writel(header.full, sst_drv_ctx->shim + SST_ISRD);
++ writel(header.full, drv->shim + SST_IPCX);
++ /* write 1 to clear status register */;
++ isr.part.done_interrupt = 1;
++ /* dummy register for shim workaround */
++ writel(isr.full, sst_drv_ctx->shim + SST_ISRD);
++ writel(isr.full, drv->shim + SST_ISRX);
++ queue_work(sst_drv_ctx->post_msg_wq,
++ &sst_drv_ctx->ipc_post_msg.wq);
++ return IRQ_HANDLED;
++ } else
++ return IRQ_NONE;
++
++}
++
++
++/* PCI Routines */
++
++static struct pci_device_id intel_sst_ids[] = {
++ { 0x8086, 0x080A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
++ { 0, }
++};
++MODULE_DEVICE_TABLE(pci, intel_sst_ids);
++
++
++/*
++* intel_sst_probe - PCI probe function
++* @pci: PCI device structure
++* @pci_id: PCI device ID structure
++*
++* This function is called by OS when a device is found
++* This enables the device, interrupt etc
++*/
++static int __devinit intel_sst_probe(struct pci_dev *pci,
++ const struct pci_device_id *pci_id)
++{
++ int i, ret = 0;
++ static struct mutex drv_ctx_lock;
++ mutex_init(&drv_ctx_lock);
++
++ mutex_lock(&drv_ctx_lock);
++ if (sst_drv_ctx) {
++ printk(KERN_ERR
++ "SST ERR: Only one sst handle is supported\n");
++ mutex_unlock(&drv_ctx_lock);
++ return -EBUSY;
++ }
++
++ sst_drv_ctx = kzalloc(sizeof(*sst_drv_ctx), GFP_KERNEL);
++ if (!sst_drv_ctx) {
++ printk(KERN_ERR
++ "SST ERR: intel_sst malloc fail\n");
++ mutex_unlock(&drv_ctx_lock);
++ return -ENOMEM;
++ }
++ mutex_unlock(&drv_ctx_lock);
++
++ mutex_init(&sst_drv_ctx->stream_cnt_lock);
++ sst_drv_ctx->pmic_state = SND_MAD_UN_INIT;
++
++ mutex_lock(&sst_drv_ctx->stream_cnt_lock);
++ sst_drv_ctx->stream_cnt = 0;
++ sst_drv_ctx->encoded_cnt = 0;
++ sst_drv_ctx->am_cnt = 0;
++ mutex_unlock(&sst_drv_ctx->stream_cnt_lock);
++ sst_drv_ctx->pb_streams = 0;
++ sst_drv_ctx->cp_streams = 0;
++ sst_drv_ctx->unique_id = 0;
++ sst_drv_ctx->pmic_port_instance = SST_DEFAULT_PMIC_PORT;
++
++ INIT_LIST_HEAD(&sst_drv_ctx->ipc_dispatch_list);
++ INIT_WORK(&sst_drv_ctx->ipc_post_msg.wq, sst_post_message);
++ INIT_WORK(&sst_drv_ctx->ipc_process_msg.wq, sst_process_message);
++ INIT_WORK(&sst_drv_ctx->ipc_process_reply.wq, sst_process_reply);
++ INIT_WORK(&sst_drv_ctx->mad_ops.wq, sst_process_mad_ops);
++ init_waitqueue_head(&sst_drv_ctx->wait_queue);
++
++ sst_drv_ctx->mad_wq = create_workqueue("sst_mad_wq");
++ if (!sst_drv_ctx->mad_wq)
++ goto do_free_drv_ctx;
++ sst_drv_ctx->post_msg_wq = create_workqueue("sst_post_msg_wq");
++ if (!sst_drv_ctx->post_msg_wq)
++ goto free_mad_wq;
++ sst_drv_ctx->process_msg_wq = create_workqueue("sst_process_msg_wqq");
++ if (!sst_drv_ctx->process_msg_wq)
++ goto free_post_msg_wq;
++ sst_drv_ctx->process_reply_wq = create_workqueue("sst_proces_reply_wq");
++ if (!sst_drv_ctx->process_reply_wq)
++ goto free_process_msg_wq;
++
++ for (i = 0; i < MAX_ACTIVE_STREAM; i++) {
++ sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
++ sst_drv_ctx->alloc_block[i].ops_block.condition = false;
++ }
++ mutex_init(&sst_drv_ctx->list_lock);
++
++ for (i = 1; i < MAX_NUM_STREAMS; i++) {
++ struct stream_info *stream = &sst_drv_ctx->streams[i];
++ INIT_LIST_HEAD(&stream->bufs);
++ mutex_init(&stream->lock);
++ spin_lock_init(&stream->pcm_lock);
++ }
++ sst_drv_ctx->mmap_mem = NULL;
++ sst_drv_ctx->mmap_len = SST_MMAP_PAGES * PAGE_SIZE;
++ while (sst_drv_ctx->mmap_len > 0) {
++ sst_drv_ctx->mmap_mem =
++ kzalloc(sst_drv_ctx->mmap_len, GFP_KERNEL);
++ if (sst_drv_ctx->mmap_mem) {
++ printk(KERN_DEBUG "SST DBG:Got memory %p size 0x%x \n",
++ sst_drv_ctx->mmap_mem,
++ sst_drv_ctx->mmap_len);
++ break;
++ }
++ sst_drv_ctx->mmap_len -= (SST_MMAP_STEP * PAGE_SIZE);
++ if (sst_drv_ctx->mmap_len <= 0) {
++ printk(KERN_ERR
++ "SST ERR: Couldnt get any +\
++ mem...abort!!\n");
++ ret = -ENOMEM;
++ goto free_process_reply_wq;
++ }
++ printk(KERN_DEBUG "SST DBG:Failed...trying %d\n", \
++ sst_drv_ctx->mmap_len);
++ }
++
++ /* Init the device */
++ ret = pci_enable_device(pci);
++ if (ret) {
++ printk(KERN_ERR
++ "SST ERR: device cant be enabled\n");
++ goto do_free_mem;
++ }
++ sst_drv_ctx->pci = pci_dev_get(pci);
++ ret = pci_request_regions(pci, SST_DRV_NAME);
++ if (ret)
++ goto do_disable_device;
++ /* map registers */
++ /* SST Shim */
++ sst_drv_ctx->shim_phy_add =
++ (unsigned long) pci_resource_start(pci, 1);
++ sst_drv_ctx->shim = pci_ioremap_bar(pci, 1);
++ if (!sst_drv_ctx->shim)
++ goto do_release_regions;
++ printk(KERN_DEBUG "SST DBG:SST Shim Ptr %p \n", sst_drv_ctx->shim);
++
++ /* Shared SRAM */
++ sst_drv_ctx->mailbox = pci_ioremap_bar(pci, 2);
++ if (!sst_drv_ctx->mailbox)
++ goto do_unmap_shim;
++ printk(KERN_DEBUG "SST DBG:SRAM Ptr %p \n", sst_drv_ctx->mailbox);
++
++ /* IRAM */
++ sst_drv_ctx->iram = pci_ioremap_bar(pci, 3);
++ if (!sst_drv_ctx->iram)
++ goto do_unmap_sram;
++ printk(KERN_DEBUG "SST DBG:IRAM Ptr %p \n", sst_drv_ctx->iram);
++
++ /* DRAM */
++ sst_drv_ctx->dram = pci_ioremap_bar(pci, 4);
++ if (!sst_drv_ctx->dram)
++ goto do_unmap_iram;
++ printk(KERN_DEBUG "SST DBG:DRAM Ptr %p \n", sst_drv_ctx->dram);
++
++ sst_drv_ctx->sst_state = SST_UN_INIT;
++ /* Register the ISR */
++ ret = request_irq(pci->irq, intel_sst_interrupt,
++ IRQF_SHARED, SST_DRV_NAME, sst_drv_ctx);
++ if (ret)
++ goto do_unmap_dram;
++ printk(KERN_DEBUG "SST DBG:Registered IRQ 0x%x\n", pci->irq);
++
++ /* Register with /dev */
++ ret = register_chrdev(INTEL_SST_MAJOR, SST_DRV_NAME, &intel_sst_fops);
++ if (ret) {
++ printk(KERN_ERR
++ "SST ERR: couldn't register +\
++ device number\n");
++ goto do_free_irq;
++ }
++
++ sst_drv_ctx->lpe_stalled = 0;
++ printk(KERN_DEBUG "SST DBG:...successfully done!!!\n");
++ return ret;
++
++do_free_irq:
++ free_irq(pci->irq, sst_drv_ctx);
++do_unmap_dram:
++ iounmap(sst_drv_ctx->dram);
++do_unmap_iram:
++ iounmap(sst_drv_ctx->iram);
++do_unmap_sram:
++ iounmap(sst_drv_ctx->mailbox);
++do_unmap_shim:
++ iounmap(sst_drv_ctx->shim);
++do_release_regions:
++ pci_release_regions(pci);
++do_disable_device:
++ pci_disable_device(pci);
++do_free_mem:
++ kfree(sst_drv_ctx->mmap_mem);
++free_process_reply_wq:
++ destroy_workqueue(sst_drv_ctx->process_reply_wq);
++free_process_msg_wq:
++ destroy_workqueue(sst_drv_ctx->process_msg_wq);
++free_post_msg_wq:
++ destroy_workqueue(sst_drv_ctx->post_msg_wq);
++free_mad_wq:
++ destroy_workqueue(sst_drv_ctx->mad_wq);
++do_free_drv_ctx:
++ kfree(sst_drv_ctx);
++ printk(KERN_ERR
++ "SST ERR: Probe failed with 0x%x \n", ret);
++ return ret;
++}
++
++/**
++* intel_sst_remove - PCI remove function
++* @pci: PCI device structure
++*
++* This function is called by OS when a device is unloaded
++* This frees the interrupt etc
++*/
++static void __devexit intel_sst_remove(struct pci_dev *pci)
++{
++ pci_dev_put(sst_drv_ctx->pci);
++ sst_drv_ctx->sst_state = SST_UN_INIT;
++ unregister_chrdev(INTEL_SST_MAJOR, SST_DRV_NAME);
++ free_irq(pci->irq, sst_drv_ctx);
++ iounmap(sst_drv_ctx->dram);
++ iounmap(sst_drv_ctx->iram);
++ iounmap(sst_drv_ctx->mailbox);
++ iounmap(sst_drv_ctx->shim);
++ sst_drv_ctx->pmic_state = SND_MAD_UN_INIT;
++ kfree(sst_drv_ctx->mmap_mem);
++ destroy_workqueue(sst_drv_ctx->process_reply_wq);
++ destroy_workqueue(sst_drv_ctx->process_msg_wq);
++ destroy_workqueue(sst_drv_ctx->post_msg_wq);
++ destroy_workqueue(sst_drv_ctx->mad_wq);
++ kfree(sst_drv_ctx);
++ pci_release_region(pci, 2);
++ pci_release_region(pci, 3);
++ pci_release_region(pci, 4);
++ pci_release_region(pci, 5);
++ pci_set_drvdata(pci, NULL);
++}
++
++/* Power Management */
++
++/**
++* intel_sst_suspend - PCI suspend function
++* @pci: PCI device structure
++* @state: PM message
++*
++* This function is called by OS when a power event occurs
++*/
++static int intel_sst_suspend(struct pci_dev *pci, pm_message_t state)
++{
++ int i;
++ printk(KERN_DEBUG "SST DBG:intel_sst_suspend called\n");
++
++ /* Pause all running streams */
++ for (i = 1; i < MAX_NUM_STREAMS; i++) {
++ if (sst_drv_ctx->streams[i].status == STREAM_RUNNING) {
++ sst_drv_ctx->active_streams[i] = true;
++ sst_pause_stream(i);
++ } else
++ sst_drv_ctx->active_streams[i] = false;
++ }
++
++ pci_set_drvdata(pci, sst_drv_ctx);
++
++ /* Send msg to FW FIXME */
++
++ /* Disable everything */
++ /* free_irq(pci->irq, sst_drv_ctx); */
++ pci_disable_device(pci);
++ pci_save_state(pci);
++ pci_set_power_state(pci, pci_choose_state(pci, state));
++ return 0;
++}
++
++/**
++* intel_sst_resume - PCI resume function
++* @pci: PCI device structure
++* @state: PM message
++*
++* This function is called by OS when a power event occurs
++*/
++static int intel_sst_resume(struct pci_dev *pci)
++{
++ int i;
++
++ printk(KERN_DEBUG "SST DBG:\nintel_sst_resume called\n");
++ sst_drv_ctx = pci_get_drvdata(pci);
++ if (pci->irq)
++ printk(KERN_DEBUG "SST DBG:irq %d \n", pci->irq);
++
++ pci_set_power_state(pci, PCI_D0);
++ pci_restore_state(pci);
++
++ /* ret = request_irq(pci->irq, intel_sst_interrupt,
++ IRQF_SHARED, "intel_sst_engine", sst_drv_ctx);
++ if (ret) {
++ return ret;
++ } */
++
++ /* Send msg to FW FIXME */
++
++ /* Start all paused streams */
++ for (i = 1; i < MAX_NUM_STREAMS; i++) {
++ if (sst_drv_ctx->active_streams[i] == true)
++ sst_resume_stream(i);
++ }
++ return 0;
++}
++
++
++static struct pci_driver driver = {
++ .name = SST_DRV_NAME,
++ .id_table = intel_sst_ids,
++ .probe = intel_sst_probe,
++ .remove = __devexit_p(intel_sst_remove),
++ .suspend = intel_sst_suspend,
++ .resume = intel_sst_resume,
++};
++
++/**
++* intel_sst_init - Module init function
++*
++* Registers with PCI
++* Registers with /dev
++*Init all data strutures
++*/
++static int __init intel_sst_init(void)
++{
++ /* Init all variables, data structure etc....*/
++ int ret = 0;
++ printk(KERN_ERR
++ "INFO: ******** SST DRIVER +\
++ loading.. Ver: %s\n", SST_DRIVER_VERSION);
++
++ /* Register with PCI */
++ ret = pci_register_driver(&driver);
++ if (ret)
++ printk(KERN_ERR
++ "SST ERR: PCI register failed \n");
++ sst_spi_mode_enable();
++ return ret;
++}
++
++/**
++* intel_sst_exit - Module exit function
++*
++* Unregisters with PCI
++* Unregisters with /dev
++* Frees all data strutures
++*/
++static void __exit intel_sst_exit(void)
++{
++ int i;
++
++ for (i = 0; i < MAX_NUM_STREAMS; i++)
++ sst_free_stream(i);
++
++ /* Send msg to FW TBD */
++ pci_unregister_driver(&driver);
++
++ flush_scheduled_work();
++ printk(KERN_DEBUG "SST DBG:...unloaded\n");
++ return;
++}
++
++module_init(intel_sst_init);
++module_exit(intel_sst_exit);
+diff --git a/sound/pci/sst/intel_sst_dsp.c b/sound/pci/sst/intel_sst_dsp.c
+new file mode 100644
+index 0000000..bc78918
+--- /dev/null
++++ b/sound/pci/sst/intel_sst_dsp.c
+@@ -0,0 +1,706 @@
++/*
++ * intel_sst_dsp.h - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This driver exposes the audio engine functionalities to the ALSA
++ * and middleware.
++ *
++ * This file contains all dsp controlling functions like firmware download,
++ * setting/resetting dsp cores, etc
++ */
++#include <linux/cdev.h>
++#include <linux/pci.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/syscalls.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/interrupt.h>
++#include <linux/list.h>
++#include <linux/workqueue.h>
++#include <linux/firmware.h>
++#include <linux/mutex.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <asm/ipc_defs.h>
++#include <sound/intel_lpe.h>
++#include <sound/intel_sst_ioctl.h>
++#include "intel_sst_fw_ipc.h"
++#include "intel_sst_common.h"
++
++
++MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
++MODULE_DESCRIPTION("Intel (R) Moorestown Audio Engine Driver");
++MODULE_LICENSE("GPL v2");
++MODULE_VERSION(SST_DRIVER_VERSION);
++
++/**
++* this function writes registers through SCU IPC
++*/
++static int sst_scu_ipc_write(u32 addr, u32 value)
++{
++ int retval = 0, retry = 3;
++ struct ipc_reg_data ipc_reg = {0};
++
++ ipc_reg.address = addr;
++ ipc_reg.data = value;
++ ipc_reg.ioc = 1;
++
++ while (retry) {
++ retval = mrst_ipc_write32(&ipc_reg);
++ if (!retval)
++ break;
++ retry--;
++ /* error */
++ printk(KERN_ERR "SST ERR: IPC +\
++ write failed %x\n", retval);
++ }
++ return retval;
++}
++
++/**
++* this function reads registers through SCU IPC
++*/
++static int sst_scu_ipc_read(u32 addr, u32 *value)
++{
++ int retval = 0, retry = 3;
++ struct ipc_reg_data ipc_reg = {0};
++
++ ipc_reg.address = addr;
++ ipc_reg.data = 0;
++ ipc_reg.ioc = 1;
++
++ while (retry) {
++ retval = mrst_ipc_read32(&ipc_reg);
++ if (!retval)
++ break;
++ retry--;
++ printk(KERN_ERR
++ "SST ERR: IPC read failed %x\n ", retval);
++ }
++ *value = ipc_reg.data;
++ printk(KERN_DEBUG "SST DBG:The read value +\
++ from the mrst_ipc is ::0x%08x\n", *value);
++ return retval;
++}
++/**
++* Resetting SST DSP
++*/
++static int intel_sst_reset_dsp(void)
++{
++ union config_status_reg csr;
++ int retval;
++ unsigned int value = 0;
++
++ retval = sst_scu_ipc_read(CHIP_REV_REG, &value);
++ if (retval)
++ return retval;
++
++#if 0
++ /* A2-CHANGES */
++ if (((value & CHIP_REV_ADDR) >> 3) == CHIP_REV_A1) {
++ sst_drv_ctx->chip_rev_id = CHIP_A1_50;
++ /* SCU FW Changes*/
++ retval = sst_scu_ipc_write(AUD_SHIM_BASE_ADDR,
++ AUD_SHIM_RATIO_1_1);
++ } else {
++ if (DSP_CLOCK_SPEED == CLK_100MHZ) {
++ sst_drv_ctx->chip_rev_id = CHIP_A2_100;
++ /* SCU FW Changes*/
++ retval = sst_scu_ipc_write(AUD_SHIM_BASE_ADDR,
++ AUD_SHIM_RATIO);
++ } else if (DSP_CLOCK_SPEED == CLK_50MHZ) {
++ sst_drv_ctx->chip_rev_id = CHIP_A2_50;
++ /* SCU FW Changes*/
++
++ retval = sst_scu_ipc_write(AUD_SHIM_BASE_ADDR,
++ AUD_SHIM_RATIO_1_1);
++ } else {
++ printk(KERN_ERR "SST ERR: +\
++ Invalid clock speed\n ");
++ return -EIO;
++ }
++ } /*else {
++ printk(KERN_ERR
++ "SST ERR: Invalid chip revision Type\n");
++ return -EIO;
++ }*/
++#endif
++ /* to fix SPI driver bug, which sets to 1 */
++ csr.full = 0x3a2;
++ writel(csr.full, sst_drv_ctx->shim + SST_ISRX);
++ writel(csr.full, sst_drv_ctx->shim + SST_CSR);
++ /* ----------- */
++
++ csr.full = readl(sst_drv_ctx->shim + SST_CSR);
++ csr.part.strb_cntr_rst = 0;
++ csr.part.run_stall = 0x1;
++ csr.part.bypass = 0x7;
++ csr.part.sst_reset = 0x1;
++ writel(csr.full, sst_drv_ctx->shim + SST_ISRX);
++ writel(csr.full, sst_drv_ctx->shim + SST_CSR);
++
++ printk(KERN_DEBUG "SST DBG:Chip version +\
++ is:: %d\n", value);
++ return retval;
++}
++
++/**
++* Start the SST DSP processor
++*/
++static int sst_start(void)
++{
++ union config_status_reg csr;
++
++ csr.full = readl(sst_drv_ctx->shim + SST_CSR);
++ csr.part.bypass = 0;
++ writel(csr.full, sst_drv_ctx->shim + SST_ISRX);
++ writel(csr.full, sst_drv_ctx->shim + SST_CSR);
++
++#if 0
++ retval = sst_scu_ipc_write(
++ sst_drv_ctx->shim_phy_add + SST_CSR, csr.full);
++ if (retval != 0)
++ printk(KERN_ERR
++ "SST ERR: scu ipc write start failed %d ", retval);
++ csr.full = readl(sst_drv_ctx->shim + SST_CSR);
++ csr.part.sst_reset = 0;
++ csr.part.run_stall = 0;
++ if ((sst_drv_ctx->chip_rev_id == CHIP_A2_50) ||
++ (sst_drv_ctx->chip_rev_id == CHIP_A2_100)) {
++ csr.part.strb_cntr_rst = 1;
++ if (sst_drv_ctx->chip_rev_id == CHIP_A2_100)
++ csr.part.sst_clk = 1;
++ }
++#endif
++ csr.part.run_stall = 0;
++ csr.part.sst_reset = 0;
++ csr.part.strb_cntr_rst = 1;
++ printk(KERN_DEBUG "SST DBG:Setting SST to execute 0x%x \n", csr.full);
++ writel(csr.full, sst_drv_ctx->shim + SST_ISRX);
++ writel(csr.full, sst_drv_ctx->shim + SST_CSR);
++
++#if 0
++ return sst_scu_ipc_write(
++ sst_drv_ctx->shim_phy_add + SST_CSR, csr.full);
++#endif
++ return 0;
++}
++
++/**
++* Parse modules that need to be placed in SST IRAM and DRAM
++*/
++static int sst_parse_module(struct fw_module_header *module)
++{
++ struct dma_block_info *block;
++ u32 count;
++ void __iomem *ram;
++
++ printk(KERN_DEBUG "SST DBG:module sign=%s sz=0x%x blks=0x%x typ=0x%x ep=0x%x sz=0x%x\n",
++ module->signature, module->mod_size,
++ module->blocks, module->type,
++ module->entry_point, sizeof(*module));
++
++ block = (void *)module + sizeof(*module);
++
++ for (count = 0; count < module->blocks; count++) {
++ if (block->size <= 0) {
++ printk(KERN_ERR "SST ERR: +\
++ block size invalid\n ");
++ return -EINVAL;
++ }
++ switch (block->type) {
++ case SST_IRAM:
++ ram = sst_drv_ctx->iram;
++ break;
++ case SST_DRAM:
++ ram = sst_drv_ctx->dram;
++ break;
++ default:
++ printk(KERN_ERR
++ "SST ERR:wrng ram typ0x%x +\
++ inblock0x%x\n", block->type, count);
++ return -EINVAL;
++ }
++ memcpy_toio(ram + block->ram_offset,
++ (void *)block + sizeof(*block), block->size);
++ block = (void *)block + sizeof(*block) + block->size;
++ }
++ return 0;
++}
++
++/**
++* sst_parse_fw_image - FW parse and load
++* This function is called to parse and download the FW image
++*/
++static int sst_parse_fw_image(const struct firmware *sst_fw)
++{
++ struct fw_header *header;
++ u32 count;
++ int ret_val;
++ struct fw_module_header *module;
++
++ BUG_ON(!sst_fw);
++
++ /* Read the header information from the data pointer */
++ header = (struct fw_header *)sst_fw->data;
++
++ /* verify FW */
++ if ((strncmp(header->signature, SST_FW_SIGN, 4) != 0) ||
++ (sst_fw->size != header->file_size + sizeof(*header))) {
++ /* Invalid FW signature */
++ printk(KERN_ERR
++ "SST ERR: InvalidFW sgn/filesiz mismtch\n ");
++ return -EINVAL;
++ }
++ printk(KERN_DEBUG "SST DBG:header sign=%s size=0x%x modules=0x%x fmt=0x%x size=0x%x\n",
++ header->signature, header->file_size, header->modules,
++ header->file_format, sizeof(*header));
++ module = (void *)sst_fw->data + sizeof(*header);
++ for (count = 0; count < header->modules; count++) {
++ /* module */
++ ret_val = sst_parse_module(module);
++ if (ret_val)
++ return ret_val;
++ module = (void *)module + sizeof(*module) + module->mod_size ;
++ }
++
++ printk(KERN_DEBUG "SST DBG:done....\n");
++ return 0;
++}
++
++/**
++* sst_load_fw - function to reset FW
++* @fw: Pointer to loaded FW
++* This function is called when the FW is loaded
++*/
++int sst_load_fw(const struct firmware *fw, void *context)
++{
++ int ret_val;
++
++ printk(KERN_DEBUG "SST DBG:called \n");
++ BUG_ON(!fw);
++
++ /* TBD: Checksum, tamper check etc */
++ ret_val = intel_sst_reset_dsp();
++ if (ret_val)
++ return ret_val;
++ /* putting the sst state to init */
++ sst_drv_ctx->sst_state = SST_UN_INIT;
++
++ ret_val = sst_parse_fw_image(fw);
++ if (ret_val)
++ return ret_val;
++
++ sst_drv_ctx->sst_state = SST_FW_LOADED;
++ /* 7. ask scu to reset the bypass bits */
++ /* 8.bring sst out of reset */
++ ret_val = sst_start();
++ if (ret_val)
++ return ret_val;
++
++ printk(KERN_DEBUG "SST DBG:...successful!!!\n");
++ return ret_val;
++}
++
++/**
++* This function is called when any codec/post processing library
++* needs to be downloaded
++*/
++static int sst_download_library(const struct firmware *fw_lib,
++ struct snd_sst_lib_download_info *lib)
++{
++ /* send IPC message and wait */
++ int i;
++ u8 pvt_id;
++ struct ipc_post *msg = NULL;
++ union config_status_reg csr;
++ struct snd_sst_str_type str_type = {0};
++ int retval = 0;
++
++ if (sst_create_large_msg(&msg))
++ return -ENOMEM;
++
++ pvt_id = sst_assign_pvt_id(sst_drv_ctx);
++ i = sst_get_block_stream(sst_drv_ctx);
++ printk(KERN_DEBUG "SST DBG:alloc block +\
++ allocated = %d, pvt_id %d\n", i, pvt_id);
++ if (i < 0) {
++ kfree(msg);
++ return -ENOMEM;
++ }
++ sst_drv_ctx->alloc_block[i].sst_id = pvt_id;
++ sst_fill_header(&msg->header, IPC_IA_PREP_LIB_DNLD, 1, 0);
++ msg->header.part.data = sizeof(u32) + sizeof(str_type);
++ str_type.codec_type = lib->dload_lib.lib_info.lib_type;
++ str_type.pvt_id = pvt_id;
++ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
++ memcpy(msg->mailbox_data + sizeof(u32), &str_type, sizeof(str_type));
++ mutex_lock(&sst_drv_ctx->list_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[i]);
++ if (retval) {
++ /* error */
++ sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
++ printk(KERN_ERR
++ "SST ERR: Prep codec downloaded failed %d\n", retval);
++ return -EIO;
++ }
++ printk(KERN_DEBUG "SST DBG:FW responded, ready for download now...\n");
++ /* downloading on success */
++ sst_drv_ctx->sst_state = SST_FW_LOADED;
++ csr.full = readl(sst_drv_ctx->shim + SST_CSR);
++ printk(KERN_DEBUG "SST DBG:CSR reg 0x%x \n", csr.full);
++ csr.part.run_stall = 1;
++ printk(KERN_DEBUG "SST DBG:HALT CSR reg setting to 0x%x \n", csr.full);
++ writel(csr.full, sst_drv_ctx->shim + SST_ISRX);
++ writel(csr.full, sst_drv_ctx->shim + SST_CSR);
++#if 0
++ retval = sst_scu_ipc_write(
++ sst_drv_ctx->shim_phy_add + SST_CSR, csr.full);
++ if (retval) {
++ /* error */
++ sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
++ printk(KERN_ERR
++ "SST ERR: IPC failed to Halt SST 0x%x\n", retval);
++ return -EAGAIN;
++ }
++#endif
++ csr.full = readl(sst_drv_ctx->shim + SST_CSR);
++ csr.part.bypass = 0x7;
++ printk(KERN_DEBUG "SST DBG:Bypass CSR reg +\
++ setting to 0x%x \n", csr.full);
++ writel(csr.full, sst_drv_ctx->shim + SST_ISRX);
++ writel(csr.full, sst_drv_ctx->shim + SST_CSR);
++#if 0
++ retval = sst_scu_ipc_write(
++ sst_drv_ctx->shim_phy_add + SST_CSR, csr.full);
++ if (retval) {
++ /* error */
++ sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
++ printk(KERN_ERR
++ "SST ERR: IPC failed to Bypass SST 0x%x\n", retval);
++ csr.part.bypass = 0x0;
++ /* bring LPE out of run stall */
++ /* send error mesages to FW- TBD FIXME */
++ csr.part.run_stall = 0x0;
++ printk(KERN_DEBUG "SST DBG:Bypass CSR reg +\
++ setting to 0x%x \n", csr.full);
++ retval = sst_scu_ipc_write(sst_drv_ctx->shim_phy_add + SST_CSR,
++ csr.full);
++ if (retval) {
++ /* prepare to download firmware again
++ for the next time TBD FIXME
++ sst_drv_ctx->sst_state = SST_UN_INIT;*/
++ }
++ return -EAGAIN;
++ }
++#endif
++ sst_parse_fw_image(fw_lib);
++
++ /* set the FW to running again */
++ csr.full = readl(sst_drv_ctx->shim + SST_CSR);
++ csr.part.bypass = 0x0;
++ printk(KERN_DEBUG "SST DBG:Bypass CSR reg +\
++ setting to 0x%x \n", csr.full);
++ writel(csr.full, sst_drv_ctx->shim + SST_ISRX);
++ writel(csr.full, sst_drv_ctx->shim + SST_CSR);
++#if 0
++ retval = sst_scu_ipc_write(
++ sst_drv_ctx->shim_phy_add + SST_CSR, csr.full);
++ if (retval) {
++ sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
++ printk(KERN_ERR\
++ "SST ERR: BypassCSR regclear failed 0x%x\n", retval);
++ /* bring LPE out of run stall */
++ /* send error mesages to FW- TBD FIXME */
++ csr.part.run_stall = 0x0;
++ printk(KERN_DEBUG "SST DBG:Bypass CSR +\
++ reg setting to 0x%x \n", csr.full);
++ retval = sst_scu_ipc_write(sst_drv_ctx->shim_phy_add + SST_CSR,
++ csr.full);
++ if (retval) {
++ /* prepare to download firmware again
++ for the next time TBD FIXME
++ sst_drv_ctx->sst_state = SST_UN_INIT;*/
++ }
++ return -EAGAIN;
++ }
++#endif
++ csr.full = readl(sst_drv_ctx->shim + SST_CSR);
++ csr.part.run_stall = 0;
++ printk(KERN_DEBUG "SST DBG:Stalll CSR reg +\
++ setting to 0x%x \n", csr.full);
++ writel(csr.full, sst_drv_ctx->shim + SST_ISRX);
++ writel(csr.full, sst_drv_ctx->shim + SST_CSR);
++#if 0
++ retval = sst_scu_ipc_write(
++ sst_drv_ctx->shim_phy_add + SST_CSR, csr.full);
++ if (retval) {
++ sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
++ printk(KERN_ERR
++ "SST ERR: Stall CSR reg clear failed 0x%x \n", retval);
++ if (retval) {
++ /* prepare to download firmware again
++ for the next time TBD FIXME
++ sst_drv_ctx->sst_state = SST_UN_INIT;*/
++ }
++ return -EAGAIN;
++ }
++#endif
++ /* send download complete and wait */
++ if (sst_create_large_msg(&msg)) {
++ sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
++ return -ENOMEM;
++ }
++
++ sst_fill_header(&msg->header, IPC_IA_LIB_DNLD_CMPLT, 1, 0);
++ msg->header.part.data = sizeof(u32) + sizeof(*lib);
++ lib->pvt_id = pvt_id;
++ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
++ memcpy(msg->mailbox_data + sizeof(u32), lib, sizeof(*lib));
++ mutex_lock(&sst_drv_ctx->list_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ printk(KERN_DEBUG "SST DBG:Waiting for FW to respond on Download complete \n");
++ sst_drv_ctx->alloc_block[i].ops_block.condition = false;
++ retval = sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[i]);
++ if (retval) {
++ /* error */
++ sst_drv_ctx->sst_state = SST_FW_RUNNING;
++ /* shouldnt we set it to error state??? TBD */
++ sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
++ return -EIO;
++ }
++
++ printk(KERN_DEBUG "SST DBG:FW responded sucess on Download complete \n");
++ sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
++ sst_drv_ctx->sst_state = SST_FW_RUNNING;
++ return 0;
++
++}
++
++/**
++* This function is called befoer downloading the codec/postprocessing
++* library is set for download to SST DSP
++*/
++static int sst_validate_library(const struct firmware *fw_lib,
++ struct lib_slot_info *slot,
++ u32 *entry_point)
++{
++ struct fw_header *header;
++ struct fw_module_header *module;
++ struct dma_block_info *block;
++ unsigned int n_blk, isize = 0, dsize = 0;
++ int err = 0;
++
++ header = (struct fw_header *)fw_lib->data;
++ if (header->modules != 1) {
++ printk(KERN_ERR\
++ "SST ERR: Module no mismatch found\n ");
++ err = -EINVAL;
++ goto exit;
++ }
++ module = (void *)fw_lib->data + sizeof(*header);
++ *entry_point = module->entry_point;
++ printk(KERN_DEBUG "SST DBG:Module entry point 0x%x \n", *entry_point);
++ printk(KERN_DEBUG "SST DBG:Module Sign %s, Size 0x%x, Blocks 0x%x Type 0x%x \n",
++ module->signature, module->mod_size,
++ module->blocks, module->type);
++
++ block = (void *)module + sizeof(*module);
++ for (n_blk = 0; n_blk < module->blocks; n_blk++) {
++ switch (block->type) {
++ case SST_IRAM:
++ isize += block->size;
++ break;
++ case SST_DRAM:
++ dsize += block->size;
++ break;
++ default:
++ printk(KERN_ERR
++ "SST ERR: Invalid blk type for 0x%x\n ", n_blk);
++ err = -EINVAL;
++ goto exit;
++ }
++ block = (void *)block + sizeof(*block) + block->size;
++ }
++ if (isize > slot->iram_size || dsize > slot->dram_size) {
++ printk(KERN_ERR
++ "SST ERR: library exceeds size allocated \n");
++ err = -EINVAL;
++ goto exit;
++ } else
++ printk(KERN_DEBUG "SST DBG:Library is safe for download...\n");
++
++ printk(KERN_DEBUG "SST DBG:iram 0x%x, dram 0x%x, allowed iram 0x%x, allowed dram 0x%x\n",
++ isize, dsize, slot->iram_size, slot->dram_size);
++exit:
++ return err;
++
++}
++
++/**
++* This function is called when FW requests for a particular libary download
++* This function prepares the library to download
++*/
++int sst_load_library(struct snd_sst_lib_download *lib, u8 ops, u32 pvt_id)
++{
++ char buf[20];
++ const char *type, *dir;
++ int len = 0, error = 0;
++ u32 entry_point;
++ const struct firmware *fw_lib;
++ struct snd_sst_lib_download_info dload_info = {{{0},},};
++
++ memset(buf, 0, sizeof(buf));
++
++ printk(KERN_DEBUG "SST DBG:Lib Type 0x%x, Slot 0x%x, ops 0x%x \n",
++ lib->lib_info.lib_type, lib->slot_info.slot_num, ops);
++ printk(KERN_DEBUG "SST DBG:Version 0x%x, name %s, caps 0x%x media type 0x%x \n",
++ lib->lib_info.lib_version, lib->lib_info.lib_name,
++ lib->lib_info.lib_caps, lib->lib_info.media_type);
++
++ printk(KERN_DEBUG "SST DBG:IRAM Size 0x%x, offset 0x%x, DRAM Size 0x%x, offset 0x%x \n",
++ lib->slot_info.iram_size, lib->slot_info.iram_offset,
++ lib->slot_info.dram_size, lib->slot_info.dram_offset);
++
++ switch (lib->lib_info.lib_type) {
++ case SST_CODEC_TYPE_MP3:
++ type = "mp3_";
++ break;
++ case SST_CODEC_TYPE_AAC:
++ type = "aac_";
++ break;
++ case SST_CODEC_TYPE_AACP:
++ type = "aac_v1_";
++ break;
++ case SST_CODEC_TYPE_eAACP:
++ type = "aac_v2_";
++ break;
++ case SST_CODEC_TYPE_WMA9:
++ type = "wma9_";
++ break;
++ default:
++ printk(KERN_ERR "SST ERR: +\
++ Invalid codec type \n");
++ error = -EINVAL;
++ goto wake;
++ }
++
++ if (ops == STREAM_OPS_CAPTURE)
++ dir = "enc_";
++ else
++ dir = "dec_";
++ strncpy(buf, type, strlen(type));
++ strncpy(buf + strlen(type), dir, strlen(dir));
++ len = strlen(type) + strlen(dir);
++ len += snprintf(buf + len, sizeof(buf) - len, "%d",
++ lib->slot_info.slot_num);
++ len += snprintf(buf + len, sizeof(buf) - len, ".bin");
++
++ printk(KERN_DEBUG "SST DBG:Requesting %s \n", buf);
++
++ error = request_firmware(&fw_lib, buf, &sst_drv_ctx->pci->dev);
++ if (error) {
++ printk(KERN_ERR
++ "SST ERR: library load failed %d \n", error);
++ goto wake;
++ }
++ error = sst_validate_library(fw_lib, &lib->slot_info, &entry_point);
++ if (error)
++ goto wake_free;
++
++ lib->mod_entry_pt = entry_point;
++ memcpy(&dload_info.dload_lib, lib, sizeof(*lib));
++ error = sst_download_library(fw_lib, &dload_info);
++ if (error)
++ goto wake_free;
++
++ /* lib is downloaded and init send alloc again */
++ printk(KERN_DEBUG "SST DBG:Library is downloaded now... \n");
++wake_free:
++ /* sst_wake_up_alloc_block(sst_drv_ctx, pvt_id, error, NULL); */
++ release_firmware(fw_lib);
++wake:
++ return error;
++}
++
++/* This Function set the bit banging*/
++int sst_spi_mode_enable()
++{
++
++ void __iomem *logical_ptr_to_bang;
++ u32 regbase = SPI_MODE_ENABLE_BASE_ADDR, range = 0x38;
++ u32 data;
++ u32 mask = 0x400000;
++ int retval;
++ int i = 0;
++
++
++ logical_ptr_to_bang = ioremap_nocache(regbase, range);
++ if (!logical_ptr_to_bang) {
++ dev_err(&sst_drv_ctx->pci->dev, \
++ "SST ERR: SSP0 bit bang -IOREMAP Failed \n");
++ return -1;
++ }
++
++ /* spi mode enable */
++ iowrite32(0x0000000f, logical_ptr_to_bang);
++ iowrite32(0x33301dc3, logical_ptr_to_bang + 0x4);
++ iowrite32(0x02010007, logical_ptr_to_bang + 0x2c);
++ iowrite32(0x00000000, logical_ptr_to_bang + 0x30);
++ iowrite32(0x00000000, logical_ptr_to_bang + 0x34);
++ iowrite32(0x0000008f, logical_ptr_to_bang);
++
++
++ retval = sst_scu_ipc_write(0xff12b004, 0x3);
++ retval = sst_scu_ipc_write(0xff12b000, 0x01070034);
++ retval = sst_scu_ipc_write(0xff12b004, 0x99);
++ retval = sst_scu_ipc_write(0xff12b000, 0x01070038);
++
++ data = ioread32(logical_ptr_to_bang+0x8);
++ dev_err(&sst_drv_ctx->pci->dev,\
++ "SST ERR: SSP0 bit bang SSCR val = 0x%08x \n", data);
++ data = data & mask;
++ while (data == mask) {
++ retval = sst_scu_ipc_write(0xff12b004, 0x3);
++ retval = sst_scu_ipc_write(0xff12b000, 0x01070034);
++ retval = sst_scu_ipc_write(0xff12b004, 0x2);
++ retval = sst_scu_ipc_write(0xff12b000, 0x01070034);
++ data = ioread32(logical_ptr_to_bang+0x8);
++ data = data & mask;
++ i++;
++ }
++ dev_err(&sst_drv_ctx->pci->dev, \
++ "SST ERR: SSP0 bit bang while loop counter= %4d \n ", i);
++ retval = sst_scu_ipc_write(0xff12b004, 0x0);
++ retval = sst_scu_ipc_write(0xff12b000, 0x01070038);
++
++ return retval;
++}
+--
+1.6.2.2
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-2-8.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-2-8.patch
new file mode 100644
index 0000000..cc3616f
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-2-8.patch
@@ -0,0 +1,900 @@
+From 4f7fcea7402d7d788fe959bc9b7ced86af72d806 Mon Sep 17 00:00:00 2001
+From: R, Dharageswari <dharageswari.r@intel.com>
+Date: Thu, 29 Apr 2010 20:20:22 +0530
+Subject: [PATCH] ADR-Post-Beta-0.05.002.03-2/8-Adding Moorestown Audio Drivers: SST header files
+
+This patch adds the common header files.
+intel_sst_common.h - This header files is private to SST driver and contain the
+common structures like SST ops, SST register offsets, debugging macro,
+sst stream definitions, and Shim register definitions.
+intel_sst_pvt.c - Utility functions used by SST driver and function
+prototypes of common functions are implemented in this file
+
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+
+ new file: sound/pci/sst/intel_sst_common.h
+ new file: sound/pci/sst/intel_sst_pvt.c
+Patch-mainline: 2.6.35?
+---
+ sound/pci/sst/intel_sst_common.h | 538 ++++++++++++++++++++++++++++++++++++++
+ sound/pci/sst/intel_sst_pvt.c | 323 +++++++++++++++++++++++
+ 2 files changed, 861 insertions(+), 0 deletions(-)
+ create mode 100644 sound/pci/sst/intel_sst_common.h
+ create mode 100644 sound/pci/sst/intel_sst_pvt.c
+
+diff --git a/sound/pci/sst/intel_sst_common.h b/sound/pci/sst/intel_sst_common.h
+new file mode 100644
+index 0000000..d9a720d
+--- /dev/null
++++ b/sound/pci/sst/intel_sst_common.h
+@@ -0,0 +1,538 @@
++#ifndef __INTEL_SST_COMMON_H__
++#define __INTEL_SST_COMMON_H__
++/*
++ * intel_sst_common.h - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corporation
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * Common private declarations for SST
++ */
++#include <linux/time.h>
++#ifdef CONFIG_MSTWN_POWER_MGMT
++#include <linux/intel_mid.h>
++#endif
++/* #define SND_LOOP_TEST */
++
++#define SST_DRIVER_VERSION "0.05.002.03"
++#define SST_VERSION_NUM 0x050203
++
++/* driver names */
++#define SST_DRV_NAME "intel_sst_driver"
++#define SST_FW_STD_FILENAME "fw_sst.bin"
++
++
++enum sst_states {
++ SST_FW_LOADED = 1,
++ SST_FW_RUNNING,
++ SST_UN_INIT,
++ SST_ERROR,
++};
++
++#define MAX_ACTIVE_STREAM 3
++#define MAX_ENC_STREAM 1
++#define MAX_AM_HANDLES 1
++#define ALLOC_TIMEOUT 5000
++/* SST numbers */
++#define SST_BLOCK_TIMEOUT 5000
++#define TARGET_DEV_BLOCK_TIMEOUT 5000
++
++/* FIXME */
++#define INTEL_SST_MAJOR 255
++#define BLOCK_UNINIT -1
++#define RX_TIMESLOT_UNINIT -1
++/* Chip revision ID */
++
++/*
++#define CHIP_A1_50 0x01
++#define CHIP_A2_50 0x02
++#define CHIP_A2_100 0x03
++*/
++
++/*
++#define DSP_CLOCK_SPEED 100 */ /* 50: 50MHz, 100: 100MHz */
++
++/* SST register map */
++#define SST_CSR 0x00
++#define SST_PISR 0x08
++#define SST_PIMR 0x10
++#define SST_ISRX 0x18
++#define SST_IMRX 0x28
++#define SST_IPCX 0x38 /* IPC IA-SST */
++#define SST_IPCD 0x40 /* IPC SST-IA */
++#define SST_ISRD 0x20 /* dummy register for shim workaround */
++#define SST_SHIM_SIZE 0X44
++
++#define SPI_MODE_ENABLE_BASE_ADDR 0xffae4000
++#define FW_SIGNATURE_SIZE 4
++
++/* PMIC and SST hardware states */
++enum sst_mad_states {
++ SND_MAD_UN_INIT = 0,
++ SND_MAD_INIT_DONE,
++};
++
++/* stream states */
++enum sst_stream_states {
++ STREAM_UN_INIT = 0, /* Freed/Not used stream */
++ STREAM_RUNNING = 1, /* Running */
++ STREAM_PAUSED = 2, /* Paused stream */
++ STREAM_DECODE = 4, /* stream is in decoding only state */
++ STREAM_INIT = 5, /* stream init, waiting for data */
++};
++
++
++enum sst_ram_type{
++ SST_IRAM = 1,
++ SST_DRAM = 2,
++};
++/* SST shim registers to structure mapping */
++union config_status_reg {
++ struct {
++ u32 rsvd0:1;
++ u32 sst_reset:1;
++ u32 hw_rsvd:3;
++ u32 sst_clk:2;
++ u32 bypass:3;
++ u32 run_stall:1;
++ u32 rsvd1:2;
++ u32 strb_cntr_rst:1;
++ u32 rsvd:18;
++ } part;
++ u32 full;
++};
++
++union interrupt_reg {
++ struct {
++ u32 done_interrupt:1;
++ u32 busy_interrupt:1;
++ u32 rsvd:30;
++ } part;
++ u32 full;
++};
++
++union sst_pisr_reg {
++ struct {
++ u32 pssp0:1;
++ u32 pssp1:1;
++ u32 rsvd0:3;
++ u32 dmac:1;
++ u32 rsvd1:26;
++ } part;
++ u32 full;
++};
++
++union sst_pimr_reg {
++ struct {
++ u32 ssp0:1;
++ u32 ssp1:1;
++ u32 rsvd0:3;
++ u32 dmac:1;
++ u32 rsvd1:10;
++ u32 ssp0_sc:1;
++ u32 ssp1_sc:1;
++ u32 rsvd2:3;
++ u32 dmac_sc:1;
++ u32 rsvd3:10;
++ } part;
++ u32 full;
++};
++
++
++struct sst_stream_bufs {
++ struct list_head node;
++ u32 size;
++ const char *addr;
++ u32 data_copied;
++ bool in_use;
++ u32 offset;
++};
++
++struct snd_sst_user_cap_list {
++ unsigned int iov_index; /* index of iov */
++ unsigned long iov_offset; /* offset in iov */
++ unsigned long offset; /* offset in kmem */
++ unsigned long size; /* size copied */
++ struct list_head node;
++};
++/*
++This structure is used to block a user/fw data call to another
++fw/user call
++*/
++struct sst_block {
++ bool condition; /* condition for blocking check */
++ int ret_code; /* ret code when block is released */
++ void *data; /* data to be appsed for block if any */
++ bool on;
++};
++
++enum snd_sst_buf_type {
++ SST_BUF_USER_STATIC = 1,
++ SST_BUF_USER_DYNAMIC,
++ SST_BUF_MMAP_STATIC,
++ SST_BUF_MMAP_DYNAMIC,
++};
++enum snd_src {
++ SST_DRV = 1,
++ MAD_DRV = 2
++};
++/*
++structure that holds the stream information
++*/
++struct stream_info {
++ unsigned int status;
++ unsigned int prev;
++ u8 codec;
++ unsigned int sst_id;
++ unsigned int ops;
++ struct list_head bufs;
++ struct mutex lock; /* mutex */
++ spinlock_t pcm_lock;
++ bool mmapped;
++ unsigned int sg_index; /* current buf Index */
++ unsigned char *cur_ptr; /* Current static bufs */
++ struct snd_sst_buf_entry *buf_entry;
++ struct sst_block data_blk; /* stream ops block */
++ struct sst_block ctrl_blk; /* stream control cmd block */
++ enum snd_sst_buf_type buf_type;
++ void *pcm_substream;
++ void (*period_elapsed) (void *pcm_substream);
++ unsigned int sfreq;
++ void *decode_ibuf, *decode_obuf;
++ unsigned int decode_isize, decode_osize;
++ u8 decode_ibuf_type, decode_obuf_type;
++ unsigned int idecode_alloc;
++ unsigned int need_draining;
++ unsigned int str_type;
++ u32 curr_bytes;
++ u32 cumm_bytes;
++ u32 src; /* hack to remove */
++};
++
++
++
++/*
++this structure is used for blocking the user's alloc calls to
++fw's response to alloc calls
++*/
++struct stream_alloc_block {
++ int sst_id; /* session id of blocked stream */
++ struct sst_block ops_block; /* ops block struture */
++};
++
++#define SST_FW_SIGN "$SST"
++#define SST_FW_LIB_SIGN "$LIB"
++
++/* FW file headers */
++struct fw_header {
++ unsigned char signature[FW_SIGNATURE_SIZE]; /* FW signature */
++ u32 file_size; /* size of fw minus this header */
++ u32 modules; /* # of modules */
++ u32 file_format; /* version of header format */
++ u32 reserved[4];
++};
++
++struct fw_module_header {
++ unsigned char signature[FW_SIGNATURE_SIZE]; /* module signature */
++ u32 mod_size; /* size of module */
++ u32 blocks; /* # of blocks */
++ u32 type; /* codec type, pp lib */
++ u32 entry_point;
++};
++
++struct dma_block_info {
++ enum sst_ram_type type; /* IRAM/DRAM */
++ u32 size; /* Bytes */
++ u32 ram_offset; /* Offset in I/DRAM */
++ u32 rsvd; /* Reserved field */
++};
++
++struct ioctl_pvt_data {
++ int str_id;
++ int pvt_id;
++};
++
++struct sst_ipc_msg_wq {
++ union ipc_header header;
++ char mailbox[SST_MAILBOX_SIZE];
++ struct work_struct wq;
++};
++
++struct mad_ops_wq {
++ int stream_id;
++ enum sst_controls control_op;
++ struct work_struct wq;
++
++};
++
++#define SST_MMAP_PAGES (640*1024 / PAGE_SIZE)
++#define SST_MMAP_STEP (40*1024 / PAGE_SIZE)
++
++/* driver ops */
++struct intel_sst_drv {
++ bool pmic_state;
++ int pmic_vendor;
++ int sst_state;
++/* int chip_rev_id; */
++ void __iomem *shim;
++ void __iomem *mailbox;
++ void __iomem *iram;
++ void __iomem *dram;
++ unsigned int shim_phy_add;
++ struct list_head ipc_dispatch_list;
++ struct work_struct ipc_post_msg_wq;
++ struct sst_ipc_msg_wq ipc_process_msg;
++ struct sst_ipc_msg_wq ipc_process_reply;
++ struct sst_ipc_msg_wq ipc_post_msg;
++ struct mad_ops_wq mad_ops;
++ wait_queue_head_t wait_queue;
++ struct workqueue_struct *mad_wq;
++ struct workqueue_struct *post_msg_wq;
++ struct workqueue_struct *process_msg_wq;
++ struct workqueue_struct *process_reply_wq;
++
++ struct stream_info streams[MAX_NUM_STREAMS];
++ struct stream_alloc_block alloc_block[MAX_ACTIVE_STREAM];
++ struct sst_block tgt_dev_blk, fw_info_blk,
++ vol_info_blk, mute_info_blk, hs_info_blk;
++ struct mutex list_lock;/* mutex for IPC list locking */
++ struct snd_pmic_ops *scard_ops;
++ struct pci_dev *pci;
++ int active_streams[MAX_NUM_STREAMS];
++ void *mmap_mem;
++ struct mutex stream_cnt_lock;
++ unsigned int mmap_len;
++ unsigned int unique_id;
++ unsigned int stream_cnt; /* total streams */
++ unsigned int encoded_cnt; /* enocded streams only */
++ unsigned int am_cnt;
++ unsigned int pb_streams; /* pb streams active */
++ unsigned int cp_streams; /* cp streams active */
++ unsigned int lpe_stalled; /* LPE is stalled or not */
++ unsigned int pmic_port_instance; /*pmic port instance enabled*/
++ int rx_time_slot_status;
++ unsigned int lpaudio_start; /* 1 - LPA stream(MP3 pb) in progress*/
++ unsigned int audio_start; /* 1 - LPA stream(Non-MP3 pb) in progress*/
++};
++
++extern struct intel_sst_drv *sst_drv_ctx;
++
++/* register definitions */
++/*SCU FW Changes*/
++/*#define AUD_CLK_ADDR 0xff11d83c
++#define AUD_CLK_DISABLE 0x80008008
++#define AUD_CLK_50MHZ 0x80008301
++#define AUD_CLK_RATIO_1_2 0x80000301
++#define AUD_CLK_RATIO_8008 0x80008008
++#define AUD_CLK_RATIO_8101 0x80008101
++#define AUD_CLK_RATIO_0101 0x80000101
++#define AUD_SYS_ADDR 0xff11d118
++#define AUD_SYS_RESET 0x7ffffcff
++#define AUD_SYS_SET 0x7fffffff
++#define AUD_SHIM_BASE_ADDR 0xffae8000 */
++/*
++#define AUD_SHIM_RATIO_1_1 0x382
++#define AUD_SHIM_RATIO 0x3a2
++*/
++/*SCU FW Changes*/
++/*#define AUD_CLK_200 0xff11d200
++#define AUD_CLK_204 0xff11d204
++#define AUD_INIT_VAL 0x0*/
++#define CHIP_REV_REG 0xff108000
++#define CHIP_REV_ADDR 0x78
++/*
++#define CHIP_REV_A1 0x0
++#define CHIP_REV_A2 0x3
++#define CLK_50MHZ 50
++#define CLK_100MHZ 100
++*/
++/* misc definitions */
++#define FW_DWNL_ID 0xFF
++#define LOOP1 0x11111111
++#define LOOP2 0x22222222
++#define LOOP3 0x33333333
++#define LOOP4 0x44444444
++
++#define SST_DEFAULT_PMIC_PORT 1 /*audio port*/
++/* NOTE: status will +ve for good cases and -ve for error ones */
++#define MAX_STREAM_FIELD 255
++
++int sst_alloc_stream(char *params, unsigned int stream_ops, u8 codec,
++ unsigned int session_id);
++int sst_alloc_stream_response(unsigned int str_id,
++ struct snd_sst_str_type *type);
++int sst_stalled(void);
++int sst_pause_stream(int id);
++int sst_resume_stream(int id);
++int sst_enable_rx_timeslot(int status);
++int sst_drop_stream(int id);
++int sst_free_stream(int id);
++int sst_play_frame(int streamID);
++int sst_capture_frame(int streamID);
++int sst_set_stream_param(int streamID, struct snd_sst_params *str_param);
++int sst_target_device_select(struct snd_sst_target_device *target_device);
++int sst_decode(int str_id, struct snd_sst_dbufs *dbufs);
++int sst_get_decoded_bytes(int str_id, unsigned long long *bytes);
++int sst_get_fw_info(struct snd_sst_fw_info *info);
++int sst_get_stream_params(int str_id,
++ struct snd_sst_get_stream_params *get_params);
++int sst_drain_stream(int str_id);
++int sst_get_vol(struct snd_sst_vol *set_vol);
++int sst_set_vol(struct snd_sst_vol *set_vol);
++int sst_set_mute(struct snd_sst_mute *set_mute);
++
++
++void sst_post_message(struct work_struct *work);
++void sst_process_message(struct work_struct *work);
++void sst_process_reply(struct work_struct *work);
++void sst_process_mad_ops(struct work_struct *work);
++void sst_process_mad_jack_detection(struct work_struct *work);
++
++int intel_sst_ioctl(struct inode *i_node, struct file *file_ptr,
++ unsigned int cmd, unsigned long arg);
++int intel_sst_open(struct inode *i_node, struct file *file_ptr);
++int intel_sst_release(struct inode *i_node, struct file *file_ptr);
++int intel_sst_read(struct file *file_ptr, char __user *buf,
++ size_t count, loff_t *ppos);
++int intel_sst_write(struct file *file_ptr, const char __user *buf,
++ size_t count, loff_t *ppos);
++int intel_sst_mmap(struct file *fp, struct vm_area_struct *vma);
++ssize_t intel_sst_aio_write(struct kiocb *kiocb, const struct iovec *iov,
++ unsigned long nr_segs, loff_t offset);
++ssize_t intel_sst_aio_read(struct kiocb *kiocb, const struct iovec *iov,
++ unsigned long nr_segs, loff_t offset);
++
++int sst_load_fw(const struct firmware *fw, void *context);
++int sst_load_library(struct snd_sst_lib_download *lib, u8 ops, u32 pvt_id);
++int sst_spi_mode_enable(void);
++int sst_get_block_stream(struct intel_sst_drv *sst_drv_ctx);
++
++void sst_print_hex(unsigned char *buf, unsigned int size);
++int sst_wait_interruptible(struct intel_sst_drv *sst_drv_ctx,
++ struct sst_block *block);
++int sst_wait_interruptible_timeout(struct intel_sst_drv *sst_drv_ctx,
++ struct sst_block *block, int timeout);
++int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx,
++ struct stream_alloc_block *block);
++int sst_create_large_msg(struct ipc_post **arg);
++int sst_create_short_msg(struct ipc_post **arg);
++void sst_print_params(struct snd_sst_params *str_params);
++void sst_wake_up_alloc_block(struct intel_sst_drv *sst_drv_ctx,
++ u8 sst_id, int status, void *data);
++void sst_clear_interrupt(void);
++
++/**
++* this function is an inline function that sets the headers before
++* sending a message
++*/
++static inline void sst_fill_header(union ipc_header *header,
++ int msg, int large, int strID)
++{
++ header->part.msg_id = msg;
++ header->part.str_id = strID;
++ header->part.large = large;
++ header->part.done = 0;
++ header->part.busy = 1;
++ header->part.data = 0;
++}
++
++/**
++* this inline function assigns a private id for calls that dont have stream
++* context yet
++*/
++static inline unsigned int sst_assign_pvt_id(struct intel_sst_drv *sst_drv_ctx)
++{
++ sst_drv_ctx->unique_id++;
++ if (sst_drv_ctx->unique_id >= MAX_NUM_STREAMS)
++ sst_drv_ctx->unique_id = 1;
++ return sst_drv_ctx->unique_id;
++}
++
++/**
++* this function initialzes stream context
++*/
++static inline void sst_init_stream(struct stream_info *stream,
++ int codec, int str_type, int sst_id, int ops)
++{
++ stream->status = STREAM_INIT;
++ stream->prev = STREAM_UN_INIT;
++ stream->codec = codec;
++ stream->sst_id = sst_id;
++ stream->str_type = str_type;
++ stream->ops = ops;
++ stream->data_blk.on = false;
++ stream->data_blk.condition = false;
++ stream->data_blk.ret_code = 0;
++ stream->data_blk.data = NULL;
++ stream->ctrl_blk.on = false;
++ stream->ctrl_blk.condition = false;
++ stream->ctrl_blk.ret_code = 0;
++ stream->ctrl_blk.data = NULL;
++ stream->need_draining = false;
++ stream->decode_ibuf = NULL;
++ stream->decode_isize = 0;
++ stream->mmapped = false;
++}
++
++/**
++* this function resets the stream contexts
++*/
++static inline void sst_clean_stream(struct stream_info *stream)
++{
++ struct sst_stream_bufs *bufs = NULL, *_bufs;
++ stream->status = STREAM_UN_INIT;
++ stream->prev = STREAM_UN_INIT;
++ mutex_lock(&stream->lock);
++ list_for_each_entry_safe(bufs, _bufs, &stream->bufs, node) {
++ list_del(&bufs->node);
++ kfree(bufs);
++ }
++ mutex_unlock(&stream->lock);
++
++ if (stream->ops != STREAM_OPS_PLAYBACK_DRM)
++ kfree(stream->decode_ibuf);
++}
++
++/**
++* this function generates events for OSPM
++*/
++static inline int sst_ospm_send_event(int event)
++{
++#ifdef CONFIG_MSTWN_POWER_MGMT
++ return ospm_generate_netlink_event(AUDIO_SUBSYTEM_ID, event);
++#else
++ return 0;
++#endif
++}
++
++/**
++* this function validates the stream id
++*/
++static inline int sst_validate_strid(int str_id)
++{
++ if (str_id <= 0 || str_id >= MAX_NUM_STREAMS)
++ return -EINVAL;
++ else
++ return 0;
++}
++
++#endif /* __INTEL_SST_COMMON_H__ */
+diff --git a/sound/pci/sst/intel_sst_pvt.c b/sound/pci/sst/intel_sst_pvt.c
+new file mode 100644
+index 0000000..95d79be
+--- /dev/null
++++ b/sound/pci/sst/intel_sst_pvt.c
+@@ -0,0 +1,323 @@
++/*
++ * intel_sst_pvt.c - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This driver exposes the audio engine functionalities to the ALSA
++ * and middleware.
++ *
++ * This file contains all private functions
++ */
++
++#include <linux/cdev.h>
++#include <linux/pci.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/syscalls.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/interrupt.h>
++#include <linux/list.h>
++#include <linux/workqueue.h>
++#include <linux/firmware.h>
++#include <linux/mutex.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <sound/intel_lpe.h>
++#include <sound/intel_sst_ioctl.h>
++#include "intel_sst_fw_ipc.h"
++#include "intel_sst_common.h"
++
++/**
++* this function assigns a block for the calls that dont have stream context yet
++* the blocks are used for waiting on Firmware's response for any operation
++*/
++int sst_get_block_stream(struct intel_sst_drv *sst_drv_ctx)
++{
++ int i;
++
++ for (i = 0; i < MAX_ACTIVE_STREAM; i++) {
++ if (sst_drv_ctx->alloc_block[i].sst_id == BLOCK_UNINIT) {
++ sst_drv_ctx->alloc_block[i].ops_block.condition = false;
++ sst_drv_ctx->alloc_block[i].ops_block.ret_code = 0;
++ sst_drv_ctx->alloc_block[i].sst_id = 0;
++ break;
++ }
++ }
++ if (i == MAX_ACTIVE_STREAM) {
++ printk(KERN_ERR
++ "SST ERR: max alloc_stream reached");
++ i = -EBUSY; /* active stream limit reached */
++ }
++ return i;
++}
++
++/**
++* this function is a debug function that is used to print contents of a buffer
++*/
++void sst_print_hex(unsigned char *buf, unsigned int size)
++{
++ unsigned int i;
++
++ for (i = 0; i < size; i++) {
++ printk(KERN_DEBUG "SST DBG:%02x ", buf[i]);
++ if ((i != 0) && ((i % 8) == 0))
++ printk(KERN_DEBUG "SST DBG:\n");
++ }
++}
++/**
++* this function waits without a timeout (and is interruptable) for a
++* given block event
++*/
++int sst_wait_interruptible(struct intel_sst_drv *sst_drv_ctx,
++ struct sst_block *block)
++{
++ int retval = 0;
++
++ if (!wait_event_interruptible(sst_drv_ctx->wait_queue,
++ block->condition)) {
++ /* event wake */
++ if (block->ret_code < 0) {
++ printk(KERN_ERR
++ "SST ERR: stream failed %d\n"\
++ , block->ret_code);
++ retval = -EBUSY;
++ } else {
++ printk(KERN_DEBUG "SST DBG:event up\n");
++ retval = 0;
++ }
++ } else {
++ printk(KERN_ERR
++ "SST ERR: signal interrupted\n");
++ retval = -EINTR;
++ }
++ return retval;
++
++}
++
++/**
++* this function waits with a timeout value (and is interruptle) on a
++* given block event
++*/
++int sst_wait_interruptible_timeout(
++ struct intel_sst_drv *sst_drv_ctx,
++ struct sst_block *block, int timeout)
++{
++ int retval = 0;
++
++ printk(KERN_DEBUG "SST DBG:waiting....\n");
++ if (wait_event_interruptible_timeout(sst_drv_ctx->wait_queue,
++ block->condition,
++ msecs_to_jiffies(timeout))) {
++ if (block->ret_code < 0) {
++ printk(KERN_ERR
++ "SST ERR: stream failed %d\n"\
++ , block->ret_code);
++ } else
++ printk(KERN_DEBUG "SST DBG:event up\n");
++ retval = block->ret_code;
++ } else {
++ block->on = false;
++ printk(KERN_ERR
++ "SST ERR: timeout occured...\n");
++ /* settign firmware state as uninit so that the
++ firmware will get redownloaded on next request
++ this is because firmare not responding for 5 sec
++ is equalant to some unrecoverable error of FW
++ sst_drv_ctx->sst_state = SST_UN_INIT;*/
++ retval = -EBUSY;
++ }
++ return retval;
++
++}
++
++/**
++* this function waits with on a given block event
++*/
++int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx,
++ struct stream_alloc_block *block)
++{
++ int retval = 0;
++
++ /* NOTE:
++ Observed that FW processes the alloc msg and replies even
++ before the alloc thread has finished execution */
++ printk(KERN_DEBUG "SST DBG:waiting for %x, +\
++ condition %x \n", block->sst_id,
++ block->ops_block.condition);
++ if (wait_event_interruptible_timeout(sst_drv_ctx->wait_queue,
++ block->ops_block.condition,
++ msecs_to_jiffies(SST_BLOCK_TIMEOUT))) {
++ /* event wake */
++ printk(KERN_DEBUG "SST DBG:Event wake +\
++ ... %x \n", block->ops_block.condition);
++ printk(KERN_DEBUG "SST DBG:message +\
++ ret: %d\n", block->ops_block.ret_code);
++ retval = block->ops_block.ret_code;
++ } else {
++ block->ops_block.on = false;
++ printk(KERN_ERR
++ "SST ERR: Wait timed-out %x \n",\
++ block->ops_block.condition);
++ /* settign firmware state as uninit so that the
++ firmware will get redownloaded on next request
++ this is because firmare not responding for 5 sec
++ is equalant to some unrecoverable error of FW
++ sst_drv_ctx->sst_state = SST_UN_INIT;*/
++ retval = -EBUSY;
++ }
++ return retval;
++
++}
++
++/**
++* this function allocats structures to send a large message to the firmware
++*/
++int sst_create_large_msg(struct ipc_post **arg)
++{
++ struct ipc_post *msg;
++
++ msg = kzalloc(sizeof(struct ipc_post), GFP_ATOMIC);
++ if (!msg) {
++ printk(KERN_ERR
++ "SST ERR: kzalloc msg failed \n");
++ return -ENOMEM;
++ }
++
++ msg->mailbox_data = kzalloc(SST_MAILBOX_SIZE, GFP_ATOMIC);
++ if (!msg->mailbox_data) {
++ kfree(msg);
++ printk(KERN_ERR
++ "SST ERR: kzalloc mailbox_data failed");
++ return -ENOMEM;
++ };
++ *arg = msg;
++ return 0;
++}
++
++/**
++* this function allocats structures to send a short message to the firmware
++*/
++int sst_create_short_msg(struct ipc_post **arg)
++{
++ struct ipc_post *msg;
++
++ msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
++ if (!msg) {
++ printk(KERN_ERR
++ "SST ERR: kzalloc msg failed \n");
++ return -ENOMEM;
++ }
++ msg->mailbox_data = NULL;
++ *arg = msg;
++ return 0;
++}
++
++/**
++* this function is a debug funtion to print the stream parameters
++*/
++void sst_print_params(struct snd_sst_params *str_params)
++{
++ switch (str_params->codec) {
++ case SST_CODEC_TYPE_PCM:
++ printk(KERN_DEBUG "SST DBG:pcm \n");
++ printk(KERN_DEBUG "SST DBG:chan=%d, sfreq = %d, wd_sz = %d \
++ brate = %d buffer_size= 0x%d\
++ period_cnt = %d\n",
++ str_params->sparams.uc.pcm_params.num_chan,
++ str_params->sparams.uc.pcm_params.sfreq,
++ str_params->sparams.uc.pcm_params.pcm_wd_sz,
++ str_params->sparams.uc.pcm_params.brate,
++ // str_params->sparams.uc.pcm_params.frame_size,
++ // str_params->sparams.uc.pcm_params.samples_per_frame,
++ str_params->sparams.uc.pcm_params.buffer_size,
++ str_params->sparams.uc.pcm_params.period_count);
++ break;
++
++ case SST_CODEC_TYPE_MP3:
++ printk(KERN_DEBUG "SST DBG:mp3 \n");
++ printk(KERN_DEBUG "SST DBG:chan=%d, brate=%d, sfreq = %d, wd_sz = %d\n",
++ str_params->sparams.uc.mp3_params.num_chan,
++ str_params->sparams.uc.mp3_params.brate,
++ str_params->sparams.uc.mp3_params.sfreq,
++ str_params->sparams.uc.mp3_params.pcm_wd_sz);
++ break;
++
++ case SST_CODEC_TYPE_AAC:
++ printk(KERN_DEBUG "SST DBG:aac \n");
++ printk(KERN_DEBUG "SST DBG:chan=%d, brate=%d, sfreq = %d, wd_sz = %d,asrate=%d\n",
++ str_params->sparams. uc.aac_params.num_chan,
++ str_params->sparams.uc.aac_params.brate,
++ str_params->sparams.uc.aac_params.sfreq,
++ str_params->sparams.uc.aac_params.pcm_wd_sz,
++ str_params->sparams.uc.aac_params.aac_srate);
++ printk(KERN_DEBUG "SST DBG:mpgid=%d profile=%d, aot = %d\n",
++ str_params->sparams.uc.aac_params.mpg_id,
++ str_params->sparams.uc.aac_params.aac_profile,
++ str_params->sparams.uc.aac_params.aot);
++ break;
++ case SST_CODEC_TYPE_WMA9:
++ printk(KERN_DEBUG "SST DBG:wma type \n");
++ printk(KERN_DEBUG "SST DBG:chan=%d, brate=%d, sfreq = %d, wd_sz = %d, tag=%d\n",
++ str_params->sparams. uc.wma_params.num_chan,
++ str_params->sparams.uc.wma_params.brate,
++ str_params->sparams.uc.wma_params.sfreq,
++ str_params->sparams.uc.wma_params.pcm_wd_sz,
++ str_params->sparams.uc.wma_params.format_tag);
++ printk(KERN_DEBUG "SST DBG:mask=%d, +\
++ b align=%d, enc opt =%d, op align =%d\n",
++ str_params->sparams.uc.wma_params.channel_mask,
++ str_params->sparams.uc.wma_params.block_align,
++ str_params->sparams.uc.wma_params.wma_encode_opt,
++ str_params->sparams.uc.wma_params.op_align);
++ break;
++ default:
++ printk(KERN_DEBUG "SST DBG:other +\
++ codec 0x%x\n", str_params->codec);
++ }
++}
++
++/**
++* this function wakes up a sleeping block event based on the response
++*/
++void sst_wake_up_alloc_block(struct intel_sst_drv *sst_drv_ctx,
++ u8 sst_id, int status, void *data)
++{
++ int i;
++
++ /* Unblock with retval code */
++ for (i = 0; i < MAX_ACTIVE_STREAM; i++) {
++ if (sst_id == sst_drv_ctx->alloc_block[i].sst_id) {
++ sst_drv_ctx->alloc_block[i].ops_block.condition = true;
++ sst_drv_ctx->alloc_block[i].ops_block.ret_code = status;
++ sst_drv_ctx->alloc_block[i].ops_block.data = data;
++ printk(KERN_DEBUG "SST DBG:wake id %d, +\
++ sst_id %d condition %x\n", i,
++ sst_drv_ctx->alloc_block[i].sst_id,
++ sst_drv_ctx->alloc_block[i].ops_block.condition);
++ wake_up(&sst_drv_ctx->wait_queue);
++ break;
++ }
++ }
++}
+--
+1.6.2.2
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-3-8.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-3-8.patch
new file mode 100644
index 0000000..aca3e46
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-3-8.patch
@@ -0,0 +1,2532 @@
+From 3657fc661cea2b120a7516cb66002fcd3af34e35 Mon Sep 17 00:00:00 2001
+From: R, Dharageswari <dharageswari.r@intel.com>
+Date: Thu, 29 Apr 2010 20:23:41 +0530
+Subject: [PATCH] ADR-Post-Beta-0.05.002.03-3/8-Moorestown Audio Drivers: SST interface modules
+
+This patch adds the SST driver interface module. Interface module is the
+one which talks to upper/other layer in the SST Driver
+intel_sst_interface.c - implements the MAD driver registration & deregistration
+functions. SST driver is also a character driver, so that player/middleware can
+communicate with SST driver. All char driver routines like open, close, read,
+write and ioctl are implemented here. The ioctl operations are used by
+middleware/players to open/close, control and configure astream as well as to
+transfer the data.
+intel_sst_ioctl.h - exposes the IOCTL definition for players/middleware
+as well as the various structure for passing stream parameters
+
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+
+ new file: include/sound/intel_sst_ioctl.h
+ new file: sound/pci/sst/intel_sst_interface.c
+Patch-mainline: 2.6.35?
+---
+ include/sound/intel_sst_ioctl.h | 390 +++++++
+ sound/pci/sst/intel_sst_interface.c | 2099 +++++++++++++++++++++++++++++++++++
+ 2 files changed, 2489 insertions(+), 0 deletions(-)
+ create mode 100644 include/sound/intel_sst_ioctl.h
+ create mode 100644 sound/pci/sst/intel_sst_interface.c
+
+diff --git a/include/sound/intel_sst_ioctl.h b/include/sound/intel_sst_ioctl.h
+new file mode 100644
+index 0000000..442b388
+--- /dev/null
++++ b/include/sound/intel_sst_ioctl.h
+@@ -0,0 +1,390 @@
++#ifndef __INTEL_SST_IOCTL_H__
++#define __INTEL_SST_IOCTL_H__
++/*
++ * intel_sst_ipc.h - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corporation
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This file defines all sst ioctls
++ */
++
++/* codec and post/pre processing related info */
++
++enum sst_codec_types {
++/* AUDIO/MUSIC CODEC Type Definitions */
++ SST_CODEC_TYPE_UNKNOWN = 0,
++ SST_CODEC_TYPE_PCM, /* Pass through Audio codec */
++ SST_CODEC_TYPE_MP3,
++ SST_CODEC_TYPE_MP24,
++ SST_CODEC_TYPE_AAC,
++ SST_CODEC_TYPE_AACP,
++ SST_CODEC_TYPE_eAACP,
++ SST_CODEC_TYPE_WMA9,
++ SST_CODEC_TYPE_WMA10,
++ SST_CODEC_TYPE_WMA10P,
++ SST_CODEC_TYPE_RA,
++ SST_CODEC_TYPE_DDAC3,
++ SST_CODEC_TYPE_STEREO_TRUE_HD,
++ SST_CODEC_TYPE_STEREO_HD_PLUS,
++
++ /* VOICE CODEC Type Definitions */
++ SST_CODEC_TYPE_VOICE_PCM = 0x21, /* Pass through voice codec */
++ SST_CODEC_SRC = 0x64,
++ SST_CODEC_MIXER = 0x65,
++ SST_CODEC_DOWN_MIXER = 0x66,
++ SST_CODEC_VOLUME_CONTROL = 0x67,
++ SST_CODEC_OEM1 = 0xC8,
++ SST_CODEC_OEM2 = 0xC9,
++};
++
++enum snd_sst_stream_ops {
++ STREAM_OPS_PLAYBACK = 0, /* Decode */
++ STREAM_OPS_CAPTURE, /* Encode */
++ STREAM_OPS_PLAYBACK_DRM, /* Play Audio/Voice */
++ STREAM_OPS_PLAYBACK_ALERT, /* Play Audio/Voice */
++ STREAM_OPS_CAPTURE_VOICE_CALL, /* CSV Voice recording */
++};
++
++enum stream_type {
++ STREAM_TYPE_MUSIC = 1,
++ STREAM_TYPE_VOICE
++};
++
++/* Firmware Version info */
++struct snd_sst_fw_version {
++ __u8 build; /* build number*/
++ __u8 minor; /* minor number*/
++ __u8 major; /* major number*/
++ __u8 type; /* build type */
++};
++
++/* Port info structure */
++struct snd_sst_port_info {
++ __u16 port_type;
++ __u16 reserved;
++};
++
++/* Mixer info structure */
++struct snd_sst_mix_info {
++ __u16 max_streams;
++ __u16 reserved;
++};
++
++/* PCM Parameters */
++struct snd_pcm_params {
++ __u16 codec; /* codec type */
++ __u8 num_chan; /* 1=Mono, 2=Stereo */
++ __u8 pcm_wd_sz; /* 16/24 - bit*/
++ __u32 brate; /* Bitrate in bits per second */
++ __u32 sfreq; /* Sampling rate in Hz */
++// __u16 frame_size;
++// __u16 samples_per_frame; /* Frame size num samples per frame */
++ __u32 buffer_size;
++ __u32 period_count; /* period elapsed time count, in samples,*/
++};
++
++/* MP3 Music Parameters Message */
++struct snd_mp3_params {
++ __u16 codec;
++ __u8 num_chan; /* 1=Mono, 2=Stereo */
++ __u8 pcm_wd_sz; /* 16/24 - bit*/
++ __u32 brate; /* Use the hard coded value. */
++ __u32 sfreq; /* Sampling freq eg. 8000, 441000, 48000 */
++ __u8 crc_check; /* crc_check - disable (0) or enable (1) */
++ __u8 op_align; /* op align 0- 16 bit, 1- MSB, 2 LSB*/
++ __u16 reserved; /* Unused */
++};
++
++#define AAC_BIT_STREAM_ADTS 0
++#define AAC_BIT_STREAM_ADIF 1
++#define AAC_BIT_STREAM_RAW 2
++
++/* AAC Music Parameters Message */
++struct snd_aac_params {
++ __u16 codec;
++ __u8 num_chan; /* 1=Mono, 2=Stereo*/
++ __u8 pcm_wd_sz; /* 16/24 - bit*/
++ __u32 brate;
++ __u32 sfreq; /* Sampling freq eg. 8000, 441000, 48000 */
++ __u32 aac_srate; /* Plain AAC decoder operating sample rate */
++ __u8 mpg_id; /* 0=MPEG-2, 1=MPEG-4 */
++ __u8 bs_format; /* input bit stream format adts=0, adif=1, raw=2 */
++ __u8 aac_profile; /* 0=Main Profile, 1=LC profile, 3=SSR profile */
++ __u8 ext_chl; /* No.of external channels */
++ __u8 aot; /* Audio object type. 1=Main , 2=LC , 3=SSR, 4=SBR*/
++ __u8 op_align; /* output alignment 0=16 bit , 1=MSB, 2= LSB align */
++ __u8 brate_type; /* 0=CBR, 1=VBR */
++ __u8 crc_check; /* crc check 0= disable, 1=enable */
++ __s8 bit_stream_format[8]; /* input bit stream format adts/adif/raw */
++ __u8 jstereo; /* Joint stereo Flag */
++ __u8 sbr_present; /* 1 = SBR Present, 0 = SBR absent, for RAW */
++ __u8 downsample; /* 1 = Downsampling ON, 0 = Downsampling OFF */
++ __u8 num_syntc_elems; /* 1- Mono/stereo, 0 - Dual Mono, 0 - for raw */
++ __s8 syntc_id[2]; /* 0 for ID_SCE(Dula Mono), -1 for raw */
++ __s8 syntc_tag[2]; /* raw - -1 and 0 -16 for rest of the streams */
++ __u8 pce_present; /* Flag. 1- present 0 - not present, for RAW */
++ __u8 sbr_type; /* sbr_type: 0-plain aac, 1-aac-v1, 2-aac-v2 */
++ __u8 outchmode; /* 0- mono, 1-stereo, 2-dual mono 3-Parametric stereo */
++ __u8 ps_present;
++
++};
++
++/* WMA Music Parameters Message */
++struct snd_wma_params {
++ __u16 codec;
++ __u8 num_chan; /* 1=Mono, 2=Stereo */
++ __u8 pcm_wd_sz; /* 16/24 - bit*/
++ __u32 brate; /* Use the hard coded value. */
++ __u32 sfreq; /* Sampling freq eg. 8000, 441000, 48000 */
++ __u32 channel_mask; /* Channel Mask */
++ __u16 format_tag; /* Format Tag */
++ __u16 block_align; /* packet size */
++ __u16 wma_encode_opt;/* Encoder option */
++ __u8 op_align; /* op align 0- 16 bit, 1- MSB, 2 LSB */
++ __u8 pcm_src; /* input pcm bit width */
++};
++
++/* Pre processing param structure */
++struct snd_prp_params {
++ __u32 reserved; /* No pre-processing defined yet */
++};
++
++/* Post processing Capability info structure */
++struct snd_sst_postproc_info {
++ __u32 src_min; /* Supported SRC Min sampling freq */
++ __u32 src_max; /* Supported SRC Max sampling freq */
++ __u8 src; /* 0=Not supported, 1=Supported */
++ __u8 bass_boost; /* 0=Not Supported, 1=Supported */
++ __u8 stereo_widening; /* 0=Not Supported, 1=Supported */
++ __u8 volume_control; /* 0=Not Supported, 1=Supported */
++ __s16 min_vol; /* Minimum value of Volume in dB */
++ __s16 max_vol; /* Maximum value of Volume in dB */
++ __u8 mute_control; /* 0=No Mute, 1=Mute */
++ __u8 reserved1;
++ __u16 reserved2;
++};
++
++/* pre processing Capability info structure */
++struct snd_sst_prp_info {
++ __s16 min_vol; /* Minimum value of Volume in dB */
++ __s16 max_vol; /* Maximum value of Volume in dB */
++ __u8 volume_control; /* 0=Not Supported, 1=Supported */
++ __u8 reserved1; /* for 32 bit alignment */
++ __u16 reserved2; /* for 32 bit alignment */
++} __attribute__ ((packed));
++
++/* Firmware capabilities info */
++struct snd_sst_fw_info {
++ struct snd_sst_fw_version fw_version; /* Firmware version */
++ __u8 audio_codecs_supported[8]; /* Codecs supported by FW */
++ __u32 recommend_min_duration; /* Min duration for Lowpower Playback */
++ __u8 max_pcm_streams_supported; /* Max num of PCM streams supported */
++ __u8 max_enc_streams_supported; /* Max number of Encoded streams */
++ __u16 reserved; /* 32 bit alignment*/
++ struct snd_sst_postproc_info pop_info; /* Post processing capability */
++ struct snd_sst_prp_info prp_info; /* pre_processing mod cap info */
++ struct snd_sst_port_info port_info[2]; /* Port info */
++ struct snd_sst_mix_info mix_info; /* Mixer info */
++ __u32 min_input_buf; /* minmum i/p buffer for decode */
++};
++
++/* Add the codec parameter structures for new codecs to be supported */
++#define CODEC_PARAM_STRUCTURES \
++ struct snd_pcm_params pcm_params; \
++ struct snd_mp3_params mp3_params; \
++ struct snd_aac_params aac_params; \
++ struct snd_wma_params wma_params;
++
++/* Pre and Post Processing param structures */
++#define PPP_PARAM_STRUCTURES \
++ struct snd_prp_params prp_params;
++
++/* Codec params struture */
++union snd_sst_codec_params {
++ CODEC_PARAM_STRUCTURES;
++};
++
++/* Pre-processing params struture */
++union snd_sst_ppp_params{
++ PPP_PARAM_STRUCTURES;
++};
++
++struct snd_sst_stream_params {
++ union snd_sst_codec_params uc;
++} __attribute__ ((packed));
++
++struct snd_sst_params {
++ __u32 result;
++ __u32 stream_id;
++ __u8 codec;
++ __u8 ops;
++ __u8 stream_type;
++ struct snd_sst_stream_params sparams;
++};
++
++/* ioctl related stuff here */
++struct snd_sst_pmic_config {
++ __u32 sfreq; /* Sampling rate in Hz */
++ __u16 num_chan; /* Mono =1 or Stereo =2 */
++ __u16 pcm_wd_sz; /* Number of bits per sample */
++} __attribute__ ((packed));
++
++struct snd_sst_get_stream_params {
++ struct snd_sst_params codec_params;
++ struct snd_sst_pmic_config pcm_params;
++};
++
++enum snd_sst_target_type {
++ SND_SST_TARGET_PMIC = 1,
++ SND_SST_TARGET_LPE,
++ SND_SST_TARGET_OTHER,
++};
++
++enum snd_sst_device_type {
++ SND_SST_DEVICE_SSP = 1,
++ SND_SST_DEVICE_PCM,
++ SND_SST_DEVICE_OTHER,
++};
++
++/*enum snd_sst_device_mode {
++ SND_SST_PCM_MODE_I2S = 0,
++ SND_SST_PCM_MODE_PCM1,
++};*/
++enum snd_sst_device_mode {
++
++ SND_SST_DEV_MODE_PCM_MODE1 = 1, /*(1 16-bit word, bit-length frame sync)*/
++ SND_SST_DEV_MODE_PCM_MODE2,
++ SND_SST_DEV_MODE_PCM_MODE3,
++ SND_SST_DEV_MODE_PCM_MODE4_RIGHT_JUSTIFIED,
++ SND_SST_DEV_MODE_PCM_MODE4_LEFT_JUSTIFIED,
++ SND_SST_DEV_MODE_PCM_MODE4_I2S, /*(I2S mode, 16-bit words)*/
++};
++
++enum snd_sst_port_action {
++ SND_SST_PORT_PREPARE = 1,
++ SND_SST_PORT_ACTIVATE,
++};
++
++/* Target selection per device structure */
++struct snd_sst_slot_info {
++ __u8 mix_enable; /* Mixer enable or disable */
++ __u8 device_type;
++ __u8 device_instance; /* 0, 1, 2 */
++ __u8 target_type;
++ __u16 slot[2];
++ __u8 master;
++ __u8 action;
++ __u16 device_mode;
++ struct snd_sst_pmic_config pcm_params;
++} __attribute__ ((packed));
++
++#define SST_MAX_TARGET_DEVICES 3
++/* Target device list structure */
++struct snd_sst_target_device {
++ __u32 device_route;
++ struct snd_sst_slot_info devices[SST_MAX_TARGET_DEVICES];
++} __attribute__ ((packed));
++
++struct snd_sst_driver_info {
++ __u32 version; /* Version of the driver */
++ __u32 active_pcm_streams;
++ __u32 active_enc_streams;
++ __u32 max_pcm_streams;
++ __u32 max_enc_streams;
++ __u32 buf_per_stream;
++};
++
++struct snd_sst_vol {
++ __u32 stream_id;
++ __s32 volume;
++ __u32 ramp_duration;
++ __u32 ramp_type; /* Ramp type, default=0 */
++};
++
++struct snd_sst_mute {
++ __u32 stream_id;
++ __u32 mute;
++};
++
++enum snd_sst_buff_type {
++ SST_BUF_USER = 1,
++ SST_BUF_MMAP,
++ SST_BUF_RAR,
++};
++
++struct snd_sst_mmap_buff_entry {
++ unsigned int offset;
++ unsigned int size;
++};
++
++struct snd_sst_mmap_buffs {
++ unsigned int entries;
++ enum snd_sst_buff_type type;
++ struct snd_sst_mmap_buff_entry *buff;
++};
++
++struct snd_sst_buff_entry {
++ void *buffer;
++ unsigned int size;
++};
++
++struct snd_sst_buffs {
++ unsigned int entries;
++ __u8 type;
++ struct snd_sst_buff_entry *buff_entry;
++};
++
++struct snd_sst_dbufs {
++ unsigned long long input_bytes_consumed;
++ unsigned long long output_bytes_produced;
++ struct snd_sst_buffs *ibufs;
++ struct snd_sst_buffs *obufs;
++};
++
++/*IOCTL defined here */
++/*SST MMF IOCTLS only */
++#define SNDRV_SST_STREAM_SET_PARAMS _IOR('L', 0x00, \
++ struct snd_sst_stream_params *)
++#define SNDRV_SST_STREAM_GET_PARAMS _IOWR('L', 0x01, \
++ struct snd_sst_get_stream_params *)
++#define SNDRV_SST_STREAM_GET_TSTAMP _IOWR('L', 0x02, __u64 *)
++#define SNDRV_SST_STREAM_DECODE _IOWR('L', 0x03, struct snd_sst_dbufs *)
++#define SNDRV_SST_STREAM_BYTES_DECODED _IOWR('L', 0x04, __u64 *)
++#define SNDRV_SST_STREAM_START _IO('A', 0x42)
++#define SNDRV_SST_STREAM_DROP _IO('A', 0x43)
++#define SNDRV_SST_STREAM_DRAIN _IO('A', 0x44)
++#define SNDRV_SST_STREAM_PAUSE _IOW('A', 0x45, int)
++#define SNDRV_SST_STREAM_RESUME _IO('A', 0x47)
++#define SNDRV_SST_MMAP_PLAY _IOW('L', 0x05, struct snd_sst_mmap_buffs *)
++#define SNDRV_SST_MMAP_CAPTURE _IOW('L', 0x06, struct snd_sst_mmap_buffs *)
++/*SST common ioctls */
++#define SNDRV_SST_DRIVER_INFO _IOR('L', 0x10, struct snd_sst_driver_info *)
++#define SNDRV_SST_SET_VOL _IOW('L', 0x11, struct snd_sst_vol *)
++#define SNDRV_SST_GET_VOL _IOW('L', 0x12, struct snd_sst_vol *)
++#define SNDRV_SST_MUTE _IOW('L', 0x13, struct snd_sst_mute *)
++/*AM Ioctly only */
++#define SNDRV_SST_FW_INFO _IOR('L', 0x20, struct snd_sst_fw_info *)
++#define SNDRV_SST_SET_TARGET_DEVICE _IOW('L', 0x21, \
++ struct snd_sst_target_device *)
++
++#endif /* __INTEL_SST_IOCTL_H__ */
+diff --git a/sound/pci/sst/intel_sst_interface.c b/sound/pci/sst/intel_sst_interface.c
+new file mode 100644
+index 0000000..5f84245
+--- /dev/null
++++ b/sound/pci/sst/intel_sst_interface.c
+@@ -0,0 +1,2099 @@
++/*
++ * intel_sst_interface.c - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corporation
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * This driver exposes the audio engine functionalities to the ALSA
++ * and middleware.
++ * Upper layer interfaces (MAD driver, MMF) to SST driver
++ */
++
++#include <linux/cdev.h>
++#include <linux/pci.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/syscalls.h>
++#include <linux/delay.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/fcntl.h>
++#include <linux/uaccess.h>
++#include <linux/interrupt.h>
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/mm.h>
++#include <linux/workqueue.h>
++#include <linux/pci.h>
++#include <linux/firmware.h>
++#include <linux/ioctl.h>
++#include <linux/rar/rar_register.h>
++#include <linux/rar/memrar.h>
++#include <asm/div64.h>
++#include <sound/intel_lpe.h>
++#include <sound/intel_sst_ioctl.h>
++#include "intel_sst_fw_ipc.h"
++#include "intel_sst_common.h"
++
++#define AM_MODULE 1
++#define STREAM_MODULE 0
++
++/**
++* This function is called when the FW needs to be downloaded to SST DSP engine
++*/
++static int sst_download_fw(void)
++{
++ int retval;
++ const struct firmware *fw_sst;
++
++ printk(KERN_DEBUG "SST DBG:SST Downloading FW now...\n");
++ retval = request_firmware(&fw_sst,
++ SST_FW_STD_FILENAME,
++ &sst_drv_ctx->pci->dev);
++ if (retval) {
++ printk(KERN_ERR
++ "SST ERR: req fw failed %d \n", retval);
++ return retval;
++ }
++ sst_drv_ctx->alloc_block[0].sst_id = FW_DWNL_ID;
++ retval = sst_load_fw(fw_sst, NULL);
++ if (retval)
++ goto end_restore;
++
++ sst_drv_ctx->alloc_block[0].ops_block.condition = false;
++ retval = sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[0]);
++ if (retval)
++ printk(KERN_ERR
++ "SST ERR: fw download failed %d \n" , retval);
++end_restore:
++ release_firmware(fw_sst);
++ sst_drv_ctx->alloc_block[0].sst_id = BLOCK_UNINIT;
++ return retval;
++}
++
++/**
++* intel_sst_open - opens a handle to driver
++* @i_node: inode structure
++* @file_ptr:pointer to file
++*
++* This function is called by OS when a user space component
++* tries to get a driver handle. Only one handle at a time
++* will be allowed
++*/
++int intel_sst_open(struct inode *i_node, struct file *file_ptr)
++{
++ dev_t device = i_node->i_rdev;
++ unsigned int retval = 0;
++
++ if (sst_drv_ctx->pmic_state != SND_MAD_INIT_DONE) {
++ printk(KERN_ERR
++ "SST ERR: Sound card not availble \n ");
++ return -EIO;
++ }
++
++ if (sst_drv_ctx->sst_state == SST_UN_INIT) {
++ /* FW is not downloaded */
++ retval = sst_download_fw();
++ if (retval) {
++ printk(KERN_ERR
++ "SST ERR: FW download failed...abort\n");
++ return -ENODEV;
++ }
++ }
++ if (device == MKDEV(INTEL_SST_MAJOR, 0)) {
++ /* app open */
++ mutex_lock(&sst_drv_ctx->stream_cnt_lock);
++ if (sst_drv_ctx->encoded_cnt < MAX_ENC_STREAM) {
++ struct ioctl_pvt_data *data =
++ kzalloc(sizeof(struct ioctl_pvt_data),
++ GFP_KERNEL);
++ if (!data) {
++ printk(KERN_ERR
++ "SST ERR:error rsrvin data mry\n");
++ mutex_unlock(&sst_drv_ctx->stream_cnt_lock);
++ return -ENOMEM;
++ }
++
++
++ sst_drv_ctx->encoded_cnt++;
++ /*sst_drv_ctx->stream_cnt++;*/
++ mutex_unlock(&sst_drv_ctx->stream_cnt_lock);
++ data->pvt_id = sst_assign_pvt_id(sst_drv_ctx);
++ data->str_id = 0;
++ file_ptr->private_data = (void *)data;
++ printk(KERN_DEBUG "SST DBG:sst id allocated = %d!\n", data->pvt_id);
++ } else
++ retval = -EACCES;
++ } else if (device == MKDEV(INTEL_SST_MAJOR, 1)) {
++ /* audio manager open */
++ mutex_lock(&sst_drv_ctx->stream_cnt_lock);
++ if (sst_drv_ctx->am_cnt < MAX_AM_HANDLES) {
++ sst_drv_ctx->am_cnt++;
++ printk(KERN_DEBUG "SST DBG:AM handle opened...\n");
++ } else
++ retval = -EACCES;
++
++ mutex_unlock(&sst_drv_ctx->stream_cnt_lock);
++ } else
++ retval = -EINVAL;
++ return retval;
++}
++
++void free_stream_context(unsigned int str_id)
++{
++ struct stream_info *stream;
++
++ if (!sst_validate_strid(str_id)) {
++ /* str_id is valid, so stream is alloacted */
++ stream = &sst_drv_ctx->streams[str_id];
++ if (stream->ops == STREAM_OPS_PLAYBACK ||
++ stream->ops == STREAM_OPS_PLAYBACK_DRM) {
++ sst_drv_ctx->pb_streams--;
++ if (sst_drv_ctx->pb_streams == 0 && sst_drv_ctx->cp_streams > 0)
++ sst_drv_ctx->scard_ops->power_down_pmic_pb();
++ } else if (stream->ops == STREAM_OPS_CAPTURE) {
++ sst_drv_ctx->cp_streams--;
++ if (sst_drv_ctx->cp_streams == 0 && sst_drv_ctx->pb_streams > 0)
++ sst_drv_ctx->scard_ops->power_down_pmic_cp();
++ }
++#ifdef CONFIG_MSTWN_POWER_MGMT
++ if(stream->codec == SST_CODEC_TYPE_MP3){
++ sst_drv_ctx->lpaudio_start--;
++ if(!sst_drv_ctx->lpaudio_start) {
++ sst_ospm_send_event(OSPM_EVENT_LPAUDIO_STOP);
++ printk(KERN_DEBUG "SST DBG:Free_stream:lpaudio_start:%d", sst_drv_ctx->lpaudio_start);
++ printk(KERN_DEBUG "SST DBG:Free_stream:Sending OSPM_EVENT_LPAUDIO_STOP...\n");
++ }
++ }else {
++ sst_drv_ctx->audio_start--;
++ if(!sst_drv_ctx->audio_start) {
++ sst_ospm_send_event(OSPM_EVENT_SUBSYS_STOP_PLAY);
++ printk(KERN_DEBUG "SST DBG:Free_stream:audio_start:%d", sst_drv_ctx->audio_start);
++ printk(KERN_DEBUG "SST DBG:Free_stream:Sending OSPM_EVENT_SUBSYS_STOP_PLAY...\n");
++ }
++ }
++#endif
++ if(sst_drv_ctx->pb_streams == 0 &&
++ sst_drv_ctx->cp_streams == 0) {
++ sst_drv_ctx->scard_ops->power_down_pmic();
++ }
++
++ if (sst_free_stream(str_id))
++ sst_clean_stream(&sst_drv_ctx->streams[str_id]);
++ }
++}
++
++/**
++* intel_sst_release - releases a handle to driver
++* @i_node: inode structure
++* @file_ptr: pointer to file
++*
++* This function is called by OS when a user space component
++* tries to release a driver handle.
++*/
++int intel_sst_release(struct inode *i_node, struct file *file_ptr)
++{
++ dev_t device = i_node->i_rdev;
++
++ printk(KERN_DEBUG "SST DBG:Release called \n");
++ if (device == MKDEV(INTEL_SST_MAJOR, 0)) {
++ struct ioctl_pvt_data *data =
++ (struct ioctl_pvt_data *)file_ptr->private_data;
++
++ /* app close */
++ printk(KERN_DEBUG "SST DBG:Closing app handle \n");
++ mutex_lock(&sst_drv_ctx->stream_cnt_lock);
++ sst_drv_ctx->encoded_cnt--;
++ sst_drv_ctx->stream_cnt--;
++ mutex_unlock(&sst_drv_ctx->stream_cnt_lock);
++ free_stream_context(data->str_id);
++ kfree(data);
++ } else if (device == MKDEV(INTEL_SST_MAJOR, 1)) {
++ /* audio manager close */
++ mutex_lock(&sst_drv_ctx->stream_cnt_lock);
++ sst_drv_ctx->am_cnt--;
++ mutex_unlock(&sst_drv_ctx->stream_cnt_lock);
++ printk(KERN_DEBUG "SST DBG:AM handle closed \n");
++ }
++ return 0;
++}
++
++/**
++* intel_sst_mmap - mmaps a kernel buffer to user space for copying data
++* @vma: vm area structure instance
++* @file_ptr: pointer to file
++*
++* This function is called by OS when a user space component
++* tries to get mmap memory from driver
++*/
++int intel_sst_mmap(struct file *file_ptr, struct vm_area_struct *vma)
++{
++ int retval, length;
++ struct ioctl_pvt_data *data =
++ (struct ioctl_pvt_data *)file_ptr->private_data;
++ int str_id = data->str_id;
++ void *mem_area;
++
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return -EINVAL;
++
++ length = vma->vm_end - vma->vm_start;
++ printk(KERN_DEBUG "SST DBG:called for stream %d length 0x%x\n", str_id, length);
++
++ if (length > sst_drv_ctx->mmap_len)
++ return -ENOMEM;
++ if (!sst_drv_ctx->mmap_mem)
++ return -EIO;
++
++ /* round it up to the page bondary */
++ /*mem_area = (void *)((((unsigned long)sst_drv_ctx->mmap_mem)
++ + PAGE_SIZE - 1) & PAGE_MASK);*/
++ mem_area = (void *) PAGE_ALIGN((unsigned int) sst_drv_ctx->mmap_mem);
++
++ /* map the whole physically contiguous area in one piece */
++ retval = remap_pfn_range(vma,
++ vma->vm_start,
++ virt_to_phys((void *)mem_area) >> PAGE_SHIFT,
++ length,
++ vma->vm_page_prot);
++ if (retval) {
++ sst_drv_ctx->streams[str_id].mmapped = false;
++ printk(KERN_ERR "SST ERR: mapping failed %d ",retval);
++ } else
++ sst_drv_ctx->streams[str_id].mmapped = true;
++
++ printk(KERN_DEBUG "SST DBG:mmap ret 0x%x \n", retval);
++ return retval;
++}
++
++/**
++* intel_sst_mmap_play_capture - sets mmap data buffers to play/capture
++*/
++static int intel_sst_mmap_play_capture(u32 str_id,
++ struct snd_sst_mmap_buffs *mmap_buf)
++{
++ struct sst_stream_bufs *bufs;
++ int retval, i;
++ struct stream_info *stream;
++ struct snd_sst_mmap_buff_entry *buf_entry;
++
++ printk(KERN_DEBUG "SST DBG:called for str_id %d \n", str_id);
++ retval = sst_validate_strid(str_id);
++ if (retval) {
++ printk(KERN_ERR "SST ERR: val +\
++ failed %d ", retval);
++ return -EINVAL;
++ }
++ BUG_ON(!mmap_buf);
++
++ /* mmap play/capture is only valid on a stream that was mmapped */
++ stream = &sst_drv_ctx->streams[str_id];
++ if (stream->mmapped != true) {
++ printk(KERN_ERR "SST ERR: stream not mapped! ");
++ return -EIO;
++ }
++
++ if (stream->status == STREAM_UN_INIT ||
++ stream->status == STREAM_DECODE) {
++ printk(KERN_ERR
++ "SST ERR: BAD REQUEST!, streamstate +\
++ is %d\n", stream->status);
++ return -EBADRQC;
++ }
++ stream->curr_bytes = 0;
++ stream->cumm_bytes = 0;
++
++ printk(KERN_DEBUG "SST DBG:new buffers count %d status %d\n",
++ mmap_buf->entries, stream->status);
++ /* queue one kernel buffer node per user-supplied mmap entry */
++ buf_entry = mmap_buf->buff;
++ for (i = 0; i < mmap_buf->entries; i++) {
++ BUG_ON(!buf_entry);
++ bufs = kzalloc(sizeof(*bufs), GFP_KERNEL);
++ if (!bufs) {
++ /* NOTE(review): nodes already queued on stream->bufs
++ are not unwound here - presumably released on
++ stream cleanup; confirm */
++ printk(KERN_ERR
++ "SST ERR: mem allocation failed \n ");
++ return -ENOMEM;
++ }
++ bufs->size = buf_entry->size;
++ bufs->offset = buf_entry->offset;
++ bufs->addr = sst_drv_ctx->mmap_mem;
++ bufs->in_use = false;
++ buf_entry++;
++ /* locking here */
++ mutex_lock(&stream->lock);
++ list_add_tail(&bufs->node, &stream->bufs);
++ mutex_unlock(&stream->lock);
++ }
++
++ mutex_lock(&stream->lock);
++ stream->data_blk.condition = false;
++ stream->data_blk.ret_code = 0;
++ /* start the DSP only for a freshly initialised, non-draining stream */
++ if (stream->status == STREAM_INIT &&
++ stream->prev != STREAM_UN_INIT &&
++ stream->need_draining != true) {
++ stream->prev = stream->status;
++ stream->status = STREAM_RUNNING;
++ if (stream->ops == STREAM_OPS_PLAYBACK) {
++ printk(KERN_DEBUG "SST DBG:play frames...\n");
++ if (sst_play_frame(str_id) < 0) {
++ printk(KERN_ERR
++ "SST ERR: play frames failed \n");
++ mutex_unlock(&stream->lock);
++ return -EIO;
++ }
++ } else if (stream->ops == STREAM_OPS_CAPTURE) {
++ printk(KERN_DEBUG "SST DBG:capture frames...\n");
++ if (sst_capture_frame(str_id) < 0) {
++ printk(KERN_ERR
++ "SST ERR: capture frames failed \n");
++ mutex_unlock(&stream->lock);
++ return -EIO;
++ }
++ }
++ }
++ mutex_unlock(&stream->lock);
++ /* Block the call for reply */
++ if (!list_empty(&stream->bufs)) {
++ printk(KERN_DEBUG "SST DBG:ioctl waiting...\n");
++ stream->data_blk.on = true;
++ retval = sst_wait_interruptible(sst_drv_ctx,
++ &stream->data_blk);
++ }
++
++ /* retval is still 0 (from sst_validate_strid) if we did not wait */
++ if (retval >= 0)
++ retval = stream->cumm_bytes;
++ printk(KERN_DEBUG "SST DBG:end of play/rec +\
++ ioctl bytes = %d!!\n", retval);
++ return retval;
++}
++
++/**
++* intel_sst_play_capture - sets user data buffers to play/capture
++* @stream: stream context to operate on
++* @str_id: stream id the context belongs to
++*
++* Moves a freshly initialised stream into RUNNING, triggers the first
++* play/capture frame for a running/paused stream, then blocks
++* interruptibly on the stream's data block until the DSP replies.
++* Returns 0 on success or a negative error code.
++*/
++static int intel_sst_play_capture(struct stream_info *stream, int str_id)
++{
++ int retval;
++
++ stream->data_blk.ret_code = 0;
++ stream->data_blk.on = true;
++ stream->data_blk.condition = false;
++
++ mutex_lock(&stream->lock);
++ if (stream->status == STREAM_INIT && stream->prev != STREAM_UN_INIT) {
++ /* stream is started */
++ stream->prev = stream->status;
++ stream->status = STREAM_RUNNING;
++ }
++
++ if (stream->status == STREAM_INIT && stream->prev == STREAM_UN_INIT) {
++ /* stream is not started yet */
++ printk(KERN_DEBUG "SST DBG:Stream isnt started yet state %d, prev %d \n",
++ stream->status, stream->prev);
++ } else if ((stream->status == STREAM_RUNNING ||
++ stream->status == STREAM_PAUSED) &&
++ stream->need_draining != true) {
++ /* stream is started */
++ if (stream->ops == STREAM_OPS_PLAYBACK ||
++ stream->ops == STREAM_OPS_PLAYBACK_DRM) {
++ if (sst_play_frame(str_id) < 0) {
++ printk(KERN_ERR
++ "SST ERR: play frames failed \n");
++ mutex_unlock(&stream->lock);
++ return -EIO;
++ }
++ } else if (stream->ops == STREAM_OPS_CAPTURE) {
++ if (sst_capture_frame(str_id) < 0) {
++ printk(KERN_ERR
++ "SST ERR: capture frames failed \n ");
++ mutex_unlock(&stream->lock);
++ return -EIO;
++ }
++ }
++ } else {
++ printk(KERN_ERR
++ "SST ERR: Streamstate %d invalid,prev %d\n",\
++ stream->status, stream->prev);
++ mutex_unlock(&stream->lock);
++ return -EIO;
++ }
++ mutex_unlock(&stream->lock);
++ /* Block the call for reply */
++ printk(KERN_DEBUG "SST DBG:write waiting...\n");
++
++ retval = sst_wait_interruptible(sst_drv_ctx, &stream->data_blk);
++ if (retval) {
++ /* wait failed/interrupted: drop back to INIT so a retry can start */
++ stream->status = STREAM_INIT;
++ printk(KERN_DEBUG "SST DBG:wait returned error...\n");
++ }
++ printk(KERN_DEBUG "SST DBG:write returning\n");
++ return retval;
++}
++
++/**
++* snd_sst_fill_kernel_list - fills kernel list with buffer addresses for
++* SST DSP driver to process
++* @stream: stream context
++* @iovec: user-supplied scatter/gather list
++* @nr_segs: number of entries in @iovec
++* @copy_to_list: (capture only) list filled with copy-back descriptors
++*
++* For DRM playback, each iovec entry carries a RAR handle which is queued
++* directly. Otherwise, user data is staged into the driver's mmap buffer
++* (playback copies from user here; capture records where to copy back
++* later via @copy_to_list). Progress across calls is kept in
++* stream->sg_index / stream->cur_ptr so the caller can loop until all
++* segments are consumed.
++*/
++static int snd_sst_fill_kernel_list(struct stream_info *stream,
++ const struct iovec *iovec, unsigned long nr_segs,
++ struct list_head *copy_to_list)
++{
++ struct sst_stream_bufs *stream_bufs;
++ unsigned long index, data_not_copied, mmap_len;
++ unsigned char *bufp;
++ unsigned long size, copied_size;
++ int retval = 0, add_to_list = 0;
++ /* NOTE(review): function-local statics - not safe if two streams
++ run this path concurrently; also only ever reset below, never
++ read in this function. Confirm intent. */
++ static int sent_offset;
++ static unsigned long sent_index;
++
++ stream_bufs = kzalloc(sizeof(*stream_bufs), GFP_KERNEL);
++ if (!stream_bufs) {
++ printk(KERN_ERR
++ "SST ERR: memory allocation failed \n ");
++ return -ENOMEM;
++ }
++ stream_bufs->addr = sst_drv_ctx->mmap_mem;
++ if (stream->ops == STREAM_OPS_PLAYBACK_DRM) {
++ /* NOTE(review): the outer stream_bufs allocated above is
++ shadowed by the per-segment allocation below and is never
++ freed or used on this path - memory leak. The error
++ returns inside this loop also leak the inner allocation. */
++ for (index = stream->sg_index; index < nr_segs; index++) {
++ __u32 rar_handle;
++ struct sst_stream_bufs *stream_bufs =
++ kzalloc(sizeof(*stream_bufs), GFP_KERNEL);
++
++ stream->sg_index = index;
++ if (!stream_bufs) {
++ printk(KERN_ERR
++ "SST ERR: mry alocation failed \n");
++ return -ENOMEM;
++ }
++ retval = copy_from_user((void *) &rar_handle,
++ iovec[index].iov_base,
++ sizeof(__u32));
++ if (retval != 0) {
++ printk(KERN_ERR
++ "SST ERR: copy from user +\
++ failed\n");
++ return -EIO;
++ }
++ stream_bufs->addr = (char *)rar_handle;
++ printk(KERN_DEBUG "SST DBG:rar handle +\
++ received = 0x%x\n", (__u32)stream_bufs->addr);
++ stream_bufs->in_use = false;
++ stream_bufs->size = iovec[0].iov_len;
++ printk(KERN_DEBUG "size = 0x%x", stream_bufs->size);
++ /* locking here */
++ mutex_lock(&stream->lock);
++ list_add_tail(&stream_bufs->node, &stream->bufs);
++ mutex_unlock(&stream->lock);
++ }
++ stream->sg_index = index;
++ return retval;
++ }
++ mmap_len = sst_drv_ctx->mmap_len;
++ stream_bufs->addr = sst_drv_ctx->mmap_mem;
++ bufp = stream->cur_ptr;
++
++ printk(KERN_DEBUG "SST DBG:mmap_len - %lx\n", mmap_len);
++ copied_size = 0;
++
++ if (!stream->sg_index)
++ sent_index = sent_offset = 0;
++
++ /* stage segments into the mmap buffer until it is full or the
++ iovec list is exhausted */
++ for (index = stream->sg_index; index < nr_segs; index++) {
++ stream->sg_index = index;
++ printk(KERN_DEBUG "SST DBG:index - %lx, cur_ptr - %p\n", index, stream->cur_ptr);
++ printk(KERN_DEBUG "SST DBG:base - %p, size - 0x%x\n", iovec[index].iov_base,
++ iovec[index].iov_len);
++ printk(KERN_DEBUG "SST DBG:bufp - %p\n", bufp);
++ if (!stream->cur_ptr)
++ bufp = iovec[index].iov_base;
++
++ /* bytes remaining in the current (possibly partially
++ consumed) segment */
++ size = ((unsigned long)iovec[index].iov_base
++ + iovec[index].iov_len) - (unsigned long) bufp;
++
++ printk(KERN_DEBUG "SST DBG:size - %lx\n", size);
++ /* clamp to the space left in the mmap staging buffer */
++ if ((copied_size + size) > mmap_len)
++ size = mmap_len - copied_size;
++
++ printk(KERN_DEBUG "SST DBG:size - %lx\n", size);
++
++ if (stream->ops == STREAM_OPS_PLAYBACK) {
++ printk(KERN_DEBUG "SST DBG:Playback stream copying now....\n");
++ data_not_copied = copy_from_user(
++ (void *)(stream_bufs->addr + copied_size),
++ bufp, size);
++ if (data_not_copied > 0) {
++ /* Clean up the list and return error code */
++ printk(KERN_ERR
++ "SST ERR: cpyfrmusr not coped -%ld", data_not_copied);
++ retval = -EIO;
++ break;
++ }
++ } else if (stream->ops == STREAM_OPS_CAPTURE) {
++ /* capture: remember where the captured bytes must be
++ copied back to user space later */
++ struct snd_sst_user_cap_list *entry =
++ kzalloc(sizeof(*entry), GFP_KERNEL);
++
++ if (!entry) {
++ printk(KERN_ERR
++ "SST ERR: mem allocation failed \n");
++ /* FIXME cleanup prev */
++ return -ENOMEM;
++ }
++ entry->iov_index = index;
++ entry->iov_offset = (unsigned long) bufp -
++ (unsigned long)iovec[index].iov_base;
++ entry->offset = copied_size;
++ entry->size = size;
++ printk(KERN_DEBUG "SST DBG:ENTRY:ioindx %d,iooff %ld,koff %ld,ksz %ld \n",
++ entry->iov_index, entry->iov_offset,
++ entry->offset, entry->size);
++ list_add_tail(&entry->node, copy_to_list);
++ }
++
++ printk(KERN_DEBUG "SST DBG:cur_ptr - %lx\n", (unsigned long) stream->cur_ptr);
++ stream->cur_ptr = bufp + size;
++
++ /* reject an iovec whose base + len wraps the address space */
++ if (((unsigned long)iovec[index].iov_base
++ + iovec[index].iov_len) < ((unsigned long)iovec[index].iov_base) ) {
++ printk(KERN_DEBUG "SST DBG:Buffer overflows");
++ return -EINVAL;
++ }
++
++ /* segment fully consumed: advance to the next one */
++ if (((unsigned long)iovec[index].iov_base
++ + iovec[index].iov_len) ==
++ (unsigned long)stream->cur_ptr) {
++ stream->cur_ptr = NULL;
++ stream->sg_index++;
++ }
++
++ copied_size += size;
++ printk(KERN_DEBUG "SST DBG:copied_size - %lx\n", copied_size);
++ if ((copied_size >= mmap_len) ||
++ (stream->sg_index == nr_segs)) {
++ add_to_list = 1;
++ }
++
++ if (add_to_list) {
++ stream_bufs->in_use = false;
++ stream_bufs->size = copied_size;
++ /* locking here */
++ mutex_lock(&stream->lock);
++ list_add_tail(&stream_bufs->node, &stream->bufs);
++ mutex_unlock(&stream->lock);
++ break;
++ }
++ }
++ return retval;
++}
++
++/**
++* snd_sst_copy_userbuf_capture - This function copies the captured data
++* returned from SST DSP engine to the user buffers
++* @stream: stream context holding the kernel-side capture buffers
++* @iovec: user scatter/gather list the data is copied back into
++* @copy_to_list: descriptors built by snd_sst_fill_kernel_list mapping
++* kernel buffer offsets to iovec positions
++*
++* Entries are freed as they are copied out. Returns 0 or -EIO if a
++* copy_to_user fails.
++*/
++static int snd_sst_copy_userbuf_capture(struct stream_info *stream,
++ const struct iovec *iovec,
++ struct list_head *copy_to_list)
++{
++ struct snd_sst_user_cap_list *entry, *_entry;
++ struct sst_stream_bufs *kbufs = NULL, *_kbufs;
++ int retval = 0;
++ unsigned long data_not_copied;
++
++ /* copy sent buffers */
++ /* FIXME optimze */
++ printk(KERN_DEBUG "SST DBG:capture stream copying to user now...\n");
++ list_for_each_entry_safe(kbufs, _kbufs, &stream->bufs, node) {
++ if (kbufs->in_use == true) {
++ /* copy to user */
++ /* NOTE(review): the whole copy_to_list is drained
++ against the first in-use kernel buffer; entry
++ offsets index into kbufs->addr - confirm there is
++ only one staging buffer per pass */
++ list_for_each_entry_safe(entry, _entry,
++ copy_to_list, node) {
++ printk(KERN_DEBUG "SST DBG:filling now... \n");
++ printk(KERN_DEBUG "SST DBG:iindx %d,ioff %ld,koff %ld,ksz %ld \n",
++ entry->iov_index, entry->iov_offset,
++ entry->offset, entry->size);
++ printk(KERN_DEBUG "SST DBG:Copying at %p size %lx\n",
++ iovec[entry->iov_index].iov_base +
++ entry->iov_offset,
++ entry->size);
++ data_not_copied = copy_to_user((void *)
++ iovec[entry->iov_index].iov_base +
++ entry->iov_offset,
++ kbufs->addr + entry->offset,
++ entry->size);
++ if (data_not_copied > 0) {
++ /* Clean up the list and return error */
++ printk(KERN_ERR
++ "SST ERR: copy to user err -%ld\n ", data_not_copied);
++ retval = -EIO;
++ break;
++ }
++ list_del(&entry->node);
++ kfree(entry);
++ }
++ printk(KERN_DEBUG "SST DBG:coming out of loop\n");
++ }
++ }
++ printk(KERN_DEBUG "SST DBG:end of cap copy\n");
++ return retval;
++}
++
++/*
++ * snd_sst_userbufs_play_cap - constructs the list from user buffers
++ * @iovec: pointer to iovec structure
++ * @nr_segs: number entries in the iovec structure
++ * @str_id: stream id
++ * @stream: pointer to stream_info structure
++ * This function will traverse the user list and copy the data to the kernel
++ * space buffers.
++ */ /* FIXME cleanups in this fn no mem leaks due to link list */
++static int snd_sst_userbufs_play_cap(const struct iovec *iovec,
++ unsigned long nr_segs, unsigned int str_id,
++ struct stream_info *stream)
++{
++ int retval;
++ LIST_HEAD(copy_to_list);
++
++
++ /* NOTE(review): the return value of snd_sst_fill_kernel_list is
++ immediately overwritten below without being checked - a fill
++ failure (-EIO/-ENOMEM) is silently ignored; confirm */
++ retval = snd_sst_fill_kernel_list(stream, iovec, nr_segs,
++ &copy_to_list);
++
++ retval = intel_sst_play_capture(stream, str_id);
++ if (retval < 0)
++ return retval;
++
++ if (stream->ops == STREAM_OPS_CAPTURE) {
++ /* capture: push the DSP-filled buffers back to user space */
++ retval = snd_sst_copy_userbuf_capture(stream, iovec,
++ &copy_to_list);
++ }
++ return retval;
++}
++
++/**
++* intel_sst_read_write - This function is common function across read/write
++* for user buffers called from system calls
++* @str_id: stream id to operate on
++* @buf: single user buffer to play from / capture into
++* @count: size of @buf in bytes
++*
++* Wraps the single user buffer into a one-entry iovec and loops
++* snd_sst_userbufs_play_cap until all of it is consumed. Returns the
++* cumulative byte count on success or a negative error code.
++*/
++static int intel_sst_read_write(unsigned int str_id, char __user *buf,
++ size_t count)
++{
++ int retval;
++ struct stream_info *stream;
++ struct iovec iovec;
++ unsigned long nr_segs;
++
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return -EINVAL;
++ stream = &sst_drv_ctx->streams[str_id];
++ /* mmapped streams must use the mmap ioctls, not read/write */
++ if (stream->mmapped == true) {
++ printk(KERN_ERR
++ "SST ERR: user write and stream is mapped");
++ return -EIO;
++ }
++ if (!count) {
++ printk(KERN_ERR
++ "SST ERR: args invalid %d", retval);
++ return -EINVAL;
++ }
++ stream->curr_bytes = 0;
++ stream->cumm_bytes = 0;
++ /* copy user buf details */
++ printk(KERN_DEBUG "SST DBG:new buffers %p, copy size %d, status %d\n" ,
++ buf, (int) count, (int) stream->status);
++
++ stream->buf_type = SST_BUF_USER_STATIC;
++ iovec.iov_base = (void *)buf;
++ iovec.iov_len = count;
++ nr_segs = 1;
++
++ /* loop until the whole buffer has been staged and processed */
++ do {
++ retval = snd_sst_userbufs_play_cap(&iovec, nr_segs,
++ str_id, stream);
++
++ if (retval < 0)
++ break;
++
++ } while (stream->sg_index < nr_segs);
++
++ stream->sg_index = 0;
++ stream->cur_ptr = NULL;
++ if (retval >= 0)
++ retval = stream->cumm_bytes;
++ printk(KERN_DEBUG "SST DBG:end of play/rec bytes = %d!!\n", retval);
++ return retval;
++}
++
++/*
++ * intel_sst_write - This function is called when user tries to play out data
++ * @file_ptr: pointer to file
++ * @buf: user buffer to be played out
++ * @count: size of tthe buffer
++ * @offset: offset to start from (unused)
++ *
++ * Rejects uninitialised / decode-mode streams, then delegates to
++ * intel_sst_read_write. Returns bytes written or a negative error code.
++ */
++int intel_sst_write(struct file *file_ptr, const char __user *buf,
++ size_t count, loff_t *offset)
++{
++ struct ioctl_pvt_data *data =
++ (struct ioctl_pvt_data *)file_ptr->private_data;
++ int str_id = data->str_id;
++ struct stream_info *stream = &sst_drv_ctx->streams[str_id];
++
++ printk(KERN_DEBUG "SST DBG:called for %d\n", str_id);
++ if (stream->status == STREAM_UN_INIT ||
++ stream->status == STREAM_DECODE) {
++ printk(KERN_ERR "SST ERR: BAD REQUEST ");
++ return -EBADRQC;
++ }
++ return intel_sst_read_write(str_id, (char __user *)buf, count);
++}
++
++/*
++ * intel_sst_aio_write - This function is called when user tries to play out
++ * multiple data buffers
++ * @kiocb: pointer to a structure containing file pointer
++ * @iov: list of user buffer to be played out
++ * @nr_segs: number of entries
++ * @offset: offset to start from
++ *
++ * Only synchronous kiocbs are accepted (true async AIO is rejected).
++ * Validates the stream, then loops snd_sst_userbufs_play_cap over all
++ * iovec segments. Returns cumulative bytes or a negative error code.
++ */
++ssize_t intel_sst_aio_write(struct kiocb *kiocb, const struct iovec *iov,
++ unsigned long nr_segs, loff_t offset)
++{
++ int retval;
++ struct ioctl_pvt_data *data =
++ (struct ioctl_pvt_data *)kiocb->ki_filp->private_data;
++ int str_id = data->str_id;
++ struct stream_info *stream;
++
++ printk(KERN_DEBUG "SST DBG:entry - %ld\n", nr_segs);
++
++ if (is_sync_kiocb(kiocb) == false) {
++ printk(KERN_ERR
++ "SST ERR: aio_writ frm userspace is not alowed\n");
++ return -EINVAL;
++ }
++
++ printk(KERN_DEBUG "SST DBG:called for str_id %d \n", str_id);
++ retval = sst_validate_strid(str_id);
++ if (retval) {
++ printk(KERN_ERR
++ "SST ERR: invalid stream id \n ");
++ return -EINVAL;
++ }
++ stream = &sst_drv_ctx->streams[str_id];
++ /* mmapped streams must use the mmap ioctls, not write */
++ if (stream->mmapped == true) {
++ printk(KERN_ERR
++ "SST ERR: user write & stream is mapped");
++ return -EIO;
++ }
++ if (stream->status == STREAM_UN_INIT ||
++ stream->status == STREAM_DECODE) {
++ printk(KERN_ERR "SST ERR: BAD REQUEST");
++ return -EBADRQC;
++ }
++ stream->curr_bytes = 0;
++ stream->cumm_bytes = 0;
++ printk(KERN_DEBUG "SST DBG:new segs %ld, offset %d, status %d\n" ,
++ nr_segs, (int) offset, (int) stream->status);
++ stream->buf_type = SST_BUF_USER_STATIC;
++ /* loop until every iovec segment has been staged and processed */
++ do {
++ retval = snd_sst_userbufs_play_cap(iov, nr_segs,
++ str_id, stream);
++ if (retval < 0)
++ break;
++
++ } while (stream->sg_index < nr_segs);
++
++ stream->sg_index = 0;
++ stream->cur_ptr = NULL;
++ if (retval >= 0)
++ retval = stream->cumm_bytes;
++ printk(KERN_DEBUG "SST DBG:end of play/rec bytes = %d!!\n", retval);
++ return retval;
++}
++
++/*
++ * intel_sst_read - This function is called when user tries to capture data
++ * @file_ptr: pointer to file
++ * @buf: user buffer to be filled with captured data
++ * @count: size of tthe buffer
++ * @offset: offset to start from (unused)
++ *
++ * Rejects uninitialised / decode-mode streams, then delegates to
++ * intel_sst_read_write. Returns bytes captured or a negative error code.
++ */
++int intel_sst_read(struct file *file_ptr, char __user *buf,
++ size_t count, loff_t *offset)
++{
++ struct ioctl_pvt_data *data =
++ (struct ioctl_pvt_data *)file_ptr->private_data;
++ int str_id = data->str_id;
++ struct stream_info *stream = &sst_drv_ctx->streams[str_id];
++
++ printk(KERN_DEBUG "SST DBG:called for %d\n", str_id);
++ if (stream->status == STREAM_UN_INIT ||
++ stream->status == STREAM_DECODE) {
++ printk(KERN_ERR"SST ERR: BAD REQUEST!\n");
++ return -EBADRQC;
++ }
++ return intel_sst_read_write(str_id, buf, count);
++}
++
++/*
++ * intel_sst_aio_read - This function is called when user tries to capture out
++ * multiple data buffers
++ * @kiocb: pointer to a structure containing file pointer
++ * @iov: list of user buffer to be filled with captured
++ * @nr_segs: number of entries
++ * @offset: offset to start from
++ *
++ * Only synchronous kiocbs are accepted (true async AIO is rejected).
++ * Mirrors intel_sst_aio_write for the capture direction. Returns
++ * cumulative bytes captured or a negative error code.
++ */
++
++ssize_t intel_sst_aio_read(struct kiocb *kiocb, const struct iovec *iov,
++ unsigned long nr_segs, loff_t offset)
++{
++ int retval;
++ struct ioctl_pvt_data *data =
++ (struct ioctl_pvt_data *)kiocb->ki_filp->private_data;
++ int str_id = data->str_id;
++ struct stream_info *stream;
++
++ printk(KERN_DEBUG "SST DBG:entry - %ld\n", nr_segs);
++
++ if (is_sync_kiocb(kiocb) == false) {
++ printk(KERN_DEBUG "SST DBG:aio_read from user space is not allowed\n");
++ return -EINVAL;
++ }
++
++ printk(KERN_DEBUG "SST DBG:called for str_id %d \n", str_id);
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return -EINVAL;
++ stream = &sst_drv_ctx->streams[str_id];
++ /* mmapped streams must use the mmap ioctls, not read */
++ if (stream->mmapped == true) {
++ printk(KERN_ERR
++ "SST ERR: user write stream is mapped!!! ");
++ return -EIO;
++ }
++ if (stream->status == STREAM_UN_INIT ||
++ stream->status == STREAM_DECODE) {
++ printk(KERN_ERR "SST ERR: BAD REQUEST!\n ");
++ return -EBADRQC;
++ }
++ stream->curr_bytes = 0;
++ stream->cumm_bytes = 0;
++
++ printk(KERN_DEBUG "SST DBG:new segs %ld, offset %d, status %d\n" ,
++ nr_segs, (int) offset, (int) stream->status);
++ stream->buf_type = SST_BUF_USER_STATIC;
++ /* loop until every iovec segment has been filled */
++ do {
++ retval = snd_sst_userbufs_play_cap(iov, nr_segs,
++ str_id, stream);
++ if (retval < 0)
++ break;
++
++ } while (stream->sg_index < nr_segs);
++
++ stream->sg_index = 0;
++ stream->cur_ptr = NULL;
++ if (retval >= 0)
++ retval = stream->cumm_bytes;
++ printk(KERN_DEBUG "SST DBG:end of play/rec bytes = %d!!\n", retval);
++ return retval;
++}
++
++/*
++ * sst_print_stream_params - prints the stream parameters (debug fn)
++ * @get_prm: stream parameter block to dump; codec params followed by
++ * PCM params are logged at KERN_DEBUG. No side effects besides logging.
++ */
++static void sst_print_stream_params(struct snd_sst_get_stream_params *get_prm)
++{
++ printk(KERN_DEBUG "SST DBG:codec params:result =%d\n",
++ get_prm->codec_params.result);
++ printk(KERN_DEBUG "SST DBG:codec params:stream = %d\n",
++ get_prm->codec_params.stream_id);
++ printk(KERN_DEBUG "SST DBG:codec params:codec = %d\n",
++ get_prm->codec_params.codec);
++ printk(KERN_DEBUG "SST DBG:codec params:ops = %d\n",
++ get_prm->codec_params.ops);
++ printk(KERN_DEBUG "SST DBG:codec params:stream_type= %d\n",
++ get_prm->codec_params.stream_type);
++ printk(KERN_DEBUG "SST DBG:pcmparams:sfreq= %d\n",
++ get_prm->pcm_params.sfreq);
++ printk(KERN_DEBUG "SST DBG:pcmparams:num_chan= %d\n",
++ get_prm->pcm_params.num_chan);
++ printk(KERN_DEBUG "SST DBG:pcmparams:pcm_wd_sz= %d\n",
++ get_prm->pcm_params.pcm_wd_sz);
++ return;
++}
++
++/*
++ * sst_print_fw_info - prints the firmware information (debug fn)
++ * @fw_info: firmware capability block to dump; version, stream limits and
++ * per-module capabilities are logged at KERN_DEBUG. Logging only.
++ */
++static void sst_print_fw_info(struct snd_sst_fw_info *fw_info)
++{
++ printk(KERN_DEBUG "SST DBG:build = %d\n", fw_info->fw_version.build);
++ printk(KERN_DEBUG "SST DBG:minor = %d\n", fw_info->fw_version.minor);
++ printk(KERN_DEBUG "SST DBG:major= %d\n", fw_info->fw_version.major);
++ printk(KERN_DEBUG "SST DBG:max pcm = %d\n", fw_info->max_pcm_streams_supported);
++ printk(KERN_DEBUG "SST DBG:max enc = %d\n", fw_info->max_enc_streams_supported);
++ printk(KERN_DEBUG "SST DBG:min input buf = %d\n", fw_info->min_input_buf);
++ printk(KERN_DEBUG "SST DBG:pop:src_min= %d\n", fw_info->pop_info.src_min);
++ printk(KERN_DEBUG "SST DBG:pop:src_max= %d\n", fw_info->pop_info.src_max);
++ printk(KERN_DEBUG "SST DBG:pop:src= %d\n", fw_info->pop_info.src);
++ printk(KERN_DEBUG "SST DBG:pop:bass_boost= %d\n", fw_info->pop_info.bass_boost);
++ printk(KERN_DEBUG "SST DBG:pop:stereo_widening= %d\n", fw_info->pop_info.stereo_widening);
++ printk(KERN_DEBUG "SST DBG:pop:volume_control= %d\n", fw_info->pop_info.volume_control);
++ printk(KERN_DEBUG "SST DBG:pop:min_vol= %d\n", fw_info->pop_info.min_vol);
++ printk(KERN_DEBUG "SST DBG:pop:max_vol= %d\n", fw_info->pop_info.max_vol);
++ printk(KERN_DEBUG "SST DBG:prp:min_vol= %d\n", fw_info->prp_info.min_vol);
++ printk(KERN_DEBUG "SST DBG:prp:max_vol= %d\n", fw_info->prp_info.max_vol);
++ printk(KERN_DEBUG "SST DBG:prp:volume_control= %d\n", fw_info->prp_info.volume_control);
++ printk(KERN_DEBUG "SST DBG:mix:max streams = %d\n", fw_info->mix_info.max_streams);
++ printk(KERN_DEBUG "SST DBG:port0:port_type = %d\n", fw_info->port_info[0].port_type);
++ printk(KERN_DEBUG "SST DBG:port1:port_type = %d\n", fw_info->port_info[1].port_type);
++ return;
++}
++
++/*
++ * sst_get_stream_allocated - this function gets a stream allocated with
++ * the given params
++ * @str_param: stream parameters requested by the caller
++ * @block: index into sst_drv_ctx->alloc_block used to wait for the reply
++ * @pvt_id: private id identifying the requester
++ *
++ * Sends the allocation request to the DSP and blocks (with timeout) on
++ * the allocation block until the firmware replies. Returns the value of
++ * sst_alloc_stream on failure, otherwise the wait result.
++ */
++static int sst_get_stream_allocated(struct snd_sst_params *str_param,
++ u32 block, u32 pvt_id)
++{
++ int retval;
++ retval = sst_alloc_stream((char *) &str_param->sparams, str_param->ops,
++ str_param->codec, pvt_id);
++ if (retval) {
++ printk(KERN_ERR
++ "SST ERR: sst_alloc_stream failed %d \n", retval);
++ return retval;
++ }
++ /* Block the call for reply */
++ retval = sst_wait_timeout(sst_drv_ctx,
++ &sst_drv_ctx->alloc_block[block]);
++
++ return retval;
++}
++
++/*
++ * set_port_params - this function sets the port parameters at Sound card end
++ *
++ * NOTE(review): the entire body is commented out, so this is currently a
++ * no-op stub; the commented code would push sfreq/word size to the sound
++ * card ops for playback/capture. Confirm whether it should be restored
++ * or removed.
++ */
++static void set_port_params(struct snd_sst_params *str_param,
++ enum snd_sst_stream_ops ops)
++{
++ /*int sfreq = str_param->sparams.uc.pcm_params.sfreq;
++ int word_size = str_param->sparams.uc.pcm_params.pcm_wd_sz;
++
++ printk(KERN_DEBUG "SST DBG:sampling frequency = %d wd_size = %d \n", sfreq, word_size);
++
++ if (ops == STREAM_OPS_PLAYBACK ||
++ ops == STREAM_OPS_PLAYBACK_DRM) {
++ printk(KERN_DEBUG "SST DBG:Setting playback path and port settings...\n");
++ sst_drv_ctx->scard_ops.set_pcm_audio_params(sfreq,
++ word_size);
++ } else if (ops == STREAM_OPS_CAPTURE) {
++ printk(KERN_DEBUG "SST DBG:Setting capture path...\n");
++ sst_drv_ctx->scard_ops->set_pcm_audio_params(sfreq, word_size);
++ }*/
++ return;
++}
++
++/*
++ * sst_get_sfreq - this function returns the frequency of the stream
++ * @str_param: stream parameters carrying the codec-specific union
++ *
++ * Picks the sampling-frequency field matching str_param->codec from the
++ * parameter union; returns 0 for codecs without a known sfreq field.
++ */
++static int sst_get_sfreq(struct snd_sst_params *str_param)
++{
++ switch (str_param->codec) {
++ case SST_CODEC_TYPE_PCM:
++ return str_param->sparams.uc.pcm_params.sfreq;
++ case SST_CODEC_TYPE_MP3:
++ return str_param->sparams.uc.mp3_params.sfreq;
++ case SST_CODEC_TYPE_AAC:
++ return str_param->sparams.uc.aac_params.sfreq;
++ case SST_CODEC_TYPE_WMA9:
++ return str_param->sparams.uc.wma_params.sfreq;
++ default:
++ return 0;
++ }
++}
++
++/*
++ * sst_stalled - this function checks if the lpe is in stalled state
++ *
++ * Busy-polls sst_drv_ctx->lpe_stalled for up to 1000 ms (1 ms mdelay per
++ * iteration). Returns 0 as soon as the LPE is no longer stalled, or -1
++ * if it is still stalled after the retries are exhausted.
++ */
++int sst_stalled(void)
++{
++ int retry = 1000;
++ int retval = -1;
++
++ while(retry) {
++ if ( !sst_drv_ctx->lpe_stalled )
++ return 0;
++ //wait for time and re-check
++ mdelay(1);
++
++ retry--;
++ }
++
++ printk(KERN_DEBUG "SST DBG:LPE is in Stalled State\n");
++ return retval;
++}
++/*
++ * sst_get_stream - this function prepares for stream allocation
++ */
++static int sst_get_stream(struct snd_sst_params *str_param, u32 pvt_id)
++{
++ int i, retval;
++ struct stream_info *str_info;
++
++ /* stream is not allocated, we are allocating */
++ i = sst_get_block_stream(sst_drv_ctx);
++ printk(KERN_DEBUG "SST DBG:alloc block allocated = %d\n", i);
++ if (i < 0)
++ return -ENOMEM;
++ sst_drv_ctx->alloc_block[i].sst_id = pvt_id;
++
++ retval = sst_get_stream_allocated(str_param, i, pvt_id);
++ if (retval == -(SST_LIB_ERR_LIB_DNLD_REQUIRED)) {
++ /* codec download is required */
++ struct snd_sst_alloc_response *response =
++ sst_drv_ctx->alloc_block[i].ops_block.data;
++ printk(KERN_DEBUG "SST DBG:Codec is required.... trying that\n");
++ retval = sst_load_library(&response->lib_dnld,
++ str_param->ops, pvt_id);
++ kfree(response);
++
++ if (!retval) {
++ printk(KERN_DEBUG "SST DBG:codec was downloaded sucesfully \n");
++ printk(KERN_DEBUG "SST DBG:try alloac again\n");
++ sst_drv_ctx->alloc_block[i].ops_block.condition = false;
++
++ retval = sst_get_stream_allocated(str_param, i, pvt_id);
++
++ if (retval <= 0)
++ goto err;
++ set_port_params(str_param, str_param->ops);
++
++ printk(KERN_DEBUG "SST DBG:Allocation done stream id %d \n", retval);
++ } else {
++ printk(KERN_DEBUG "SST DBG:codec download failed \n");
++ retval = -EIO;
++ goto err;
++ }
++ } else if (retval <= 0)
++ goto err;
++ else
++ set_port_params(str_param, str_param->ops);
++
++ /* store sampling freq */
++ str_info = &sst_drv_ctx->streams[retval];
++ str_info->sfreq = sst_get_sfreq(str_param);
++
++ /* power on the analog, if reqd */
++ if (str_param->ops == STREAM_OPS_PLAYBACK ||
++ str_param->ops == STREAM_OPS_PLAYBACK_DRM) {
++
++ sst_drv_ctx->scard_ops->power_up_pmic_pb(
++ sst_drv_ctx->pmic_port_instance);
++ /*Only if the playback is MP3 - Send a message*/
++#ifdef CONFIG_MSTWN_POWER_MGMT
++ if(str_info->codec == SST_CODEC_TYPE_MP3) {
++ sst_ospm_send_event(OSPM_EVENT_LPAUDIO_START);
++ sst_drv_ctx->lpaudio_start++;
++ printk(KERN_DEBUG "SST DBG:lpaudio_start:%d", sst_drv_ctx->lpaudio_start);
++ printk(KERN_DEBUG "SST DBG:Sending OSPM_EVENT_LPAUDIO_START...\n");
++ }else {/*Only if the playback is non - MP3- Send a messageif not sent already*/
++ if(sst_drv_ctx->audio_start == 0) {
++ sst_ospm_send_event(OSPM_EVENT_SUBSYS_START_PLAY);
++ sst_drv_ctx->audio_start++;
++ printk(KERN_DEBUG "SST DBG:audio_start:%d", sst_drv_ctx->audio_start);
++ printk(KERN_DEBUG "SST DBG:Sending OSPM_EVENT_SUBSYS_START_PLAY...\n");
++
++ }
++ else {
++ sst_drv_ctx->audio_start++;
++ }
++ }
++#endif
++ sst_drv_ctx->pb_streams++;
++ } else if (str_param->ops == STREAM_OPS_CAPTURE) {
++
++ sst_drv_ctx->scard_ops->power_up_pmic_cp(
++ sst_drv_ctx->pmic_port_instance);
++ /*Send a messageif not sent already*/
++#ifdef CONFIG_MSTWN_POWER_MGMT
++ if(sst_drv_ctx->audio_start == 0) {
++ sst_ospm_send_event(OSPM_EVENT_SUBSYS_START_PLAY);
++ printk(KERN_DEBUG "SST DBG:audio_start:%d", sst_drv_ctx->audio_start);
++ printk(KERN_DEBUG "SST DBG:Sending OSPM_EVENT_SUBSYS_START_PLAY...\n");
++ sst_drv_ctx->audio_start++;
++ }else {
++ sst_drv_ctx->audio_start++;
++ }
++#endif
++ sst_drv_ctx->cp_streams++;
++ }
++
++err:
++ sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
++ return retval;
++}
++
++/**
++* intel_sst_ioctl - recieves the device ioctl's
++* @i_node: inode structure
++* @file_ptr: pointer to file
++* @cmd: Ioctl cmd
++* @arg: data
++*
++* This function is called by OS when a user space component
++* sends an Ioctl to SST driver
++*/
++int intel_sst_ioctl(struct inode *i_node, struct file *file_ptr,
++ unsigned int cmd, unsigned long arg)
++{
++ int retval = 0;
++ struct ioctl_pvt_data *data = NULL;
++ int str_id = 0, minor = 0;
++ dev_t device = i_node->i_rdev;
++
++ if (device == MKDEV(INTEL_SST_MAJOR, 0)) {
++ minor = 0;
++ data = (struct ioctl_pvt_data *)
++ file_ptr->private_data;
++ str_id = data->str_id;
++ } else if (device == MKDEV(INTEL_SST_MAJOR, 1))
++ minor = 1;
++ else
++ return -EINVAL;
++
++ if (sst_drv_ctx->sst_state != SST_FW_RUNNING) {
++ printk(KERN_ERR
++ "SST ERR: SST Not runng %d\n " , sst_drv_ctx->sst_state);
++ return -EBUSY;
++ }
++
++ switch (_IOC_NR(cmd)) {
++ case _IOC_NR(SNDRV_SST_STREAM_PAUSE):
++ printk(KERN_DEBUG "SST DBG:SNDRV_SST_IOCTL_PAUSE recieved for %d!\n", str_id);
++ if (minor != STREAM_MODULE) {
++ printk(KERN_ERR
++ "SST ERR: called for AM handle minor%d\n", minor);
++ retval = -EINVAL;
++ break;
++ }
++ retval = sst_pause_stream(str_id);
++ break;
++
++ case _IOC_NR(SNDRV_SST_STREAM_RESUME):
++ printk(KERN_DEBUG "SST DBG:SNDRV_SST_IOCTL_RESUME recieved!\n");
++ if (minor != STREAM_MODULE) {
++ printk(KERN_ERR
++ "SST ERR: caled for AM handle minor %d\n", minor);
++ retval = -EINVAL;
++ break;
++ }
++ retval = sst_resume_stream(str_id);
++ break;
++
++ case _IOC_NR(SNDRV_SST_STREAM_SET_PARAMS): {
++ struct snd_sst_params *str_param = (struct snd_sst_params *)arg;
++
++ printk(KERN_DEBUG "SST DBG:IOCTL_SET_PARAMS recieved!\n");
++ if (minor != STREAM_MODULE) {
++ printk(KERN_ERR
++ "SST ERR: caled for AM handle minor %d\n", minor);
++ retval = -EINVAL;
++ break;
++ }
++ sst_print_params(str_param);
++
++ if (!str_id) {
++ retval = sst_get_stream(str_param, data->pvt_id);
++ if (retval > 0) {
++ struct stream_info *str_info;
++ sst_drv_ctx->stream_cnt++;
++ data->str_id = retval;
++ str_info = &sst_drv_ctx->streams[retval];
++ str_info->src = SST_DRV;
++ retval = copy_to_user(&str_param->stream_id,
++ &retval, sizeof(__u32));
++ } else {
++ if (retval == -SST_ERR_INVALID_PARAMS)
++ retval = -EINVAL;
++ }
++ } else {
++ printk(KERN_DEBUG "SST DBG:SET_STREAM_PARAMS recieved!\n");
++ /* allocated set params only */
++ retval = sst_set_stream_param(str_id, str_param);
++ /* Block the call for reply */
++ if (!retval) {
++ int sfreq = 0, word_size = 0, num_channel = 0;
++ sfreq = str_param->sparams.uc.pcm_params.sfreq;
++ word_size = str_param->sparams.
++ uc.pcm_params.pcm_wd_sz;
++ num_channel = str_param->sparams.uc.pcm_params.num_chan;
++ if (str_param->ops == STREAM_OPS_CAPTURE) {
++ printk(KERN_DEBUG "SST DBG:SST sampling frequency= %d\n",
++ sfreq);
++ sst_drv_ctx->scard_ops->\
++ set_pcm_audio_params(sfreq, word_size, num_channel);
++ }
++ }
++ }
++ break;
++ }
++ case _IOC_NR(SNDRV_SST_SET_VOL): {
++ struct snd_sst_vol *set_vol;
++ struct snd_sst_vol *rec_vol = (struct snd_sst_vol *)arg;
++ printk(KERN_DEBUG "SST DBG:SNDRV_SST_SET_VOLUME recieved for %d!\n",
++ rec_vol->stream_id);
++ if (minor == STREAM_MODULE && rec_vol->stream_id == 0) {
++ printk(KERN_DEBUG "SST DBG:invalid operation!\n");
++ retval = -EPERM;
++ break;
++ }
++ set_vol = kzalloc(sizeof(*set_vol), GFP_ATOMIC);
++ if (!set_vol) {
++ printk(KERN_DEBUG "SST DBG:mem allocation failed\n");
++ retval = -ENOMEM;
++ break;
++ }
++ retval = copy_from_user(set_vol, rec_vol, sizeof(*set_vol));
++ if (retval) {
++ printk(KERN_DEBUG "SST DBG:copy failed\n");
++ retval = -EAGAIN;
++ break;
++ }
++ retval = sst_set_vol(set_vol);
++ kfree(set_vol);
++ break;
++ }
++ case _IOC_NR(SNDRV_SST_GET_VOL): {
++ struct snd_sst_vol *rec_vol = (struct snd_sst_vol *)arg;
++ struct snd_sst_vol get_vol;
++ printk(KERN_DEBUG "SST DBG:IOCTL_GET_VOLUME recieved for stream = %d!\n",
++ rec_vol->stream_id);
++ if (minor == STREAM_MODULE && rec_vol->stream_id == 0) {
++ printk(KERN_DEBUG "SST DBG:invalid operation!\n");
++ retval = -EPERM;
++ break;
++ }
++ get_vol.stream_id = rec_vol->stream_id;
++ retval = sst_get_vol(&get_vol);
++ if (retval) {
++ printk(KERN_ERR
++ "SST ERR: Get volume failed = %d\n", retval);
++ retval = -EIO;
++ break;
++ }
++ printk(KERN_DEBUG "SST DBG:id = %d\n, vol = %d, ramp_dur = %d, ramp_type=%d\n",
++ get_vol.stream_id, get_vol.volume,
++ get_vol.ramp_duration, get_vol.ramp_type);
++ retval = copy_to_user((struct snd_sst_vol *)arg,
++ &get_vol, sizeof(get_vol));
++ if (retval) {
++ printk(KERN_ERR
++ "SST ERR: copy to user failed %d\n", retval);
++ retval = -EIO;
++ break;
++ }
++ /*sst_print_get_vol_info(str_id, &get_vol);*/
++ break;
++ }
++
++ case _IOC_NR(SNDRV_SST_MUTE): {
++ struct snd_sst_mute *set_mute;
++ struct snd_sst_vol *rec_mute = (struct snd_sst_vol *)arg;
++ printk(KERN_DEBUG "SST DBG:SNDRV_SST_SET_VOLUME recieved for %d!\n",
++ rec_mute->stream_id);
++ if (minor == STREAM_MODULE && rec_mute->stream_id == 0) {
++ printk(KERN_DEBUG "SST DBG:invalid operation!\n");
++ retval = -EPERM;
++ break;
++ }
++ set_mute = kzalloc(sizeof(*set_mute), GFP_ATOMIC);
++ if (!set_mute) {
++ printk(KERN_DEBUG "SST DBG:mem allocation failed\n");
++ retval = -ENOMEM;
++ break;
++ }
++ retval = copy_from_user(set_mute, rec_mute, sizeof(*set_mute));
++ if (retval) {
++ printk(KERN_DEBUG "SST DBG:copy failed\n");
++ retval = -EAGAIN;
++ break;
++ }
++ retval = sst_set_mute(set_mute);
++ kfree(set_mute);
++ break;
++ }
++ case _IOC_NR(SNDRV_SST_STREAM_GET_PARAMS): {
++ struct snd_sst_get_stream_params get_params;
++
++ printk(KERN_DEBUG "SST DBG:IOCTL_GET_PARAMS recieved!\n");
++ if (minor != 0) {
++ printk(KERN_ERR
++ "SST ERR: called for AM handle minor %d\n", minor);
++ retval = -EINVAL;
++ break;
++ }
++
++ retval = sst_get_stream_params(str_id, &get_params);
++ if (retval) {
++ printk(KERN_ERR
++ "SST ERR: Get params failed = %d\n", retval);
++ retval = -EIO;
++ break;
++ }
++ retval = copy_to_user((struct snd_sst_get_stream_params *)arg,
++ &get_params, sizeof(get_params));
++ if (retval) {
++ printk(KERN_ERR
++ "SST ERR: copy to user failed %d\n" , retval);
++ retval = -EIO;
++ break;
++ }
++ sst_print_stream_params(&get_params);
++ break;
++ }
++
++ case _IOC_NR(SNDRV_SST_MMAP_PLAY):
++ case _IOC_NR(SNDRV_SST_MMAP_CAPTURE):
++ printk(KERN_DEBUG "SST DBG:SNDRV_SST_MMAP_PLAY/CAPTURE recieved!\n");
++ if (minor != STREAM_MODULE) {
++ printk(KERN_ERR
++ "SST ERR: called for AM handle minor %d\n" , minor);
++ retval = -EINVAL;
++ break;
++ }
++ retval = intel_sst_mmap_play_capture(str_id,
++ (struct snd_sst_mmap_buffs *)arg);
++ break;
++
++ case _IOC_NR(SNDRV_SST_STREAM_DROP):
++ printk(KERN_DEBUG "SST DBG:SNDRV_SST_IOCTL_DROP recieved!\n");
++ if (minor != STREAM_MODULE) {
++ retval = -EINVAL;
++ break;
++ }
++ retval = sst_drop_stream(str_id);
++ break;
++
++ case _IOC_NR(SNDRV_SST_STREAM_GET_TSTAMP): {
++ unsigned long long *ms = (unsigned long long *)arg;
++ struct snd_sst_tstamp tstamp = {0};
++ unsigned long long time, freq, mod;
++
++ printk(KERN_DEBUG "SST DBG:SNDRV_SST_STREAM_GET_TSTAMP recieved!\n");
++ if (minor != STREAM_MODULE) {
++ printk(KERN_ERR
++ "SST ERR: called for AM handle minor %d\n", minor);
++ retval = -EINVAL;
++ break;
++ }
++ memcpy_fromio(&tstamp,
++ ((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP)
++ +(str_id * sizeof(tstamp))),
++ sizeof(tstamp));
++ time = tstamp.samples_rendered;
++ printk(KERN_DEBUG "SST DBG:samples rendered! = 0x%llx\n", time);
++ freq = (unsigned long long) tstamp.sampling_frequency;
++ printk(KERN_DEBUG "SST DBG:freq = %llx\n", freq);
++ time = time * 1000; /* converting it to ms */
++ mod = do_div(time, freq);
++ printk(KERN_DEBUG "SST DBG:mod = 0x%llx\n", mod);
++ printk(KERN_DEBUG "SST DBG:msec = 0x%llx\n", time);
++ retval = copy_to_user(ms, &time, sizeof(*ms));
++ if (retval)
++ printk(KERN_ERR
++ "SST ERR: copy failed = %d\n", retval);
++ break;
++ }
++
++ case _IOC_NR(SNDRV_SST_STREAM_START):{
++ struct stream_info *stream;
++
++ printk(KERN_DEBUG "SST DBG:SNDRV_SST_STREAM_START recieved!\n");
++ if (minor != STREAM_MODULE) {
++ printk(KERN_ERR
++ "SST ERR: called for AM handle minor %d\n", minor);
++ retval = -EINVAL;
++ break;
++ }
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ break;
++ stream = &sst_drv_ctx->streams[str_id];
++ mutex_lock(&stream->lock);
++ if (stream->status == STREAM_INIT &&
++ stream->need_draining != true) {
++ printk(KERN_DEBUG "SST DBG:calling play frames...\n");
++ stream->prev = stream->status;
++ stream->status = STREAM_RUNNING;
++ if (stream->ops == STREAM_OPS_PLAYBACK ||
++ stream->ops == STREAM_OPS_PLAYBACK_DRM) {
++ retval = sst_play_frame(str_id);
++ /*sst_ospm_send_event(
++ OSPM_EVENT_SUBSYS_START_PLAY);*/
++ } else if (stream->ops == STREAM_OPS_CAPTURE)
++ retval = sst_capture_frame(str_id);
++ else {
++ printk(KERN_ERR
++ "SST ERR: Invalid ops 0x%x\n" , stream->ops);
++ retval = -EINVAL;
++ mutex_unlock(
++ &sst_drv_ctx->streams[str_id].lock);
++ break;
++ }
++ if (retval < 0) {
++ printk(KERN_ERR
++ "SST ERR: play/cptur frame fail \n");
++ stream->status = STREAM_INIT;
++ mutex_unlock(
++ &sst_drv_ctx->streams[str_id].lock);
++ break;
++ }
++ } else {
++ printk(KERN_ERR
++ "SST ERR: Inv strt for stream%d state0x%x\n", \
++ str_id, stream->status);
++ retval = -EINVAL;
++ }
++ mutex_unlock(&sst_drv_ctx->streams[str_id].lock);
++ break;
++ }
++
++ case _IOC_NR(SNDRV_SST_SET_TARGET_DEVICE): {
++ struct snd_sst_target_device *target_device;
++
++ printk(KERN_DEBUG "SST DBG:SNDRV_SST_SET_TARGET_PLAYBACK DEVICE recieved!\n");
++ target_device = (struct snd_sst_target_device *)arg;
++ BUG_ON(!target_device);
++ if (minor != AM_MODULE) {
++ printk(KERN_ERR
++ "SST ERR: called for non AM handle minor %d\n", minor);
++ retval = -EINVAL;
++ break;
++ }
++ retval = sst_target_device_select(target_device);
++ break;
++ }
++
++ case _IOC_NR(SNDRV_SST_DRIVER_INFO): {
++ struct snd_sst_driver_info *info =
++ (struct snd_sst_driver_info *)arg;
++
++ printk(KERN_DEBUG "SST DBG:SNDRV_SST_DRIVER_INFO recived \n");
++ info->version = SST_VERSION_NUM;
++ /* hard coding, shud get sumhow later */
++ info->active_pcm_streams = sst_drv_ctx->stream_cnt -
++ sst_drv_ctx->encoded_cnt;
++ info->active_enc_streams = sst_drv_ctx->encoded_cnt;
++ info->max_pcm_streams = MAX_ACTIVE_STREAM - MAX_ENC_STREAM;
++ info->max_enc_streams = MAX_ENC_STREAM;
++ info->buf_per_stream = sst_drv_ctx->mmap_len;
++ break;
++ }
++
++ case _IOC_NR(SNDRV_SST_STREAM_DECODE): {
++ struct snd_sst_dbufs *param =
++ (struct snd_sst_dbufs *)arg, dbufs_local;
++ int i;
++ struct snd_sst_buffs ibufs, obufs;
++ struct snd_sst_buff_entry ibuf_temp[param->ibufs->entries],
++ obuf_temp[param->obufs->entries];
++
++ printk(KERN_DEBUG "SST DBG:SNDRV_SST_STREAM_DECODE recived \n");
++ if (minor != STREAM_MODULE) {
++ printk(KERN_ERR
++ "SST ERR: called for AM handle minor %d\n", minor);
++ retval = -EINVAL;
++ break;
++ }
++ if (!param) {
++ printk(KERN_ERR "SST ERR: null param passed\n");
++ retval = -EINVAL;
++ break;
++ }
++
++ dbufs_local.input_bytes_consumed = param->input_bytes_consumed;
++ dbufs_local.output_bytes_produced =
++ param->output_bytes_produced;
++ dbufs_local.ibufs = &ibufs;
++ dbufs_local.obufs = &obufs;
++ dbufs_local.ibufs->entries = param->ibufs->entries;
++ dbufs_local.ibufs->type = param->ibufs->type;
++ dbufs_local.obufs->entries = param->obufs->entries;
++ dbufs_local.obufs->type = param->obufs->type;
++
++ dbufs_local.ibufs->buff_entry = ibuf_temp;
++ for (i = 0; i < dbufs_local.ibufs->entries; i++) {
++ ibuf_temp[i].buffer =
++ param->ibufs->buff_entry[i].buffer;
++ ibuf_temp[i].size =
++ param->ibufs->buff_entry[i].size;
++ }
++ dbufs_local.obufs->buff_entry = obuf_temp;
++ for (i = 0; i < dbufs_local.obufs->entries; i++) {
++ obuf_temp[i].buffer =
++ param->obufs->buff_entry[i].buffer;
++ obuf_temp[i].size =
++ param->obufs->buff_entry[i].size;
++ }
++ retval = sst_decode(str_id, &dbufs_local);
++ if (retval) {
++ printk(KERN_ERR"SST ERR: decoding failed \n");
++ retval = -EAGAIN;
++ }
++ retval = copy_to_user(&param->input_bytes_consumed,
++ &dbufs_local.input_bytes_consumed,
++ sizeof(unsigned long long));
++ if (retval) {
++ printk(KERN_ERR"SST ERR: copy to user failed \n");
++ retval = -EAGAIN;
++ break;
++ }
++ retval = copy_to_user(&param->output_bytes_produced,
++ &dbufs_local.output_bytes_produced,
++ sizeof(unsigned long long));
++ if (retval) {
++ printk(KERN_ERR"SST ERR: copy to user failed \n");
++ retval = -EAGAIN;
++ break;
++ }
++ printk(KERN_DEBUG "SST DBG:input_bytes_consumed=%lld\n",
++ param->input_bytes_consumed);
++ printk(KERN_DEBUG "SST DBG:output_bytes_produced=%lld\n",
++ param->output_bytes_produced);
++ printk(KERN_DEBUG "SST DBG:ibufs->entries=%d\n", param->ibufs->entries);
++ printk(KERN_DEBUG "SST DBG:input_consumed = %lld, output_produced = %lld \n",
++ param->input_bytes_consumed,
++ param->output_bytes_produced);
++ printk(KERN_DEBUG "SST DBG:first ibufs size=%d\n",
++ param->ibufs->buff_entry[0].size);
++ printk(KERN_DEBUG "SST DBG:first ibufs addr=%p\n",
++ param->ibufs->buff_entry[0].buffer);
++ printk(KERN_DEBUG "SST DBG:obufs->entries=%d\n", param->obufs->entries);
++ printk(KERN_DEBUG "SST DBG:first obufs size=%d\n",
++ param->obufs->buff_entry[0].size);
++ printk(KERN_DEBUG "SST DBG:first obufs addr=%p\n",
++ param->obufs->buff_entry[0].buffer);
++ break;
++ }
++
++ case _IOC_NR(SNDRV_SST_STREAM_DRAIN):
++ printk(KERN_DEBUG "SST DBG:SNDRV_SST_STREAM_DRAIN recived \n");
++ if (minor != STREAM_MODULE) {
++ printk(KERN_ERR
++ "SST ERR: caled for AM handle minr %d\n", minor);
++ retval = -EINVAL;
++ break;
++ }
++ retval = sst_drain_stream(str_id);
++ break;
++
++ case _IOC_NR(SNDRV_SST_STREAM_BYTES_DECODED): {
++ unsigned long long *bytes = (unsigned long long *)arg;
++ struct snd_sst_tstamp tstamp = {0};
++
++ printk(KERN_DEBUG "SST DBG:SNDRV_SST_STREAM_BYTES_DECODED recieved!\n");
++ if (minor != STREAM_MODULE) {
++ printk(KERN_ERR
++ "SST ERR: caled for AM hndle minr %d\n", minor);
++ retval = -EINVAL;
++ break;
++ }
++ memcpy_fromio(&tstamp,
++ ((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP)
++ +(str_id * sizeof(tstamp))),
++ sizeof(tstamp));
++ retval = copy_to_user(bytes, &tstamp.bytes_processed,
++ sizeof(*bytes));
++ printk(KERN_DEBUG "SST DBG:bytes processed =%lld\n", tstamp.bytes_processed);
++ if (retval)
++ printk(KERN_ERR
++ "SST ERR: copy failed = %d\n", retval);
++ break;
++ }
++ case _IOC_NR(SNDRV_SST_FW_INFO): {
++ struct snd_sst_fw_info *fw_info;
++
++ printk(KERN_DEBUG "SST DBG:SNDRV_SST_FW_INFO recived \n");
++
++ fw_info = kzalloc(sizeof(*fw_info), GFP_ATOMIC);
++ if (!fw_info) {
++ printk(KERN_ERR "SST ERR: mem alocation fail\n");
++ retval = -ENOMEM;
++ break;
++ }
++ retval = sst_get_fw_info(fw_info);
++ if (retval) {
++ printk(KERN_ERR
++ "SST ERR: sst_get_fw_info fail = %d\n", retval);
++ kfree(fw_info);
++ break;
++ }
++ retval = copy_to_user((struct snd_sst_dbufs *)arg,
++ fw_info, sizeof(*fw_info));
++ if (retval) {
++ printk(KERN_ERR
++ "SST ERR: copy to user failed %d\n", retval);
++ kfree(fw_info);
++ retval = -EIO;
++ break;
++ }
++ sst_print_fw_info(fw_info);
++ kfree(fw_info);
++ break;
++ }
++ default:
++ printk(KERN_DEBUG "SST DBG:IOCTL not supported yet !\n");
++ retval = -ENOTTY;
++ }
++ printk(KERN_DEBUG "SST DBG:...complete ret code = %d\n", retval);
++
++ return retval;
++}
++
++/*
++ Intelmid driver interface Routines
++*/
++
++void sst_process_mad_ops(struct work_struct *work)
++{
++ struct mad_ops_wq *mad_ops =
++ container_of(work, struct mad_ops_wq, wq);
++ int retval = 0;
++ struct stream_info *stream;
++
++ switch (mad_ops->control_op) {
++ case SST_SND_PAUSE:
++ retval = sst_pause_stream(mad_ops->stream_id);
++ break;
++ case SST_SND_RESUME:
++ retval = sst_resume_stream(mad_ops->stream_id);
++ break;
++ case SST_SND_DROP:
++ retval = sst_drop_stream(mad_ops->stream_id);
++ break;
++ case SST_SND_STREAM_PROCESS:
++ printk(KERN_DEBUG "SST DBG:play/capt frames...\n");
++ stream = &sst_drv_ctx->streams[mad_ops->stream_id];
++ if (stream->status == STREAM_UN_INIT)
++ return;
++ stream->prev = stream->status;
++ stream->status = STREAM_RUNNING;
++ stream->data_blk.on = false;
++ if (stream->ops == STREAM_OPS_PLAYBACK)
++ retval = sst_play_frame(mad_ops->stream_id);
++ else if (stream->ops == STREAM_OPS_CAPTURE)
++ retval = sst_capture_frame(mad_ops->stream_id);
++ else
++ printk(KERN_ERR
++ "SST ERR: invalid stream ops invoked \n");
++ if (retval < 0)
++ printk(KERN_ERR
++ "SST ERR: play/captur frames failed \n");
++ break;
++ default:
++ printk(KERN_ERR
++ "SST ERR: wrong control_ops reported\n");
++ }
++ return;
++}
++/**
++* sst_control_set - Set Control params
++* @control_list: list of controls to be set
++*
++* This function is called by MID sound card driver to set
++* SST/Sound card controls. This is registered with MID driver
++*/
++int sst_control_set(int control_element, void *value)
++{
++ int retval = 0, str_id = 0, status;
++ struct stream_info *stream;
++
++ if (sst_drv_ctx->sst_state == SST_UN_INIT) {
++ /* FW is not downloaded */
++ printk(KERN_DEBUG "SST DBG:DSP Downloading FW now...\n");
++ retval = sst_download_fw();
++ if (retval) {
++ printk(KERN_ERR
++ "SST ERR: FW download failed = 0x%x, abort\n", retval);
++ return retval;
++ }
++ }
++
++ switch (control_element) {
++ case SST_SND_ALLOC: {
++ struct snd_sst_params *str_param;
++ int pcm_id = sst_assign_pvt_id(sst_drv_ctx);
++ struct stream_info *str_info;
++
++ str_param = (struct snd_sst_params *)value;
++ BUG_ON(!str_param);
++ sst_print_params(str_param);
++ retval = sst_get_stream(str_param, pcm_id);
++ if (retval >= 0)
++ sst_drv_ctx->stream_cnt++;
++ /*if (str_param->ops == STREAM_OPS_PLAYBACK ||
++ str_param->ops == STREAM_OPS_PLAYBACK_DRM)
++ sst_ospm_send_event(OSPM_EVENT_SUBSYS_START_PLAY);*/
++ str_info = &sst_drv_ctx->streams[retval];
++ str_info->src = MAD_DRV;
++ break;
++ }
++
++ case SST_SND_PAUSE:
++ case SST_SND_RESUME:
++ case SST_SND_DROP:
++ sst_drv_ctx->mad_ops.control_op = control_element;
++ sst_drv_ctx->mad_ops.stream_id = *(int *)value;
++ queue_work(sst_drv_ctx->mad_wq, &sst_drv_ctx->mad_ops.wq);
++ break;
++
++ case SST_SND_FREE:
++ str_id = *(int *)value;
++ stream = &sst_drv_ctx->streams[str_id];
++ free_stream_context(str_id);
++ stream->pcm_substream = NULL;
++ stream->period_elapsed = NULL;
++ sst_drv_ctx->stream_cnt--;
++ break;
++
++ case SST_SND_STREAM_INIT: {
++ struct pcm_stream_info *str_info;
++ struct stream_info *stream;
++
++ printk(KERN_DEBUG "SST DBG:stream init called\n");
++ str_info = (struct pcm_stream_info *)value;
++ str_id = str_info->str_id;
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ break;
++
++ stream = &sst_drv_ctx->streams[str_id];
++ printk(KERN_DEBUG "SST DBG:setting the period ptrs\n");
++ stream->pcm_substream = str_info->mad_substream;
++ stream->period_elapsed = str_info->period_elapsed;
++ stream->sfreq = str_info->sfreq;
++ stream->prev = stream->status;
++ stream->status = STREAM_INIT;
++ break;
++ }
++
++ case SST_SND_BUFFER_POINTER: {
++ struct pcm_stream_info *stream_info;
++ struct snd_sst_tstamp fw_tstamp = {0,};
++ struct stream_info *stream;
++
++ // printk(KERN_DEBUG "SST DBG:buffer pointer query\n");
++
++ stream_info = (struct pcm_stream_info *)value;
++ str_id = stream_info->str_id;
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ break;
++ stream = &sst_drv_ctx->streams[str_id];
++
++ if (!stream->pcm_substream)
++ break;
++ memcpy_fromio(&fw_tstamp,
++ ((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP)
++ +(str_id * sizeof(fw_tstamp))),
++ sizeof(fw_tstamp));
++
++ // printk(KERN_DEBUG "SST DBG:strid = %d\n", str_id);
++
++ if (stream->ops == STREAM_OPS_PLAYBACK)
++ stream_info->buffer_ptr = fw_tstamp.samples_rendered;
++ else
++ stream_info->buffer_ptr = fw_tstamp.samples_processed;
++ /* printk(KERN_DEBUG "SST DBG:samples played = %lld\n",
++ stream_info->buffer_ptr);
++ */
++ break;
++ }
++ case SST_ENABLE_RX_TIME_SLOT: {
++ status = *(int *)value;
++ sst_drv_ctx->rx_time_slot_status = status ;
++ printk(KERN_DEBUG "SST DBG:in case:: **********SST_ENABLE_RX_TIME_SLOT*********** \n");
++ sst_enable_rx_timeslot(status);
++ break;
++ }
++ default:
++ /* Illegal case */
++ printk(KERN_ERR"SST ERR: illegal req\n");
++ return -EINVAL;
++ }
++// printk(KERN_DEBUG "SST DBG:...complete ret code = %d\n", retval);
++
++ return retval;
++}
++
++
++/**
++* sst_send_data_to_HW - send data buffers
++* @buffer_data: user buffer
++*
++* This function is called by MID sound card driver to send buffer
++* to HW. This is registered with MID driver
++*/
++int sst_send_buffer_to_HW(int str_id, struct stream_buffer *mad_buf)
++{
++ /* recvd a buffer map it to stream */
++ /* this is a PCM stream and playback */
++ int retval = 0;
++ bool flag_add = false;
++ struct sst_stream_bufs *sst_buf = NULL, *_sst_buf;
++ struct stream_info *stream;
++
++ if (!mad_buf || !mad_buf->addr || !mad_buf->length) {
++ printk(KERN_ERR
++ "SST ERR: Null Ptr or buf size = 0\n");
++ return -EINVAL;
++ }
++
++ if (sst_drv_ctx->sst_state != SST_FW_RUNNING) {
++ printk(KERN_ERR
++ "SST ERR: SST Not runng: %d\n", sst_drv_ctx->sst_state);
++ return -EBUSY;
++ }
++
++ retval = sst_validate_strid(str_id);
++ if (retval < 0)
++ return -EINVAL;
++
++ stream = &sst_drv_ctx->streams[str_id];
++ printk(KERN_DEBUG "SST DBG:stream status = %d strid=%d\n", stream->status, str_id);
++ printk(KERN_DEBUG "SST DBG:stream codec = %d, prevstate=%d\n",
++ stream->codec, stream->prev);
++ if (stream->status == STREAM_UN_INIT) {
++ printk(KERN_ERR"SST ERR: BAD REQUEST!\n");
++ return -EBADRQC;
++ }
++ printk(KERN_DEBUG "SST DBG:received addr=0x%x size = 0x%x\n",
++ (unsigned int)mad_buf->addr, mad_buf->length);
++ /* list is not empty */
++ list_for_each_entry_safe(sst_buf, _sst_buf, &stream->bufs, node) {
++ if (sst_buf->in_use == true)
++ continue;
++ else if ((int) mad_buf->addr !=
++ (int)sst_buf->addr + sst_buf->size)
++ continue;
++ else {
++ sst_buf->size += mad_buf->length;
++ flag_add = true;
++ printk(KERN_DEBUG "SST DBG:inc addr = 0x%p, base = 0x%x inc_val = 0x%x\n",
++ sst_buf->addr, sst_buf->size, mad_buf->length);
++ break;
++ }
++ }
++
++ if (flag_add == false) {
++ sst_buf = kzalloc(sizeof(*sst_buf), GFP_ATOMIC);
++ if (!sst_buf)
++ return -ENOMEM;
++ sst_buf->size = mad_buf->length;
++ sst_buf->addr = (void *)mad_buf->addr;
++ sst_buf->offset = 0;
++ sst_buf->in_use = false;
++ /*adding without locking FIXME*/
++ if( in_interrupt()) {
++ list_add_tail(&sst_buf->node, &stream->bufs);
++ } else {
++ spin_lock(&stream->pcm_lock);
++ list_add_tail(&sst_buf->node, &stream->bufs);
++ spin_unlock(&stream->pcm_lock);
++ }
++
++
++ flag_add = true;
++ printk(KERN_DEBUG "SST DBG:entry added addr = 0x%x size = 0x%x\n",
++ (unsigned int)mad_buf->addr, mad_buf->length);
++ }
++
++ if (stream->status == STREAM_INIT) {
++ sst_drv_ctx->mad_ops.control_op = SST_SND_STREAM_PROCESS;
++ sst_drv_ctx->mad_ops.stream_id = str_id;
++ queue_work(sst_drv_ctx->mad_wq, &sst_drv_ctx->mad_ops.wq);
++ }
++
++ return retval;
++}
++
++struct intel_sst_card_ops sst_pmic_ops = {
++ .control_set = sst_control_set,
++ .send_buffer = sst_send_buffer_to_HW,
++};
++
++/**
++* register_sst_card- function for sound card to register
++* @card: pointer to structure of operations
++* This function is called card driver loads and is ready for registration
++*/
++int register_sst_card(struct intel_sst_card_ops *card)
++{
++
++ if (!card || !card->module_name) {
++ printk(KERN_ERR "SST ERR: Null Pointer Passed\n");
++ return -EINVAL;
++ }
++
++ if (sst_drv_ctx->pmic_state == SND_MAD_UN_INIT) {
++ /* register this driver */
++ if ((strncmp(SST_CARD_NAMES, card->module_name,
++ strlen(SST_CARD_NAMES))) == 0) {
++ sst_drv_ctx->pmic_vendor = card->vendor_id;
++ sst_drv_ctx->scard_ops = card->scard_ops;
++ sst_pmic_ops.module_name = card->module_name;
++ sst_drv_ctx->pmic_state = SND_MAD_INIT_DONE;
++ sst_drv_ctx->rx_time_slot_status = RX_TIMESLOT_UNINIT;
++ card->control_set = sst_pmic_ops.control_set;
++ card->send_buffer = sst_pmic_ops.send_buffer;
++ sst_drv_ctx->scard_ops->card_status = SND_CARD_UN_INIT;
++ /* initialize card to know good state */
++ /*sst_drv_ctx->scard_ops->init_card();*/
++ return 0;
++ } else {
++ printk(KERN_ERR
++ "SST ERR: strcmp failed %s \n", card->module_name);
++ return -EINVAL;
++ }
++
++ } else {
++ /* already registered a driver */
++ printk(KERN_ERR
++ "SST ERR: Repeat for register..denied\n");
++ return -EBADRQC;
++ }
++ return 0;
++}
++EXPORT_SYMBOL_GPL(register_sst_card);
++
++/**
++* unregister_sst_card- function for sound card to un-register
++* @card: pointer to structure of operations
++* This function is called when card driver unloads
++*/
++void unregister_sst_card(struct intel_sst_card_ops *card)
++{
++ if (sst_pmic_ops.module_name == card->module_name) {
++ /* unreg */
++ sst_pmic_ops.module_name = "";
++ sst_drv_ctx->pmic_state = SND_MAD_UN_INIT;
++ printk(KERN_DEBUG "SST DBG:Unregistered %s\n", card->module_name);
++ }
++ return;
++}
++EXPORT_SYMBOL_GPL(unregister_sst_card);
++
++/**
++* lpe_mask_periphral_intr- function to mask SST DSP peripheral interrupt
++* @device: device interrupt that needs masking
++*/
++int lpe_mask_periphral_intr(enum lpe_periphral device)
++{
++ union sst_pimr_reg pimr = {{0},};
++ if (!sst_drv_ctx)
++ return -EIO;
++
++ pimr.full = readl(sst_drv_ctx->shim + SST_PIMR);
++
++ switch (device) {
++ case LPE_DMA:
++ pimr.part.dmac_sc = 1;
++ /* dummy register for shim workaround */
++ writel(pimr.full, sst_drv_ctx->shim + SST_ISRD);
++ writel(pimr.full, sst_drv_ctx->shim + SST_PIMR);
++ break;
++
++ case LPE_SSP0:
++ break;
++
++ case LPE_SSP1:
++ break;
++
++ default:
++ break;
++ }
++ return 0;
++}
++EXPORT_SYMBOL_GPL(lpe_mask_periphral_intr);
++
++/**
++* lpe_unmask_periphral_intr- function to unmask SST DSP peripheral interrupt
++* @device: device interrupt that needs unmasking
++*/
++int lpe_unmask_periphral_intr(enum lpe_periphral device)
++{
++ union sst_pimr_reg pimr = {{0},};
++ if (!sst_drv_ctx)
++ return -EIO;
++
++ pimr.full = readl(sst_drv_ctx->shim + SST_PIMR);
++
++ switch (device) {
++ case LPE_DMA:
++ pimr.part.dmac_sc = 0;
++ /* dummy register for shim workaround */
++ writel(pimr.full, sst_drv_ctx->shim + SST_ISRD);
++ writel(pimr.full, sst_drv_ctx->shim + SST_PIMR);
++ break;
++
++ case LPE_SSP0:
++ break;
++
++ case LPE_SSP1:
++ break;
++
++ default:
++ break;
++ }
++ return 0;
++
++}
++EXPORT_SYMBOL_GPL(lpe_unmask_periphral_intr);
++
++/**
++* lpe_periphral_intr_status- function returns SST peripheral interrupt status
++* @device: device for which the status is enquired
++* @status: out parameters with the status of the peripheral device
++*/
++int lpe_periphral_intr_status(enum lpe_periphral device, int *status)
++{
++ union sst_pisr_reg pisr = {{0},};
++ if (!sst_drv_ctx)
++ return -EIO;
++
++ pisr.full = readl(sst_drv_ctx->shim + SST_PISR);
++
++ switch (device) {
++ case LPE_DMA:
++ *status = pisr.part.dmac;
++ break;
++
++ case LPE_SSP0:
++ break;
++
++ case LPE_SSP1:
++ break;
++
++ default:
++ break;
++ }
++ return 0;
++}
++EXPORT_SYMBOL_GPL(lpe_periphral_intr_status);
+--
+1.6.2.2
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-4-8.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-4-8.patch
new file mode 100644
index 0000000..da2912b
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-4-8.patch
@@ -0,0 +1,1285 @@
+From 395150e235d193ad9c9e5071d4752e8f436db99c Mon Sep 17 00:00:00 2001
+From: R, Dharageswari <dharageswari.r@intel.com>
+Date: Thu, 29 Apr 2010 20:25:00 +0530
+Subject: [PATCH] ADR-Post-Beta-0.05.002.03-4/8-Adding Moorestown Audio Drivers: SST IPC modules
+
+This adds the IPC module which uses Inter process mechanism to communicate
+between driver & SST engine. The SST engine is a DSP processor.
+To communicate between IA processor and DSP, IPC doorbell registers are used.
+A write to these registers triggers an interrupt to other side.
+The format of messages and "mailbox" for message payload is defined
+in intel_sst_fw_ipc.h
+
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+
+ new file: sound/pci/sst/intel_sst_fw_ipc.h
+ new file: sound/pci/sst/intel_sst_ipc.c
+Patch-mainline: 2.6.35?
+---
+ sound/pci/sst/intel_sst_fw_ipc.h | 403 ++++++++++++++++++
+ sound/pci/sst/intel_sst_ipc.c | 843 ++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 1246 insertions(+), 0 deletions(-)
+ create mode 100644 sound/pci/sst/intel_sst_fw_ipc.h
+ create mode 100644 sound/pci/sst/intel_sst_ipc.c
+
+diff --git a/sound/pci/sst/intel_sst_fw_ipc.h b/sound/pci/sst/intel_sst_fw_ipc.h
+new file mode 100644
+index 0000000..f2fad9c
+--- /dev/null
++++ b/sound/pci/sst/intel_sst_fw_ipc.h
+@@ -0,0 +1,403 @@
++#ifndef __INTEL_SST_FW_IPC_H__
++#define __INTEL_SST_FW_IPC_H__
++/*
++* intel_sst_fw_ipc.h - Intel SST Driver for audio engine
++*
++* Copyright (C) 2008-10 Intel Corporation
++* Author: Vinod Koul <vinod.koul@intel.com>
++* Harsha Priya <priya.harsha@intel.com>
++* Dharageswari R <dharageswari.r@intel.com>
++* KP Jeeja <jeeja.kp@intel.com>
++* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; version 2 of the License.
++*
++* This program is distributed in the hope that it will be useful, but
++* WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++* General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License along
++* with this program; if not, write to the Free Software Foundation, Inc.,
++* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++*
++* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++*
++* This driver exposes the audio engine functionalities to the ALSA
++* and middleware.
++* This file has definitions shared between the firmware and driver
++*/
++
++#define MAX_NUM_STREAMS 4
++#define MAX_DBG_RW_BYTES 80
++#define MAX_NUM_SCATTER_BUFFERS 8
++#define MAX_LOOP_BACK_DWORDS 8
++/* IPC base address and mailbox, timestamp offsets */
++#define SST_MAILBOX_SIZE 0x0400
++#define SST_MAILBOX_SEND 0x0000
++#define SST_MAILBOX_RCV 0x0804
++#define SST_TIME_STAMP 0x1800
++#define SST_RESERVED_OFFSET 0x1840
++#define SST_CHEKPOINT_OFFSET 0x1C00
++#define REPLY_MSG 0x80
++
++/* Message ID's for IPC messages */
++/* Bits B7: SST or IA/SC ; B6-B4: Msg Category; B3-B0: Msg Type */
++
++/* I2L Firmware/Codec Download msgs */
++#define IPC_IA_PREP_LIB_DNLD 0x01
++#define IPC_IA_LIB_DNLD_CMPLT 0x02
++
++#define IPC_IA_SET_PMIC_TYPE 0x03
++#define IPC_IA_GET_FW_VERSION 0x04
++#define IPC_IA_GET_FW_BUILD_INF 0x05
++#define IPC_IA_GET_FW_INFO 0x06
++
++/* I2L Codec Config/control msgs */
++#define IPC_IA_SET_CODEC_PARAMS 0x10
++#define IPC_IA_GET_CODEC_PARAMS 0x11
++#define IPC_IA_SET_PPP_PARAMS 0x12
++#define IPC_IA_GET_PPP_PARAMS 0x13
++#define IPC_IA_PLAY_FRAMES 0x14
++#define IPC_IA_CAPT_FRAMES 0x15
++#define IPC_IA_PLAY_VOICE 0x16
++#define IPC_IA_CAPT_VOICE 0x17
++#define IPC_IA_DECODE_FRAMES 0x18
++
++/* I2L Stream config/control msgs */
++#define IPC_IA_ALLOC_STREAM 0x20 /* Allocate a stream ID */
++#define IPC_IA_FREE_STREAM 0x21 /* Free the stream ID */
++#define IPC_IA_SET_STREAM_PARAMS 0x22
++#define IPC_IA_GET_STREAM_PARAMS 0x23
++#define IPC_IA_PAUSE_STREAM 0x24
++#define IPC_IA_RESUME_STREAM 0x25
++#define IPC_IA_DROP_STREAM 0x26
++#define IPC_IA_DRAIN_STREAM 0x27 /* Short msg with str_id */
++#define IPC_IA_TARGET_DEV_SELECT 0x28
++#define IPC_IA_CONTROL_ROUTING 0x29
++
++#define IPC_IA_SET_STREAM_VOL 0x2A /*Vol for stream, pre mixer */
++#define IPC_IA_GET_STREAM_VOL 0x2B
++#define IPC_IA_SET_STREAM_MUTE 0x2C
++#define IPC_IA_GET_STREAM_MUTE 0x2D
++#define IPC_IA_SET_MASTER_VOL 0x2E /* set vol for post mixer */
++#define IPC_IA_GET_MASTER_VOL 0x2F /* Get Volume for post mixer */
++#define IPC_IA_SET_MASTER_MUTE 0x30 /* Set Master Mute post mixer */
++#define IPC_IA_GET_MASTER_MUTE 0x31 /* Get Master Mute; post mixer */
++
++/* Debug msgs */
++#define IPC_IA_DBG_MEM_READ 0x40
++#define IPC_IA_DBG_MEM_WRITE 0x41
++#define IPC_IA_DBG_LOOP_BACK 0x42
++
++/* L2I Firmware/Codec Download msgs */
++#define IPC_IA_FW_INIT_CMPLT 0x81
++#define IPC_IA_LPE_GETTING_STALLED 0x82
++#define IPC_IA_LPE_UNSTALLED 0x83
++
++/* L2I Codec Config/control msgs */
++#define IPC_SST_GET_PLAY_FRAMES 0x90 /* Request IA more data */
++#define IPC_SST_GET_CAPT_FRAMES 0x91 /* Request IA more data */
++#define IPC_SST_BUF_UNDER_RUN 0x92 /* PB Under run and stopped */
++#define IPC_SST_BUF_OVER_RUN 0x93 /* CAP Under run and stopped */
++#define IPC_SST_DRAIN_END 0x94 /* PB Drain complete and stopped */
++#define IPC_SST_CHNGE_SSP_PARAMS 0x95 /* PB SSP parameters changed */
++#define IPC_SST_STREAM_PROCESS_FATAL_ERR 0x96/* error in processing a stream */
++#define IPC_SST_PERIOD_ELAPSED 0x97 /* period elapsed */
++#define IPC_IA_TARGET_DEV_CHNGD 0x98 /* error in processing a stream */
++
++/* L2S messages */
++#define IPC_SC_DDR_LINK_UP 0xC0
++#define IPC_SC_DDR_LINK_DOWN 0xC1
++
++/* L2I Error reporting msgs */
++#define IPC_IA_MEM_ALLOC_FAIL 0xE0
++#define IPC_IA_PROC_ERR 0xE1 /* error in processing a
++ stream can be used by playback and
++ capture modules */
++
++/* L2I Debug msgs */
++#define IPC_IA_PRINT_STRING 0xF0
++
++#define IPC_IA_ENABLE_RX_TIME_SLOT 0x2E /* Enable Rx time slot 0 or 1 */
++
++
++/* Command Response or Acknowledge message to any IPC message will have
++ * same message ID and stream ID information which is sent.
++ * There is no specific Ack message ID. The data field is used as response
++ * meaning.
++ */
++enum ackData {
++ IPC_ACK_SUCCESS = 0,
++ IPC_ACK_FAILURE
++};
++
++
++enum sst_error_codes {
++ /* Error code,response to msgId: Description */
++ /* Common error codes */
++ SST_SUCCESS = 0, /* Success */
++ SST_ERR_INVALID_STREAM_ID, /* Invalid stream ID */
++ SST_ERR_INVALID_MSG_ID, /* Invalid message ID */
++ SST_ERR_INVALID_STREAM_OP, /* Invalid stream operation request */
++ SST_ERR_INVALID_PARAMS, /* Invalid params */
++ SST_ERR_INVALID_CODEC, /* Invalid codec type */
++ SST_ERR_INVALID_MEDIA_TYPE, /* Invalid media type */
++ SST_ERR_STREAM_ERR, /* ANY: Stream control or config or
++ processing error */
++
++ /* IPC specific error codes */
++ SST_IPC_ERR_CALL_BACK_NOT_REGD, /* Call back for msg not regd */
++ SST_IPC_ERR_STREAM_NOT_ALLOCATED, /* Stream is not allocated */
++ SST_IPC_ERR_STREAM_ALLOC_FAILED, /* ALLOC:Stream alloc failed */
++ SST_IPC_ERR_GET_STREAM_FAILED, /* ALLOC:Get stream id failed*/
++ SST_ERR_MOD_NOT_AVAIL, /* SET/GET: Mod(AEC/AGC/ALC) not available */
++ SST_ERR_MOD_DNLD_RQD, /* SET/GET: Mod(AEC/AGC/ALC) download required */
++ SST_ERR_STREAM_STOPPED, /* ANY: Stream is in stopped state */
++ SST_ERR_STREAM_IN_USE, /* ANY: Stream is already in use */
++
++ /* Capture specific error codes */
++ SST_CAP_ERR_INCMPLTE_CAPTURE_MSG,/* ANY:Incomplete message */
++ SST_CAP_ERR_CAPTURE_FAIL, /* ANY:Capture op failed */
++ SST_CAP_ERR_GET_DDR_NEW_SGLIST,
++ SST_CAP_ERR_UNDER_RUN, /* lack of input data */
++ SST_CAP_ERR_OVERFLOW, /* lack of output space */
++
++ /* Playback specific error codes*/
++ SST_PB_ERR_INCMPLTE_PLAY_MSG, /* ANY: Incomplete message */
++ SST_PB_ERR_PLAY_FAIL, /* ANY: Playback operation failed */
++ SST_PB_ERR_GET_DDR_NEW_SGLIST,
++
++ /* Codec manager specific error codes */
++ SST_LIB_ERR_LIB_DNLD_REQUIRED, /* ALLOC: Codec download required */
++ SST_LIB_ERR_LIB_NOT_SUPPORTED, /* Library is not supported */
++
++ /* Library manager specific error codes */
++ SST_SCC_ERR_PREP_DNLD_FAILED, /* Failed to prepare for codec download */
++ SST_SCC_ERR_LIB_DNLD_RES_FAILED, /* Lib download resume failed */
++ /* Scheduler specific error codes */
++ SST_SCH_ERR_FAIL, /* REPORT: */
++
++ /* DMA specific error codes */
++ SST_DMA_ERR_NO_CHNL_AVAILABLE, /* DMA Ch not available */
++ SST_DMA_ERR_INVALID_INPUT_PARAMS, /* Invalid input params */
++ SST_DMA_ERR_CHNL_ALREADY_SUSPENDED, /* Ch is suspended */
++ SST_DMA_ERR_CHNL_ALREADY_STARTED, /* Ch already started */
++ SST_DMA_ERR_CHNL_NOT_ENABLED, /* Ch not enabled */
++ SST_DMA_ERR_TRANSFER_FAILED, /* Transfer failed */
++ SST_SSP_ERR_ALREADY_ENABLED, /* REPORT: SSP already enabled */
++ SST_SSP_ERR_ALREADY_DISABLED, /* REPORT: SSP already disabled */
++ SST_SSP_ERR_NOT_INITIALIZED,
++
++ /* Other error codes */
++ SST_ERR_MOD_INIT_FAIL, /* Firmware Module init failed */
++
++ /* FW init error codes */
++ SST_RDR_ERR_IO_DEV_SEL_NOT_ALLOWED,
++ SST_RDR_ERR_ROUTE_ALREADY_STARTED,
++ SST_RDR_PREP_CODEC_DNLD_FAILED,
++
++ /* Memory debug error codes */
++ SST_ERR_DBG_MEM_READ_FAIL,
++ SST_ERR_DBG_MEM_WRITE_FAIL,
++
++ /* Decode error codes */
++ SST_ERR_DEC_NEED_INPUT_BUF,
++
++};
++
++enum dbg_mem_data_type {
++ /* Data type of debug read/write */
++ DATA_TYPE_U32,
++ DATA_TYPE_U16,
++ DATA_TYPE_U8,
++};
++
++/* CAUTION NOTE: All IPC message body must be multiple of 32 bits.*/
++
++/* IPC Header */
++union ipc_header {
++ struct {
++ u32 msg_id:8; /* Message ID - Max 256 Message Types */
++ u32 str_id:3; /* Undefined for SC communication */
++ u32 large:1; /* Large Message if large = 1 */
++ u32 reserved:4;/* Reserved for future use */
++ u32 data:14; /* Ack/Info for msg, size of msg in Mailbox */
++ u32 done:1; /* bit 30 */
++ u32 busy:1; /* bit 31 */
++ } part;
++ u32 full;
++} __attribute__ ((packed));
++
++struct ipc_header_fw_init {
++ struct snd_sst_fw_version fw_version;/* Firmware version details */
++ u16 result; /* Fw init result */
++ u8 module_id; /* Module ID in case of error */
++ u8 debug_info; /* Debug info from Module ID in case of fail */
++} __attribute__ ((packed));
++
++/* Firmware build info */
++struct sst_fw_build_info {
++ unsigned char date[16]; /* Firmware build date */
++ unsigned char time[16]; /* Firmware build time */
++} __attribute__ ((packed));
++
++/* Address and size info of a frame buffer in DDR */
++struct sst_address_info {
++ u32 addr; /* Address at IA */
++ u32 size; /* Size of the buffer */
++} __attribute__ ((packed));
++
++/* Time stamp */
++struct snd_sst_tstamp {
++ u64 samples_processed; /* capture - data in DDR */
++ u64 samples_rendered; /* playback - data rendered */
++ u64 bytes_processed; /* bytes decoded or encoded */
++ u32 sampling_frequency; /* eg: 48000, 44100 */
++
++};
++
++/* Frame info to play or capture */
++struct sst_frame_info {
++ u16 num_entries; /* number of entries to follow */
++ u16 rsrvd;
++ struct sst_address_info addr[MAX_NUM_SCATTER_BUFFERS];
++} __attribute__ ((packed));
++
++/* Frames info for decode */
++struct snd_sst_decode_info {
++ unsigned long long input_bytes_consumed;
++ unsigned long long output_bytes_produced;
++ struct sst_frame_info frames_in;
++ struct sst_frame_info frames_out;
++} __attribute__ ((packed));
++/* SST to IA print debug message*/
++struct ipc_sst_ia_print_params {
++ u32 string_size; /* Max value is 160 */
++ u8 prt_string[160]; /* Null terminated Char string */
++} __attribute__ ((packed));
++/* Voice data message */
++struct snd_sst_voice_data {
++ u16 num_bytes; /* Number of valid voice data bytes */
++ u8 pcm_wd_size; /* 0=8 bit, 1=16 bit 2=32 bit */
++ u8 reserved; /* Reserved */
++ u8 voice_data_buf[0]; /* Voice data buffer in bytes, little endian */
++} __attribute__ ((packed));
++
++/* SST to IA memory read debug message */
++struct ipc_sst_ia_dbg_mem_rw {
++ u16 num_bytes; /* Maximum of MAX_DBG_RW_BYTES */
++ u16 data_type; /* enum: dbg_mem_data_type */
++ u32 address; /* Memory address of data memory of data_type */
++ u8 rw_bytes[MAX_DBG_RW_BYTES];/* Maximum of 64 bytes can be RW */
++} __attribute__ ((packed));
++
++struct ipc_sst_ia_dbg_loop_back {
++ u16 num_dwords; /* Maximum of MAX_DBG_RW_BYTES */
++ u16 increment_val;/* Increments dwords by this value, 0- no increment */
++ u32 lpbk_dwords[MAX_LOOP_BACK_DWORDS];/* Maximum of 8 dwords loopback */
++} __attribute__ ((packed));
++
++/* Stream type params struture for Alloc stream */
++struct snd_sst_str_type {
++ u8 codec_type; /* Codec type */
++ u8 str_type; /* 1 = voice 2 = music */
++ u8 operation; /* Playback or Capture */
++ u8 protected_str; /* 0=Non DRM, 1=DRM */
++ u8 pvt_id; /* Driver Private ID */
++ u8 reserved; /* Reserved */
++ u16 result; /* Result used for acknowledgment */
++} __attribute__ ((packed));
++
++/* Library info structure */
++struct module_info {
++ u32 lib_version;
++ u32 lib_type;/*TBD- KLOCKWORK u8 lib_type;*/
++ u32 media_type;
++ u8 lib_name[12];
++ u32 lib_caps;
++ unsigned char b_date[16]; /* Lib build date */
++ unsigned char b_time[16]; /* Lib build time */
++} __attribute__ ((packed));
++
++/* Library slot info */
++struct lib_slot_info {
++ u8 slot_num; /* 1 or 2 */
++ u8 reserved1;
++ u16 reserved2;
++ u32 iram_size; /* slot size in IRAM */
++ u32 dram_size; /* slot size in DRAM */
++ u32 iram_offset; /* starting offset of slot in IRAM */
++ u32 dram_offset; /* starting offset of slot in DRAM */
++} __attribute__ ((packed));
++
++struct snd_sst_lib_download {
++ struct module_info lib_info; /* library info type, capabilities etc */
++ struct lib_slot_info slot_info; /* slot info to be downloaded */
++ u32 mod_entry_pt;
++};
++
++struct snd_sst_lib_download_info {
++ struct snd_sst_lib_download dload_lib;
++ u16 result; /* Result used for acknowledgment */
++ u8 pvt_id; /* Private ID */
++ u8 reserved; /* for alignment */
++};
++
++/* Alloc stream params structure */
++struct snd_sst_alloc_params {
++ struct snd_sst_str_type str_type;
++ struct snd_sst_stream_params stream_params;
++};
++
++struct snd_sst_fw_get_stream_params {
++ struct snd_sst_stream_params codec_params;
++ struct snd_sst_pmic_config pcm_params;
++};
++
++/* Alloc stream response message */
++struct snd_sst_alloc_response {
++ struct snd_sst_str_type str_type; /* Stream type for allocation */
++ struct snd_sst_lib_download lib_dnld; /* Valid only for codec dnld */
++};
++
++/* Drop response */
++struct snd_sst_drop_response {
++ u32 result;
++ u32 bytes;
++};
++
++/* CSV Voice call routing structure */
++struct snd_sst_control_routing {
++ u8 control; /* 0=start, 1=Stop */
++ u8 reserved[3]; /* Reserved- for 32 bit alignment */
++};
++
++
++
++/* struct ipc_msg_body {
++ union {
++ CODEC_PARAM_STRUCTURES;
++ PPP_PARAM_STRUCTURES;
++ struct snd_sst_alloc_params alloc_params;
++ struct snd_sst_alloc_response alloc_response;
++ struct snd_sst_stream_params stream_params;
++ struct sst_frame_info frames_info;
++ struct ipc_sst_ia_print_params print_params;
++ struct ipc_sst_ia_dbg_mem_rw dbg_mem_rw;
++ struct ipc_sst_ia_dbg_loop_back loop_back;
++ struct pmic_pcm_params ssp_params;
++ } u;
++};*/
++
++
++
++struct ipc_post {
++ struct list_head node;
++ union ipc_header header; /* driver specific */
++ char *mailbox_data;
++};
++
++#endif /* __INTEL_SST_FW_IPC_H__ */
+diff --git a/sound/pci/sst/intel_sst_ipc.c b/sound/pci/sst/intel_sst_ipc.c
+new file mode 100644
+index 0000000..710cf8f
+--- /dev/null
++++ b/sound/pci/sst/intel_sst_ipc.c
+@@ -0,0 +1,843 @@
++/*
++ * intel_sst_ipc.c - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corporation
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This file defines all ipc functions
++ */
++
++#include <linux/cdev.h>
++#include <linux/pci.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/syscalls.h>
++#include <linux/file.h>
++#include <linux/interrupt.h>
++#include <linux/list.h>
++#include <linux/workqueue.h>
++#include <linux/mutex.h>
++#include <linux/firmware.h>
++#include <sound/intel_lpe.h>
++#include <sound/intel_sst_ioctl.h>
++#include "intel_sst_fw_ipc.h"
++#include "intel_sst_common.h"
++
++/**
++* Debug function to test basic IPC between driver and SST firmware
++*/
++static void sst_send_loop_test(int loop_no)
++{
++ struct ipc_post *msg = NULL;
++ struct ipc_sst_ia_dbg_loop_back loop_msg;
++ static int large_num;
++
++ printk(KERN_DEBUG "SST DBG:Loop testing %d \n", loop_no);
++
++ if (large_num >= 4) {
++ printk(KERN_DEBUG "SST DBG:Loop testing complete.....\n");
++ return;
++ }
++ if (loop_no >= 4) {
++ /* large loop */
++ large_num++;
++ printk(KERN_DEBUG "SST DBG:Large msg \n");
++ if (sst_create_large_msg(&msg))
++ return;
++
++ loop_msg.increment_val = 1;
++ loop_msg.lpbk_dwords[0] = LOOP1;
++ loop_msg.lpbk_dwords[1] = LOOP2;
++ loop_msg.lpbk_dwords[2] = LOOP3;
++ loop_msg.lpbk_dwords[3] = LOOP4;
++ loop_msg.num_dwords = 4;
++ sst_fill_header(&msg->header, IPC_IA_DBG_LOOP_BACK, 1, loop_no);
++ msg->header.part.data = sizeof(u32) + sizeof(loop_msg);
++ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
++ memcpy(msg->mailbox_data + sizeof(u32),
++ &loop_msg, sizeof(loop_msg));
++ } else {
++ /* short loop */
++ printk(KERN_DEBUG "SST DBG:Loop Short msg \n");
++ if (sst_create_short_msg(&msg))
++ return;
++ sst_fill_header(&msg->header, IPC_IA_DBG_LOOP_BACK, 0, loop_no);
++ }
++ mutex_lock(&sst_drv_ctx->list_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ return;
++}
++
++/**
++* this function sends the sound card type to sst dsp engine
++*/
++static void sst_send_sound_card_type(void)
++{
++ struct ipc_post *msg = NULL;
++
++ printk(KERN_DEBUG "SST DBG:...called\n");
++
++ if (sst_create_short_msg(&msg))
++ return;
++
++ sst_fill_header(&msg->header, IPC_IA_SET_PMIC_TYPE, 0, 0);
++ msg->header.part.data = sst_drv_ctx->pmic_vendor;
++ mutex_lock(&sst_drv_ctx->list_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ return;
++}
++
++/**
++* sst_post_message - Posts message to SST
++* @work: Pointer to work structure
++*
++* This function is called by any component in driver which
++* wants to send an IPC message. This will post message only if
++* busy bit is free
++*/
++void sst_post_message(struct work_struct *work)
++{
++ struct ipc_post *msg;
++ union ipc_header header;
++ union interrupt_reg imr;
++ int retval = 0;
++ imr.full = 0;
++
++ /*To check if LPE is in stalled state.*/
++ retval = sst_stalled();
++ if (retval < 0) {
++ printk(KERN_ERR "SST ERR: SST is in stalled state \n");
++ return;
++ }
++ printk(KERN_DEBUG "SST DBG:..called \n");
++ mutex_lock(&sst_drv_ctx->list_lock);
++
++ /* check list */
++ if (list_empty(&sst_drv_ctx->ipc_dispatch_list)) {
++ /* list is empty, mask imr */
++ printk(KERN_DEBUG "SST DBG: Empty msg queue... masking \n");
++ imr.full = readl(sst_drv_ctx->shim + SST_IMRX);
++ if( imr.part.done_interrupt == 0) {
++ imr.part.done_interrupt = 1;
++ /* dummy register for shim workaround */
++ writel(imr.full, sst_drv_ctx->shim + SST_ISRD);
++ writel(imr.full, sst_drv_ctx->shim + SST_IMRX);
++ }
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ return;
++ }
++
++ /* check busy bit */
++ header.full = readl(sst_drv_ctx->shim + SST_IPCX);
++ if (header.part.busy) {
++ /* busy, unmask */
++ printk(KERN_DEBUG "SST DBG:Busy not free... unmasking\n");
++ imr.full = readl(sst_drv_ctx->shim + SST_IMRX);
++ imr.part.done_interrupt = 0;
++ /* dummy register for shim workaround */
++ writel(imr.full, sst_drv_ctx->shim + SST_ISRD);
++ writel(imr.full, sst_drv_ctx->shim + SST_IMRX);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ return;
++ }
++ /* copy msg from list */
++ msg = list_entry(sst_drv_ctx->ipc_dispatch_list.next,
++ struct ipc_post, node);
++ list_del(&msg->node);
++ printk(KERN_DEBUG "SST DBG:Post message: \
++ header = %x\n", msg->header.full);
++ printk(KERN_DEBUG "SST DBG:size: = %x\n", msg->header.part.data);
++ if (msg->header.part.large)
++ memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
++ msg->mailbox_data, msg->header.part.data);
++ /* dummy register for shim workaround */
++ writel(msg->header.full, sst_drv_ctx->shim + SST_ISRD);
++ writel(msg->header.full, sst_drv_ctx->shim + SST_IPCX);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++
++ kfree(msg->mailbox_data);
++ kfree(msg);
++ printk(KERN_DEBUG "SST DBG:...done\n");
++ return;
++}
++
++/**
++* this function clears the interrupt register after the interrupt
++* bottom half is complete allowing next interrupt to arrive
++*/
++void sst_clear_interrupt(void)
++{
++ union interrupt_reg isr;
++ union interrupt_reg imr;
++ union ipc_header clear_ipc;
++
++// printk(KERN_DEBUG "SST DBG:sst clearing interrupt \n");
++ imr.full = readl(sst_drv_ctx->shim + SST_IMRX);
++ isr.full = readl(sst_drv_ctx->shim + SST_ISRX);
++ /* write 1 to clear */;
++ isr.part.busy_interrupt = 1;
++ /* dummy register for shim workaround */
++ writel(isr.full, sst_drv_ctx->shim + SST_ISRD);
++ writel(isr.full, sst_drv_ctx->shim + SST_ISRX);
++ /* Set IA done bit */
++ clear_ipc.full = readl(sst_drv_ctx->shim + SST_IPCD);
++ clear_ipc.part.busy = 0;
++ clear_ipc.part.done = 1;
++ clear_ipc.part.data = IPC_ACK_SUCCESS;
++ /* dummy register for shim workaround */
++ writel(clear_ipc.full, sst_drv_ctx->shim + SST_ISRD);
++ writel(clear_ipc.full, sst_drv_ctx->shim + SST_IPCD);
++ /* un mask busy interrupt */
++ imr.part.busy_interrupt = 0;
++ /* dummy register for shim workaround */
++ writel(imr.full, sst_drv_ctx->shim + SST_ISRD);
++ writel(imr.full, sst_drv_ctx->shim + SST_IMRX);
++}
++
++/**
++* sst_process_message - Processes message from SST
++* @work: Pointer to work structure
++*
++* This function is scheduled by ISR
++* It take a msg from process_queue and does action based on msg
++*/
++void sst_process_message(struct work_struct *work)
++{
++ struct sst_ipc_msg_wq *msg =
++ container_of(work, struct sst_ipc_msg_wq, wq);
++ int str_id = msg->header.part.str_id;
++ struct stream_info *stream ;
++
++ printk(KERN_DEBUG "SST DBG:called \n");
++
++ /* based on msg in list call respective handler */
++ switch (msg->header.part.msg_id) {
++ case IPC_SST_BUF_UNDER_RUN:
++ case IPC_SST_BUF_OVER_RUN:
++ if (sst_validate_strid(str_id)) {
++ printk(KERN_ERR
++ "SST ERR: stream id %d invalid\n", str_id);
++ break;
++ }
++ printk(KERN_ERR
++ "SST ERR: Buffer under/overrun for %d\n",\
++ msg->header.part.str_id);
++ printk(KERN_DEBUG "SST DBG:Got Underrun & not to send data...ignore\n");
++ break;
++
++ case IPC_SST_GET_PLAY_FRAMES:
++ {
++ struct stream_info *stream ;
++
++ if (sst_validate_strid(str_id)) {
++ printk(KERN_ERR
++ "SST ERR: stream id %d invalid\n", str_id);
++ break;
++ }
++ /* call sst_play_frame */
++ stream = &sst_drv_ctx->streams[str_id];
++ printk(KERN_DEBUG "SST DBG:sst_play_frames for %d\n", \
++ msg->header.part.str_id);
++ mutex_lock(&sst_drv_ctx->streams[str_id].lock);
++ sst_play_frame(msg->header.part.str_id);
++ mutex_unlock(&sst_drv_ctx->streams[str_id].lock);
++ break;
++ }
++
++ case IPC_SST_PERIOD_ELAPSED:
++ {
++ struct snd_sst_tstamp fw_tstamp = {0,};
++ if (sst_validate_strid(str_id)) {
++ printk(KERN_ERR
++ "SST ERR: stream id %d invalid\n", str_id);
++ break;
++ }
++ stream = &sst_drv_ctx->streams[str_id];
++
++ printk(KERN_DEBUG "SST DBG:Period elapsed \n");
++ memcpy_fromio(&fw_tstamp,
++ ((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP) +
++ (str_id * sizeof(fw_tstamp))),
++ sizeof(fw_tstamp));
++ printk(KERN_DEBUG "SST DBG:samples \
++ played = %lld\n", fw_tstamp.samples_processed);
++ printk(KERN_DEBUG "SST DBG:diff in \
++ mesg = %d\n", msg->header.part.data);
++ sst_clear_interrupt();
++ if (stream->period_elapsed)
++ stream->period_elapsed(stream->pcm_substream);
++ return;
++ }
++
++ case IPC_SST_GET_CAPT_FRAMES:
++ /* call sst_capture_frame */
++ if (sst_validate_strid(str_id)) {
++ printk(KERN_ERR
++ "SST ERR: stream id %d invalid\n", str_id);
++ break;
++ }
++ stream = &sst_drv_ctx->streams[str_id];
++ printk(KERN_DEBUG "SST DBG:sst_capture_frames \
++ for %d\n", msg->header.part.str_id);
++ mutex_lock(&stream->lock);
++ if (stream->mmapped == false && stream->src == SST_DRV) {
++ printk(KERN_DEBUG "SST DBG:waking up block for copy...\n");
++ stream->data_blk.ret_code = 0;
++ stream->data_blk.condition = true;
++ stream->data_blk.on = false;
++ wake_up(&sst_drv_ctx->wait_queue);
++ } else
++ sst_capture_frame(msg->header.part.str_id);
++ mutex_unlock(&stream->lock);
++ break;
++
++ case IPC_IA_PRINT_STRING:
++ printk(KERN_DEBUG "SST DBG:been asked to print something by fw\n");
++ /* TBD */
++ break;
++
++ case IPC_IA_FW_INIT_CMPLT: {
++ /* send next data to FW */
++ struct ipc_header_fw_init *init =
++ (struct ipc_header_fw_init *)msg->mailbox;
++ int major = init->fw_version.major;
++ int minor = init->fw_version.minor;
++ int build = init->fw_version.build;
++
++ printk(KERN_DEBUG "SST DBG:*** FW Init msg came*** \n");
++ if (!init->result) {
++ sst_drv_ctx->sst_state = SST_FW_RUNNING;
++ printk(KERN_DEBUG "SST DBG:FW Version %x.%x \n",
++ init->fw_version.major, init->fw_version.minor);
++ printk(KERN_DEBUG "SST DBG:Build No %x Type %x \n",
++ init->fw_version.build, init->fw_version.type);
++#ifdef SND_LOOP_TEST
++ sst_send_loop_test(0);
++#endif
++ sst_send_sound_card_type();
++
++ pr_info(
++ "INFO: ***SST FW VERSION*** +\
++ = %02d.%02d.%02d\n", \
++ major, \
++ minor, build);
++
++ printk(KERN_DEBUG "SST DBG:Time slot Status %d\n", sst_drv_ctx->rx_time_slot_status);
++ if((sst_drv_ctx->rx_time_slot_status != RX_TIMESLOT_UNINIT) && (sst_drv_ctx->sst_state == SST_FW_RUNNING))
++ sst_enable_rx_timeslot(sst_drv_ctx->rx_time_slot_status);
++
++ } else {
++ sst_drv_ctx->sst_state = SST_ERROR;
++ printk(KERN_DEBUG "SST DBG:FW Init \
++ failed, Error %x\n", init->result);
++ printk(KERN_DEBUG "SST DBG:FW Init failed, Module %x, Debug Info %x \n",
++ init->module_id, init->debug_info);
++ }
++ printk(KERN_DEBUG "SST DBG:Waking up... open\n");
++ sst_wake_up_alloc_block(sst_drv_ctx, FW_DWNL_ID, 0, NULL);
++ break;
++ }
++
++ case IPC_SST_STREAM_PROCESS_FATAL_ERR:
++ if (sst_validate_strid(str_id)) {
++ printk(KERN_ERR
++ "SST ERR: stream id %d invalid\n", str_id);
++ break;
++ }
++ printk(KERN_ERR
++ "SST ERR: codec fatal error %x for +\
++ stream %d... \n",\
++ msg->header.full, \
++ msg->header.part.str_id);
++ printk(KERN_ERR
++ "SST ERR: Dropping the stream \n");
++ sst_drop_stream(msg->header.part.str_id);
++ break;
++ case IPC_IA_LPE_GETTING_STALLED:
++ sst_drv_ctx->lpe_stalled = 1;
++ break;
++ case IPC_IA_LPE_UNSTALLED:
++ sst_drv_ctx->lpe_stalled = 0;
++ break;
++ default:
++ /* Illegal case */
++ printk(KERN_ERR
++ "SST ERR: Unhandled case msg_id %x +\
++ message %x\n",\
++ msg->header.part.msg_id, msg->header.full);
++ }
++ sst_clear_interrupt();
++ return;
++}
++
++/**
++* sst_process_reply - Processes reply message from SST
++* @work: Pointer to work structure
++*
++* This function is scheduled by ISR
++* It take a reply msg from response_queue and
++* does action based on msg
++*/
++void sst_process_reply(struct work_struct *work)
++{
++ struct sst_ipc_msg_wq *msg =
++ container_of(work, struct sst_ipc_msg_wq, wq);
++
++ int str_id = msg->header.part.str_id;
++ struct stream_info *str_info;
++ switch (msg->header.part.msg_id) {
++ case IPC_IA_TARGET_DEV_SELECT: {
++ if (!msg->header.part.data) {
++ sst_drv_ctx->tgt_dev_blk.ret_code = 0;
++ } else {
++ printk(KERN_ERR
++ "SST ERR: Msg %x reply +\
++ error %x \n",\
++ msg->header.part.msg_id, msg->header.part.data);
++ sst_drv_ctx->tgt_dev_blk.ret_code =
++ -msg->header.part.data;
++ }
++
++ if (sst_drv_ctx->tgt_dev_blk.on == true) {
++ sst_drv_ctx->tgt_dev_blk.condition = true;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ break;
++ }
++ case IPC_IA_GET_FW_INFO: {
++ struct snd_sst_fw_info *fw_info =
++ (struct snd_sst_fw_info *)msg->mailbox;
++ if (msg->header.part.large) {
++ int major = fw_info->fw_version.major;
++ int minor = fw_info->fw_version.minor;
++ int build = fw_info->fw_version.build;
++ printk(KERN_DEBUG "SST DBG:Msg \
++ succedded %x \n", msg->header.part.msg_id);
++ dev_info(&sst_drv_ctx->pci->dev, \
++ "INFO: ***FW VERSION*** +\
++ = %02d.%02d.%02d\n", major, \
++ minor, build);
++
++ memcpy_fromio(sst_drv_ctx->fw_info_blk.data,
++ ((struct snd_sst_fw_info *)(msg->mailbox)),
++ sizeof(struct snd_sst_fw_info));
++ sst_drv_ctx->fw_info_blk.ret_code = 0;
++ } else {
++ printk(KERN_ERR
++ "SST ERR: Msg %x reply +\
++ error %x \n",\
++ msg->header.part.msg_id, msg->header.part.data);
++ sst_drv_ctx->fw_info_blk.ret_code =
++ -msg->header.part.data;
++ }
++ if (sst_drv_ctx->fw_info_blk.on == true) {
++ printk(KERN_DEBUG "SST DBG:Memcopy succedded \n");
++ sst_drv_ctx->fw_info_blk.on = false;
++ sst_drv_ctx->fw_info_blk.condition = true;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ break;
++ }
++ case IPC_IA_SET_STREAM_MUTE: {
++ if (!msg->header.part.data) {
++ printk(KERN_DEBUG "SST DBG:Msg \
++ succedded %x \n", msg->header.part.msg_id);
++ sst_drv_ctx->mute_info_blk.ret_code = 0;
++ } else {
++ printk(KERN_ERR
++ "SST ERR: Msg %x reply +\
++ error %x \n", \
++ msg->header.part.msg_id, msg->header.part.data);
++ sst_drv_ctx->mute_info_blk.ret_code =
++ -msg->header.part.data;
++
++ }
++ if (sst_drv_ctx->mute_info_blk.on == true) {
++ sst_drv_ctx->mute_info_blk.on = false;
++ sst_drv_ctx->mute_info_blk.condition = true;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ break;
++ }
++ case IPC_IA_SET_STREAM_VOL: {
++ if (!msg->header.part.data) {
++ printk(KERN_DEBUG "SST DBG:Msg \
++ succedded %x \n", msg->header.part.msg_id);
++ sst_drv_ctx->vol_info_blk.ret_code = 0;
++ } else {
++ printk(KERN_ERR
++ "SST ERR: Msg %x reply +\
++ error %x \n",\
++ msg->header.part.msg_id, \
++ msg->header.part.data);
++ sst_drv_ctx->vol_info_blk.ret_code =
++ -msg->header.part.data;
++
++ }
++
++ if (sst_drv_ctx->vol_info_blk.on == true) {
++ sst_drv_ctx->vol_info_blk.on = false;
++ sst_drv_ctx->vol_info_blk.condition = true;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ break;
++ }
++ case IPC_IA_GET_STREAM_VOL:
++ if (msg->header.part.large) {
++ printk(KERN_DEBUG "SST DBG:Large Msg Received Successfully\n");
++ printk(KERN_DEBUG "SST DBG:Msg \
++ succedded %x \n", msg->header.part.msg_id);
++ memcpy_fromio(sst_drv_ctx->vol_info_blk.data,
++ (void *) msg->mailbox,
++ sizeof(struct snd_sst_vol));
++ sst_drv_ctx->vol_info_blk.ret_code = 0;
++ } else {
++ printk(KERN_ERR
++ "SST ERR: Msg %x +\
++ reply error %x \n",\
++ msg->header.part.msg_id, msg->header.part.data);
++ sst_drv_ctx->vol_info_blk.ret_code =
++ -msg->header.part.data;
++ }
++ if (sst_drv_ctx->vol_info_blk.on == true) {
++ sst_drv_ctx->vol_info_blk.on = false;
++ sst_drv_ctx->vol_info_blk.condition = true;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ break;
++
++ case IPC_IA_GET_STREAM_PARAMS:
++ if (sst_validate_strid(str_id)) {
++ printk(KERN_ERR
++ "SST ERR: stream id %d invalid\n", str_id);
++ break;
++ }
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (msg->header.part.large) {
++ printk(KERN_DEBUG "SST DBG:The Large \
++ message for get stream params\n");
++ printk(KERN_DEBUG "SST DBG:Msg +\
++ succedded %x \n", msg->header.part.msg_id);
++ memcpy_fromio(str_info->ctrl_blk.data,
++ ((void *)(msg->mailbox)),
++ sizeof(struct snd_sst_fw_get_stream_params));
++ str_info->ctrl_blk.ret_code = 0;
++ } else {
++ printk(KERN_ERR
++ "SST ERR: The message for +\
++ get params is not large\n");
++ printk(KERN_ERR
++ "SST ERR: Msg %x reply error %x \n",\
++ msg->header.part.msg_id, msg->header.part.data);
++ str_info->ctrl_blk.ret_code =
++ -msg->header.part.data;
++ }
++ if (str_info->ctrl_blk.on == true) {
++ str_info->ctrl_blk.on = false;
++ str_info->ctrl_blk.condition = true;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ break;
++ case IPC_IA_DECODE_FRAMES:
++ if (sst_validate_strid(str_id)) {
++ printk(KERN_ERR
++ "SST ERR: stream id %d invalid\n", str_id);
++ break;
++ }
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (msg->header.part.large) {
++ printk(KERN_DEBUG "SST DBG:Msg \
++ succedded %x \n", msg->header.part.msg_id);
++ memcpy_fromio(str_info->data_blk.data,
++ ((void *)(msg->mailbox)),
++ sizeof(struct snd_sst_decode_info));
++ str_info->data_blk.ret_code = 0;
++ } else {
++ printk(KERN_ERR
++ "SST ERR: Msg %x reply error %x \n",\
++ msg->header.part.msg_id, msg->header.part.data);
++ str_info->data_blk.ret_code =
++ -msg->header.part.data;
++ }
++ if (str_info->data_blk.on == true) {
++ str_info->data_blk.on = false;
++ str_info->data_blk.condition = true;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ break;
++ case IPC_IA_DRAIN_STREAM:
++ if (sst_validate_strid(str_id)) {
++ printk(KERN_ERR
++ "SST ERR: stream id %d invalid\n", str_id);
++ break;
++ }
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (!msg->header.part.data) {
++ printk(KERN_DEBUG "SST DBG:Msg \
++ succedded %x \n", msg->header.part.msg_id);
++ str_info->ctrl_blk.ret_code = 0;
++
++ } else {
++ printk(KERN_ERR
++ "SST ERR: Msg %x reply error %x \n",\
++ msg->header.part.msg_id, msg->header.part.data);
++ str_info->ctrl_blk.ret_code = -msg->header.part.data;
++
++ }
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (str_info->data_blk.on == true) {
++ str_info->data_blk.on = false;
++ str_info->data_blk.condition = true;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ break;
++
++ case IPC_IA_DROP_STREAM:
++ if (sst_validate_strid(str_id)) {
++ printk(KERN_ERR
++ "SST ERR: stream id %d invalid\n", str_id);
++ break;
++ }
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (msg->header.part.large) {
++ struct snd_sst_drop_response *drop_resp =
++ (struct snd_sst_drop_response *)msg->mailbox;
++
++ printk(KERN_DEBUG "SST DBG:Drop returns with bytes 0x%x \n",
++ drop_resp->bytes);
++
++ str_info->curr_bytes = drop_resp->bytes;
++ str_info->ctrl_blk.ret_code = 0;
++ } else {
++ printk(KERN_ERR
++ "SST ERR: Msg %x reply error %x \n", \
++ msg->header.part.msg_id, msg->header.part.data);
++ str_info->ctrl_blk.ret_code = -msg->header.part.data;
++ }
++ if (str_info->ctrl_blk.on == true) {
++ str_info->ctrl_blk.on = false;
++ str_info->ctrl_blk.condition = true;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ break;
++ case IPC_IA_ENABLE_RX_TIME_SLOT:
++ if (!msg->header.part.data) {
++ printk(KERN_DEBUG "SST DBG:Msg \
++ succedded IPC_IA_ENABLE_RX_TIME_SLOT %x \n", msg->header.part.msg_id);
++ sst_drv_ctx->hs_info_blk.ret_code = 0;
++ } else {
++ printk(KERN_ERR
++ "SST ERR: Msg %x +\
++ reply error %x \n", msg->header.part.msg_id, \
++ msg->header.part.data);
++ sst_drv_ctx->hs_info_blk.ret_code = -msg->header.part.data;
++ }
++
++
++ if (sst_drv_ctx->hs_info_blk.on == true) {
++ sst_drv_ctx->hs_info_blk.on = false;
++ sst_drv_ctx->hs_info_blk.condition = true;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ break;
++ case IPC_IA_PAUSE_STREAM:
++ case IPC_IA_RESUME_STREAM:
++ case IPC_IA_SET_STREAM_PARAMS:
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (!msg->header.part.data) {
++ printk(KERN_DEBUG "SST DBG:Msg \
++ succedded %x \n", msg->header.part.msg_id);
++ str_info->ctrl_blk.ret_code = 0;
++ } else {
++ printk(KERN_ERR
++ "SST ERR: Msg %x +\
++ reply error %x \n", msg->header.part.msg_id, \
++ msg->header.part.data);
++ str_info->ctrl_blk.ret_code = -msg->header.part.data;
++ }
++ if (sst_validate_strid(str_id)) {
++ printk(KERN_ERR
++ "SST ERR: stream id %d +\
++ invalid\n", str_id);
++ break;
++ }
++
++ if (str_info->ctrl_blk.on == true) {
++ str_info->ctrl_blk.on = false;
++ str_info->ctrl_blk.condition = true;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ break;
++
++ case IPC_IA_FREE_STREAM:
++ if (!msg->header.part.data) {
++ printk(KERN_DEBUG "SST DBG:Stream %d freed\n", str_id);
++ } else {
++ printk(KERN_ERR
++ "SST ERR: Free for %d +\
++ returned error %x\n", str_id, msg->header.part.data);
++ }
++ break;
++ case IPC_IA_ALLOC_STREAM: {
++ /* map to stream, call play */
++ struct snd_sst_alloc_response *resp =
++ (struct snd_sst_alloc_response *)msg->mailbox;
++ if (resp->str_type.result) {
++ /* error case */
++ struct snd_sst_alloc_response *lib = NULL;
++ printk(KERN_ERR
++ "SST ERR: error +\
++ alloc stream = %x \n", resp->str_type.result);
++ if (resp->str_type.result ==
++ SST_LIB_ERR_LIB_DNLD_REQUIRED) {
++ lib = kzalloc(sizeof(*lib), GFP_ATOMIC);
++ if (!lib) {
++ printk(KERN_ERR
++ "SST ERR: +\
++ mem allocation failed \n");
++ break;
++ }
++ memcpy(lib, msg->mailbox, sizeof(*lib));
++ /* library needs to be downloaded */
++ printk(KERN_DEBUG "SST DBG:Codec Download required \n");
++ }
++ sst_wake_up_alloc_block(sst_drv_ctx,
++ resp->str_type.pvt_id,
++ (-resp->str_type.result), lib);
++ break;
++ }
++ sst_alloc_stream_response(str_id, &resp->str_type);
++ break;
++ }
++
++ case IPC_IA_DBG_LOOP_BACK:
++ /* Debug loop back msg */
++ printk(KERN_DEBUG "SST DBG:Loop back came \n");
++ if (msg->header.part.data)
++ printk(KERN_DEBUG "SST DBG:Possible error if not large \n");
++ printk(KERN_DEBUG "SST DBG:Loop ID: %d\n", str_id);
++ if (msg->header.part.large) {
++ struct ipc_sst_ia_dbg_loop_back *loop_msg =
++ (struct ipc_sst_ia_dbg_loop_back *)msg->mailbox;
++ int i;
++ printk(KERN_DEBUG "SST DBG:Got large loop back: Words %d\n",
++ loop_msg->num_dwords);
++ for (i = 0; i < loop_msg->num_dwords; i++) {
++ printk(KERN_DEBUG "SST DBG:Loop \
++ Word %d = %d \n", i,
++ loop_msg->lpbk_dwords[i]);
++ }
++ }
++ sst_send_loop_test((str_id + 1));
++ break;
++
++ case IPC_IA_PLAY_FRAMES:
++ case IPC_IA_CAPT_FRAMES:
++ if (sst_validate_strid(str_id)) {
++ printk(KERN_ERR
++ "SST ERR: stream id %d invalid\n" , str_id);
++ break;
++ }
++ printk(KERN_DEBUG "SST DBG:Ack for play/capt frames recived \n");
++ break;
++
++ case IPC_IA_PREP_LIB_DNLD: {
++ struct snd_sst_str_type *str_type =
++ (struct snd_sst_str_type *)msg->mailbox;
++ printk(KERN_DEBUG "SST DBG:Prep Lib \
++ download %x\n", msg->header.part.msg_id);
++ if (str_type->result) {
++ printk(KERN_ERR
++ "SST ERR: Error in prep lib +\
++ download 0x%x\n" ,\
++ str_type->result);
++ } else
++ printk(KERN_DEBUG "SST DBG:Need to download codec now...\n");
++ /* FIXME remove this workaround */
++ str_type->result = 0;
++ sst_wake_up_alloc_block(sst_drv_ctx, str_type->pvt_id,
++ str_type->result, NULL);
++ break;
++ }
++
++ case IPC_IA_LIB_DNLD_CMPLT: {
++ struct snd_sst_lib_download_info *resp =
++ (struct snd_sst_lib_download_info *)msg->mailbox;
++ int retval = resp->result;
++
++ printk(KERN_DEBUG "SST DBG:Lib download \
++ cmplt %x\n", msg->header.part.msg_id);
++ if (resp->result) {
++ printk(KERN_ERR
++ "SST ERR: Error in +\
++ lib dload %x\n",\
++ resp->result);
++ } else {
++ printk(KERN_DEBUG "SST DBG:Codec download complete...\n");
++ printk(KERN_DEBUG "SST DBG:Downloaded codec Type %d Ver %d Built %s: %s\n",
++ resp->dload_lib.lib_info.lib_type,
++ resp->dload_lib.lib_info.lib_version,
++ resp->dload_lib.lib_info.b_date,
++ resp->dload_lib.lib_info.b_time);
++ }
++ sst_wake_up_alloc_block(sst_drv_ctx, resp->pvt_id,
++ retval, NULL);
++ break;
++ }
++
++ case IPC_IA_GET_FW_VERSION: {
++ struct ipc_header_fw_init *version =
++ (struct ipc_header_fw_init *)msg->mailbox;
++ int major = version->fw_version.major;
++ int minor = version->fw_version.minor;
++ int build = version->fw_version.build;
++ dev_info(&sst_drv_ctx->pci->dev, \
++ "INFO: ***LOADED SST FW +\
++ VERSION*** = %02d.%02d.%02d\n",\
++ major, minor, build);
++ break;
++ }
++ case IPC_IA_GET_FW_BUILD_INF: {
++ struct sst_fw_build_info *build =
++ (struct sst_fw_build_info *)msg->mailbox;
++ dev_info(&sst_drv_ctx->pci->dev, \
++ "INFO: Build date %s +\
++ Time %s",\
++ build->date, build->time);
++ break;
++ }
++ case IPC_IA_SET_PMIC_TYPE:
++ break;
++ default:
++ /* Illegal case */
++ printk(KERN_ERR
++ "SST ERR: process reply :default +\
++ case = %x\n" , msg->header.full);
++
++ }
++ sst_clear_interrupt();
++ return;
++}
+--
+1.6.2.2
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-5-8.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-5-8.patch
new file mode 100644
index 0000000..888b6a0
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-5-8.patch
@@ -0,0 +1,1690 @@
+From 174aae2c1dcb1b7c49188e13167c21687ef96edc Mon Sep 17 00:00:00 2001
+From: R, Dharageswari <dharageswari.r@intel.com>
+Date: Thu, 29 Apr 2010 20:26:02 +0530
+Subject: [PATCH] ADR-Post-Beta-0.05.002.03-5/8-Moorestown Audio Drivers: SST stream ops module
+
+This adds the stream module which contains the function for stream
+operations & control. For a stream the control and data are two major parts.
+This module implements the control (play/pause/resume/stop/free/alloc)
+for a stream. It also implements data play/capture frames where buffers are
+sent/received to FW. The objective of SST driver is to achieve Low power
+playback by utilizing DSP as much aspossible. So SST gets large music
+buffers from player/middleware and sends them to FW in a scatter gather list.
+The FW decodes and renders them, while IA can goto low power states
+
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+
+ new file: sound/pci/sst/intel_sst_stream.c
+Patch-mainline: 2.6.35?
+---
+ sound/pci/sst/intel_sst_stream.c | 1658 ++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 1658 insertions(+), 0 deletions(-)
+ create mode 100644 sound/pci/sst/intel_sst_stream.c
+
+diff --git a/sound/pci/sst/intel_sst_stream.c b/sound/pci/sst/intel_sst_stream.c
+new file mode 100644
+index 0000000..07a4e55
+--- /dev/null
++++ b/sound/pci/sst/intel_sst_stream.c
+@@ -0,0 +1,1658 @@
++/*
++ * intel_sst_stream.c - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This file contains the stream operations of SST driver
++ */
++
++#include <linux/cdev.h>
++#include <linux/pci.h>
++#include <linux/kernel.h>
++#include <linux/syscalls.h>
++#include <linux/file.h>
++#include <linux/interrupt.h>
++#include <linux/list.h>
++#include <linux/uaccess.h>
++#include <linux/firmware.h>
++#ifdef CONFIG_MSTWN_POWER_MGMT
++#include <linux/intel_mid.h>
++#endif
++#include <linux/rar/rar_register.h>
++#include <linux/rar/memrar.h>
++#include <sound/intel_sst_ioctl.h>
++#include <sound/intel_lpe.h>
++#include "intel_sst_fw_ipc.h"
++#include "intel_sst_common.h"
++/**
++* sst_alloc_stream - Send msg for a new stream ID
++* @params: stream params
++* @stream_ops: operation of stream PB/capture
++* @codec: codec for stream
++* @session_id: pvt_id passed by MMF to distinguish stream
++*
++* This function is called by any function which wants to start
++* a new stream. This also check if a stream exists which is idle
++* it initializes idle stream id to this request
++*/
++int sst_alloc_stream(char *params, unsigned int stream_ops,
++ u8 codec, unsigned int session_id)
++{
++ struct ipc_post *msg = NULL;
++ struct snd_sst_alloc_params alloc_param = {{0,},};
++
++ printk(KERN_DEBUG "SST DBG:entering sst_alloc_stream \n");
++ printk(KERN_DEBUG "SST DBG:%d %d %d\n", stream_ops, codec, session_id);
++
++ BUG_ON(!params);
++
++ /* send msg to FW to allocate a stream */
++ if (sst_create_large_msg(&msg))
++ return -ENOMEM;
++
++ sst_fill_header(&msg->header, IPC_IA_ALLOC_STREAM, 1, 0);
++ msg->header.part.data = sizeof(alloc_param) + sizeof(u32);
++ alloc_param.str_type.codec_type = codec;
++ alloc_param.str_type.str_type = STREAM_TYPE_MUSIC; /* music */
++ alloc_param.str_type.operation = stream_ops;
++ alloc_param.str_type.protected_str = 0; /* non drm */
++ alloc_param.str_type.pvt_id = session_id;
++ memcpy(&alloc_param.stream_params, params,
++ sizeof(struct snd_sst_stream_params));
++
++ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
++ memcpy(msg->mailbox_data + sizeof(u32), &alloc_param,
++ sizeof(alloc_param));
++ mutex_lock(&sst_drv_ctx->list_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ printk(KERN_DEBUG "SST DBG:alloc stream done\n");
++ return 0;
++}
++
++/**
++* sst_get_stream_params - Send msg to query for stream parameters
++* @str_id: stream id for which the parameters are queried for
++* @get_params: out parameters to which the parameters are copied to
++*
++* This function is called when the stream parameters are queiried for
++*/
++int sst_get_stream_params(int str_id,
++ struct snd_sst_get_stream_params *get_params)
++{
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++ struct stream_info *str_info;
++ struct snd_sst_fw_get_stream_params *fw_params;
++
++ printk(KERN_DEBUG "SST DBG:get_stream for %d\n", str_id);
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return retval;
++
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (str_info->status != STREAM_UN_INIT) {
++ if (str_info->ctrl_blk.on == true) {
++ printk(KERN_ERR\
++ "SST ERR: control path is already in use \n");
++ return -EINVAL;
++ }
++ if (sst_create_short_msg(&msg)) {
++ printk(KERN_ERR
++ "SST ERR: message creation failed\n");
++ return -ENOMEM;
++ }
++ fw_params = kzalloc(sizeof(*fw_params), GFP_ATOMIC);
++ if (!fw_params) {
++ printk(KERN_ERR
++ "SST ERR: mem allcoation failed\n ");
++ return -ENOMEM;
++ }
++
++ sst_fill_header(&msg->header, IPC_IA_GET_STREAM_PARAMS,
++ 0, str_id);
++ str_info->ctrl_blk.condition = false;
++ str_info->ctrl_blk.ret_code = 0;
++ str_info->ctrl_blk.on = true;
++ str_info->ctrl_blk.data = (void *) fw_params;
++ mutex_lock(&sst_drv_ctx->list_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_interruptible_timeout(sst_drv_ctx,
++ &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
++ if (retval) {
++ get_params->codec_params.result = retval;
++ kfree(fw_params);
++ return -EIO;
++ }
++ memcpy(&get_params->pcm_params, &fw_params->pcm_params,
++ sizeof(fw_params->pcm_params));
++ memcpy(&get_params->codec_params.sparams,
++ &fw_params->codec_params,
++ sizeof(fw_params->codec_params));
++ get_params->codec_params.result = 0;
++ get_params->codec_params.stream_id = str_id;
++ get_params->codec_params.codec = str_info->codec;
++ get_params->codec_params.ops = str_info->ops;
++ get_params->codec_params.stream_type = str_info->str_type;
++ kfree(fw_params);
++ } else {
++ printk(KERN_DEBUG "SST DBG:Stream is not in the init state\n");
++ }
++ return retval;
++}
++
++/**
++* sst_get_fw_info - Send msg to query for firmware configurations
++* @info: out param that holds the firmare configurations
++*
++* This function is called when the firmware configurations are queiried for
++*/
++int sst_get_fw_info(struct snd_sst_fw_info *info)
++{
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++
++ printk(KERN_DEBUG "SST DBG:...called \n");
++
++ if (sst_create_short_msg(&msg)) {
++ printk(KERN_ERR
++ "SST ERR: message creation failed\n");
++ return -ENOMEM;
++ }
++
++ sst_fill_header(&msg->header, IPC_IA_GET_FW_INFO, 0, 0);
++ sst_drv_ctx->fw_info_blk.condition = false;
++ sst_drv_ctx->fw_info_blk.ret_code = 0;
++ sst_drv_ctx->fw_info_blk.on = true;
++ sst_drv_ctx->fw_info_blk.data = info;
++ mutex_lock(&sst_drv_ctx->list_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_interruptible_timeout(sst_drv_ctx,
++ &sst_drv_ctx->fw_info_blk, SST_BLOCK_TIMEOUT);
++ if (retval) {
++ printk(KERN_ERR \
++ "SST ERR: error in fw_info = %d\n", retval);
++ retval = -EIO;
++ }
++ return retval;
++}
++
++
++/**
++* sst_alloc_stream_response - process alloc reply
++* @str_id: stream id for which the stream has been allocated
++* @type the stream parameters that are allocated
++
++* This function is called by firmware as a response to stream allcoation
++* request
++*/
++int sst_alloc_stream_response(unsigned int str_id,
++ struct snd_sst_str_type *type)
++{
++ int retval = 0, i, valid_str = 0;
++ struct ipc_post *msg = NULL;
++
++ /* allocation succesfull */
++ printk(KERN_DEBUG "SST DBG:stream number given = %d \n", str_id);
++
++ for (i = 0; i < MAX_ACTIVE_STREAM; i++) {
++ if (type->pvt_id == sst_drv_ctx->alloc_block[i].sst_id) {
++ valid_str = 1;
++ break;
++ }
++ }
++ if (!valid_str) {
++ /* this is not valid stream */
++ printk(KERN_ERR \
++ "SST ERR: Invalid stream allocation detetcted... freeing\n");
++ if (sst_create_short_msg(&msg))
++ return -ENOMEM;
++ sst_fill_header(&msg->header, IPC_IA_FREE_STREAM, 0, str_id);
++ mutex_lock(&sst_drv_ctx->list_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ return 0;
++ }
++
++ sst_init_stream(&sst_drv_ctx->streams[str_id], type->codec_type,
++ type->str_type, type->pvt_id, type->operation);
++
++ printk(KERN_DEBUG "SST DBG:stream pvt id = %d \n", type->pvt_id);
++
++ /* Unblock with retval code */
++ sst_wake_up_alloc_block(sst_drv_ctx, type->pvt_id, str_id, NULL);
++ return retval;
++}
++
++/**
++* sst_pause_stream - Send msg for a pausing stream
++* @str_id: stream ID
++*
++* This function is called by any function which wants to pause
++* an already running stream.
++*/
++int sst_pause_stream(int str_id)
++{
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++ struct stream_info *str_info;
++
++ printk(KERN_DEBUG "SST DBG:sst_pause_stream for %d\n", str_id);
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return retval;
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (str_info->status == STREAM_PAUSED)
++ return 0;
++ if (str_info->status == STREAM_RUNNING ||
++ str_info->status == STREAM_INIT) {
++ if (str_info->prev == STREAM_UN_INIT)
++ return -EBADRQC;
++ if (str_info->ctrl_blk.on == true) {
++ printk(KERN_ERR \
++ "SST ERR: control path is already in use\n ");
++ return -EINVAL;
++ }
++ if (sst_create_short_msg(&msg))
++ return -ENOMEM;
++
++ sst_fill_header(&msg->header, IPC_IA_PAUSE_STREAM, 0, str_id);
++ str_info->ctrl_blk.condition = false;
++ str_info->ctrl_blk.ret_code = 0;
++ str_info->ctrl_blk.on = true;
++ mutex_lock(&sst_drv_ctx->list_lock);
++ list_add_tail(&msg->node,
++ &sst_drv_ctx->ipc_dispatch_list);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_interruptible_timeout(sst_drv_ctx,
++ &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
++ if (retval == 0) {
++ str_info->prev = str_info->status;
++ str_info->status = STREAM_PAUSED;
++ /*sst_ospm_send_event(OSPM_EVENT_SUBSYS_STOP_PLAY);*/
++ } else if (retval == SST_ERR_INVALID_STREAM_ID) {
++ retval = -EINVAL;
++ sst_clean_stream(str_info);
++ }
++ } else {
++ retval = -EBADRQC;
++ printk(KERN_ERR "SST ERR: +\
++ BADQRC for stream\n ");
++ }
++
++ return retval;
++}
++int sst_enable_rx_timeslot(int status)
++{
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++
++ if (sst_create_short_msg(&msg)) {
++ printk(KERN_ERR \
++ "SST ERR: short message mem +\
++ allocation failed\n");
++ return -ENOMEM;
++ }
++ printk(KERN_DEBUG "SST DBG:ipc message sending:: **********SST_ENABLE_RX_TIME_SLOT*********** \n");
++ sst_fill_header(&msg->header, IPC_IA_ENABLE_RX_TIME_SLOT, 0, 0);
++ msg->header.part.data = status;
++ sst_drv_ctx->hs_info_blk.condition = false;
++ sst_drv_ctx->hs_info_blk.ret_code = 0;
++ sst_drv_ctx->hs_info_blk.on = true;
++ mutex_lock(&sst_drv_ctx->list_lock);
++ list_add_tail(&msg->node,
++ &sst_drv_ctx->ipc_dispatch_list);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_interruptible_timeout(sst_drv_ctx,
++ &sst_drv_ctx->hs_info_blk, SST_BLOCK_TIMEOUT);
++ return retval;
++}
++
++/**
++* sst_resume_stream - Send msg for resuming stream
++* @str_id: stream ID
++*
++* This function is called by any function which wants to resume
++* an already paused stream.
++*/
++int sst_resume_stream(int str_id)
++{
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++ struct stream_info *str_info;
++
++ printk(KERN_DEBUG "SST DBG:sst_resume_stream for %d\n", str_id);
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return retval;
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (str_info->status == STREAM_RUNNING)
++ return 0;
++ if (str_info->status == STREAM_PAUSED) {
++ if (str_info->ctrl_blk.on == true) {
++ printk(KERN_ERR \
++ "SST ERR: control path is already in use\n");
++ return -EINVAL;
++ }
++ if (sst_create_short_msg(&msg)) {
++ printk(KERN_ERR \
++ "SST ERR: short message mem +\
++ allocation failed\n");
++ return -ENOMEM;
++ }
++ sst_fill_header(&msg->header, IPC_IA_RESUME_STREAM, 0, str_id);
++ str_info->ctrl_blk.condition = false;
++ str_info->ctrl_blk.ret_code = 0;
++ str_info->ctrl_blk.on = true;
++ mutex_lock(&sst_drv_ctx->list_lock);
++ list_add_tail(&msg->node,
++ &sst_drv_ctx->ipc_dispatch_list);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_interruptible_timeout(sst_drv_ctx,
++ &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
++ if (!retval) {
++ if (str_info->prev == STREAM_RUNNING)
++ str_info->status = STREAM_RUNNING;
++ else
++ str_info->status = STREAM_INIT;
++ str_info->prev = STREAM_PAUSED;
++ /*sst_ospm_send_event(OSPM_EVENT_SUBSYS_START_PLAY);*/
++ } else if (retval == -SST_ERR_INVALID_STREAM_ID) {
++ retval = -EINVAL;
++ sst_clean_stream(str_info);
++ }
++ } else {
++ retval = -EBADRQC;
++ printk(KERN_ERR "SST ERR: BADQRC for stream\n");
++ }
++
++ return retval;
++}
++
++
++/**
++* sst_drop_stream - Send msg for stopping stream
++* @str_id: stream ID
++*
++* This function is called by any function which wants to stop
++* a stream.
++*/
++int sst_drop_stream(int str_id)
++{
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++ struct sst_stream_bufs *bufs = NULL, *_bufs;
++ struct stream_info *str_info;
++
++ printk(KERN_DEBUG "SST DBG:sst_drop_stream for %d\n", str_id);
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return retval;
++ str_info = &sst_drv_ctx->streams[str_id];
++
++ if (str_info->status != STREAM_UN_INIT &&
++ str_info->status != STREAM_DECODE) {
++ if (str_info->ctrl_blk.on == true) {
++ printk(KERN_ERR \
++ "SST ERR: control path is already in use\n");
++ return -EINVAL;
++ }
++ if (sst_create_short_msg(&msg)) {
++ printk(KERN_ERR \
++ "SST ERR: short message mem +\
++ allocation failed\n");
++ return -ENOMEM;
++ }
++ sst_fill_header(&msg->header, IPC_IA_DROP_STREAM, 0, str_id);
++ str_info->ctrl_blk.condition = false;
++ str_info->ctrl_blk.ret_code = 0;
++ str_info->ctrl_blk.on = true;
++ mutex_lock(&sst_drv_ctx->list_lock);
++ list_add_tail(&msg->node,
++ &sst_drv_ctx->ipc_dispatch_list);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_interruptible_timeout(sst_drv_ctx,
++ &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
++ if (!retval) {
++ printk(KERN_DEBUG "SST DBG:drop success\n");
++ str_info->prev = STREAM_UN_INIT;
++ str_info->status = STREAM_INIT;
++ if (str_info->src != MAD_DRV) {
++ mutex_lock(&str_info->lock);
++ list_for_each_entry_safe(bufs, _bufs,
++ &str_info->bufs, node) {
++ list_del(&bufs->node);
++ kfree(bufs);
++ }
++ mutex_unlock(&str_info->lock);
++ }
++ str_info->cumm_bytes += str_info->curr_bytes;
++ /*sst_ospm_send_event(OSPM_EVENT_SUBSYS_STOP_PLAY);*/
++ } else if (retval == -SST_ERR_INVALID_STREAM_ID) {
++ retval = -EINVAL;
++ sst_clean_stream(str_info);
++ }
++ if (str_info->data_blk.on == true) {
++ str_info->data_blk.condition = true;
++ str_info->data_blk.ret_code = retval;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ } else {
++ retval = -EBADRQC;
++ printk(KERN_ERR "SST ERR:BADQRC for stream\n");
++ }
++ return retval;
++}
++
++/**
++* sst_drain_stream - Send msg for draining stream
++* @str_id: stream ID
++*
++* This function is called by any function which wants to drain
++* a stream.
++*/
++int sst_drain_stream(int str_id)
++{
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++ struct stream_info *str_info;
++
++ printk(KERN_DEBUG "SST DBG:sst_drain_stream for %d\n", str_id);
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return retval;
++ str_info = &sst_drv_ctx->streams[str_id];
++
++ if (str_info->status != STREAM_RUNNING &&
++ str_info->status != STREAM_INIT &&
++ str_info->status != STREAM_PAUSED) {
++ printk(KERN_ERR \
++ "SST ERR: BADQRC for stream = %d\n", str_info->status);
++ return -EBADRQC;
++ }
++
++ if (str_info->status == STREAM_INIT) {
++ if (sst_create_short_msg(&msg)) {
++ printk(KERN_ERR\
++ "SST ERR: short message mem +\
++ allocation failed\n");
++ return -ENOMEM;
++ }
++ sst_fill_header(&msg->header, IPC_IA_DRAIN_STREAM, 0, str_id);
++ mutex_lock(&sst_drv_ctx->list_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ } else
++ str_info->need_draining = true;
++ str_info->data_blk.condition = false;
++ str_info->data_blk.ret_code = 0;
++ str_info->data_blk.on = true;
++ retval = sst_wait_interruptible(sst_drv_ctx, &str_info->data_blk);
++ str_info->need_draining = false;
++ if (retval == -SST_ERR_INVALID_STREAM_ID) {
++ retval = -EINVAL;
++ sst_clean_stream(str_info);
++ }
++ return retval;
++}
++
++/**
++* sst_free_stream - Frees a stream
++* @str_id: stream ID
++*
++* This function is called by any function which wants to free
++* a stream.
++*/
++int sst_free_stream(int str_id)
++{
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++ struct stream_info *str_info;
++
++ printk(KERN_DEBUG "SST DBG:sst_free_stream for %d\n", str_id);
++
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return retval;
++ str_info = &sst_drv_ctx->streams[str_id];
++
++ if (str_info->status != STREAM_UN_INIT) {
++ if (sst_create_short_msg(&msg)) {
++ printk(KERN_ERR \
++ "SST ERR: short message mem +\
++ allocation failed\n");
++ return -ENOMEM;
++ }
++ sst_fill_header(&msg->header, IPC_IA_FREE_STREAM, 0, str_id);
++ mutex_lock(&sst_drv_ctx->list_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ str_info->prev = str_info->status;
++ str_info->status = STREAM_UN_INIT;
++ if (str_info->data_blk.on == true) {
++ str_info->data_blk.condition = true;
++ str_info->data_blk.ret_code = 0;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++ sst_clean_stream(str_info);
++ printk(KERN_DEBUG "SST DBG:Stream freed\n");
++ /*sst_ospm_send_event(OSPM_EVENT_SUBSYS_STOP_PLAY);*/
++
++ } else {
++ retval = -EBADRQC;
++ printk(KERN_DEBUG "SST DBG:BADQRC for stream\n");
++ }
++
++ return retval;
++}
++
++
++/**
++* sst_set_stream_param - Send msg for setting stream parameters
++* @id: stream id
++* @params: stream params
++*
++* This function sets stream params during runtime
++*/
++int sst_set_stream_param(int str_id, struct snd_sst_params *str_param)
++{
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++ struct stream_info *str_info;
++
++ BUG_ON(!str_param);
++ if(sst_drv_ctx->streams[str_id].ops != str_param->ops) {
++ printk(KERN_ERR "SST ERR: Invalid operation\n");
++ return -EINVAL;
++ }
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return retval;
++ printk(KERN_DEBUG "SST DBG:set_stream for %d\n", str_id);
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (sst_drv_ctx->streams[str_id].status == STREAM_INIT) {
++ if (str_info->ctrl_blk.on == true) {
++ printk(KERN_ERR \
++ "SST ERR: control path is already in use\n");
++ return -EINVAL;
++ }
++ if (sst_create_large_msg(&msg))
++ return -ENOMEM;
++
++ sst_fill_header(&msg->header,
++ IPC_IA_SET_STREAM_PARAMS, 1, str_id);
++ str_info->ctrl_blk.condition = false;
++ str_info->ctrl_blk.ret_code = 0;
++ str_info->ctrl_blk.on = true;
++ msg->header.part.data = sizeof(u32) +
++ sizeof(str_param->sparams);
++ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
++ memcpy(msg->mailbox_data + sizeof(u32), &str_param->sparams,
++ sizeof(str_param->sparams));
++ mutex_lock(&sst_drv_ctx->list_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_interruptible_timeout(sst_drv_ctx,
++ &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
++ if (retval < 0) {
++ retval = -EIO;
++ sst_clean_stream(str_info);
++ }
++ } else {
++ retval = -EBADRQC;
++ printk(KERN_ERR "SST ERR: BADQRC for stream\n");
++ }
++ return retval;
++}
++
++/**
++* sst_get_vol - This fuction allows to get the premix gain or gain of a stream
++* @get_vol: this is an output param through which the volume
++* structure is passed back to user
++*
++* This function is called when the premix gain or stream gain is queried for
++*/
++int sst_get_vol(struct snd_sst_vol *get_vol)
++{
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++ struct snd_sst_vol *fw_get_vol;
++ int str_id = get_vol->stream_id;
++
++ printk(KERN_DEBUG "SST DBG:get vol called \n");
++
++ if (sst_create_short_msg(&msg))
++ return -ENOMEM;
++
++ sst_fill_header(&msg->header,
++ IPC_IA_GET_STREAM_VOL, 0, str_id);
++ sst_drv_ctx->vol_info_blk.condition = false;
++ sst_drv_ctx->vol_info_blk.ret_code = 0;
++ sst_drv_ctx->vol_info_blk.on = true;
++ fw_get_vol = kzalloc(sizeof(*fw_get_vol), GFP_ATOMIC);
++ if (!fw_get_vol) {
++ printk(KERN_ERR "SST ERR: mem +\
++ allcoation failed\n");
++ return -ENOMEM;
++ }
++ sst_drv_ctx->vol_info_blk.data = (void *)fw_get_vol;
++ mutex_lock(&sst_drv_ctx->list_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_interruptible_timeout(sst_drv_ctx,
++ &sst_drv_ctx->vol_info_blk, SST_BLOCK_TIMEOUT);
++ if (retval)
++ retval = -EIO;
++ else {
++ printk(KERN_DEBUG "SST DBG:stream +\
++ id = %d\n", fw_get_vol->stream_id);
++ printk(KERN_DEBUG "SST DBG:+\
++ volume = %d\n", fw_get_vol->volume);
++ printk(KERN_DEBUG "SST DBG:ramp +\
++ dur = %d\n", fw_get_vol->ramp_duration);
++ printk(KERN_DEBUG "SST DBG:ramp_type +\
++ = %d\n", fw_get_vol->ramp_type);
++ memcpy(get_vol, fw_get_vol, sizeof(*fw_get_vol));
++ }
++ return retval;
++}
++
++/**
++* sst_set_vol - This fuction allows to set the premix gain or gain of a stream
++* @set_vol: this holds the volume structure that needs to be set
++*
++* This function is called when premix gain or stream gain is requested to be set
++*/
++int sst_set_vol(struct snd_sst_vol *set_vol)
++{
++
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++
++ printk(KERN_DEBUG "SST DBG:set vol called \n");
++
++ if (sst_create_large_msg(&msg)) {
++ printk(KERN_ERR "SST ERR:+\
++ message creation failed\n");
++ return -ENOMEM;
++ }
++ sst_fill_header(&msg->header, IPC_IA_SET_STREAM_VOL, 1,
++ set_vol->stream_id);
++
++ msg->header.part.data = sizeof(u32) + sizeof(*set_vol);
++ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
++ memcpy(msg->mailbox_data + sizeof(u32), set_vol, sizeof(*set_vol));
++ sst_drv_ctx->vol_info_blk.condition = false;
++ sst_drv_ctx->vol_info_blk.ret_code = 0;
++ sst_drv_ctx->vol_info_blk.on = true;
++ sst_drv_ctx->vol_info_blk.data = set_vol;
++ mutex_lock(&sst_drv_ctx->list_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_interruptible_timeout(sst_drv_ctx,
++ &sst_drv_ctx->vol_info_blk, SST_BLOCK_TIMEOUT);
++ if (retval) {
++ printk(KERN_ERR \
++ "SST ERR: error in set_vol = %d\n", retval);
++ retval = -EIO;
++ }
++ return retval;
++}
++
++/**
++* sst_get_vol - This fuction allows to set premix mute or soft mute of a stream
++* @set_mute: this holds the mute structure that needs to be set
++*
++* This function is called when premix mute or stream mute is requested to be set
++*/
++int sst_set_mute(struct snd_sst_mute *set_mute)
++{
++
++ int retval = 0;
++ struct ipc_post *msg = NULL;
++
++ printk(KERN_DEBUG "SST DBG:set mute called \n");
++
++ if (sst_create_large_msg(&msg)) {
++ printk(KERN_ERR "SST ERR: +\
++ message creation failed\n");
++ return -ENOMEM;
++ }
++ sst_fill_header(&msg->header, IPC_IA_SET_STREAM_MUTE, 1,
++ set_mute->stream_id);
++ sst_drv_ctx->mute_info_blk.condition = false;
++ sst_drv_ctx->mute_info_blk.ret_code = 0;
++ sst_drv_ctx->mute_info_blk.on = true;
++ sst_drv_ctx->mute_info_blk.data = set_mute;
++
++ msg->header.part.data = sizeof(u32) + sizeof(*set_mute);
++ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
++ memcpy(msg->mailbox_data + sizeof(u32), set_mute,
++ sizeof(*set_mute));
++ mutex_lock(&sst_drv_ctx->list_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_interruptible_timeout(sst_drv_ctx,
++ &sst_drv_ctx->mute_info_blk, SST_BLOCK_TIMEOUT);
++ if (retval) {
++ printk(KERN_ERR\
++ "SST ERR: error in set_mute +\
++ = %d\n", retval);
++ retval = -EIO;
++ }
++ return retval;
++}
++
++int sst_parse_target(struct snd_sst_slot_info *slot)
++{
++
++ if (slot->device_type == SND_SST_DEVICE_PCM) {
++ /*pcm device, check params*/
++ if(slot->device_instance == 1) {
++ if ((slot->device_mode != SND_SST_DEV_MODE_PCM_MODE4_I2S)
++ && (slot->device_mode != SND_SST_DEV_MODE_PCM_MODE4_RIGHT_JUSTIFIED))
++ goto err;
++ } else if(slot->device_instance == 0) {
++ if (slot->device_mode != SND_SST_DEV_MODE_PCM_MODE2)
++ goto err;
++ if (slot->pcm_params.sfreq != 8000 ||
++ slot->pcm_params.num_chan != 1 ||
++ slot->pcm_params.pcm_wd_sz != 16)
++ goto err;
++
++ } else {
++ err:
++ dev_err(&sst_drv_ctx->pci->dev, "SST ERR: i/p params incorrect\n");
++ return -EINVAL;
++ }
++ }
++ /*params ok, now process*/
++ if (slot->target_type == SND_SST_TARGET_PMIC &&
++ slot->device_instance == 1) {
++ sst_drv_ctx->pmic_port_instance = 1;
++ sst_drv_ctx->scard_ops->set_audio_port(ACTIVATE);
++ sst_drv_ctx->scard_ops->set_voice_port(DEACTIVATE);
++ sst_drv_ctx->scard_ops->set_pcm_audio_params(
++ slot->pcm_params.sfreq,
++ slot->pcm_params.pcm_wd_sz,
++ slot->pcm_params.num_chan);
++ if (sst_drv_ctx->pb_streams )
++ sst_drv_ctx->scard_ops->power_up_pmic_pb(1);
++ if (sst_drv_ctx->cp_streams)
++ sst_drv_ctx->scard_ops->power_up_pmic_cp(1);
++ } else if ((slot->target_type == SND_SST_TARGET_PMIC ||
++ slot->target_type == SND_SST_TARGET_OTHER) &&
++ slot->device_instance == 0) {
++ sst_drv_ctx->pmic_port_instance = 0;
++ sst_drv_ctx->scard_ops->set_audio_port(DEACTIVATE);
++ sst_drv_ctx->scard_ops->set_voice_port(ACTIVATE);
++ if (sst_drv_ctx->pb_streams )
++ sst_drv_ctx->scard_ops->power_up_pmic_pb(0);
++ if (sst_drv_ctx->cp_streams)
++ sst_drv_ctx->scard_ops->power_up_pmic_cp(0);
++ }
++ return 0;
++}
++
++int sst_send_target(struct snd_sst_target_device *target)
++{
++ int retval;
++ struct ipc_post *msg;
++
++ if (sst_create_large_msg(&msg)) {
++ dev_err(&sst_drv_ctx->pci->dev, "SST ERR: message creation failed\n");
++ return -ENOMEM;
++ }
++ sst_fill_header(&msg->header, IPC_IA_TARGET_DEV_SELECT, 1, 0);
++ sst_drv_ctx->tgt_dev_blk.condition = false;
++ sst_drv_ctx->tgt_dev_blk.ret_code = 0;
++ sst_drv_ctx->tgt_dev_blk.on = true;
++
++ msg->header.part.data = sizeof(u32) + sizeof(*target);
++ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
++ memcpy(msg->mailbox_data + sizeof(u32), target,
++ sizeof(*target));
++ mutex_lock(&sst_drv_ctx->list_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ printk(KERN_DEBUG "SST DBG:message sent- waiting\n");
++ retval = sst_wait_interruptible_timeout(sst_drv_ctx,
++ &sst_drv_ctx->tgt_dev_blk, TARGET_DEV_BLOCK_TIMEOUT);
++ if (retval)
++ dev_err(&sst_drv_ctx->pci->dev, "SST ERR: +\
++ target device ipc failed = 0x%x\n", retval);
++ return retval;
++
++}
++/**
++* sst_target_device_select - This fuction sets the target device configurations
++* @target_device: this parameter holds the configurations to be set
++*
++* This function is called when the user layer wants to change the target
++* device's configurations
++*/
++
++int sst_target_device_select(struct snd_sst_target_device *target)
++{
++ int retval, i;
++
++ printk(KERN_DEBUG "SST DBG:Target Device Select\n");
++
++ if (target->device_route < 0 || target->device_route > 2) {
++ dev_err(&sst_drv_ctx->pci->dev, "SST ERR: device route is invalid\n");
++ return -EINVAL;
++ }
++
++ if (target->device_route != 0) {
++ dev_err(&sst_drv_ctx->pci->dev, "SST ERR: Unsupported config\n");
++ return -EIO;
++ }
++ for (i = 0; i < SST_MAX_TARGET_DEVICES; i++) {
++ if (target->devices[i].action == SND_SST_PORT_ACTIVATE) {
++ printk(KERN_DEBUG "SST DBG:activate called in %d\n", i);
++ retval = sst_parse_target(&target->devices[i]);
++ if (retval)
++ return retval;
++ } else if (target->devices[i].action == SND_SST_PORT_PREPARE)
++ printk(KERN_DEBUG "SST DBG:PREPARE in %d, FWding\n", i);
++ }
++ return sst_send_target(target);
++}
++
++/**
++* This function gets the physical address of the secure memory from the handle
++*/
++static inline int sst_get_RAR(struct RAR_buffer *buffers, int count)
++{
++ int retval = 0, rar_status = 0;
++
++ rar_status = rar_handle_to_bus(buffers, count);
++
++ if (count != rar_status) {
++ printk(KERN_ERR "SST ERR: The rar CALL Failed");
++ retval = -EIO;
++ }
++ if (buffers->info.type != RAR_TYPE_AUDIO) {
++ printk(KERN_ERR "SST ERR:Invalid RAR type\n");
++ return -EINVAL;
++ }
++ return retval;
++}
++
++/**
++* This function creates the scatter gather list to be sent to firmware to
++* capture/playback data
++*/
++static int sst_create_sg_list(struct stream_info *stream,
++ struct sst_frame_info *sg_list)
++{
++ struct sst_stream_bufs *kbufs = NULL;
++ struct RAR_buffer rar_buffers;
++ int retval = 0;
++ int i = 0;
++
++ list_for_each_entry(kbufs, &stream->bufs, node) {
++ if (kbufs->in_use == false) {
++ if (stream->ops == STREAM_OPS_PLAYBACK_DRM) {
++ printk(KERN_DEBUG "DRM playback handling \n");
++ rar_buffers.info.handle = (__u32)kbufs->addr;
++ rar_buffers.info.size = kbufs->size;
++ printk(KERN_DEBUG "rar handle = 0x%x size=0x%x", rar_buffers.info.handle, rar_buffers.info.size);
++ retval = sst_get_RAR(&rar_buffers, 1);
++
++ if (retval)
++ return retval;
++ sg_list->addr[i].addr = rar_buffers.bus_address;
++ sg_list->addr[i].size = (__u32)kbufs->size; /* rar_buffers.info.size; */
++ printk(KERN_DEBUG "SST DBG:phy addr[%d] 0x%x +\
++ Size 0x%x\n", i, sg_list->addr[i].addr,\
++ sg_list->addr[i].size);
++ } else {
++ sg_list->addr[i].addr =
++ virt_to_phys((void *)
++ kbufs->addr + kbufs->offset);
++ sg_list->addr[i].size = kbufs->size;
++ printk(KERN_DEBUG "SST DBG:phy addr[%d] 0x%x +\
++ Size 0x%x\n", i,\
++ sg_list->addr[i].addr,\
++ kbufs->size);
++ }
++ stream->curr_bytes += sg_list->addr[i].size;
++ kbufs->in_use = true;
++ i++;
++ }
++ if (i >= MAX_NUM_SCATTER_BUFFERS)
++ break;
++ }
++
++ sg_list->num_entries = i;
++ printk(KERN_DEBUG "SST DBG:sg list entries +\
++ = %d \n", sg_list->num_entries);
++ return i;
++}
++
++/**
++* sst_play_frame - Send msg for sending stream frames
++* @str_id: ID of stream
++*
++* This function is called to send data to be played out
++* to the firmware
++*/
++int sst_play_frame(int str_id)
++{
++ int i = 0, retval = 0;
++ struct ipc_post *msg = NULL;
++ struct sst_frame_info sg_list = {0};
++ struct sst_stream_bufs *kbufs = NULL, *_kbufs;
++ struct stream_info *stream;
++
++ printk(KERN_DEBUG "SST DBG:play frame for %d\n", str_id);
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return retval;
++
++ stream = &sst_drv_ctx->streams[str_id];
++ /* clear prev sent buffers */
++ list_for_each_entry_safe(kbufs, _kbufs, &stream->bufs, node) {
++ if (kbufs->in_use == true) {
++ if ((!stream->period_elapsed) && !(in_interrupt()) ){
++ spin_lock(&stream->pcm_lock);
++ list_del(&kbufs->node);
++ kfree(kbufs);
++ printk(KERN_DEBUG "SST DBG:del node \n");
++ spin_unlock(&stream->pcm_lock);
++ }
++ else {
++ list_del(&kbufs->node);
++ kfree(kbufs);
++ printk(KERN_DEBUG "SST DBG:del node \n");
++ }
++ }
++ }
++ /* update bytes sent */
++ stream->cumm_bytes += stream->curr_bytes;
++ stream->curr_bytes = 0;
++ if (list_empty(&stream->bufs)) {
++ /* no user buffer available */
++ printk(KERN_DEBUG "SST DBG:Null buffer!!!!stream +\
++ status = %d \n", stream->status);
++ stream->prev = stream->status;
++ stream->status = STREAM_INIT;
++ printk(KERN_DEBUG "SST DBG:new stream status +\
++ = %d \n", stream->status);
++ if (stream->need_draining == true) {
++ printk(KERN_DEBUG "SST DBG:draining stream \n");
++ if (sst_create_short_msg(&msg)) {
++ printk(KERN_ERR \
++ "SST ERR: short message mem +\
++ alloc failed\n");
++ return -ENOMEM;
++ }
++ sst_fill_header(&msg->header, IPC_IA_DRAIN_STREAM,
++ 0, str_id);
++ mutex_lock(&sst_drv_ctx->list_lock);
++ list_add_tail(&msg->node,
++ &sst_drv_ctx->ipc_dispatch_list);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ } else if (stream->data_blk.on == true) {
++ printk(KERN_DEBUG "SST DBG:user list is empty.. wake \n");
++ /* unblock */
++ stream->data_blk.ret_code = 0;
++ stream->data_blk.condition = true;
++ stream->data_blk.on = false;
++ wake_up(&sst_drv_ctx->wait_queue);
++ }
++#ifdef CONFIG_MSTWN_POWER_MGMT
++ sst_ospm_send_event(OSPM_EVENT_AUDIO_BUF_EMPTY);
++#endif
++ return 0;
++ }
++
++ /* create list */
++ i = sst_create_sg_list(stream, &sg_list);
++
++ /* post msg */
++ if (sst_create_large_msg(&msg))
++ return -ENOMEM;
++
++ sst_fill_header(&msg->header, IPC_IA_PLAY_FRAMES, 1, str_id);
++ msg->header.part.data = sizeof(u32) + sizeof(sg_list);
++ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
++ memcpy(msg->mailbox_data + sizeof(u32), &sg_list, sizeof(sg_list));
++ mutex_lock(&sst_drv_ctx->list_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ return 0;
++
++}
++
++/**
++* sst_capture_frame - Send msg for sending stream frames
++* @str_id: ID of stream
++*
++* This function is called to capture data from the firmware
++*/
++int sst_capture_frame(int str_id)
++{
++ int i = 0, retval = 0;
++ struct ipc_post *msg = NULL;
++ struct sst_frame_info sg_list = {0};
++ struct sst_stream_bufs *kbufs = NULL, *_kbufs;
++ struct stream_info *stream;
++
++
++ printk(KERN_DEBUG "SST DBG:capture frame for %d\n", str_id);
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return retval;
++ stream = &sst_drv_ctx->streams[str_id];
++ /*update bytes sent*/
++ /*stream->cumm_bytes += stream->curr_bytes;
++ stream->curr_bytes = 0;*/
++ /* clear prev sent buffers */
++ list_for_each_entry_safe(kbufs, _kbufs, &stream->bufs, node) {
++ if (kbufs->in_use == true) {
++ list_del(&kbufs->node);
++ kfree(kbufs);
++ printk(KERN_DEBUG "SST DBG:del node \n");
++ }
++ }
++ if (list_empty(&stream->bufs)) {
++ /* no user buffer available */
++ printk(KERN_DEBUG "SST DBG:Null buffer!!!!stream +\
++ status = %d \n", stream->status);
++ stream->prev = stream->status;
++ stream->status = STREAM_INIT;
++ printk(KERN_DEBUG "SST DBG:new stream +\
++ status = %d \n", stream->status);
++ if (stream->data_blk.on == true) {
++ printk(KERN_DEBUG "SST DBG:user list is empty.. wake \n");
++ /* unblock */
++ stream->data_blk.ret_code = 0;
++ stream->data_blk.condition = true;
++ stream->data_blk.on = false;
++ wake_up(&sst_drv_ctx->wait_queue);
++
++ }
++#ifdef CONFIG_MSTWN_POWER_MGMT
++ sst_ospm_send_event(OSPM_EVENT_AUDIO_BUF_FULL);
++#endif
++ return 0;
++ }
++
++ /* create new sg list */
++ i = sst_create_sg_list(stream, &sg_list);
++
++ /* post msg */
++ if (sst_create_large_msg(&msg))
++ return -ENOMEM;
++
++ sst_fill_header(&msg->header, IPC_IA_CAPT_FRAMES, 1, str_id);
++ msg->header.part.data = sizeof(u32) + sizeof(sg_list);
++ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
++ memcpy(msg->mailbox_data + sizeof(u32), &sg_list, sizeof(sg_list));
++ mutex_lock(&sst_drv_ctx->list_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++
++
++ /*update bytes recevied*/
++ stream->cumm_bytes += stream->curr_bytes;
++ stream->curr_bytes = 0;
++
++ printk(KERN_DEBUG "SST DBG:Cum bytes = %d \n", stream->cumm_bytes);
++ return 0;
++}
++
++/**
++* This function is used to calculate the minimum size of input buffers given
++*/
++static unsigned int calculate_min_size(struct snd_sst_buffs *bufs)
++{
++ int i, min_val = bufs->buff_entry[0].size;
++ for (i = 1 ; i < bufs->entries; i++) {
++ if (bufs->buff_entry[i].size < min_val)
++ min_val = bufs->buff_entry[i].size;
++ }
++ printk(KERN_DEBUG "SST DBG:min_val = %d\n", min_val);
++ return min_val;
++}
++
++/**
++* This function is used to calculate the maximum size of input buffers given
++*/
++static unsigned int calculate_max_size(struct snd_sst_buffs *bufs)
++{
++ int i, max_val = bufs->buff_entry[0].size;
++ for (i = 1 ; i < bufs->entries; i++) {
++ if (bufs->buff_entry[i].size > max_val)
++ max_val = bufs->buff_entry[i].size;
++ }
++ printk(KERN_DEBUG "SST DBG:max_val = %d\n", max_val);
++ return max_val;
++}
++
++/**
++* This function is used to allocate input and output buffers to be sent to
++* the firmware that will take encoded data and return decoded data
++*/
++static int sst_allocate_decode_buf(struct stream_info *str_info,
++ struct snd_sst_dbufs *dbufs,
++ unsigned int cum_input_given,
++ unsigned int cum_output_given)
++{
++ if (str_info->ops == STREAM_OPS_PLAYBACK_DRM) {
++
++ if (dbufs->ibufs->type == SST_BUF_RAR && dbufs->obufs->type == SST_BUF_RAR ) {
++ if (dbufs->ibufs->entries == dbufs->obufs->entries)
++ return 0;
++ else {
++ printk(KERN_ERR "SST ERR: +\
++ RAR buffer entries do not match \n");
++ return -EINVAL;
++ }
++ }
++ else
++ str_info->decode_osize = cum_output_given;
++ return 0;
++
++ }
++ if (!str_info->decode_ibuf) {
++ printk(KERN_DEBUG "SST DBG:no input buffers, trying full size\n");
++ str_info->decode_isize = cum_input_given;
++ str_info->decode_ibuf = kzalloc(str_info->decode_isize,
++ GFP_KERNEL);
++ str_info->idecode_alloc = str_info->decode_isize;
++ }
++ if (!str_info->decode_ibuf) {
++ printk(KERN_DEBUG "SST DBG:buff alloaction failed, trying max size\n");
++ str_info->decode_isize = calculate_max_size(dbufs->ibufs);
++ str_info->decode_ibuf = kzalloc(str_info->decode_isize,
++ GFP_KERNEL);
++ str_info->idecode_alloc = str_info->decode_isize;
++ }
++ if (!str_info->decode_ibuf) {
++ printk(KERN_DEBUG "SST DBG:buff alloaction failed, trying min size\n");
++ str_info->decode_isize = calculate_min_size(dbufs->ibufs);
++ str_info->decode_ibuf = kzalloc(str_info->decode_isize,
++ GFP_KERNEL);
++ if (!str_info->decode_ibuf) {
++ printk(KERN_ERR \
++ "SST ERR: mem allocation failed\n");
++ return -ENOMEM;
++ }
++ str_info->idecode_alloc = str_info->decode_isize;
++ }
++ str_info->decode_osize = cum_output_given;
++ if (str_info->decode_osize > sst_drv_ctx->mmap_len)
++ str_info->decode_osize = sst_drv_ctx->mmap_len;
++ return 0;
++}
++
++/**
++* This function is used to send the message to firmware to decode the data
++*/
++static int sst_send_decode_mess(int str_id, struct stream_info *str_info,
++ struct snd_sst_decode_info *dec_info)
++{
++ struct ipc_post *msg = NULL;
++ int retval = 0;
++
++ printk(KERN_DEBUG "SST DBG:called \n");
++
++ if ( str_info->decode_ibuf_type == SST_BUF_RAR ) {
++ dec_info->frames_in.addr[0].addr = (unsigned long) str_info->decode_ibuf;
++ dec_info->frames_in.addr[0].size = str_info->decode_isize;
++
++ } else {
++ dec_info->frames_in.addr[0].addr = virt_to_phys((void *)
++ str_info->decode_ibuf);
++ dec_info->frames_in.addr[0].size = str_info->decode_isize;
++ }
++
++
++ if ( str_info->decode_obuf_type == SST_BUF_RAR ) {
++ dec_info->frames_out.addr[0].addr = (unsigned long) str_info->decode_obuf;
++ dec_info->frames_out.addr[0].size = str_info->decode_osize;
++
++ } else {
++ dec_info->frames_out.addr[0].addr = virt_to_phys((void *)
++ str_info->decode_obuf ) ;
++ dec_info->frames_out.addr[0].size = str_info->decode_osize;
++ }
++
++ dec_info->frames_in.num_entries = 1;
++ dec_info->frames_out.num_entries = 1;
++ dec_info->frames_in.rsrvd = 0;
++ dec_info->frames_out.rsrvd = 0;
++ dec_info->input_bytes_consumed = 0;
++ dec_info->output_bytes_produced = 0;
++ if (sst_create_large_msg(&msg)) {
++ printk(KERN_ERR "SST ERR: message +\
++ creation failed\n");
++ return -ENOMEM;
++ }
++
++ sst_fill_header(&msg->header, IPC_IA_DECODE_FRAMES, 1, str_id);
++ msg->header.part.data = sizeof(u32) + sizeof(*dec_info);
++ memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
++ memcpy(msg->mailbox_data + sizeof(u32), dec_info,
++ sizeof(*dec_info));
++ mutex_lock(&sst_drv_ctx->list_lock);
++ list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
++ mutex_unlock(&sst_drv_ctx->list_lock);
++ str_info->data_blk.condition = false;
++ str_info->data_blk.ret_code = 0;
++ str_info->data_blk.on = true;
++ str_info->data_blk.data = dec_info;
++ sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
++ retval = sst_wait_interruptible(sst_drv_ctx, &str_info->data_blk);
++ return retval;
++}
++
++/**
++* This function is used to prepare the kernel input buffers with contents before
++* sendind for decode
++*/
++static int sst_prepare_input_buffers(struct stream_info *str_info,
++ struct snd_sst_dbufs *dbufs,
++ int *input_index, int *in_copied, int *input_index_valid_size, int *new_entry_flag)
++{
++ int i, cpy_size, retval = 0;
++
++ printk(KERN_DEBUG "SST DBG:input_index = %d, +\
++ input entries = %d\n", *input_index,
++ dbufs->ibufs->entries);
++ for (i = *input_index; i < dbufs->ibufs->entries; i++) {
++ if (str_info->ops == STREAM_OPS_PLAYBACK_DRM) {
++ struct RAR_buffer rar_buffers;
++ __u32 info;
++ retval = copy_from_user((void *) &info,
++ dbufs->ibufs->buff_entry[i].buffer,
++ sizeof(__u32));
++ if (retval) {
++ printk(KERN_ERR "SST ERR:copy from user failed\n");
++ return -EAGAIN;
++ }
++ rar_buffers.info.type = dbufs->ibufs->type;
++ rar_buffers.info.size = dbufs->ibufs->buff_entry[i].size;
++ rar_buffers.info.handle = info;
++ printk(KERN_DEBUG "rar handle in DnR(input buffer function)=0x%x size=0x%x" \
++ , rar_buffers.info.handle, rar_buffers.info.size);
++ retval = sst_get_RAR(&rar_buffers, 1);
++ if (retval) {
++ printk(KERN_ERR "SST ERR: RAR API failed\n");
++ return retval;
++ }
++ str_info->decode_ibuf = (void *) ((unsigned long) rar_buffers.bus_address);
++ printk(KERN_DEBUG "RAR buffer address in DnR (input buffer function):: +\
++ 0x%lu", (unsigned long) str_info->decode_ibuf);
++ printk(KERN_DEBUG "rar handle in DnR decode funtion/output b_add rar +\
++ = 0x%lu", (unsigned long) rar_buffers.bus_address);
++
++ *input_index = i + 1;
++
++ str_info->decode_isize = dbufs->ibufs->buff_entry[i].size;
++ str_info->decode_ibuf_type = dbufs->ibufs->type;
++ *in_copied = str_info->decode_isize;
++ printk(KERN_DEBUG "rar address in input prepare= 0x%lu size=0x%x +\
++ incopied Size =%d ", (unsigned long) str_info->decode_ibuf, +\
++ str_info->decode_isize ,*in_copied);
++ break;
++ }
++ *input_index = i;
++ if (*input_index_valid_size == 0)
++ *input_index_valid_size = dbufs->ibufs->buff_entry[i].size;
++ printk(KERN_DEBUG "SST DBG:inout addr = %p, size = %d\n",
++ dbufs->ibufs->buff_entry[i].buffer,
++ *input_index_valid_size);
++ printk(KERN_DEBUG "SST DBG:decode_isize = %d, in_copied = %d\n",
++ str_info->decode_isize, *in_copied);
++ if (*input_index_valid_size <=
++ (str_info->decode_isize - *in_copied))
++ cpy_size = *input_index_valid_size;
++ else
++ cpy_size = str_info->decode_isize - *in_copied;
++
++ printk(KERN_DEBUG "SST DBG:cpy size = %d\n", cpy_size);
++ if (!dbufs->ibufs->buff_entry[i].buffer) {
++ printk(KERN_ERR "SST ERR: +\
++ input buffer is null\n");
++ return -EINVAL;
++ }
++ printk(KERN_DEBUG "SST DBG:Trying copy To: %p, From %p, size %d\n",
++ str_info->decode_ibuf + *in_copied,
++ dbufs->ibufs->buff_entry[i].buffer, cpy_size);
++
++ retval =
++ copy_from_user((void *)(str_info->decode_ibuf + *in_copied),
++ (void *) dbufs->ibufs->buff_entry[i].buffer,
++ cpy_size);
++ if (retval) {
++ printk(KERN_ERR\
++ "SST ERR: copy from user failed \n");
++ return -EIO;
++ }
++ *in_copied += cpy_size;
++ *input_index_valid_size -= cpy_size;
++ printk(KERN_DEBUG "SST DBG:in buff size = %d, in_copied = %d\n",
++ *input_index_valid_size, *in_copied);
++ if (*input_index_valid_size != 0) {
++ printk(KERN_DEBUG "SST DBG:more input buffers left \n");
++ dbufs->ibufs->buff_entry[i].buffer +=
++ cpy_size;
++ break;
++ }
++ if (*in_copied == str_info->decode_isize &&
++ *input_index_valid_size == 0 && (i+1) <= dbufs->ibufs->entries) {
++ printk(KERN_DEBUG "SST DBG:all input buffers copied\n");
++ *new_entry_flag = true;
++ *input_index = i + 1;
++ break;
++ }
++ }
++ return retval;
++}
++
++/**
++* This function is used to copy the decoded data from kernel buffers to
++* the user output buffers with contents after decode
++*/
++static int sst_prepare_output_buffers(struct stream_info *str_info,
++ struct snd_sst_dbufs *dbufs,
++ int *output_index, int output_size,
++ int *out_copied)
++
++{
++ int i, cpy_size, retval = 0;
++ printk(KERN_DEBUG "SST DBG:output_index = %d, output entries = %d\n",
++ *output_index,
++ dbufs->obufs->entries);
++ for (i = *output_index; i < dbufs->obufs->entries; i++) {
++ *output_index = i;
++ printk(KERN_DEBUG "SST DBG:output addr = %p, size = %d\n",
++ dbufs->obufs->buff_entry[i].buffer,
++ dbufs->obufs->buff_entry[i].size);
++ printk(KERN_DEBUG "SST DBG:output_size = %d, out_copied = %d\n",
++ output_size, *out_copied);
++ if (dbufs->obufs->buff_entry[i].size <
++ (output_size - *out_copied))
++ cpy_size = dbufs->obufs->buff_entry[i].size;
++ else
++ cpy_size = output_size - *out_copied;
++ printk(KERN_DEBUG "SST DBG:cpy size = %d\n", cpy_size);
++ printk(KERN_DEBUG "SST DBG:Trying copy To: %p, From %p, size %d\n",
++ dbufs->obufs->buff_entry[i].buffer,
++ sst_drv_ctx->mmap_mem + *out_copied,
++ cpy_size);
++ retval = copy_to_user(dbufs->obufs->buff_entry[i].buffer,
++ sst_drv_ctx->mmap_mem + *out_copied,
++ cpy_size);
++ if (retval) {
++ printk(KERN_ERR "SST ERR: +\
++ copy to user failed \n");
++ return -EIO;
++ } else
++ printk(KERN_DEBUG "SST DBG:copy to user passed \n");
++ *out_copied += cpy_size;
++ dbufs->obufs->buff_entry[i].size -= cpy_size;
++ printk(KERN_DEBUG "SST DBG:output buff size = %d, out_copied = %d\n",
++ dbufs->obufs->buff_entry[i].size, *out_copied);
++ if (dbufs->obufs->buff_entry[i].size != 0) {
++ *output_index = i;
++ dbufs->obufs->buff_entry[i].buffer += cpy_size;
++ break;
++ } else if (*out_copied == output_size) {
++ *output_index = i + 1;
++ break;
++ }
++ }
++ return retval;
++}
++
++/**
++* sst_decode - Send msg for decoding frames
++* @str_id: ID of stream
++* @dbufs - param that holds the user input and output buffers and sizes
++* This function is called to decode data from the firmware
++*/
++int sst_decode(int str_id, struct snd_sst_dbufs *dbufs)
++{
++ int retval = 0, i;
++ unsigned long long total_input = 0 , total_output = 0;
++ unsigned int cum_input_given = 0 , cum_output_given = 0;
++ int copy_in_done = false, copy_out_done = false;
++ int input_index = 0, output_index = 0;
++ int input_index_valid_size = 0;
++ int in_copied, out_copied;
++ int new_entry_flag;
++ u64 output_size;
++ struct stream_info *str_info;
++ struct snd_sst_decode_info dec_info;
++
++ printk(KERN_DEBUG "SST DBG:...called \n");
++ sst_drv_ctx->scard_ops->power_down_pmic();
++ printk(KERN_DEBUG "SST DBG: Powering_down_PMIC.... \n");
++ retval = sst_validate_strid(str_id);
++ if (retval)
++ return retval;
++
++ str_info = &sst_drv_ctx->streams[str_id];
++ if (str_info->status != STREAM_INIT) {
++ printk(KERN_ERR "SST ERR: invalid +\
++ stream state = %d\n", str_info->status);
++ return -EINVAL;
++ }
++
++ str_info->prev = str_info->status;
++ str_info->status = STREAM_DECODE;
++ for (i = 0; i < dbufs->ibufs->entries; i++)
++ cum_input_given += dbufs->ibufs->buff_entry[i].size;
++ for (i = 0; i < dbufs->obufs->entries; i++)
++ cum_output_given += dbufs->obufs->buff_entry[i].size;
++ /* input and output buffer allocation */
++ retval = sst_allocate_decode_buf(str_info, dbufs,
++ cum_input_given, cum_output_given);
++ str_info->decode_isize = str_info->idecode_alloc;
++ if (retval) {
++ printk(KERN_ERR "SST ERR: +\
++ mem allocation failed, abort!!!\n");
++ retval = -ENOMEM;
++ goto finish;
++ }
++
++ str_info->decode_ibuf_type = dbufs->ibufs->type;
++ str_info->decode_obuf_type = dbufs->obufs->type;
++
++ while ((copy_out_done == false) && (copy_in_done == false)) {
++ in_copied = 0;
++ new_entry_flag = false;
++ retval = sst_prepare_input_buffers(str_info,\
++ dbufs, &input_index, &in_copied, &input_index_valid_size, &new_entry_flag);
++ printk(KERN_DEBUG "SST DBG:prep inbuf ret %d\n", retval);
++ if (retval) {
++ printk(KERN_ERR \
++ "SST ERR: prepare in buffers failed\n");
++ goto finish;
++ }
++
++ printk(KERN_DEBUG "rar handle output buffer type = 0x%x ", dbufs->obufs->type);
++ if (str_info->ops != STREAM_OPS_PLAYBACK_DRM)
++ str_info->decode_obuf = sst_drv_ctx->mmap_mem;
++ else {
++
++ if( dbufs->obufs->type == SST_BUF_RAR ) {
++ struct RAR_buffer rar_buffers;
++ __u32 info;
++ retval = copy_from_user((void *) &info,
++ dbufs->obufs->buff_entry[output_index].buffer,
++ sizeof(__u32));
++
++ rar_buffers.info.size = dbufs->obufs->buff_entry[output_index].size;
++ rar_buffers.info.handle = info;
++ printk(KERN_DEBUG "rar handle in DnR(decode funtion)= 0x%x size=0x%x",\
++ rar_buffers.info.handle, rar_buffers.info.size);
++ retval = sst_get_RAR(&rar_buffers, 1);
++ if (retval)
++ return retval;
++
++
++ str_info->decode_obuf = (void *) ((unsigned long)rar_buffers.bus_address);
++ str_info->decode_osize = dbufs->obufs->buff_entry[output_index].size;
++ str_info->decode_obuf_type = dbufs->obufs->type;
++ printk(KERN_DEBUG "SST DBG:DRM handling\n");
++ printk(KERN_DEBUG "rar handle in DnR decode funtion/output b_add = 0x%lu +\
++ output Size=0x%x", (unsigned long) str_info->decode_obuf ,\
++ str_info->decode_osize);
++ }
++ else
++ {
++ str_info->decode_obuf = sst_drv_ctx->mmap_mem;
++ str_info->decode_osize = dbufs->obufs->buff_entry[output_index].size;
++
++ }
++ }
++
++ if (str_info->ops != STREAM_OPS_PLAYBACK_DRM) {
++ if (str_info->decode_isize > in_copied) {
++ str_info->decode_isize = in_copied;
++ printk(KERN_DEBUG "SST DBG:input size modified = %d\n",
++ str_info->decode_isize);
++ }
++ }
++
++
++ retval = sst_send_decode_mess(str_id, str_info, &dec_info);
++ if (retval) {
++ printk(KERN_ERR \
++ "SST ERR: sending message failed\n");
++ goto finish;
++ }
++ printk(KERN_DEBUG "SST DBG:in_copied = %d, consumed = %lld, produced = %lld\n",
++ in_copied,
++ dec_info.input_bytes_consumed,
++ dec_info.output_bytes_produced);
++ if (dbufs->obufs->type == SST_BUF_RAR) {
++ output_index += 1;
++ if (output_index == dbufs->obufs->entries) {
++ copy_in_done = true;
++ printk(KERN_DEBUG "SST DBG:all input copy done\n");
++ }
++ total_output += dec_info.output_bytes_produced;
++ } else {
++ out_copied = 0;
++ output_size = dec_info.output_bytes_produced;
++ retval = sst_prepare_output_buffers(str_info, dbufs,
++ &output_index, output_size, &out_copied);
++ if (retval) {
++ printk(KERN_ERR \
++ "SST ERR: prepare out +\
++ buffers failed\n");
++ goto finish;
++ }
++ if (str_info->ops != STREAM_OPS_PLAYBACK_DRM) {
++ if (in_copied != dec_info.input_bytes_consumed) {
++ int bytes_left = in_copied - dec_info.input_bytes_consumed;
++ printk(KERN_DEBUG "SST DBG:input left to be copied = %d \n",
++ bytes_left);
++ if(new_entry_flag == true) {
++ input_index--;
++ }
++ while (bytes_left) {
++ unsigned int size_sent = dbufs->ibufs->buff_entry[input_index].size - input_index_valid_size;
++ if (bytes_left == size_sent) {
++ bytes_left = 0;
++ } else if (bytes_left < size_sent) {
++ dbufs->ibufs->buff_entry[input_index].buffer += (size_sent - bytes_left);
++ dbufs->ibufs->buff_entry[input_index].size -= (size_sent - bytes_left);
++ bytes_left = 0;
++ } else {
++ bytes_left -= size_sent;
++ input_index--;
++ input_index_valid_size = 0;
++ }
++ }
++
++ }
++ }
++
++ total_output += out_copied;
++ if (str_info->decode_osize != out_copied) {
++ str_info->decode_osize -= out_copied;
++ printk(KERN_DEBUG "SST DBG:output size modified = %d\n",
++ str_info->decode_osize);
++ }
++ }
++ total_input += dec_info.input_bytes_consumed;
++
++ if (str_info->ops == STREAM_OPS_PLAYBACK_DRM) {
++ if (total_input == cum_input_given)
++ copy_in_done = true;
++ copy_out_done = true;
++
++ } else {
++ if (total_output == cum_output_given) {
++ copy_out_done = true;
++ printk(KERN_DEBUG "SST DBG:all output copy done\n");
++ }
++
++ if (total_input == cum_input_given) {
++ copy_in_done = true;
++ printk(KERN_DEBUG "SST DBG:all input copy done\n");
++ }
++ }
++
++ printk(KERN_DEBUG "SST DBG:copy_out_done = %d, copy_in_done = %d \n",
++ copy_out_done, copy_in_done);
++ }
++
++finish:
++ dbufs->input_bytes_consumed = total_input;
++ dbufs->output_bytes_produced = total_output;
++ str_info->status = str_info->prev;
++ str_info->prev = STREAM_DECODE;
++ str_info->decode_ibuf = NULL;
++ kfree(str_info->decode_ibuf);
++
++ return retval;
++}
+--
+1.6.2.2
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-6-8.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-6-8.patch
new file mode 100644
index 0000000..c75fd6d
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-6-8.patch
@@ -0,0 +1,2861 @@
+From 14d8fbc882908c792b3640b3f2730990539d6645 Mon Sep 17 00:00:00 2001
+From: R, Dharageswari <dharageswari.r@intel.com>
+Date: Thu, 29 Apr 2010 20:27:36 +0530
+Subject: [PATCH] ADR-Post-Beta-0.05.002.03-6/8-Moorestown Audio Drivers: SST sound card modules
+
+This adds the support for vendor PMICs for widget control and configurations.
+The Moorestown platform supports three different vendor implementation of PMIC.
+The sound card, ie all analog components like DAC, ADC, mixer settings are
+different for three vendors. This module implements these settings for each
+of the vendor.
+All PMIC vendors - NEC, Freescale and MAXIM are supported
+
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+
+ new file: sound/pci/sst/intelmid_snd_control.h
+ new file: sound/pci/sst/intelmid_v0_control.c
+ new file: sound/pci/sst/intelmid_v1_control.c
+ new file: sound/pci/sst/intelmid_v2_control.c
+Patch-mainline: 2.6.35?
+---
+ sound/pci/sst/intelmid_snd_control.h | 114 ++++
+ sound/pci/sst/intelmid_v0_control.c | 813 +++++++++++++++++++++++++++
+ sound/pci/sst/intelmid_v1_control.c | 846 ++++++++++++++++++++++++++++
+ sound/pci/sst/intelmid_v2_control.c | 1031 ++++++++++++++++++++++++++++++++++
+ 4 files changed, 2804 insertions(+), 0 deletions(-)
+ create mode 100644 sound/pci/sst/intelmid_snd_control.h
+ create mode 100644 sound/pci/sst/intelmid_v0_control.c
+ create mode 100644 sound/pci/sst/intelmid_v1_control.c
+ create mode 100644 sound/pci/sst/intelmid_v2_control.c
+
+diff --git a/sound/pci/sst/intelmid_snd_control.h b/sound/pci/sst/intelmid_snd_control.h
+new file mode 100644
+index 0000000..0cb9eb1
+--- /dev/null
++++ b/sound/pci/sst/intelmid_snd_control.h
+@@ -0,0 +1,114 @@
++#ifndef __INTELMID_SND_CTRL_H__
++#define __INTELMID_SND_CTRL_H__
++/*
++ * intelmid_snd_control.h - Intel Sound card driver for MID
++ *
++ * Copyright (C) 2008-10 Intel Corporation
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This file defines all snd control functions
++ */
++
++/*
++Mask bits
++*/
++#define MASK0 0x01 /* 0000 0001 */
++#define MASK1 0x02 /* 0000 0010 */
++#define MASK2 0x04 /* 0000 0100 */
++#define MASK3 0x08 /* 0000 1000 */
++#define MASK4 0x10 /* 0001 0000 */
++#define MASK5 0x20 /* 0010 0000 */
++#define MASK6 0x40 /* 0100 0000 */
++#define MASK7 0x80 /* 1000 0000 */
++/*
++value bits
++*/
++#define VALUE0 0x01 /* 0000 0001 */
++#define VALUE1 0x02 /* 0000 0010 */
++#define VALUE2 0x04 /* 0000 0100 */
++#define VALUE3 0x08 /* 0000 1000 */
++#define VALUE4 0x10 /* 0001 0000 */
++#define VALUE5 0x20 /* 0010 0000 */
++#define VALUE6 0x40 /* 0100 0000 */
++#define VALUE7 0x80 /* 1000 0000 */
++
++#define MUTE 0 /* ALSA Passes 0 for mute */
++#define UNMUTE 1 /* ALSA Passes 1 for unmute */
++
++#define MAX_VOL_PMIC_VENDOR0 0x3f /* max vol in dB for stereo & voice DAC */
++#define MIN_VOL_PMIC_VENDOR0 0 /* min vol in dB for stereo & voice DAC */
++/* Head phone volume control */
++#define MAX_HP_VOL_PMIC_VENDOR1 6 /* max volume in dB for HP */
++#define MIN_HP_VOL_PMIC_VENDOR1 (-84) /* min volume in dB for HP */
++#define MAX_HP_VOL_INDX_PMIC_VENDOR1 40 /* Number of HP volume control values */
++
++/* Mono Earpiece Volume control */
++#define MAX_EP_VOL_PMIC_VENDOR1 0 /* max volume in dB for EP */
++#define MIN_EP_VOL_PMIC_VENDOR1 (-75) /* min volume in dB for EP */
++#define MAX_EP_VOL_INDX_PMIC_VENDOR1 32 /* Number of EP volume control values */
++
++int sst_sc_reg_access(struct sc_reg_access *sc_access,
++ int type, int num_val);
++extern struct snd_pmic_ops snd_pmic_ops_fs;
++extern struct snd_pmic_ops snd_pmic_ops_mx;
++extern struct snd_pmic_ops snd_pmic_ops_nc;
++
++/* device */
++enum SND_INPUT_DEVICE {
++ HS_MIC,
++ AMIC,
++ DMIC,
++ IN_UNDEFINED
++};
++
++enum SND_OUTPUT_DEVICE {
++ STEREO_HEADPHONE,
++ INTERNAL_SPKR,
++ OUT_UNDEFINED
++};
++
++enum SND_CARDS {
++ SND_FS = 0,
++ SND_MX,
++ SND_NC,
++};
++
++enum pmic_controls {
++ PMIC_SND_HP_MIC_MUTE = 0x0001,
++ PMIC_SND_AMIC_MUTE = 0x0002,
++ PMIC_SND_DMIC_MUTE = 0x0003,
++ PMIC_SND_CAPTURE_VOL = 0x0004,
++/* Output controls */
++ PMIC_SND_LEFT_PB_VOL = 0x0010,
++ PMIC_SND_RIGHT_PB_VOL = 0x0011,
++ PMIC_SND_LEFT_HP_MUTE = 0x0012,
++ PMIC_SND_RIGHT_HP_MUTE = 0x0013,
++ PMIC_SND_LEFT_SPEAKER_MUTE = 0x0014,
++ PMIC_SND_RIGHT_SPEAKER_MUTE = 0x0015,
++/* Other controls */
++ PMIC_SND_MUTE_ALL = 0x0020,
++ PMIC_MAX_CONTROLS = 0x0020,
++};
++
++#endif /* __INTELMID_SND_CTRL_H__ */
++
++
+diff --git a/sound/pci/sst/intelmid_v0_control.c b/sound/pci/sst/intelmid_v0_control.c
+new file mode 100644
+index 0000000..3252fa5
+--- /dev/null
++++ b/sound/pci/sst/intelmid_v0_control.c
+@@ -0,0 +1,813 @@
++/*
++ * intel_sst_v0_control.c - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corporation
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This file contains the control operations of vendor 1
++ */
++
++#include <linux/cdev.h>
++#include <linux/pci.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/syscalls.h>
++#include <linux/file.h>
++#include <linux/fcntl.h>
++#include <linux/uaccess.h>
++#include <linux/interrupt.h>
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/mm.h>
++#include <linux/workqueue.h>
++#include <linux/pci.h>
++#include <linux/firmware.h>
++#include <sound/intel_lpe.h>
++#include <sound/intel_sst_ioctl.h>
++#include "intel_sst_fw_ipc.h"
++#include "intel_sst_common.h"
++#include "intelmid_snd_control.h"
++
++
++enum _reg_v1 {
++ VOICEPORT1 = 0x180,
++ VOICEPORT2 = 0x181,
++ AUDIOPORT1 = 0x182,
++ AUDIOPORT2 = 0x183,
++ MISCVOICECTRL = 0x184,
++ MISCAUDCTRL = 0x185,
++ DMICCTRL1 = 0x186,
++ DMICCTRL2 = 0x187,
++ MICCTRL = 0x188,
++ MICLICTRL1 = 0x189,
++ MICLICTRL2 = 0x18A,
++ MICLICTRL3 = 0x18B,
++ VOICEDACCTRL1 = 0x18C,
++ STEREOADCCTRL = 0x18D,
++ AUD15 = 0x18E,
++ AUD16 = 0x18F,
++ AUD17 = 0x190,
++ AUD18 = 0x191,
++ RMIXOUTSEL = 0x192,
++ ANALOGLBR = 0x193,
++ ANALOGLBL = 0x194,
++ POWERCTRL1 = 0x195,
++ POWERCTRL2 = 0x196,
++ HEADSETDETECTINT = 0x197,
++ HEADSETDETECTINTMASK = 0x198,
++ TRIMENABLE = 0x199,
++};
++
++int rev_id = 0x20;
++
++int fs_init_card(void)
++{
++ struct sc_reg_access sc_access[] = {
++ {0x180, 0x00, 0x0},
++ {0x181, 0x00, 0x0},
++ {0x182, 0xF8, 0x0},
++ {0x183, 0x08, 0x0},
++ {0x184, 0x00, 0x0},
++ {0x185, 0x40, 0x0},
++ {0x186, 0x06, 0x0},
++ {0x187, 0x80, 0x0},
++ {0x188, 0x00, 0x0},
++ {0x189, 0x39, 0x0},
++ {0x18a, 0x39, 0x0},
++ {0x18b, 0x1F, 0x0},
++ {0x18c, 0x00, 0x0},
++ {0x18d, 0x00, 0x0},
++ {0x18e, 0x39, 0x0},
++ {0x18f, 0x39, 0x0},
++ {0x190, 0x39, 0x0},
++ {0x191, 0x11, 0x0},
++ {0x192, 0x0E, 0x0},
++ {0x193, 0x00, 0x0},
++ {0x194, 0x00, 0x0},
++ {0x195, 0x06, 0x0},
++ {0x196, 0x7F, 0x0},
++ {0x197, 0x00, 0x0},
++ {0x198, 0x0B, 0x0},
++ {0x199, 0x00, 0x0},
++ {0x037, 0x3F, 0x0},
++ };
++
++ snd_pmic_ops_fs.card_status = SND_CARD_INIT_DONE;
++ return sst_sc_reg_access(sc_access, PMIC_WRITE, 27);
++}
++
++int fs_enable_audiodac(int value)
++{
++ struct sc_reg_access sc_access[3];
++ sc_access[0].reg_addr = AUD16;
++ sc_access[1].reg_addr = AUD17;
++ sc_access[2].reg_addr = AUD15;
++ sc_access[0].mask = sc_access[1].mask = sc_access[2].mask = MASK7;
++
++ if (PMIC_SND_MUTE_ALL == snd_pmic_ops_fs.mute_status )
++ return 0;
++ if (value == MUTE) {
++ sc_access[0].value = sc_access[1].value =
++ sc_access[2].value = 0x80;
++
++ } else {
++ sc_access[0].value = sc_access[1].value =
++ sc_access[2].value = 0x0;
++ }
++ if(snd_pmic_ops_fs.num_channel == 1)
++ sc_access[1].value = sc_access[2].value = 0x80;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
++
++}
++int fs_power_up_pb(unsigned int port)
++{
++ struct sc_reg_access sc_access[] = {
++ {POWERCTRL1, 0xC6, 0xC6},
++ {POWERCTRL2, 0x30, 0x30},
++ };
++ int retval = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++ retval = fs_enable_audiodac(MUTE);
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++
++ if (retval)
++ return retval;
++
++ return fs_enable_audiodac(UNMUTE);
++}
++
++int fs_power_up_cp(unsigned int port)
++{
++ struct sc_reg_access sc_access[] = {
++ {POWERCTRL2, 0x02, 0x02}, /*NOTE power up A ADC only as*/
++ /*as turning on V ADC causes noise*/
++ };
++ int retval = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++}
++
++int fs_power_down_pb(void)
++{
++ struct sc_reg_access sc_access[] = {
++ {POWERCTRL1, 0x00, 0xC6},
++ {POWERCTRL2, 0x00, 0x30},
++ };
++ int retval = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++ retval = fs_enable_audiodac(MUTE);
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++ if (retval)
++ return retval;
++ return fs_enable_audiodac(UNMUTE);
++}
++
++int fs_power_down(void)
++{
++ int retval = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++ return retval;
++}
++
++int fs_power_down_cp(void)
++{
++ struct sc_reg_access sc_access[] = {
++ {POWERCTRL2, 0x00, 0x03},
++ };
++ int retval = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++}
++
++int fs_set_pcm_voice_params(void)
++{
++ struct sc_reg_access sc_access[] = {
++ {0x180, 0xA0, 0},
++ {0x181, 0x04, 0},
++ {0x182, 0x0, 0},
++ {0x183, 0x0, 0},
++ {0x184, 0x18, 0},
++ {0x185, 0x40, 0},
++ {0x186, 0x06, 0},
++ {0x187, 0x0, 0},
++ {0x188, 0x10, 0},
++ {0x189, 0x39, 0},
++ {0x18a, 0x39, 0},
++ {0x18b, 0x02, 0},
++ {0x18c, 0x0, 0},
++ {0x18d, 0x0, 0},
++ {0x18e, 0x39, 0},
++ {0x18f, 0x0, 0},
++ {0x190, 0x0, 0},
++ {0x191, 0x20, 0},
++ {0x192, 0x20, 0},
++ {0x193, 0x0, 0},
++ {0x194, 0x0, 0},
++ {0x195, 0x6, 0},
++ {0x196, 0x25, 0},
++ {0x197, 0x0, 0},
++ {0x198, 0xF, 0},
++ {0x199, 0x0, 0},
++ };
++ int retval = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++ return sst_sc_reg_access(sc_access, PMIC_WRITE, 26);
++}
++
++int fs_set_audio_port(int status)
++{
++ struct sc_reg_access sc_access[2];
++ int retval = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++ if (status == DEACTIVATE) {
++ /* Deactivate audio port-tristate and power */
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = MASK6|MASK7;
++ sc_access[0].reg_addr = AUDIOPORT1;
++ sc_access[1].value = 0x00;
++ sc_access[1].mask = MASK4|MASK5;
++ sc_access[1].reg_addr = POWERCTRL2;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++ } else if (status == ACTIVATE) {
++ /* activate audio port */
++ sc_access[0].value = 0xC0;
++ sc_access[0].mask = MASK6|MASK7;
++ sc_access[0].reg_addr = AUDIOPORT1;
++ sc_access[1].value = 0x30;
++ sc_access[1].mask = MASK4|MASK5;
++ sc_access[1].reg_addr = POWERCTRL2;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++ } else
++ return -EINVAL;
++}
++
++int fs_set_voice_port(int status)
++{
++ struct sc_reg_access sc_access[2];
++ int retval = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++ if (status == DEACTIVATE) {
++ /* Deactivate audio port-tristate and power */
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = MASK6|MASK7;
++ sc_access[0].reg_addr = VOICEPORT1;
++ sc_access[1].value = 0x00;
++ sc_access[1].mask = MASK0|MASK1;
++ sc_access[1].reg_addr = POWERCTRL2;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++ } else if (status == ACTIVATE) {
++ /* activate audio port */
++ sc_access[0].value = 0xC0;
++ sc_access[0].mask = MASK6|MASK7;
++ sc_access[0].reg_addr = VOICEPORT1;
++ sc_access[1].value = 0x03;
++ sc_access[1].mask = MASK0|MASK1;
++ sc_access[1].reg_addr = POWERCTRL2;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++ } else
++ return -EINVAL;
++}
++
++int fs_set_pcm_audio_params(int sfreq, int word_size, int num_channel)
++{
++ u8 config1 = 0;
++ struct sc_reg_access sc_access[4];
++ int retval = 0, num_value = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++ switch (sfreq) {
++ case 8000:
++ config1 = 0x00;
++ break;
++ case 11025:
++ config1 = 0x01;
++ break;
++ case 12000:
++ config1 = 0x02;
++ break;
++ case 16000:
++ config1 = 0x03;
++ break;
++ case 22050:
++ config1 = 0x04;
++ break;
++ case 24000:
++ config1 = 0x05;
++ break;
++ case 26000:
++ config1 = 0x06;
++ break;
++ case 32000:
++ config1 = 0x07;
++ break;
++ case 44100:
++ config1 = 0x08;
++ break;
++ case 48000:
++ config1 = 0x09;
++ break;
++ }
++ snd_pmic_ops_fs.num_channel = num_channel;
++ if(snd_pmic_ops_fs.num_channel == 1)
++ {
++ sc_access[0].reg_addr = AUD17;
++ sc_access[1].reg_addr = AUD15;
++ sc_access[0].mask = sc_access[1].mask = MASK7;
++ sc_access[0].value = sc_access[1].value = 0x80;
++ sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++
++ }
++ else
++ {
++ sc_access[0].reg_addr = AUD17;
++ sc_access[1].reg_addr = AUD15;
++ sc_access[0].mask = sc_access[1].mask = MASK7;
++ sc_access[0].value = sc_access[1].value = 0x00;
++ sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++
++ }
++ printk(KERN_DEBUG "SST DBG:sfreq:%d, +\
++ Register value = %x\n", sfreq, config1);
++
++ if (word_size == 24) {
++ sc_access[0].reg_addr = AUDIOPORT1;
++ sc_access[0].mask = MASK0|MASK1|MASK2|MASK3;
++ sc_access[0].value = 0xFB;
++
++
++ sc_access[1].reg_addr = AUDIOPORT2;
++ sc_access[1].value = config1 |0x10;
++ sc_access[1].mask = MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6;
++
++ sc_access[2].reg_addr = MISCAUDCTRL;
++ sc_access[2].value = 0x02;
++ sc_access[2].mask = 0x02;
++
++ num_value = 3 ;
++
++ } else {
++
++ sc_access[0].reg_addr = AUDIOPORT2;
++ sc_access[0].value = config1;
++ sc_access[0].mask = MASK0|MASK1|MASK2|MASK3;
++
++ sc_access[1].reg_addr = MISCAUDCTRL;
++ sc_access[1].value = 0x00;
++ sc_access[1].mask = 0x02;
++ num_value = 2;
++ }
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, num_value);
++
++}
++
++int fs_set_selected_input_dev(u8 value)
++{
++ struct sc_reg_access sc_access_dmic[] = {
++ {MICCTRL, 0x81, 0xf7},
++ {MICLICTRL3, 0x00, 0xE0},
++ };
++ struct sc_reg_access sc_access_mic[] = {
++ {MICCTRL, 0x10, MASK2|MASK4|MASK5|MASK6|MASK7},
++ {MICLICTRL3, 0x00, 0xE0},
++ };
++ struct sc_reg_access sc_access_hsmic[] = {
++ {MICCTRL, 0x40, MASK2|MASK4|MASK5|MASK6|MASK7},
++ {MICLICTRL3, 0x00, 0xE0},
++ };
++
++ int retval = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++
++ switch (value) {
++ case AMIC:
++ printk(KERN_DEBUG "SST DBG:Selecting amic not supported in mono cfg\n");
++ return sst_sc_reg_access(sc_access_mic, PMIC_READ_MODIFY, 2);
++ break;
++
++ case HS_MIC:
++ printk(KERN_DEBUG "SST DBG:Selecting hsmic\n");
++ return sst_sc_reg_access(sc_access_hsmic,
++ PMIC_READ_MODIFY, 2);
++ break;
++
++ case DMIC:
++ printk(KERN_DEBUG "SST DBG:Selecting dmic\n");
++ return sst_sc_reg_access(sc_access_dmic, PMIC_READ_MODIFY, 2);
++ break;
++
++ default:
++ printk(KERN_ERR "SST ERR: +\
++ rcvd illegal request \n");
++ return -EINVAL;
++
++ }
++}
++
++int fs_set_selected_output_dev(u8 value)
++{
++ struct sc_reg_access sc_access_hp[] = {
++ {0x191, 0x11, 0x0},
++ {0x192, 0x0E, 0x0},
++ {0x195, 0x06, 0x0},
++ {0x196, 0x7E, 0x0},
++ };
++ struct sc_reg_access sc_access_is[] = {
++ {0x191, 0x17, 0xFF},
++ {0x192, 0x08, 0xFF},
++ {0x195, 0xC0, 0xFF},
++ {0x196, 0x12, 0x12},
++ };
++ int retval = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++
++ switch (value) {
++ case STEREO_HEADPHONE:
++ printk(KERN_DEBUG "SST DBG:Selecting headphone \n");
++ return sst_sc_reg_access(sc_access_hp, PMIC_WRITE, 4);
++ break;
++
++ case INTERNAL_SPKR:
++ printk(KERN_DEBUG "SST DBG:Selecting internal spkr\n");
++ return sst_sc_reg_access(sc_access_is, PMIC_READ_MODIFY, 4);
++ break;
++
++ default:
++ printk(KERN_ERR "SST ERR: +\
++ rcvd illegal request \n");
++ return -EINVAL;
++
++ }
++}
++
++int fs_set_mute(int dev_id, u8 value)
++{
++ struct sc_reg_access sc_access[6] = {{0,},};
++ int reg_num = 0;
++ int retval = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++
++ snd_pmic_ops_fs.mute_status = dev_id;
++
++ printk(KERN_DEBUG "SST DBG:dev_id:0x%x value:0x%x\n", dev_id, value);
++ switch (dev_id) {
++ case PMIC_SND_DMIC_MUTE:
++ sc_access[0].reg_addr = MICCTRL;
++ sc_access[1].reg_addr = MICLICTRL1;
++ sc_access[2].reg_addr = MICLICTRL2;
++ sc_access[0].mask = MASK5;
++ sc_access[1].mask = sc_access[2].mask = MASK6;
++ if (value == MUTE) {
++ sc_access[0].value = 0x20;
++ sc_access[2].value = sc_access[1].value = 0x40;
++ } else
++ sc_access[0].value = sc_access[1].value
++ = sc_access[2].value = 0x0;
++ reg_num = 3;
++ break;
++ case PMIC_SND_HP_MIC_MUTE:
++ case PMIC_SND_AMIC_MUTE:
++ sc_access[0].reg_addr = MICLICTRL1;
++ sc_access[1].reg_addr = MICLICTRL2;
++ sc_access[0].mask = sc_access[1].mask = MASK6;
++ if (value == MUTE)
++ sc_access[0].value = sc_access[1].value = 0x40;
++ else
++ sc_access[0].value = sc_access[1].value = 0x0;
++ reg_num = 2;
++ break;
++ case PMIC_SND_LEFT_SPEAKER_MUTE:
++ case PMIC_SND_LEFT_HP_MUTE:
++ sc_access[0].reg_addr = AUD16;
++ sc_access[1].reg_addr = AUD15;
++
++ sc_access[0].mask = sc_access[1].mask = MASK7;
++ if (value == MUTE)
++ sc_access[0].value = sc_access[1].value = 0x80;
++ else
++ sc_access[0].value = sc_access[1].value = 0x0;
++ reg_num = 2;
++ break;
++ case PMIC_SND_RIGHT_HP_MUTE:
++ case PMIC_SND_RIGHT_SPEAKER_MUTE:
++ sc_access[0].reg_addr = AUD17;
++ sc_access[1].reg_addr = AUD15;
++ sc_access[0].mask = sc_access[1].mask = MASK7;
++ if (value == MUTE)
++ sc_access[0].value = sc_access[1].value = 0x80;
++ else
++ sc_access[0].value = sc_access[1].value = 0x0;
++ if(snd_pmic_ops_fs.num_channel == 1)
++ sc_access[0].value = sc_access[1].value = 0x80;
++ reg_num = 2;
++ break;
++ case PMIC_SND_MUTE_ALL:
++ sc_access[0].reg_addr = AUD16;
++ sc_access[1].reg_addr = AUD17;
++ sc_access[2].reg_addr = AUD15;
++ sc_access[3].reg_addr = MICCTRL;
++ sc_access[4].reg_addr = MICLICTRL1;
++ sc_access[5].reg_addr = MICLICTRL2;
++ sc_access[0].mask = sc_access[1].mask =
++ sc_access[2].mask = MASK7;
++ sc_access[3].mask = MASK5;
++ sc_access[4].mask = sc_access[5].mask = MASK6;
++
++ if (value == MUTE) {
++ sc_access[0].value =
++ sc_access[1].value = sc_access[2].value = 0x80;
++ sc_access[3].value = 0x20;
++ sc_access[4].value = sc_access[5].value = 0x40;
++
++ } else {
++ sc_access[0].value = sc_access[1].value =
++ sc_access[2].value = sc_access[3].value =
++ sc_access[4].value = sc_access[5].value = 0x0;
++ }
++ if(snd_pmic_ops_fs.num_channel == 1)
++ sc_access[1].value = sc_access[2].value = 0x80;
++ reg_num = 6;
++ break;
++
++ }
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, reg_num);
++}
++
++int fs_set_vol(int dev_id, u8 value)
++{
++ struct sc_reg_access sc_acces, sc_access[4] = {{0},};
++ int reg_num = 0;
++ int retval = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++
++ switch (dev_id) {
++ case PMIC_SND_LEFT_PB_VOL:
++ printk(KERN_DEBUG "SST DBG:PMIC_SND_LEFT_PB_VOL:%d \n", value);
++ sc_access[0].value = sc_access[1].value = value;
++ sc_access[0].reg_addr = AUD16;
++ sc_access[1].reg_addr = AUD15;
++ sc_access[0].mask = sc_access[1].mask =
++ (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
++ reg_num = 2;
++ break;
++
++ case PMIC_SND_RIGHT_PB_VOL:
++ printk(KERN_DEBUG "SST DBG:PMIC_SND_RIGHT_PB_VOL:%d \n", value);
++ sc_access[0].value = sc_access[1].value = value;
++ sc_access[0].reg_addr = AUD17;
++ sc_access[1].reg_addr = AUD15;
++ sc_access[0].mask = sc_access[1].mask =
++ (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
++ if(snd_pmic_ops_fs.num_channel == 1) {
++ sc_access[0].value = sc_access[1].value = 0x80;
++ sc_access[0].mask = sc_access[1].mask = MASK7;
++ }
++ reg_num = 2;
++ break;
++
++ case PMIC_SND_CAPTURE_VOL:
++ printk(KERN_DEBUG "SST DBG:PMIC_SND_CAPTURE_VOL:%d \n", value);
++ sc_access[0].reg_addr = MICLICTRL1;
++ sc_access[1].reg_addr = MICLICTRL2;
++ sc_access[2].reg_addr = DMICCTRL1;
++ sc_access[2].value = 0x3F - value;
++ sc_access[0].value = sc_access[1].value = value;
++ sc_acces.reg_addr = MICLICTRL3;
++ sc_acces.value = value;
++ sc_acces.mask = (MASK0|MASK1|MASK2|MASK3|MASK5|MASK6|MASK7);
++ retval = sst_sc_reg_access(&sc_acces, PMIC_READ_MODIFY, 1);
++ sc_access[0].mask = sc_access[1].mask =
++ sc_access[2].mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
++ reg_num = 3;
++ break;
++
++ default:
++ printk(KERN_ERR "SST ERR: +\
++ rcvd illegal request \n");
++ return -EINVAL;
++ }
++
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, reg_num);
++}
++
++int fs_get_mute(int dev_id, u8 *value)
++{
++ struct sc_reg_access sc_access[6] = {{0,},};
++
++ int retval = 0, temp_value = 0, mask = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++
++ switch (dev_id) {
++
++ case PMIC_SND_AMIC_MUTE:
++ case PMIC_SND_HP_MIC_MUTE:
++ sc_access[0].reg_addr = MICLICTRL1;
++ mask = MASK6;
++ retval = sst_sc_reg_access(sc_access, PMIC_READ, 1);
++ if (sc_access[0].value & mask)
++ *value = MUTE;
++ else
++ *value = UNMUTE;
++ break;
++ case PMIC_SND_DMIC_MUTE:
++ sc_access[0].reg_addr = MICCTRL;
++ mask = MASK5;
++ retval = sst_sc_reg_access(sc_access, PMIC_READ, 1);
++ temp_value = (sc_access[0].value & mask);
++ if (temp_value == 0)
++ *value = UNMUTE;
++ else
++ *value = MUTE;
++ break;
++
++ case PMIC_SND_LEFT_HP_MUTE:
++ case PMIC_SND_LEFT_SPEAKER_MUTE:
++ sc_access[0].reg_addr = AUD16;
++ mask = MASK7;
++ retval = sst_sc_reg_access(sc_access, PMIC_READ, 1);
++ temp_value = sc_access[0].value & mask;
++ if (temp_value == 0)
++ *value = UNMUTE;
++ else
++ *value = MUTE;
++ break;
++ case PMIC_SND_RIGHT_HP_MUTE:
++ case PMIC_SND_RIGHT_SPEAKER_MUTE:
++ sc_access[0].reg_addr = AUD17;
++ mask = MASK7;
++ retval = sst_sc_reg_access(sc_access, PMIC_READ, 1);
++ temp_value = sc_access[0].value & mask;
++ if (temp_value == 0)
++ *value = UNMUTE;
++ else
++ *value = MUTE;
++ break;
++ case PMIC_SND_MUTE_ALL:
++ sc_access[0].reg_addr = AUD15;
++ sc_access[1].reg_addr = AUD16;
++ sc_access[2].reg_addr = AUD17;
++ sc_access[3].reg_addr = MICCTRL;
++ sc_access[4].reg_addr = MICLICTRL1;
++ sc_access[5].reg_addr = MICLICTRL2;
++ sc_access[0].mask = sc_access[1].mask =
++ sc_access[2].mask = MASK7;
++ sc_access[3].mask = MASK5;
++ sc_access[4].mask = sc_access[5].mask = MASK6;
++
++ retval = sst_sc_reg_access(sc_access, PMIC_READ, 6);
++ if (((sc_access[0].value & sc_access[0].mask) ==
++ sc_access[0].mask)
++ && ((sc_access[1].value & sc_access[1].mask) ==
++ sc_access[1].mask)
++ && ((sc_access[2].value & sc_access[2].mask) ==
++ sc_access[2].mask)
++ && ((sc_access[3].value & sc_access[3].mask) ==
++ sc_access[3].mask)
++ && ((sc_access[4].value & sc_access[4].mask) ==
++ sc_access[4].mask)
++ && ((sc_access[5].value & sc_access[5].mask) ==
++ sc_access[5].mask))
++ *value = MUTE;
++ else
++ *value = UNMUTE;
++ break;
++ default:
++ printk(KERN_ERR "SST ERR: +\
++ rcvd illegal request \n");
++ return -EINVAL;
++ }
++
++ return retval;
++}
++
++int fs_get_vol(int dev_id, u8 *value)
++{
++ struct sc_reg_access sc_access = {0,};
++ int retval = 0, mask = 0;
++
++ if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
++ retval = fs_init_card();
++ if (retval)
++ return retval;
++
++ switch (dev_id) {
++ case PMIC_SND_CAPTURE_VOL:
++ printk(KERN_DEBUG "SST DBG:PMIC_SND_CAPTURE_VOL\n");
++ sc_access.reg_addr = MICLICTRL1;
++ mask = (MASK5|MASK4|MASK3|MASK2|MASK1|MASK0);
++ break;
++ case PMIC_SND_LEFT_PB_VOL:
++ printk(KERN_DEBUG "SST DBG:PMIC_SND_LEFT_PB_VOL\n");
++ sc_access.reg_addr = AUD16;
++ mask = (MASK5|MASK4|MASK3|MASK2|MASK1|MASK0);
++ break;
++ case PMIC_SND_RIGHT_PB_VOL:
++ printk(KERN_DEBUG "SST DBG:PMIC_SND_LEFT_PB_VOL\n");
++ sc_access.reg_addr = AUD17;
++ mask = (MASK5|MASK4|MASK3|MASK2|MASK1|MASK0);
++ break;
++ default:
++ printk(KERN_ERR "SST ERR: +\
++ rcvd illegal request \n");
++ return -EINVAL;
++ }
++
++ retval = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
++ printk(KERN_DEBUG "SST DBG:value read = 0x%x\n", sc_access.value);
++ *value = (sc_access.value) & mask;
++ printk(KERN_DEBUG "SST DBG:value returned = 0x%x\n", *value);
++ return retval;
++}
++
++struct snd_pmic_ops snd_pmic_ops_fs = {
++ .set_input_dev = fs_set_selected_input_dev,
++ .set_output_dev = fs_set_selected_output_dev,
++ .set_mute = fs_set_mute,
++ .get_mute = fs_get_mute,
++ .set_vol = fs_set_vol,
++ .get_vol = fs_get_vol,
++ .init_card = fs_init_card,
++ .set_pcm_audio_params = fs_set_pcm_audio_params,
++ .set_pcm_voice_params = fs_set_pcm_voice_params,
++ .set_voice_port = fs_set_voice_port,
++ .set_audio_port = fs_set_audio_port,
++ .power_up_pmic_pb = fs_power_up_pb,
++ .power_up_pmic_cp = fs_power_up_cp,
++ .power_down_pmic_pb = fs_power_down_pb,
++ .power_down_pmic_cp = fs_power_down_cp,
++ .power_down_pmic = fs_power_down,
++};
+diff --git a/sound/pci/sst/intelmid_v1_control.c b/sound/pci/sst/intelmid_v1_control.c
+new file mode 100644
+index 0000000..740ffaf
+--- /dev/null
++++ b/sound/pci/sst/intelmid_v1_control.c
+@@ -0,0 +1,846 @@
++/*
++ * intel_sst_v1_control.c - Intel SST Driver for audio engine
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This file contains the control operations of vendor 2
++ */
++
++#include <linux/cdev.h>
++#include <linux/pci.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/syscalls.h>
++#include <linux/file.h>
++#include <linux/fcntl.h>
++#include <linux/uaccess.h>
++#include <linux/interrupt.h>
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/mm.h>
++#include <linux/workqueue.h>
++#include <linux/pci.h>
++#include <linux/firmware.h>
++#include <sound/intel_lpe.h>
++#include <sound/intel_sst_ioctl.h>
++#include "intel_sst_fw_ipc.h"
++#include "intel_sst_common.h"
++#include "intelmid_snd_control.h"
++
++enum _reg_v2 {
++
++ MASTER_CLOCK_PRESCALAR = 0x205,
++ SET_MASTER_AND_LR_CLK1 = 0x20b,
++ SET_MASTER_AND_LR_CLK2 = 0x20c,
++ MASTER_MODE_AND_DATA_DELAY = 0x20d,
++ DIGITAL_INTERFACE_TO_DAI2 = 0x20e,
++ CLK_AND_FS1 = 0x208,
++ CLK_AND_FS2 = 0x209,
++ DAI2_TO_DAC_HP = 0x210,
++ HP_OP_SINGLE_ENDED = 0x224,
++ ENABLE_OPDEV_CTRL = 0x226,
++ ENABLE_DEV_AND_USE_XTAL = 0x227,
++
++ /* Max audio subsystem (PQ49) MAX 8921 */
++ AS_IP_MODE_CTL = 0xF9,
++ AS_LEFT_SPKR_VOL_CTL = 0xFA, /* Mono Earpiece volume control */
++ AS_RIGHT_SPKR_VOL_CTL = 0xFB,
++ AS_LEFT_HP_VOL_CTL = 0xFC,
++ AS_RIGHT_HP_VOL_CTL = 0xFD,
++ AS_OP_MIX_CTL = 0xFE,
++ AS_CONFIG = 0xFF,
++
++ /* Headphone volume control & mute registers */
++ VOL_CTRL_LT = 0x21c,
++ VOL_CTRL_RT = 0x21d,
++
++};
++
++int mx_init_card(void)
++{
++ struct sc_reg_access sc_access[] = {
++ {0x200, 0x80, 0x00},
++ {0x201, 0xC0, 0x00},
++ {0x202, 0x00, 0x00},
++ {0x203, 0x00, 0x00},
++ {0x204, 0x02, 0x00},
++ {0x205, 0x10, 0x00},
++ {0x206, 0x60, 0x00},
++ {0x207, 0x00, 0x00},
++ {0x208, 0x90, 0x00},
++ {0x209, 0x51, 0x00},
++ {0x20a, 0x00, 0x00},
++ {0x20b, 0x10, 0x00},
++ {0x20c, 0x00, 0x00},
++ {0x20d, 0x00, 0x00},
++ {0x20e, 0x11, 0x00},
++ {0x20f, 0x00, 0x00},
++ {0x210, 0x84, 0x00},
++ {0x211, 0xB3, 0x00},
++ {0x212, 0x00, 0x00},
++ {0x213, 0x00, 0x00},
++ {0x214, 0x41, 0x00},
++ {0x215, 0x00, 0x00},
++ {0x216, 0x00, 0x00},
++ {0x217, 0x00, 0x00},
++ {0x218, 0x03, 0x00},
++ {0x219, 0x03, 0x00},
++ {0x21a, 0x00, 0x00},
++ {0x21b, 0x00, 0x00},
++ {0x21c, 0x09, 0x00},
++ {0x21d, 0x09, 0x00},
++ {0x21e, 0x00, 0x00},
++ {0x21f, 0x00, 0x00},
++ {0x220, 0x54, 0x00},
++ {0x221, 0x54, 0x00},
++ {0x222, 0x50, 0x00},
++ {0x223, 0x00, 0x00},
++ {0x224, 0x04, 0x00},
++ {0x225, 0x80, 0x00},
++ {0x226, 0x0F, 0x00},
++ {0x227, 0x08, 0x00},
++ {0xf9, 0x40, 0x00},
++ {0xfa, 0x1f, 0x00},
++ {0xfb, 0x1f, 0x00},
++ {0xfc, 0x1f, 0x00},
++ {0xfd, 0x1f, 0x00},
++ {0xfe, 0x00, 0x00},
++ {0xff, 0x0c, 0x00},
++ };
++ snd_pmic_ops_mx.card_status = SND_CARD_INIT_DONE;
++ snd_pmic_ops_mx.num_channel = 2;
++ return sst_sc_reg_access(sc_access, PMIC_WRITE, 47);
++}
++
++
++
++int mx_enable_audiodac(int value)
++{
++ struct sc_reg_access sc_access[3];
++ int mute_val = 0;
++ int mute_val1 = 0;
++ u8 vol_value;
++
++ sc_access[0].reg_addr = AS_LEFT_HP_VOL_CTL;
++ sc_access[1].reg_addr = AS_RIGHT_HP_VOL_CTL;
++
++
++ if (value == UNMUTE) {
++
++ mute_val = 0x1f;
++ mute_val1 = 0x00;
++
++ } else {
++
++ mute_val = 0x00;
++ mute_val1 = 0x40;
++
++ }
++
++
++ sc_access[0].mask = sc_access[1].mask = MASK0|MASK1|MASK2|MASK3|MASK4;
++ sc_access[0].value = sc_access[1].value = mute_val;
++
++ sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++
++ sc_access[0].reg_addr = VOL_CTRL_LT;
++ sc_access[1].reg_addr = VOL_CTRL_RT;
++ sc_access[0].mask = sc_access[1].mask = MASK6;
++ sc_access[0].value = sc_access[1].value = mute_val1;
++ if ( snd_pmic_ops_mx.num_channel == 1)
++ sc_access[1].value = 0x40;
++
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++
++
++
++}
++int mx_power_up_pb(unsigned int port)
++{
++
++ int retval = 0;
++ struct sc_reg_access sc_access[3];
++
++
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT)
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++
++
++
++ retval = mx_enable_audiodac(MUTE);
++
++ mdelay(10);
++
++ sc_access[0].reg_addr = AS_CONFIG;
++ sc_access[0].mask = MASK7;
++ sc_access[0].value = 0x80;
++
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++
++
++
++ sc_access[0].reg_addr = ENABLE_OPDEV_CTRL;
++ sc_access[0].mask = MASK3|MASK2;
++ sc_access[0].value = 0x0C;
++
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++ sc_access[0].reg_addr = ENABLE_DEV_AND_USE_XTAL;
++ sc_access[0].mask = MASK7|MASK3;
++ sc_access[0].value = 0x88;
++
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++
++
++
++ retval = mx_enable_audiodac(UNMUTE);
++
++ return retval;
++
++}
++
++int mx_power_up_cp(unsigned int port)
++{
++ struct sc_reg_access sc_access[] = {
++ {0x226, 0x03, MASK1|MASK0},
++ {0x227, 0x88, MASK7|MASK3},
++ };
++ int retval = 0;
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT)
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++}
++
++int mx_power_down_pb(void)
++{
++
++ struct sc_reg_access sc_access[3];
++ int retval = 0;
++
++
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT)
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++
++ retval = mx_enable_audiodac(MUTE);
++
++
++ sc_access[0].reg_addr = ENABLE_OPDEV_CTRL;
++ sc_access[0].mask = MASK3|MASK2;
++ sc_access[0].value = 0x00;
++
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++
++
++ retval = mx_enable_audiodac(UNMUTE);
++
++ return retval;
++
++
++}
++
++int mx_power_down_cp(void)
++{
++ struct sc_reg_access sc_access[] = {
++ {0x226, 0x00, MASK1|MASK0},
++ };
++ int retval = 0;
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT)
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++
++}
++
++int mx_power_down(void)
++{
++ int retval = 0;
++ struct sc_reg_access sc_access[3];
++
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT)
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++
++
++ retval = mx_enable_audiodac(MUTE);
++
++ sc_access[0].reg_addr = AS_CONFIG;
++ sc_access[0].mask = MASK7;
++ sc_access[0].value = 0x00;
++
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++
++ sc_access[0].reg_addr = ENABLE_DEV_AND_USE_XTAL;
++ sc_access[0].mask = MASK7;
++ sc_access[0].value = 0x00;
++
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++
++ retval = mx_enable_audiodac(UNMUTE);
++
++ return retval;
++
++
++}
++
++int mx_set_voice_port(int status)
++{
++ int retval = 0;
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT)
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++
++ return retval;
++}
++
++int mx_set_audio_port(int status)
++{
++ int retval = 0;
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT)
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++ return retval;
++}
++int mx_set_pcm_voice_params(void)
++{
++ int retval = 0;
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT)
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++
++ return retval;
++}
++
++int mx_set_pcm_audio_params(int sfreq, int word_size , int num_channel)
++{
++ int config1 = 0, config2 = 0, filter = 0xB3;
++ struct sc_reg_access sc_access[5];
++ int retval = 0;
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT)
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++
++
++ switch (sfreq) {
++ case 8000:
++ config1 = 0x10;
++ config2 = 0x00;
++ filter = 0x33;
++ break;
++ case 11025:
++ config1 = 0x16;
++ config2 = 0x0d;
++ break;
++ case 12000:
++ config1 = 0x18;
++ config2 = 0x00;
++ break;
++ case 16000:
++ config1 = 0x20;
++ config2 = 0x00;
++ break;
++ case 22050:
++ config1 = 0x2c;
++ config2 = 0x1a;
++ break;
++ case 24000:
++ config1 = 0x30;
++ config2 = 0x00;
++ break;
++ case 32000:
++ config1 = 0x40;
++ config2 = 0x00;
++ break;
++ case 44100:
++ config1 = 0x58;
++ config2 = 0x33;
++ break;
++ case 48000:
++ config1 = 0x60;
++ config2 = 0x00;
++ break;
++ }
++
++ snd_pmic_ops_mx.num_channel = num_channel;
++ /*mute the right channel if MONO*/
++ if(snd_pmic_ops_mx.num_channel == 1)
++ {
++
++ sc_access[0].reg_addr = VOL_CTRL_RT;
++ sc_access[0].value = 0x40;
++ sc_access[0].mask = MASK6;
++
++ sc_access[1].reg_addr = 0x224;
++ sc_access[1].value = 0x05;
++ sc_access[1].mask = MASK0|MASK1|MASK2;
++
++ sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++
++ }
++ else
++ {
++ sc_access[0].reg_addr = VOL_CTRL_RT;
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = MASK6;
++
++ sc_access[1].reg_addr = 0x224;
++ sc_access[1].value = 0x04;
++ sc_access[1].mask = MASK0|MASK1|MASK2;
++
++ sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++
++ }
++ sc_access[0].reg_addr = 0x206;
++ sc_access[0].value = config1;
++ sc_access[1].reg_addr = 0x207;
++ sc_access[1].value = config2;
++
++ if (word_size == 16) {
++ sc_access[2].value = 0x51;
++ sc_access[3].value = 0x31;
++ }
++ else if (word_size == 24) {
++ sc_access[2].value =0x52;
++ sc_access[3].value = 0x92;
++
++ }
++
++ sc_access[2].reg_addr = 0x209;
++ sc_access[3].reg_addr = 0x20e;
++
++ sc_access[4].reg_addr = 0x211;
++ sc_access[4].value = filter;
++
++ sst_sc_reg_access(sc_access, PMIC_WRITE, 5);
++
++ return 0;
++}
++
++int mx_set_selected_output_dev(u8 dev_id)
++{
++ struct sc_reg_access sc_access[6];
++ int num_reg = 0;
++ int retval = 0;
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT)
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++ printk(KERN_DEBUG "SST DBG:mx_set_selected_output_dev +\
++ dev_id:0x%x\n", dev_id);
++ switch (dev_id) {
++ case STEREO_HEADPHONE:
++ sc_access[0].reg_addr = 0x226;
++ sc_access[0].value = 0x0C;
++ sc_access[0].mask = 0x3F;
++
++ /*sc_access[3].reg_addr = 0xFA;
++ sc_access[3].value = 0x00;
++ sc_access[3].mask = MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6|MASK7;
++
++ sc_access[5].reg_addr = 0xFB;
++ sc_access[5].value = 0x00;
++ sc_access[5].mask = MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6|MASK7;
++
++
++ sc_access[1].reg_addr = 0xFC;
++ sc_access[1].value = 0x1F;
++ sc_access[1].mask = 0x1F;
++
++ sc_access[4].reg_addr = 0xFD;
++ sc_access[4].value = 0x1F;
++ sc_access[4].mask = 0x1F; */
++
++ sc_access[1].reg_addr = 0xFF;
++ sc_access[1].value = 0x0C;
++ sc_access[1].mask = MASK2|MASK3|MASK5|MASK6|MASK4;
++
++ num_reg = 2;
++ break;
++ case INTERNAL_SPKR:
++ sc_access[0].reg_addr = 0x226;
++ sc_access[0].value = 0x3F;
++ sc_access[0].mask = 0x3F;
++
++
++ /*sc_access[1].reg_addr = 0xFA;
++ sc_access[1].value = 0x1F;
++ sc_access[1].mask = 0x1F;
++
++ sc_access[2].reg_addr = 0xFB;
++ sc_access[2].value = 0x1F;
++ sc_access[2].mask = 0x1F;
++
++ sc_access[3].reg_addr = 0xFC;
++ sc_access[3].value = 0x00;
++ sc_access[3].mask = MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6|MASK7;
++
++
++
++ sc_access[4].reg_addr = 0xFD;
++ sc_access[4].value = 0x00;
++ sc_access[4].mask = MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6|MASK7; */
++
++ sc_access[1].reg_addr = 0xFF;
++ sc_access[1].value = 0x30;
++ sc_access[1].mask = MASK2|MASK3|MASK5|MASK6|MASK4;
++
++ num_reg = 2;
++ break;
++ }
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, num_reg);
++}
++
++int mx_set_selected_input_dev(u8 dev_id)
++{
++ struct sc_reg_access sc_access[2];
++ int num_reg = 0;
++ int retval = 0;
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT)
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++ snd_pmic_ops_mx.input_dev_id = dev_id;
++ printk(KERN_DEBUG "SST DBG:mx_set_selected_input_+\
++ dev dev_id:0x%x\n", dev_id);
++
++ switch (dev_id) {
++ case AMIC:
++ sc_access[0].reg_addr = 0x223;
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = MASK7|MASK6|MASK5|MASK4|MASK0;
++ sc_access[1].reg_addr = 0x222;
++ sc_access[1].value = 0x50;
++ sc_access[1].mask = MASK7|MASK6|MASK5|MASK4;
++ num_reg = 2;
++ break;
++
++ case HS_MIC:
++ sc_access[0].reg_addr = 0x223;
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = MASK7|MASK6|MASK5|MASK4|MASK0;
++ sc_access[1].reg_addr = 0x222;
++ sc_access[1].value = 0x70;
++ sc_access[1].mask = MASK7|MASK6|MASK5|MASK4;
++ num_reg = 2;
++ break;
++ case DMIC:
++ sc_access[1].reg_addr = 0x222;
++ sc_access[1].value = 0x00;
++ sc_access[1].mask = MASK7|MASK6|MASK5|MASK4|MASK0;
++ sc_access[0].reg_addr = 0x223;
++ sc_access[0].value = 0x20;
++ sc_access[0].mask = MASK7|MASK6|MASK5|MASK4|MASK0;
++ num_reg = 2;
++ break;
++ }
++ return sst_sc_reg_access(sc_access, PMIC_WRITE, num_reg);
++}
++
++int mx_set_mute(int dev_id, u8 value)
++{
++ struct sc_reg_access sc_access[5], sc_acces;
++ int num_reg = 0;
++ int retval = 0;
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT)
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++
++
++
++ printk(KERN_DEBUG "SST DBG:mx_set_mute +\
++ dev_id:0x%x , value:%d \n", dev_id, value);
++
++ switch (dev_id) {
++ case PMIC_SND_DMIC_MUTE:
++ case PMIC_SND_AMIC_MUTE:
++ case PMIC_SND_HP_MIC_MUTE:
++ sc_access[0].reg_addr = 0x220;
++ sc_access[1].reg_addr = 0x221;
++ sc_access[2].reg_addr = 0x223;
++ if (value == MUTE) {
++ sc_access[0].value = 0x00;
++ sc_access[1].value = 0x00;
++ if (snd_pmic_ops_mx.input_dev_id == DMIC)
++ sc_access[2].value = 0x00;
++ else
++ sc_access[2].value = 0x20;
++ } else {
++ sc_access[0].value = 0x20;
++ sc_access[1].value = 0x20;
++ if (snd_pmic_ops_mx.input_dev_id == DMIC)
++ sc_access[2].value = 0x20;
++ else
++ sc_access[2].value = 0x00;
++ }
++ sc_access[0].mask = MASK5|MASK6;
++ sc_access[1].mask = MASK5|MASK6;
++ sc_access[2].mask = MASK5|MASK6;
++ num_reg = 3;
++ break;
++ case PMIC_SND_LEFT_SPEAKER_MUTE:
++ case PMIC_SND_LEFT_HP_MUTE:
++ sc_access[0].reg_addr = VOL_CTRL_LT;
++ if (value == MUTE)
++ sc_access[0].value = 0x40;
++ else
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = MASK6;
++ num_reg = 1;
++ break;
++ case PMIC_SND_RIGHT_SPEAKER_MUTE:
++ case PMIC_SND_RIGHT_HP_MUTE:
++ sc_access[0].reg_addr = VOL_CTRL_RT;
++ if(snd_pmic_ops_mx.num_channel == 1)
++ value = MUTE;
++ if (value == MUTE)
++ sc_access[0].value = 0x40;
++ else
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = MASK6;
++ num_reg = 1;
++ break;
++
++ case PMIC_SND_MUTE_ALL:
++ sc_access[0].reg_addr = VOL_CTRL_RT;
++ sc_access[1].reg_addr = VOL_CTRL_LT;
++ sc_access[2].reg_addr = 0x220;
++ sc_access[3].reg_addr = 0x221;
++ sc_access[4].reg_addr = 0x223;
++ if (value == MUTE) {
++ sc_access[0].value = sc_access[1].value = 0x40;
++ sc_access[2].value = 0x00;
++ sc_access[3].value = 0x00;
++ sc_acces.reg_addr = 0x222;
++ sc_acces.value = 0x00;
++ sc_acces.mask = MASK4|MASK5|MASK6|MASK7;
++ sst_sc_reg_access(&sc_acces, PMIC_READ_MODIFY, 1);
++ sc_access[4].value = 0x00;
++
++ } else {
++ sc_access[0].value = sc_access[1].value = 0x00;
++ sc_access[2].value = 0x20;
++ sc_access[3].value = 0x20;
++ sc_access[4].value = 0x20;
++ }
++ if(snd_pmic_ops_mx.num_channel == 1)
++ sc_access[0].value = 0x40;
++ sc_access[0].mask = sc_access[1].mask = MASK6;
++ sc_access[2].mask = MASK5|MASK6;
++ sc_access[3].mask = MASK5|MASK6|MASK2|MASK4;
++ sc_access[4].mask = MASK5|MASK6;
++ num_reg = 5;
++ break;
++ }
++
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, num_reg);
++}
++
++int mx_set_vol(int dev_id, u8 value)
++{
++ struct sc_reg_access sc_access[2] = {{0},};
++ int num_reg = 0;
++ int retval = 0;
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT)
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++
++ printk(KERN_DEBUG "SST DBG:mx_set_vol dev_id:0x%x , \
++ value:%d \n", dev_id, value);
++ switch (dev_id) {
++ case PMIC_SND_CAPTURE_VOL:
++ sc_access[0].reg_addr = 0x220;
++ sc_access[1].reg_addr = 0x221;
++ sc_access[0].value = sc_access[1].value = -value;
++ sc_access[0].mask = sc_access[1].mask =
++ (MASK0|MASK1|MASK2|MASK3|MASK4);
++ num_reg = 2;
++ break;
++ case PMIC_SND_LEFT_PB_VOL:
++ sc_access[0].value = -value;
++ sc_access[0].reg_addr = VOL_CTRL_LT;
++ sc_access[0].mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
++ num_reg = 1;
++ break;
++ case PMIC_SND_RIGHT_PB_VOL:
++ sc_access[0].value = -value;
++ sc_access[0].reg_addr = VOL_CTRL_RT;
++ sc_access[0].mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
++ if(snd_pmic_ops_mx.num_channel == 1) {
++ sc_access[0].value = 0x40;
++ sc_access[0].mask = MASK6;
++ sc_access[0].reg_addr = VOL_CTRL_RT;
++ }
++ num_reg = 1;
++ break;
++ }
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, num_reg);
++}
++
++int mx_get_mute(int dev_id, u8 *value)
++{
++ struct sc_reg_access sc_access[4] = {{0},};
++ int retval = 0, num_reg = 0, mask = 0;
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT)
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++
++ switch (dev_id) {
++ case PMIC_SND_DMIC_MUTE:
++ case PMIC_SND_AMIC_MUTE:
++ case PMIC_SND_HP_MIC_MUTE:
++ sc_access[0].reg_addr = 0x220;
++ mask = MASK5|MASK6;
++ num_reg = 1;
++ retval = sst_sc_reg_access(sc_access, PMIC_READ, num_reg);
++ if (retval)
++ return retval;
++ *value = sc_access[0].value & mask;
++ if (*value)
++ *value = UNMUTE;
++ else
++ *value = MUTE;
++ return retval;
++
++ case PMIC_SND_LEFT_HP_MUTE:
++ case PMIC_SND_LEFT_SPEAKER_MUTE:
++ sc_access[0].reg_addr = VOL_CTRL_LT;
++ num_reg = 1;
++ mask = MASK6;
++ break;
++ case PMIC_SND_RIGHT_HP_MUTE:
++ case PMIC_SND_RIGHT_SPEAKER_MUTE:
++ sc_access[0].reg_addr = VOL_CTRL_RT;
++ num_reg = 1;
++ mask = MASK6;
++ break;
++ case PMIC_SND_MUTE_ALL:
++ sc_access[0].reg_addr = VOL_CTRL_RT;
++ sc_access[1].reg_addr = VOL_CTRL_LT;
++ sc_access[2].reg_addr = 0x220;
++ sc_access[3].reg_addr = 0x221;
++ num_reg = 4;
++ retval = sst_sc_reg_access(sc_access, PMIC_READ, num_reg);
++ if (((sc_access[0].value & MASK6) == MASK6) &&
++ ((sc_access[1].value & MASK6) == MASK6) &&
++ ((sc_access[2].value & (MASK5|MASK6)) == 0) &&
++ ((sc_access[3].value & (MASK5|MASK6)) == 0))
++ *value = MUTE;
++ else
++ *value = UNMUTE;
++ break;
++ }
++ retval = sst_sc_reg_access(sc_access, PMIC_READ, num_reg);
++ if (retval)
++ return retval;
++ *value = sc_access[0].value & mask;
++ if (*value)
++ *value = MUTE;
++ else
++ *value = UNMUTE;
++ return retval;
++}
++
++int mx_get_vol(int dev_id, u8 *value)
++{
++ struct sc_reg_access sc_access = {0,};
++ int retval = 0, mask = 0, num_reg = 0;
++
++ if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT)
++ retval = mx_init_card();
++ if (retval)
++ return retval;
++
++ switch (dev_id) {
++ case PMIC_SND_CAPTURE_VOL:
++ sc_access.reg_addr = 0x220;
++ mask = MASK0|MASK1|MASK2|MASK3|MASK4|MASK5;
++ num_reg = 1;
++ break;
++ case PMIC_SND_LEFT_PB_VOL:
++ sc_access.reg_addr = VOL_CTRL_LT;
++ mask = MASK0|MASK1|MASK2|MASK3|MASK4|MASK5;
++ num_reg = 1;
++ break;
++ case PMIC_SND_RIGHT_PB_VOL:
++ sc_access.reg_addr = VOL_CTRL_RT;
++ mask = MASK0|MASK1|MASK2|MASK3|MASK4|MASK5;
++ num_reg = 1;
++ break;
++ }
++ retval = sst_sc_reg_access(&sc_access, PMIC_READ, num_reg);
++ if (retval)
++ return retval;
++ *value = -(sc_access.value & mask);
++ printk(KERN_DEBUG "SST DBG:value extracted 0x%x\n", *value);
++ return retval;
++}
++
++struct snd_pmic_ops snd_pmic_ops_mx = {
++ .set_input_dev = mx_set_selected_input_dev,
++ .set_output_dev = mx_set_selected_output_dev,
++ .set_mute = mx_set_mute,
++ .get_mute = mx_get_mute,
++ .set_vol = mx_set_vol,
++ .get_vol = mx_get_vol,
++ .init_card = mx_init_card,
++ .set_pcm_audio_params = mx_set_pcm_audio_params,
++ .set_pcm_voice_params = mx_set_pcm_voice_params,
++ .set_voice_port = mx_set_voice_port,
++ .set_audio_port = mx_set_audio_port,
++ .power_up_pmic_pb = mx_power_up_pb,
++ .power_up_pmic_cp = mx_power_up_cp,
++ .power_down_pmic_pb = mx_power_down_pb,
++ .power_down_pmic_cp = mx_power_down_cp,
++ .power_down_pmic = mx_power_down,
++};
++
+diff --git a/sound/pci/sst/intelmid_v2_control.c b/sound/pci/sst/intelmid_v2_control.c
+new file mode 100644
+index 0000000..d42b564
+--- /dev/null
++++ b/sound/pci/sst/intelmid_v2_control.c
+@@ -0,0 +1,1031 @@
++/*
++ * intelmid_v2_control.c - Intel Sound card driver for MID
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Vinod Koul <vinod.koul@intel.com>
++ * Harsha Priya <priya.harsha@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This file contains the control operations of vendor 3
++ */
++
++#include <linux/cdev.h>
++#include <linux/pci.h>
++#include <linux/kernel.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/syscalls.h>
++#include <linux/file.h>
++#include <linux/fcntl.h>
++#include <linux/uaccess.h>
++#include <linux/interrupt.h>
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/mm.h>
++#include <linux/workqueue.h>
++#include <linux/pci.h>
++#include <linux/firmware.h>
++#include <sound/intel_lpe.h>
++#include <sound/intel_sst_ioctl.h>
++#include "intel_sst_fw_ipc.h"
++#include "intel_sst_common.h"
++#include "intelmid_snd_control.h"
++
++enum reg_v3 {
++ VAUDIOCNT = 0x51,
++ VOICEPORT1 = 0x100,
++ VOICEPORT2 = 0x101,
++ AUDIOPORT1 = 0x102,
++ AUDIOPORT2 = 0x103,
++ ADCSAMPLERATE = 0x104,
++ DMICCTRL1 = 0x105,
++ DMICCTRL2 = 0x106,
++ MICCTRL = 0x107,
++ MICSELVOL = 0x108,
++ LILSEL = 0x109,
++ LIRSEL = 0x10a,
++ VOICEVOL = 0x10b,
++ AUDIOLVOL = 0x10c,
++ AUDIORVOL = 0x10d,
++ LMUTE = 0x10e,
++ RMUTE = 0x10f,
++ POWERCTRL1 = 0x110,
++ POWERCTRL2 = 0x111,
++ DRVPOWERCTRL = 0x112,
++ VREFPLL = 0x113,
++ PCMBUFCTRL = 0x114,
++ SOFTMUTE = 0x115,
++ DTMFPATH = 0x116,
++ DTMFVOL = 0x117,
++ DTMFFREQ = 0x118,
++ DTMFHFREQ = 0x119,
++ DTMFLFREQ = 0x11a,
++ DTMFCTRL = 0x11b,
++ DTMFASON = 0x11c,
++ DTMFASOFF = 0x11d,
++ DTMFASINUM = 0x11e,
++ CLASSDVOL = 0x11f,
++ VOICEDACAVOL = 0x120,
++ AUDDACAVOL = 0x121,
++ LOMUTEVOL = 0x122,
++ HPLVOL = 0x123,
++ HPRVOL = 0x124,
++ MONOVOL = 0x125,
++ LINEOUTMIXVOL = 0x126,
++ EPMIXVOL = 0x127,
++ LINEOUTLSEL = 0x128,
++ LINEOUTRSEL = 0x129,
++ EPMIXOUTSEL = 0x12a,
++ HPLMIXSEL = 0x12b,
++ HPRMIXSEL = 0x12c,
++ LOANTIPOP = 0x12d,
++};
++
++
++int nc_init_card(void)
++{
++ struct sc_reg_access sc_access[] = {
++ {VAUDIOCNT, 0x25, 0},
++ {VOICEPORT1, 0x00, 0},
++ {VOICEPORT2, 0x00, 0},
++ {AUDIOPORT1, 0x98, 0},
++ {AUDIOPORT2, 0x09, 0},
++ {AUDIOLVOL, 0x0e, 0},
++ {AUDIORVOL, 0x0e, 0},
++ {LMUTE, 0x03, 0},
++ {RMUTE, 0x03, 0},
++ {POWERCTRL1, 0x00, 0},
++ {POWERCTRL2, 0x00, 0},
++ {DRVPOWERCTRL, 0x00, 0},
++ {VREFPLL, 0x10, 0},
++ {HPLMIXSEL, 0xee, 0},
++ {HPRMIXSEL, 0xf6, 0},
++ {PCMBUFCTRL, 0x0, 0},
++ {VOICEVOL, 0x0e, 0},
++ {HPLVOL, 0x06, 0},
++ {HPRVOL, 0x06, 0},
++ {MICCTRL, 0x11, 0x00},
++ {ADCSAMPLERATE, 0x8B, 0x00},
++ {MICSELVOL, 0x5B, 0x00},
++ {LILSEL, 0x46, 0},
++ {LIRSEL, 0x06, 0},
++ {LOANTIPOP, 0x00, 0},
++ };
++ snd_pmic_ops_nc.card_status = SND_CARD_INIT_DONE;
++ sst_sc_reg_access(sc_access, PMIC_WRITE, 25);
++ snd_pmic_ops_nc.mute_status = 1;
++ printk(KERN_DEBUG "SST DBG:init complete!!\n");
++ return 0;
++}
++
++int nc_enable_audiodac(int value)
++{
++ struct sc_reg_access sc_access[3];
++ int mute_val = 0;
++
++ printk(KERN_DEBUG "SST DBG:+\
++ PMIC_SND_INPUT_MUTE_: value::%d\n", snd_pmic_ops_nc.mute_status );
++ if (snd_pmic_ops_nc.mute_status == MUTE)
++ return 0;
++
++ if ((snd_pmic_ops_nc.output_dev_id == INTERNAL_SPKR) &&
++ (value == UNMUTE))
++ return 0;
++ if (value == UNMUTE) {
++ /* unmute the system, set the 7th bit to zero */
++ mute_val = 0x00;
++ } else {
++ /* MUTE:Set the seventh bit */
++ mute_val = 0x04;
++
++ }
++ sc_access[0].reg_addr = LMUTE;
++ sc_access[1].reg_addr = RMUTE;
++ sc_access[0].mask = sc_access[1].mask = MASK2;
++ sc_access[0].value = sc_access[1].value = mute_val;
++
++ if (snd_pmic_ops_nc.num_channel == 1)
++ sc_access[1].value = 0x04;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++
++}
++
++
++int nc_power_up_pb(unsigned int port)
++{
++ struct sc_reg_access sc_access[7];
++ int retval = 0;
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++ if (port == 0xFF)
++ return 0;
++ nc_enable_audiodac(MUTE);
++ mdelay(30);
++
++ printk(KERN_DEBUG "SST DBG:powering up pb....\n");
++
++ sc_access[0].reg_addr = VAUDIOCNT;
++ sc_access[0].value = 0x27;
++ sc_access[0].mask = 0x27;
++ sc_access[1].reg_addr = VREFPLL;
++ if (port == 0) {
++ sc_access[1].value = 0x3A;
++ sc_access[1].mask = 0x3A;
++ } else if (port == 1) {
++ sc_access[1].value = 0x35;
++ sc_access[1].mask = 0x35;
++ }
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++
++
++
++ sc_access[0].reg_addr = POWERCTRL1;
++ if (port == 0) {
++ sc_access[0].value = 0x40;
++ sc_access[0].mask = 0x40;
++ } else if (port == 1) {
++ sc_access[0].value = 0x01;
++ sc_access[0].mask = 0x01;
++ }
++ sc_access[1].reg_addr = POWERCTRL2;
++ sc_access[1].value = 0x0C;
++ sc_access[1].mask = 0x0C;
++
++ sc_access[2].reg_addr = DRVPOWERCTRL;
++ sc_access[2].value = 0x86;
++ sc_access[2].mask = 0x86;
++
++ sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
++
++ mdelay(30);
++
++ return nc_enable_audiodac(UNMUTE);
++
++}
++
++int nc_power_up_cp(unsigned int port)
++{
++ struct sc_reg_access sc_access[5];
++ int retval = 0;
++
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++
++
++ printk(KERN_DEBUG "SST DBG:powering up cp....\n");
++
++ if (port == 0xFF)
++ return 0;
++ sc_access[0].reg_addr = VAUDIOCNT;
++ sc_access[0].value = 0x27;
++ sc_access[0].mask = 0x27;
++ sc_access[1].reg_addr = VREFPLL;
++ if (port == 0) {
++ sc_access[1].value = 0x3E;
++ sc_access[1].mask = 0x3E;
++ } else if (port == 1) {
++ sc_access[1].value = 0x35;
++ sc_access[1].mask = 0x35;
++ }
++
++ retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++
++
++ sc_access[0].reg_addr = POWERCTRL1;
++ if (port == 0) {
++ sc_access[0].value = 0xB4;
++ sc_access[0].mask = 0xB4;
++ } else if (port == 1) {
++ sc_access[0].value = 0xBF;
++ sc_access[0].mask = 0xBF;
++ }
++ sc_access[1].reg_addr = POWERCTRL2;
++ if (port == 0) {
++ sc_access[1].value = 0x0C;
++ sc_access[1].mask = 0x0C;
++ } else if (port == 1) {
++ sc_access[1].value = 0x02;
++ sc_access[1].mask = 0x02;
++ }
++
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++
++}
++int nc_power_down(void)
++{
++ int retval = 0;
++ struct sc_reg_access sc_access[5];
++
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++ nc_enable_audiodac(MUTE);
++
++
++ printk(KERN_DEBUG "SST DBG:powering dn nc_power_down ....\n");
++
++
++
++ mdelay(30);
++
++ sc_access[0].reg_addr = DRVPOWERCTRL;
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = 0x00;
++
++ sst_sc_reg_access(sc_access, PMIC_WRITE, 1);
++
++ sc_access[0].reg_addr = POWERCTRL1;
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = 0x00;
++
++ sc_access[1].reg_addr = POWERCTRL2;
++ sc_access[1].value = 0x00;
++ sc_access[1].mask = 0x00;
++
++
++
++ sst_sc_reg_access(sc_access, PMIC_WRITE, 2);
++
++ mdelay(30);
++ sc_access[0].reg_addr = VREFPLL;
++ sc_access[0].value = 0x10;
++ sc_access[0].mask = 0x10;
++
++ sc_access[1].reg_addr = VAUDIOCNT;
++ sc_access[1].value = 0x25;
++ sc_access[1].mask = 0x25;
++
++
++ retval = sst_sc_reg_access(sc_access, PMIC_WRITE, 2);
++
++ mdelay(30);
++ return nc_enable_audiodac(UNMUTE);
++}
++int nc_power_down_pb(void)
++{
++
++ int retval = 0;
++ struct sc_reg_access sc_access[5];
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++
++ printk(KERN_DEBUG "SST DBG:powering dn pb....\n");
++
++ nc_enable_audiodac(MUTE);
++
++
++ mdelay(30);
++
++
++ sc_access[0].reg_addr = DRVPOWERCTRL;
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = 0x00;
++
++ sst_sc_reg_access(sc_access, PMIC_WRITE, 1);
++
++ mdelay(30);
++
++ sc_access[0].reg_addr = POWERCTRL1;
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = 0x41;
++
++ sc_access[1].reg_addr = POWERCTRL2;
++ sc_access[1].value = 0x00;
++ sc_access[1].mask = 0x0C;
++
++ sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
++
++ mdelay(30);
++
++ return nc_enable_audiodac(UNMUTE);
++
++
++}
++
++int nc_power_down_cp(void)
++{
++ struct sc_reg_access sc_access[] = {
++ {POWERCTRL1, 0x00, 0xBF},
++ {POWERCTRL2, 0x00, 0x02},
++ };
++ int retval = 0;
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++
++ printk(KERN_DEBUG "SST DBG:powering dn cp....\n");
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++}
++
++int nc_set_pcm_voice_params(void)
++{
++ struct sc_reg_access sc_access[] = {
++ {0x100, 0xD5, 0},
++ {0x101, 0x08, 0},
++ {0x104, 0x03, 0},
++ {0x107, 0x10, 0},
++ {0x10B, 0x0E, 0},
++ {0x10E, 0x03, 0},
++ {0x10F, 0x03, 0},
++ {0x114, 0x13, 0},
++ {0x115, 0x00, 0},
++ {0x128, 0xFE, 0},
++ {0x129, 0xFE, 0},
++ {0x12A, 0xFE, 0},
++ {0x12B, 0xDE, 0},
++ {0x12C, 0xDE, 0},
++ };
++ int retval = 0;
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++
++ sst_sc_reg_access(sc_access, PMIC_WRITE, 14);
++ printk(KERN_DEBUG "SST DBG:Voice parameters set successfully!!\n");
++ return 0;
++}
++int nc_audio_init(void)
++{
++ struct sc_reg_access sc_acces, sc_access[] = {
++ {0x100, 0x00, 0},
++ {0x101, 0x00, 0},
++ {0x104, 0x8B, 0},
++ {0x107, 0x11, 0},
++ {0x10B, 0x0E, 0},
++ {0x10E, 0x03, 0},
++ {0x10F, 0x03, 0},
++ {0x114, 0x00, 0},
++ {0x115, 0x00, 0},
++ {0x128, 0x00, 0},
++ {0x129, 0x00, 0},
++ {0x12A, 0x00, 0},
++ {0x12B, 0xee, 0},
++ {0x12C, 0xf6, 0},
++ };
++
++ sst_sc_reg_access(sc_access, PMIC_WRITE, 14);
++ printk(KERN_DEBUG "SST DBG:Audio Init successfully!!\n");
++ if(snd_pmic_ops_nc.num_channel == 1) {
++ sc_acces.value = 0x07;
++ sc_acces.reg_addr = RMUTE;
++ printk(KERN_DEBUG "SST DBG:PMIC_SND_RIGHT_HP_MUTE:: value::%d\n",
++ sc_acces.value);
++ sc_acces.mask = MASK2;
++ sst_sc_reg_access(&sc_acces, PMIC_READ_MODIFY, 1);
++ }
++ else {
++ sc_acces.value = 0x00;
++ sc_acces.reg_addr = RMUTE;
++ printk(KERN_DEBUG "SST DBG:PMIC_SND_RIGHT_HP_MUTE:: value::%d\n",
++ sc_acces.value);
++ sc_acces.mask = MASK2;
++ sst_sc_reg_access(&sc_acces, PMIC_READ_MODIFY, 1);
++ }
++
++ return 0;
++}
++
++int nc_set_pcm_audio_params(int sfreq, int word_size, int num_channel)
++{
++ int config2 = 0;
++ struct sc_reg_access sc_access;
++ int retval = 0;
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++
++ switch (sfreq) {
++ case 8000:
++ config2 = 0x00;
++ break;
++ case 11025:
++ config2 = 0x01;
++ break;
++ case 12000:
++ config2 = 0x02;
++ break;
++ case 16000:
++ config2 = 0x03;
++ break;
++ case 22050:
++ config2 = 0x04;
++ break;
++ case 24000:
++ config2 = 0x05;
++ break;
++ case 32000:
++ config2 = 0x07;
++ break;
++ case 44100:
++ config2 = 0x08;
++ break;
++ case 48000:
++ config2 = 0x09;
++ break;
++ }
++
++ snd_pmic_ops_nc.num_channel = num_channel;
++ if(snd_pmic_ops_nc.num_channel == 1)
++ {
++
++ sc_access.value = 0x07;
++ sc_access.reg_addr = RMUTE;
++ printk(KERN_DEBUG "SST DBG:PMIC_SND_RIGHT_HP_MUTE:: value::%d\n",
++ sc_access.value);
++ sc_access.mask = MASK2;
++ sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
++ }
++ else
++ {
++ sc_access.value = 0x00;
++ sc_access.reg_addr = RMUTE;
++ printk(KERN_DEBUG "SST DBG:PMIC_SND_RIGHT_HP_MUTE:: value::%d\n",
++ sc_access.value);
++ sc_access.mask = MASK2;
++ sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
++
++
++ }
++
++ printk(KERN_DEBUG "SST DBG:word_size = %d\n", word_size);
++
++ if(word_size == 24) {
++ sc_access.reg_addr = AUDIOPORT2;
++ sc_access.value = config2 |0x10;
++ sc_access.mask = 0x1F;
++ }
++ else {
++ sc_access.value = config2;
++ sc_access.mask = 0x1F;
++ sc_access.reg_addr = AUDIOPORT2;
++ }
++ sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
++
++ printk(KERN_DEBUG "SST DBG:word_size = %d\n", word_size);
++ sc_access.reg_addr = AUDIOPORT1;
++ sc_access.mask = MASK5|MASK4|MASK1|MASK0;
++ if (word_size == 16)
++ sc_access.value = 0x98;
++ else if (word_size == 24)
++ sc_access.value = 0xAB;
++
++ return sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
++
++
++
++}
++
++int nc_set_audio_port(int status)
++{
++ struct sc_reg_access sc_access[2] = {{0,},};
++ int retval = 0;
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++
++ if (status == DEACTIVATE) {
++ /* Deactivate audio port-tristate and power */
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = MASK4|MASK5;
++ sc_access[0].reg_addr = AUDIOPORT1;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++ } else if (status == ACTIVATE) {
++ /* activate audio port */
++ nc_audio_init();
++ sc_access[0].value = 0x10;
++ sc_access[0].mask = MASK4|MASK5 ;
++ sc_access[0].reg_addr = AUDIOPORT1;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++ } else
++ return -EINVAL;
++
++}
++
++int nc_set_voice_port(int status)
++{
++ struct sc_reg_access sc_access[2] = {{0,},};
++ int retval = 0;
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++
++ if (status == DEACTIVATE) {
++ /* Activate Voice port */
++ sc_access[0].value = 0x00;
++ sc_access[0].mask = MASK4;
++ sc_access[0].reg_addr = VOICEPORT1;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++ } else if (status == ACTIVATE) {
++ /* Deactivate voice port */
++ nc_set_pcm_voice_params();
++ sc_access[0].value = 0x10;
++ sc_access[0].mask = MASK4;
++ sc_access[0].reg_addr = VOICEPORT1;
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++ } else
++ return -EINVAL;
++}
++int nc_set_selected_output_dev(u8 value)
++{
++ struct sc_reg_access sc_acces, sc_access_HP[] = {
++ {LMUTE, 0x02, 0x06},
++ {RMUTE, 0x02, 0x06}
++ };
++ struct sc_reg_access sc_access_IS[] = {
++ {LMUTE, 0x04, 0x06},
++ {RMUTE, 0x04, 0x06}
++ };
++ int retval = 0;
++
++ snd_pmic_ops_nc.output_dev_id = value;
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++ printk(KERN_DEBUG "SST DBG:nc set selected output:%d \n", value);
++ switch (value) {
++ case STEREO_HEADPHONE:
++ retval = sst_sc_reg_access(sc_access_HP, PMIC_WRITE, 2);
++ if(snd_pmic_ops_nc.num_channel == 1) {
++ sc_acces.value = 0x07;
++ sc_acces.reg_addr = RMUTE;
++ printk(KERN_DEBUG "SST DBG:PMIC_SND_RIGHT_HP_MUTE:: value::%d\n",
++ sc_acces.value);
++ sc_acces.mask = MASK2;
++ retval = sst_sc_reg_access(&sc_acces, PMIC_READ_MODIFY, 1);
++ }
++ break;
++ case INTERNAL_SPKR:
++ return sst_sc_reg_access(sc_access_IS, PMIC_WRITE, 2);
++ default:
++ printk(KERN_ERR "SST ERR: +\
++ rcvd illegal request: %d \n", value);
++ return -EINVAL;
++ }
++ return retval;
++}
++
++int nc_set_mute(int dev_id, u8 value)
++{
++ struct sc_reg_access sc_access[3];
++ u8 mute_val, cap_mute;
++ int retval = 0;
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++
++ printk(KERN_DEBUG "SST DBG:set device id::%d, +\
++ value %d\n", dev_id, value);
++
++ switch (dev_id) {
++ case PMIC_SND_MUTE_ALL:
++ printk(KERN_DEBUG "SST DBG:+\
++ PMIC_SND_MUTE_ALL: value::%d \n", value);
++ snd_pmic_ops_nc.mute_status = value;
++ if (value == UNMUTE) {
++ /* unmute the system, set the 7th bit to zero */
++ mute_val = cap_mute = 0x00;
++
++ } else {
++ /* MUTE:Set the seventh bit */
++ mute_val = 0x80;
++ cap_mute = 0x40;
++ }
++ sc_access[0].reg_addr = AUDIOLVOL;
++ sc_access[1].reg_addr = AUDIORVOL;
++ sc_access[0].mask = sc_access[1].mask = MASK7;
++ sc_access[0].value = sc_access[1].value = mute_val;
++ if(snd_pmic_ops_nc.num_channel == 1)
++ sc_access[1].value = 0x80;
++ if (!sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2)) {
++ sc_access[0].reg_addr = 0x109;
++ sc_access[1].reg_addr = 0x10a;
++ sc_access[2].reg_addr = 0x105;
++ sc_access[0].mask = sc_access[1].mask = sc_access[2].mask = MASK6;
++ sc_access[0].value = sc_access[1].value = sc_access[2].value = cap_mute;
++
++ if ((snd_pmic_ops_nc.input_dev_id == AMIC ) ||
++ (snd_pmic_ops_nc.input_dev_id == DMIC ))
++ sc_access[1].value = 0x40;
++ if (snd_pmic_ops_nc.input_dev_id == HS_MIC)
++ sc_access[0].value = 0x40;
++ return sst_sc_reg_access(sc_access,
++ PMIC_READ_MODIFY, 3);
++ }
++ break;
++ case PMIC_SND_HP_MIC_MUTE:
++ printk(KERN_DEBUG "SST DBG:+\
++ PMIC_SND_HPMIC_MUTE: value::%d\n", value);
++ if (value == UNMUTE) {
++ /* unmute the system, set the 6th bit to one */
++ sc_access[0].value = 0x00;
++ } else {
++ /* mute the system, reset the 6th bit to zero */
++ sc_access[0].value = 0x40;
++ }
++ sc_access[0].reg_addr = LIRSEL;
++ sc_access[0].mask = MASK6;
++ break;
++ case PMIC_SND_AMIC_MUTE:
++ printk(KERN_DEBUG "SST DBG:+\
++ PMIC_SND_AMIC_MUTE: value::%d\n", value);
++ if (value == UNMUTE) {
++ /* unmute the system, set the 6th bit to one */
++ sc_access[0].value = 0x00;
++ } else {
++ /* mute the system, reset the 6th bit to zero */
++ sc_access[0].value = 0x40;
++ }
++ sc_access[0].reg_addr = LILSEL;
++ sc_access[0].mask = MASK6;
++ break;
++
++ case PMIC_SND_DMIC_MUTE:
++ printk(KERN_DEBUG "SST DBG:+\
++ PMIC_SND_INPUT_MUTE_DMIC: value::%d\n", value);
++ if (value == UNMUTE) {
++ /* unmute the system, set the 6th bit to one */
++ sc_access[1].value = 0x00;
++ sc_access[0].value = 0x00;
++ } else {
++ /* mute the system, reset the 6th bit to zero */
++ sc_access[1].value = 0x40;
++ sc_access[0].value = 0x40;
++ }
++ sc_access[0].reg_addr = DMICCTRL1;
++ sc_access[0].mask = MASK6;
++ sc_access[1].reg_addr = LILSEL;
++ sc_access[1].mask = MASK6;
++ return sst_sc_reg_access(sc_access,
++ PMIC_READ_MODIFY, 2);
++ break;
++
++ case PMIC_SND_LEFT_HP_MUTE:
++ case PMIC_SND_RIGHT_HP_MUTE:
++ snd_pmic_ops_nc.mute_status = value;
++ if (value == UNMUTE)
++ sc_access[0].value = 0x0;
++ else
++ sc_access[0].value = 0x04;
++
++ if (dev_id == PMIC_SND_LEFT_HP_MUTE) {
++ sc_access[0].reg_addr = LMUTE;
++ printk(KERN_DEBUG "SST DBG:PMIC_SND_LEFT_HP_MUTE:: value::%d\n",
++ sc_access[0].value);
++ } else {
++ if(snd_pmic_ops_nc.num_channel == 1)
++ sc_access[0].value = 0x04;
++ sc_access[0].reg_addr = RMUTE;
++ printk(KERN_DEBUG "SST DBG:PMIC_SND_RIGHT_HP_MUTE:: value::%d\n",
++ sc_access[0].value);
++ }
++ sc_access[0].mask = MASK2;
++ break;
++ case PMIC_SND_LEFT_SPEAKER_MUTE:
++ case PMIC_SND_RIGHT_SPEAKER_MUTE:
++ if (value == UNMUTE)
++ sc_access[0].value = 0x00;
++ else
++ sc_access[0].value = 0x03;
++ sc_access[0].reg_addr = LMUTE;
++ printk(KERN_DEBUG "SST DBG:+\
++ PMIC_SND_SPEAKER_MUTE %d\n", sc_access[0].value);
++ sc_access[0].mask = MASK1;
++ break;
++ default:
++ printk(KERN_ERR "SST ERR: +\
++ Invalid Device_id \n");
++ return -EINVAL;
++ }
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
++
++}
++
++int nc_set_vol(int dev_id, u8 value)
++{
++ struct sc_reg_access sc_access;
++ int retval = 0;
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++
++ printk(KERN_DEBUG "SST DBG:set volume:%d\n", dev_id);
++ switch (dev_id) {
++ case PMIC_SND_CAPTURE_VOL:
++ printk(KERN_DEBUG "SST DBG:PMIC_SND_CAPTURE_VOL:: +\
++ value::%d \n", value);
++ sc_access.value = -value;
++ sc_access.reg_addr = LILSEL;
++ sc_access.mask = MASK0|MASK1|MASK2|MASK3|MASK4|MASK5;
++ break;
++
++ case PMIC_SND_LEFT_PB_VOL:
++ printk(KERN_DEBUG "SST DBG:+\
++ PMIC_SND_LEFT_HP_VOL:%d \n", value);
++ sc_access.value = -value;
++ sc_access.reg_addr = AUDIOLVOL;
++ sc_access.mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
++ break;
++
++ case PMIC_SND_RIGHT_PB_VOL:
++ printk(KERN_DEBUG "SST DBG:PMIC_SND_RIGHT_HP_VOL: +\
++ value::%d\n", value);
++ if(snd_pmic_ops_nc.num_channel == 1) {
++ sc_access.value = 0x04;
++ sc_access.reg_addr = RMUTE;
++ sc_access.mask = MASK2;
++ } else {
++ sc_access.value = -value;
++ sc_access.reg_addr = AUDIORVOL;
++ sc_access.mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
++ }
++ break;
++
++ default:
++ printk(KERN_ERR "SST ERR: +\
++ Invalid Device_id \n");
++ return -EINVAL;
++
++ }
++ /* sst_sc_read_modify(&reg_adrs, &difference, 1);*/
++ return sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
++}
++
++int nc_set_selected_input_dev(u8 value)
++{
++ struct sc_reg_access sc_access[6];
++ u8 num_val;
++ int retval = 0;
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++ snd_pmic_ops_nc.input_dev_id = value;
++
++ printk(KERN_DEBUG "SST DBG:nc set selected input:%d \n", value);
++
++ switch (value) {
++ case AMIC:
++ printk(KERN_DEBUG "SST DBG:Selecting AMIC\n");
++ sc_access[0].reg_addr = 0x107;
++ sc_access[0].value = 0x40;
++ sc_access[0].mask = MASK6|MASK4|MASK3|MASK1|MASK0;
++ sc_access[1].reg_addr = 0x10a;
++ sc_access[1].value = 0x40;
++ sc_access[1].mask = MASK6;
++ sc_access[2].reg_addr = 0x109;
++ sc_access[2].value = 0x00;
++ sc_access[2].mask = MASK6;
++ num_val = 3;
++ break;
++
++ case HS_MIC:
++ printk(KERN_DEBUG "SST DBG:+\
++ Selecting HS_MIC\n");
++ sc_access[0].reg_addr = 0x107;
++ sc_access[0].mask = MASK6|MASK4|MASK3|MASK1|MASK0;
++ sc_access[0].value = 0x10;
++ sc_access[1].reg_addr = 0x109;
++ sc_access[1].mask = MASK6;
++ sc_access[1].value = 0x40;
++ sc_access[2].reg_addr = 0x10a;
++ sc_access[2].mask = MASK6;
++ sc_access[2].value = 0x00;
++ num_val = 3;
++ break;
++
++ case DMIC:
++ printk(KERN_DEBUG "SST DBG:DMIC\n");
++ sc_access[0].reg_addr = 0x107;
++ sc_access[0].mask = MASK6|MASK4|MASK3|MASK1|MASK0;
++ sc_access[0].value = 0x0B;
++ sc_access[1].reg_addr = 0x105;
++ sc_access[1].value = 0x80;
++ sc_access[1].mask = MASK7|MASK6;
++ sc_access[2].reg_addr = 0x10a;
++ sc_access[2].value = 0x40;
++ sc_access[2].mask = MASK6;
++ num_val = 3;
++ break;
++ default:
++ printk(KERN_ERR "SST ERR:+\
++ rcvd illegal request: %d \n", value);
++ return -EINVAL;
++ }
++ return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, num_val);
++}
++
++int nc_get_mute(int dev_id, u8 *value)
++{
++ int retval = 0, mask = 0;
++ struct sc_reg_access sc_access = {0,};
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++
++ printk(KERN_DEBUG "SST DBG:get mute::%d\n", dev_id);
++
++ switch (dev_id) {
++ case PMIC_SND_MUTE_ALL:
++ printk(KERN_DEBUG "SST DBG:PMIC_SND_INPUT_MASTER_MUTE: +\
++ value::%d\n", *value);
++ sc_access.reg_addr = AUDIOLVOL;
++ sc_access.mask = MASK7;
++ break;
++ case PMIC_SND_AMIC_MUTE:
++ printk(KERN_DEBUG "SST DBG:PMIC_SND_INPUT_MUTE_MIC1\n");
++ sc_access.reg_addr = LILSEL;
++ mask = MASK6;
++ break;
++ case PMIC_SND_HP_MIC_MUTE:
++ printk(KERN_DEBUG "SST DBG:PMIC_SND_INPUT_MUTE_MIC2\n");
++ sc_access.reg_addr = LIRSEL;
++ mask = MASK6;
++ break;
++ case PMIC_SND_LEFT_HP_MUTE:
++ case PMIC_SND_RIGHT_HP_MUTE:
++ mask = MASK2;
++ printk(KERN_DEBUG "SST DBG:PMIC_SN_LEFT/RIGHT_HP_MUTE\n");
++ if (dev_id == PMIC_SND_RIGHT_HP_MUTE)
++ sc_access.reg_addr = RMUTE;
++ else
++ sc_access.reg_addr = LMUTE;
++ break;
++
++ case PMIC_SND_LEFT_SPEAKER_MUTE:
++ printk(KERN_DEBUG "SST DBG:PMIC_MONO_EARPIECE_MUTE\n");
++ sc_access.reg_addr = RMUTE;
++ mask = MASK1;
++ break;
++ case PMIC_SND_DMIC_MUTE:
++ printk(KERN_DEBUG "SST DBG:PMIC_SND_INPUT_MUTE_DMIC\n");
++ sc_access.reg_addr = 0x105;
++ mask = MASK6;
++ break;
++ default:
++ printk(KERN_ERR "SST ERR: +\
++ Invalid Device_id \n");
++ return -EINVAL;
++
++ }
++ retval = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
++ printk(KERN_DEBUG "SST DBG:reg value = %d\n", sc_access.value);
++ if (retval)
++ return retval;
++ *value = (sc_access.value) & mask;
++ printk(KERN_DEBUG "SST DBG:masked value = %d\n", *value);
++ if (*value)
++ *value = 0;
++ else
++ *value = 1;
++ printk(KERN_DEBUG "SST DBG:value returned = 0x%x\n", *value);
++ return retval;
++}
++
++int nc_get_vol(int dev_id, u8 *value)
++{
++ int retval = 0, mask = 0;
++ struct sc_reg_access sc_access = {0,};
++
++ if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
++ retval = nc_init_card();
++ if (retval)
++ return retval;
++
++ switch (dev_id) {
++ case PMIC_SND_CAPTURE_VOL:
++ printk(KERN_DEBUG "SST DBG:PMIC_SND_INPUT_CAPTURE_VOL\n");
++ sc_access.reg_addr = LILSEL;
++ mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
++ break;
++
++ case PMIC_SND_RIGHT_PB_VOL:
++ printk(KERN_DEBUG "SST DBG:GET_VOLUME_PMIC_LEFT_HP_VOL\n");
++ sc_access.reg_addr = AUDIOLVOL;
++ mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
++ break;
++
++ case PMIC_SND_LEFT_PB_VOL:
++ printk(KERN_DEBUG "SST DBG:GET_VOLUME_PMIC_RIGHT_HP_VOL\n");
++ sc_access.reg_addr = AUDIORVOL;
++ mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
++ break;
++
++ default:
++ printk(KERN_ERR "SST ERR: +\
++ Invalid Device_id = %d \n", dev_id);
++ return -EINVAL;
++
++ }
++ retval = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
++ printk(KERN_DEBUG "SST DBG:value read = 0x%x\n", sc_access.value);
++ *value = (sc_access.value) & mask;
++ *value = -*value;
++ printk(KERN_DEBUG "SST DBG:value returned = 0x%x\n", *value);
++ return retval;
++}
++
++struct snd_pmic_ops snd_pmic_ops_nc = {
++ .set_input_dev = nc_set_selected_input_dev,
++ .set_output_dev = nc_set_selected_output_dev,
++ .set_mute = nc_set_mute,
++ .get_mute = nc_get_mute,
++ .set_vol = nc_set_vol,
++ .get_vol = nc_get_vol,
++ .init_card = nc_init_card,
++ .set_pcm_audio_params = nc_set_pcm_audio_params,
++ .set_pcm_voice_params = nc_set_pcm_voice_params,
++ .set_voice_port = nc_set_voice_port,
++ .set_audio_port = nc_set_audio_port,
++ .power_up_pmic_pb = nc_power_up_pb,
++ .power_up_pmic_cp = nc_power_up_cp,
++ .power_down_pmic_pb = nc_power_down_pb,
++ .power_down_pmic_cp = nc_power_down_cp,
++ .power_down_pmic = nc_power_down,
++};
+--
+1.6.2.2
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-7-8.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-7-8.patch
new file mode 100644
index 0000000..557b67d
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-7-8.patch
@@ -0,0 +1,2391 @@
+From d88bb1ae711414e9bca4a23a7d6375cb4bad18f4 Mon Sep 17 00:00:00 2001
+From: R, Dharageswari <dharageswari.r@intel.com>
+Date: Thu, 29 Apr 2010 20:28:59 +0530
+Subject: [PATCH] ADR-Post-Beta-0.05.002.03-7/8-Moorestown Audio Drivers: sound card ALSA driver
+
+This adds support for Moorestown ALSA Sound card driver.
+This is an ALSA driver for supporting PCM playback/capture in
+traditional ALSA way. Anyone who chooses not to use DSP for
+decoding/encoding can use ALSA path to play/capture, but obvious loss will
+be power. This driver registers the control interface and PCM interface with
+the SST driver which finally sends it to the hardware. This driver allows any
+subsystem in OS which wants to use the audio-subsystems to be routed
+through the ALSA.The patch includes ALSA driver header file for handling
+mixer controls for Intel MAD chipset.This patch also includes enum additions to
+jack.h of ALSA Framework
+
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+
+ modified: include/sound/jack.h
+ new file: sound/pci/sst/intelmid.c
+ new file: sound/pci/sst/intelmid.h
+ new file: sound/pci/sst/intelmid_ctrl.c
+ new file: sound/pci/sst/intelmid_ctrl.h
+ new file: sound/pci/sst/intelmid_pvt.c
+Patch-mainline: 2.6.35?
+---
+ include/sound/jack.h | 2 +
+ sound/pci/sst/intelmid.c | 1205 +++++++++++++++++++++++++++++++++++++++++
+ sound/pci/sst/intelmid.h | 170 ++++++
+ sound/pci/sst/intelmid_ctrl.c | 555 +++++++++++++++++++
+ sound/pci/sst/intelmid_ctrl.h | 33 ++
+ sound/pci/sst/intelmid_pvt.c | 343 ++++++++++++
+ 6 files changed, 2308 insertions(+), 0 deletions(-)
+ create mode 100644 sound/pci/sst/intelmid.c
+ create mode 100644 sound/pci/sst/intelmid.h
+ create mode 100644 sound/pci/sst/intelmid_ctrl.c
+ create mode 100644 sound/pci/sst/intelmid_ctrl.h
+ create mode 100644 sound/pci/sst/intelmid_pvt.c
+
+diff --git a/include/sound/jack.h b/include/sound/jack.h
+index f236e42..791c550 100644
+--- a/include/sound/jack.h
++++ b/include/sound/jack.h
+@@ -42,6 +42,8 @@ enum snd_jack_types {
+ SND_JACK_MECHANICAL = 0x0008, /* If detected separately */
+ SND_JACK_VIDEOOUT = 0x0010,
+ SND_JACK_AVOUT = SND_JACK_LINEOUT | SND_JACK_VIDEOOUT,
++ SND_JACK_HS_SHORT_PRESS = SND_JACK_HEADSET | 0x0020,
++ SND_JACK_HS_LONG_PRESS = SND_JACK_HEADSET | 0x0040,
+ };
+
+ struct snd_jack {
+diff --git a/sound/pci/sst/intelmid.c b/sound/pci/sst/intelmid.c
+new file mode 100644
+index 0000000..c5a3b36
+--- /dev/null
++++ b/sound/pci/sst/intelmid.c
+@@ -0,0 +1,1205 @@
++/*
++ * intelmid.c - Intel Sound card driver for MID
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Harsha Priya <priya.harsha@intel.com>
++ * Vinod Koul <vinod.koul@intel.com>
++ * Dharageswari R <dharageswari.r@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * ALSA driver for Intel MID sound card chipset
++ */
++#include <linux/spi/spi.h>
++#include <linux/io.h>
++#include <linux/delay.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/moduleparam.h>
++#include <linux/sched.h>
++#include <sound/core.h>
++#include <sound/control.h>
++#include <sound/pcm.h>
++#include <sound/jack.h>
++#include <sound/pcm_params.h>
++#include <sound/info.h>
++#include <sound/initval.h>
++
++#include <sound/pcm-indirect.h>
++#include <sound/intel_lpe.h>
++#include <sound/intel_sst_ioctl.h>
++/* #include <net/netlink.h>
++#include <net/genetlink.h> */
++
++#include "intelmid_snd_control.h"
++#include "intelmid.h"
++#include "intelmid_ctrl.h"
++
++MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
++MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
++MODULE_DESCRIPTION("Intel MAD Sound card driver");
++MODULE_LICENSE("GPL v2");
++MODULE_SUPPORTED_DEVICE("{Intel,Intel_MAD}");
++
++
++static int card_index = SNDRV_DEFAULT_IDX1;/* Index 0-MAX */
++static char *card_id = SNDRV_DEFAULT_STR1; /* ID for this card */
++
++module_param(card_index, int, 0444);
++MODULE_PARM_DESC(card_index, "Index value for INTELMAD soundcard.");
++module_param(card_id, charp, 0444);
++MODULE_PARM_DESC(card_id, "ID string for INTELMAD soundcard.");
++
++int sst_card_vendor_id;
++int audio_interrupt_enable = 0;
++
++/* Data path functionalities */
++static struct snd_pcm_hardware snd_intelmad_stream = {
++ .info = (SNDRV_PCM_INFO_INTERLEAVED |
++ SNDRV_PCM_INFO_DOUBLE |
++ SNDRV_PCM_INFO_PAUSE |
++ SNDRV_PCM_INFO_RESUME |
++ SNDRV_PCM_INFO_MMAP|
++ SNDRV_PCM_INFO_MMAP_VALID |
++ /* SNDRV_PCM_INFO_BATCH | */
++ SNDRV_PCM_INFO_BLOCK_TRANSFER |
++ SNDRV_PCM_INFO_SYNC_START),
++ .formats = (SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_U16 |
++ /* SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U8 | */
++ SNDRV_PCM_FMTBIT_S24 | SNDRV_PCM_FMTBIT_U24),
++ .rates = (SNDRV_PCM_RATE_8000|
++ SNDRV_PCM_RATE_44100 |
++ SNDRV_PCM_RATE_48000),
++ .rate_min = MIN_RATE,
++
++ .rate_max = MAX_RATE,
++ .channels_min = MIN_CHANNEL,
++ .channels_max = MAX_CHANNEL,
++ .buffer_bytes_max = MAX_BUFFER,
++ .period_bytes_min = MIN_PERIOD_BYTES,
++ .period_bytes_max = MAX_PERIOD_BYTES,
++ .periods_min = MIN_PERIODS,
++ .periods_max = MAX_PERIODS,
++ .fifo_size = FIFO_SIZE,
++};
++
++static int snd_intelmad_pcm_ack(struct snd_pcm_substream *substream)
++{
++ struct mad_stream_pvt *stream;
++ struct snd_pcm_indirect *rec;
++
++ WARN_ON(!substream);
++ WARN_ON(!substream->runtime);
++
++ stream = substream->runtime->private_data;
++ WARN_ON(!stream);
++
++// printk(KERN_DEBUG "SST DBG:called %d\n", stream->stream_status);
++ if (stream->stream_status != INIT) {
++
++ rec = &stream->pcm_indirect;
++ if (substream->stream == STREAM_OPS_PLAYBACK) {
++// printk(KERN_DEBUG "SST DBG:calling indirect playback transfer\n");
++ snd_pcm_indirect_playback_transfer(substream, rec,
++ send_buffer_to_sst);
++ } else if (substream->stream == STREAM_OPS_CAPTURE) {
++// printk(KERN_DEBUG "SST DBG:calling indirect capture transfer\n");
++ snd_pcm_indirect_capture_transfer(substream, rec,
++ send_buffer_to_sst);
++ }
++
++ stream->stream_status = RUNNING;
++ }
++ return 0;
++}
++
++/**
++* snd_intelmad_pcm_trigger - stream activities are handled here
++* @substream:substream for which the stream function is called
++*@cmd:the stream commamd thats requested from upper layer
++* This function is called whenever an a stream activity is invoked
++*/
++static int snd_intelmad_pcm_trigger(struct snd_pcm_substream *substream,
++ int cmd)
++{
++ int ret_val = 0;
++ struct snd_intelmad *intelmaddata;
++ struct mad_stream_pvt *stream;
++ struct stream_buffer buffer_to_sst;
++
++// printk(KERN_DEBUG "SST DBG:called\n");
++
++ WARN_ON(!substream);
++
++ intelmaddata = snd_pcm_substream_chip(substream);
++ stream = substream->runtime->private_data;
++
++ WARN_ON(!intelmaddata->sstdrv_ops);
++ WARN_ON(!intelmaddata->sstdrv_ops->scard_ops);
++
++ switch (cmd) {
++ case SNDRV_PCM_TRIGGER_START:
++ stream->substream = substream;
++/*
++ printk(KERN_DEBUG "SST DBG:pcm_size+\
++ =%d\n", snd_pcm_lib_buffer_bytes(substream));
++*/
++ stream->stream_status = STARTED;
++
++ if (substream->stream == STREAM_OPS_PLAYBACK)
++ snd_intelmad_pcm_ack(substream);
++ else if (substream->stream == STREAM_OPS_CAPTURE) {
++ buffer_to_sst.length =
++ frames_to_bytes(substream->runtime,
++ substream->runtime->buffer_size);
++ buffer_to_sst.addr = (unsigned long)
++ substream->runtime->dma_area;
++ ret_val = intelmaddata->sstdrv_ops->send_buffer(
++ stream->stream_info.str_id,
++ &buffer_to_sst);
++ stream->dbg_cum_bytes +=
++ frames_to_bytes(substream->runtime,
++ substream->runtime->buffer_size);
++ stream->stream_status = RUNNING;
++ }
++ break;
++ case SNDRV_PCM_TRIGGER_STOP:
++// printk(KERN_DEBUG "SST DBG:in stop\n");
++ ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_DROP,
++ &stream->stream_info.str_id);
++ if (ret_val)
++ return ret_val;
++ stream->stream_status = DROPPED;
++ break;
++ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
++// printk(KERN_DEBUG "SST DBG:in pause\n");
++ ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_PAUSE,
++ &stream->stream_info.str_id);
++ if (ret_val)
++ return ret_val;
++ stream->stream_status = PAUSED;
++ break;
++ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
++// printk(KERN_DEBUG "SST DBG:in pause release \n");
++ ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_RESUME,
++ &stream->stream_info.str_id);
++ if (ret_val)
++ return ret_val;
++ stream->stream_status = RUNNING;
++ break;
++ default:
++ return -EINVAL;
++ }
++ return ret_val;
++}
++
++/**
++* snd_intelmad_pcm_prepare- internal preparation before starting a stream
++* @substream: substream for which the function is called
++* This function is called when a stream is started for internal preparation.
++*/
++static int snd_intelmad_pcm_prepare(struct snd_pcm_substream *substream)
++{
++ struct mad_stream_pvt *stream;
++ int ret_val = 0;
++ struct snd_intelmad *intelmaddata;
++
++ printk(KERN_DEBUG "SST DBG:called \n");
++
++ WARN_ON(!substream);
++ stream = substream->runtime->private_data;
++ intelmaddata = snd_pcm_substream_chip(substream);
++ printk(KERN_DEBUG "SST DBG:pb cnt = %d cap cnt = %d\n",\
++ intelmaddata->playback_cnt,
++ intelmaddata->capture_cnt);
++
++ if(stream->stream_info.str_id) {
++ printk(KERN_DEBUG "SST DBG:Prepare called for already set stream\n");
++ ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_DROP,
++ &stream->stream_info.str_id);
++
++ } else {
++ ret_val = snd_intelmad_alloc_stream(substream);
++ if (ret_val < 0)
++ return ret_val;
++ stream->dbg_cum_bytes = 0;
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++ intelmaddata->playback_cnt++;
++ else
++ intelmaddata->capture_cnt++;
++ printk(KERN_DEBUG "SST DBG:period size = %d \n",
++ (int)substream->runtime->period_size);
++ printk(KERN_DEBUG "SST DBG:buf size = %d \n",
++ (int)substream->runtime->buffer_size);
++ memset(&stream->pcm_indirect, 0, sizeof(stream->pcm_indirect));
++ stream->pcm_indirect.hw_buffer_size =
++ snd_pcm_lib_buffer_bytes(substream);
++ stream->pcm_indirect.sw_buffer_size =
++ stream->pcm_indirect.hw_buffer_size;
++ /* return back the stream id */
++ snprintf(substream->pcm->id, sizeof(substream->pcm->id),
++ "%d", stream->stream_info.str_id);
++ printk(KERN_DEBUG "SST DBG:stream id to user = %s\n", substream->pcm->id);
++ }
++ ret_val = snd_intelmad_init_stream(substream);
++ if (ret_val)
++ return ret_val;
++
++ return ret_val;
++}
++
++static int snd_intelmad_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *hw_params)
++{
++ int ret_val;
++
++ printk(KERN_DEBUG "SST DBG:called\n");
++ ret_val = snd_pcm_lib_malloc_pages(substream,
++ params_buffer_bytes(hw_params));
++ memset(substream->runtime->dma_area, 0,
++ params_buffer_bytes(hw_params));
++ return ret_val;
++}
++
++static int snd_intelmad_hw_free(struct snd_pcm_substream *substream)
++{
++ printk(KERN_DEBUG "SST DBG:called\n");
++ return snd_pcm_lib_free_pages(substream);
++}
++
++/**
++* snd_intelmad_pcm_pointer- to send the current buffer pointer processed by hw
++* @substream: substream for which the function is called
++* This function is called by ALSA framework to get the current hw buffer ptr
++* when a period is elapsed
++*/
++static snd_pcm_uframes_t snd_intelmad_pcm_pointer
++ (struct snd_pcm_substream *substream)
++{
++ /* struct snd_pcm_runtime *runtime = substream->runtime; */
++ struct mad_stream_pvt *stream;
++ struct snd_intelmad *intelmaddata;
++ int ret_val;
++ unsigned long buf_size;
++
++ WARN_ON(!substream);
++
++ intelmaddata = snd_pcm_substream_chip(substream);
++ stream = substream->runtime->private_data;
++ if (stream->stream_status == INIT)
++ return 0;
++
++ ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_BUFFER_POINTER,
++ &stream->stream_info);
++ if (ret_val) {
++ dev_err(&intelmaddata->spi->dev,\
++ "SST ERR: error code = 0x%x \n", ret_val);
++ return ret_val;
++ }
++/* printk(KERN_DEBUG "SST DBG:samples reported out 0x%llx \n",
++ stream->stream_info.buffer_ptr);
++ printk(KERN_DEBUG "SST DBG:Frame bits:: %d period_count :: %d \n",
++ (int)substream->runtime->frame_bits,
++ (int)substream->runtime->period_size);
++*/
++ if (substream->stream == STREAM_OPS_PLAYBACK) {
++ if(SNDRV_PCM_POS_XRUN == stream->stream_info.buffer_ptr)
++ return SNDRV_PCM_POS_XRUN;
++ }
++
++ buf_size = frames_to_bytes(substream->runtime,
++ stream->stream_info.buffer_ptr);
++
++// printk(KERN_DEBUG "SST DBG: bytes reported out = 0x%lx\n", buf_size);
++ if (buf_size > stream->dbg_cum_bytes)
++ dev_err(&intelmaddata->spi->dev, "SST ERR: excess reported \n");
++
++ if (substream->stream == STREAM_OPS_PLAYBACK)
++ return snd_pcm_indirect_playback_pointer(
++ substream, &stream->pcm_indirect, buf_size);
++ else
++ return snd_pcm_indirect_capture_pointer(
++ substream, &stream->pcm_indirect, buf_size);
++}
++
++/**
++* snd_intelmad_close- to free parameteres when stream is stopped
++* @substream: substream for which the function is called
++* This function is called by ALSA framework when stream is stopped
++*/
++static int snd_intelmad_close(struct snd_pcm_substream *substream)
++{
++ struct snd_intelmad *intelmaddata;
++ struct mad_stream_pvt *stream;
++ int ret_val = 0;
++
++ WARN_ON(!substream);
++
++ stream = substream->runtime->private_data;
++
++ printk(KERN_DEBUG "SST DBG:called \n");
++ intelmaddata = snd_pcm_substream_chip(substream);
++
++ printk(KERN_DEBUG "SST DBG:str id = %d\n", stream->stream_info.str_id);
++ if (stream->stream_info.str_id) {
++ /* SST API to actually stop/free the stream */
++ ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_FREE,
++ &stream->stream_info.str_id);
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++ intelmaddata->playback_cnt--;
++ else
++ intelmaddata->capture_cnt--;
++ }
++ printk(KERN_DEBUG "SST DBG:pb cnt = %d cap cnt = %d\n", intelmaddata->playback_cnt,
++ intelmaddata->capture_cnt);
++ kfree(substream->runtime->private_data);
++ return ret_val;
++}
++
++/**
++* snd_intelmad_open- to set runtime parameters during stream start
++* @substream: substream for which the function is called
++* This function is called by ALSA framework when stream is started
++*/
++static int snd_intelmad_open(struct snd_pcm_substream *substream)
++{
++ struct snd_intelmad *intelmaddata;
++ struct snd_pcm_runtime *runtime;
++ struct mad_stream_pvt *stream;
++
++ WARN_ON(!substream);
++
++ printk(KERN_DEBUG "SST DBG:called \n");
++
++ intelmaddata = snd_pcm_substream_chip(substream);
++ runtime = substream->runtime;
++ /* set the runtime hw parameter with local snd_pcm_hardware struct */
++ runtime->hw = snd_intelmad_stream;
++ /* setup the internal datastruture stream pointers based on it being
++ playback or capture stream */
++ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
++ if (!stream)
++ return -ENOMEM;
++ stream->stream_info.str_id = 0;
++ stream->stream_status = INIT;
++ runtime->private_data = stream;
++ return snd_pcm_hw_constraint_integer(runtime,
++ SNDRV_PCM_HW_PARAM_PERIODS);
++}
++
++static struct snd_pcm_ops snd_intelmad_playback_ops = {
++ .open = snd_intelmad_open,
++ .close = snd_intelmad_close,
++ .ioctl = snd_pcm_lib_ioctl,
++ .hw_params = snd_intelmad_hw_params,
++ .hw_free = snd_intelmad_hw_free,
++ .prepare = snd_intelmad_pcm_prepare,
++ .trigger = snd_intelmad_pcm_trigger,
++ .pointer = snd_intelmad_pcm_pointer,
++ .ack = snd_intelmad_pcm_ack,
++};
++
++static struct snd_pcm_ops snd_intelmad_capture_ops = {
++ .open = snd_intelmad_open,
++ .close = snd_intelmad_close,
++ .ioctl = snd_pcm_lib_ioctl,
++ .hw_params = snd_intelmad_hw_params,
++ .hw_free = snd_intelmad_hw_free,
++ .prepare = snd_intelmad_pcm_prepare,
++ .trigger = snd_intelmad_pcm_trigger,
++ .pointer = snd_intelmad_pcm_pointer,
++ .ack = snd_intelmad_pcm_ack,
++};
++
++
++#ifdef REG_IRQ
++/**
++* snd_intelmad_intr_handler- interrupt handler
++*@irq : irq number of the interrupt received
++*@dev: device context
++* This function is called when an interrupt is raised at the sound card
++*/
++static irqreturn_t snd_intelmad_intr_handler(int irq, void *dev)
++{
++ struct snd_intelmad *intelmaddata =
++ (struct snd_intelmad *)dev;
++ u8 intsts;
++
++ memcpy_fromio(&intsts,
++ ((void *)(intelmaddata->int_base)),
++ sizeof(u8));
++ intelmaddata->mad_jack_msg.intsts = intsts;
++ intelmaddata->mad_jack_msg.intelmaddata = intelmaddata;
++
++ queue_work(intelmaddata->mad_jack_wq, &intelmaddata->mad_jack_msg.wq);
++
++ return IRQ_HANDLED;
++}
++
++void sst_mad_send_jack_report(struct snd_jack *jack, int buttonpressevent , int status)
++{
++
++ if (!jack) {
++ printk(KERN_DEBUG "SST DBG:MAD error jack empty \n");
++
++ } else {
++ printk(KERN_DEBUG "SST DBG:MAD sending jack +\
++ report for = %d!!!\n", status);
++ if (jack)
++ printk(KERN_DEBUG "SST DBG:MAD sending +\
++ jack report for = %d !!!\n", jack->type);
++
++ snd_jack_report(jack, status);
++
++ /*button pressed and released */
++ if (buttonpressevent)
++ snd_jack_report(jack, 0);
++ printk(KERN_DEBUG "SST DBG:MAD sending jack report Done !!!\n");
++ }
++
++
++
++}
++
++void sst_mad_jackdetection_fs(u8 intsts , struct snd_intelmad *intelmaddata)
++{
++ struct snd_jack *jack = NULL;
++ unsigned int present = 0, jack_event_flag = 0, buttonpressflag = 0;
++ struct sc_reg_access sc_access[] = {
++ {0x187, 0x00, MASK7},
++ {0x188, 0x10, MASK4},
++ {0x18b, 0x10, MASK4},
++ };
++
++ struct sc_reg_access sc_access_write[] = {
++ {0x198, 0x00, 0x0},
++ };
++
++ if (intsts & 0x4) {
++
++ if (!(audio_interrupt_enable)) {
++ printk(KERN_DEBUG "SST DBG:Audio interrupt enable\n");
++ sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
++
++ sst_sc_reg_access(sc_access_write, PMIC_WRITE, 1);
++ audio_interrupt_enable = 1;
++ intelmaddata->jack[0].jack_status = 0;
++ intelmaddata->jack[1].jack_status = 0;
++
++ }
++ /* send headphone detect */
++ printk(KERN_DEBUG "SST DBG:MAD headphone +\
++ = %d!!!\n", intsts & 0x4);
++ jack = &intelmaddata->jack[0].jack;
++ present = !(intelmaddata->jack[0].jack_status);
++ intelmaddata->jack[0].jack_status = present;
++ jack_event_flag = 1;
++
++ }
++
++ if (intsts & 0x2) {
++ /* send short push */
++ printk(KERN_DEBUG "SST DBG:MAD short push +\
++ = %d!!!\n", intsts & 0x2);
++ jack = &intelmaddata->jack[2].jack;
++ present = 1;
++ jack_event_flag = 1;
++ buttonpressflag = 1;
++ }
++ if (intsts & 0x1) {
++ /* send long push */
++ printk(KERN_DEBUG "SST DBG:MAD long push+\
++ = %d!!!\n", intsts & 0x1);
++ jack = &intelmaddata->jack[3].jack;
++ present = 1;
++ jack_event_flag = 1;
++ buttonpressflag = 1;
++ }
++ if (intsts & 0x8) {
++ if (!(audio_interrupt_enable)) {
++ printk(KERN_DEBUG "SST DBG:Audio interrupt enable\n");
++ sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
++
++ sst_sc_reg_access(sc_access_write, PMIC_WRITE, 1);
++ audio_interrupt_enable = 1;
++ intelmaddata->jack[0].jack_status = 0;
++ intelmaddata->jack[1].jack_status = 0;
++ }
++ /* send headset detect */
++ printk(KERN_DEBUG "SST DBG:MAD headset +\
++ = %d!!!\n", intsts & 0x8);
++ jack = &intelmaddata->jack[1].jack;
++ present = !(intelmaddata->jack[1].jack_status);
++ intelmaddata->jack[1].jack_status = present;
++ jack_event_flag = 1;
++ }
++
++
++ if (jack_event_flag)
++ sst_mad_send_jack_report( jack, buttonpressflag, present);
++}
++
++
++void sst_mad_jackdetection_mx(u8 intsts, struct snd_intelmad *intelmaddata)
++{
++ u8 value = 0,jack_prev_state = 0;
++ struct snd_jack *jack = NULL;
++ unsigned int present = 0, jack_event_flag = 0, buttonpressflag = 0;
++ time_t timediff;
++ struct sc_reg_access sc_access_read = {0,};
++
++
++
++ printk(KERN_DEBUG "SST DBG:previos value: = 0x%x \n" ,intelmaddata->jack_prev_state);
++
++ if (!(audio_interrupt_enable)) {
++ printk(KERN_DEBUG "SST DBG:Audio interrupt enable\n");
++ intelmaddata->jack_prev_state = 0xC0;
++ audio_interrupt_enable = 1;
++ }
++
++ if (intsts & 0x2) {
++ jack_prev_state = intelmaddata->jack_prev_state;
++ if(intelmaddata->pmic_status == PMIC_INIT) {
++ sc_access_read.reg_addr = 0x201;
++ sst_sc_reg_access(&sc_access_read, PMIC_READ, 1);
++ value = (sc_access_read.value);
++ printk(KERN_DEBUG "value returned = 0x%x\n", value);
++ }
++
++ if ((jack_prev_state == 0xc0) && (value == 0x40) ) {
++ //headset detected.
++ printk(KERN_DEBUG "MAD headset inserted\n");
++ jack = &intelmaddata->jack[1].jack;
++ present= 1;
++ jack_event_flag = 1;
++ intelmaddata->jack[1].jack_status = 1;
++
++ }
++
++ if ((jack_prev_state == 0xc0 ) && ( value == 0x00) ) {
++ //headphone detected.
++ printk(KERN_DEBUG "MAD headphone inserted\n");
++ jack = &intelmaddata->jack[0].jack;
++ present= 1;
++ jack_event_flag = 1;
++
++ }
++
++ if ( (jack_prev_state == 0x40 ) && ( value == 0xc0) ) {
++ //headset removed
++ printk(KERN_DEBUG "Jack headset status %d\n",\
++ intelmaddata->jack[1].jack_status);
++ printk(KERN_DEBUG "MAD headset removed \n");
++ jack = &intelmaddata->jack[1].jack;
++ present= 0;
++ jack_event_flag = 1;
++ intelmaddata->jack[1].jack_status = 0;
++ }
++
++ if ( (jack_prev_state == 0x00 ) && ( value == 0xc0) ) {
++ //headphone detected.
++ printk(KERN_DEBUG "Jack headphone status %d\n",\
++ intelmaddata->jack[0].jack_status);
++ printk(KERN_DEBUG "MAD headphone removed\n");
++ jack = &intelmaddata->jack[0].jack;
++ present= 0;
++ jack_event_flag = 1;
++ }
++
++ if ( (jack_prev_state == 0x40 ) && (value == 0x00) ) {
++ //button pressed
++ do_gettimeofday(&intelmaddata->jack[1].buttonpressed);
++ printk(KERN_DEBUG "MAD button press detected n");
++ }
++
++
++ if( (jack_prev_state == 0x00 ) && ( value == 0x40) ) {
++ if ( intelmaddata->jack[1].jack_status ) {
++ //button pressed
++ do_gettimeofday(&intelmaddata->jack[1].buttonreleased);
++ /*button pressed */
++ printk(KERN_DEBUG "MAD Button Released detected time \n" );
++ timediff = intelmaddata->jack[1].buttonreleased.tv_sec -
++ intelmaddata->jack[1].buttonpressed.tv_sec;
++ buttonpressflag = 1;
++ if(timediff > 1) {
++ printk(KERN_DEBUG "MAD long press detected time \n" );
++ /* send headphone detect/undetect */
++ jack = &intelmaddata->jack[3].jack;
++ present= 1;
++ jack_event_flag = 1;
++ }
++ else {
++ printk(KERN_DEBUG "MAD short press detected time \n" );
++ /* send headphone detect/undetect */
++ jack = &intelmaddata->jack[2].jack;
++ present= 1;
++ jack_event_flag = 1;
++ }
++ }
++
++ }
++ intelmaddata->jack_prev_state = value ;
++
++ }
++
++ if (jack_event_flag)
++ sst_mad_send_jack_report( jack, buttonpressflag, present);
++}
++
++
++void sst_mad_jackdetection_nec(u8 intsts, struct snd_intelmad *intelmaddata)
++{
++ u8 value = 0;
++ struct snd_jack *jack = NULL;
++ unsigned int present = 0, jack_event_flag = 0, buttonpressflag = 0;
++
++ struct sc_reg_access sc_access_read = {0,};
++
++ if (intelmaddata->pmic_status == PMIC_INIT) {
++ sc_access_read.reg_addr = 0x132;
++ sst_sc_reg_access(&sc_access_read, PMIC_READ, 1);
++ value = (sc_access_read.value);
++ printk(KERN_DEBUG "SST DBG:value returned = 0x%x\n", value);
++ }
++ if (intsts & 0x1) {
++ printk(KERN_DEBUG "SST DBG:MAD headset detected\n");
++ /* send headset detect/undetect */
++ jack = &intelmaddata->jack[1].jack;
++ present = (value == 0x1) ? 1 : 0;
++ jack_event_flag = 1;
++ }
++ if (intsts & 0x2) {
++ printk(KERN_DEBUG "SST DBG:MAD headphone detected\n");
++ /* send headphone detect/undetect */
++ jack = &intelmaddata->jack[0].jack;
++ present = (value == 0x2) ? 1 : 0;
++ jack_event_flag = 1;
++ }
++ if (intsts & 0x4) {
++ printk(KERN_DEBUG "SST DBG:MAD short push detected\n");
++ /* send short push */
++ jack = &intelmaddata->jack[2].jack;
++ present = 1;
++ jack_event_flag = 1;
++ buttonpressflag = 1;
++ }
++ if (intsts & 0x8) {
++ printk(KERN_DEBUG "SST DBG:MAD long push detected\n");
++ /* send long push */
++ jack = &intelmaddata->jack[3].jack;
++ present = 1;
++ jack_event_flag = 1;
++ buttonpressflag = 1;
++ }
++
++ if (jack_event_flag)
++ sst_mad_send_jack_report( jack, buttonpressflag, present);
++
++
++}
++
++void sst_process_mad_jack_detection(struct work_struct *work)
++{
++ u8 intsts;
++ struct mad_jack_msg_wq *mad_jack_detect =
++ container_of(work, struct mad_jack_msg_wq, wq);
++
++ struct snd_intelmad *intelmaddata =
++ mad_jack_detect->intelmaddata;
++
++ intsts = mad_jack_detect->intsts;
++
++ switch (intelmaddata->sstdrv_ops->vendor_id) {
++ case SND_FS:
++ sst_mad_jackdetection_fs(intsts,intelmaddata);
++ break;
++ case SND_MX:
++ sst_mad_jackdetection_mx(intsts,intelmaddata);
++ break;
++ case SND_NC:
++ sst_mad_jackdetection_nec(intsts,intelmaddata);
++ break;
++ }
++}
++
++
++static int __devinit snd_intelmad_register_irq(
++ struct snd_intelmad *intelmaddata)
++{
++ int ret_val;
++ u32 regbase = AUDINT_BASE, regsize = 8;
++
++ printk(KERN_DEBUG "SST DBG:irq reg done, now mapping... regbase 0x%x, regsize 0x%x\n",
++ regbase, regsize);
++ intelmaddata->int_base = ioremap_nocache(regbase, regsize);
++ if (!intelmaddata->int_base)
++ dev_err(&intelmaddata->spi->dev, "SST ERR: +\
++ Mapping of cache failed \n");
++
++ /* interpret irq field */
++ printk(KERN_DEBUG "SST DBG:irq = 0x%x\n", intelmaddata->irq);
++ ret_val = request_irq(intelmaddata->irq,
++ snd_intelmad_intr_handler,
++ IRQF_SHARED, DRIVER_NAME,
++ intelmaddata);
++ if (ret_val)
++ dev_err(&intelmaddata->spi->dev, "SST ERR: cannot +\
++ register IRQ \n");
++ return ret_val;
++}
++
++/*static int __devinit snd_intelmad_register_netlink(void)
++{
++ int ret_val;
++
++ ret_val = genl_register_family(&audio_event_genl_family);
++ if (ret_val) {
++ printk(KERN_DEBUG "SST DBG:netlink registration failed\n");
++ return ret_val;
++ }
++ ret_val = genl_register_mc_group(&audio_event_genl_family,
++ &audio_event_mcgrp);
++ if (ret_val) {
++ printk(KERN_DEBUG "SST DBG:netlink +\
++ group registration failed\n");
++ genl_unregister_family(&audio_event_genl_family);
++ return ret_val;
++ }
++ return ret_val;
++}*/
++#endif
++
++static int __devinit snd_intelmad_sst_register(
++ struct snd_intelmad *intelmaddata)
++{
++ int ret_val;
++ struct sc_reg_access pmic_reg = {0,};
++
++ pmic_reg.reg_addr = 0;
++ ret_val = sst_sc_reg_access(&pmic_reg, PMIC_READ, 1);
++
++ if (ret_val)
++ return ret_val;
++
++ sst_card_vendor_id = pmic_reg.value & (MASK2|MASK1|MASK0);
++ printk(KERN_DEBUG "SST DBG:orginal reg n extrated vendor id = 0x%x %d\n",
++ pmic_reg.value, sst_card_vendor_id);
++ if (sst_card_vendor_id < 0 || sst_card_vendor_id > 2) {
++ dev_err(&intelmaddata->spi->dev, \
++ "SST ERR: vendor card not supported!! \n");
++ return -EIO;
++ }
++ intelmaddata->sstdrv_ops->module_name = SST_CARD_NAMES;
++ intelmaddata->sstdrv_ops->vendor_id = sst_card_vendor_id;
++ intelmaddata->sstdrv_ops->scard_ops =
++ intelmad_vendor_ops[sst_card_vendor_id];
++
++ /* registering with SST driver to get access to SST APIs to use */
++ ret_val = register_sst_card(intelmaddata->sstdrv_ops);
++ if (ret_val) {
++ dev_err(&intelmaddata->spi->dev, \
++ "SST ERR: sst card registration failed \n");
++ return ret_val;
++ }
++
++ sst_card_vendor_id = intelmaddata->sstdrv_ops->vendor_id;
++ intelmaddata->pmic_status = PMIC_UNINIT;
++ return ret_val;
++}
++
++/* Driver Init/exit functionalities */
++/**
++* snd_intelmad_pcm- to setup pcm for the card
++* @card: pointer to the sound card structure
++*@intelmaddata: pointer to internal context
++* This function is called from probe function to set up pcm params and functions
++*/
++static int __devinit snd_intelmad_pcm(struct snd_card *card,
++ struct snd_intelmad *intelmaddata)
++{
++ struct snd_pcm *pcm;
++ int i, ret_val = 0;
++ char name[32] = INTEL_MAD;
++
++ WARN_ON(!card);
++ WARN_ON(!intelmaddata);
++
++ for (i = 0; i < MAX_DEVICES; i++) {
++ ret_val = snd_pcm_new(card, name, i, PLAYBACK_COUNT,
++ CAPTURE_COUNT, &pcm);
++ if (ret_val)
++ break;
++ /* setup the ops for playback and capture streams */
++ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
++ &snd_intelmad_playback_ops);
++ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
++ &snd_intelmad_capture_ops);
++ /* setup private data which can be retrieved when required */
++ pcm->private_data = intelmaddata;
++ pcm->info_flags = 0;
++ strncpy(pcm->name, card->shortname, strlen(card->shortname));
++ /* allocate dma pages for ALSA stream operations */
++ snd_pcm_lib_preallocate_pages_for_all(pcm,
++ SNDRV_DMA_TYPE_CONTINUOUS,
++ snd_dma_continuous_data(GFP_KERNEL),
++ MIN_BUFFER, MAX_BUFFER);
++ }
++ return ret_val;
++}
++
++/**
++* snd_intelmad_jack- to setup jack settings of the card
++*@intelmaddata: pointer to internal context
++* This function is called from probe function to set up mixer controls
++*/
++static int __devinit snd_intelmad_jack(struct snd_intelmad *intelmaddata)
++{
++ struct snd_jack *jack;
++ int retval;
++
++ printk(KERN_DEBUG "SST DBG:called\n");
++ jack = &intelmaddata->jack[0].jack;
++ retval = snd_jack_new(intelmaddata->card, "Headphone",
++ SND_JACK_HEADPHONE, &jack);
++ if (retval < 0)
++ return retval;
++ snd_jack_report(jack, 0);
++
++ jack->private_data = jack;
++ intelmaddata->jack[0].jack = *jack;
++
++
++ jack = &intelmaddata->jack[1].jack;
++ retval = snd_jack_new(intelmaddata->card, "Headset",
++ SND_JACK_HEADSET, &jack);
++ if (retval < 0)
++ return retval;
++
++
++
++ jack->private_data = jack;
++ intelmaddata->jack[1].jack = *jack;
++
++
++ jack = &intelmaddata->jack[2].jack;
++ retval = snd_jack_new(intelmaddata->card, "Short Press",
++ SND_JACK_HS_SHORT_PRESS, &jack);
++ if (retval < 0)
++ return retval;
++
++
++ jack->private_data = jack;
++ intelmaddata->jack[2].jack = *jack;
++
++
++ jack = &intelmaddata->jack[3].jack;
++ retval = snd_jack_new(intelmaddata->card, "Long Press",
++ SND_JACK_HS_LONG_PRESS, &jack);
++ if (retval < 0)
++ return retval;
++
++
++ jack->private_data = jack;
++ intelmaddata->jack[3].jack = *jack;
++
++ return retval;
++}
++
++/**
++* snd_intelmad_mixer- to setup mixer settings of the card
++*@intelmaddata: pointer to internal context
++* This function is called from probe function to set up mixer controls
++*/
++static int __devinit snd_intelmad_mixer(struct snd_intelmad *intelmaddata)
++{
++ struct snd_card *card;
++ unsigned int idx;
++ int ret_val = 0;
++ char *mixername = "IntelMAD Controls";
++
++ WARN_ON(!intelmaddata);
++
++ card = intelmaddata->card;
++
++ strncpy(card->mixername, mixername, strlen(mixername));
++ /* add all widget controls and expose the same */
++ for (idx = 0; idx < MAX_CTRL; idx++) {
++ ret_val = snd_ctl_add(card,
++ snd_ctl_new1(&snd_intelmad_controls[idx],
++ intelmaddata));
++ printk(KERN_DEBUG "SST DBG:mixer[idx]=%d added \n", idx);
++ if (ret_val) {
++ dev_err(&intelmaddata->spi->dev, \
++ "SST ERR: adding of control +\
++ failed index = %d \n", idx);
++ break;
++ }
++ }
++ return ret_val;
++}
++
++/**
++* snd_intelmad_dev_free- to free device
++*@device: pointer to the device
++* This function is called when driver module is removed
++*/
++static int snd_intelmad_dev_free(struct snd_device *device)
++{
++ struct snd_intelmad *intelmaddata;
++
++ WARN_ON(!device);
++
++ intelmaddata = device->device_data;
++
++ printk(KERN_DEBUG "SST DBG:called\n");
++ snd_card_free(intelmaddata->card);
++ /*genl_unregister_family(&audio_event_genl_family);*/
++ unregister_sst_card(intelmaddata->sstdrv_ops);
++
++ /* free allocated memory for internal context */
++ destroy_workqueue(intelmaddata->mad_jack_wq);
++ kfree(intelmaddata->sstdrv_ops);
++ kfree(intelmaddata);
++ return 0;
++}
++
++/**
++* snd_intelmad_create- called from probe to create a snd device
++*@intelmaddata : pointer to the internal context
++*@card : pointer to the sound card
++* This function is called when driver module is started
++*/
++static int __devinit snd_intelmad_create(
++ struct snd_intelmad *intelmaddata,
++ struct snd_card *card)
++{
++ int ret_val;
++ static struct snd_device_ops ops = {
++ .dev_free = snd_intelmad_dev_free,
++ };
++
++ WARN_ON(!intelmaddata);
++ WARN_ON(!card);
++ /* ALSA api to register for the device */
++ ret_val = snd_device_new(card, SNDRV_DEV_LOWLEVEL, intelmaddata, &ops);
++ return ret_val;
++}
++
++/*********************************************************************
++ * SPI Functions
++ *********************************************************************/
++
++
++/**
++* snd_intelmad_probe- function registred for init
++*@spi : pointer to the spi device context
++* This function is called when the device is initialized
++*/
++int __devinit snd_intelmad_probe(struct spi_device *spi)
++{
++ struct snd_card *card;
++ int ret_val;
++ struct snd_intelmad *intelmaddata;
++
++ printk(KERN_DEBUG "SST DBG:called \n");
++
++ /* allocate memory for saving internal context and working */
++ intelmaddata = kzalloc(sizeof(*intelmaddata), GFP_KERNEL);
++ if (!intelmaddata)
++ return -ENOMEM;
++
++ /* allocate memory for LPE API set */
++ intelmaddata->sstdrv_ops = kzalloc(sizeof(struct intel_sst_card_ops),
++ GFP_KERNEL);
++ if (!intelmaddata->sstdrv_ops) {
++ dev_err(&intelmaddata->spi->dev, "SST ERR: +\
++ mem alloctn fail \n");
++ kfree(intelmaddata);
++ return -ENOMEM;
++ }
++
++ /* create a card instance with ALSA framework */
++ ret_val = snd_card_create(card_index, card_id, THIS_MODULE, 0, &card);
++ if (ret_val) {
++ dev_err(&intelmaddata->spi->dev, "SST +\
++ ERR: snd_card_create fail \n");
++ goto free_allocs;
++ }
++
++ intelmaddata->spi = spi;
++ intelmaddata->irq = spi->irq;
++ dev_set_drvdata(&spi->dev, intelmaddata);
++ intelmaddata->card = card;
++ intelmaddata->card_id = card_id;
++ intelmaddata->card_index = card_index;
++ intelmaddata->playback_cnt = intelmaddata->capture_cnt = 0;
++ strncpy(card->driver, INTEL_MAD, strlen(INTEL_MAD));
++ strncpy(card->shortname, INTEL_MAD, strlen(INTEL_MAD));
++
++
++ intelmaddata->sstdrv_ops->module_name = SST_CARD_NAMES;
++ /* registering with LPE driver to get access to SST APIs to use */
++ ret_val = snd_intelmad_sst_register(intelmaddata);
++ if (ret_val) {
++ dev_err(&intelmaddata->spi->dev,\
++ "SST ERR:+ snd_intelmad_sst_register failed \n");
++ goto free_allocs;
++ }
++
++ intelmaddata->pmic_status = PMIC_INIT;
++
++ ret_val = snd_intelmad_pcm(card, intelmaddata);
++ if (ret_val) {
++ dev_err(&intelmaddata->spi->dev,\
++ "SST ERR: snd_intelmad_pcm failed \n");
++ goto free_allocs;
++ }
++
++ ret_val = snd_intelmad_mixer(intelmaddata);
++ if (ret_val) {
++ dev_err(&intelmaddata->spi->dev,\
++ "SST ERR: snd_intelmad_mixer failed \n");
++ goto free_allocs;
++ }
++
++ ret_val = snd_intelmad_jack(intelmaddata);
++ if (ret_val) {
++ dev_err(&intelmaddata->spi->dev,\
++ "SST ERR: snd_intelmad_jack failed \n");
++ goto free_allocs;
++ }
++
++ /*create work queue for jack interrupt*/
++
++ INIT_WORK(&intelmaddata->mad_jack_msg.wq, \
++ sst_process_mad_jack_detection);
++
++ intelmaddata->mad_jack_wq = create_workqueue("sst_mad_jack_wq");
++ if (!intelmaddata->mad_jack_wq)
++ goto free_mad_jack_wq;
++
++#ifdef REG_IRQ
++ ret_val = snd_intelmad_register_irq(intelmaddata);
++ if (ret_val) {
++ dev_err(&intelmaddata->spi->dev,\
++ "SST ERR: snd_intelmad_register_irq fail \n");
++ goto free_allocs;
++ }
++ /*ret_val = snd_intelmad_register_netlink();
++ if (ret_val) {
++ printk(KERN_DEBUG "SST DBG:...complete\n");
++ return ret_val;
++ }*/
++#endif
++
++ /* internal function call to register device with ALSA */
++ ret_val = snd_intelmad_create(intelmaddata, card);
++ if (ret_val) {
++ dev_err(&intelmaddata->spi->dev,\
++ "SST ERR: snd_intelmad_create failed \n");
++ goto free_allocs;
++ }
++ card->private_data = &intelmaddata;
++ snd_card_set_dev(card, &spi->dev);
++ ret_val = snd_card_register(card);
++ if (ret_val) {
++ dev_err(&intelmaddata->spi->dev,\
++ "SST ERR: snd_card_register failed \n");
++ goto free_allocs;
++ }
++
++ printk(KERN_DEBUG "SST DBG:...complete\n");
++ return ret_val;
++
++free_mad_jack_wq:
++ destroy_workqueue(intelmaddata->mad_jack_wq);
++
++free_allocs:
++ /* TODO: unregister IRQ */
++ dev_err(&intelmaddata->spi->dev, "SST ERR: probe failed \n");
++ /* snd_card_free(card); */
++ kfree(intelmaddata->sstdrv_ops);
++ kfree(intelmaddata);
++ return ret_val;
++}
++
++
++/**
++* snd_intelmad_remove- function registred for exit
++*@spi : pointer to the spi device context
++* This function is called when the device is uninitialized
++*/
++static int snd_intelmad_remove(struct spi_device *spi)
++{
++ struct snd_intelmad *intelmaddata =
++ dev_get_drvdata(&spi->dev);
++ /*
++ * TODO:: de-register interrupt handler
++ */
++
++ if (intelmaddata) {
++ snd_card_free(intelmaddata->card);
++ /*genl_unregister_family(&audio_event_genl_family);*/
++ unregister_sst_card(intelmaddata->sstdrv_ops);
++ /* free allocated memory for internal context */
++ destroy_workqueue(intelmaddata->mad_jack_wq);
++ kfree(intelmaddata->sstdrv_ops);
++ kfree(intelmaddata);
++ }
++ return 0;
++}
++
++/*********************************************************************
++ * Driver initialization and exit
++ *********************************************************************/
++
++static struct spi_driver snd_intelmad_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .bus = &spi_bus_type,
++ .owner = THIS_MODULE,
++ },
++ .probe = snd_intelmad_probe,
++ .remove = __devexit_p(snd_intelmad_remove),
++};
++
++/*
++* alsa_card_intelmad_init- driver init function
++* This function is called when driver module is inserted
++*/
++static int __init alsa_card_intelmad_init(void)
++{
++ printk(KERN_DEBUG "SST DBG:called\n");
++ return spi_register_driver(&snd_intelmad_driver);
++}
++
++/**
++* alsa_card_intelmad_exit- driver exit function
++* This function is called when driver module is removed
++*/
++static void __exit alsa_card_intelmad_exit(void)
++{
++ printk(KERN_DEBUG "SST DBG:called\n");
++ spi_unregister_driver(&snd_intelmad_driver);
++}
++
++module_init(alsa_card_intelmad_init)
++module_exit(alsa_card_intelmad_exit)
++
+diff --git a/sound/pci/sst/intelmid.h b/sound/pci/sst/intelmid.h
+new file mode 100644
+index 0000000..235115e
+--- /dev/null
++++ b/sound/pci/sst/intelmid.h
+@@ -0,0 +1,170 @@
++/*
++ * intelmid.h - Intel Sound card driver for MID
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Harsha Priya <priya.harsha@intel.com>
++ * Vinod Koul <vinod.koul@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * ALSA driver header for Intel MAD chipset
++ */
++#ifndef __INTELMID_H
++#define __INTELMID_H
++
++#include <linux/time.h>
++
++#define DRIVER_NAME "pmic_audio"
++#define PMIC_SOUND_IRQ_TYPE_MASK (1 << 15)
++#define AUDINT_BASE (0xFFFFEFF8 + (6 * sizeof(u8)))
++#define REG_IRQ
++/* values #defined */
++/* will differ for different hw - to be taken from config */
++#define MAX_DEVICES 1
++#define MIN_RATE 8000
++#define MAX_RATE 48000
++#define MAX_BUFFER (128*1024) /* TBD for PCM */
++#define MIN_BUFFER (128*1024)
++#define MAX_PERIODS (1024)
++#define MIN_PERIODS 2
++#define MAX_PERIOD_BYTES MAX_BUFFER
++//#define MIN_PERIOD_BYTES 32
++#define MIN_PERIOD_BYTES 160
++#define MAX_MUTE 1
++#define MIN_MUTE 0
++#define MONO_CNTL 1
++#define STEREO_CNTL 2
++#define MIN_CHANNEL 1
++#define MAX_CHANNEL 2
++#define FIFO_SIZE 0 /* fifo not being used */
++#define INTEL_MAD "Intel MAD"
++#define MAX_CTRL 7
++#define MAX_VENDORS 3
++/* TODO +6 db */
++#define MAX_VOL 64
++/* TODO -57 db */
++#define MIN_VOL 0
++#define PLAYBACK_COUNT 1
++#define CAPTURE_COUNT 1
++
++extern int sst_card_vendor_id;
++
++struct mad_jack {
++ struct snd_jack jack;
++ int jack_status;
++ struct timeval buttonpressed;
++ struct timeval buttonreleased;
++};
++struct mad_jack_msg_wq {
++ u8 intsts;
++ struct snd_intelmad *intelmaddata;
++ struct work_struct wq;
++
++};
++struct snd_intelmad {
++ struct snd_card *card; /* ptr to the card details */
++ int card_index;/* card index */
++ char *card_id; /* card id */
++ struct intel_sst_card_ops *sstdrv_ops;/* ptr to sst driver ops */
++ struct spi_device *spi;
++ int irq;
++ int pmic_status;
++ void __iomem *int_base;
++ int output_sel;
++ int input_sel;
++ int master_mute;
++ struct mad_jack jack[4];
++ int playback_cnt;
++ int capture_cnt;
++ struct mad_jack_msg_wq mad_jack_msg;
++ struct workqueue_struct *mad_jack_wq;
++ u8 jack_prev_state;
++};
++
++struct snd_control_val {
++ int playback_vol_max;
++ int playback_vol_min;
++ int capture_vol_max;
++ int capture_vol_min;
++};
++
++struct mad_stream_pvt {
++ int stream_status;
++ int stream_ops;
++ struct snd_pcm_substream *substream;
++ struct snd_pcm_indirect pcm_indirect;
++ struct pcm_stream_info stream_info;
++ ssize_t dbg_cum_bytes;
++};
++
++enum mad_drv_status {
++ INIT = 1,
++ STARTED,
++ RUNNING,
++ PAUSED,
++ DROPPED,
++};
++
++enum mad_pmic_status {
++ PMIC_UNINIT = 1,
++ PMIC_INIT,
++};
++enum _widget_ctrl {
++ PLAYBACK_VOL = 1 ,
++ PLAYBACK_MUTE,
++ CAPTURE_VOL,
++ CAPTURE_MUTE,
++ OUTPUT_SEL,
++ INPUT_SEL,
++ MASTER_MUTE
++};
++
++/*enum {
++ AUDIO_GENL_ATTR_UNSPEC = 0,
++ AUDIO_GENL_ATTR_EVENT,
++ AUDIO_GENL_ATTR_MAX,
++};
++enum {
++ AUDIO_GENL_CMD_UNSPEC,
++ AUDIO_GENL_CMD_EVENT,
++ AUDIO_GENL_CMD_MAX,
++};
++
++enum eaudio_events {
++ AUDIO_EVENT_HP_DETECT,
++ AUDIO_EVENT_HS_DETECT,
++ AUDIO_EVENT_SHORT_PRESS,
++ AUDIO_EVENT_LONG_PRESS,
++ AUDIO_EVENT_COUNT,
++};
++
++struct audio_genl_event {
++ u32 orig;
++ enum eaudio_events event;
++};*/
++
++
++void period_elapsed(void *mad_substream);
++int snd_intelmad_alloc_stream(struct snd_pcm_substream *substream);
++int snd_intelmad_init_stream(struct snd_pcm_substream *substream);
++void send_buffer_to_sst(struct snd_pcm_substream *substream,
++ struct snd_pcm_indirect *rec, size_t bytes);
++int sst_sc_reg_access(struct sc_reg_access *sc_access,
++ int type, int num_val);
++
++
++#endif /* __INTELMID_H */
+diff --git a/sound/pci/sst/intelmid_ctrl.c b/sound/pci/sst/intelmid_ctrl.c
+new file mode 100644
+index 0000000..f778628
+--- /dev/null
++++ b/sound/pci/sst/intelmid_ctrl.c
+@@ -0,0 +1,555 @@
++/*
++ * intelmid_ctrl.c - Intel Sound card driver for MID
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Harsha Priya <priya.harsha@intel.com>
++ * Vinod Koul <vinod.koul@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * ALSA driver handling mixer controls for Intel MAD chipset
++ */
++#include <linux/spi/spi.h>
++#include <linux/io.h>
++#include <linux/delay.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/moduleparam.h>
++#include <linux/sched.h>
++#include <sound/core.h>
++#include <sound/control.h>
++#include <sound/pcm.h>
++#include <sound/jack.h>
++#include <sound/pcm_params.h>
++#include <sound/info.h>
++#include <sound/initval.h>
++#include <sound/pcm-indirect.h>
++#include <sound/intel_lpe.h>
++#include <sound/intel_sst_ioctl.h>
++#include "intelmid_snd_control.h"
++#include "intelmid.h"
++
++static char *out_names[] = {"Headphones",
++ "Internal speakers"};
++static char *in_names[] = {"HS_MIC",
++ "AMIC",
++ "DMIC"};
++
++struct snd_pmic_ops *intelmad_vendor_ops[MAX_VENDORS] = {
++ &snd_pmic_ops_fs,
++ &snd_pmic_ops_mx,
++ &snd_pmic_ops_nc
++};
++
++struct snd_control_val intelmad_ctrl_val[MAX_VENDORS] = {
++ {
++ .playback_vol_max = 63,
++ .playback_vol_min = 0,
++ .capture_vol_max = 63,
++ .capture_vol_min = 0,
++ },
++ {
++ .playback_vol_max = 0,
++ .playback_vol_min = -31,
++ .capture_vol_max = 0,
++ .capture_vol_min = -20,
++ },
++ {
++ .playback_vol_max = 0,
++ .playback_vol_min = -126,
++ .capture_vol_max = 0,
++ .capture_vol_min = -31,
++ },
++};
++
++/* control path functionalities */
++
++static inline int snd_intelmad_volume_info(struct snd_ctl_elem_info *uinfo,
++ int control_type, int max, int min)
++{
++ WARN_ON(!uinfo);
++
++ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
++ uinfo->count = control_type;
++ uinfo->value.integer.min = min;
++ uinfo->value.integer.max = max;
++ return 0;
++}
++
++/**
++* snd_intelmad_mute_info - provides information about the mute controls
++* @kcontrol: pointer to the control
++* @uinfo: pointer to the structure where the control's info need
++* to be filled
++* This function is called when a mixer application requests for control's info
++*/
++static int snd_intelmad_mute_info(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_info *uinfo)
++{
++ WARN_ON(!uinfo);
++ WARN_ON(!kcontrol);
++
++ /* set up the mute as a boolean mono control with min-max values */
++ uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
++ uinfo->count = MONO_CNTL;
++ uinfo->value.integer.min = MIN_MUTE;
++ uinfo->value.integer.max = MAX_MUTE;
++ return 0;
++}
++
++/**
++* snd_intelmad_capture_volume_info - provides info about the volume control
++* @kcontrol: pointer to the control
++* @uinfo: pointer to the structure where the control's info need
++* to be filled
++* This function is called when a mixer application requests for control's info
++*/
++static int snd_intelmad_capture_volume_info(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_info *uinfo)
++{
++ snd_intelmad_volume_info(uinfo, MONO_CNTL,
++ intelmad_ctrl_val[sst_card_vendor_id].capture_vol_max,
++ intelmad_ctrl_val[sst_card_vendor_id].capture_vol_min);
++ return 0;
++}
++
++/**
++* snd_intelmad_playback_volume_info - provides info about the volume control
++* @kcontrol: pointer to the control
++* @uinfo: pointer to the structure where the control's info need
++* to be filled
++* This function is called when a mixer application requests for control's info
++*/
++static int snd_intelmad_playback_volume_info(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_info *uinfo)
++{
++ snd_intelmad_volume_info(uinfo, STEREO_CNTL,
++ intelmad_ctrl_val[sst_card_vendor_id].playback_vol_max,
++ intelmad_ctrl_val[sst_card_vendor_id].playback_vol_min);
++ return 0;
++}
++
++/**
++* snd_intelmad_device_info - provides information about the devices available
++* @kcontrol: pointer to the control
++* @uinfo: pointer to the structure where the devices's info need
++* to be filled
++* This function is called when a mixer application requests for device's info
++*/
++static int snd_intelmad_device_info(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_info *uinfo)
++{
++ WARN_ON(!kcontrol);
++ WARN_ON(!uinfo);
++ /* setup device select as drop down controls with different values */
++ if (kcontrol->id.numid == OUTPUT_SEL)
++ uinfo->value.enumerated.items = ARRAY_SIZE(out_names);
++ else
++ uinfo->value.enumerated.items = ARRAY_SIZE(in_names);
++ uinfo->count = MONO_CNTL;
++ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
++
++ if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
++ uinfo->value.enumerated.item = 1;
++ if (kcontrol->id.numid == OUTPUT_SEL)
++ strncpy(uinfo->value.enumerated.name,
++ out_names[uinfo->value.enumerated.item],
++ strlen(out_names[uinfo->value.enumerated.item]));
++ else
++ strncpy(uinfo->value.enumerated.name,
++ in_names[uinfo->value.enumerated.item],
++ strlen(in_names[uinfo->value.enumerated.item]));
++ return 0;
++}
++
++/**
++* snd_intelmad_volume_get - gets the current volume for the control
++* @kcontrol: pointer to the control
++* @uval: pointer to the structure where the control's info need
++* to be filled
++* This function is called when .get function of a control is invoked from app
++*/
++static int snd_intelmad_volume_get(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *uval)
++{
++ int ret_val = 0, cntl_list[2] = {0,};
++ u8 value = 0;
++ struct snd_intelmad *intelmaddata;
++ struct snd_pmic_ops *scard_ops;
++
++ printk(KERN_DEBUG "SST DBG:called\n");
++
++ WARN_ON(!uval);
++ WARN_ON(!kcontrol);
++
++ intelmaddata = kcontrol->private_data;
++
++ WARN_ON(!intelmaddata->sstdrv_ops);
++
++ scard_ops = intelmaddata->sstdrv_ops->scard_ops;
++
++ WARN_ON(!scard_ops);
++
++ switch (kcontrol->id.numid) {
++ case PLAYBACK_VOL:
++ cntl_list[0] = PMIC_SND_RIGHT_PB_VOL;
++ cntl_list[1] = PMIC_SND_LEFT_PB_VOL;
++ break;
++
++ case CAPTURE_VOL:
++ cntl_list[0] = PMIC_SND_CAPTURE_VOL;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ ret_val = scard_ops->get_vol(cntl_list[0], &value);
++ uval->value.integer.value[0] = value;
++
++ if (ret_val)
++ return ret_val;
++
++ if (kcontrol->id.numid == PLAYBACK_VOL) {
++ ret_val = scard_ops->get_vol(cntl_list[1], &value);
++ uval->value.integer.value[1] = value;
++ }
++ return ret_val;
++}
++
++/**
++* snd_intelmad_mute_get - gets the current mute status for the control
++* @kcontrol: pointer to the control
++* @uval: pointer to the structure where the control's info need
++* to be filled
++* This function is called when .get function of a control is invoked from app
++*/
++static int snd_intelmad_mute_get(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *uval)
++{
++
++ int cntl_list = 0, ret_val = 0;
++ u8 value = 0;
++ struct snd_intelmad *intelmaddata;
++ struct snd_pmic_ops *scard_ops;
++
++ printk(KERN_DEBUG "SST DBG:called\n");
++
++ WARN_ON(!uval);
++ WARN_ON(!kcontrol);
++
++ intelmaddata = kcontrol->private_data;
++
++ WARN_ON(!intelmaddata->sstdrv_ops);
++
++ scard_ops = intelmaddata->sstdrv_ops->scard_ops;
++
++ WARN_ON(!scard_ops);
++
++ switch (kcontrol->id.numid) {
++ case PLAYBACK_MUTE:
++ if (intelmaddata->output_sel == STEREO_HEADPHONE)
++ cntl_list = PMIC_SND_LEFT_HP_MUTE;
++ else if (intelmaddata->output_sel == INTERNAL_SPKR)
++ cntl_list = PMIC_SND_LEFT_SPEAKER_MUTE;
++ break;
++
++ case CAPTURE_MUTE:
++ if (intelmaddata->input_sel == DMIC)
++ cntl_list = PMIC_SND_DMIC_MUTE;
++ else if (intelmaddata->input_sel == AMIC)
++ cntl_list = PMIC_SND_AMIC_MUTE;
++ else if (intelmaddata->input_sel == HS_MIC)
++ cntl_list = PMIC_SND_HP_MIC_MUTE;
++ break;
++ case MASTER_MUTE:
++ uval->value.integer.value[0] = intelmaddata->master_mute;
++ return 0;
++ default:
++ return -EINVAL;
++ }
++
++ ret_val = scard_ops->get_mute(cntl_list, &value);
++ uval->value.integer.value[0] = value;
++ return ret_val;
++}
++
++/**
++* snd_intelmad_volume_set - sets the volume control's info
++* @kcontrol: pointer to the control
++* @uval: pointer to the structure where the control's info is
++* available to be set
++* This function is called when .set function of a control is invoked from app
++*/
++static int snd_intelmad_volume_set(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *uval)
++{
++
++ int ret_val, cntl_list[2] = {0,};
++ struct snd_intelmad *intelmaddata;
++ struct snd_pmic_ops *scard_ops;
++
++ printk(KERN_DEBUG "SST DBG:volume set called:%ld %ld \n",
++ uval->value.integer.value[0],
++ uval->value.integer.value[1]);
++
++ WARN_ON(!uval);
++ WARN_ON(!kcontrol);
++
++ intelmaddata = kcontrol->private_data;
++
++ WARN_ON(!intelmaddata->sstdrv_ops);
++
++ scard_ops = intelmaddata->sstdrv_ops->scard_ops;
++
++ WARN_ON(!scard_ops);
++
++ switch (kcontrol->id.numid) {
++ case PLAYBACK_VOL:
++ cntl_list[0] = PMIC_SND_LEFT_PB_VOL;
++ cntl_list[1] = PMIC_SND_RIGHT_PB_VOL;
++ break;
++
++ case CAPTURE_VOL:
++ cntl_list[0] = PMIC_SND_CAPTURE_VOL;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ ret_val = scard_ops->set_vol(cntl_list[0],
++ uval->value.integer.value[0]);
++ if (ret_val)
++ return ret_val;
++
++ if (kcontrol->id.numid == PLAYBACK_VOL)
++ ret_val = scard_ops->set_vol(cntl_list[1],
++ uval->value.integer.value[1]);
++ return ret_val;
++}
++
++/**
++* snd_intelmad_mute_set - sets the mute control's info
++* @kcontrol: pointer to the control
++* @uval: pointer to the structure where the control's info is
++* available to be set
++* This function is called when .set function of a control is invoked from app
++*/
++static int snd_intelmad_mute_set(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *uval)
++{
++ int cntl_list[2] = {0,}, ret_val;
++ struct snd_intelmad *intelmaddata;
++ struct snd_pmic_ops *scard_ops;
++
++ printk(KERN_DEBUG "SST DBG:called\n");
++
++ WARN_ON(!uval);
++ WARN_ON(!kcontrol);
++
++ intelmaddata = kcontrol->private_data;
++
++ WARN_ON(!intelmaddata->sstdrv_ops);
++
++ scard_ops = intelmaddata->sstdrv_ops->scard_ops;
++
++ WARN_ON(!scard_ops);
++
++ kcontrol->private_value = uval->value.integer.value[0];
++
++ switch (kcontrol->id.numid) {
++ case PLAYBACK_MUTE:
++ if (intelmaddata->output_sel == STEREO_HEADPHONE) {
++ cntl_list[0] = PMIC_SND_LEFT_HP_MUTE;
++ cntl_list[1] = PMIC_SND_RIGHT_HP_MUTE;
++ } else if (intelmaddata->output_sel == INTERNAL_SPKR) {
++ cntl_list[0] = PMIC_SND_LEFT_SPEAKER_MUTE;
++ cntl_list[1] = PMIC_SND_RIGHT_SPEAKER_MUTE;
++ }
++ break;
++
++ case CAPTURE_MUTE:/*based on sel device mute the i/p dev*/
++ if (intelmaddata->input_sel == DMIC)
++ cntl_list[0] = PMIC_SND_DMIC_MUTE;
++ else if (intelmaddata->input_sel == AMIC)
++ cntl_list[0] = PMIC_SND_AMIC_MUTE;
++ else if (intelmaddata->input_sel == HS_MIC)
++ cntl_list[0] = PMIC_SND_HP_MIC_MUTE;
++ break;
++ case MASTER_MUTE:
++ cntl_list[0] = PMIC_SND_MUTE_ALL;
++ intelmaddata->master_mute = uval->value.integer.value[0];
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ ret_val = scard_ops->set_mute(cntl_list[0],
++ uval->value.integer.value[0]);
++ if (ret_val)
++ return ret_val;
++
++ if (kcontrol->id.numid == PLAYBACK_MUTE)
++ ret_val = scard_ops->set_mute(cntl_list[1],
++ uval->value.integer.value[0]);
++ return ret_val;
++}
++
++/**
++* snd_intelmad_device_get - get the device select control's info
++* @kcontrol: pointer to the control
++* @uval: pointer to the structure where the control's info is
++* to be filled
++* This function is called when .get function of a control is invoked from app
++*/
++static int snd_intelmad_device_get(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *uval)
++{
++ printk(KERN_DEBUG "SST DBG:called\n");
++
++ WARN_ON(!uval);
++ WARN_ON(!kcontrol);
++
++ uval->value.enumerated.item[0] = kcontrol->private_value;
++ return 0;
++}
++
++/**
++* snd_intelmad_device_set - set the device select control's info
++* @kcontrol: pointer to the control
++* @uval: pointer to the structure where the control's info is
++* available to be set
++* This function is called when .set function of a control is invoked from app
++*/
++static int snd_intelmad_device_set(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *uval)
++{
++ struct snd_intelmad *intelmaddata;
++ struct snd_pmic_ops *scard_ops;
++ int ret_val = 0, vendor, status;
++
++ printk(KERN_DEBUG "SST DBG:called\n");
++
++ WARN_ON(!uval);
++ WARN_ON(!kcontrol);
++ status = -1;
++
++ intelmaddata = kcontrol->private_data;
++
++ WARN_ON(!intelmaddata->sstdrv_ops);
++
++ scard_ops = intelmaddata->sstdrv_ops->scard_ops;
++
++ WARN_ON(!scard_ops);
++
++ /* store value with driver */
++ kcontrol->private_value = uval->value.enumerated.item[0];
++
++ switch (kcontrol->id.numid) {
++ case OUTPUT_SEL:
++ ret_val = scard_ops->set_output_dev(
++ uval->value.enumerated.item[0]);
++ intelmaddata->output_sel = uval->value.enumerated.item[0];
++ break;
++ case INPUT_SEL:
++ vendor = intelmaddata->sstdrv_ops->vendor_id;
++ if ((vendor == SND_MX) || (vendor == SND_FS )) {
++ if(uval->value.enumerated.item[0] == HS_MIC) {
++ status = 1;
++ intelmaddata->sstdrv_ops->control_set(SST_ENABLE_RX_TIME_SLOT, &status);
++ }
++ else {
++ status = 0;
++ intelmaddata->sstdrv_ops->control_set(SST_ENABLE_RX_TIME_SLOT, &status);
++ }
++ }
++ ret_val = scard_ops->set_input_dev(
++ uval->value.enumerated.item[0]);
++ intelmaddata->input_sel = uval->value.enumerated.item[0];
++ break;
++ default:
++ return -EINVAL;
++ }
++ kcontrol->private_value = uval->value.enumerated.item[0];
++ return ret_val;
++}
++
++struct snd_kcontrol_new snd_intelmad_controls[MAX_CTRL] __devinitdata = {
++{
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "PCM Playback Volume",
++ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
++ .info = snd_intelmad_playback_volume_info,
++ .get = snd_intelmad_volume_get,
++ .put = snd_intelmad_volume_set,
++ .private_value = 0,
++},
++{
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "PCM Playback Switch",
++ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
++ .info = snd_intelmad_mute_info,
++ .get = snd_intelmad_mute_get,
++ .put = snd_intelmad_mute_set,
++ .private_value = 0,
++},
++{
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "PCM Capture Volume",
++ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
++ .info = snd_intelmad_capture_volume_info,
++ .get = snd_intelmad_volume_get,
++ .put = snd_intelmad_volume_set,
++ .private_value = 0,
++},
++{
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "PCM Capture Switch",
++ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
++ .info = snd_intelmad_mute_info,
++ .get = snd_intelmad_mute_get,
++ .put = snd_intelmad_mute_set,
++ .private_value = 0,
++},
++{
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "PCM Playback Source",
++ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
++ .info = snd_intelmad_device_info,
++ .get = snd_intelmad_device_get,
++ .put = snd_intelmad_device_set,
++ .private_value = 0,
++},
++{
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "PCM Capture Source",
++ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
++ .info = snd_intelmad_device_info,
++ .get = snd_intelmad_device_get,
++ .put = snd_intelmad_device_set,
++ .private_value = 0,
++},
++{
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = "Master Playback Switch",
++ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
++ .info = snd_intelmad_mute_info,
++ .get = snd_intelmad_mute_get,
++ .put = snd_intelmad_mute_set,
++ .private_value = 0,
++},
++};
+diff --git a/sound/pci/sst/intelmid_ctrl.h b/sound/pci/sst/intelmid_ctrl.h
+new file mode 100644
+index 0000000..fa5feaf
+--- /dev/null
++++ b/sound/pci/sst/intelmid_ctrl.h
+@@ -0,0 +1,33 @@
++/*
++ * intelmid_ctrl.h - Intel Sound card driver for MID
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Harsha Priya <priya.harsha@intel.com>
++ * Vinod Koul <vinod.koul@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * ALSA driver header file for handling mixer controls for Intel MAD chipset
++ */
++#ifndef __INTELMID_CTRL_H
++#define __INTELMID_CTRL_H
++
++extern struct snd_control_val intelmad_ctrl_val[];
++extern struct snd_kcontrol_new snd_intelmad_controls[];
++extern struct snd_pmic_ops *intelmad_vendor_ops[];
++
++#endif /*__INTELMID_CTRL_H*/
+diff --git a/sound/pci/sst/intelmid_pvt.c b/sound/pci/sst/intelmid_pvt.c
+new file mode 100644
+index 0000000..1dd00c3
+--- /dev/null
++++ b/sound/pci/sst/intelmid_pvt.c
+@@ -0,0 +1,343 @@
++/*
++ * intelmid_pvt.h - Intel Sound card driver for MID
++ *
++ * Copyright (C) 2008-10 Intel Corp
++ * Authors: Harsha Priya <priya.harsha@intel.com>
++ * Vinod Koul <vinod.koul@intel.com>
++ * KP Jeeja <jeeja.kp@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * ALSA driver for Intel MID sound card chipset - holding private functions
++ */
++#include <linux/spi/spi.h>
++#include <linux/io.h>
++#include <linux/delay.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/moduleparam.h>
++#include <linux/sched.h>
++#include <asm/ipc_defs.h>
++#include <sound/core.h>
++#include <sound/control.h>
++#include <sound/pcm.h>
++#include <sound/jack.h>
++#include <sound/pcm_params.h>
++#include <sound/info.h>
++#include <sound/initval.h>
++#include <sound/pcm-indirect.h>
++#include <sound/intel_lpe.h>
++#include <sound/intel_sst_ioctl.h>
++#include "intelmid_snd_control.h"
++#include "intelmid.h"
++
++
++/*static unsigned int audio_event_seqnum;
++static struct genl_family audio_event_genl_family = {
++ .id = GENL_ID_GENERATE,
++ .name = "audio events",
++ .version = 0x01,
++ .maxattr = 0,
++};
++
++static struct genl_multicast_group audio_event_mcgrp = {
++ .name = "audio_group",
++};
++*/
++
++void period_elapsed(void *mad_substream)
++{
++ struct snd_pcm_substream *substream = mad_substream;
++ struct mad_stream_pvt *stream;
++
++ if (!substream || !substream->runtime)
++ return;
++ stream = substream->runtime->private_data;
++ if (!stream)
++ return;
++
++// printk(KERN_DEBUG "SST DBG:called\n");
++ if (stream->stream_status != RUNNING)
++ return;
++// printk(KERN_DEBUG "SST DBG:calling period elapsed\n");
++ snd_pcm_period_elapsed(substream);
++ return;
++}
++
++
++int snd_intelmad_alloc_stream(struct snd_pcm_substream *substream)
++{
++ struct snd_intelmad *intelmaddata = snd_pcm_substream_chip(substream);
++ struct mad_stream_pvt *stream = substream->runtime->private_data;
++ unsigned int bits_per_sec = (substream->runtime->sample_bits/8)
++ * (substream->runtime->channels)
++ * (substream->runtime->rate);
++ struct snd_sst_stream_params param = {{{0,},},};
++ struct snd_sst_params str_params = {0};
++ int ret_val;
++
++ /* set codec params and inform SST driver the same */
++
++ param.uc.pcm_params.codec = SST_CODEC_TYPE_PCM;
++ param.uc.pcm_params.brate = bits_per_sec;
++ param.uc.pcm_params.num_chan = (u8) substream->runtime->channels;
++ param.uc.pcm_params.sfreq = substream->runtime->rate;
++ param.uc.pcm_params.pcm_wd_sz = substream->runtime->sample_bits;
++// param.uc.pcm_params.frame_size = 0;
++// param.uc.pcm_params.samples_per_frame = 250; /* FIXME */
++ param.uc.pcm_params.buffer_size = substream->runtime->buffer_size;
++ param.uc.pcm_params.period_count = substream->runtime->period_size;
++ printk(KERN_DEBUG "SST DBG:period_count +\
++ = %d\n", param.uc.pcm_params.period_count);
++ printk(KERN_DEBUG "SST DBG:sfreq= %d, wd_sz = %d\n", +\
++ param.uc.pcm_params.sfreq, param.uc.pcm_params.pcm_wd_sz);
++
++ str_params.sparams = param;
++ str_params.codec = SST_CODEC_TYPE_PCM;
++
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++ str_params.ops = STREAM_OPS_PLAYBACK;
++ else
++ str_params.ops = STREAM_OPS_CAPTURE;
++ ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_ALLOC,
++ &str_params);
++ printk(KERN_DEBUG "SST DBG:SST_SND_PLAY/CAPTURE ret_val = %x\n",
++ ret_val);
++ if (ret_val < 0)
++ return ret_val;
++
++ stream->stream_info.str_id = ret_val;
++ stream->stream_status = INIT;
++ stream->stream_info.buffer_ptr = 0;
++ printk(KERN_DEBUG "SST DBG:str id : %d\n", stream->stream_info.str_id);
++
++ return ret_val;
++}
++
++int snd_intelmad_init_stream(struct snd_pcm_substream *substream)
++{
++ struct mad_stream_pvt *stream = substream->runtime->private_data;
++ struct snd_intelmad *intelmaddata = snd_pcm_substream_chip(substream);
++ int ret_val;
++
++ printk(KERN_DEBUG "SST DBG:setting buffer ptr param\n");
++ stream->stream_info.period_elapsed = period_elapsed;
++ stream->stream_info.mad_substream = substream;
++ stream->stream_info.buffer_ptr = 0;
++ stream->stream_info.sfreq = substream->runtime->rate;
++ ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_STREAM_INIT,
++ &stream->stream_info);
++ if (ret_val)
++ dev_err(&intelmaddata->spi->dev,\
++ "SST ERR: error code = %d \n", ret_val);
++ return ret_val;
++
++}
++
++void send_buffer_to_sst(struct snd_pcm_substream *substream,
++ struct snd_pcm_indirect *rec, size_t bytes)
++{
++ struct snd_intelmad *intelmaddata = snd_pcm_substream_chip(substream);
++ struct mad_stream_pvt *stream = substream->runtime->private_data;
++ struct stream_buffer buffer_to_sst = {0,};
++ int ret_val;
++
++ /* sends data to SST to be processed */
++ stream->dbg_cum_bytes += bytes;
++ printk(KERN_DEBUG "SST DBG:bytes = %d \n", bytes);
++ printk(KERN_DEBUG "SST DBG:cum_bytes +\
++ = 0x%x, \n", stream->dbg_cum_bytes);
++ buffer_to_sst.length = bytes;
++ buffer_to_sst.addr = (unsigned long) substream->runtime->dma_area +
++ rec->sw_data;
++ /* SST API to actually send the buffer to be played */
++ ret_val = intelmaddata->sstdrv_ops->send_buffer(
++ stream->stream_info.str_id,
++ &buffer_to_sst);
++ printk(KERN_DEBUG "SST DBG:send_buffer +\
++ ret_val = 0x%x \n", ret_val);
++ return;
++}
++
++/*int snd_intelmad_generate_netlink(u32 orig, enum eaudio_events event)
++{
++ struct sk_buff *skb = NULL;
++ struct nlattr *attr = NULL;
++ struct audio_genl_event *aud_event = NULL;
++ void *msg_header = NULL;
++ int size = 0, ret_val = 0;
++
++
++ size = nla_total_size(sizeof(struct audio_genl_event)) + \
++ nla_total_size(0);
++
++ skb = genlmsg_new(size, GFP_ATOMIC);
++ if (!skb)
++ return -ENOMEM;
++
++
++ msg_header = genlmsg_put(skb, 0, audio_event_seqnum++,
++ &audio_event_genl_family, 0,
++ AUDIO_GENL_CMD_EVENT);
++ if (!msg_header) {
++ nlmsg_free(skb);
++ return -ENOMEM;
++ }
++
++ attr = nla_reserve(skb, AUDIO_GENL_ATTR_EVENT, \
++ sizeof(struct audio_genl_event));
++
++ if (!attr) {
++ nlmsg_free(skb);
++ return -EINVAL;
++ }
++
++ aud_event = nla_data(attr);
++ if (!aud_event) {
++ nlmsg_free(skb);
++ return -EINVAL;
++ }
++
++ memset(aud_event, 0, sizeof(struct audio_genl_event));
++
++ aud_event->orig = orig;
++ aud_event->event = event;
++
++
++ ret_val = genlmsg_end(skb, msg_header);
++ if (ret_val < 0) {
++ nlmsg_free(skb);
++ return ret_val;
++ }
++
++ ret_val = genlmsg_multicast(skb, 0, audio_event_mcgrp.id, GFP_ATOMIC);
++
++ if (ret_val)
++ printk(KERN_INFO "Failed to send a Genetlink message!\n");
++ return 0;
++}*/
++
++
++/**
++* Reads/writes/read-modify operations on registers accessed through SCU (sound
++* card and few SST DSP regsiters that are not accissible to IA)
++*/
++int sst_sc_reg_access(struct sc_reg_access *sc_access,
++ int type, int num_val)
++{
++ int i, retval = 0, j = 0, k = 0, count = 0;
++ struct ipc_pmic_reg_data reg_data;
++ struct ipc_pmic_mod_reg_data pmic_mod_reg = {0};
++
++ reg_data.ioc = TRUE;
++ if (type == PMIC_WRITE) {
++ do {
++ int max_retries = 0;
++
++ if (num_val <= 4)
++ count = num_val;
++ else
++ count = 4;
++retry_write:
++ for (i = 0; i < count; i++, j++) {
++ reg_data.pmic_reg_data[i].
++ register_address = sc_access[j].reg_addr;
++
++ reg_data.pmic_reg_data[i].value =
++ sc_access[j].value;
++ }
++ reg_data.num_entries = (u8) count;
++ retval = ipc_pmic_register_write(&reg_data, 0);
++ if (retval == E_NO_INTERRUPT_ON_IOC &&
++ max_retries < 10) {
++ printk(KERN_ERR "SST ERR: write communcation needs retry \n");
++ max_retries++;
++ goto retry_write;
++ }
++ if (0 != retval) {
++ printk(KERN_ERR "SST ERR: pmic write failed \n");
++ return retval;
++ }
++ num_val -= count;
++ } while (num_val > 0);
++ } else if (type == PMIC_READ) {
++ do {
++ int max_retries = 0;
++ if (num_val <= 4)
++ count = num_val;
++ else
++ count = 4;
++retry_read:
++ for (i = 0; i < count; i++, j++)
++ reg_data.pmic_reg_data[i].register_address
++ = sc_access[j].reg_addr;
++ reg_data.num_entries = count;
++ retval = ipc_pmic_register_read(&reg_data);
++ if (retval == E_NO_INTERRUPT_ON_IOC &&
++ max_retries < 10) {
++ printk(KERN_ERR "ERR: read communcation needs retry \n");
++ max_retries++;
++ goto retry_read;
++ }
++ if (0 != retval) {
++ printk(KERN_ERR "ERR: pmic read failed \n");
++ return retval;
++ }
++
++ for (i = 0; i < count; i++, k++)
++ sc_access[k].value =
++ reg_data.pmic_reg_data[i].value;
++ num_val -= count;
++ } while (num_val > 0);
++ } else {
++ pmic_mod_reg.ioc = TRUE;
++ do {
++ int max_retries = 0;
++ if (num_val <= 4)
++ count = num_val;
++ else
++ count = 4;
++retry_readmod:
++ for (i = 0; i < count; i++, j++) {
++ pmic_mod_reg.pmic_mod_reg_data[i].
++ register_address = sc_access[j].reg_addr;
++ pmic_mod_reg.pmic_mod_reg_data[i].value =
++ sc_access[j].value;
++ pmic_mod_reg.pmic_mod_reg_data[i].bit_map =
++ sc_access[j].mask;
++ }
++ pmic_mod_reg.num_entries = count;
++ printk(KERN_DEBUG "SST DBG:read_modify +\
++ called for cnt = %d\n", count);
++ retval = ipc_pmic_register_read_modify(&pmic_mod_reg);
++ if (retval == E_NO_INTERRUPT_ON_IOC &&
++ max_retries < 10) {
++ printk(KERN_ERR "SST ERR: read/modify retry \n");
++ max_retries++;
++ goto retry_readmod;
++ }
++ if (0 != retval) {
++ /* pmic communication fails */
++ printk(KERN_ERR "SST ERR: pmic read_modify failed \n");
++ return retval;
++ }
++ num_val -= count;
++ } while (num_val > 0);
++ }
++ return retval;
++}
++
+--
+1.6.2.2
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-8-8.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-8-8.patch
new file mode 100644
index 0000000..9be2a62
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-audio-driver-6.0-8-8.patch
@@ -0,0 +1,85 @@
+From e17c41ab9c34ea8715c2655fcb623b0bb92eaab1 Mon Sep 17 00:00:00 2001
+From: R, Dharageswari <dharageswari.r@intel.com>
+Date: Thu, 29 Apr 2010 20:30:16 +0530
+Subject: [PATCH] ADR-Post-Beta-0.05.002.03-8/8-Moorestown Audio Drivers: Config files
+
+This patch adds the makefiles and Kconfig changes for both SST and MAD drivers
+
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+
+ modified: sound/pci/Kconfig
+ modified: sound/pci/Makefile
+ new file: sound/pci/sst/Makefile
+Patch-mainline: 2.6.35?
+---
+ sound/pci/Kconfig | 25 +++++++++++++++++++++++++
+ sound/pci/Makefile | 3 ++-
+ sound/pci/sst/Makefile | 9 +++++++++
+ 3 files changed, 36 insertions(+), 1 deletions(-)
+ create mode 100644 sound/pci/sst/Makefile
+
+diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
+index 1513d72..e41380d 100644
+--- a/sound/pci/Kconfig
++++ b/sound/pci/Kconfig
+@@ -8,6 +8,30 @@ menuconfig SND_PCI
+ Support for sound devices connected via the PCI bus.
+
+ if SND_PCI
++config SND_INTEL_SST
++ bool "Moorestown SST (LPE) Driver"
++ depends on X86 && LNW_IPC
++ default y
++ help
++ Say Y here to include support for the Moorestown SST DSP driver
++ On other PC platforms if you are unsure answer 'N'
++
++config SND_INTELMID
++ bool "Moorestown sound card driver"
++ select SND_JACK
++ depends on SND_INTEL_SST && SPI_MRST
++ default y
++ help
++ Say Y here to include support for the Moorestown sound driver.
++ On other PC platforms if you are unsure answer 'N'
++
++config SND_AUDIO_DBG_PRINT
++ bool "Moorestown Audio driver debug printk calls"
++ depends on SND_INTELMID
++ default n
++ help
++ Say Y here to include debug printks calls in the Intel MID driver.
++ If you are unsure say 'N'
+
+ config SND_AD1889
+ tristate "Analog Devices AD1889"
+ select SND_AC97_CODEC
+diff --git a/sound/pci/Makefile b/sound/pci/Makefile
+index 5665c1e..541057b 100644
+--- a/sound/pci/Makefile
++++ b/sound/pci/Makefile
+@@ -77,4 +77,5 @@ obj-$(CONFIG_SND) += \
+ rme9652/ \
+ trident/ \
+ ymfpci/ \
+- vx222/
++ vx222/ \
++ sst/
+diff --git a/sound/pci/sst/Makefile b/sound/pci/sst/Makefile
+new file mode 100644
+index 0000000..cf96b11
+--- /dev/null
++++ b/sound/pci/sst/Makefile
+@@ -0,0 +1,9 @@
++#
++# Makefile for Intel MID Audio drivers
++#
++EXTRA_CFLAGS=-g -DCONFIG_MRST_RAR_HANDLER
++snd-intel-sst-objs := intel_sst.o intel_sst_ipc.o intel_sst_stream.o intel_sst_interface.o intel_sst_dsp.o intel_sst_pvt.o
++snd-intelmid-objs := intelmid.o intelmid_v0_control.o intelmid_v1_control.o intelmid_v2_control.o intelmid_ctrl.o intelmid_pvt.o
++# Toplevel Module Dependency
++obj-$(CONFIG_SND_INTEL_SST) += snd-intel-sst.o
++obj-$(CONFIG_SND_INTELMID) += snd-intelmid.o
+--
+1.6.2.2
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-ericsson-mbm-driver.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-ericsson-mbm-driver.patch
new file mode 100644
index 0000000..f34e280
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-ericsson-mbm-driver.patch
@@ -0,0 +1,465 @@
+From b9a664ffe791221bd2d7bc625f8b288d7dc8549a Mon Sep 17 00:00:00 2001
+From: Jacob Pan <jacob.jun.pan@intel.com>
+Date: Mon, 4 Jan 2010 11:04:34 -0800
+Subject: [PATCH 057/104] Ericsson MBM Driver
+
+Signed-off-by: Jacob Pan <jacob.jun.pan@intel.com>
+---
+ drivers/net/usb/Kconfig | 17 ++
+ drivers/net/usb/Makefile | 1 +
+ drivers/net/usb/mbm.c | 375 ++++++++++++++++++++++++++++++++++++++++++++
+ drivers/net/usb/usbnet.c | 3 +
+ include/linux/usb/usbnet.h | 1 +
+ 5 files changed, 397 insertions(+), 0 deletions(-)
+ create mode 100644 drivers/net/usb/mbm.c
+
+diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
+index 32d9356..adb6d4c 100644
+--- a/drivers/net/usb/Kconfig
++++ b/drivers/net/usb/Kconfig
+@@ -361,6 +361,23 @@ config USB_HSO
+ To compile this driver as a module, choose M here: the
+ module will be called hso.
+
++config USB_NET_MBM
++ tristate "Ericsson Mobile Broadband Module"
++ depends on USB_USBNET
++ select USB_NET_CDCETHER
++ default y
++ help
++ Choose this option to support Mobile Broadband devices from
++ Ericsson MBM, Mobile Broadband Module.
++ This driver should work with at least the following devices:
++ * Ericsson Mobile Broadband Minicard
++ * Ericsson F3507g Wireless Module
++ * Ericsson F3607gw Broadband Module
++ * Dell Wireless 5530 HSPA
++ * Toshiba F3507g
++ * Sony Ericsson EC400
++ * Sony Ericsson MD400
++
+ config USB_NET_INT51X1
+ tristate "Intellon PLC based usb adapter"
+ depends on USB_USBNET
+diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
+index e17afb7..82d5f60 100644
+--- a/drivers/net/usb/Makefile
++++ b/drivers/net/usb/Makefile
+@@ -18,6 +18,7 @@ obj-$(CONFIG_USB_NET_PLUSB) += plusb.o
+ obj-$(CONFIG_USB_NET_RNDIS_HOST) += rndis_host.o
+ obj-$(CONFIG_USB_NET_CDC_SUBSET) += cdc_subset.o
+ obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o
++obj-$(CONFIG_USB_NET_MBM) += mbm.o
+ obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
+ obj-$(CONFIG_USB_USBNET) += usbnet.o
+ obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o
+diff --git a/drivers/net/usb/mbm.c b/drivers/net/usb/mbm.c
+new file mode 100644
+index 0000000..4bb909b
+--- /dev/null
++++ b/drivers/net/usb/mbm.c
+@@ -0,0 +1,375 @@
++/* -*- linux-c -*-
++ * Copyright (C) 2008 Carl Nordbeck <Carl.Nordbeck@ericsson.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/netdevice.h>
++#include <linux/ctype.h>
++#include <linux/ethtool.h>
++#include <linux/workqueue.h>
++#include <linux/mii.h>
++#include <linux/crc32.h>
++#include <linux/usb.h>
++#include <linux/usb/cdc.h>
++#include <linux/usb/usbnet.h>
++
++#define DRIVER_VERSION "0.03"
++
++/* Bogus speed for bugy HSPA modems */
++#define TX_LINK_SPEED 0x001E8480 /* 2.0 Mbps */
++#define RX_LINK_SPEED 0x006DDD00 /* 7.2 Mbps */
++#define FIX_SPEED 0x00989680 /* 10.0 Mbps */
++
++struct mbm_data {
++ unsigned int rx_speed;
++ unsigned int tx_speed;
++ unsigned int connect;
++};
++
++static const u8 mbm_guid[16] = {
++ 0xa3, 0x17, 0xa8, 0x8b, 0x04, 0x5e, 0x4f, 0x01,
++ 0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a,
++};
++static void dumpspeed(struct usbnet *dev, __le32 *speeds)
++{
++ struct mbm_data *data = (void *)&dev->data;
++
++ data->rx_speed = __le32_to_cpu(speeds[0]);
++ data->tx_speed = __le32_to_cpu(speeds[1]);
++
++ if (data->rx_speed == FIX_SPEED && data->tx_speed == FIX_SPEED) {
++/* Bogus speed for buggy HSPA modems */
++ dev_info(&dev->udev->dev,
++ "link speeds: %u kbps RX, %u kbps TX\n",
++ RX_LINK_SPEED / 1000, TX_LINK_SPEED / 1000);
++
++ data->rx_speed = RX_LINK_SPEED;
++ data->tx_speed = TX_LINK_SPEED;
++ } else
++ dev_info(&dev->udev->dev,
++ "link speeds: %u kbps RX, %u kbps TX\n",
++ __le32_to_cpu(speeds[0]) / 1000,
++ __le32_to_cpu(speeds[1]) / 1000);
++}
++
++static void mbm_status(struct usbnet *dev, struct urb *urb)
++{
++ struct mbm_data *data = (void *)&dev->data;
++ struct usb_cdc_notification *event;
++
++ if (urb->actual_length < sizeof(*event))
++ return;
++
++ /* SPEED_CHANGE can get split into two 8-byte packets */
++ if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
++ dumpspeed(dev, (__le32 *) urb->transfer_buffer);
++ return;
++ }
++
++ event = urb->transfer_buffer;
++ switch (event->bNotificationType) {
++ case USB_CDC_NOTIFY_NETWORK_CONNECTION:
++ data->connect = event->wValue;
++ if (netif_msg_timer(dev))
++ dev_dbg(&dev->udev->dev, "CDC: carrier %s\n",
++ data->connect ? "on" : "off");
++ if (event->wValue)
++ netif_carrier_on(dev->net);
++ else
++ netif_carrier_off(dev->net);
++ break;
++ case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */
++ if (netif_msg_timer(dev))
++ dev_dbg(&dev->udev->dev, "CDC: speed change (len %d)\n",
++ urb->actual_length);
++ if (urb->actual_length != (sizeof(*event) + 8))
++ set_bit(EVENT_STS_SPLIT, &dev->flags);
++ else
++ dumpspeed(dev, (__le32 *) &event[1]);
++ break;
++ default:
++ dev_err(&dev->udev->dev, "CDC: unexpected notification %02x!\n",
++ event->bNotificationType);
++ break;
++ }
++}
++
++static u8 nibble(unsigned char c)
++{
++ if (likely(isdigit(c)))
++ return c - '0';
++ c = toupper(c);
++ if (likely(isxdigit(c)))
++ return 10 + c - 'A';
++ return 0;
++}
++
++static inline int
++get_ethernet_addr(struct usbnet *dev, struct usb_cdc_ether_desc *e)
++{
++ int tmp, i;
++ unsigned char buf[13];
++
++ tmp = usb_string(dev->udev, e->iMACAddress, buf, sizeof(buf));
++ if (tmp != 12) {
++ dev_dbg(&dev->udev->dev,
++ "bad MAC string %d fetch, %d\n", e->iMACAddress, tmp);
++ if (tmp >= 0)
++ tmp = -EINVAL;
++ return tmp;
++ }
++ for (i = tmp = 0; i < 6; i++, tmp += 2)
++ dev->net->dev_addr[i] =
++ (nibble(buf[tmp]) << 4) + nibble(buf[tmp + 1]);
++ return 0;
++}
++
++static void mbm_get_drvinfo(struct net_device *net,
++ struct ethtool_drvinfo *info)
++{
++ struct usbnet *dev = netdev_priv(net);
++
++ strncpy(info->driver, dev->driver_name, sizeof(info->driver));
++ strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
++ strncpy(info->fw_version, dev->driver_info->description,
++ sizeof(info->fw_version));
++ usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
++}
++
++static struct ethtool_ops mbm_ethtool_ops = {
++ .get_drvinfo = mbm_get_drvinfo,
++ .get_link = usbnet_get_link,
++ .get_msglevel = usbnet_get_msglevel,
++ .set_msglevel = usbnet_set_msglevel,
++ .get_settings = usbnet_get_settings,
++ .set_settings = usbnet_set_settings,
++ .nway_reset = usbnet_nway_reset,
++};
++
++static int mbm_check_connect(struct usbnet *dev)
++{
++ struct mbm_data *data = (void *)&dev->data;
++
++ return !data->connect;
++}
++
++static int mbm_bind(struct usbnet *dev, struct usb_interface *intf)
++{
++ struct cdc_state *info = (void *)&dev->data;
++ struct usb_driver *driver = driver_of(intf);
++ struct usb_interface_descriptor *d = NULL;
++ struct usb_cdc_mdlm_desc *desc = NULL;
++ struct usb_cdc_mdlm_detail_desc *detail = NULL;
++ struct mbm_data *data = NULL;
++
++ u8 *buf = intf->cur_altsetting->extra;
++ int len = intf->cur_altsetting->extralen;
++ int status;
++
++ memset(info, 0, sizeof(*info));
++ info->control = intf;
++ while (len > 3) {
++ if (buf[1] != USB_DT_CS_INTERFACE)
++ goto next_desc;
++
++ switch (buf[2]) {
++ case USB_CDC_MDLM_TYPE:
++ if (info->header)
++ goto bad_desc;
++
++ desc = (void *)buf;
++
++ if (desc->bLength != sizeof(*desc))
++ goto bad_desc;
++
++ if (memcmp(&desc->bGUID, mbm_guid, 16))
++ goto bad_desc;
++ break;
++ case USB_CDC_MDLM_DETAIL_TYPE:
++ if (detail)
++ goto bad_desc;
++
++ detail = (void *)buf;
++
++ if (detail->bGuidDescriptorType == 0) {
++ if (detail->bLength < (sizeof(*detail) + 1))
++ goto bad_desc;
++ }
++ break;
++ case USB_CDC_UNION_TYPE:
++ if (info->u)
++ goto bad_desc;
++
++ info->u = (void *)buf;
++
++ if (info->u->bLength != sizeof(*info->u))
++ goto bad_desc;
++
++ info->control = usb_ifnum_to_if(dev->udev,
++ info->u->
++ bMasterInterface0);
++ info->data =
++ usb_ifnum_to_if(dev->udev,
++ info->u->bSlaveInterface0);
++ if (!info->control || !info->data) {
++ dev_dbg(&intf->dev,
++ "master #%u/%p slave #%u/%p\n",
++ info->u->bMasterInterface0,
++ info->control,
++ info->u->bSlaveInterface0, info->data);
++ goto bad_desc;
++ }
++
++ /* a data interface altsetting does the real i/o */
++ d = &info->data->cur_altsetting->desc;
++ if (d->bInterfaceClass != USB_CLASS_CDC_DATA)
++ goto bad_desc;
++ break;
++ case USB_CDC_ETHERNET_TYPE:
++ if (info->ether)
++ goto bad_desc;
++
++ info->ether = (void *)buf;
++ if (info->ether->bLength != sizeof(*info->ether))
++ goto bad_desc;
++ dev->hard_mtu =
++ le16_to_cpu(info->ether->wMaxSegmentSize);
++ break;
++ }
++next_desc:
++ len -= buf[0]; /* bLength */
++ buf += buf[0];
++ }
++
++ if (!desc || !detail) {
++ dev_dbg(&intf->dev, "missing cdc mdlm %s%sdescriptor\n",
++ desc ? "" : "func ", detail ? "" : "detail ");
++ goto bad_desc;
++ }
++
++ if (!info->u || (!info->ether)) {
++ dev_dbg(&intf->dev, "missing cdc %s%s%sdescriptor\n",
++ info->header ? "" : "header ",
++ info->u ? "" : "union ", info->ether ? "" : "ether ");
++ goto bad_desc;
++ }
++
++ status = usb_driver_claim_interface(driver, info->data, dev);
++ if (status < 0) {
++ dev_dbg(&intf->dev, "Failed claimin interface\n");
++ return status;
++ }
++ status = usbnet_get_endpoints(dev, info->data);
++ if (status < 0) {
++ dev_dbg(&intf->dev, "Failed get endpoints\n");
++ usb_set_intfdata(info->data, NULL);
++ usb_driver_release_interface(driver, info->data);
++ return status;
++ }
++
++ dev->status = NULL;
++ if (info->control->cur_altsetting->desc.bNumEndpoints == 1) {
++ struct usb_endpoint_descriptor *desc;
++
++ dev->status = &info->control->cur_altsetting->endpoint[0];
++ desc = &dev->status->desc;
++ if (!usb_endpoint_is_int_in(desc)
++ || (le16_to_cpu(desc->wMaxPacketSize)
++ < sizeof(struct usb_cdc_notification))
++ || !desc->bInterval) {
++ dev_dbg(&intf->dev, "bad notification endpoint\n");
++ dev->status = NULL;
++ }
++ }
++ usb_set_intfdata(intf, data);
++ dev->net->ethtool_ops = &mbm_ethtool_ops;
++
++ status = get_ethernet_addr(dev, info->ether);
++ if (status < 0) {
++ usb_set_intfdata(info->data, NULL);
++ usb_driver_release_interface(driver_of(intf), info->data);
++ return status;
++ }
++
++ return 0;
++
++bad_desc:
++ dev_info(&dev->udev->dev, "unsupported MDLM descriptors\n");
++ return -ENODEV;
++}
++
++static const struct driver_info mbm_info = {
++ .description = "Mobile Broadband Network Device",
++ .flags = FLAG_MBN,
++ .check_connect = mbm_check_connect,
++ .bind = mbm_bind,
++ .unbind = usbnet_cdc_unbind,
++ .status = mbm_status,
++};
++
++static const struct usb_device_id products[] = {
++ {
++ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM,
++ USB_CDC_PROTO_NONE),
++ .driver_info = (unsigned long)&mbm_info,
++ },
++
++ {}, /* END */
++};
++
++MODULE_DEVICE_TABLE(usb, products);
++
++int mbm_suspend(struct usb_interface *intf, pm_message_t message)
++{
++ dev_dbg(&intf->dev, "mbm%d_suspend\n", intf->minor);
++ return usbnet_suspend(intf, message);
++}
++
++int mbm_resume(struct usb_interface *intf)
++{
++ dev_dbg(&intf->dev, "mbm%d_resume\n", intf->minor);
++ return usbnet_resume(intf);
++}
++
++static struct usb_driver usbmbm_driver = {
++ .name = "mbm",
++ .id_table = products,
++ .probe = usbnet_probe,
++ .disconnect = usbnet_disconnect,
++ .suspend = mbm_suspend,
++ .resume = mbm_resume,
++ .supports_autosuspend = 1,
++};
++
++static int __init usbmbm_init(void)
++{
++ return usb_register(&usbmbm_driver);
++}
++
++module_init(usbmbm_init);
++
++static void __exit usbmbm_exit(void)
++{
++ usb_deregister(&usbmbm_driver);
++}
++
++module_exit(usbmbm_exit);
++
++MODULE_AUTHOR("Carl Nordbeck");
++MODULE_DESCRIPTION("Ericsson Mobile Broadband");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 035fab0..4733b73 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -1371,6 +1371,9 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ /* WWAN devices should always be named "wwan%d" */
+ if ((dev->driver_info->flags & FLAG_WWAN) != 0)
+ strcpy(net->name, "wwan%d");
++ /* MB devices should always be named "mb%d" */
++ if (dev->driver_info->flags & FLAG_MBN)
++ strcpy (net->name, "mb%d");
+
+ /* maybe the remote can't receive an Ethernet MTU */
+ if (net->mtu > (dev->hard_mtu - net->hard_header_len))
+diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
+index 8ce6135..e47afbf 100644
+--- a/include/linux/usb/usbnet.h
++++ b/include/linux/usb/usbnet.h
+@@ -94,6 +94,7 @@ struct driver_info {
+ #define FLAG_AVOID_UNLINK_URBS 0x0100 /* don't unlink urbs at usbnet_stop() */
+ #define FLAG_SEND_ZLP 0x0200 /* hw requires ZLPs are sent */
+ #define FLAG_WWAN 0x0400 /* use "wwan%d" names */
++#define FLAG_MBN 0x0800 /* use "mb%d" names */
+
+ #define FLAG_LINK_INTR 0x0800 /* updates link (carrier) status */
+
+--
+1.6.2.5
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-fix-hw-qh-prefetch-bug.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-fix-hw-qh-prefetch-bug.patch
new file mode 100644
index 0000000..a9c1d79
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-fix-hw-qh-prefetch-bug.patch
@@ -0,0 +1,25 @@
+From: Alek Du <alek.du@intel.com>
+Date: Mon, 19 Apr 2010 12:39:37 -0800
+Subject: [PATCH] ehci: Only enable QH prefetch on Moorestown
+Patch-mainline: when mrst code is merged?
+
+Hardware QH prefetch feature is found to be buggy on some none-moorestown
+platforms. Disable this feature on other platforms at the moment.
+
+Signed-off-by: Alek Du <alek.du@intel.com>
+---
+ drivers/usb/host/ehci-hcd.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -595,7 +595,9 @@ static int ehci_init(struct usb_hcd *hcd
+ }
+ if (HCC_HW_PREFETCH(hcc_params)) {
+ ehci_dbg(ehci, "HW prefetch capable %d\n", park);
++#ifdef CONFIG_X86_MRST
+ temp |= (CMD_ASPE | CMD_PSPE);
++#endif
+ }
+
+ if (HCC_CANPARK(hcc_params)) {
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-gpe-fix-for-sensor.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-gpe-fix-for-sensor.patch
new file mode 100644
index 0000000..dea7789
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-gpe-fix-for-sensor.patch
@@ -0,0 +1,85 @@
+From 39fd545bb198f6e17e7e8f730535e3976088cd9f Mon Sep 17 00:00:00 2001
+From: Alan Olsen <alan.r.olsen@intel.com>
+Date: Fri, 26 Mar 2010 11:59:00 -0700
+Subject: [PATCH] GPE fix for sensors
+
+Patch-mainline: 2.6.35?
+
+Signed-off-by: Alan Olsen <alan.r.olsen@intel.com>
+---
+ drivers/hwmon/emc1403.c | 53 ++++++++--------------------------------------
+ 1 files changed, 10 insertions(+), 43 deletions(-)
+
+diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
+index 75e3b15..c94d933 100644
+--- a/drivers/hwmon/emc1403.c
++++ b/drivers/hwmon/emc1403.c
+@@ -33,7 +33,6 @@
+ #include <linux/delay.h>
+ #include <linux/mutex.h>
+ #include <linux/sysfs.h>
+-#include <linux/gpe.h>
+ #include <linux/intel_mid.h>
+
+
+@@ -624,48 +623,16 @@ static int emc1403_probe(struct i2c_client *new_client,
+ data->therm_irq = t_irq & ~IRQ_TYPE_MASK;
+ data->alert_irq = a_irq & ~IRQ_TYPE_MASK;
+ /* interpret irq field */
+- if (data->therm_irq == 0x113) {
+- if (t_irq & IRQ_TYPE_MASK) {
+- /* irq -> GPE_ID */
+- res = request_gpe(data->therm_irq,
+- (gpio_function_t)therm_interrupt_handler,
+- data, DETECT_LEVEL_LOW);
+- if (res)
+- dev_crit(&new_client->dev, "%s(): cannot \
+- register therm gpe \n", __func__);
+- } else {
+- res = request_irq(data->therm_irq,
+- therm_interrupt_handler,
+- DETECT_LEVEL_LOW, "emc1403", data);
+- if (res)
+- dev_crit(&new_client->dev, "%s(): \
+- cannot get therm IRQ\n", __func__);
+- }
+- } else {
+- printk(KERN_WARNING"emc1403: IRQ mismatch \
+- sent for therm registration");
+- }
+- if (data->alert_irq == 0x114) {
+- if (a_irq & IRQ_TYPE_MASK) {
+- /* irq -> GPE_ID */
+- res = request_gpe(data->alert_irq,
+- (gpio_function_t)alert_interrupt_handler,
+- data, DETECT_LEVEL_LOW);
+- if (res)
+- dev_crit(&new_client->dev, "%s(): \
+- cannot register alert gpe \n", __func__);
+- } else {
+- res = request_irq(data->alert_irq,
+- alert_interrupt_handler, DETECT_LEVEL_LOW,
+- "emc1403", data);
+- if (res)
+- dev_crit(&new_client->dev, "%s(): cannot \
++ res = request_irq(data->therm_irq, therm_interrupt_handler,
++ IRQ_TYPE_EDGE_FALLING, "emc1403_therm", data);
++ if (res)
++ dev_crit(&new_client->dev, "%s(): \
++ cannot get therm IRQ\n", __func__);
++ res = request_irq(data->alert_irq, alert_interrupt_handler,
++ IRQ_TYPE_EDGE_FALLING, "emc1403_alert", data);
++ if (res)
++ dev_crit(&new_client->dev, "%s(): cannot \
+ get alert IRQ\n", __func__);
+- }
+- } else {
+- printk(KERN_WARNING"emc1403: IRQ mismatch \
+- sent for alert registration");
+- }
+ #endif
+ emc1403_set_default_config(new_client);
+ dev_info(&new_client->dev, "%s EMC1403 Thermal chip found \n",
+--
+1.6.0.6
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-graphics-changes-for-aava-koski-dv1-hardware.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-graphics-changes-for-aava-koski-dv1-hardware.patch
new file mode 100644
index 0000000..d076b8d
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-graphics-changes-for-aava-koski-dv1-hardware.patch
@@ -0,0 +1,1859 @@
+From a060e4d4a34b0fe17384e2d02e65c74fe73ee4c9 Mon Sep 17 00:00:00 2001
+From: Prajwal Mohan <prajwal.karur.mohan@intel.com>
+Date: Thu, 6 May 2010 14:29:26 -0700
+Subject: [PATCH] Graphics changes for Aava Koski DV1 hardware
+
+Signed-Off-By: Prajwal Mohan <prajwal.karur.mohan@intel.com>
+Patch-mainline: 2.6.35?
+---
+ drivers/gpu/drm/mrst/drv/psb_drv.h | 7 +
+ drivers/gpu/drm/mrst/drv/psb_intel_display.c | 75 +-
+ drivers/gpu/drm/mrst/drv/psb_intel_dsi.c | 9 +
+ drivers/gpu/drm/mrst/drv/psb_intel_dsi_aava.c | 1356 ++++++++++----------
+ .../linux_framebuffer_mrst/mrstlfb_displayclass.c | 21 +-
+ 5 files changed, 735 insertions(+), 733 deletions(-)
+
+diff --git a/drivers/gpu/drm/mrst/drv/psb_drv.h b/drivers/gpu/drm/mrst/drv/psb_drv.h
+index 2ac7934..56c1e90 100644
+--- a/drivers/gpu/drm/mrst/drv/psb_drv.h
++++ b/drivers/gpu/drm/mrst/drv/psb_drv.h
+@@ -413,6 +413,8 @@ struct drm_psb_private {
+ uint32_t dspcntr;
+
+ /* MRST_DSI private date start */
++ struct work_struct dsi_work;
++
+ /*
+ *MRST DSI info
+ */
+@@ -430,6 +432,9 @@ struct drm_psb_private {
+
+ enum mipi_panel_type panel_make;
+
++ /* Set if MIPI encoder wants to control plane/pipe */
++ bool dsi_plane_pipe_control;
++
+ /* status */
+ uint32_t videoModeFormat:2;
+ uint32_t laneCount:3;
+@@ -610,6 +615,8 @@ struct drm_psb_private {
+ uint32_t saveMIPI_CONTROL_REG;
+ uint32_t saveMIPI;
+ void (*init_drvIC)(struct drm_device *dev);
++ void (*dsi_prePowerState)(struct drm_device *dev);
++ void (*dsi_postPowerState)(struct drm_device *dev);
+
+ /* DPST Register Save */
+ uint32_t saveHISTOGRAM_INT_CONTROL_REG;
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_display.c b/drivers/gpu/drm/mrst/drv/psb_intel_display.c
+index 10c6dec..72d42eb 100644
+--- a/drivers/gpu/drm/mrst/drv/psb_intel_display.c
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_display.c
+@@ -2089,6 +2089,7 @@ static void mrst_crtc_dpms(struct drm_crtc *crtc, int mode)
+ {
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct drm_psb_private *dev_priv = dev->dev_private;
+ int pipe = psb_intel_crtc->pipe;
+ int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+@@ -2130,18 +2131,22 @@ static void mrst_crtc_dpms(struct drm_crtc *crtc, int mode)
+ udelay(150);
+ }
+
+- /* Enable the pipe */
+- temp = REG_READ(pipeconf_reg);
+- if ((temp & PIPEACONF_ENABLE) == 0)
+- REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+-
+- /* Enable the plane */
+- temp = REG_READ(dspcntr_reg);
+- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+- REG_WRITE(dspcntr_reg,
+- temp | DISPLAY_PLANE_ENABLE);
+- /* Flush the plane changes */
+- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++ if (dev_priv->iLVDS_enable ||
++ !dev_priv->dsi_plane_pipe_control) {
++ /* Enable the pipe */
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_ENABLE) == 0)
++ REG_WRITE(pipeconf_reg,
++ temp | PIPEACONF_ENABLE);
++
++ /* Enable the plane */
++ temp = REG_READ(dspcntr_reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
++ REG_WRITE(dspcntr_reg,
++ temp | DISPLAY_PLANE_ENABLE);
++ /* Flush the plane changes */
++ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++ }
+ }
+
+ psb_intel_crtc_load_lut(crtc);
+@@ -2158,30 +2163,34 @@ static void mrst_crtc_dpms(struct drm_crtc *crtc, int mode)
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+- /* Disable display plane */
+- temp = REG_READ(dspcntr_reg);
+- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+- REG_WRITE(dspcntr_reg,
+- temp & ~DISPLAY_PLANE_ENABLE);
+- /* Flush the plane changes */
+- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+- REG_READ(dspbase_reg);
+- }
++ if (dev_priv->iLVDS_enable ||
++ !dev_priv->dsi_plane_pipe_control) {
++ /* Disable display plane */
++ temp = REG_READ(dspcntr_reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
++ REG_WRITE(dspcntr_reg,
++ temp & ~DISPLAY_PLANE_ENABLE);
++ /* Flush the plane changes */
++ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++ REG_READ(dspbase_reg);
++ }
+
+- if (!IS_I9XX(dev)) {
+- /* Wait for vblank for the disable to take effect */
+- psb_intel_wait_for_vblank(dev);
+- }
++ if (!IS_I9XX(dev)) {
++ /* Wait for vblank for the disable to take effect */
++ psb_intel_wait_for_vblank(dev);
++ }
+
+- /* Next, disable display pipes */
+- temp = REG_READ(pipeconf_reg);
+- if ((temp & PIPEACONF_ENABLE) != 0) {
+- REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+- REG_READ(pipeconf_reg);
+- }
++ /* Next, disable display pipes */
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_ENABLE) != 0) {
++ REG_WRITE(pipeconf_reg,
++ temp & ~PIPEACONF_ENABLE);
++ REG_READ(pipeconf_reg);
++ }
+
+- /* Wait for for the pipe disable to take effect. */
+- mrstWaitForPipeDisable(dev);
++ /* Wait for for the pipe disable to take effect. */
++ mrstWaitForPipeDisable(dev);
++ }
+
+ temp = REG_READ(dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) != 0) {
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_dsi.c b/drivers/gpu/drm/mrst/drv/psb_intel_dsi.c
+index 3d45df8..eb6cb2a 100644
+--- a/drivers/gpu/drm/mrst/drv/psb_intel_dsi.c
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_dsi.c
+@@ -18,6 +18,11 @@
+ * jim liu <jim.liu@intel.com>
+ */
+
++#define USE_AAVA_VERSION
++#ifdef USE_AAVA_VERSION
++#include "psb_intel_dsi_aava.c"
++#else
++
+ #include <linux/backlight.h>
+ #include <drm/drmP.h>
+ #include <drm/drm.h>
+@@ -2448,3 +2453,7 @@ failed_find:
+ drm_connector_cleanup(connector);
+ kfree(connector);
+ }
++
++#endif /* USE_AAVA_VERSION */
++
++
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_dsi_aava.c b/drivers/gpu/drm/mrst/drv/psb_intel_dsi_aava.c
+index 6c21480..9e761c6 100644
+--- a/drivers/gpu/drm/mrst/drv/psb_intel_dsi_aava.c
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_dsi_aava.c
+@@ -1,32 +1,4 @@
+-/*
+- * Copyright © 2006-2007 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- * jim liu <jim.liu@intel.com>
+- */
+-
+-/* This enables setting backlights on with a delay at startup,
+- should be removed after resolving issue with backlights going off
+- after setting them on in initial mrst_dsi_set_power call */
++
+ #define AAVA_BACKLIGHT_HACK
+
+ #include <linux/backlight.h>
+@@ -34,26 +6,33 @@
+ #include <drm/drm.h>
+ #include <drm/drm_crtc.h>
+ #include <drm/drm_edid.h>
+-
+ #include <asm/ipc_defs.h>
+
+-#ifdef AAVA_BACKLIGHT_HACK
+-#include <linux/workqueue.h>
+-#endif /* AAVA_BACKLIGHT_HACK */
+-
+ #include "psb_drv.h"
+ #include "psb_intel_drv.h"
+ #include "psb_intel_reg.h"
+ #include "ospm_power.h"
+
+-#define DRM_MODE_ENCODER_MIPI 5
++#ifdef AAVA_BACKLIGHT_HACK
++#include <linux/workqueue.h>
++#endif /* AAVA_BACKLIGHT_HACK */
+
+-//#define DBG_PRINTS 1
+-#define DBG_PRINTS 0
++/* Debug trace definitions */
++#define DBG_LEVEL 1
+
+-#define NEW_CRAP_SAMPLE_SETTINGS
++#if (DBG_LEVEL > 0)
++#define DBG_TRACE(format,args...) printk(KERN_ERR "%s: " format "\n", \
++ __FUNCTION__ , ## args)
++#else
++#define DBG_TRACE(format,args...)
++#endif
++
++#define DBG_ERR(format,args...) printk(KERN_ERR "%s: " format "\n", \
++ __FUNCTION__ , ## args)
+
+-#define AAVA_EV_0_5
++#define BRIGHTNESS_MAX_LEVEL 100
++
++#define DRM_MODE_ENCODER_MIPI 5
+
+ #define VSIZE 480
+ #define HSIZE 864
+@@ -84,686 +63,633 @@
+ #define DISP_VSYNC_START (DISP_VBLANK_START + VFP_LINES - 1)
+ #define DISP_VSYNC_END (DISP_VSYNC_START + VSYNC_LINES - 1)
+
+-#define BRIGHTNESS_MAX_LEVEL 100
++#define MAX_FIFO_WAIT_MS 100
+
+ static unsigned int dphy_reg = 0x0d0a7f06;
+ static unsigned int mipi_clock = 0x2;
+
+ #ifdef AAVA_BACKLIGHT_HACK
+-static void bl_work_handler(struct work_struct *work);
+-DECLARE_DELAYED_WORK(bl_work, bl_work_handler);
++static void dsi_bl_work_handler(struct work_struct *work);
++DECLARE_DELAYED_WORK(bl_work, dsi_bl_work_handler);
+ #endif /* AAVA_BACKLIGHT_HACK */
+
+ // Temporary access from sysfs begin
+-static struct drm_encoder *orig_encoder;
+-static void mrst_dsi_prepare(struct drm_encoder *encoder);
+-static void mrst_dsi_commit(struct drm_encoder *encoder);
+-static void mrst_dsi_mode_set(struct drm_encoder *encoder,
+- struct drm_display_mode *mode,
+- struct drm_display_mode *adjusted_mode);
+-static void panel_reset(void);
+-
+-static ssize_t dphy_store(struct class *class, const char *buf, size_t len)
++static struct drm_device *test_dev;
++// Temporary access from sysfs end
++
++
++static int dsi_wait_hs_data_fifo(struct drm_device *dev)
+ {
+- ssize_t status;
+- unsigned long value;
++ int fifo_wait_time = 0;
+
+- status = strict_strtoul(buf, 16, &value);
+- dphy_reg = value;
+- printk("!!! dphy_reg = %x, clock = %x\n", dphy_reg, mipi_clock);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL) {
++ if (fifo_wait_time == MAX_FIFO_WAIT_MS) {
++ DBG_ERR("timeout");
++ return -1;
++ }
++ udelay(1000);
++ fifo_wait_time++;
++ }
++ return 0;
++}
+
+- return len;
++static int dsi_wait_hs_ctrl_fifo(struct drm_device *dev)
++{
++ int fifo_wait_time = 0;
++
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL) {
++ if (fifo_wait_time == MAX_FIFO_WAIT_MS) {
++ DBG_ERR("timeout");
++ return -1;
++ }
++ udelay(1000);
++ fifo_wait_time++;
++ }
++ return 0;
+ }
+
+-static ssize_t clock_store(struct class *class, const char *buf, size_t len)
++static void dsi_set_backlight_state(int state)
+ {
+- ssize_t status;
+- unsigned long value;
++ struct ipc_pmic_reg_data tmp_reg;
+
+- status = strict_strtoul(buf, 0, &value);
+- mipi_clock = value;
+- printk("!!! dphy_reg = %x, clock = %x\n", dphy_reg, mipi_clock);
++ DBG_TRACE("%d", state);
+
+- return len;
++ tmp_reg.ioc = 1;
++ tmp_reg.num_entries = 2;
++ tmp_reg.pmic_reg_data[0].register_address = 0x2a;
++ tmp_reg.pmic_reg_data[1].register_address = 0x28;
++
++ if (state) {
++ tmp_reg.pmic_reg_data[0].value = 0xaa;
++ tmp_reg.pmic_reg_data[1].value = 0x30;
++ } else {
++ tmp_reg.pmic_reg_data[0].value = 0x00;
++ tmp_reg.pmic_reg_data[1].value = 0x00;
++ }
++
++ if (ipc_pmic_register_write(&tmp_reg, TRUE))
++ DBG_ERR("pmic reg write failed");
+ }
+
+-static ssize_t apply_settings(struct class *class, const char *buf, size_t len)
++
++#ifdef AAVA_BACKLIGHT_HACK
++static void dsi_bl_work_handler(struct work_struct *work)
+ {
+- ssize_t status;
+- long value;
++ DBG_TRACE("");
++ dsi_set_backlight_state(1);
++}
++#endif /* AAVA_BACKLIGHT_HACK */
+
+- printk("!!! dphy_reg = %x, clock = %x\n", dphy_reg, mipi_clock);
+
+- status = strict_strtoul(buf, 0, &value);
+- if (value > 0) {
+- mrst_dsi_prepare(orig_encoder);
+- msleep(500);
+- if (value > 1) {
+- panel_reset();
+- msleep(500);
+- }
+- mrst_dsi_mode_set(orig_encoder, NULL, NULL);
+- msleep(500);
+- mrst_dsi_commit(orig_encoder);
++static void dsi_set_panel_reset_state(int state)
++{
++ struct ipc_pmic_reg_data tmp_reg = {0};
++
++ DBG_TRACE("%d", state);
++
++ tmp_reg.ioc = 1;
++ tmp_reg.num_entries = 1;
++ tmp_reg.pmic_reg_data[0].register_address = 0xe6;
++
++ if (state)
++ tmp_reg.pmic_reg_data[0].value = 0x01;
++ else
++ tmp_reg.pmic_reg_data[0].value = 0x09;
++
++ if (ipc_pmic_register_write(&tmp_reg, TRUE)) {
++ DBG_ERR("pmic reg write failed");
++ return;
+ }
+
+- return len;
++ if (state) {
++ /* Minimum active time to trigger reset is 10us */
++ udelay(10);
++ } else {
++ /* Maximum startup time from reset is 120ms */
++ msleep(120);
++ }
+ }
+-// Temporary access from sysfs end
+
+-static void panel_init(struct drm_device *dev)
++
++static void dsi_init_panel(struct drm_device *dev)
+ {
+-#if DBG_PRINTS
+- printk("panel_init\n");
+-#endif /* DBG_PRINTS */
++ DBG_TRACE("");
+
+- /* Flip page order */
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
+- HS_DATA_FIFO_FULL);
++ /* Flip page order to have correct image orientation */
++ if (dsi_wait_hs_data_fifo(dev) < 0)
++ return;
+ REG_WRITE(0xb068, 0x00008036);
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
+- HS_CTRL_FIFO_FULL);
++ if (dsi_wait_hs_ctrl_fifo(dev) < 0)
++ return;
+ REG_WRITE(0xb070, 0x00000229);
+
+-#ifdef NEW_CRAP_SAMPLE_SETTINGS
+- // 0xF0, for new crap displays
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
+- HS_DATA_FIFO_FULL);
+- REG_WRITE(0xb068, 0x005a5af0);
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
+- HS_CTRL_FIFO_FULL);
++ /* Write protection key to allow DM bit setting */
++ if (dsi_wait_hs_data_fifo(dev) < 0)
++ return;
++ REG_WRITE(0xb068, 0x005a5af1);
++ if (dsi_wait_hs_ctrl_fifo(dev) < 0)
++ return;
+ REG_WRITE(0xb070, 0x00000329);
+-#endif
+
+- /* Write protection key */
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
+- HS_DATA_FIFO_FULL);
+- REG_WRITE(0xb068, 0x005a5af1);
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
+- HS_CTRL_FIFO_FULL);
++ /* Set DM bit to enable video mode */
++ if (dsi_wait_hs_data_fifo(dev) < 0)
++ return;
++ REG_WRITE(0xb068, 0x000100f7);
++ if (dsi_wait_hs_ctrl_fifo(dev) < 0)
++ return;
+ REG_WRITE(0xb070, 0x00000329);
+
+-#ifdef NEW_CRAP_SAMPLE_SETTINGS
+- // 0xFC, for new crap displays
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
+- HS_DATA_FIFO_FULL);
++ /* Write protection keys to allow TCON setting */
++ if (dsi_wait_hs_data_fifo(dev) < 0)
++ return;
++ REG_WRITE(0xb068, 0x005a5af0);
++ if (dsi_wait_hs_ctrl_fifo(dev) < 0)
++ return;
++ REG_WRITE(0xb070, 0x00000329);
++
++ if (dsi_wait_hs_data_fifo(dev) < 0)
++ return;
+ REG_WRITE(0xb068, 0x005a5afc);
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
+- HS_CTRL_FIFO_FULL);
++ if (dsi_wait_hs_ctrl_fifo(dev) < 0)
++ return;
+ REG_WRITE(0xb070, 0x00000329);
+
+- // 0xB7, for new crap displays
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
+- HS_DATA_FIFO_FULL);
+-#ifdef DOES_NOT_WORK
+- /* Suggested by TPO, doesn't work as usual */
++ /* Write TCON setting */
++ if (dsi_wait_hs_data_fifo(dev) < 0)
++ return;
++#if 0
++ /* Suggested by TPO, doesn't work */
+ REG_WRITE(0xb068, 0x110000b7);
+ REG_WRITE(0xb068, 0x00000044);
+ #else
+ REG_WRITE(0xb068, 0x770000b7);
+ REG_WRITE(0xb068, 0x00000044);
+ #endif
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
+- HS_CTRL_FIFO_FULL);
++ if (dsi_wait_hs_ctrl_fifo(dev) < 0)
++ return;
+ REG_WRITE(0xb070, 0x00000529);
++}
+
+- // 0xB6, for new crap displays
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
+- HS_DATA_FIFO_FULL);
+- REG_WRITE(0xb068, 0x000a0ab6);
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
+- HS_CTRL_FIFO_FULL);
+- REG_WRITE(0xb070, 0x00000329);
+
+- // 0xF2, for new crap displays
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
+- HS_DATA_FIFO_FULL);
+- REG_WRITE(0xb068, 0x081010f2);
+- REG_WRITE(0xb068, 0x4a070708);
+- REG_WRITE(0xb068, 0x000000c5);
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
+- HS_CTRL_FIFO_FULL);
+- REG_WRITE(0xb070, 0x00000929);
++static void dsi_set_ptarget_state(struct drm_device *dev, int state)
++{
++ u32 pp_sts_reg;
+
+- // 0xF8, for new crap displays
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
+- HS_DATA_FIFO_FULL);
+- REG_WRITE(0xb068, 0x024003f8);
+- REG_WRITE(0xb068, 0x01030a04);
+- REG_WRITE(0xb068, 0x0e020220);
+- REG_WRITE(0xb068, 0x00000004);
++ DBG_TRACE("%d", state);
+
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
+- HS_CTRL_FIFO_FULL);
+- REG_WRITE(0xb070, 0x00000d29);
++ if (state) {
++ REG_WRITE(PP_CONTROL, (REG_READ(PP_CONTROL) | POWER_TARGET_ON));
++ do {
++ pp_sts_reg = REG_READ(PP_STATUS);
++ } while ((pp_sts_reg & (PP_ON | PP_READY)) == PP_READY);
++ } else {
++ REG_WRITE(PP_CONTROL,
++ (REG_READ(PP_CONTROL) & ~POWER_TARGET_ON));
++ do {
++ pp_sts_reg = REG_READ(PP_STATUS);
++ } while (pp_sts_reg & PP_ON);
++ }
++}
+
+- // 0xE2, for new crap displays
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
+- HS_DATA_FIFO_FULL);
+- REG_WRITE(0xb068, 0x398fc3e2);
+- REG_WRITE(0xb068, 0x0000916f);
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
+- HS_CTRL_FIFO_FULL);
+- REG_WRITE(0xb070, 0x00000629);
+
+-#ifdef DOES_NOT_WORK
+- /* Suggested by TPO, doesn't work as usual */
+- // 0xE3, for new crap displays
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
+- HS_DATA_FIFO_FULL);
+- REG_WRITE(0xb068, 0x20f684e3);
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
+- HS_CTRL_FIFO_FULL);
+- REG_WRITE(0xb070, 0x00000429);
++static void dsi_send_turn_on_packet(struct drm_device *dev)
++{
++ DBG_TRACE("");
+
+- msleep(50);
+-#endif
++ REG_WRITE(DPI_CONTROL_REG, DPI_TURN_ON);
+
+- // 0xB0, for new crap displays
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
+- HS_DATA_FIFO_FULL);
+- REG_WRITE(0xb068, 0x000000b0);
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
+- HS_CTRL_FIFO_FULL);
+- REG_WRITE(0xb070, 0x00000229);
++ /* Short delay to wait that display turns on */
++ msleep(10);
++}
+
+- // 0xF4, for new crap displays
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
+- HS_DATA_FIFO_FULL);
+- REG_WRITE(0xb068, 0x240242f4);
+- REG_WRITE(0xb068, 0x78ee2002);
+- REG_WRITE(0xb068, 0x2a071050);
+- REG_WRITE(0xb068, 0x507fee10);
+- REG_WRITE(0xb068, 0x10300710);
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
+- HS_CTRL_FIFO_FULL);
+- REG_WRITE(0xb070, 0x00001429);
+
+- // 0xBA, for new crap displays
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
+- HS_DATA_FIFO_FULL);
+- REG_WRITE(0xb068, 0x19fe07ba);
+- REG_WRITE(0xb068, 0x101c0a31);
+- REG_WRITE(0xb068, 0x00000010);
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
+- HS_CTRL_FIFO_FULL);
+- REG_WRITE(0xb070, 0x00000929);
++static void dsi_send_shutdown_packet(struct drm_device *dev)
++{
++ DBG_TRACE("");
+
+- // 0xBB, for new crap displays
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
+- HS_DATA_FIFO_FULL);
+- REG_WRITE(0xb068, 0x28ff07bb);
+- REG_WRITE(0xb068, 0x24280a31);
+- REG_WRITE(0xb068, 0x00000034);
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
+- HS_CTRL_FIFO_FULL);
+- REG_WRITE(0xb070, 0x00000929);
++ REG_WRITE(DPI_CONTROL_REG, DPI_SHUT_DOWN);
++}
+
+- // 0xFB, for new crap displays
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
+- HS_DATA_FIFO_FULL);
+- REG_WRITE(0xb068, 0x535d05fb);
+- REG_WRITE(0xb068, 0x1b1a2130);
+- REG_WRITE(0xb068, 0x221e180e);
+- REG_WRITE(0xb068, 0x131d2120);
+- REG_WRITE(0xb068, 0x535d0508);
+- REG_WRITE(0xb068, 0x1c1a2131);
+- REG_WRITE(0xb068, 0x231f160d);
+- REG_WRITE(0xb068, 0x111b2220);
+- REG_WRITE(0xb068, 0x535c2008);
+- REG_WRITE(0xb068, 0x1f1d2433);
+- REG_WRITE(0xb068, 0x2c251a10);
+- REG_WRITE(0xb068, 0x2c34372d);
+- REG_WRITE(0xb068, 0x00000023);
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
+- HS_CTRL_FIFO_FULL);
+- REG_WRITE(0xb070, 0x00003129);
+
+- // 0xFA, for new crap displays
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
+- HS_DATA_FIFO_FULL);
+- REG_WRITE(0xb068, 0x525c0bfa);
+- REG_WRITE(0xb068, 0x1c1c232f);
+- REG_WRITE(0xb068, 0x2623190e);
+- REG_WRITE(0xb068, 0x18212625);
+- REG_WRITE(0xb068, 0x545d0d0e);
+- REG_WRITE(0xb068, 0x1e1d2333);
+- REG_WRITE(0xb068, 0x26231a10);
+- REG_WRITE(0xb068, 0x1a222725);
+- REG_WRITE(0xb068, 0x545d280f);
+- REG_WRITE(0xb068, 0x21202635);
+- REG_WRITE(0xb068, 0x31292013);
+- REG_WRITE(0xb068, 0x31393d33);
+- REG_WRITE(0xb068, 0x00000029);
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
+- HS_CTRL_FIFO_FULL);
+- REG_WRITE(0xb070, 0x00003129);
+-#endif
++static void dsi_set_pipe_plane_enable_state(struct drm_device *dev, int state)
++{
++ u32 temp_reg;
+
+- /* Set DM */
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
+- HS_DATA_FIFO_FULL);
+- REG_WRITE(0xb068, 0x000100f7);
+- while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
+- HS_CTRL_FIFO_FULL);
+- REG_WRITE(0xb070, 0x00000329);
+-}
++ DBG_TRACE("%d", state);
+
++ if (state) {
++ /* Enable pipe */
++ temp_reg = REG_READ(PIPEACONF);
++ temp_reg |= (PIPEACONF_ENABLE);
++ REG_WRITE(PIPEACONF, temp_reg);
++ temp_reg = REG_READ(PIPEACONF);
+
+-static void panel_reset_on(void)
+-{
+- struct ipc_pmic_reg_data tmp_reg = {0};
+-#if DBG_PRINTS
+- printk("panel_reset_on\n");
+-#endif /* DBG_PRINTS */
+- tmp_reg.ioc = 1;
+- tmp_reg.num_entries = 1;
+-#ifdef AAVA_EV_0_5
+- tmp_reg.pmic_reg_data[0].register_address = 0xe6;
+- tmp_reg.pmic_reg_data[0].value = 0x01;
+-#else /* CDK */
+- tmp_reg.pmic_reg_data[0].register_address = 0xf4;
+- if (ipc_pmic_register_read(&tmp_reg)) {
+- printk("panel_reset_on: failed to read pmic reg 0xf4!\n");
+- return;
+- }
+- tmp_reg.pmic_reg_data[0].value &= 0xbf;
+-#endif /* AAVA_EV_0_5 */
+- if (ipc_pmic_register_write(&tmp_reg, TRUE)) {
+- printk("panel_reset_on: failed to write pmic reg 0xe6!\n");
+- }
+-}
++ /* Wait for 20ms for the pipe enable to take effect. */
++ msleep(20);
+
++ /* Enable plane */
++ temp_reg = REG_READ(DSPACNTR);
++ temp_reg |= (DISPLAY_PLANE_ENABLE);
++ REG_WRITE(DSPACNTR, temp_reg);
++ temp_reg = REG_READ(DSPACNTR);
+
+-static void panel_reset_off(void)
+-{
+- struct ipc_pmic_reg_data tmp_reg = {0};
+-#if DBG_PRINTS
+- printk("panel_reset_off\n");
+-#endif /* DBG_PRINTS */
+- tmp_reg.ioc = 1;
+- tmp_reg.num_entries = 1;
+-#ifdef AAVA_EV_0_5
+- tmp_reg.pmic_reg_data[0].register_address = 0xe6;
+- tmp_reg.pmic_reg_data[0].value = 0x09;
+-#else /* CDK */
+- tmp_reg.pmic_reg_data[0].register_address = 0xf4;
+- if (ipc_pmic_register_read(&tmp_reg)) {
+- printk("panel_reset_off: failed to read pmic reg 0xf4!\n");
+- return;
+- }
+- tmp_reg.pmic_reg_data[0].value |= 0x40;
+-#endif /* AAVA_EV_0_5 */
+- if (ipc_pmic_register_write(&tmp_reg, TRUE)) {
+- printk("panel_reset_off: failed to write pmic reg 0xe6!\n");
++ /* Flush plane change by read/write/read of BASE reg */
++ temp_reg = REG_READ(MRST_DSPABASE);
++ REG_WRITE(MRST_DSPABASE, temp_reg);
++ temp_reg = REG_READ(MRST_DSPABASE);
++
++ /* Wait for 20ms for the plane enable to take effect. */
++ msleep(20);
++ } else {
++ /* Disable plane */
++ temp_reg = REG_READ(DSPACNTR);
++ temp_reg &= ~(DISPLAY_PLANE_ENABLE);
++ REG_WRITE(DSPACNTR, temp_reg);
++ temp_reg = REG_READ(DSPACNTR);
++
++ /* Flush plane change by read/write/read of BASE reg */
++ temp_reg = REG_READ(MRST_DSPABASE);
++ REG_WRITE(MRST_DSPABASE, temp_reg);
++ temp_reg = REG_READ(MRST_DSPABASE);
++
++ /* Wait for 20ms for the plane disable to take effect. */
++ msleep(20);
++
++ /* Disable pipe */
++ temp_reg = REG_READ(PIPEACONF);
++ temp_reg &= ~(PIPEACONF_ENABLE);
++ REG_WRITE(PIPEACONF, temp_reg);
++ temp_reg = REG_READ(PIPEACONF);
++
++ /* Wait for 20ms for the pipe disable to take effect. */
++ msleep(20);
+ }
+ }
+
+
+-static void panel_reset(void)
++static void dsi_set_device_ready_state(struct drm_device *dev, int state)
+ {
+-#if DBG_PRINTS
+- printk("panel_reset\n");
+-#endif /* DBG_PRINTS */
+-
+- panel_reset_on();
+- msleep(20);
+- panel_reset_off();
+- msleep(20);
++ DBG_TRACE("%d", state);
++
++ if (state)
++ REG_WRITE(DEVICE_READY_REG, 0x00000001);
++ else
++ REG_WRITE(DEVICE_READY_REG, 0x00000000);
+ }
+
+
+-static void backlight_state(bool on)
++static void dsi_configure_mipi_block(struct drm_device *dev)
+ {
+- struct ipc_pmic_reg_data tmp_reg;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 color_format = (RGB_888_FMT << FMT_DPI_POS);
++ u32 res = 0;
+
+-#if DBG_PRINTS
+- printk("backlight_state\n");
+-#endif /* DBG_PRINTS */
++ DBG_TRACE("");
+
+- tmp_reg.ioc = 1;
+- tmp_reg.num_entries = 2;
+- tmp_reg.pmic_reg_data[0].register_address = 0x2a;
+- tmp_reg.pmic_reg_data[1].register_address = 0x28;
++ /* MIPI clock ratio 1:1 */
++ //REG_WRITE(MIPI_CONTROL_REG, 0x00000018);
++ //REG_WRITE(0xb080, 0x0b061a02);
+
+- if( on ) {
+-#if DBG_PRINTS
+- printk("backlight_state: ON\n");
+-#endif /* DBG_PRINTS */
+- tmp_reg.pmic_reg_data[0].value = 0xaa;
+-#ifdef AAVA_EV_0_5
+- tmp_reg.pmic_reg_data[1].value = 0x30;
+-#else /* CDK */
+- tmp_reg.pmic_reg_data[1].value = 0x60;
+-#endif /* AAVA_EV_0_5 */
+- } else {
+-#if DBG_PRINTS
+- printk("backlight_state: OFF\n");
+-#endif /* DBG_PRINTS */
+- tmp_reg.pmic_reg_data[0].value = 0x00;
+- tmp_reg.pmic_reg_data[1].value = 0x00;
+- }
++ /* MIPI clock ratio 2:1 */
++ //REG_WRITE(MIPI_CONTROL_REG, 0x00000019);
++ //REG_WRITE(0xb080, 0x3f1f1c04);
+
+- if (ipc_pmic_register_write(&tmp_reg, TRUE)) {
+- printk("backlight_state: failed to write pmic regs 0x2a and 0x28!\n");
+- }
+-}
++ /* MIPI clock ratio 3:1 */
++ //REG_WRITE(MIPI_CONTROL_REG, 0x0000001a);
++ //REG_WRITE(0xb080, 0x091f7f08);
+
+-#ifdef AAVA_BACKLIGHT_HACK
+-static void bl_work_handler(struct work_struct *work)
+-{
+- backlight_state(true);
++ /* MIPI clock ratio 4:1 */
++ REG_WRITE(MIPI_CONTROL_REG, (0x00000018 | mipi_clock));
++ REG_WRITE(0xb080, dphy_reg);
++
++ /* Enable all interrupts */
++ REG_WRITE(INTR_EN_REG, 0xffffffff);
++
++ REG_WRITE(TURN_AROUND_TIMEOUT_REG, 0x0000000A);
++ REG_WRITE(DEVICE_RESET_REG, 0x000000ff);
++ REG_WRITE(INIT_COUNT_REG, 0x00000fff);
++ REG_WRITE(HS_TX_TIMEOUT_REG, 0x90000);
++ REG_WRITE(LP_RX_TIMEOUT_REG, 0xffff);
++ REG_WRITE(HIGH_LOW_SWITCH_COUNT_REG, 0x46);
++ REG_WRITE(EOT_DISABLE_REG, 0x00000000);
++ REG_WRITE(LP_BYTECLK_REG, 0x00000004);
++
++ REG_WRITE(VIDEO_FMT_REG, dev_priv->videoModeFormat);
++
++ REG_WRITE(DSI_FUNC_PRG_REG, (dev_priv->laneCount | color_format));
++
++ res = dev_priv->HactiveArea | (dev_priv->VactiveArea << RES_V_POS);
++ REG_WRITE(DPI_RESOLUTION_REG, res);
++
++ REG_WRITE(VERT_SYNC_PAD_COUNT_REG, dev_priv->VsyncWidth);
++ REG_WRITE(VERT_BACK_PORCH_COUNT_REG, dev_priv->VbackPorch);
++ REG_WRITE(VERT_FRONT_PORCH_COUNT_REG, dev_priv->VfrontPorch);
++
++ REG_WRITE(HORIZ_SYNC_PAD_COUNT_REG, dev_priv->HsyncWidth);
++ REG_WRITE(HORIZ_BACK_PORCH_COUNT_REG, dev_priv->HbackPorch);
++ REG_WRITE(HORIZ_FRONT_PORCH_COUNT_REG, dev_priv->HfrontPorch);
++ REG_WRITE(HORIZ_ACTIVE_AREA_COUNT_REG, MIPI_HACT);
++
++ /* Enable MIPI Port */
++ REG_WRITE(MIPI, MIPI_PORT_EN);
+ }
+-#endif /* AAVA_BACKLIGHT_HACK */
+
+
+-/**
+- * Sets the power state for the panel.
+- */
+-static void mrst_dsi_set_power(struct drm_device *dev,
+- struct psb_intel_output *output, bool on)
++static void dsi_configure_down(struct drm_device *dev)
+ {
+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+- u32 pp_status;
+
+-#if DBG_PRINTS
+- printk("mrst_dsi_set_power\n");
+-#endif /* DBG_PRINTS */
++ DBG_TRACE("");
+
+- /*
+- * The DIS device must be ready before we can change power state.
+- */
+- if (!dev_priv->dsi_device_ready)
+- {
+-#if DBG_PRINTS
+- printk("mrst_dsi_set_power: !dev_priv->dsi_device_ready!\n");
+-#endif /* DBG_PRINTS */
++ if (!dev_priv->dpi_panel_on) {
++ DBG_TRACE("already off");
+ return;
+ }
+
+- /*
+- * We don't support dual DSI yet. May be in POR in the future.
++ /* Disable backlight */
++ dsi_set_backlight_state(0);
++
++ /* Disable pipe and plane */
++ dsi_set_pipe_plane_enable_state(dev, 0);
++
++ /* Disable PTARGET */
++ dsi_set_ptarget_state(dev, 0);
++
++ /* Send shutdown command, can only be sent if
++ * interface is configured
+ */
+- if (dev_priv->dual_display)
+- {
+-#if DBG_PRINTS
+- printk("mrst_dsi_set_power: dev_priv->dual_display!\n");
+-#endif /* DBG_PRINTS */
+- return;
+- }
++ if (dev_priv->dsi_device_ready)
++ dsi_send_shutdown_packet(dev);
+
+- if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+- OSPM_UHB_FORCE_POWER_ON))
+- return;
++ /* Clear device ready state */
++ dsi_set_device_ready_state(dev, 0);
+
+- if (on) {
+-#if DBG_PRINTS
+- printk("mrst_dsi_set_power: on\n");
+-#endif /* DBG_PRINTS */
+- if (dev_priv->dpi && !dev_priv->dpi_panel_on) {
+-#if DBG_PRINTS
+- printk("mrst_dsi_set_power: dpi\n");
+-#endif /* DBG_PRINTS */
+- REG_WRITE(DPI_CONTROL_REG, DPI_TURN_ON);
+- REG_WRITE(PP_CONTROL,
+- (REG_READ(PP_CONTROL) | POWER_TARGET_ON));
+- do {
+- pp_status = REG_READ(PP_STATUS);
+- } while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
+-
+- /* Run TPO display specific initialisations */
+-// MiKo TBD, this delay may need to be tuned
+- msleep(50);
+- panel_init(dev);
+-
+- /* Set backlights on */
+- backlight_state( true );
+- dev_priv->dpi_panel_on = true;
+- }
+- } else {
+-#if DBG_PRINTS
+- printk("mrst_dsi_set_power: off\n");
+-#endif /* DBG_PRINTS */
+- if (dev_priv->dpi && dev_priv->dpi_panel_on) {
+-#if DBG_PRINTS
+- printk("mrst_dsi_set_power: dpi\n");
+-#endif /* DBG_PRINTS */
+- /* Set backlights off */
+- backlight_state( false );
+-
+-// MiKo TBD, something clever could be done here to save power, for example:
+-// -Set display to sleep mode, or
+-// -Set display to HW reset, or
+-// -Shutdown the voltages to display
+-
+- REG_WRITE(PP_CONTROL,
+- (REG_READ(PP_CONTROL) & ~POWER_TARGET_ON));
+- do {
+- pp_status = REG_READ(PP_STATUS);
+- } while (pp_status & PP_ON);
+-
+- REG_WRITE(DPI_CONTROL_REG, DPI_SHUT_DOWN);
+-
+- dev_priv->dpi_panel_on = false;
+- }
+- }
++ /* Set panel to reset */
++ dsi_set_panel_reset_state(1);
+
+- ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ dev_priv->dpi_panel_on = false;
+ }
+
+
+-static void mrst_dsi_dpms(struct drm_encoder *encoder, int mode)
++static void dsi_configure_up(struct drm_device *dev)
+ {
+- struct drm_device *dev = encoder->dev;
+- struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+
+-#if DBG_PRINTS
+- printk("mrst_dsi_dpms\n");
+-#endif /* DBG_PRINTS */
++ DBG_TRACE("");
+
+- if (mode == DRM_MODE_DPMS_ON)
+- mrst_dsi_set_power(dev, output, true);
+- else
+- mrst_dsi_set_power(dev, output, false);
++ if (dev_priv->dpi_panel_on) {
++ DBG_TRACE("already on");
++ return;
++ }
++
++ /* Get panel from reset */
++ dsi_set_panel_reset_state(0);
++
++ /* Set device ready state */
++ dsi_set_device_ready_state(dev, 1);
++
++ /* Send turn on command */
++ dsi_send_turn_on_packet(dev);
++
++ /* Enable PTARGET */
++ dsi_set_ptarget_state(dev, 1);
++
++ /* Initialize panel */
++ dsi_init_panel(dev);
++
++ /* Enable plane and pipe */
++ dsi_set_pipe_plane_enable_state(dev, 1);
++
++ /* Enable backlight */
++ dsi_set_backlight_state(1);
++
++ dev_priv->dpi_panel_on = true;
+ }
+
+
+-static void mrst_dsi_save(struct drm_connector *connector)
++static void dsi_init_drv_ic(struct drm_device *dev)
+ {
+-#if DBG_PRINTS
+- printk("mrst_dsi_save\n");
+-#endif /* DBG_PRINTS */
+- // MiKo TBD
++ DBG_TRACE("");
+ }
+
+
+-static void mrst_dsi_restore(struct drm_connector *connector)
++static void dsi_schedule_work(struct drm_device *dev)
+ {
+-#if DBG_PRINTS
+- printk("mrst_dsi_restore\n");
+-#endif /* DBG_PRINTS */
+- // MiKo TBD
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ DBG_TRACE("");
++
++ schedule_work(&dev_priv->dsi_work);
+ }
+
+
+-static void mrst_dsi_prepare(struct drm_encoder *encoder)
++static void dsi_work_handler(struct work_struct *work)
+ {
+- struct drm_device *dev = encoder->dev;
+- struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++ struct drm_psb_private *dev_priv = container_of(work,
++ struct drm_psb_private, dsi_work);
+
+-#if DBG_PRINTS
+- printk("mrst_dsi_prepare\n");
+-#endif /* DBG_PRINTS */
++ DBG_TRACE("");
+
+- if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+- OSPM_UHB_FORCE_POWER_ON))
+- return;
++ dsi_configure_up(dev_priv->dev);
++}
+
+- mrst_dsi_set_power(dev, output, false);
+
+- ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++static void dsi_init_mipi_config(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ DBG_TRACE("");
++
++ /* Fixed values for TPO display */
++ dev_priv->pixelClock = 33264;
++ dev_priv->HsyncWidth = MIPI_HSPAD;
++ dev_priv->HbackPorch = MIPI_HBP;
++ dev_priv->HfrontPorch = MIPI_HFP;
++ dev_priv->HactiveArea = HSIZE;
++ dev_priv->VsyncWidth = MIPI_VSPAD;
++ dev_priv->VbackPorch = MIPI_VBP;
++ dev_priv->VfrontPorch = MIPI_VFP;
++ dev_priv->VactiveArea = VSIZE;
++ dev_priv->bpp = 24;
++
++ /* video mode */
++ dev_priv->dpi = true;
++
++ /* Set this true since firmware or kboot has enabled display */
++ dev_priv->dpi_panel_on = true;
++
++ /* Set this false to ensure proper initial configuration */
++ dev_priv->dsi_device_ready = false;
++
++ /* 2 lanes */
++ dev_priv->laneCount = MIPI_LANES;
++
++ /* Burst mode */
++ dev_priv->videoModeFormat = BURST_MODE;
++
++ dev_priv->init_drvIC = dsi_init_drv_ic;
++ dev_priv->dsi_prePowerState = dsi_configure_down;
++ dev_priv->dsi_postPowerState = dsi_schedule_work;
+ }
+
+
+-static void mrst_dsi_commit(struct drm_encoder *encoder)
++static struct drm_display_mode *dsi_get_fixed_display_mode(void)
+ {
+- struct drm_device *dev = encoder->dev;
+- struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++ struct drm_display_mode *mode;
++
++ DBG_TRACE("");
++
++ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++ if (!mode) {
++ DBG_ERR("kzalloc failed\n");
++ return NULL;
++ }
++
++ /* Fixed mode for TPO display
++ Note: Using defined values for easier match with ITP scripts
++ and adding 1 since psb_intel_display.c decreases by 1
++ */
++ mode->hdisplay = (DISP_HPIX + 1);
++ mode->vdisplay = (DISP_VPIX + 1);
++ mode->hsync_start = (DISP_HSYNC_START + 1);
++ mode->hsync_end = (DISP_HSYNC_END + 1);
++ mode->htotal = (DISP_HBLANK_END + 1);
++ mode->vsync_start = (DISP_VSYNC_START + 1);
++ mode->vsync_end = (DISP_VSYNC_END + 1);
++ mode->vtotal = (DISP_VBLANK_END + 1);
++ mode->clock = 33264;
+
+-#if DBG_PRINTS
+- printk("mrst_dsi_commit\n");
+-#endif /* DBG_PRINTS */
++ drm_mode_set_name(mode);
++ drm_mode_set_crtcinfo(mode, 0);
+
+- mrst_dsi_set_power(dev, output, true);
++ return mode;
+ }
+
+
+-static void mrst_dsi_mode_set(struct drm_encoder *encoder,
+- struct drm_display_mode *mode,
+- struct drm_display_mode *adjusted_mode)
++/* Encoder funcs */
++static void dsi_encoder_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
+ {
+ struct drm_device *dev = encoder->dev;
+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+- u32 SupportedFormat = 0;
+- u32 resolution = 0;
+- uint64_t curValue = DRM_MODE_SCALE_FULLSCREEN;
++ uint64_t scale_mode = DRM_MODE_SCALE_FULLSCREEN;
+
+-#if DBG_PRINTS
+- printk("mrst_dsi_mode_set\n");
+-#endif /* DBG_PRINTS */
++ DBG_TRACE("");
+
+ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
+- OSPM_UHB_FORCE_POWER_ON))
++ OSPM_UHB_FORCE_POWER_ON)) {
++ DBG_ERR("OSPM_DISPLAY_ISLAND OSPM_UHB_FORCE_POWER_ON failed");
+ return;
++ }
+
+ /* Sleep to ensure that the graphics engine is ready
+ * since its mode_set is called before ours
+ */
+ msleep(100);
+
+- switch (dev_priv->bpp)
+- {
+- case 24:
+- SupportedFormat = RGB_888_FMT;
+- break;
+- default:
+- printk("mrst_dsi_mode_set, invalid bpp!\n");
+- break;
+- }
+-
+- if (dev_priv->dpi) {
++ /* Only one mode is supported,
++ * so configure only if not yet configured
++ */
++ if (!dev_priv->dsi_device_ready) {
+ drm_connector_property_get_value(
+ &enc_to_psb_intel_output(encoder)->base,
+ dev->mode_config.scaling_mode_property,
+- &curValue);
+- if (curValue == DRM_MODE_SCALE_CENTER) {
++ &scale_mode);
++ if (scale_mode == DRM_MODE_SCALE_CENTER)
+ REG_WRITE(PFIT_CONTROL, 0);
+- } else if (curValue == DRM_MODE_SCALE_FULLSCREEN) {
++ else if (scale_mode == DRM_MODE_SCALE_FULLSCREEN)
+ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+- } else {
+- printk("mrst_dsi_mode_set, scaling not supported!\n");
++ else {
++ DBG_ERR("unsupported scaling");
+ REG_WRITE(PFIT_CONTROL, 0);
+ }
++ dsi_configure_mipi_block(dev);
++ dev_priv->dsi_device_ready = true;
++ }
+
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
+
+- /* MIPI clock ratio 1:1 */
+- //REG_WRITE(MIPI_CONTROL_REG, 0x00000018);
+- //REG_WRITE(0xb080, 0x0b061a02);
+-
+- /* MIPI clock ratio 2:1 */
+- //REG_WRITE(MIPI_CONTROL_REG, 0x00000019);
+- //REG_WRITE(0xb080, 0x3f1f1c04);
+
+- /* MIPI clock ratio 3:1 */
+- //REG_WRITE(MIPI_CONTROL_REG, 0x0000001a);
+- //REG_WRITE(0xb080, 0x091f7f08);
++static void dsi_encoder_prepare(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
+
+- /* MIPI clock ratio 4:1 */
+- REG_WRITE(MIPI_CONTROL_REG, (0x00000018 | mipi_clock));
+- REG_WRITE(0xb080, dphy_reg);
++ DBG_TRACE("");
+
+- /* Enable all interrupts */
+- REG_WRITE(INTR_EN_REG, 0xffffffff);
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON)) {
++ DBG_ERR("OSPM_DISPLAY_ISLAND OSPM_UHB_FORCE_POWER_ON failed");
++ return;
++ }
+
+- REG_WRITE(TURN_AROUND_TIMEOUT_REG, 0x0000000A);
+- REG_WRITE(DEVICE_RESET_REG, 0x000000ff);
+- REG_WRITE(INIT_COUNT_REG, 0x00000fff);
+- REG_WRITE(HS_TX_TIMEOUT_REG, 0x90000);
+- REG_WRITE(LP_RX_TIMEOUT_REG, 0xffff);
+- REG_WRITE(HIGH_LOW_SWITCH_COUNT_REG, 0x46);
+- REG_WRITE(EOT_DISABLE_REG, 0x00000000);
+- REG_WRITE(LP_BYTECLK_REG, 0x00000004);
++ dsi_configure_down(dev);
+
+- REG_WRITE(VIDEO_FMT_REG, dev_priv->videoModeFormat);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
+
+- SupportedFormat <<= FMT_DPI_POS;
+- REG_WRITE(DSI_FUNC_PRG_REG,
+- (dev_priv->laneCount | SupportedFormat));
+
+- resolution = dev_priv->HactiveArea |
+- (dev_priv->VactiveArea << RES_V_POS);
+- REG_WRITE(DPI_RESOLUTION_REG, resolution);
++static void dsi_encoder_commit(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+
+- REG_WRITE(VERT_SYNC_PAD_COUNT_REG, dev_priv->VsyncWidth);
+- REG_WRITE(VERT_BACK_PORCH_COUNT_REG, dev_priv->VbackPorch);
+- REG_WRITE(VERT_FRONT_PORCH_COUNT_REG, dev_priv->VfrontPorch);
++ DBG_TRACE("");
+
+- REG_WRITE(HORIZ_SYNC_PAD_COUNT_REG, dev_priv->HsyncWidth);
+- REG_WRITE(HORIZ_BACK_PORCH_COUNT_REG, dev_priv->HbackPorch);
+- REG_WRITE(HORIZ_FRONT_PORCH_COUNT_REG, dev_priv->HfrontPorch);
+- REG_WRITE(HORIZ_ACTIVE_AREA_COUNT_REG, MIPI_HACT);
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON)) {
++ DBG_ERR("OSPM_DISPLAY_ISLAND OSPM_UHB_FORCE_POWER_ON failed");
++ return;
+ }
+
+- /* Enable MIPI Port */
+- REG_WRITE(MIPI, MIPI_PORT_EN);
++ if (!work_pending(&dev_priv->dsi_work))
++ dsi_configure_up(dev);
+
+- REG_WRITE(DEVICE_READY_REG, 0x00000001);
+- REG_WRITE(DPI_CONTROL_REG, 0x00000002); /* Turn On */
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
+
+- dev_priv->dsi_device_ready = true;
+
+- /* Enable pipe */
+- REG_WRITE(PIPEACONF, dev_priv->pipeconf);
+- REG_READ(PIPEACONF);
++static void dsi_encoder_dpms(struct drm_encoder *encoder, int mode)
++{
++ struct drm_device *dev = encoder->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+
+- /* Wait for 20ms for the pipe enable to take effect. */
+- udelay(20000);
++ DBG_TRACE("%s", ((mode == DRM_MODE_DPMS_ON) ? "ON" : "OFF"));
+
+- /* Enable plane */
+- REG_WRITE(DSPACNTR, dev_priv->dspcntr);
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON)) {
++ DBG_ERR("OSPM_DISPLAY_ISLAND OSPM_UHB_FORCE_POWER_ON failed");
++ return;
++ }
+
+- /* Wait for 20ms for the plane enable to take effect. */
+- udelay(20000);
++ if (mode == DRM_MODE_DPMS_ON) {
++ if (!work_pending(&dev_priv->dsi_work))
++ dsi_configure_up(dev);
++ } else
++ dsi_configure_down(dev);
+
+ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
+ }
+
+
+-/**
+- * Detect the MIPI connection.
+- *
+- * This always returns CONNECTOR_STATUS_CONNECTED.
+- * This connector should only have
+- * been set up if the MIPI was actually connected anyway.
+- */
+-static enum drm_connector_status mrst_dsi_detect(struct drm_connector
+- *connector)
++/* Connector funcs */
++static enum drm_connector_status dsi_connector_detect(struct drm_connector
++ *connector)
+ {
+-#if DBG_PRINTS
+- printk("mrst_dsi_detect\n");
+-#endif /* DBG_PRINTS */
++ DBG_TRACE("");
+ return connector_status_connected;
+ }
+
+
+-/**
+- * Return the list of MIPI DDB modes if available.
+- */
+-static int mrst_dsi_get_modes(struct drm_connector *connector)
++static int dsi_connector_get_modes(struct drm_connector *connector)
+ {
+ struct drm_device *dev = connector->dev;
+- struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
+- struct psb_intel_mode_device *mode_dev = psb_intel_output->mode_dev;
++ struct psb_intel_output *psb_output = to_psb_intel_output(connector);
++ struct psb_intel_mode_device *mode_dev = psb_output->mode_dev;
++ struct drm_display_mode *mode;
++
++ DBG_TRACE("");
+
+ /* Didn't get an DDB, so
+ * Set wide sync ranges so we get all modes
+@@ -775,8 +701,7 @@ static int mrst_dsi_get_modes(struct drm_connector *connector)
+ connector->display_info.max_hfreq = 200;
+
+ if (mode_dev->panel_fixed_mode != NULL) {
+- struct drm_display_mode *mode =
+- drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
++ mode = drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+@@ -784,180 +709,116 @@ static int mrst_dsi_get_modes(struct drm_connector *connector)
+ }
+
+
+-static const struct drm_encoder_helper_funcs mrst_dsi_helper_funcs = {
+- .dpms = mrst_dsi_dpms,
++static void dsi_connector_save(struct drm_connector *connector)
++{
++ DBG_TRACE("");
++}
++
++
++static void dsi_connector_restore(struct drm_connector *connector)
++{
++ DBG_TRACE("");
++}
++
++
++static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
++ .dpms = dsi_encoder_dpms,
+ .mode_fixup = psb_intel_lvds_mode_fixup,
+- .prepare = mrst_dsi_prepare,
+- .mode_set = mrst_dsi_mode_set,
+- .commit = mrst_dsi_commit,
++ .prepare = dsi_encoder_prepare,
++ .mode_set = dsi_encoder_mode_set,
++ .commit = dsi_encoder_commit,
+ };
+
+-
+-static const struct drm_connector_helper_funcs
+- mrst_dsi_connector_helper_funcs = {
+- .get_modes = mrst_dsi_get_modes,
+- .mode_valid = psb_intel_lvds_mode_valid,
+- .best_encoder = psb_intel_best_encoder,
++static const struct drm_connector_helper_funcs connector_helper_funcs = {
++ .get_modes = dsi_connector_get_modes,
++ .mode_valid = psb_intel_lvds_mode_valid,
++ .best_encoder = psb_intel_best_encoder,
+ };
+
+
+-static const struct drm_connector_funcs mrst_dsi_connector_funcs = {
++static const struct drm_connector_funcs connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+- .save = mrst_dsi_save,
+- .restore = mrst_dsi_restore,
+- .detect = mrst_dsi_detect,
++ .save = dsi_connector_save,
++ .restore = dsi_connector_restore,
++ .detect = dsi_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = psb_intel_lvds_set_property,
+ .destroy = psb_intel_lvds_destroy,
+ };
+
+
+-/** Returns the panel fixed mode from configuration. */
+-struct drm_display_mode *mrst_dsi_get_configuration_mode(struct drm_device *dev)
+-{
+- struct drm_display_mode *mode;
+-
+- mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+- if (!mode)
+- return NULL;
+-
+- /* MiKo, fixed mode for TPO display
+- Note: Using defined values for easier match with ITP scripts
+- and adding 1 since psb_intel_display.c decreases by 1
+- */
+- mode->hdisplay = (DISP_HPIX + 1);
+- mode->vdisplay = (DISP_VPIX + 1);
+- mode->hsync_start = (DISP_HSYNC_START + 1);
+- mode->hsync_end = (DISP_HSYNC_END + 1);
+- mode->htotal = (DISP_HBLANK_END + 1);
+- mode->vsync_start = (DISP_VSYNC_START + 1);
+- mode->vsync_end = (DISP_VSYNC_END + 1);
+- mode->vtotal = (DISP_VBLANK_END + 1);
+- mode->clock = 33264;
+-
+- drm_mode_set_name(mode);
+- drm_mode_set_crtcinfo(mode, 0);
+-
+- return mode;
+-}
+-
+-
+-/* ************************************************************************* *\
+-FUNCTION: mrst_mipi_settings_init
+- `
+-DESCRIPTION:
+-
+-\* ************************************************************************* */
+-static bool mrst_mipi_settings_init(DRM_DRIVER_PRIVATE_T *dev_priv)
+-{
+- /* MiKo, fixed values for TPO display */
+- dev_priv->pixelClock = 33264;
+- dev_priv->HsyncWidth = MIPI_HSPAD;
+- dev_priv->HbackPorch = MIPI_HBP;
+- dev_priv->HfrontPorch = MIPI_HFP;
+- dev_priv->HactiveArea = HSIZE;
+- dev_priv->VsyncWidth = MIPI_VSPAD;
+- dev_priv->VbackPorch = MIPI_VBP;
+- dev_priv->VfrontPorch = MIPI_VFP;
+- dev_priv->VactiveArea = VSIZE;
+- dev_priv->bpp = 24;
+-
+- /* video mode */
+- dev_priv->dpi = true;
+-
+- /* MiKo, set these true by default to ensure that first mode set is done
+- cleanly
+- */
+- dev_priv->dpi_panel_on = true;
+- dev_priv->dsi_device_ready = true;
+-
+- /* 2 lanes */
+- dev_priv->laneCount = MIPI_LANES;
+-
+- /* Burst mode */
+- dev_priv->videoModeFormat = BURST_MODE;
+-
+- return true;
+-}
+-
+-
+-/**
+- * mrst_dsi_init - setup MIPI connectors on this device
+- * @dev: drm device
+- *
+- * Create the connector, try to figure out what
+- * modes we can display on the MIPI panel (if present).
+- */
+ void mrst_dsi_init(struct drm_device *dev,
+- struct psb_intel_mode_device *mode_dev)
++ struct psb_intel_mode_device *mode_dev)
+ {
+ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+- struct psb_intel_output *psb_intel_output;
++ struct psb_intel_output *psb_output;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+-#if DBG_PRINTS
+- printk("mrst_dsi_init\n");
+-#endif /* DBG_PRINTS */
++ DBG_TRACE("");
+
+- psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
+- if (!psb_intel_output)
++ psb_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
++ if (!psb_output) {
++ DBG_ERR("kzalloc failed\n");
+ return;
+-
+- panel_reset();
++ }
+
+ #ifdef AAVA_BACKLIGHT_HACK
+ schedule_delayed_work(&bl_work, 2*HZ);
+ #endif /* AAVA_BACKLIGHT_HACK */
+
+- psb_intel_output->mode_dev = mode_dev;
+- connector = &psb_intel_output->base;
+- encoder = &psb_intel_output->enc;
+- drm_connector_init(dev,
+- &psb_intel_output->base,
+- &mrst_dsi_connector_funcs,
+- DRM_MODE_CONNECTOR_MIPI);
+-
+- drm_encoder_init(dev,
+- &psb_intel_output->enc,
+- &psb_intel_lvds_enc_funcs,
+- DRM_MODE_ENCODER_MIPI);
+-
+- drm_mode_connector_attach_encoder(&psb_intel_output->base,
+- &psb_intel_output->enc);
+- psb_intel_output->type = INTEL_OUTPUT_MIPI;
+-
+- drm_encoder_helper_add(encoder, &mrst_dsi_helper_funcs);
+- drm_connector_helper_add(connector, &mrst_dsi_connector_helper_funcs);
++ psb_output->mode_dev = mode_dev;
++ connector = &psb_output->base;
++ encoder = &psb_output->enc;
++ drm_connector_init(dev, &psb_output->base, &connector_funcs,
++ DRM_MODE_CONNECTOR_MIPI);
++
++ drm_encoder_init(dev, &psb_output->enc, &psb_intel_lvds_enc_funcs,
++ DRM_MODE_ENCODER_MIPI);
++
++ drm_mode_connector_attach_encoder(&psb_output->base, &psb_output->enc);
++ psb_output->type = INTEL_OUTPUT_MIPI;
++
++ drm_encoder_helper_add(encoder, &encoder_helper_funcs);
++ drm_connector_helper_add(connector, &connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ drm_connector_attach_property(connector,
+- dev->mode_config.scaling_mode_property,
+- DRM_MODE_SCALE_FULLSCREEN);
+- drm_connector_attach_property(connector,
+- dev_priv->backlight_property,
+- BRIGHTNESS_MAX_LEVEL);
++ dev->mode_config.scaling_mode_property,
++ DRM_MODE_SCALE_FULLSCREEN);
++ drm_connector_attach_property(connector, dev_priv->backlight_property,
++ BRIGHTNESS_MAX_LEVEL);
+
+- if (!mrst_mipi_settings_init(dev_priv))
+- printk("Can't initialize MIPI settings\n");
++ mode_dev->panel_wants_dither = false;
++
++ dsi_init_mipi_config(dev_priv);
+
+ /* No config phase */
+ dev_priv->config_phase = false;
+
+ /* Get the fixed mode */
+- mode_dev->panel_fixed_mode = mrst_dsi_get_configuration_mode(dev);
+- if (mode_dev->panel_fixed_mode) {
++ mode_dev->panel_fixed_mode = dsi_get_fixed_display_mode();
++ if (mode_dev->panel_fixed_mode)
+ mode_dev->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+- } else {
+- printk("Found no modes for MIPI!\n");
++ else {
++ DBG_ERR("Fixed mode not available!\n");
+ goto failed_find;
+ }
++
++ /* Set this true since we enable/disable plane and pipe */
++ dev_priv->dsi_plane_pipe_control = true;
++
++ drm_sysfs_connector_add(connector);
++
++ /* Initialize work queue */
++ INIT_WORK(&dev_priv->dsi_work, dsi_work_handler);
++
+ // Temporary access from sysfs begin
+- orig_encoder = encoder;
++ test_dev = dev;
+ // Temporary access from sysfs end
+- drm_sysfs_connector_add(connector);
++
+ return;
+
+ failed_find:
+@@ -966,31 +827,140 @@ failed_find:
+ kfree(connector);
+ }
+
++
+ // Temporary access from sysfs begin
+-static struct class_attribute miko_class_attrs[] = {
+- __ATTR(dphy, 0644, NULL, dphy_store),
+- __ATTR(clock, 0644, NULL, clock_store),
+- __ATTR(apply, 0200, NULL, apply_settings),
++static ssize_t dsi_run_test(struct class *class, const char *buf, size_t len)
++{
++ struct drm_device *dev = test_dev;
++ ssize_t status;
++ long test_id;
++ int i;
++
++ status = strict_strtoul(buf, 0, &test_id);
++
++ DBG_TRACE("test_id %li", test_id);
++
++ switch(test_id) {
++ case 1:
++ /* BL on */
++ dsi_set_backlight_state(1);
++ break;
++ case 2:
++ /* BL off */
++ dsi_set_backlight_state(0);
++ break;
++ case 3:
++ /* Reset off */
++ dsi_set_panel_reset_state(0);
++ break;
++ case 4:
++ /* Reset on */
++ dsi_set_panel_reset_state(1);
++ break;
++ case 5:
++ /* Set device ready state */
++ dsi_set_device_ready_state(dev, 1);
++ break;
++ case 6:
++ /* Clear device ready state */
++ dsi_set_device_ready_state(dev, 0);
++ break;
++ case 7:
++ /* Send turn on command */
++ dsi_send_turn_on_packet(dev);
++ break;
++ case 8:
++ /* Send shutdown command */
++ dsi_send_shutdown_packet(dev);
++ break;
++ case 9:
++ /* Enable PTARGET */
++ dsi_set_ptarget_state(dev, 1);
++ break;
++ case 10:
++ /* Disable PTARGET */
++ dsi_set_ptarget_state(dev, 0);
++ break;
++ case 11:
++ /* Initialize panel */
++ dsi_init_panel(dev);
++ break;
++ case 12:
++ /* Enable plane and pipe */
++ dsi_set_pipe_plane_enable_state(dev, 1);
++ break;
++ case 13:
++ /* Disable plane and pipe */
++ dsi_set_pipe_plane_enable_state(dev, 0);
++ break;
++ case 14:
++ /* configure up */
++ dsi_configure_up(dev);
++ break;
++ case 15:
++ /* configure down */
++ dsi_configure_down(dev);
++ break;
++ case 16:
++ /* Draw pixels */
++ for (i = 0 ; i < (864*40) ; i++) {
++ if (dsi_wait_hs_data_fifo(dev) < 0)
++ break;
++ if (i == 0)
++ REG_WRITE(0xb068, 0x0f0f0f2c);
++ else
++ REG_WRITE(0xb068, 0x0f0f0f3c);
++ if (dsi_wait_hs_ctrl_fifo(dev) < 0)
++ break;
++ REG_WRITE(0xb070, 0x00000429);
++ }
++ case 17:
++ /* Sleep out command */
++ if (dsi_wait_hs_data_fifo(dev) < 0)
++ break;
++ REG_WRITE(0xb068, 0x00000011);
++ if (dsi_wait_hs_ctrl_fifo(dev) < 0)
++ break;
++ REG_WRITE(0xb070, 0x00000129);
++ break;
++ case 18:
++ /* Display on command */
++ if (dsi_wait_hs_data_fifo(dev) < 0)
++ break;
++ REG_WRITE(0xb068, 0x00000029);
++ if (dsi_wait_hs_ctrl_fifo(dev) < 0)
++ break;
++ REG_WRITE(0xb070, 0x00000129);
++ break;
++ default:
++ break;
++ }
++ return len;
++}
++
++
++
++static struct class_attribute tpo_class_attrs[] = {
++ __ATTR(test, 0200, NULL, dsi_run_test),
+ __ATTR_NULL,
+ };
+
+-static struct class miko_class = {
+- .name = "miko",
++static struct class tpo_class = {
++ .name = "tpo",
+ .owner = THIS_MODULE,
+
+- .class_attrs = miko_class_attrs,
++ .class_attrs = tpo_class_attrs,
+ };
+
+-static int __init miko_sysfs_init(void)
++static int __init tpo_sysfs_init(void)
+ {
+- int status;
++ int status;
+
+- status = class_register(&miko_class);
++ status = class_register(&tpo_class);
+ if (status < 0)
+ return status;
+
+ return status;
+ }
+-postcore_initcall(miko_sysfs_init);
++postcore_initcall(tpo_sysfs_init);
+ // Temporary access from sysfs end
+-
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_displayclass.c b/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_displayclass.c
+index adca7e2..7831183 100644
+--- a/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_displayclass.c
++++ b/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_displayclass.c
+@@ -1977,6 +1977,9 @@ PVRSRV_ERROR MRSTLFBPrePowerState(IMG_HANDLE hDevHandle,
+ (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON))
+ return PVRSRV_OK;
+
++ if (!dev_priv->iLVDS_enable && dev_priv->dsi_prePowerState != NULL)
++ dev_priv->dsi_prePowerState(dev);
++
+ save_display_registers(dev);
+
+ if (dev_priv->iLVDS_enable) {
+@@ -1999,14 +2002,15 @@ PVRSRV_ERROR MRSTLFBPrePowerState(IMG_HANDLE hDevHandle,
+ /*turn off PLLs*/
+ PSB_WVDC32(0, MRST_DPLL_A);
+ } else {
+- PSB_WVDC32(DPI_SHUT_DOWN, DPI_CONTROL_REG);
+- PSB_WVDC32(0x0, PIPEACONF);
+- PSB_WVDC32(0x2faf0000, BLC_PWM_CTL);
+- while (REG_READ(0x70008) & 0x40000000);
+- while ((PSB_RVDC32(GEN_FIFO_STAT_REG) & DPI_FIFO_EMPTY)
++ if (dev_priv->dsi_prePowerState == NULL) {
++ PSB_WVDC32(DPI_SHUT_DOWN, DPI_CONTROL_REG);
++ PSB_WVDC32(0x0, PIPEACONF);
++ PSB_WVDC32(0x2faf0000, BLC_PWM_CTL);
++ while (REG_READ(0x70008) & 0x40000000);
++ while ((PSB_RVDC32(GEN_FIFO_STAT_REG) & DPI_FIFO_EMPTY)
+ != DPI_FIFO_EMPTY);
+- PSB_WVDC32(0, DEVICE_READY_REG);
+-
++ PSB_WVDC32(0, DEVICE_READY_REG);
++ }
+ /* turn off mipi panel power */
+ ret = lnw_ipc_single_cmd(IPC_MSG_PANEL_ON_OFF, IPC_CMD_PANEL_OFF, 0, 0);
+ if (ret)
+@@ -2052,5 +2056,8 @@ PVRSRV_ERROR MRSTLFBPostPowerState(IMG_HANDLE hDevHandle,
+
+ restore_display_registers(dev);
+
++ if (!dev_priv->iLVDS_enable && dev_priv->dsi_postPowerState != NULL)
++ dev_priv->dsi_postPowerState(dev);
++
+ return PVRSRV_OK;
+ }
+--
+1.6.2.5
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-gtm501l-driver-1.2.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-gtm501l-driver-1.2.patch
new file mode 100644
index 0000000..206e49e
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-gtm501l-driver-1.2.patch
@@ -0,0 +1,2395 @@
+Index: linux-2.6.33/drivers/spi/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/spi/Kconfig
++++ linux-2.6.33/drivers/spi/Kconfig
+@@ -335,6 +335,10 @@ config SPI_DW_PCI
+ #
+ comment "SPI Protocol Masters"
+
++config SPI_MRST_GTM501
++ tristate "SPI protocol driver for GTM501l"
++ depends on SPI_MRST
++
+ config SPI_SPIDEV
+ tristate "User mode SPI device driver support"
+ depends on EXPERIMENTAL
+Index: linux-2.6.33/drivers/spi/Makefile
+===================================================================
+--- linux-2.6.33.orig/drivers/spi/Makefile
++++ linux-2.6.33/drivers/spi/Makefile
+@@ -43,6 +43,7 @@ obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_ms
+ obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o
+ obj-$(CONFIG_SPI_NUC900) += spi_nuc900.o
+ obj-$(CONFIG_SPI_MRST) += mrst_spi.o
++obj-$(CONFIG_SPI_MRST_GTM501) += gtm501l_spi.o
+
+ # special build for s3c24xx spi driver with fiq support
+ spi_s3c24xx_hw-y := spi_s3c24xx.o
+Index: linux-2.6.33/drivers/spi/gtm501l_spi.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/spi/gtm501l_spi.c
+@@ -0,0 +1,2029 @@
++/****************************************************************************
++ *
++ * Driver for the Option GTM501L spi modem.
++ *
++ * Copyright (C) 2008 Option International
++ * Copyright (C) 2008 Filip Aben <f.aben@option.com>
++ * Denis Joseph Barrow <d.barow@option.com>
++ * Jan Dumon <j.dumon@option.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
++ * USA
++ *
++ *
++ *
++ *****************************************************************************/
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/tty.h>
++#include <linux/device.h>
++#include <linux/spi/spi.h>
++#include <linux/tty.h>
++#include <linux/kfifo.h>
++#include <linux/tty_flip.h>
++#include <linux/workqueue.h>
++#include <linux/timer.h>
++
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/rfkill.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <linux/skbuff.h>
++#include <net/arp.h>
++#include <linux/ip.h>
++#include <linux/dmapool.h>
++#include <linux/gpio.h>
++#include <linux/sysfs.h>
++
++#include <asm/ipc_defs.h>
++
++#ifdef CONFIG_DEBUG_FS
++#include <linux/debugfs.h>
++#include <linux/ktime.h>
++#include <linux/spinlock.h>
++#endif
++
++#include "gtm501l_spi.h"
++#include <linux/spi/mrst_spi.h>
++
++/* various static variables */
++static struct tty_driver *tty_drv;
++static struct ktermios *gtm501l_termios[GTM501L_MAX_MINORS];
++static struct ktermios *gtm501l_termios_locked[GTM501L_MAX_MINORS];
++static struct lock_class_key gtm501l_key;
++static struct gtm501l_port_data *gtm501l_serial_ports[GTM501L_MAX_MINORS];
++
++/* Default port spec */
++static struct gtm501l_port_spec gtm501l_default_port_spec[] = {
++ { 1, GTM501L_PORT_SPEC_SERIAL, "Control" }, /* 0 */
++ { 1, GTM501L_PORT_SPEC_SERIAL, "NetAT" }, /* 1 */
++ { 1, GTM501L_PORT_SPEC_NET, "NetIP" }, /* 2 */
++ { 1, GTM501L_PORT_SPEC_SERIAL, "App" }, /* 3 */
++ { 1, GTM501L_PORT_SPEC_SERIAL, "App2" }, /* 4 */
++ { 1, GTM501L_PORT_SPEC_SERIAL, "PC/SC" }, /* 5 */
++ { 1, GTM501L_PORT_SPEC_SERIAL, "Modem" }, /* 6 */
++ { 1, GTM501L_PORT_SPEC_SERIAL, "Diag" }, /* 7 */
++ { 1, GTM501L_PORT_SPEC_SERIAL, "Logger" }, /* 8 */
++ { 0, GTM501L_PORT_SPEC_SERIAL, "" }, /* 9 */
++ { 1, GTM501L_PORT_SPEC_NET, "NetIP2" }, /* 10 */
++ { 0, GTM501L_PORT_SPEC_SERIAL, "" }, /* 11 */
++ { 0, GTM501L_PORT_SPEC_SERIAL, "" }, /* 12 */
++ { 0, GTM501L_PORT_SPEC_SERIAL, "" }, /* 13 */
++ { 0, GTM501L_PORT_SPEC_SERIAL, "" }, /* 14 */
++ { 0, GTM501L_PORT_SPEC_SERIAL, "" }, /* 15 */
++};
++
++/* Module load parameter */
++static int gpio_in = -1; /* GPIO interrupt input assignment, set default to -1 */
++static int backoff_enabled = 1; /* Enable the backoff timer */
++static int spi_b16 = 1; /* Enable 16bit SPI word length, otherwise use 8bit SPI word length */
++int gtm501l_debug = 0;
++
++/* temp debug variables */
++static int spi_err_count = 0;
++static unsigned int total_tty_write = 0;
++static unsigned int total_spi_write = 0;
++
++static unsigned char scratch_buf[GTM501L_TRANSFER_SIZE];
++
++static int reset_state = 1;
++
++/* prototype declarations */
++static void gtm501l_push_skb(struct gtm501l_port_data *port_data);
++static void gtm501l_net_init(struct net_device *net);
++static void gtm501l_spi_complete(void *ctx);
++static void _gtm501l_set_termios(struct tty_struct *tty, struct ktermios *old);
++static void gtm501l_throttle(struct tty_struct *tty);
++static void gtm501l_create_ports(struct gtm501l_device *gtm_dev,
++ struct gtm501l_port_spec *specs);
++/*static void gtm501l_pmic_init_voltages( void );*/
++static void gtm501l_pmic_set_wwanresetn( unsigned char level );
++static void gtm501l_pmic_set_wwandisablen( unsigned char level );
++
++void gtm501l_debug_printk(const char *function, int line, char *format, ...) {
++ va_list args;
++ int len;
++ char buffer[255];
++
++ len = snprintf(buffer, 255, DRVNAME " [%s:%d] ", function, line);
++
++ va_start(args, format);
++ vsnprintf(buffer + len, 255 - len, format, args);
++ va_end(args);
++
++ printk("%s", buffer);
++}
++
++#define __bswap_16(x) \
++ (__extension__ \
++ ({ register unsigned short int __v, __x = (x); \
++ __asm__ ("rorw $8, %w0" \
++ : "=r" (__v) \
++ : "0" (__x) \
++ : "cc"); \
++ __v; }))
++
++static inline void swap_buf(u16 *buf, int len) {
++ int n;
++ len = (len + 1) / 2;
++ n = (len + 7) / 8;
++ switch (len % 8) {
++ case 0: do { *buf = __bswap_16(*buf); buf++;
++ case 7: *buf = __bswap_16(*buf); buf++;
++ case 6: *buf = __bswap_16(*buf); buf++;
++ case 5: *buf = __bswap_16(*buf); buf++;
++ case 4: *buf = __bswap_16(*buf); buf++;
++ case 3: *buf = __bswap_16(*buf); buf++;
++ case 2: *buf = __bswap_16(*buf); buf++;
++ case 1: *buf = __bswap_16(*buf); buf++;
++ } while (--n > 0);
++ }
++}
++
++#ifdef CONFIG_DEBUG_FS
++
++static ssize_t gtm501l_read_gpio(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
++{
++ char buf[32];
++ unsigned int len = 0;
++ if (gpio_in >= 0) {
++ len += snprintf(buf+len, sizeof(buf)-len,
++ "GPIO%d = %d\n", gpio_in, (gpio_get_value(gpio_in))?1:0);
++ } else {
++ len += snprintf(buf+len, sizeof(buf)-len,
++ "GPIO unuse\n");
++ }
++ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
++}
++
++static struct file_operations gtm501l_gpio_operations = {
++ .owner = THIS_MODULE,
++ .read = gtm501l_read_gpio,
++};
++
++static ssize_t gtm501l_read_pmic(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
++{
++ char buf[8];
++ unsigned int len = sprintf(buf, "%d\n", reset_state);
++
++ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
++}
++
++static ssize_t gtm501l_write_pmic(struct file *file, const char __user *user, size_t count, loff_t *offset)
++{
++ reset_state = simple_strtoul(user, NULL, 0);
++ gtm501l_pmic_set_wwanresetn( reset_state );
++ gtm501l_pmic_set_wwandisablen( reset_state );
++ return count;
++}
++
++static struct file_operations gtm501l_pmic_operations = {
++ .owner = THIS_MODULE,
++ .read = gtm501l_read_pmic,
++ .write = gtm501l_write_pmic,
++};
++
++/**
++ * gtm501l_debugfs_init generates all debugfs file nodes.
++ * It initialized the frame statistic structure.
++ */
++static int gtm501l_debugfs_init(struct gtm501l_device *gtm_dev)
++{
++ gtm_dev->debugfs = debugfs_create_dir("gtm501l", NULL);
++ if (!gtm_dev->debugfs)
++ return -ENOMEM;
++
++ debugfs_create_file("gpio", S_IFREG | S_IRUGO,
++ gtm_dev->debugfs, NULL, &gtm501l_gpio_operations);
++ debugfs_create_file("pmic", S_IFREG | S_IRUGO,
++ gtm_dev->debugfs, NULL, &gtm501l_pmic_operations);
++ debugfs_create_u32("backoff_timer", S_IFREG | S_IRUGO,
++ gtm_dev->debugfs, &backoff_enabled);
++ debugfs_create_x32("flags", S_IFREG | S_IRUGO,
++ gtm_dev->debugfs, (unsigned int *)&gtm_dev->flags);
++#ifdef DEBUG
++ debugfs_create_x32("debug", S_IFREG | S_IRUGO,
++ gtm_dev->debugfs, &gtm501l_debug);
++#endif
++
++ return 0;
++}
++
++static void gtm501l_debugfs_remove(struct gtm501l_device *gtm_dev)
++{
++ if (gtm_dev->debugfs)
++ debugfs_remove_recursive(gtm_dev->debugfs);
++}
++#endif
++
++static __be32 gtm501l_get_ip_addr(struct net_device *net)
++{
++ struct in_device *in_dev;
++
++ if ((in_dev = __in_dev_get_rtnl(net)) != NULL) {
++ if(in_dev->ifa_list) {
++ return in_dev->ifa_list->ifa_local;
++ }
++ }
++
++ return 0;
++}
++
++/*
++static void gtm501l_pmic_init_voltages( void )
++{
++ struct ipc_pmic_reg_data reg_data;
++
++ reg_data.num_entries = 3;
++
++ // Set VDDQCNT
++
++ reg_data.pmic_reg_data[0].register_address = 0x037;
++ reg_data.pmic_reg_data[0].value = 0x07;
++
++ // Set YMX3GDCNT
++
++ reg_data.pmic_reg_data[1].register_address = 0x03c;
++ reg_data.pmic_reg_data[1].value = 0x47;
++
++ // Set CLKOUT
++
++ reg_data.pmic_reg_data[2].register_address = 0x021;
++ reg_data.pmic_reg_data[2].value = 0x00;
++ ipc_pmic_register_write(&reg_data, 0);
++
++}
++*/
++
++static void gtm501l_pmic_set_wwanresetn( unsigned char level )
++{
++ struct ipc_pmic_mod_reg_data mod_reg_data;
++
++ // WWAN_RESET_N is connected to COMMS_GPO5
++
++ mod_reg_data.num_entries = 1;
++ mod_reg_data.pmic_mod_reg_data[0].bit_map = 0x20;
++ mod_reg_data.pmic_mod_reg_data[0].register_address = 0x0f4;
++
++ if (level)
++ {
++ mod_reg_data.pmic_mod_reg_data[0].value = 0x20;
++ }
++ else
++ {
++ mod_reg_data.pmic_mod_reg_data[0].value = 0x00;
++ }
++
++ ipc_pmic_register_read_modify(&mod_reg_data);
++}
++
++static void gtm501l_pmic_set_wwandisablen( unsigned char level )
++{
++ struct ipc_pmic_mod_reg_data mod_reg_data;
++
++ // WWAN_DISABLE_N is connected to COMMS_GPO0
++
++ mod_reg_data.num_entries = 1;
++ mod_reg_data.pmic_mod_reg_data[0].bit_map = 0x01;
++ mod_reg_data.pmic_mod_reg_data[0].register_address = 0x0f4;
++
++ if (level)
++ {
++ mod_reg_data.pmic_mod_reg_data[0].value = 0x01;
++ }
++ else
++ {
++ mod_reg_data.pmic_mod_reg_data[0].value = 0x00;
++ }
++
++ ipc_pmic_register_read_modify(&mod_reg_data);
++}
++
++
++static void gtm501l_rx_netchar(struct gtm501l_net *gtm_net,
++ unsigned char *in_buf, int size)
++{
++ struct net_device *net = gtm_net->net;
++ unsigned int temp_bytes;
++ unsigned int buffer_offset = 0, n;
++ unsigned int frame_len;
++ unsigned char *tmp_rx_buf;
++ unsigned char c;
++ int header_invalid;
++
++ while(size) {
++ switch (gtm_net->rx_state) {
++ case WAIT_IP:
++ /* waiting for IP header. */
++ /* wanted bytes - size of ip header */
++ temp_bytes = (size < gtm_net->rx_buf_missing) ?
++ size : gtm_net->rx_buf_missing;
++
++ memcpy(((unsigned char *)(&gtm_net->rx_ip_hdr)) +
++ gtm_net->rx_buf_size, in_buf + buffer_offset,
++ temp_bytes);
++
++ gtm_net->rx_buf_size += temp_bytes;
++ buffer_offset += temp_bytes;
++ gtm_net->rx_buf_missing -= temp_bytes;
++ size -= temp_bytes;
++
++ if (!gtm_net->rx_buf_missing) {
++ /* header is complete allocate an sk_buffer and
++ * continue to WAIT_DATA */
++
++ header_invalid = 0;
++
++ frame_len = ntohs(gtm_net->rx_ip_hdr.tot_len);
++ if ((frame_len > GTM501L_DEFAULT_MRU) ||
++ (frame_len < sizeof(struct iphdr))) {
++ if(!gtm_net->sync_lost)
++ dev_err(&net->dev,
++ "Invalid frame length (%d)\n",
++ frame_len);
++ header_invalid = 1;
++ }
++
++ /* Validate destination address */
++ if (!header_invalid &&
++ (gtm501l_get_ip_addr(net) != gtm_net->rx_ip_hdr.daddr)) {
++ if(!gtm_net->sync_lost)
++ dev_err(&net->dev,
++ "Not our address (" NIPQUAD_FMT ")\n",
++ NIPQUAD(gtm_net->rx_ip_hdr.daddr));
++ header_invalid = 1;
++ }
++
++ if (header_invalid) {
++ /* This header is not valid, roll back
++ * for sizeof(header) bytes - 1 and
++ * wait for sync */
++ gtm_net->rx_state = WAIT_SYNC;
++ n = min(buffer_offset,
++ sizeof(struct iphdr) - 1);
++ buffer_offset -= n;
++ size += n;
++ continue;
++ }
++
++ /* Allocate an sk_buff */
++ gtm_net->rx_skb = dev_alloc_skb(frame_len);
++ if (!gtm_net->rx_skb) {
++ /* We got no receive buffer. */
++ //D1("ddcould not allocate memory");
++ gtm_net->rx_state = WAIT_SYNC;
++ return;
++ }
++
++ if(gtm_net->sync_lost) {
++ dev_info(&net->dev, "Back in sync. (%d stray bytes)\n",
++ gtm_net->sync_lost);
++ gtm_net->sync_lost = 0;
++ }
++
++ /* Here's where it came from */
++ gtm_net->rx_skb->dev = net;
++
++ /* Copy what we got so far. make room for iphdr
++ * after tail. */
++ tmp_rx_buf =
++ skb_put(gtm_net->rx_skb,
++ sizeof(struct iphdr));
++ memcpy(tmp_rx_buf, (char *)&(gtm_net->rx_ip_hdr),
++ sizeof(struct iphdr));
++
++ /* ETH_HLEN */
++ gtm_net->rx_buf_size = sizeof(struct iphdr);
++
++ gtm_net->rx_buf_missing =
++ frame_len - sizeof(struct iphdr);
++ gtm_net->rx_state = WAIT_DATA;
++ }
++ break;
++
++ case WAIT_DATA:
++ temp_bytes = (size < gtm_net->rx_buf_missing)
++ ? size : gtm_net->rx_buf_missing;
++
++ /* Copy the rest of the bytes that are left in the
++ * buffer into the waiting sk_buf. */
++ /* Make room for temp_bytes after tail. */
++ tmp_rx_buf = skb_put(gtm_net->rx_skb, temp_bytes);
++ memcpy(tmp_rx_buf, in_buf + buffer_offset, temp_bytes);
++
++ gtm_net->rx_buf_missing -= temp_bytes;
++ size -= temp_bytes;
++ buffer_offset += temp_bytes;
++ gtm_net->rx_buf_size += temp_bytes;
++ if (!gtm_net->rx_buf_missing) {
++ /* Packet is complete. Inject into stack. */
++ /* We have IP packet here */
++ gtm_net->rx_skb->protocol =
++ __constant_htons(ETH_P_IP);
++ /* don't check it */
++ gtm_net->rx_skb->ip_summed =
++ CHECKSUM_UNNECESSARY;
++
++ skb_reset_mac_header(gtm_net->rx_skb);
++
++ /* Ship it off to the kernel */
++ netif_rx(gtm_net->rx_skb);
++ /* No longer our buffer. */
++ gtm_net->rx_skb = NULL;
++
++				/* update our statistics */
++ STATS(net).rx_packets++;
++ STATS(net).rx_bytes += gtm_net->rx_buf_size;
++
++ gtm_net->rx_buf_size = 0;
++ gtm_net->rx_buf_missing = sizeof(struct iphdr);
++ gtm_net->rx_state = WAIT_IP;
++ }
++ break;
++
++ case WAIT_SYNC:
++ if(!gtm_net->sync_lost) {
++ dev_err(&net->dev, "Lost sync...\n");
++ }
++ gtm_net->sync_lost++;
++ /* Look for the next possible IP header */
++ c = *(in_buf + buffer_offset);
++ if(c >= 0x45 && c <= 0x4f) {
++ /* This might be the begin of a new ip pkt */
++ gtm_net->rx_state = WAIT_IP;
++ gtm_net->rx_buf_size = 0;
++ gtm_net->rx_buf_missing = sizeof(struct iphdr);
++ }
++ else {
++ size--;
++ buffer_offset++;
++ }
++ break;
++
++ default:
++ size--;
++ break;
++ }
++ }
++}
++
++/* mux functions */
++
++/* function that tells you how many characters you can feed to fill your mux buffer */
++static inline int gtm501l_mux_to_demux(int count)
++{
++ int burst_cnt, remain_cnt;
++ if (count & 0x1) {
++ printk("Error - space in frame can't be an odd number\n");
++ return -1;
++ }
++ /* We've got 2 extra bytes of framing per 512 bytes of data */
++ burst_cnt = (count / (MUX_BURST_SIZE + 2)) * MUX_BURST_SIZE;
++ remain_cnt = count % (MUX_BURST_SIZE + 2);
++
++ if (remain_cnt > 2)
++ return remain_cnt - 2 + burst_cnt;
++ else
++ return burst_cnt + 1;
++}
++
++/* multiplexes the data into the output buffer. output buffer is expected to be large enough to fit the data. */
++static int gtm501l_mux_data(int chan_id, unsigned char *in_buf, int in_size,
++ unsigned char *out_buf)
++{
++ int odd, cnt, result_size = 0;
++
++ /* check for an odd number of bytes */
++ odd = in_size & 0x1;
++
++ /* make it even */
++ in_size &= ~1;
++
++	/* First fill up with as many burst frames as possible */
++ while (in_size) {
++
++ cnt = (in_size >= MUX_BURST_SIZE) ? MUX_BURST_SIZE : in_size;
++ *(out_buf + result_size) =
++ MUX_CONTROL_BYTE(chan_id, MUX_BURST_TRANSFER,
++ MUX_MASTER_TO_SLAVE);
++ //printk("Burst frame %d bytes\n", cnt);
++ result_size++;
++ *(out_buf + result_size) = cnt / 2;
++ result_size++;
++ memcpy(out_buf + result_size, in_buf, cnt);
++ result_size += cnt;
++ in_size -= cnt;
++ in_buf += cnt;
++ }
++
++ /* Then tackle the odd byte */
++ if (odd) {
++ *(out_buf + result_size) =
++ MUX_CONTROL_BYTE(chan_id, MUX_DATA_TRANSFER,
++ MUX_MASTER_TO_SLAVE);
++ result_size++;
++ *(out_buf + result_size) = *in_buf;
++ result_size++;
++
++ }
++
++ return result_size;
++}
++
++/* kfifo put theoretically cannot fail to copy all buffer here */
++void gtm501l_tty_insert_flip_string(struct gtm501l_serial *gtm_ser,
++ unsigned char *chars,size_t size)
++{
++ int chars_inserted;
++ int copylen;
++ struct tty_struct *tty;
++
++ if (gtm_ser && gtm_ser->tty) {
++ tty= gtm_ser->tty;
++ if (test_bit(TTY_THROTTLED, &tty->flags)) {
++ dprintk(DEBUG_TTY, "received %d bytes while throttled\n", size);
++ copylen=kfifo_in(gtm_ser->throttle_fifo,chars,size);
++ if(copylen!=size)
++ dprintk(DEBUG_TTY, "kfifo_put failed got %d expected %d\n",
++ copylen, size);
++ }
++ else {
++ chars_inserted=tty_insert_flip_string(tty, chars, size);
++ if(chars_inserted!=size) {
++
++ size -= chars_inserted;
++ copylen=kfifo_in(gtm_ser->throttle_fifo,
++ &chars[chars_inserted],
++ size);
++ if(copylen!=size)
++ dprintk(DEBUG_TTY, "%s kfifo_put failed got %d expected %d\n",
++ copylen, size);
++ }
++ /* The flip here should force the tty layer
++			 * to throttle if necessary
++ */
++ tty_flip_buffer_push(tty);
++ }
++ }
++
++}
++
++#define PORT_SPEC_FLAGS_ENABLED 0x80
++#define PORT_SPEC_FLAGS_TYPE_MASK 0x7f
++#define PORT_SPEC_FLAGS_SERIAL 0x01
++#define PORT_SPEC_FLAGS_NET 0x02
++
++static void gtm501l_decode_version_info(struct gtm501l_device *gtm_dev,
++ unsigned char *buf, int size)
++{
++ int i = 0, chan, flags;
++ u16 version;
++ u16 framelength;
++ struct gtm501l_port_spec *new_port_spec;
++
++ /* Protocol version */
++ memcpy(&version, buf + i, 2);
++ i += 2;
++
++ if(version != 0 || size != 260) {
++ /* Unknown version or size is wrong.. */
++ return;
++ }
++
++ /* Frame length */
++ memcpy(&framelength, buf + i, 2);
++ i += 2;
++
++ /* Channel descriptors */
++ new_port_spec = kzalloc(sizeof(struct gtm501l_port_spec) * 16, GFP_KERNEL);
++ for(chan = 0; chan < 16; chan++) {
++ flags = buf[i++];
++
++ if(flags | PORT_SPEC_FLAGS_ENABLED) {
++ new_port_spec[chan].enabled = 1;
++ switch(flags & PORT_SPEC_FLAGS_TYPE_MASK) {
++ case PORT_SPEC_FLAGS_SERIAL:
++ new_port_spec[chan].type = GTM501L_PORT_SPEC_SERIAL;
++ break;
++ case PORT_SPEC_FLAGS_NET:
++ new_port_spec[chan].type = GTM501L_PORT_SPEC_NET;
++ break;
++ default:
++ /* Unknown channel type: disable this channel */
++ new_port_spec[chan].enabled = 0;
++ }
++
++ /* Copy the name */
++ memcpy(&new_port_spec[chan].name, buf + i, 15);
++ }
++
++ i += 15;
++ }
++
++ /* Activate the new port spec */
++ gtm501l_create_ports(gtm_dev, new_port_spec);
++ kfree(new_port_spec);
++}
++
++
++/**
++ * gtm501l_demux walks through the received SPI message given in in_buf with the length size and copies all
++ * found data into the opened channels given in the gtm_dev structure. The SPI buffer length must always be
++ * a multiple of 2 bytes!
++ * Finally, it returns the length of the useful data found inside the SPI frame.
++ */
++static int gtm501l_demux(struct gtm501l_device *gtm_dev, unsigned char *in_buf,
++ int size)
++{
++ int i = 0, valid = 0, copy;
++ unsigned char temp;
++ struct gtm501l_port_data *port_data;
++ struct gtm501l_serial *gtm_ser = NULL;
++ struct gtm501l_net *gtm_net = NULL;
++ unsigned char old_dcd;
++
++ gtm_dev->empty_transfers++;
++
++ while (i < size) {
++ gtm_ser = NULL;
++ gtm_net = NULL;
++ copy = 0;
++
++ if(spi_b16)
++ swap_buf((u16 *)(in_buf + i), 2);
++
++ /* check for an end of frame sequence */
++ if((in_buf[i]==0) && (in_buf[i+1]==0)) break;
++
++ if(0 && in_buf[i] == 0xFF) { /* TODO: Fill in correct check for version info */
++ copy = in_buf[++i] * 2;
++ i++;
++ if(spi_b16)
++ swap_buf((u16 *)(in_buf + i), copy);
++ gtm501l_decode_version_info(gtm_dev, in_buf + i, copy);
++ i += copy;
++ continue;
++ }
++
++ /* verify valid packet */
++ temp = MUX_DEVICE(in_buf[i]);
++ if (temp != MUX_SLAVE_TO_MASTER) {
++ /*
++			 * ##PH: That should never happen and should be counted as erroneous data
++ */
++ i += 2;
++ continue;
++ }
++
++#ifdef DEBUG
++ if(!valid && (i > 0) && (gtm501l_debug & DEBUG_DEMUX)) {
++ int j;
++ dprintk(DEBUG_DEMUX, "First valid byte found at offset %d\n", i);
++ for(j=0 ; j<i ; j++) printk("%.2X ", in_buf[j]);
++ printk("\n");
++ }
++#endif
++
++ valid = 1;
++
++ //dprintk(DEBUG_DEMUX, "Got valid mux bytes 0x%X 0x%X\n", in_buf[i], in_buf[i+1]);
++
++ /* verify valid channel */
++ temp = MUX_CHANNEL(in_buf[i]);
++ if (temp >= GTM501L_PORT_PER_DEV || !gtm_dev->port_data[temp]) {
++ i += 2;
++ continue;
++ }
++ port_data = gtm_dev->port_data[temp];
++
++ if (port_data->spec.type == GTM501L_PORT_SPEC_NET)
++ gtm_net = &port_data->type.net;
++ else if (port_data->spec.type == GTM501L_PORT_SPEC_SERIAL)
++ gtm_ser = &port_data->type.serial;
++ //dprintk(DEBUG_DEMUX, "For channel %d\n", temp);
++
++ /* start decoding data */
++ temp = MUX_BLOCK_TYPE(in_buf[i]);
++ if (temp == MUX_BURST_TRANSFER) {
++ copy = in_buf[++i] * 2;
++ if( 0 == copy ) copy = MUX_BURST_SIZE;
++ if(spi_b16)
++ swap_buf((u16 *)(in_buf + i + 1), copy);
++ } else if (temp == MUX_DATA_TRANSFER) {
++ copy = 1;
++ }
++
++ if (copy) {
++ gtm_dev->empty_transfers = 0;
++ //dprintk(DEBUG_DEMUX, "\tNeed to copy %d data bytes\n", copy);
++ /* regular data */
++ if( gtm_ser && gtm_ser->tty ) {
++ gtm501l_tty_insert_flip_string(gtm_ser, &in_buf[++i], copy);
++ }
++ else if (gtm_net) {
++/*
++ int j;
++ for(j=i+1;j<(i+1+copy);j++) printk("0x%.2X ", in_buf[j]);
++ printk("\n");
++*/
++ gtm501l_rx_netchar(gtm_net, &in_buf[++i], copy);
++ } else {
++ i++;
++ }
++
++ i += copy;
++ continue;
++ }
++
++ if (temp == MUX_CONTROL_TRANSFER) {
++ //dprintk(DEBUG_DEMUX, "Is a control byte\n");
++ /* control data */
++ temp = in_buf[i + 1];
++ if (MUX_LINK(temp)) {
++ set_bit(GTM501L_TX_FC,
++ &port_data->signal_state);
++ dprintk(DEBUG_DEMUX, "FC set\n");
++ } else {
++ clear_bit(GTM501L_TX_FC,
++ &port_data->signal_state);
++ dprintk(DEBUG_DEMUX, "FC cleared\n");
++ }
++
++ if (gtm_ser) {
++ old_dcd =
++ test_bit(GTM501L_DCD,
++ &port_data->signal_state);
++
++ if (MUX_DCD(temp)) {
++ set_bit(GTM501L_DCD,
++ &port_data->signal_state);
++ dprintk(DEBUG_DEMUX, "DCD set\n");
++ } else {
++ clear_bit(GTM501L_DCD,
++ &port_data->signal_state);
++ dprintk(DEBUG_DEMUX, "DCD cleared\n");
++ }
++ if (MUX_CTS(temp)) {
++ set_bit(GTM501L_CTS,
++ &port_data->signal_state);
++ dprintk(DEBUG_DEMUX, "CTS set\n");
++ } else {
++ clear_bit(GTM501L_CTS,
++ &port_data->signal_state);
++ dprintk(DEBUG_DEMUX, "CTS cleared\n");
++ }
++
++ if (MUX_DSR(temp)) {
++ set_bit(GTM501L_DSR,
++ &port_data->signal_state);
++ dprintk(DEBUG_DEMUX, "DSR set\n");
++ } else {
++ clear_bit(GTM501L_DSR,
++ &port_data->signal_state);
++ dprintk(DEBUG_DEMUX, "DSR cleared\n");
++ }
++
++ if (MUX_RI(temp)) {
++ set_bit(GTM501L_RI,
++ &port_data->signal_state);
++ dprintk(DEBUG_DEMUX, "RI set\n");
++ } else {
++ clear_bit(GTM501L_RI,
++ &port_data->signal_state);
++ dprintk(DEBUG_DEMUX, "RI cleared\n");
++ }
++
++ if (old_dcd
++ && !test_bit(GTM501L_DCD,
++ &port_data->signal_state))
++ if (gtm_ser->tty)
++ tty_hangup(gtm_ser->tty);
++ }
++
++ i += 2;
++ }
++
++ }
++ return i;
++}
++
++static int gtm501l_mux_flow_control(struct gtm501l_port_data *port_data,
++ unsigned char *out_buf)
++{
++ *out_buf =
++ MUX_CONTROL_BYTE(port_data->port_id, MUX_CONTROL_TRANSFER,
++ MUX_MASTER_TO_SLAVE);
++ *(out_buf + 1) =
++ (test_bit(GTM501L_DTR, &port_data->signal_state)
++ ? (1 << MUX_DTR_SHIFT) : 0)
++ | (test_bit(GTM501L_RTS, &port_data->signal_state)
++ ? (1 << MUX_RTS_SHIFT): 0)
++ | (test_bit(GTM501L_RX_FC, &port_data->signal_state)
++ ? (1 << MUX_LINK_SHIFT): 0);
++
++ return 2;
++}
++
++static void gtm501l_timer(unsigned long arg)
++{
++ struct tasklet_struct *tasklet = (struct tasklet_struct *)arg;
++ //printk("Timer fired\n");
++ tasklet_hi_schedule(tasklet);
++}
++
++/* gpio interrupt handler */
++
++static irqreturn_t gtm501l_gpio_interrupt(int irq, void *dev)
++{
++ struct gtm501l_device *gtm_dev = (struct gtm501l_device *)dev;
++ static int count = 0;
++
++ if(!gtm_dev || !test_bit(GTM501L_STATE_PRESENT, &gtm_dev->flags))
++ return IRQ_HANDLED;
++
++ /*
++ * Using the GPE layer it will set the
++ * irq to the requested gpio_in line
++ */
++
++ if(gtm_dev->stats) gtm_dev->stats->wait_finished(gtm_dev->frame_stats);
++
++ if (!(count % 5000))
++ dprintk(DEBUG_GPIO, "Scheduling\n");
++
++ if(!test_bit(GTM501L_STATE_IO_IN_PROGRESS, &gtm_dev->flags)) {
++
++ /* If we received no data for the last x
++ * frames, delay the next transfer */
++ if(gtm_dev->empty_transfers > GTM501L_MAX_EMPTY && backoff_enabled) {
++ if(!timer_pending(&gtm_dev->timer)) {
++ gtm_dev->timer.expires = jiffies + GTM501L_BACKOFF_TIMER;
++ gtm_dev->timer.function = gtm501l_timer;
++ gtm_dev->timer.data = (unsigned long)&gtm_dev->io_work_tasklet;
++ add_timer(&gtm_dev->timer);
++ }
++ }
++ else
++ tasklet_hi_schedule(&gtm_dev->io_work_tasklet);
++ }
++ else set_bit(GTM501L_STATE_IO_READY, &gtm_dev->flags);
++
++ count++;
++
++ return IRQ_HANDLED;
++}
++
++static int gtm501l_prepare_tx_buffer(struct gtm501l_device *gtm_dev)
++{
++ int tx_count = 0;
++ int i, j;
++ unsigned int temp_count;
++ struct gtm501l_port_data *port_data;
++ int round_robin_index;
++ unsigned char *tx_buffer;
++ int len;
++
++ if(gtm_dev->stats) gtm_dev->stats->encode_start_idle_finished(gtm_dev->frame_stats);
++
++ tx_buffer = gtm_dev->tx_buffer[gtm_dev->tx_buffer_used];
++
++ /* First add flow control events for all ports */
++ for (i = 0; i < GTM501L_PORT_PER_DEV; i++) {
++ port_data = gtm_dev->port_data[i];
++ if(!port_data)
++ continue;
++
++ if (test_and_clear_bit (GTM501L_UPDATE, &port_data->signal_state)) {
++ tx_count += gtm501l_mux_flow_control(port_data,
++ tx_buffer + tx_count);
++ }
++ }
++
++ /* assemble data from buffers from all ports */
++ round_robin_index = gtm_dev->round_robin_index;
++ for (j = round_robin_index; j <
++ (GTM501L_PORT_PER_DEV + round_robin_index) ; j++) {
++ i = j % GTM501L_PORT_PER_DEV;
++ port_data = gtm_dev->port_data[i];
++ if(!port_data)
++ continue;
++
++ /* check if this port is flow controlled */
++ if (test_bit(GTM501L_TX_FC, &port_data->signal_state))
++ continue;
++
++ /* check for data to be sent */
++ temp_count = gtm501l_mux_to_demux(GTM501L_TRANSFER_SIZE - tx_count);
++ temp_count = min(kfifo_len(port_data->tx_fifo), temp_count);
++ if (temp_count) {
++ len = kfifo_out(port_data->tx_fifo, scratch_buf, temp_count);
++#ifdef GTM501L_DEBUG
++ sprintf(debug_buff_name, "gtm501l tx buf port %d %d", i, len);
++ GTM501L_BUFFER_DUMP(debug_buff_name, scratch_buf, temp_count);
++#endif
++ tx_count += gtm501l_mux_data(i, scratch_buf, temp_count,
++ tx_buffer + tx_count);
++ total_spi_write += temp_count;
++ gtm_dev->empty_transfers = 0;
++ }
++ if( port_data->spec.type == GTM501L_PORT_SPEC_NET)
++ gtm501l_push_skb(port_data);
++ else if(port_data->type.serial.tty)
++ tty_wakeup(port_data->type.serial.tty);
++ }
++ gtm_dev->round_robin_index = gtm_dev->round_robin_index + 1;
++ if (gtm_dev->round_robin_index == GTM501L_PORT_PER_DEV)
++ gtm_dev->round_robin_index = 0;
++
++ /* End-Of-Frame marker */
++ temp_count = min(2, GTM501L_TRANSFER_SIZE - tx_count);
++ memset(tx_buffer + tx_count, 0, temp_count);
++ tx_count += temp_count;
++
++ if(spi_b16)
++ swap_buf((u16 *)(tx_buffer), tx_count);
++
++ gtm_dev->tx_count = tx_count;
++
++ if(gtm_dev->stats) gtm_dev->stats->encode_finished(gtm_dev->frame_stats, tx_count - temp_count);
++
++ return tx_count;
++}
++
++/* serial functions */
++static void gtm501l_io(unsigned long data)
++{
++ struct gtm501l_device *gtm_dev = (struct gtm501l_device *)data;
++ int retval;
++#ifdef GTM501L_DEBUG
++ char debug_buff_name[80];
++#endif
++
++ if (!test_bit(GTM501L_STATE_PRESENT, &gtm_dev->flags))
++ return;
++
++ if (!test_and_set_bit(GTM501L_STATE_IO_IN_PROGRESS, &gtm_dev->flags)) {
++ gtm501l_prepare_tx_buffer(gtm_dev);
++
++ if(gtm_dev->stats) gtm_dev->stats->transfer_start(gtm_dev->frame_stats);
++
++ spi_message_init(&gtm_dev->spi_msg);
++ gtm_dev->spi_msg.context = gtm_dev;
++ gtm_dev->spi_msg.complete = gtm501l_spi_complete;
++ gtm_dev->spi_msg.is_dma_mapped = 1;
++
++ /* set up our spi transfer */
++ gtm_dev->spi_xfer.len = GTM501L_TRANSFER_SIZE;
++ gtm_dev->spi_xfer.cs_change = 0;
++#if 0
++ gtm_dev->tx_dma[gtm_dev->tx_buffer_used] =
++ dma_map_single(&gtm_dev->spi_dev->dev, gtm_dev->tx_buffer[gtm_dev->tx_buffer_used],
++ GTM501L_TRANSFER_SIZE, DMA_TO_DEVICE);
++ gtm_dev->rx_dma = dma_map_single(&gtm_dev->spi_dev->dev, gtm_dev->rx_buffer,
++ GTM501L_TRANSFER_SIZE, DMA_FROM_DEVICE);
++#else
++ gtm_dev->tx_dma[gtm_dev->tx_buffer_used] = virt_to_phys(gtm_dev->tx_buffer[gtm_dev->tx_buffer_used]);
++ gtm_dev->rx_dma = virt_to_phys(gtm_dev->rx_buffer);
++#endif
++
++ gtm_dev->spi_xfer.tx_dma = gtm_dev->tx_dma[gtm_dev->tx_buffer_used];
++ gtm_dev->spi_xfer.tx_buf = gtm_dev->tx_buffer[gtm_dev->tx_buffer_used];
++ gtm_dev->tx_buffer_used = (++gtm_dev->tx_buffer_used) % 2;
++ gtm_dev->tx_count = 0;
++
++ gtm_dev->spi_xfer.rx_dma = gtm_dev->rx_dma;
++ gtm_dev->spi_xfer.rx_buf = gtm_dev->rx_buffer;
++
++ spi_message_add_tail(&gtm_dev->spi_xfer, &gtm_dev->spi_msg);
++
++ retval = spi_async(gtm_dev->spi_dev, &gtm_dev->spi_msg);
++
++ if (retval) {
++ dprintk(DEBUG_SPI, "ERROR: spi_async failed (%d)\n", retval);
++ clear_bit(GTM501L_STATE_IO_IN_PROGRESS,
++ &gtm_dev->flags);
++ tasklet_hi_schedule(&gtm_dev->io_work_tasklet);
++ return;
++ }
++ } else {
++ dprintk(DEBUG_SPI, "ERROR - gtm501l_io called, but spi still busy\n");
++ }
++
++}
++
++static ssize_t gtm501l_sysfs_channel(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct gtm501l_port_data *port_data = NULL;
++ int i;
++
++ /* Look for the port_data matching this device. */
++ if(strcmp("tty", dev->class->name) == 0) {
++ for (i = 0; i < GTM501L_MAX_MINORS; i++) {
++ if (gtm501l_serial_ports[i] &&
++ gtm501l_serial_ports[i]->type.serial.dev == dev) {
++ port_data = gtm501l_serial_ports[i];
++ break;
++ }
++ }
++ }
++ else if(strcmp("net", dev->class->name) == 0) {
++ port_data = net_to_gtm501l_data(to_net_dev(dev));
++ }
++
++ return sprintf(buf, "%s\n", (port_data ? port_data->spec.name : "unknown"));
++}
++
++static DEVICE_ATTR(channel, S_IRUGO, gtm501l_sysfs_channel, NULL);
++
++static void gtm501l_free_port(struct gtm501l_port_data *port_data)
++{
++ /* do device type specific cleanup */
++ if (port_data->spec.type == GTM501L_PORT_SPEC_SERIAL) {
++ /* serial device cleanup */
++ device_remove_file(port_data->type.serial.dev, &dev_attr_channel);
++ gtm501l_serial_ports[port_data->type.serial.minor] = 0;
++ tty_unregister_device(tty_drv, port_data->type.serial.minor);
++ kfifo_free(port_data->type.serial.throttle_fifo);
++ } else if (port_data->spec.type == GTM501L_PORT_SPEC_NET) {
++ /* net device cleanup */
++ device_remove_file(&port_data->type.net.net->dev, &dev_attr_channel);
++ unregister_netdev(port_data->type.net.net);
++ free_netdev(port_data->type.net.net);
++ }
++
++ /* do common device deinitialization */
++ kfifo_free(port_data->tx_fifo);
++ kfree(port_data);
++}
++
++static int gtm501l_get_free_port(void)
++{
++ int i;
++
++ for (i = 0; i < GTM501L_MAX_MINORS; i++) {
++ if (!gtm501l_serial_ports[i])
++ return i;
++ }
++ return -1;
++}
++
++static void gtm501l_create_ports(struct gtm501l_device *gtm_dev,
++ struct gtm501l_port_spec *specs)
++{
++ struct net_device *net;
++ struct gtm501l_serial *gtm_ser;
++ struct gtm501l_port_data *port_data = NULL;
++ int minor = -1;
++ int i;
++ int status;
++
++ for(i = 0; i < GTM501L_PORT_PER_DEV; i++) {
++ port_data = gtm_dev->port_data[i];
++
++ if(port_data) {
++ if(!specs[i].enabled) {
++ /* A port did exist, but it's gone now */
++ gtm501l_free_port(port_data);
++ gtm_dev->port_data[i] = NULL;
++ continue;
++ }
++ else if (specs[i].type == port_data->spec.type) {
++ /* Old and new port are of the same type,
++ * only update the name */
++ memcpy(&port_data->spec.name, &specs[i].name, 16);
++ continue;
++ }
++ else {
++ /* Old and new port have different types */
++ gtm501l_free_port(port_data);
++ gtm_dev->port_data[i] = NULL;
++ }
++ }
++
++ /* If this port is not enabled, skip it */
++ if(!specs[i].enabled) {
++ continue;
++ }
++
++ dprintk(DEBUG_INIT, "%d: (%d) %s\n", i, specs[i].type, specs[i].name);
++
++ port_data = kzalloc(sizeof(struct gtm501l_port_data), GFP_KERNEL);
++ if (!port_data)
++ continue;
++
++ memcpy(&port_data->spec, &specs[i], sizeof(struct gtm501l_port_spec));
++
++ spin_lock_init(&port_data->fifo_lock);
++ lockdep_set_class_and_subclass(&port_data->fifo_lock, &gtm501l_key, 0);
++
++ /* common initialization */
++ port_data->spi_itf = gtm_dev;
++ port_data->port_id = i;
++ port_data->tx_fifo =
++ status = kfifo_alloc(port_data->tx_fifo, GTM501L_FIFO_SIZE, GFP_KERNEL);
++ if (status) {
++ printk(KERN_ERR "GTM501 failed kfifo tx alloc %d\n", status);
++ kfree(port_data);
++ continue;
++ }
++ /* device specific initialization */
++ if (port_data->spec.type == GTM501L_PORT_SPEC_SERIAL) {
++ /* serial device */
++ if ((minor = gtm501l_get_free_port()) == -1) {
++ kfree(port_data);
++ continue;
++ }
++ gtm_ser = &port_data->type.serial;
++ gtm_ser->minor = minor;
++ gtm_ser->dev =
++ tty_register_device(tty_drv, minor,
++ &gtm_dev->spi_dev->dev);
++ if (!gtm_ser->dev) {
++ dprintk(DEBUG_INIT, "Registering tty device failed\n");
++ kfree(port_data);
++ continue;
++ }
++ gtm501l_serial_ports[minor] = port_data;
++ spin_lock_init(&gtm_ser->throttle_fifo_lock);
++ lockdep_set_class_and_subclass(&gtm_ser->throttle_fifo_lock, &gtm501l_key, 0);
++ status = kfifo_alloc(gtm_ser->throttle_fifo, GTM501l_THROTTLE_FIFO_SIZE,
++ GFP_KERNEL);
++ if (status) {
++ tty_unregister_device(tty_drv,
++ gtm_ser->minor);
++ kfree(port_data);
++ continue;
++ }
++
++ if (device_create_file(gtm_ser->dev, &dev_attr_channel))
++ dev_err(gtm_ser->dev, "Could not create sysfs file for channel\n");
++ }
++ else if (port_data->spec.type == GTM501L_PORT_SPEC_NET) {
++ /* net device */
++ net = alloc_netdev(sizeof(struct gtm501l_port_data *), "gtm%d",
++ gtm501l_net_init);
++ if (!net) {
++ kfifo_free(port_data->tx_fifo);
++ kfree(port_data);
++ continue;
++ }
++
++ *((struct gtm501l_port_data **)netdev_priv(net)) = port_data;
++ port_data->type.net.net = net;
++
++ if (register_netdev(net)) {
++ free_netdev(net);
++ kfifo_free(port_data->tx_fifo);
++ kfree(port_data);
++ continue;
++ }
++
++ if (device_create_file(&net->dev, &dev_attr_channel))
++ dev_err(&net->dev, "Could not create sysfs file for channel\n");
++ }
++
++ gtm_dev->port_data[i] = port_data;
++
++ }
++}
++
++static void gtm501l_free_device(struct kref *ref)
++{
++ int i;
++ struct gtm501l_device *gtm_dev =
++ container_of(ref, struct gtm501l_device, ref);
++ struct gtm501l_port_data *port_data;
++
++ tasklet_kill(&gtm_dev->io_work_tasklet);
++
++ for (i = 0; i < GTM501L_PORT_PER_DEV; i++) {
++ port_data = gtm_dev->port_data[i];
++ if(port_data)
++ gtm501l_free_port(port_data);
++ }
++#ifdef CONFIG_DEBUG_FS
++ gtm501l_debugfs_remove(gtm_dev);
++#endif
++ kfree(gtm_dev);
++}
++
++static void gtm501l_spi_complete(void *ctx)
++{
++ struct gtm501l_device *gtm_dev = (struct gtm501l_device *)ctx;
++ unsigned int rx_count = 0;
++
++ if(gtm_dev->stats) {
++ gtm_dev->stats->transfer_finished_wait_start(gtm_dev->frame_stats);
++ gtm_dev->stats->transfer_decode_start(gtm_dev->frame_stats);
++ }
++
++ /* did we get removed meanwhile ? */
++ if (!test_bit(GTM501L_STATE_PRESENT, &gtm_dev->flags))
++ return;
++
++ if (!gtm_dev->spi_msg.status) {
++#if 0
++ dma_unmap_single(&gtm_dev->spi_dev->dev,
++ gtm_dev->tx_dma[(gtm_dev->tx_buffer_used + 1) % 2],
++ GTM501L_TRANSFER_SIZE, DMA_TO_DEVICE);
++ dma_unmap_single(&gtm_dev->spi_dev->dev, gtm_dev->rx_dma,
++ GTM501L_TRANSFER_SIZE, DMA_FROM_DEVICE);
++#endif
++ rx_count = gtm501l_demux(gtm_dev, gtm_dev->rx_buffer,
++ gtm_dev->spi_msg.actual_length);
++ } else {
++ spi_err_count++;
++ printk("SPI transfer error %d - (%d)\n",
++ gtm_dev->spi_msg.status, spi_err_count);
++ }
++
++ if(gtm_dev->stats) gtm_dev->stats->decode_finished_may_idle_start(gtm_dev->frame_stats, rx_count);
++
++ clear_bit(GTM501L_STATE_IO_IN_PROGRESS, &gtm_dev->flags);
++
++ //gtm501l_prepare_tx_buffer(gtm_dev);
++
++ if(test_and_clear_bit(GTM501L_STATE_IO_READY, &gtm_dev->flags))
++ tasklet_hi_schedule(&gtm_dev->io_work_tasklet);
++}
++
++/* char/tty operations */
++
++static void gtm501l_throttle(struct tty_struct *tty)
++{
++ struct gtm501l_port_data *port_data =
++ (struct gtm501l_port_data *)tty->driver_data;
++
++ func_enter();
++
++ if (port_data) {
++ func_exit();
++ return;
++ }
++
++ if(!test_bit(GTM501L_RX_FC, &port_data->signal_state)) {
++ set_bit(GTM501L_RX_FC, &port_data->signal_state);
++ set_bit(GTM501L_UPDATE, &port_data->signal_state);
++ }
++
++ func_exit();
++}
++
++/* To be checked... I can't remember the exact details but the hso driver
++ * needed a hso_unthrottle_tasklet to prevent hso_throttle being
++ * called recursively, I am not sure whether this can happen here.
++ */
++#define UNTHROTTLE_STACK_BUF_SIZE (512)
++static void gtm501l_unthrottle(struct tty_struct *tty)
++{
++ struct gtm501l_port_data *port_data =
++ (struct gtm501l_port_data *)tty->driver_data;
++ struct gtm501l_serial *gtm_ser;
++ int write_length_remaining, curr_write_len;
++ char stack_buff[UNTHROTTLE_STACK_BUF_SIZE];
++ struct gtm501l_device *gtm_dev = port_data->spi_itf;
++
++ func_enter();
++
++ if (!port_data) {
++ func_exit();
++ return;
++ }
++
++ gtm_ser=&port_data->type.serial;
++ write_length_remaining=kfifo_len(gtm_ser->throttle_fifo);
++ while (write_length_remaining) {
++ if (test_bit(TTY_THROTTLED, &tty->flags)) {
++ func_exit();
++ return;
++ }
++ curr_write_len = min(write_length_remaining,
++ UNTHROTTLE_STACK_BUF_SIZE);
++ curr_write_len = kfifo_out(gtm_ser->throttle_fifo,
++ stack_buff, curr_write_len);
++ curr_write_len = tty_insert_flip_string
++ (tty, stack_buff,
++ curr_write_len);
++ write_length_remaining -= curr_write_len;
++ tty_flip_buffer_push(tty);
++ }
++
++ clear_bit(GTM501L_RX_FC, &port_data->signal_state);
++ set_bit(GTM501L_UPDATE, &port_data->signal_state);
++
++ /* If the timer is currently running, stop it and try to initiate a
++ * transfer immediately */
++ if(timer_pending(&gtm_dev->timer)) {
++ del_timer_sync(&gtm_dev->timer);
++ gtm501l_io((unsigned long)gtm_dev);
++ }
++
++ func_exit();
++}
++
++static int gtm501l_tiocmget(struct tty_struct *tty, struct file *filp)
++{
++ unsigned int value;
++ struct gtm501l_port_data *port_data =
++ (struct gtm501l_port_data *)tty->driver_data;
++
++ func_enter();
++
++ if (!port_data) {
++ func_exit();
++ return 0;
++ }
++
++ value =
++ (test_bit(GTM501L_RTS, &port_data->signal_state) ? TIOCM_RTS : 0) |
++ (test_bit(GTM501L_DTR, &port_data->signal_state) ? TIOCM_DTR : 0) |
++ (test_bit(GTM501L_CTS, &port_data->signal_state) ? TIOCM_CTS : 0) |
++ (test_bit(GTM501L_DSR, &port_data->signal_state) ? TIOCM_DSR : 0) |
++ (test_bit(GTM501L_DCD, &port_data->signal_state) ? TIOCM_CAR : 0) |
++ (test_bit(GTM501L_RI, &port_data->signal_state) ? TIOCM_RNG : 0);
++
++ func_exit();
++ return value;
++}
++
++static int gtm501l_tiocmset(struct tty_struct *tty, struct file *filp,
++ unsigned int set, unsigned int clear)
++{
++ struct gtm501l_port_data *port_data =
++ (struct gtm501l_port_data *)tty->driver_data;
++
++ func_enter();
++
++ if (!port_data) {
++ func_exit();
++ return -ENODEV;
++ }
++
++ if (set & TIOCM_RTS)
++ set_bit(GTM501L_RTS, &port_data->signal_state);
++ if (set & TIOCM_DTR)
++ set_bit(GTM501L_DTR, &port_data->signal_state);
++
++ if (clear & TIOCM_RTS)
++ clear_bit(GTM501L_RTS, &port_data->signal_state);
++ if (clear & TIOCM_DTR)
++ clear_bit(GTM501L_DTR, &port_data->signal_state);
++
++ set_bit(GTM501L_UPDATE, &port_data->signal_state);
++
++ func_exit();
++ return 0;
++}
++
++static int gtm501l_open(struct tty_struct *tty, struct file *filp)
++{
++ struct gtm501l_serial *gtm_ser = NULL;
++ struct gtm501l_port_data *port_data;
++
++ func_enter();
++
++ if ((tty->index > GTM501L_MAX_MINORS)
++ || (!gtm501l_serial_ports[tty->index])) {
++ func_exit();
++ return -ENODEV;
++ }
++
++ port_data = gtm501l_serial_ports[tty->index];
++ gtm_ser = &port_data->type.serial;
++
++ if (!test_bit(GTM501L_STATE_PRESENT, &port_data->spi_itf->flags)) {
++ func_exit();
++ return -ENODEV;
++ }
++
++ gtm_ser->open++;
++ tty->driver_data = port_data;
++ tty->low_latency = 1;
++ gtm_ser->tty = tty;
++ _gtm501l_set_termios(tty, NULL);
++
++ /* signal_update_needed flag will be set by tiocmset */
++ clear_bit(GTM501L_RX_FC, &port_data->signal_state);
++ gtm501l_tiocmset(tty, filp, TIOCM_DTR | TIOCM_RTS, 0);
++
++ kref_get(&port_data->spi_itf->ref);
++ func_exit();
++ return 0;
++}
++
++static void gtm501l_close(struct tty_struct *tty, struct file *filp)
++{
++ struct gtm501l_serial *gtm_ser = NULL;
++ struct gtm501l_port_data *port_data =
++ (struct gtm501l_port_data *)tty->driver_data;
++
++ func_enter();
++
++ if ((tty->index > GTM501L_MAX_MINORS) || !port_data) {
++ func_exit();
++ return;
++ }
++
++ gtm_ser = &port_data->type.serial;
++
++ /*
++ * ugh, the refcounting... unfortunately open() & close()'s aren't always executed symmetrically.
++ * There are cases where after a failed open you can still get a close(). We can't handle those
++ * here. File a tty layer bug.
++ */
++ if(--gtm_ser->open >= 0) {
++ kref_put(&port_data->spi_itf->ref, gtm501l_free_device);
++ if( gtm_ser->open == 0) {
++ kfifo_reset(port_data->tx_fifo);
++ /* signal_update_needed flag will be set by tiocmset */
++ set_bit(GTM501L_RX_FC, &port_data->signal_state);
++ gtm501l_tiocmset(tty, filp, 0, TIOCM_DTR | TIOCM_RTS);
++ gtm_ser->tty = NULL;
++ }
++ } else gtm_ser->open = 0;
++
++ func_exit();
++}
++
++static int gtm501l_write(struct tty_struct *tty, const unsigned char *buf,
++ int count)
++{
++ struct gtm501l_port_data *port_data =
++ (struct gtm501l_port_data *)tty->driver_data;
++ struct gtm501l_serial *gtm_ser;
++ unsigned int tx_count;
++ unsigned char *tmp_buf = (unsigned char *)buf;
++ struct gtm501l_device *gtm_dev = port_data->spi_itf;
++
++ func_enter();
++
++ if (!port_data) {
++ func_exit();
++ return -ENODEV;
++ }
++
++ gtm_ser = &port_data->type.serial;
++
++ tx_count = kfifo_in(port_data->tx_fifo, tmp_buf, count);
++ total_tty_write+=tx_count;
++
++ /* If the timer is currently running, stop it and try to initiate a
++ * transfer immediately */
++ if(timer_pending(&gtm_dev->timer)) {
++ del_timer_sync(&gtm_dev->timer);
++ gtm501l_io((unsigned long)gtm_dev);
++ }
++
++ //printk("Write: wrote %d bytes in fifo (total = %d)\n", tx_count, total_tty_write);
++
++ func_exit();
++
++ return tx_count;
++}
++
++static int gtm501l_write_room(struct tty_struct *tty)
++{
++ struct gtm501l_port_data *port_data =
++ (struct gtm501l_port_data *)tty->driver_data;
++
++ if (!port_data) {
++ return -ENODEV;
++ }
++
++ //func_enter();
++
++ return GTM501L_FIFO_SIZE - kfifo_len(port_data->tx_fifo);
++}
++
++static void _gtm501l_set_termios(struct tty_struct *tty, struct ktermios *old)
++{
++ struct gtm501l_port_data *port_data =
++ (struct gtm501l_port_data *)tty->driver_data;
++ struct gtm501l_serial *serial;
++ struct ktermios *termios;
++
++ if ((!tty) || (!tty->termios) || (!port_data)) {
++ printk(KERN_ERR "%s: no tty structures", __func__);
++ return;
++ }
++
++ serial = &port_data->type.serial;
++ /*
++ * * The default requirements for this device are:
++ * */
++ termios = tty->termios;
++ termios->c_iflag &= ~(IGNBRK /* disable ignore break */
++ | BRKINT /* disable break causes interrupt */
++ | PARMRK /* disable mark parity errors */
++ | ISTRIP /* disable clear high bit of input characters */
++ | INLCR /* disable translate NL to CR */
++ | IGNCR /* disable ignore CR */
++ | ICRNL /* disable translate CR to NL */
++ | IXON); /* disable enable XON/XOFF flow control */
++
++ /* disable postprocess output characters */
++ termios->c_oflag &= ~OPOST;
++
++ termios->c_lflag &= ~(ECHO /* disable echo input characters */
++ | ECHONL /* disable echo new line */
++ | ICANON /* disable erase, kill, werase, and rprnt
++ special characters */
++ | ISIG /* disable interrupt, quit, and suspend special
++ characters */
++ | IEXTEN); /* disable non-POSIX special characters */
++
++ termios->c_cflag &= ~(CSIZE /* no size */
++ | PARENB /* disable parity bit */
++ | CBAUD /* clear current baud rate */
++ | CBAUDEX); /* clear current buad rate */
++ termios->c_cflag |= CS8; /* character size 8 bits */
++
++ tty_encode_baud_rate(serial->tty, 115200, 115200);
++ /*
++ * Force low_latency on; otherwise the pushes are scheduled;
++ * this is bad as it opens up the possibility of dropping bytes
++ * on the floor. We don't want to drop bytes on the floor. :)
++ */
++ serial->tty->low_latency = 1;
++ serial->tty->termios->c_cflag |= B115200; /* baud rate 115200 */
++ return;
++}
++
++static void gtm501l_set_termios(struct tty_struct *tty, struct ktermios *old)
++{
++ struct gtm501l_port_data *port_data =
++ (struct gtm501l_port_data *)tty->driver_data;
++ struct gtm501l_serial *serial;
++
++ func_enter();
++
++ if (!port_data) {
++ func_exit();
++ return;
++ }
++ serial = &port_data->type.serial;
++
++ /* the actual setup */
++ if (serial->tty)
++ _gtm501l_set_termios(tty, old);
++ else
++ tty->termios = old;
++
++ /* done */
++ func_exit();
++ return;
++}
++
++static int gtm501l_chars_in_buffer(struct tty_struct *tty)
++{
++ struct gtm501l_port_data *port_data =
++ (struct gtm501l_port_data *)tty->driver_data;
++
++ if (!port_data)
++ return -ENODEV;
++
++ //func_enter();
++
++ return kfifo_len(port_data->tx_fifo);
++}
++
++static struct mrst_spi_chip mrst_gtm501l = {
++ .poll_mode = 0,
++ .enable_dma = 1,
++ .type = SPI_FRF_SPI,
++};
++
++/* spi operations */
++
++static int gtm501l_spi_probe(struct spi_device *spi)
++{
++ struct gtm501l_device *gtm_dev;
++ int i;
++
++ func_enter();
++
++ /* we check here only the SPI mode and correct them, if needed */
++ if (GTM501L_SPI_MODE != (spi->mode & (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST | SPI_3WIRE))) {
++ pr_warning("%s: SPI mode wrong setup, found %d, correct to %d\n",
++ DRVNAME, spi->mode, GTM501L_SPI_MODE);
++ spi->mode = GTM501L_SPI_MODE | (SPI_LOOP & spi->mode);
++ }
++
++ if (spi->mode & SPI_LOOP) {
++ pr_warning("%s: SPI device in loop back\n", DRVNAME);
++ }
++
++ /* The Bit_per_word and the maximum speed has to be setup by us, the protocol driver */
++ if(spi_b16)
++ spi->bits_per_word = 16;
++ else
++ spi->bits_per_word = 8;
++
++ spi->max_speed_hz = GTM501L_SPI_SPEED;
++
++ spi->controller_data = &mrst_gtm501l;
++
++ if (spi_setup(spi)) {
++ pr_err("%s: SPI setup does wasn't successful\n", DRVNAME);
++ func_exit();
++ return -ENODEV;
++ }
++
++ /* initialize structure to hold our device variables */
++ gtm_dev = kzalloc(sizeof(struct gtm501l_device), GFP_ATOMIC);
++
++ if (!gtm_dev) {
++ func_exit();
++ return -ENOMEM;
++ }
++ gtm_dev->spi_dev = spi;
++ kref_init(&gtm_dev->ref);
++
++ /*initialize transfer and dma buffers */
++ for(i = 0; i < 2; i++) {
++ gtm_dev->tx_buffer[i] = kzalloc(GTM501L_TRANSFER_SIZE, GFP_KERNEL | GFP_DMA);
++ if( 0 == gtm_dev->tx_buffer[i]) {
++ pr_err("%s: DMA-TX[%d] buffer allocation failed\n", DRVNAME, i);
++ func_exit();
++ return -EIO;
++ }
++ }
++ gtm_dev->rx_buffer = kzalloc(GTM501L_TRANSFER_SIZE, GFP_KERNEL | GFP_DMA);
++ if( 0 == gtm_dev->rx_buffer) {
++ pr_err("%s: DMA-RX buffer allocation failed\n", DRVNAME);
++ func_exit();
++ return -EIO;
++ }
++
++ /* create our tty/net ports */
++ gtm501l_create_ports(gtm_dev, gtm501l_default_port_spec);
++
++ spi_set_drvdata(spi, gtm_dev);
++
++ tasklet_init(&gtm_dev->io_work_tasklet,
++ (void (*)(unsigned long))gtm501l_io,
++ (unsigned long)gtm_dev);
++
++ init_timer(&gtm_dev->timer);
++
++#ifdef CONFIG_DEBUG_FS
++ gtm501l_debugfs_init(gtm_dev);
++#endif
++
++ /*
++ * Init GPIO and IRQ, if at least the gpio parameter is set
++ */
++ if (gpio_in < 0)
++ gpio_in = GTM501L_GPIO0;
++
++ if (request_irq(spi->irq, gtm501l_gpio_interrupt, GTM501L_IRQ_TYPE,
++ "option", (void *)gtm_dev)) {
++ kref_put(&gtm_dev->ref, gtm501l_free_device);
++ func_exit();
++ return -EIO;
++ }
++
++ set_bit(GTM501L_STATE_PRESENT, &gtm_dev->flags);
++ /*
++ * Schedule tasklet once in case the gpio is active at probe time.
++ * Otherwise wait for the next interrupt
++ */
++ gtm501l_gpio_interrupt(spi->irq, (void *)gtm_dev);
++
++ func_exit();
++ return 0;
++}
++
++static int gtm501l_spi_remove(struct spi_device *spi)
++{
++ struct gtm501l_device *gtm_dev =
++ (struct gtm501l_device *)spi_get_drvdata(spi);
++
++ func_enter();
++
++ del_timer_sync(&gtm_dev->timer);
++
++ clear_bit(GTM501L_STATE_PRESENT, &gtm_dev->flags);
++ free_irq(spi->irq, gtm_dev);
++ kfree(gtm_dev->tx_buffer[0]);
++ kfree(gtm_dev->tx_buffer[1]);
++ kfree(gtm_dev->rx_buffer);
++ spi_set_drvdata(spi, NULL);
++ kref_put(&gtm_dev->ref, gtm501l_free_device);
++
++ func_exit();
++ return 0;
++}
++
++static void gtm501l_spi_shutdown(struct spi_device *spi)
++{
++ func_enter();
++}
++
++static int gtm501l_spi_suspend(struct spi_device *spi, pm_message_t msg)
++{
++ func_enter();
++ return 0;
++}
++
++static int gtm501l_spi_resume(struct spi_device *spi)
++{
++ func_enter();
++ return 0;
++}
++
++static struct tty_operations gtm501l_serial_ops = {
++ .open = gtm501l_open,
++ .close = gtm501l_close,
++ .write = gtm501l_write,
++ .write_room = gtm501l_write_room,
++ .set_termios = gtm501l_set_termios,
++ .chars_in_buffer = gtm501l_chars_in_buffer,
++ .tiocmget = gtm501l_tiocmget,
++ .tiocmset = gtm501l_tiocmset,
++ .throttle = gtm501l_throttle,
++ .unthrottle = gtm501l_unthrottle
++};
++
++static struct spi_driver gtm501l_spi_driver = {
++ .driver = {
++ .name = "spi_opt_modem",
++ .bus = &spi_bus_type,
++ .owner = THIS_MODULE
++ },
++ .probe = gtm501l_spi_probe,
++ .remove = __devexit_p(gtm501l_spi_remove),
++ .shutdown = gtm501l_spi_shutdown,
++ .suspend = gtm501l_spi_suspend,
++ .resume = gtm501l_spi_resume,
++};
++
++/* module exit point */
++static void __exit gtm501l_exit(void)
++{
++ func_enter();
++ tty_unregister_driver(tty_drv);
++ spi_unregister_driver(&gtm501l_spi_driver);
++ dprintk(DEBUG_CLEANUP, "GTM501L driver removed\n");
++ func_exit();
++}
++
++/* module entry point */
++static int __init gtm501l_init(void)
++{
++ int result = 0;
++
++ func_enter();
++
++/* gtm501l_pmic_init_voltages();*/
++ gtm501l_pmic_set_wwandisablen(1);
++
++ gtm501l_pmic_set_wwanresetn(0);
++ msleep(100);
++ gtm501l_pmic_set_wwanresetn(1);
++
++ memset(gtm501l_serial_ports, 0, sizeof(gtm501l_serial_ports));
++ memset(gtm501l_termios, 0, sizeof(gtm501l_termios));
++ memset(gtm501l_termios_locked, 0, sizeof(gtm501l_termios_locked));
++
++ /* initialize lower-edge tty driver */
++ tty_drv = alloc_tty_driver(GTM501L_MAX_MINORS);
++ if (!tty_drv) {
++ func_exit();
++ return -ENOMEM;
++ }
++
++ tty_drv->magic = TTY_DRIVER_MAGIC;
++ tty_drv->owner = THIS_MODULE;
++ tty_drv->driver_name = "gtm501l";
++ tty_drv->name = "ttyGTM";
++ tty_drv->minor_start = 0;
++ tty_drv->num = GTM501L_MAX_MINORS;
++ tty_drv->type = TTY_DRIVER_TYPE_SERIAL;
++ tty_drv->subtype = SERIAL_TYPE_NORMAL;
++ tty_drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
++ tty_drv->init_termios = tty_std_termios;
++ tty_drv->init_termios.c_cflag = B115200 | CS8 | CREAD | HUPCL | CLOCAL;
++ tty_drv->termios = gtm501l_termios;
++ tty_drv->termios_locked = gtm501l_termios_locked;
++
++ tty_set_operations(tty_drv, &gtm501l_serial_ops);
++
++ result = tty_register_driver(tty_drv);
++ if (result) {
++ printk(KERN_ERR "%s - tty_register_driver failed(%d)\n",
++ __func__, result);
++ func_exit();
++ return result;
++ }
++
++ /*
++ initialize upper-edge spi driver. needs to be done after tty initialization because the spi probe will
++ race
++ */
++ result = spi_register_driver(&gtm501l_spi_driver);
++ if (result) {
++ printk(KERN_ERR "%s - spi_register_driver failed(%d)\n",
++ __func__, result);
++ tty_unregister_driver(tty_drv);
++ func_exit();
++ return result;
++ }
++
++ dprintk(DEBUG_INIT, "GTM501L driver initialized successfully\n");
++ func_exit();
++ return 0;
++}
++
++static int gtm501l_net_open(struct net_device *net)
++{
++ struct gtm501l_port_data *port_data = net_to_gtm501l_data(net);
++
++ func_enter();
++
++ port_data->type.net.rx_state = WAIT_IP;
++ port_data->type.net.sync_lost = 0;
++ port_data->type.net.rx_buf_size = 0;
++ port_data->type.net.rx_buf_missing = sizeof(struct iphdr);
++
++ /* update remote side it's ok to send us data */
++ clear_bit(GTM501L_RX_FC, &port_data->signal_state);
++ set_bit(GTM501L_UPDATE, &port_data->signal_state);
++ netif_start_queue(net);
++ func_exit();
++ return 0;
++}
++
++static int gtm501l_net_close(struct net_device *net)
++{
++ struct gtm501l_port_data *port_data = net_to_gtm501l_data(net);
++
++ func_enter();
++
++ /* stop remote side from sending us data */
++ set_bit(GTM501L_RX_FC, &port_data->signal_state);
++ set_bit(GTM501L_UPDATE, &port_data->signal_state);
++ netif_stop_queue(net);
++ func_exit();
++ return 0;
++}
++
++static void gtm501l_push_skb(struct gtm501l_port_data *port_data)
++{
++ struct gtm501l_net *gtm_net = &port_data->type.net;
++ struct sk_buff *skb = gtm_net->tx_skb;
++ unsigned int len;
++
++ func_enter();
++
++ if (skb && gtm_net->net->flags & IFF_UP) {
++ len = kfifo_in(port_data->tx_fifo, skb->data, skb->len);
++ skb_pull(skb, len);
++ if (skb->len == 0) {
++ // dev_kfree_skb(skb); // TODO: This causes a crash...
++ gtm_net->tx_skb = NULL;
++ netif_start_queue(gtm_net->net);
++ }
++ }
++
++ func_exit();
++}
++
++static int gtm501l_net_start_xmit(struct sk_buff *skb, struct net_device *net)
++{
++ int result = 0;
++ struct gtm501l_port_data *port_data = net_to_gtm501l_data(net);
++ struct gtm501l_net *gtm_net = &port_data->type.net;
++
++ func_enter();
++
++ netif_stop_queue(net);
++
++ if (gtm_net->tx_skb) {
++ printk(KERN_ERR "%s tx_skb not null\n", __func__);
++ result = -EIO;
++ } else {
++ gtm_net->tx_skb = skb;
++ gtm501l_push_skb(port_data);
++ }
++ if (result) {
++ STATS(net).tx_errors++;
++ netif_start_queue(net);
++ } else {
++ STATS(net).tx_packets++;
++ STATS(net).tx_bytes += skb->len;
++ /* And tell the kernel when the last transmit started. */
++ net->trans_start = jiffies;
++ }
++ /* we're done */
++ func_exit();
++ return result;
++}
++
++#ifndef NETDEVICE_HAS_STATS
++static struct net_device_stats *gtm501l_net_get_stats(struct net_device *net)
++{
++ return &STATS(net);
++}
++#endif
++
++/* called when a packet did not ack after watchdogtimeout */
++static void gtm501l_net_tx_timeout(struct net_device *net)
++{
++ func_enter();
++
++ /* Tell syslog we are hosed. */
++ dev_warn(&net->dev, "Tx timed out.\n");
++
++ /* Update statistics */
++ STATS(net).tx_errors++;
++
++ func_exit();
++}
++
++static const struct net_device_ops gtm501l_netdev_ops = {
++ .ndo_open = gtm501l_net_open,
++ .ndo_stop = gtm501l_net_close,
++ .ndo_start_xmit = gtm501l_net_start_xmit,
++#ifndef NETDEVICE_HAS_STATS
++ .ndo_get_stats = gtm501l_net_get_stats,
++#endif
++ .ndo_tx_timeout = gtm501l_net_tx_timeout,
++};
++
++static void gtm501l_net_init(struct net_device *net)
++{
++ func_enter();
++
++ /* fill in the other fields */
++ net->netdev_ops = &gtm501l_netdev_ops;
++ net->watchdog_timeo = GTM501L_NET_TX_TIMEOUT;
++ net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
++ net->type = ARPHRD_NONE;
++ net->mtu = GTM501L_DEFAULT_MTU;
++ net->tx_queue_len = 10;
++
++ func_exit();
++}
++
++struct gtm501l_device *gtm501l_set_stats_ops(struct gtm501_stats_ops *stats)
++{
++ struct gtm501l_device *gtm_dev = NULL;
++ int i;
++
++ /* Look for gtm_dev */
++ for (i = 0; i < GTM501L_MAX_MINORS; i++) {
++ if (gtm501l_serial_ports[i] &&
++ gtm501l_serial_ports[i]->spi_itf) {
++ gtm_dev = gtm501l_serial_ports[i]->spi_itf;
++ break;
++ }
++ }
++
++ if(gtm_dev)
++ gtm_dev->stats = stats;
++
++ return gtm_dev;
++}
++
++/* module definitions */
++module_init(gtm501l_init);
++module_exit(gtm501l_exit);
++
++module_param_named(backoff, backoff_enabled, uint, S_IRUGO);
++MODULE_PARM_DESC(backoff, "Enable (1) or disable (0) backoff timer.");
++
++module_param_named(gpi, gpio_in, uint, S_IRUGO);
++MODULE_PARM_DESC(gpi, "GPIO input base address. (default: -1 => automatic)");
++
++module_param_named(b16, spi_b16, bool, S_IRUGO);
++MODULE_PARM_DESC(b16, "SPI 16Bit/word or 8Bit/word, default 16Bit");
++
++#ifdef DEBUG
++module_param_named(debug, gtm501l_debug, uint, S_IRUGO);
++MODULE_PARM_DESC(debug, "Debug flags");
++#endif
++
++MODULE_AUTHOR("Option Wireless");
++MODULE_DESCRIPTION("GTM501L spi driver");
++MODULE_LICENSE("GPL");
++MODULE_INFO(Version, "0.5pre1-option");
++
++EXPORT_SYMBOL_GPL(gtm501l_debug);
++EXPORT_SYMBOL_GPL(gtm501l_debug_printk);
++EXPORT_SYMBOL_GPL(gtm501l_set_stats_ops);
++
+Index: linux-2.6.33/drivers/spi/gtm501l_spi.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/spi/gtm501l_spi.h
+@@ -0,0 +1,329 @@
++/****************************************************************************
++ *
++ * Driver for the Option GTM501L spi modem.
++ *
++ * Copyright (C) 2008 Option International
++ * Copyright (C) 2008 Filip Aben <f.aben@option.com>
++ * Denis Joseph Barrow <d.barow@option.com>
++ * Jan Dumon <j.dumon@option.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
++ * USA
++ *
++ *
++ *
++ *****************************************************************************/
++
++#ifndef _GTM501L_SPI_H
++#define _GTM501L_SPI_H
++#include <linux/version.h>
++#include <linux/tty.h>
++#include <linux/device.h>
++#include <linux/spi/spi.h>
++
++#include <linux/netdevice.h>
++#include <linux/ip.h>
++
++#define DRVNAME "gtm501l"
++
++#define DEBUG
++
++#ifdef DEBUG
++#define DEBUG_FLOW (1 << 0)
++#define DEBUG_INIT (1 << 1)
++#define DEBUG_CLEANUP (1 << 2)
++#define DEBUG_TTY (1 << 3)
++#define DEBUG_NET (1 << 4)
++#define DEBUG_MUX (1 << 5)
++#define DEBUG_DEMUX (1 << 6)
++#define DEBUG_SPI (1 << 7)
++#define DEBUG_GPIO (1 << 8)
++
++#define dprintk(f, str...) if(gtm501l_debug & f) gtm501l_debug_printk(__func__, __LINE__, str)
++
++#define GTM501L_BUFFER_DUMP(prefix_str,buf,len) \
++ print_hex_dump(KERN_DEBUG,prefix_str, DUMP_PREFIX_OFFSET,16,1,buf,len,1)
++
++void gtm501l_debug_printk(const char *function, int line, char *format, ...);
++extern int gtm501l_debug;
++
++#else
++#define dprintk(f, str...)
++#define GTM501L_BUFFER_DUMP(prefix_str,buf,len)
++#endif
++
++#define func_enter() dprintk(DEBUG_FLOW, "enter\n")
++#define func_exit() dprintk(DEBUG_FLOW, "exit\n")
++
++#define GTM501L_DEFAULT_MTU 1500
++#define GTM501L_DEFAULT_MRU 2500
++#define GTM501L_NET_TX_TIMEOUT (HZ * 10)
++
++#define GTM501L_IRQ_TYPE IRQ_TYPE_EDGE_FALLING
++#define GTM501L_GPIO_TARGET 0
++#define GTM501L_GPIO0 0x3c /* default use Langwell GPIO60 */
++
++/* various macro definitions */
++#define GTM501L_MAX_MINORS 256
++#define GTM501L_PORT_PER_DEV 16
++#define GTM501L_TRANSFER_SIZE 2040
++/* GTM501l_THROTTLE_FIFO_SIZE must be a power of 2
++ * & larger than GTM501L_TRANSFER_SIZE */
++#define GTM501l_THROTTLE_FIFO_SIZE 4096
++#define GTM501L_FIFO_SIZE 4096
++
++/* device flags bitfield definitions */
++#define GTM501L_STATE_PRESENT 0
++#define GTM501L_STATE_IO_IN_PROGRESS 1
++#define GTM501L_STATE_IO_READY 2
++
++#define MUX_CHANNEL(x) ((x >> MUX_CHANNEL_SHIFT) & 0xF)
++#define MUX_CHANNEL_SHIFT 0
++#define MUX_BLOCK_TYPE(x) ((x >> MUX_BLOCK_TYPE_SHIFT) & 0x3)
++#define MUX_BLOCK_TYPE_SHIFT 4
++#define MUX_DEVICE(x) ((x >> MUX_DEVICE_SHIFT) & 0x3)
++#define MUX_DEVICE_SHIFT 6
++#define MUX_BURST_SIZE 512
++
++#define MUX_DATA_TRANSFER 0
++#define MUX_BURST_TRANSFER 1
++#define MUX_CONTROL_TRANSFER 2
++
++#define MUX_CONTROL_BYTE(channel,type,device) ( \
++ (channel<<MUX_CHANNEL_SHIFT) | \
++ (type<<MUX_BLOCK_TYPE_SHIFT) | \
++ (device<<MUX_DEVICE_SHIFT) \
++ )
++
++#define MUX_DCD(x) ((x >> MUX_DCD_SHIFT) & 0x1)
++#define MUX_DCD_SHIFT 0
++#define MUX_CTS(x) ((x >> MUX_CTS_SHIFT) & 0x1)
++#define MUX_CTS_SHIFT 1
++#define MUX_DSR(x) ((x >> MUX_DSR_SHIFT) & 0x1)
++#define MUX_DSR_SHIFT 2
++#define MUX_RI(x) ((x >> MUX_RI_SHIFT) & 0x1)
++#define MUX_RI_SHIFT 3
++#define MUX_DTR(x) ((x >> MUX_DTR_SHIFT) & 0x1)
++#define MUX_DTR_SHIFT 4
++#define MUX_RTS(x) ((x >> MUX_RTS_SHIFT) & 0x1)
++#define MUX_RTS_SHIFT 5
++#define MUX_LINK(x) ((x >> MUX_LINK_SHIFT) & 0x1)
++#define MUX_LINK_SHIFT 7
++
++#define MUX_INVALID 0
++#define MUX_SLAVE_TO_MASTER 1
++#define MUX_MASTER_TO_SLAVE 2
++#define MUX_INVALID2 3
++
++#define GTM501L_SPI_MODE SPI_MODE_1 /* SPI Mode 1 currently used */
++
++#define GTM501L_SPI_SPEED 12500000
++
++/* flow control bitfields */
++#define GTM501L_DCD 0
++#define GTM501L_CTS 1
++#define GTM501L_DSR 2
++#define GTM501L_RI 3
++#define GTM501L_DTR 4
++#define GTM501L_RTS 5
++#define GTM501L_TX_FC 6
++#define GTM501L_RX_FC 7
++#define GTM501L_UPDATE 8
++
++#define GTM501L_MAX_EMPTY 500
++#define GTM501L_BACKOFF_TIMER (HZ / 2)
++
++struct gtm501l_device {
++ struct spi_device *spi_dev;
++ struct kref ref;
++ struct gtm501l_port_data *port_data[GTM501L_PORT_PER_DEV];
++ struct tasklet_struct io_work_tasklet;
++ unsigned long flags;
++ dma_addr_t rx_dma;
++ dma_addr_t tx_dma[2];
++
++ unsigned char *rx_buffer;
++ unsigned char *tx_buffer[2];
++ int tx_buffer_used;
++ int tx_count;
++
++ struct spi_message spi_msg;
++ struct spi_transfer spi_xfer;
++
++ int gpio_irq;
++ int round_robin_index;
++
++ struct timer_list timer;
++ int empty_transfers;
++
++#ifdef CONFIG_DEBUG_FS
++ struct dentry *debugfs; /* debugfs parent directory */
++ struct gtm501l_frame_stats *frame_stats;
++#endif
++
++ struct gtm501_stats_ops *stats;
++};
++
++struct gtm501l_serial {
++ struct device *dev;
++ struct tty_struct *tty;
++ struct kfifo *throttle_fifo;
++ spinlock_t throttle_fifo_lock;
++ int minor;
++ int open;
++};
++
++enum rx_parse_state {
++ syncing,
++ getting_frame_len,
++ filling_skb,
++ WAIT_IP,
++ WAIT_DATA,
++ WAIT_SYNC
++};
++
++#undef NETDEVICE_HAS_STATS
++
++struct gtm501l_net {
++ enum rx_parse_state rx_state;
++ int sync_lost;
++ struct sk_buff *tx_skb;
++ struct sk_buff *rx_skb;
++ unsigned short rx_frame_len;
++ struct net_device *net;
++ unsigned short rx_buf_size;
++ unsigned short rx_buf_missing;
++ struct iphdr rx_ip_hdr;
++#ifndef NETDEVICE_HAS_STATS
++ struct net_device_stats stats;
++#endif
++};
++
++#define GTM501L_PORT_SPEC_SERIAL 0
++#define GTM501L_PORT_SPEC_NET 1
++
++struct gtm501l_port_spec {
++ int enabled;
++ int type;
++ char name[16];
++};
++
++struct gtm501l_port_data {
++ struct gtm501l_device *spi_itf;
++ int port_id;
++ struct gtm501l_port_spec spec;
++ struct kfifo *tx_fifo;
++ spinlock_t fifo_lock;
++ unsigned long signal_state;
++ union {
++ struct gtm501l_serial serial;
++ struct gtm501l_net net;
++ } type;
++};
++
++#define net_to_gtm501l_data(net) *((struct gtm501l_port_data **)netdev_priv(net))
++
++#ifdef NETDEVICE_HAS_STATS
++#define STATS(net) ((net)->stats)
++#else
++#define STATS(net) (((struct gtm501l_port_data *)net_to_gtm501l_data(net))->type.net.stats)
++#endif
++
++#ifdef CONFIG_DEBUG_FS
++
++/**
++ * transfer SPI frame sequence, can be used for global sequence state or for per CPU seq state variable
++ */
++enum gtm501l_fsequence { /* frame sequence start states */
++ none, /* undefined state */
++ idle, /* idle state host driver waits */
++ encode, /* encoding SPI frame */
++ encode_interrupt_decode, /* encoding SPI frame started and interrupts decoding frame */
++ decode, /* decoding SPI frame */
++ decode_interrupt_encode /* decoding SPI frame started and interrupts decoding frame */
++};
++
++/**
++ * job time with the support for interrupt time correction, which me be used only for encoding and decoding time
++ * measurements
++ */
++struct gtm501l_jtime { /* job time */
++ ktime_t start; /* start time for that job */
++ ktime_t correct; /* correction time, if job was interrupted */
++ u32 dt; /* delta time need for that job in us */
++ u32 min_dt; /* min time need for that job is us */
++ u32 max_dt; /* max time need for that job is us */
++ u64 total; /* total time need for that job is us */
++ u32 bug; /* bug counter for negative time delta */
++};
++
++/**
++ * frame statistics
++ */
++struct gtm501l_frame_stats { /* frame transfer statistics */
++ spinlock_t lock; /* lock for that structure */
++ enum gtm501l_fsequence seq[NR_CPUS]; /* current sequence for each CPU separate */
++ struct gtm501l_jtime idle; /* timings for idle, just waiting for the application or GTM501L become busy */
++ struct gtm501l_jtime encode; /* timings for encoding SPI frame */
++ struct gtm501l_jtime transceive; /* timings for tranceiving SPI frame */
++ struct gtm501l_jtime decode; /* timings for decoding SPI frame */
++ struct gtm501l_jtime wait; /* timings for waiting for GTM501L become ready */
++ struct gtm501l_jtime cycle; /* timings for a SPI frame cycle without idle time */
++ struct kfifo *transmit_id_pipe; /* fifo pipe frame id to transmit task */
++ struct kfifo *decode_id_pipe; /* fifo pipe frame id to decode task */
++ struct kfifo *decode_txb_pipe; /* fifo pipe number of transmit byte to decode task for analysis */
++ struct kfifo *decode_dt_pipe; /* fifo pipe SPI frame transfer time to decode task for analysis */
++ u32 transmit_id; /* id and number of transmit SPI frames */
++ u32 receive_id; /* id and number of received SPI frames */
++ u32 encode_start_id; /* id and number of started encoded frames */
++ u32 encode_end_id; /* id and number of finished encoded frames */
++ u32 decode_start_id; /* id and number of started decoded frames */
++ u32 decode_end_id; /* id and number of started decoded frames */
++ u32 idles; /* number of entered idle states */
++ u32 waits; /* number of entered wait states */
++ u32 max_tx_bytes; /* maximum transmitted bytes in a frame */
++ u32 max_rx_bytes; /* maximum received bytes in a frame */
++ u64 total_tx_bytes; /* total transmitted bytes in a frame for calculating average */
++ u64 total_rx_bytes; /* total received bytes in a frame for calculating average */
++ u32 first_tx_bytes; /* first transmitted bytes in a frame for calculating average */
++ u32 first_rx_bytes; /* first received bytes in a frame for calculating average */
++ u32 max_tx_rate; /* maximum transmitted bytes per time rate in bytes/sec */
++ u32 max_rx_rate; /* maximum received bytes per time rate in bytes/sec */
++ u32 encode_pass_decode; /* encode task pass decode task */
++ u32 encode_interrupts_decode; /* encode task interrupts decode task on the same CPU */
++ u32 decode_pass_encode; /* decode task pass encode task */
++ u32 decode_interrupts_encode; /* decode task interrupts encode task on the same CPU */
++ u32 encode_bug; /* number of counted bugs for encode process */
++ int encode_buffers_used; /* number of need encode buffers */
++ u32 decode_bug; /* number of counted bugs for the decode process */
++ int decode_buffers_used; /* number of need decode buffers */
++ struct dentry *debugfs; /* debugfs entry for the frame_stats file */
++};
++
++#endif
++
++struct gtm501_stats_ops {
++ void (*wait_finished)(struct gtm501l_frame_stats *fstats);
++ void (*encode_start_idle_finished)(struct gtm501l_frame_stats *fstats);
++ void (*encode_finished)(struct gtm501l_frame_stats *fstats, unsigned int tx_bytes);
++ void (*transfer_start)(struct gtm501l_frame_stats *fstats);
++ void (*transfer_finished_wait_start)(struct gtm501l_frame_stats *fstats);
++ void (*transfer_decode_start)(struct gtm501l_frame_stats *fstats);
++ void (*decode_finished_may_idle_start)(struct gtm501l_frame_stats *fstats, unsigned int rx_bytes);
++};
++
++/* Prototypes */
++struct gtm501l_device *gtm501l_set_stats_ops(struct gtm501_stats_ops *stats);
++
++#endif
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-ifxgps-driver.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-ifxgps-driver.patch
new file mode 100644
index 0000000..f23aec3
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-ifxgps-driver.patch
@@ -0,0 +1,1648 @@
+Index: linux-2.6.33/drivers/spi/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/spi/Kconfig
++++ linux-2.6.33/drivers/spi/Kconfig
+@@ -339,6 +339,10 @@ config SPI_MRST_GTM501
+ tristate "SPI protocol driver for GTM501l"
+ depends on SPI_MRST
+
++config SPI_IFX_GPS
++ tristate "SPI protocol driver for IFX HH2 GPS"
++ depends on SPI_MRST
++
+ config SPI_SPIDEV
+ tristate "User mode SPI device driver support"
+ depends on EXPERIMENTAL
+Index: linux-2.6.33/drivers/spi/Makefile
+===================================================================
+--- linux-2.6.33.orig/drivers/spi/Makefile
++++ linux-2.6.33/drivers/spi/Makefile
+@@ -44,6 +44,7 @@ obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.
+ obj-$(CONFIG_SPI_NUC900) += spi_nuc900.o
+ obj-$(CONFIG_SPI_MRST) += mrst_spi.o
+ obj-$(CONFIG_SPI_MRST_GTM501) += gtm501l_spi.o
++obj-$(CONFIG_SPI_IFX_GPS) += hh2serial.o
+
+ # special build for s3c24xx spi driver with fiq support
+ spi_s3c24xx_hw-y := spi_s3c24xx.o
+Index: linux-2.6.33/drivers/spi/hh2serial.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/spi/hh2serial.c
+@@ -0,0 +1,1572 @@
++/*
++ * HH2 SPI Serial driver
++ *
++ * Copyright (C) 2009 Markus Burvall (Markus.Burvall@swedenconnectivity.com)
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, version 2 of the License.
++ *
++ */
++
++
++#define DEBUG 1
++
++//#define HH2_TTY_ECHO
++//#define HH2_TTY_SEND_POLL
++//#define HH2_NO_SPI
++#define HH2SERIAL_SPI_16BIT
++//#define HH2SERIAL_ENABLE_DEBUG
++#define HH2SERIAL_SPI_POLL
++
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++
++#include <linux/serial.h>
++#include <linux/serial_core.h>
++
++#include <linux/kthread.h>
++#include <linux/delay.h>
++#include <asm/atomic.h>
++
++#ifndef HH2_NO_SPI
++#include <linux/spi/spi.h>
++#include <linux/spi/mrst_spi.h>
++#endif
++
++MODULE_AUTHOR("Markus Burvall <Markus.Burvall@swedenconnectivity.com>");
++MODULE_DESCRIPTION("HH2 Serial Driver");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("hh2serial");
++
++#ifdef HH2SERIAL_ENABLE_DEBUG
++
++#define FUNC_ENTER() do { printk("ENTER: %s\n", __func__); } while (0)
++
++#else
++
++#define FUNC_ENTER()
++
++#endif
++
++
++struct hh2serial_dev {
++ struct uart_port port;
++ bool tx_enabled;
++ bool rx_enabled;
++ struct spi_device *spi;
++
++ struct task_struct *main_thread;
++ struct task_struct *poll_thread;
++
++ wait_queue_head_t wq;
++ atomic_t spi_need_read;
++ atomic_t tty_need_read;
++ atomic_t spi_irq_pending;
++ int mthread_up;
++};
++
++static const char driver_name[] = "hh2serial";
++static const char tty_dev_name[] = "ttyHH2";
++static struct hh2serial_dev priv0;
++
++
++/* max len for a spi transfer is 18B */
++#define HH2SERIAL_SPI_MAX_BYTES 18
++/* 16 bits / byte + read and write gives 4*18 = 72 */
++#define HH2SERIAL_BUFSIZE 72
++
++
++#ifdef HH2SERIAL_SPI_POLL
++#define HH2SERIAL_POLL_TIMEOUT 100
++#endif
++
++/* HH2 DATA OPERATIONS */
++#define GPSD_SRREAD 0x80 /* bit 7 */
++#define GPSD_DWRITE 0x40 /* bit 6 */
++#define GPSD_DREAD 0xC0 /* bit 7 and 6 */
++#define GPSD_CRWRITE 0x00 /* All zero */
++
++#ifdef HH2SERIAL_SPI_16BIT
++/* HH2 DATA OPERATIONS */
++#define GPSD_16BIT_SRREAD 0x8000 /* bit 7 */
++#define GPSD_16BIT_DWRITE 0x4000 /* bit 6 */
++#define GPSD_16BIT_DREAD 0xC000 /* bit 7 and 6 */
++#define GPSD_16BIT_CRWRITE 0x0000 /* All zero */
++#endif
++
++/* HH2 STATUS REGISTER */
++#define GPSS_TCNT 0x1F /* bits [4..0] */
++#define GPSS_REMPTY 0x20 /* bit 5 */
++#define GPSS_TERR 0x40 /* bit 6 */
++#define GPSS_RERR 0x80 /* bit 7 */
++
++/* HH2 CONTROL REGISTER */
++#define GPSC_ENABLE_TCNT_INTR 0x10 /* Enable Rx interrupt */
++#define GPSC_ENABLE_REMPTY_INTR 0x20 /* Enable Tx interrupt */
++#define GPSC_CLEAR_TERR 0x40 /* Clear TERR */
++#define GPSC_CLEAR_RERR 0x80 /* Clear RERR */
++#define GPSC_ENABLE_INTERRUPTS 0x30 /* Enable Interrupts through control register */
++#define GPSC_DISABLE_INTERRUPTS 0x00 /* Disable Interrupts through control register */
++
++
++/* ************************* */
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_stop_tx
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static void hh2serial_stop_tx(struct uart_port *port)
++{
++ struct hh2serial_dev *priv = container_of(port, struct hh2serial_dev, port);
++ FUNC_ENTER();
++ priv->tx_enabled = false;
++}
++
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_spi_get_rx_len
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++#ifndef HH2_NO_SPI
++/* Reads status register from HH2 */
++/* Negative for error */
++int hh2serial_spi_get_rx_len(struct hh2serial_dev *hh2serial)
++{
++ struct spi_device *spi = hh2serial->spi;
++ int ret;
++ struct spi_message message;
++ struct spi_transfer x;
++ u8 *local_buf;
++ u8 *buf_ptr;
++
++ FUNC_ENTER();
++
++ spi_message_init(&message);
++ memset(&x, 0, sizeof x);
++#ifndef HH2SERIAL_SPI_16BIT
++ x.len = 1;
++#else
++ x.len = 2;
++#endif
++ spi_message_add_tail(&x, &message);
++
++ local_buf = kzalloc((x.len * 2), GFP_KERNEL);
++ if (!local_buf)
++ return -ENOMEM;
++
++
++#ifndef HH2SERIAL_SPI_16BIT
++ local_buf[0] = GPSD_SRREAD;
++#else /* if 16 bit, write control to get status */
++ local_buf[1] = GPSD_CRWRITE;
++ local_buf[0] = GPSC_CLEAR_TERR | GPSC_CLEAR_RERR;
++ /*FIXME if not clearing errors */
++ //local_buf[0] = 0;
++#endif
++ x.tx_buf = local_buf;
++ x.rx_buf = local_buf + x.len;
++
++ x.cs_change = 0;
++ x.speed_hz = 1562500;
++
++ /* do the i/o */
++ ret = spi_sync(spi, &message);
++ if (ret == 0)
++ {
++
++ buf_ptr = x.rx_buf;
++
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk(KERN_INFO "hh2serial RD:%02X, %02X\n",
++ *buf_ptr,
++ buf_ptr[1]);
++#endif
++
++#ifndef HH2SERIAL_SPI_16BIT
++ /* 8 bit First byte is status register */
++ /* Available bytes */
++ ret = *buf_ptr & GPSS_TCNT;
++
++ /* Check buffer overrun or underrun errors */
++ if (*buf_ptr & GPSS_TERR)
++ printk(KERN_INFO "hh2serial HH2 transmitter underrun!\n");
++
++ if (*buf_ptr & GPSS_RERR)
++ printk(KERN_INFO "hh2serial HH2 receiver overrun!\n");
++
++#else
++ /* 16 bit second byte is status register */
++ /* Available bytes */
++ ret = buf_ptr[1] & GPSS_TCNT;
++
++ /* Check buffer overrun or underrun errors */
++ if (buf_ptr[1] & GPSS_TERR)
++ printk(KERN_INFO "hh2serial HH2 transmitter underrun!\n");
++
++ if (buf_ptr[1] & GPSS_RERR)
++ printk(KERN_INFO "hh2serial HH2 receiver overrun!\n");
++#endif
++ /* Take care of errors */
++ /* FIX ME */
++
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk(KERN_INFO "hh2serial SR:%02X, rx len %d\n",
++ buf_ptr[1],
++ ret);
++#endif
++ }
++
++ kfree(local_buf);
++ return ret;
++
++}
++#endif
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_spi_read
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++#ifndef HH2_NO_SPI
++/* Reads maximum 18 bytes of data from SPI buffer */
++int hh2serial_spi_read(struct hh2serial_dev *hh2serial,
++ u8 *rxbuf, u8 *spiAvailData, unsigned len)
++{
++ struct spi_device *spi = hh2serial->spi;
++ int status, available_rd;
++ struct spi_message message;
++ struct spi_transfer x;
++ u8 *local_buf;
++ u8 *buf_ptr;
++ unsigned len_inc_hdr;
++
++ FUNC_ENTER();
++ /* FIXME check header */
++ if ((len * 2) > HH2SERIAL_BUFSIZE || !rxbuf)
++ return -EINVAL;
++
++ spi_message_init(&message);
++ memset(&x, 0, sizeof x);
++
++ /* Add header length */
++#ifndef HH2SERIAL_SPI_16BIT
++ len_inc_hdr = len+1;
++#else
++ len_inc_hdr = len;
++#endif
++
++ x.len = len_inc_hdr;
++ spi_message_add_tail(&x, &message);
++
++ local_buf = kzalloc(HH2SERIAL_BUFSIZE, GFP_KERNEL);
++ if (!local_buf)
++ return -ENOMEM;
++
++ /* Add DATA READ as every second byte */
++ local_buf[1] = GPSD_DREAD;
++#ifdef HH2SERIAL_SPI_16BIT
++ if (len_inc_hdr > 2)
++ {
++ int byte_index = 1;
++ while (byte_index < len_inc_hdr)
++ {
++ local_buf[byte_index] = GPSD_DREAD;
++ byte_index = byte_index + 2;
++ }
++ }
++
++#endif
++
++ x.tx_buf = local_buf;
++ x.rx_buf = local_buf + len_inc_hdr;
++
++
++ x.cs_change = 0;
++ x.speed_hz = 1562500;
++
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ if (len > 0)
++ {
++ int byte_index = 0;
++ printk(KERN_INFO "hh2serial_spi_read:\n:wr data");
++ while (byte_index < len_inc_hdr)
++ {
++ printk(KERN_INFO "%02X", (local_buf[byte_index++]));
++ }
++
++ printk(KERN_INFO "\n");
++
++
++ }
++#endif
++ /* do the i/o */
++ status = spi_sync(spi, &message);
++ if (status == 0)
++ {
++ /* First byte of read data */
++ buf_ptr = x.rx_buf;
++
++#ifndef HH2SERIAL_SPI_16BIT
++ /* 8 bit First byte is status register */
++ /* Available bytes */
++ available_rd = *buf_ptr & GPSS_TCNT;
++
++ /* Check buffer overrun or underrun errors */
++ if (*buf_ptr & GPSS_TERR)
++ printk(KERN_INFO "hh2serial HH2 transmitter underrun!\n");
++
++ if (*buf_ptr & GPSS_RERR)
++ printk(KERN_INFO "hh2serial HH2 receiver overrun!\n");
++#else
++ /* 16 bit second byte is status register */
++ /* Every other byte is status register */
++ /* Last status register contains Available bytes at end of op*/
++ /* This is status before the last byte is read, so -1 */
++ available_rd = (buf_ptr[len_inc_hdr-1] & GPSS_TCNT) - 1;
++
++ /* Check buffer overrun or underrun errors */
++ if (buf_ptr[len_inc_hdr-1] & GPSS_TERR)
++ printk(KERN_INFO "hh2serial HH2 transmitter underrun!\n");
++
++ if (buf_ptr[len_inc_hdr-1] & GPSS_RERR)
++ printk(KERN_INFO "hh2serial HH2 receiver overrun!\n");
++#endif
++
++
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk(KERN_INFO "hh2serial_spi_read len inc hdr wr:%d, avail rd %d, cs_change:%d\n",
++ len_inc_hdr,
++ available_rd,
++ x.cs_change);
++ printk(KERN_INFO "hh2serial_spi_read:%02X, %02X\n",
++ *buf_ptr,
++ buf_ptr[1]);
++
++#endif
++
++ /* Don't copy status byte */
++#ifndef HH2SERIAL_SPI_16BIT
++ buf_ptr++;
++#endif
++
++ *spiAvailData = available_rd;
++ memcpy(rxbuf, buf_ptr, len);
++
++ /* Print incoming message */
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ if (len > 0)
++ {
++ int byte_index = 0;
++ printk(KERN_INFO "hh2serial_spi_read:\n:rd data");
++ while (byte_index < len)
++ {
++ printk(KERN_INFO "%02X", (rxbuf[byte_index++]));
++ }
++ printk(KERN_INFO "\n");
++
++ }
++#endif
++
++ }
++
++ kfree(local_buf);
++ return status;
++}
++#endif
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_spi_write
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++#ifndef HH2_NO_SPI
++int hh2serial_spi_write(struct hh2serial_dev *hh2serial,
++ const u8 *txbuf, u8 *spiAvailData, unsigned len)
++{
++ struct spi_device *spi = hh2serial->spi;
++ int status, available_rd;
++ struct spi_message message;
++ struct spi_transfer x;
++ u8 *local_buf;
++ u8 *buf_ptr;
++ unsigned len_inc_hdr;
++
++ FUNC_ENTER();
++
++ if ((len * 2) > HH2SERIAL_BUFSIZE )
++ return -EINVAL;
++
++
++ spi_message_init(&message);
++ memset(&x, 0, sizeof x);
++
++ /* Add header length */
++#ifndef HH2SERIAL_SPI_16BIT
++ len_inc_hdr = len+1;
++#else
++ len_inc_hdr = len;
++#endif
++
++ x.len = len_inc_hdr;
++ spi_message_add_tail(&x, &message);
++
++ /* Allocate and make room for 1 byte header */
++ local_buf = kzalloc(HH2SERIAL_BUFSIZE+1, GFP_KERNEL);
++ if (!local_buf)
++ return -ENOMEM;
++
++ /* Add write header */
++ local_buf[1] = GPSD_DWRITE;
++ local_buf[0] = txbuf[0];
++
++
++#ifndef HH2SERIAL_SPI_16BIT
++ memcpy(&(local_buf[1]), txbuf, len);
++#else
++ if (len_inc_hdr > 2)
++ {
++ int byte_index = 2;
++ while (byte_index < len_inc_hdr)
++ {
++
++ local_buf[byte_index] = txbuf[byte_index];
++ local_buf[byte_index+1] = GPSD_DWRITE;
++ byte_index = byte_index + 2;
++ }
++ }
++#endif
++
++ x.tx_buf = local_buf;
++ x.rx_buf = local_buf +(len_inc_hdr);
++
++ x.cs_change = 0;
++ x.speed_hz = 1562500;
++
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ if (len > 0)
++ {
++ int byte_index = 0;
++ printk(KERN_INFO "hh2serial_spi_write:\n:wr data");
++ while (byte_index < len_inc_hdr)
++ {
++ printk(KERN_INFO "%02X", (local_buf[byte_index++]));
++ }
++ printk(KERN_INFO "\n");
++
++
++ }
++#endif
++
++ /* do the i/o */
++ status = spi_sync(spi, &message);
++ if (status == 0)
++ {
++ /* read data */
++ buf_ptr = x.rx_buf;
++
++#ifndef HH2SERIAL_SPI_16BIT
++ /* 8 bit First byte is status register */
++ /* Available bytes */
++ available_rd = *buf_ptr & GPSS_TCNT;
++
++ /* Check buffer overrun or underrun errors */
++ if (*buf_ptr & GPSS_TERR)
++ printk(KERN_INFO "hh2serial HH2 transmitter underrun!\n");
++
++ if (*buf_ptr & GPSS_RERR)
++ printk(KERN_INFO "hh2serial HH2 receiver overrun!\n");
++#else
++ /* 16 bit second byte is status register */
++ /* Available bytes */
++ available_rd = buf_ptr[1] & GPSS_TCNT;
++
++ /* Check buffer overrun or underrun errors */
++ if (buf_ptr[1] & GPSS_TERR)
++ printk(KERN_INFO "hh2serial HH2 transmitter underrun!\n");
++
++ if (buf_ptr[1] & GPSS_RERR)
++ printk(KERN_INFO "hh2serial HH2 receiver overrun!\n");
++#endif
++
++
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk(KERN_INFO "hh2serial_spi_write:%02X, %02X\n",
++ *buf_ptr,
++ buf_ptr[1]);
++
++ printk(KERN_INFO "hh2serial_spi_write: wr:%d, avail rd %d\n",
++ len,
++ available_rd);
++#endif
++
++ *spiAvailData = available_rd;
++
++
++ }
++
++
++
++ kfree(local_buf);
++ return status;
++}
++#endif
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_write2tty
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static void hh2serial_write2tty(
++ struct hh2serial_dev *priv, unsigned char *str, int len)
++{
++ struct uart_port *port = &priv->port;
++ struct tty_struct *tty;
++ int usable;
++
++ FUNC_ENTER();
++
++ /* if uart is not opened, will just return */
++ if (!port->state)
++ return;
++
++ tty = port->state->port.tty;
++ if (!tty)
++ return; /* receive some char before the tty is opened */
++
++ /* MRB could lock forever if no space in tty buffer */
++ while (len) {
++ usable = tty_buffer_request_room(tty, len);
++ if (usable) {
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk(KERN_INFO "hh2serial_output_tty buf space: %d\n", usable);
++#endif
++ tty_insert_flip_string(tty, str, usable);
++ str += usable;
++ port->icount.rx += usable;
++ tty_flip_buffer_push(tty);
++ }
++ len -= usable;
++ }
++}
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_write_circ_buf2spi
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++#ifndef HH2_NO_SPI
++static inline void hh2serial_write_circ_buf2spi(struct hh2serial_dev *priv,
++ struct circ_buf *xmit)
++{
++ int len, left = 0;
++#ifndef HH2SERIAL_SPI_16BIT
++ u8 obuf[HH2SERIAL_SPI_MAX_BYTES], ibuf[HH2SERIAL_SPI_MAX_BYTES];
++#else
++ u16 obuf[HH2SERIAL_SPI_MAX_BYTES], ibuf[HH2SERIAL_SPI_MAX_BYTES];
++#endif
++ u8 rxlen;
++ u8 valid_str[HH2SERIAL_SPI_MAX_BYTES];
++
++ int i, j;
++
++ FUNC_ENTER();
++
++ while (!uart_circ_empty(xmit)) {
++ /*
++ printk(KERN_INFO "MrB set CR get SR: %d\n",
++ hh2serial_spi_get_rx_len(priv));
++ */
++
++ left = uart_circ_chars_pending(xmit);
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk(KERN_INFO "Bytes in circ buffer: %d\n", left);
++#endif
++ while (left) {
++ /* MrB Change below to 1 and word length to 16 to write 16 bit
++ word by word */
++#ifndef HH2SERIAL_SPI_16BIT
++ len = (left >= HH2SERIAL_SPI_MAX_BYTES) ? HH2SERIAL_SPI_MAX_BYTES : left;
++#else
++ len = (left >= HH2SERIAL_SPI_MAX_BYTES) ? HH2SERIAL_SPI_MAX_BYTES : left;
++#endif
++
++ memset(obuf, 0, len);
++ memset(ibuf, 0, len);
++ for (i = 0; i < len; i++) {
++
++ obuf[i] = (u8)xmit->buf[xmit->tail];
++
++ xmit->tail = (xmit->tail + 1) &
++ (UART_XMIT_SIZE - 1);
++ }
++#ifndef HH2SERIAL_SPI_16BIT
++
++ hh2serial_spi_write(priv, (u8 *)obuf,
++ &rxlen, len);
++
++#else
++ /* len * 2 since 16 bits instead of 8 bits */
++ hh2serial_spi_write(priv, (u8 *)obuf,
++ &rxlen, len*2);
++
++#endif
++ left -= len;
++ }
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk(KERN_INFO "hh2serial: Bytes avail to read: %d\n", rxlen);
++#endif
++ /* Read if available bytes */
++ /* FIXME: Could add a maximum read loop here */
++ while (rxlen > 0)
++ {
++
++ len = rxlen;
++#ifndef HH2SERIAL_SPI_16BIT
++ hh2serial_spi_read(priv, (u8 *)ibuf, &rxlen, len);
++#else
++ hh2serial_spi_read(priv, (u8 *)ibuf, &rxlen, len*2);
++#endif
++
++ for (i = 0, j = 0; i < len; i++) {
++ valid_str[j++] = (u8)(ibuf[i]);
++ }
++
++ if (j)
++ hh2serial_write2tty(priv, valid_str, j);
++
++ priv->port.icount.tx += len;
++ }
++ }
++}
++#endif
++
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_handle_tty_input
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static void hh2serial_handle_tty_input(struct hh2serial_dev *priv)
++{
++ struct uart_port *port = &priv->port;
++ struct circ_buf *xmit = &port->state->xmit;
++
++ FUNC_ENTER();
++
++ if (uart_circ_empty(xmit) || uart_tx_stopped(port))
++ return;
++#ifndef HH2_NO_SPI
++ hh2serial_write_circ_buf2spi(priv, xmit);
++#endif
++ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
++ uart_write_wakeup(port);
++
++ if (uart_circ_empty(xmit))
++ hh2serial_stop_tx(port);
++}
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_transfer_spi2tty
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static void hh2serial_transfer_spi2tty(struct hh2serial_dev *priv)
++{
++ int loop = 10, len;
++ int i, j;
++ u8 valid_str[HH2SERIAL_SPI_MAX_BYTES], rxlen = 0;
++#ifndef HH2SERIAL_SPI_16BIT
++ u8 ibuf[HH2SERIAL_SPI_MAX_BYTES];
++#else
++ u16 ibuf[HH2SERIAL_SPI_MAX_BYTES];
++#endif
++
++ FUNC_ENTER();
++
++ rxlen = hh2serial_spi_get_rx_len(priv);
++
++ /* FIXME No of loops to be investigated */
++ while (rxlen > 0 && loop > 0)
++ {
++
++ len = rxlen;
++#ifndef HH2SERIAL_SPI_16BIT
++ hh2serial_spi_read(priv, (u8 *)ibuf, &rxlen, len);
++#else
++ hh2serial_spi_read(priv, (u8 *)ibuf, &rxlen, len*2);
++#endif
++
++ for (i = 0, j = 0; i < len; i++) {
++ valid_str[j++] = (u8)(ibuf[i]);
++ }
++
++ if (j)
++ hh2serial_write2tty(priv, valid_str, j);
++
++ priv->port.icount.tx += len;
++
++ loop--;
++ }
++
++}
++
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_main_thread
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static int hh2serial_main_thread(void *_priv)
++{
++ struct hh2serial_dev *priv = _priv;
++ wait_queue_head_t *wq = &priv->wq;
++
++ int ret = 0;
++
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk(KERN_INFO "hh2serial: start main thread\n");
++#endif
++ init_waitqueue_head(wq);
++
++ do {
++ //udelay(delay);
++ wait_event_interruptible(*wq, (atomic_read(&priv->spi_irq_pending) ||
++ atomic_read(&priv->spi_need_read) ||
++ atomic_read(&priv->tty_need_read) ||
++ kthread_should_stop()));
++
++ priv->mthread_up = 1;
++
++ /* tty has data to be read */
++ if (atomic_read(&priv->tty_need_read)) {
++ atomic_set(&priv->tty_need_read, 0);
++ /* Read from tty send to spi */
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk(KERN_INFO "hh2serial: Read from tty send to spi\n");
++#endif
++ /* Read from tty send to spi */
++ /* Receive data from spi send to UART */
++
++ hh2serial_handle_tty_input(priv);
++
++ }
++
++#ifdef HH2SERIAL_SPI_POLL
++ if (atomic_read(&priv->spi_need_read)) {
++ atomic_set(&priv->spi_need_read, 0);
++ /* Read from SPI send to UART */
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk(KERN_INFO "hh2serial: Read from SPI send to UART\n");
++#endif
++#ifndef HH2_TTY_SEND_POLL
++ hh2serial_transfer_spi2tty(priv);
++#else
++ if (priv->tx_enabled) {
++ struct uart_port *port = &priv->port;
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk("TX enabled!\n");
++#endif
++ spin_lock_irqsave(&port->lock, flags);
++
++
++ if (priv->rx_enabled) {
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk(KERN_INFO "RX enabled!\n");
++#endif
++ hh2serial_write2tty(priv, "testar", 6);
++ }
++
++
++ spin_unlock_irqrestore(&port->lock, flags);
++ }
++#endif /* HH2_TTY_SEND_POLL */
++
++ }
++#endif
++
++
++
++ if (atomic_read(&priv->spi_irq_pending)) {
++ atomic_set(&priv->spi_irq_pending, 0);
++ /* Read from SPI send to UART */
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk(KERN_INFO "hh2serial: Read from SPI send to UART\n");
++#endif
++ }
++
++
++ priv->mthread_up = 0;
++ } while (!kthread_should_stop());
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk(KERN_INFO "hh2serial: stopped main thread\n");
++#endif
++ return ret;
++}
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_poll_thread
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++#ifdef HH2SERIAL_SPI_POLL
++static int hh2serial_poll_thread(void *_priv)
++{
++
++ int ret = 0;
++ struct hh2serial_dev *priv = _priv;
++
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk(KERN_INFO "hh2serial: start poll thread\n");
++#endif
++ do {
++ //udelay(delay);
++
++ if (HH2SERIAL_POLL_TIMEOUT > 999)
++ ssleep(HH2SERIAL_POLL_TIMEOUT/1000);
++ else
++ msleep(HH2SERIAL_POLL_TIMEOUT);
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk(KERN_INFO "hh2serial: poll\n");
++#endif
++ if (!priv->mthread_up)
++ {
++ /* Send poll event to main */
++ if (!atomic_read(&priv->spi_need_read)) {
++ atomic_set(&priv->spi_need_read, 1);
++ wake_up_process(priv->main_thread);
++ }
++ }
++
++ } while (!kthread_should_stop());
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk(KERN_INFO "hh2serial: stopped poll thread\n");
++#endif
++ return ret;
++}
++#endif /* #ifdef HH2SERIAL_SPI_POLL */
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_tx_empty
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static unsigned int hh2serial_tx_empty(struct uart_port *port)
++{
++ FUNC_ENTER();
++ return TIOCSER_TEMT;
++}
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_set_mctrl
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static void hh2serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
++{
++ FUNC_ENTER();
++
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk("MCTRL RTS: %d\n", mctrl & TIOCM_RTS);
++ printk("MCTRL DTR: %d\n", mctrl & TIOCM_DTR);
++ printk("MCTRL OUT1: %d\n", mctrl & TIOCM_OUT1);
++ printk("MCTRL OUT2: %d\n", mctrl & TIOCM_OUT2);
++ printk("MCTRL LOOP: %d\n", mctrl & TIOCM_LOOP);
++#endif
++}
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_get_mctrl
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static unsigned int hh2serial_get_mctrl(struct uart_port *port)
++{
++ FUNC_ENTER();
++ return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
++}
++
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_tx_chars
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static void hh2serial_tx_chars(struct uart_port *port)
++{
++#ifndef HH2_TTY_ECHO
++ struct hh2serial_dev *priv = container_of(port, struct hh2serial_dev, port);
++
++ FUNC_ENTER();
++
++ if (priv->tx_enabled) {
++
++ /* if writing to SPI enabled */
++
++ /* Send message to main thread to read from tty send to SPI */
++ /* Send poll event to main */
++ if (!atomic_read(&priv->tty_need_read)) {
++ atomic_set(&priv->tty_need_read, 1);
++ wake_up_process(priv->main_thread);
++ }
++
++
++ }
++
++#else
++ struct hh2serial_dev *priv = container_of(port, struct hh2serial_dev, port);
++ struct circ_buf *xmit = &port->state->xmit;
++
++
++
++ struct uart_port *recv_port = &priv->port;
++ struct tty_struct *recv_tty;
++
++ unsigned long flags;
++ char ch;
++
++ FUNC_ENTER();
++
++ if (priv->tx_enabled) {
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk("TX enabled!\n");
++#endif
++ //spin_lock_irqsave(&other_port->lock, flags);
++ if (priv->rx_enabled) {
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk("RX enabled!\n");
++#endif
++
++ recv_tty = recv_port->state->port.tty;
++
++ if (port->x_char) {
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk("One char %c!\n", port->x_char);
++#endif
++ tty_insert_flip_char(recv_tty, port->x_char, TTY_NORMAL);
++ tty_flip_buffer_push(recv_tty);
++ port->icount.tx++;
++ port->x_char = 0;
++ return;
++ }
++
++ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ pr_debug("STOP TX_CHARS 1\n");
++#endif
++ hh2serial_stop_tx(port);
++ return;
++ }
++
++ while (!uart_circ_empty(xmit)) {
++
++ ch = xmit->buf[xmit->tail];
++ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk("Loop one char %c!\n", ch);
++#endif
++ tty_insert_flip_char(recv_tty, ch, TTY_NORMAL);
++ tty_flip_buffer_push(recv_tty);
++ port->icount.tx++;
++ }
++
++ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
++ {
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk("Uart wakeup!\n");
++#endif
++ uart_write_wakeup(port);
++ }
++
++ if (uart_circ_empty(xmit)) {
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ pr_debug("STOP TX_CHARS 2\n");
++#endif
++ hh2serial_stop_tx(port);
++ }
++ }
++ else
++ {
++#ifdef HH2SERIAL_ENABLE_DEBUG
++ printk("Other port disabled!\n");
++#endif
++ }
++ //spin_unlock_irqrestore(&priv->other_priv->port.lock, flags);
++ }
++
++#endif
++
++}
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_start_tx
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static void hh2serial_start_tx(struct uart_port *port)
++{
++ struct hh2serial_dev *priv = container_of(port, struct hh2serial_dev, port);
++ FUNC_ENTER();
++ priv->tx_enabled = true;
++
++ hh2serial_tx_chars(port);
++}
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_stop_rx
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static void hh2serial_stop_rx(struct uart_port *port)
++{
++ struct hh2serial_dev *priv = container_of(port, struct hh2serial_dev, port);
++ FUNC_ENTER();
++ priv->rx_enabled = false;
++}
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_enable_ms
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static void hh2serial_enable_ms(struct uart_port *port)
++{
++ FUNC_ENTER();
++}
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_break_ctl
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static void hh2serial_break_ctl(struct uart_port *port, int break_state)
++{
++ FUNC_ENTER();
++}
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_startup
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static int hh2serial_startup(struct uart_port *port)
++{
++ struct hh2serial_dev *priv = container_of(port, struct hh2serial_dev, port);
++ FUNC_ENTER();
++
++#ifdef HH2SERIAL_SPI_POLL
++ priv->poll_thread = kthread_run(hh2serial_poll_thread,
++ priv, "hh2serial_poll");
++ if (IS_ERR(priv->poll_thread)) {
++ printk(KERN_INFO "hh2serial Failed to start poll thread: %ld",
++ PTR_ERR(priv->poll_thread));
++ }
++#endif
++
++ spin_lock(&port->lock);
++ priv->rx_enabled = true;
++ spin_unlock(&port->lock);
++ return 0;
++}
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_shutdown
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static void hh2serial_shutdown(struct uart_port *port)
++{
++#ifdef HH2SERIAL_SPI_POLL
++ struct hh2serial_dev *priv = container_of(port, struct hh2serial_dev, port);
++#endif
++ FUNC_ENTER();
++#ifdef HH2SERIAL_SPI_POLL
++ if (priv->poll_thread)
++ kthread_stop(priv->poll_thread);
++#endif
++}
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_set_termios
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static void hh2serial_set_termios(struct uart_port *port,
++ struct ktermios *termios,
++ struct ktermios *old)
++{
++ FUNC_ENTER();
++
++ switch (termios->c_cflag & CSIZE) {
++ case CS5:
++ pr_debug("CS5: data bits 5\n");
++ break;
++ case CS6:
++ pr_debug("CS6: data bits 6\n");
++ break;
++ case CS7:
++ pr_debug("CS7: data bits 7\n");
++ break;
++ case CS8:
++ pr_debug("CS8: data bits 8\n");
++ break;
++ default:
++ pr_debug("CS: Unknown\n");
++ break;
++ }
++
++ if (termios->c_cflag & PARENB) {
++ if (termios->c_cflag & PARODD)
++ pr_debug("PARITY ODD\n");
++ else
++ pr_debug("PARITY EVEN\n");
++ } else {
++ pr_debug("PARITY NONE\n");
++ }
++
++ if (termios->c_cflag & CSTOPB)
++ pr_debug("STOP BITS 2\n");
++ else
++ pr_debug("STOP BITS 1\n");
++
++ if (termios->c_cflag & CRTSCTS)
++ pr_debug("RTS CTS ENABLED\n");
++ else
++ pr_debug("RTS CTS DISABLED\n");
++}
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_type
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static const char *hh2serial_type(struct uart_port *port)
++{
++ FUNC_ENTER();
++ return "VUART";
++}
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_request_port
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static int hh2serial_request_port(struct uart_port *port)
++{
++ FUNC_ENTER();
++ return 0;
++}
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_config_port
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static void hh2serial_config_port(struct uart_port *port, int flags)
++{
++ FUNC_ENTER();
++
++ if (flags & UART_CONFIG_TYPE)
++ port->type = PORT_16550A;
++}
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_release_port
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static void hh2serial_release_port(struct uart_port *port)
++{
++ FUNC_ENTER();
++}
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_verify_port
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static int hh2serial_verify_port(struct uart_port *port, struct serial_struct *ser)
++{
++ FUNC_ENTER();
++ return 0;
++}
++
++static struct uart_ops hh2serial_uart_ops = {
++ .tx_empty = hh2serial_tx_empty,
++ .set_mctrl = hh2serial_set_mctrl,
++ .get_mctrl = hh2serial_get_mctrl,
++ .stop_tx = hh2serial_stop_tx,
++ .start_tx = hh2serial_start_tx,
++ .stop_rx = hh2serial_stop_rx,
++ .enable_ms = hh2serial_enable_ms,
++ .break_ctl = hh2serial_break_ctl,
++ .startup = hh2serial_startup,
++ .shutdown = hh2serial_shutdown,
++ .set_termios = hh2serial_set_termios,
++ .type = hh2serial_type,
++ .release_port = hh2serial_release_port,
++ .request_port = hh2serial_request_port,
++ .config_port = hh2serial_config_port,
++ .verify_port = hh2serial_verify_port,
++};
++
++#ifndef HH2_NO_SPI
++/* pure SPI related functions */
++/*******************************************************************************
++ * FUNCTION: serial_hh2serial_suspend
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static int serial_hh2serial_suspend(struct spi_device *spi, pm_message_t state)
++{
++ FUNC_ENTER();
++ return 0;
++}
++
++/*******************************************************************************
++ * FUNCTION: serial_hh2serial_resume
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static int serial_hh2serial_resume(struct spi_device *spi)
++{
++ FUNC_ENTER();
++ return 0;
++}
++
++
++static struct mrst_spi_chip hh2spi0 = {
++ .poll_mode = 1,
++ .enable_dma = 0,
++ .type = SPI_FRF_SPI,
++};
++
++
++/*******************************************************************************
++ * FUNCTION: serial_hh2serial_probe
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static int serial_hh2serial_probe(struct spi_device *spi)
++{
++ FUNC_ENTER();
++#ifndef HH2_NO_SPI
++
++ /* set spi info */
++ spi->mode = SPI_MODE_0;
++#ifndef HH2SERIAL_SPI_16BIT
++ spi->bits_per_word = 8; /* HH2 uses 8 bits */
++#else
++ spi->bits_per_word = 16; /* HH2 uses 8 bits, test with 16, sends byte by byte */
++#endif
++
++ spi->controller_data = &hh2spi0;
++
++ spi_setup(spi);
++ priv0.spi = spi;
++ atomic_set(&priv0.spi_irq_pending, 0);
++#endif
++
++
++ return 0;
++
++
++}
++
++/*******************************************************************************
++ * FUNCTION: hh2serial_remove
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static int hh2serial_remove(struct spi_device *dev)
++{
++ FUNC_ENTER();
++
++ return 0;
++}
++
++
++static struct spi_driver spi_hh2serial_driver = {
++ .driver = {
++ .name = "spi_ifx_gps",
++ //.name = "spi_flash",
++ .bus = &spi_bus_type,
++ .owner = THIS_MODULE,
++ },
++ .probe = serial_hh2serial_probe,
++ .remove = __devexit_p(hh2serial_remove),
++ .suspend = serial_hh2serial_suspend,
++ .resume = serial_hh2serial_resume,
++};
++
++#endif
++
++static struct uart_driver hh2serial_driver = {
++ .owner = THIS_MODULE,
++ .driver_name = driver_name,
++ .dev_name = tty_dev_name,
++ .major = 240,
++ .minor = 0,
++ .nr = 1,
++};
++
++/*******************************************************************************
++ * FUNCTION: __init
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static int __init
++hh2serial_init (void)
++{
++ int ret;
++
++ ret = uart_register_driver(&hh2serial_driver);
++
++ if (ret) {
++ pr_err("%s: could not register UART driver\n", driver_name);
++ goto out_register_driver;
++ }
++
++ memset(&priv0, sizeof(struct hh2serial_dev), 0);
++ priv0.port.line = 0;
++ priv0.port.ops = &hh2serial_uart_ops;
++ priv0.port.type = PORT_16550A;
++ spin_lock_init(&priv0.port.lock);
++
++ ret = uart_add_one_port(&hh2serial_driver, &priv0.port);
++
++ if (ret) {
++ pr_err("%s: could not add port hh2serial0\n", driver_name);
++ goto out_add_port0;
++ }
++
++ atomic_set(&priv0.spi_need_read, 0);
++ atomic_set(&priv0.tty_need_read, 0);
++ atomic_set(&priv0.spi_irq_pending, 0);
++
++
++
++#ifndef HH2_NO_SPI
++ /* Register SPI device driver*/
++ ret = spi_register_driver(&spi_hh2serial_driver);
++ if (ret)
++ {
++ pr_err("%s: could not register driver spi_hh2serial_driver\n", driver_name);
++ goto out_add_spi;
++ }
++#endif
++
++
++ priv0.main_thread = kthread_run(hh2serial_main_thread,
++ &priv0, "hh2serial_main");
++ if (IS_ERR(priv0.main_thread)) {
++ ret = PTR_ERR(priv0.main_thread);
++ goto err_kthread;
++ }
++
++
++
++ printk ("Module %s loaded\n", driver_name);
++ return 0;
++
++err_kthread:
++
++#ifndef HH2_NO_SPI
++out_add_spi:
++ uart_remove_one_port(&hh2serial_driver, &priv0.port);
++#endif
++out_add_port0:
++ uart_unregister_driver(&hh2serial_driver);
++out_register_driver:
++ return ret;
++}
++
++/*******************************************************************************
++ * FUNCTION: __exit
++ *
++ * DESCRIPTION:
++ *
++ * PARAMETERS:
++ *
++ * RETURN:
++ *
++ ******************************************************************************/
++static void __exit
++hh2serial_exit (void)
++{
++ if (priv0.main_thread)
++ kthread_stop(priv0.main_thread);
++
++#ifndef HH2_NO_SPI
++ /* unregister SPI driver */
++ spi_unregister_driver(&spi_hh2serial_driver);
++#endif
++ uart_remove_one_port(&hh2serial_driver, &priv0.port);
++
++ uart_unregister_driver(&hh2serial_driver);
++ printk ("Module %s removed\n", driver_name);
++}
++
++
++
++module_init(hh2serial_init);
++module_exit(hh2serial_exit);
+Index: linux-2.6.33/drivers/misc/intel_mrst.c
+===================================================================
+--- linux-2.6.33.orig/drivers/misc/intel_mrst.c
++++ linux-2.6.33/drivers/misc/intel_mrst.c
+@@ -131,9 +131,11 @@ static int intel_mrst_bringup_8688_sdio2
+ {
+ unsigned int temp = 0;
+
+- /* Register 0xf4 has 2 GPIO lines connected to the MRVL 8688:
++ /* Register 0xf4 has 4 GPIO lines connected to the MRVL 8688 * IFX GPS:
+ * bit 4: PDn
+- * bit 3: WiFi RESETn */
++ * bit 3: WiFi RESETn
++ * bit 2: GPS RESET_N
++ * bit 1: GPS PD_N*/
+
+ intel_mrst_pmic_read(0xf4, &temp);
+ temp = temp|0x8;
+@@ -142,6 +144,12 @@ static int intel_mrst_bringup_8688_sdio2
+ temp = temp|0x10;
+ intel_mrst_pmic_write(0xf4, temp);
+
++ temp = temp|0x04;
++ intel_mrst_pmic_write(0xf4, temp);
++
++ temp = temp|0x02;
++ intel_mrst_pmic_write(0xf4, temp);
++
+ return 0;
+ }
+
+@@ -187,10 +195,10 @@ static int __init intel_mrst_module_init
+ /* We only need the following PMIC register initializations if
+ * we are using the Marvell 8688 WLAN card on the SDIO2 port */
+
+-#ifdef CONFIG_8688_RC
++#if defined(CONFIG_8688_RC) || defined(CONFIG_LIBERTAS_SDIO) || defined(CONFIG_SPI_IFX_GPS)
+
+ printk(KERN_INFO "intel_mrst_module_init: bringing up power for "
+- "8688 WLAN on SDIO2...\n");
++ "8688 WLAN on SDIO2 & IFX GPS over SPI...\n");
+ ret = intel_mrst_bringup_8688_sdio2();
+
+ #endif /* CONFIG_8688_RC */
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-img-graphics-driver-5.3.0.0007.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-img-graphics-driver-5.3.0.0007.patch
new file mode 100644
index 0000000..65048c8
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-img-graphics-driver-5.3.0.0007.patch
@@ -0,0 +1,106773 @@
+From e6fbc1d68e24c1526e9e30d1d2381a77697f3b1d Mon Sep 17 00:00:00 2001
+From: Prajwal Mohan <prajwal.karur.mohan@intel.com>
+Date: Thu, 13 May 2010 14:50:27 -0700
+Subject: [PATCH] IMG graphics driver consolidation patch
+
+Signed-off-by: Prajwal Mohan <prajwal.karur.mohan@intel.com>
+Patch-mainline: 2.6.35?
+---
+ drivers/gpu/drm/Kconfig | 2 +
+ drivers/gpu/drm/Makefile | 3 +-
+ drivers/gpu/drm/drm_crtc.c | 2 +
+ drivers/gpu/drm/drm_drv.c | 11 +-
+ drivers/gpu/drm/drm_global.c | 107 +
+ drivers/gpu/drm/drm_irq.c | 27 +
+ drivers/gpu/drm/mrst/Kconfig | 220 ++
+ drivers/gpu/drm/mrst/Makefile | 169 +
+ drivers/gpu/drm/mrst/drv/lnc_topaz.c | 714 ++++
+ drivers/gpu/drm/mrst/drv/lnc_topaz.h | 925 ++++++
+ drivers/gpu/drm/mrst/drv/lnc_topazinit.c | 2051 ++++++++++++
+ drivers/gpu/drm/mrst/drv/msvdx_power.c | 164 +
+ drivers/gpu/drm/mrst/drv/msvdx_power.h | 48 +
+ drivers/gpu/drm/mrst/drv/psb_bl.c | 260 ++
+ drivers/gpu/drm/mrst/drv/psb_buffer.c | 379 +++
+ drivers/gpu/drm/mrst/drv/psb_dpst.c | 254 ++
+ drivers/gpu/drm/mrst/drv/psb_dpst.h | 98 +
+ drivers/gpu/drm/mrst/drv/psb_drm.h | 634 ++++
+ drivers/gpu/drm/mrst/drv/psb_drv.c | 2218 +++++++++++++
+ drivers/gpu/drm/mrst/drv/psb_drv.h | 1025 ++++++
+ drivers/gpu/drm/mrst/drv/psb_fb.c | 1817 +++++++++++
+ drivers/gpu/drm/mrst/drv/psb_fb.h | 49 +
+ drivers/gpu/drm/mrst/drv/psb_fence.c | 158 +
+ drivers/gpu/drm/mrst/drv/psb_gtt.c | 1040 ++++++
+ drivers/gpu/drm/mrst/drv/psb_gtt.h | 111 +
+ drivers/gpu/drm/mrst/drv/psb_hotplug.c | 425 +++
+ drivers/gpu/drm/mrst/drv/psb_hotplug.h | 90 +
+ drivers/gpu/drm/mrst/drv/psb_intel_bios.c | 305 ++
+ drivers/gpu/drm/mrst/drv/psb_intel_bios.h | 430 +++
+ drivers/gpu/drm/mrst/drv/psb_intel_display.c | 2538 +++++++++++++++
+ drivers/gpu/drm/mrst/drv/psb_intel_display.h | 25 +
+ drivers/gpu/drm/mrst/drv/psb_intel_drv.h | 283 ++
+ drivers/gpu/drm/mrst/drv/psb_intel_dsi.c | 2450 ++++++++++++++
+ drivers/gpu/drm/mrst/drv/psb_intel_dsi_aava.c | 996 ++++++
+ drivers/gpu/drm/mrst/drv/psb_intel_i2c.c | 172 +
+ drivers/gpu/drm/mrst/drv/psb_intel_lvds.c | 1385 ++++++++
+ drivers/gpu/drm/mrst/drv/psb_intel_modes.c | 77 +
+ drivers/gpu/drm/mrst/drv/psb_intel_reg.h | 1099 +++++++
+ drivers/gpu/drm/mrst/drv/psb_intel_sdvo.c | 1408 ++++++++
+ drivers/gpu/drm/mrst/drv/psb_intel_sdvo_regs.h | 338 ++
+ drivers/gpu/drm/mrst/drv/psb_mmu.c | 1010 ++++++
+ drivers/gpu/drm/mrst/drv/psb_msvdx.c | 1063 ++++++
+ drivers/gpu/drm/mrst/drv/psb_msvdx.h | 610 ++++
+ drivers/gpu/drm/mrst/drv/psb_msvdxinit.c | 770 +++++
+ drivers/gpu/drm/mrst/drv/psb_pvr_glue.c | 74 +
+ drivers/gpu/drm/mrst/drv/psb_pvr_glue.h | 26 +
+ drivers/gpu/drm/mrst/drv/psb_reg.h | 570 ++++
+ drivers/gpu/drm/mrst/drv/psb_reset.c | 209 ++
+ drivers/gpu/drm/mrst/drv/psb_schedule.c | 70 +
+ drivers/gpu/drm/mrst/drv/psb_schedule.h | 81 +
+ drivers/gpu/drm/mrst/drv/psb_setup.c | 35 +
+ drivers/gpu/drm/mrst/drv/psb_sgx.c | 929 ++++++
+ drivers/gpu/drm/mrst/drv/psb_sgx.h | 32 +
+ drivers/gpu/drm/mrst/drv/psb_socket.c | 376 +++
+ drivers/gpu/drm/mrst/drv/psb_ttm_glue.c | 344 ++
+ drivers/gpu/drm/mrst/drv/psb_umevents.c | 485 +++
+ drivers/gpu/drm/mrst/drv/psb_umevents.h | 154 +
+ drivers/gpu/drm/mrst/drv/topaz_power.c | 173 +
+ drivers/gpu/drm/mrst/drv/topaz_power.h | 53 +
+ drivers/gpu/drm/mrst/drv/ttm/ttm_agp_backend.c | 144 +
+ drivers/gpu/drm/mrst/drv/ttm/ttm_bo.c | 1729 ++++++++++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_bo_api.h | 573 ++++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_bo_driver.h | 862 +++++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_bo_util.c | 546 ++++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_bo_vm.c | 429 +++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_execbuf_util.c | 108 +
+ drivers/gpu/drm/mrst/drv/ttm/ttm_execbuf_util.h | 103 +
+ drivers/gpu/drm/mrst/drv/ttm/ttm_fence.c | 607 ++++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_fence_api.h | 272 ++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_fence_driver.h | 302 ++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_fence_user.c | 238 ++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_fence_user.h | 140 +
+ drivers/gpu/drm/mrst/drv/ttm/ttm_lock.c | 155 +
+ drivers/gpu/drm/mrst/drv/ttm/ttm_lock.h | 176 +
+ drivers/gpu/drm/mrst/drv/ttm/ttm_memory.c | 228 ++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_memory.h | 147 +
+ drivers/gpu/drm/mrst/drv/ttm/ttm_object.c | 440 +++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_object.h | 262 ++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_pat_compat.c | 164 +
+ drivers/gpu/drm/mrst/drv/ttm/ttm_pat_compat.h | 34 +
+ .../gpu/drm/mrst/drv/ttm/ttm_placement_common.h | 91 +
+ drivers/gpu/drm/mrst/drv/ttm/ttm_placement_user.c | 468 +++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_placement_user.h | 252 ++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_regman.h | 67 +
+ drivers/gpu/drm/mrst/drv/ttm/ttm_tt.c | 653 ++++
+ drivers/gpu/drm/mrst/drv/ttm/ttm_userobj_api.h | 72 +
+ drivers/gpu/drm/mrst/pvr/COPYING | 351 ++
+ drivers/gpu/drm/mrst/pvr/INSTALL | 76 +
+ drivers/gpu/drm/mrst/pvr/README | 48 +
+ drivers/gpu/drm/mrst/pvr/eurasiacon/.gitignore | 6 +
+ drivers/gpu/drm/mrst/pvr/include4/dbgdrvif.h | 298 ++
+ drivers/gpu/drm/mrst/pvr/include4/img_defs.h | 108 +
+ drivers/gpu/drm/mrst/pvr/include4/img_types.h | 128 +
+ drivers/gpu/drm/mrst/pvr/include4/ioctldef.h | 98 +
+ drivers/gpu/drm/mrst/pvr/include4/pdumpdefs.h | 99 +
+ drivers/gpu/drm/mrst/pvr/include4/pvr_debug.h | 127 +
+ drivers/gpu/drm/mrst/pvr/include4/pvrmodule.h | 31 +
+ drivers/gpu/drm/mrst/pvr/include4/pvrversion.h | 38 +
+ drivers/gpu/drm/mrst/pvr/include4/regpaths.h | 43 +
+ drivers/gpu/drm/mrst/pvr/include4/services.h | 872 +++++
+ drivers/gpu/drm/mrst/pvr/include4/servicesext.h | 648 ++++
+ drivers/gpu/drm/mrst/pvr/include4/sgx_options.h | 224 ++
+ drivers/gpu/drm/mrst/pvr/include4/sgxapi_km.h | 323 ++
+ drivers/gpu/drm/mrst/pvr/include4/sgxscript.h | 81 +
+ .../3rdparty/linux_framebuffer_mrst/.gitignore | 6 +
+ .../linux_framebuffer_mrst/makefile.linux.common | 41 +
+ .../3rdparty/linux_framebuffer_mrst/mrstlfb.h | 295 ++
+ .../linux_framebuffer_mrst/mrstlfb_displayclass.c | 2056 ++++++++++++
+ .../linux_framebuffer_mrst/mrstlfb_linux.c | 206 ++
+ .../services4/include/env/linux/pvr_drm_shared.h | 54 +
+ .../drm/mrst/pvr/services4/include/kernelbuffer.h | 60 +
+ .../drm/mrst/pvr/services4/include/kerneldisplay.h | 153 +
+ .../drm/mrst/pvr/services4/include/pvr_bridge.h | 1383 ++++++++
+ .../drm/mrst/pvr/services4/include/pvr_bridge_km.h | 288 ++
+ .../gpu/drm/mrst/pvr/services4/include/pvrmmap.h | 36 +
+ .../drm/mrst/pvr/services4/include/servicesint.h | 266 ++
+ .../drm/mrst/pvr/services4/include/sgx_bridge.h | 477 +++
+ .../drm/mrst/pvr/services4/include/sgx_mkif_km.h | 334 ++
+ .../gpu/drm/mrst/pvr/services4/include/sgxinfo.h | 288 ++
+ .../mrst/pvr/services4/srvkm/bridged/.gitignore | 5 +
+ .../services4/srvkm/bridged/bridged_pvr_bridge.c | 3426 ++++++++++++++++++++
+ .../services4/srvkm/bridged/bridged_pvr_bridge.h | 231 ++
+ .../pvr/services4/srvkm/bridged/bridged_support.c | 85 +
+ .../pvr/services4/srvkm/bridged/bridged_support.h | 43 +
+ .../srvkm/bridged/sgx/bridged_sgx_bridge.c | 2511 ++++++++++++++
+ .../srvkm/bridged/sgx/bridged_sgx_bridge.h | 42 +
+ .../drm/mrst/pvr/services4/srvkm/common/.gitignore | 5 +
+ .../pvr/services4/srvkm/common/buffer_manager.c | 2036 ++++++++++++
+ .../mrst/pvr/services4/srvkm/common/deviceclass.c | 1937 +++++++++++
+ .../mrst/pvr/services4/srvkm/common/devicemem.c | 1448 +++++++++
+ .../drm/mrst/pvr/services4/srvkm/common/handle.c | 1547 +++++++++
+ .../gpu/drm/mrst/pvr/services4/srvkm/common/hash.c | 463 +++
+ .../drm/mrst/pvr/services4/srvkm/common/lists.c | 99 +
+ .../gpu/drm/mrst/pvr/services4/srvkm/common/mem.c | 151 +
+ .../mrst/pvr/services4/srvkm/common/mem_debug.c | 250 ++
+ .../drm/mrst/pvr/services4/srvkm/common/metrics.c | 160 +
+ .../mrst/pvr/services4/srvkm/common/pdump_common.c | 1723 ++++++++++
+ .../drm/mrst/pvr/services4/srvkm/common/perproc.c | 283 ++
+ .../drm/mrst/pvr/services4/srvkm/common/power.c | 818 +++++
+ .../drm/mrst/pvr/services4/srvkm/common/pvrsrv.c | 1195 +++++++
+ .../drm/mrst/pvr/services4/srvkm/common/queue.c | 1137 +++++++
+ .../gpu/drm/mrst/pvr/services4/srvkm/common/ra.c | 1871 +++++++++++
+ .../drm/mrst/pvr/services4/srvkm/common/resman.c | 717 ++++
+ .../pvr/services4/srvkm/devices/sgx/.gitignore | 5 +
+ .../drm/mrst/pvr/services4/srvkm/devices/sgx/mmu.c | 2776 ++++++++++++++++
+ .../drm/mrst/pvr/services4/srvkm/devices/sgx/mmu.h | 139 +
+ .../drm/mrst/pvr/services4/srvkm/devices/sgx/pb.c | 458 +++
+ .../services4/srvkm/devices/sgx/sgx_bridge_km.h | 147 +
+ .../pvr/services4/srvkm/devices/sgx/sgxconfig.h | 134 +
+ .../pvr/services4/srvkm/devices/sgx/sgxinfokm.h | 352 ++
+ .../mrst/pvr/services4/srvkm/devices/sgx/sgxinit.c | 2218 +++++++++++++
+ .../mrst/pvr/services4/srvkm/devices/sgx/sgxkick.c | 744 +++++
+ .../pvr/services4/srvkm/devices/sgx/sgxpower.c | 453 +++
+ .../pvr/services4/srvkm/devices/sgx/sgxreset.c | 489 +++
+ .../pvr/services4/srvkm/devices/sgx/sgxtransfer.c | 543 ++++
+ .../pvr/services4/srvkm/devices/sgx/sgxutils.c | 928 ++++++
+ .../pvr/services4/srvkm/devices/sgx/sgxutils.h | 99 +
+ .../mrst/pvr/services4/srvkm/env/linux/.gitignore | 5 +
+ .../mrst/pvr/services4/srvkm/env/linux/env_data.h | 66 +
+ .../pvr/services4/srvkm/env/linux/env_perproc.h | 56 +
+ .../drm/mrst/pvr/services4/srvkm/env/linux/event.c | 273 ++
+ .../drm/mrst/pvr/services4/srvkm/env/linux/event.h | 32 +
+ .../mrst/pvr/services4/srvkm/env/linux/linkage.h | 61 +
+ .../drm/mrst/pvr/services4/srvkm/env/linux/lock.h | 32 +
+ .../drm/mrst/pvr/services4/srvkm/env/linux/mm.c | 2360 ++++++++++++++
+ .../drm/mrst/pvr/services4/srvkm/env/linux/mm.h | 331 ++
+ .../drm/mrst/pvr/services4/srvkm/env/linux/mmap.c | 1148 +++++++
+ .../drm/mrst/pvr/services4/srvkm/env/linux/mmap.h | 107 +
+ .../mrst/pvr/services4/srvkm/env/linux/module.c | 765 +++++
+ .../drm/mrst/pvr/services4/srvkm/env/linux/mutex.c | 136 +
+ .../drm/mrst/pvr/services4/srvkm/env/linux/mutex.h | 70 +
+ .../mrst/pvr/services4/srvkm/env/linux/mutils.c | 133 +
+ .../mrst/pvr/services4/srvkm/env/linux/mutils.h | 101 +
+ .../mrst/pvr/services4/srvkm/env/linux/osfunc.c | 2564 +++++++++++++++
+ .../mrst/pvr/services4/srvkm/env/linux/osperproc.c | 113 +
+ .../drm/mrst/pvr/services4/srvkm/env/linux/pdump.c | 662 ++++
+ .../pvr/services4/srvkm/env/linux/private_data.h | 67 +
+ .../drm/mrst/pvr/services4/srvkm/env/linux/proc.c | 970 ++++++
+ .../drm/mrst/pvr/services4/srvkm/env/linux/proc.h | 115 +
+ .../pvr/services4/srvkm/env/linux/pvr_bridge_k.c | 651 ++++
+ .../mrst/pvr/services4/srvkm/env/linux/pvr_debug.c | 426 +++
+ .../mrst/pvr/services4/srvkm/env/linux/pvr_drm.c | 310 ++
+ .../mrst/pvr/services4/srvkm/env/linux/pvr_drm.h | 80 +
+ .../mrst/pvr/services4/srvkm/hwdefs/sgx535defs.h | 637 ++++
+ .../drm/mrst/pvr/services4/srvkm/hwdefs/sgxdefs.h | 82 +
+ .../mrst/pvr/services4/srvkm/hwdefs/sgxerrata.h | 308 ++
+ .../pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h | 163 +
+ .../drm/mrst/pvr/services4/srvkm/hwdefs/sgxmmu.h | 79 +
+ .../pvr/services4/srvkm/include/buffer_manager.h | 213 ++
+ .../drm/mrst/pvr/services4/srvkm/include/device.h | 278 ++
+ .../drm/mrst/pvr/services4/srvkm/include/handle.h | 382 +++
+ .../drm/mrst/pvr/services4/srvkm/include/hash.h | 73 +
+ .../drm/mrst/pvr/services4/srvkm/include/lists.h | 176 +
+ .../drm/mrst/pvr/services4/srvkm/include/metrics.h | 130 +
+ .../drm/mrst/pvr/services4/srvkm/include/osfunc.h | 487 +++
+ .../mrst/pvr/services4/srvkm/include/osperproc.h | 76 +
+ .../mrst/pvr/services4/srvkm/include/pdump_km.h | 451 +++
+ .../pvr/services4/srvkm/include/pdump_osfunc.h | 137 +
+ .../drm/mrst/pvr/services4/srvkm/include/perproc.h | 110 +
+ .../drm/mrst/pvr/services4/srvkm/include/power.h | 133 +
+ .../drm/mrst/pvr/services4/srvkm/include/queue.h | 119 +
+ .../gpu/drm/mrst/pvr/services4/srvkm/include/ra.h | 155 +
+ .../drm/mrst/pvr/services4/srvkm/include/resman.h | 113 +
+ .../pvr/services4/srvkm/include/services_headers.h | 49 +
+ .../drm/mrst/pvr/services4/srvkm/include/srvkm.h | 69 +
+ .../mrst/pvr/services4/system/include/syscommon.h | 217 ++
+ .../pvr/services4/system/moorestown/.gitignore | 5 +
+ .../pvr/services4/system/moorestown/oemfuncs.h | 72 +
+ .../pvr/services4/system/moorestown/ospm_power.c | 479 +++
+ .../pvr/services4/system/moorestown/ospm_power.h | 79 +
+ .../system/moorestown/sys_pvr_drm_export.c | 135 +
+ .../system/moorestown/sys_pvr_drm_export.h | 87 +
+ .../system/moorestown/sys_pvr_drm_import.h | 45 +
+ .../pvr/services4/system/moorestown/sysconfig.c | 1022 ++++++
+ .../pvr/services4/system/moorestown/sysconfig.h | 139 +
+ .../mrst/pvr/services4/system/moorestown/sysinfo.h | 43 +
+ .../mrst/pvr/services4/system/moorestown/sysirq.c | 565 ++++
+ .../mrst/pvr/services4/system/moorestown/sysirq.h | 49 +
+ .../pvr/services4/system/moorestown/syslocal.h | 82 +
+ .../pvr/services4/system/moorestown/sysutils.c | 30 +
+ .../mrst/pvr/tools/intern/debug/client/linuxsrv.h | 48 +
+ .../tools/intern/debug/dbgdriv/common/dbgdriv.c | 2075 ++++++++++++
+ .../tools/intern/debug/dbgdriv/common/dbgdriv.h | 116 +
+ .../tools/intern/debug/dbgdriv/common/hostfunc.h | 58 +
+ .../pvr/tools/intern/debug/dbgdriv/common/hotkey.c | 135 +
+ .../pvr/tools/intern/debug/dbgdriv/common/hotkey.h | 60 +
+ .../pvr/tools/intern/debug/dbgdriv/common/ioctl.c | 371 +++
+ .../pvr/tools/intern/debug/dbgdriv/common/ioctl.h | 87 +
+ .../tools/intern/debug/dbgdriv/linux/hostfunc.c | 302 ++
+ .../intern/debug/dbgdriv/linux/kbuild/Makefile | 35 +
+ .../pvr/tools/intern/debug/dbgdriv/linux/main.c | 298 ++
+ .../debug/dbgdriv/linux/makefile.linux.common | 40 +
+ include/drm/drmP.h | 22 +
+ include/drm/drm_mode.h | 2 +
+ include/linux/backlight.h | 3 +
+ 235 files changed, 104731 insertions(+), 2 deletions(-)
+ create mode 100644 drivers/gpu/drm/drm_global.c
+ create mode 100644 drivers/gpu/drm/mrst/Kconfig
+ create mode 100644 drivers/gpu/drm/mrst/Makefile
+ create mode 100644 drivers/gpu/drm/mrst/drv/lnc_topaz.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/lnc_topaz.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/lnc_topazinit.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/msvdx_power.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/msvdx_power.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_bl.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_buffer.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_dpst.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_dpst.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_drm.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_drv.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_drv.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_fb.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_fb.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_fence.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_gtt.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_gtt.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_hotplug.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_hotplug.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_bios.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_bios.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_display.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_display.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_drv.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_dsi.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_dsi_aava.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_i2c.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_lvds.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_modes.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_reg.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_sdvo.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_intel_sdvo_regs.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_mmu.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_msvdx.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_msvdx.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_msvdxinit.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_pvr_glue.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_pvr_glue.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_reg.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_reset.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_schedule.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_schedule.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_setup.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_sgx.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_sgx.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_socket.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_ttm_glue.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_umevents.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/psb_umevents.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/topaz_power.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/topaz_power.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_agp_backend.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_bo.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_bo_api.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_bo_driver.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_bo_util.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_bo_vm.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_execbuf_util.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_execbuf_util.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_fence.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_fence_api.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_fence_driver.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_fence_user.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_fence_user.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_lock.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_lock.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_memory.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_memory.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_object.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_object.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_pat_compat.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_pat_compat.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_placement_common.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_placement_user.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_placement_user.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_regman.h
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_tt.c
+ create mode 100644 drivers/gpu/drm/mrst/drv/ttm/ttm_userobj_api.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/COPYING
+ create mode 100644 drivers/gpu/drm/mrst/pvr/INSTALL
+ create mode 100644 drivers/gpu/drm/mrst/pvr/README
+ create mode 100644 drivers/gpu/drm/mrst/pvr/eurasiacon/.gitignore
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/dbgdrvif.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/img_defs.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/img_types.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/ioctldef.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/pdumpdefs.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/pvr_debug.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/pvrmodule.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/pvrversion.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/regpaths.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/services.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/servicesext.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/sgx_options.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/sgxapi_km.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/include4/sgxscript.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/.gitignore
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/makefile.linux.common
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_displayclass.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_linux.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/include/env/linux/pvr_drm_shared.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/include/kernelbuffer.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/include/kerneldisplay.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/include/pvr_bridge.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/include/pvr_bridge_km.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/include/pvrmmap.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/include/servicesint.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/include/sgx_bridge.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/include/sgx_mkif_km.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/include/sgxinfo.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/.gitignore
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_pvr_bridge.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_support.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_support.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/sgx/bridged_sgx_bridge.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/sgx/bridged_sgx_bridge.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/.gitignore
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/buffer_manager.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/deviceclass.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/devicemem.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/handle.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/hash.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/lists.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/mem.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/mem_debug.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/metrics.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/pdump_common.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/perproc.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/power.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/pvrsrv.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/queue.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/ra.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/common/resman.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/.gitignore
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/mmu.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/mmu.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/pb.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgx_bridge_km.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxconfig.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxinfokm.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxinit.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxkick.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxpower.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxreset.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxtransfer.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxutils.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxutils.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/.gitignore
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/env_data.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/env_perproc.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/event.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/event.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/linkage.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/lock.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mm.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mm.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mmap.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mmap.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/module.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutex.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutex.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutils.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutils.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/osfunc.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/osperproc.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pdump.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/private_data.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/proc.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/proc.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_bridge_k.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_debug.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_drm.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_drm.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgx535defs.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxdefs.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxerrata.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxmmu.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/buffer_manager.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/device.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/handle.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/hash.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/lists.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/metrics.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/osfunc.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/osperproc.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/pdump_km.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/pdump_osfunc.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/perproc.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/power.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/queue.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/ra.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/resman.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/services_headers.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/srvkm/include/srvkm.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/include/syscommon.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/.gitignore
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/oemfuncs.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/ospm_power.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/ospm_power.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_export.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_export.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_import.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysconfig.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysconfig.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysinfo.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysirq.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysirq.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/syslocal.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysutils.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/client/linuxsrv.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/dbgdriv.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/dbgdriv.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hostfunc.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hotkey.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hotkey.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/ioctl.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/ioctl.h
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/hostfunc.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/kbuild/Makefile
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/main.c
+ create mode 100644 drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/makefile.linux.common
+
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index 305c590..8242c7f 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -157,3 +157,5 @@ config DRM_SAVAGE
+ help
+ Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
+ chipset. If M is selected the module will be called savage.
++
++source drivers/gpu/drm/mrst/Kconfig
+diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
+index 39c5aa7..ca0eea7 100644
+--- a/drivers/gpu/drm/Makefile
++++ b/drivers/gpu/drm/Makefile
+@@ -11,7 +11,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
+ drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
+ drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
+ drm_crtc.o drm_modes.o drm_edid.o \
+- drm_info.o drm_debugfs.o drm_encoder_slave.o
++ drm_info.o drm_debugfs.o drm_encoder_slave.o drm_global.o
+
+ drm-$(CONFIG_COMPAT) += drm_ioc32.o
+
+@@ -33,4 +33,5 @@ obj-$(CONFIG_DRM_SAVAGE)+= savage/
+ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
+ obj-$(CONFIG_DRM_VIA) +=via/
+ obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
++obj-$(CONFIG_DRM_MRST) +=mrst/
+ obj-y += i2c/
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index d91fb8c..9004741 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -159,6 +159,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
+ { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
+ { DRM_MODE_CONNECTOR_TV, "TV", 0 },
+ { DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 },
++ { DRM_MODE_CONNECTOR_MIPI, "MIPI", 0 },
+ };
+
+ static struct drm_prop_enum_list drm_encoder_enum_list[] =
+@@ -167,6 +168,7 @@ static struct drm_prop_enum_list drm_encoder_enum_list[] =
+ { DRM_MODE_ENCODER_TMDS, "TMDS" },
+ { DRM_MODE_ENCODER_LVDS, "LVDS" },
+ { DRM_MODE_ENCODER_TVDAC, "TV" },
++ { DRM_MODE_ENCODER_MIPI, "MIPI" },
+ };
+
+ char *drm_get_encoder_name(struct drm_encoder *encoder)
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index 766c468..48d70c2 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -342,6 +342,8 @@ static int __init drm_core_init(void)
+
+ DRM_INFO("Initialized %s %d.%d.%d %s\n",
+ CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
++ drm_global_init();
++
+ return 0;
+ err_p3:
+ drm_sysfs_destroy();
+@@ -355,6 +357,7 @@ err_p1:
+
+ static void __exit drm_core_exit(void)
+ {
++ drm_global_release();
+ remove_proc_entry("dri", NULL);
+ debugfs_remove(drm_debugfs_root);
+ drm_sysfs_destroy();
+@@ -437,6 +440,12 @@ static int drm_version(struct drm_device *dev, void *data,
+ long drm_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+ {
++ return drm_unlocked_ioctl(filp, cmd, arg);
++}
++EXPORT_SYMBOL(drm_ioctl);
++
++long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev;
+ struct drm_ioctl_desc *ioctl;
+@@ -526,7 +535,7 @@ long drm_ioctl(struct file *filp,
+ return retcode;
+ }
+
+-EXPORT_SYMBOL(drm_ioctl);
++EXPORT_SYMBOL(drm_unlocked_ioctl);
+
+ struct drm_local_map *drm_getsarea(struct drm_device *dev)
+ {
+diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
+new file mode 100644
+index 0000000..e054c4f
+--- /dev/null
++++ b/drivers/gpu/drm/drm_global.c
+@@ -0,0 +1,107 @@
++/**************************************************************************
++ *
++ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++#include <drmP.h>
++struct drm_global_item {
++ struct mutex mutex;
++ void *object;
++ int refcount;
++};
++
++static struct drm_global_item glob[DRM_GLOBAL_NUM];
++
++void drm_global_init(void)
++{
++ int i;
++
++ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
++ struct drm_global_item *item = &glob[i];
++ mutex_init(&item->mutex);
++ item->object = NULL;
++ item->refcount = 0;
++ }
++}
++
++void drm_global_release(void)
++{
++ int i;
++ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
++ struct drm_global_item *item = &glob[i];
++ BUG_ON(item->object != NULL);
++ BUG_ON(item->refcount != 0);
++ }
++}
++
++int drm_global_item_ref(struct drm_global_reference *ref)
++{
++ int ret;
++ struct drm_global_item *item = &glob[ref->global_type];
++ void *object;
++
++ mutex_lock(&item->mutex);
++ if (item->refcount == 0) {
++ item->object = kmalloc(ref->size, GFP_KERNEL);
++ if (unlikely(item->object == NULL)) {
++ ret = -ENOMEM;
++ goto out_err;
++ }
++
++ ref->object = item->object;
++ ret = ref->init(ref);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ ++item->refcount;
++ }
++ ref->object = item->object;
++ object = item->object;
++ mutex_unlock(&item->mutex);
++ return 0;
++ out_err:
++ kfree(item->object);
++ mutex_unlock(&item->mutex);
++ item->object = NULL;
++ return ret;
++}
++
++EXPORT_SYMBOL(drm_global_item_ref);
++
++void drm_global_item_unref(struct drm_global_reference *ref)
++{
++ struct drm_global_item *item = &glob[ref->global_type];
++
++ mutex_lock(&item->mutex);
++ BUG_ON(item->refcount == 0);
++ BUG_ON(ref->object != item->object);
++ if (--item->refcount == 0) {
++ ref->release(ref);
++ kfree(item->object);
++ item->object = NULL;
++ }
++ mutex_unlock(&item->mutex);
++}
++
++EXPORT_SYMBOL(drm_global_item_unref);
+diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
+index b98384d..7991d00 100644
+--- a/drivers/gpu/drm/drm_irq.c
++++ b/drivers/gpu/drm/drm_irq.c
+@@ -72,6 +72,28 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
+ return 0;
+ }
+
++#if 0
++static void drm_flip_work_func(struct work_struct *work)
++{
++ struct drm_device *dev =
++ container_of(work, struct drm_device, flip_work);
++#if 0
++ struct drm_pending_flip *f, *t;
++#endif
++ u32 frame;
++
++ mutex_lock(&dev->struct_mutex);
++
++ list_for_each_entry_safe(f, t, &dev->flip_list, link) {
++ frame = drm_vblank_count(dev, f->pipe);
++ if (vblank_after(frame, f->frame))
++ drm_finish_pending_flip(dev, f, frame);
++ }
++
++ mutex_unlock(&dev->struct_mutex);
++}
++#endif
++
+ static void vblank_disable_fn(unsigned long arg)
+ {
+ struct drm_device *dev = (struct drm_device *)arg;
+@@ -163,6 +185,11 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
+ atomic_set(&dev->vblank_refcount[i], 0);
+ }
+
++#if 0
++ INIT_LIST_HEAD(&dev->flip_list);
++ INIT_WORK(&dev->flip_work, drm_flip_work_func);
++#endif
++
+ dev->vblank_disable_allowed = 0;
+ return 0;
+
+diff --git a/drivers/gpu/drm/mrst/Kconfig b/drivers/gpu/drm/mrst/Kconfig
+new file mode 100644
+index 0000000..2fc22d1
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/Kconfig
+@@ -0,0 +1,220 @@
++#
++# Drm device configuration
++#
++# This driver provides support for the
++# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
++#
++menuconfig DRM_MRST
++ tristate "Intel Moorestown (load along with IMG driver)"
++ depends on DRM && PCI
++ select FB_CFB_COPYAREA
++ select FB_CFB_FILLRECT
++ select FB_CFB_IMAGEBLIT
++ select PVR_SUPPORT_DRI_DRM
++ select DRM_KMS_HELPER
++ help
++ Choose this option if you have a Moorestown platform.
++ If M is selected the module will be called mrst.
++
++config IMG_DOES_NOT_SUPPORT_MENLOW
++ bool "Disable MRST funtions for Menlow"
++ depends on DRM_MRST
++ default n
++ help
++ Choose Menlow
++
++config PVR_RELEASE
++ string "Build IMG kernel services as release"
++ depends on DRM_MRST
++ default "release"
++ help
++ xxxxxxx
++
++config PVR_SERVICES4
++ bool "Enable PVR services4"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxx
++
++config PVR_XOPEN_SOURCE
++ int "Number of xopen source"
++ depends on DRM_MRST
++ default 600
++ help
++ xxxxxxx
++
++config PVR2D_VALIDATE_INPUT_PARAMS
++ bool "PVR2D Validate input params"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxx
++
++config PVR_DISPLAY_CONTROLLER
++ string "Name of PVR display controller"
++ depends on DRM_MRST
++ default "mrstlfb"
++ help
++ xxxxxxx
++
++config PVR_SGX_CORE_REV
++ int "SGX core revison"
++ depends on DRM_MRST
++ default 121
++ help
++ xxxxxxx
++
++config PVR_SUPPORT_SVRINIT
++ bool "Support IMG Kernel Service Init"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxxx
++
++config PVR_SUPPORT_SGX
++ bool "Support IMG SGX core"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxxx
++
++config PVR_SUPPORT_PERCONTEXT_PB
++ bool "Support PVR PERCONTEXT_PB"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxx
++
++config PVR_SUPPORT_LINUX_X86_WRITECOMBINE
++ bool "Support X86 write combine in IMG service"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxx
++
++config PVR_TRANSFER_QUEUE
++ bool "Support IMG TRANSFER_QUEUE"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxxx
++
++config PVR_SUPPORT_DRI_DRM
++ bool
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxxx
++
++config PVR_SYS_USING_INTERRUPTS
++ bool "Using interrupts in IMG kernel service"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxxx
++
++config PVR_SUPPORT_HW_RECOVERY
++ bool "Support hardware recover in IMG kernel service"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxxx
++
++config PVR_SUPPORT_POWER_MANAGEMENT
++ bool "Support POWER_MANAGEMENT in IMG kernel service"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxxx
++
++config PVR_SECURE_HANDLES
++ bool "Support PVR_SECURE_HANDLES"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxxx
++
++config PVR_USE_PTHREADS
++ bool "Use pthreads in IMG service"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxx
++
++config PVR_SUPPORT_SGX_EVENT_OBJECT
++ bool "Support SGX event object"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxx
++
++config PVR_SUPPORT_SGX_HWPERF
++ bool "Support SGX HWPERF"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxxx
++
++config PVR_SUPPORT_SGX_LOW_LATENCY_SCHEDULING
++ bool "Support SGX LOW_LATENCY_SCHEDULING"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxxx
++
++config PVR_SUPPORT_LINUX_X86_PAT
++ bool "Support PAT in IMG kernel service"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxxx
++
++config PVR_PROC_USE_SEQ_FILE
++ bool "Support PVR_PROC_USE_SEQ_FILE"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxx
++
++config PVR_SUPPORT_SGX535
++ bool "SUPPORT_SGX535"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxx
++
++config PVR_SUPPORT_CACHEFLUSH_ON_ALLOC
++ bool "SUPPORT_CACHEFLUSH_ON_ALLOC"
++ depends on DRM_MRST
++ default n
++ help
++ xxxxxx
++
++config PVR_SUPPORT_MEMINFO_IDS
++ bool "SUPPORT_MEMINFO_IDS"
++ depends on DRM_MRST
++ default n
++ help
++ xxxxxx
++
++config PVR_SUPPORT_CACHE_LINE_FLUSH
++ bool "SUPPORT_CACHE_LINE_FLUSH"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxx
++
++config PVR_SUPPORT_CPU_CACHED_BUFFERS
++ bool "SUPPORT_CPU_CACHED_BUFFERS"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxx
++
++config PVR_DEBUG_MESA_OGL_TRACE
++ bool "DEBUG_MESA_OGL_TRACE"
++ depends on DRM_MRST
++ default y
++ help
++ xxxxxx
+diff --git a/drivers/gpu/drm/mrst/Makefile b/drivers/gpu/drm/mrst/Makefile
+new file mode 100644
+index 0000000..e23d8c3
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/Makefile
+@@ -0,0 +1,169 @@
++# Makefile for the drm device driver. This driver provides support for the
++# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
++
++ccflags-y += -Idrivers/gpu/drm/mrst/pvr/include4 \
++ -Idrivers/gpu/drm/mrst/pvr/services4/include \
++ -Idrivers/gpu/drm/mrst/pvr/services4/include/env/linux \
++ -Idrivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux \
++ -Idrivers/gpu/drm/mrst/pvr/services4/srvkm/include \
++ -Idrivers/gpu/drm/mrst/pvr/services4/srvkm/bridged \
++ -Idrivers/gpu/drm/mrst/pvr/services4/system/moorestown \
++ -Idrivers/gpu/drm/mrst/pvr/services4/system/include \
++ -Idrivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs \
++ -Idrivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/sgx \
++ -Idrivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx \
++ -Idrivers/gpu/drm/mrst/drv \
++ -Idrivers/gpu/drm/mrst/drv/ttm \
++ -Iinclude/linux \
++ -Werror \
++ -DLINUX \
++ -DPVR_BUILD_DIR="\"pc_i686_moorestown_linux\"" \
++ -DSGX535
++
++#FIXME: whether we need the follow -D
++ccflags-$(CONFIG_PCI_MSI) += -DCONFIG_PCI_MSI
++ccflags-y += -DBUILD=$(CONFIG_PVR_RELEASE)
++ccflags-y += -DPVR_BUILD_TYPE="\"$(CONFIG_PVR_RELEASE)\""
++ifeq ($(CONFIG_PVR_RELEASE),"release")
++ ccflags-y += -DRELEASE
++else
++ ccflags-y += -DDEBUG
++endif
++ccflags-$(CONFIG_PVR_SERVICES4) += -DSERVICES4
++ccflags-y += -D_XOPEN_SOURCE=$(CONFIG_PVR_XOPEN_SOURCE)
++ccflags-$(CONFIG_PVR2D_VALIDATE_INPUT_PARAMS) += -DPVR2D_VALIDATE_INPUT_PARAMS
++ccflags-y += -DDISPLAY_CONTROLLER=$(CONFIG_PVR_DISPLAY_CONTROLLER)
++ccflags-y += -UDEBUG_LOG_PATH_TRUNCATE
++ccflags-$(CONFIG_PVR_SUPPORT_SVRINIT) += -DSUPPORT_SRVINIT
++ccflags-$(CONFIG_PVR_SUPPORT_SGX) += -DSUPPORT_SGX
++ccflags-$(CONFIG_PVR_SUPPORT_PERCONTEXT_PB) += -DSUPPORT_PERCONTEXT_PB
++ccflags-$(CONFIG_PVR_SUPPORT_LINUX_X86_WRITECOMBINE) += -DSUPPORT_LINUX_X86_WRITECOMBINE
++ccflags-$(CONFIG_PVR_TRANSFER_QUEUE) += -DTRANSFER_QUEUE
++ccflags-$(CONFIG_PVR_SUPPORT_DRI_DRM) += -DSUPPORT_DRI_DRM
++ccflags-$(CONFIG_PVR_SUPPORT_DRI_DRM) += -DSUPPORT_DRI_DRM_EXT
++ccflags-$(CONFIG_PVR_SYS_USING_INTERRUPTS) += -DSYS_USING_INTERRUPTS
++ccflags-$(CONFIG_PVR_SUPPORT_HW_RECOVERY) += -DSUPPORT_HW_RECOVERY
++ccflags-$(CONFIG_PVR_SUPPORT_POWER_MANAGEMENT) += -DSUPPORT_ACTIVE_POWER_MANAGEMENT
++ccflags-$(CONFIG_PVR_SECURE_HANDLES) += -DPVR_SECURE_HANDLES
++ccflags-$(CONFIG_PVR_USE_PTHREADS) += -DUSE_PTHREADS
++ccflags-$(CONFIG_PVR_SUPPORT_SGX_EVENT_OBJECT) += -DSUPPORT_SGX_EVENT_OBJECT
++ccflags-$(CONFIG_PVR_SUPPORT_SGX_HWPERF) += -DSUPPORT_SGX_HWPERF
++ccflags-$(CONFIG_PVR_SUPPORT_SGX_LOW_LATENCY_SCHEDULING) += -DSUPPORT_SGX_LOW_LATENCY_SCHEDULING
++ccflags-$(CONFIG_PVR_SUPPORT_LINUX_X86_PAT) += -DSUPPORT_LINUX_X86_PAT
++ccflags-$(CONFIG_PVR_PROC_USE_SEQ_FILE) += -DPVR_PROC_USE_SEQ_FILE
++ccflags-$(CONFIG_PVR_SUPPORT_SGX535) += -DSUPPORT_SGX535
++ccflags-y += -DSGX_CORE_REV=$(CONFIG_PVR_SGX_CORE_REV)
++ccflags-$(CONFIG_PVR_SUPPORT_CACHEFLUSH_ON_ALLOC) += -DSUPPORT_CACHEFLUSH_ON_ALLOC
++ccflags-$(CONFIG_PVR_SUPPORT_MEMINFO_IDS) += -DSUPPORT_MEMINFO_IDS
++ccflags-$(CONFIG_PVR_SUPPORT_CACHE_LINE_FLUSH) += -DSUPPORT_CACHE_LINE_FLUSH
++ccflags-$(CONFIG_PVR_SUPPORT_CPU_CACHED_BUFFERS) += -DSUPPORT_CPU_CACHED_BUFFERS
++ccflags-$(CONFIG_PVR_DEBUG_MESA_OGL_TRACE)+= -DDEBUG_MESA_OGL_TRACE
++
++ENVDIR = pvr/services4/srvkm/env/linux
++COMMONDIR = pvr/services4/srvkm/common
++BRIDGEDDIR = pvr/services4/srvkm/bridged
++SYSCONFIGDIR = pvr/services4/system/moorestown
++SGXDIR = pvr/services4/srvkm/devices/sgx
++FBDEVDIR = pvr/services4/3rdparty/linux_framebuffer_mrst
++DRMDRVDIR = drv
++
++ENV_OBJS = $(ENVDIR)/osfunc.o \
++ $(ENVDIR)/mutils.o \
++ $(ENVDIR)/mmap.o \
++ $(ENVDIR)/module.o \
++ $(ENVDIR)/pdump.o \
++ $(ENVDIR)/proc.o \
++ $(ENVDIR)/pvr_bridge_k.o \
++ $(ENVDIR)/pvr_debug.o \
++ $(ENVDIR)/mm.o \
++ $(ENVDIR)/mutex.o \
++ $(ENVDIR)/event.o \
++ $(ENVDIR)/osperproc.o \
++ $(ENVDIR)/pvr_drm.o
++
++COMMON_OBJS = $(COMMONDIR)/buffer_manager.o \
++ $(COMMONDIR)/devicemem.o \
++ $(COMMONDIR)/deviceclass.o \
++ $(COMMONDIR)/handle.o \
++ $(COMMONDIR)/hash.o \
++ $(COMMONDIR)/metrics.o \
++ $(COMMONDIR)/pvrsrv.o \
++ $(COMMONDIR)/queue.o \
++ $(COMMONDIR)/ra.o \
++ $(COMMONDIR)/resman.o \
++ $(COMMONDIR)/power.o \
++ $(COMMONDIR)/mem.o \
++ $(COMMONDIR)/pdump_common.o \
++ $(COMMONDIR)/perproc.o \
++ $(COMMONDIR)/lists.o \
++ $(COMMONDIR)/mem_debug.o
++
++BRIDGED_OBJS = $(BRIDGEDDIR)/bridged_support.o \
++ $(BRIDGEDDIR)/bridged_pvr_bridge.o \
++ $(BRIDGEDDIR)/sgx/bridged_sgx_bridge.o
++
++SYSCONFIG_OBJS = $(SYSCONFIGDIR)/sysconfig.o \
++ $(SYSCONFIGDIR)/sysutils.o \
++ $(SYSCONFIGDIR)/ospm_power.o \
++ $(SYSCONFIGDIR)/sysirq.o \
++ $(SYSCONFIGDIR)/sys_pvr_drm_export.o
++
++SGX_OBJS = $(SGXDIR)/sgxinit.o \
++ $(SGXDIR)/sgxpower.o \
++ $(SGXDIR)/sgxreset.o \
++ $(SGXDIR)/sgxutils.o \
++ $(SGXDIR)/sgxkick.o \
++ $(SGXDIR)/sgxtransfer.o \
++ $(SGXDIR)/mmu.o \
++ $(SGXDIR)/pb.o
++
++FB_OBJS = $(FBDEVDIR)/mrstlfb_displayclass.o \
++ $(FBDEVDIR)/mrstlfb_linux.o
++
++DRV_OBJS = $(DRMDRVDIR)/lnc_topaz.o \
++ $(DRMDRVDIR)/topaz_power.o \
++ $(DRMDRVDIR)/lnc_topazinit.o \
++ $(DRMDRVDIR)/psb_bl.o \
++ $(DRMDRVDIR)/psb_buffer.o \
++ $(DRMDRVDIR)/psb_dpst.o \
++ $(DRMDRVDIR)/psb_drv.o \
++ $(DRMDRVDIR)/psb_fb.o \
++ $(DRMDRVDIR)/psb_fence.o \
++ $(DRMDRVDIR)/psb_gtt.o \
++ $(DRMDRVDIR)/psb_hotplug.o \
++ $(DRMDRVDIR)/psb_intel_bios.o \
++ $(DRMDRVDIR)/psb_intel_display.o \
++ $(DRMDRVDIR)/psb_intel_i2c.o \
++ $(DRMDRVDIR)/psb_intel_lvds.o \
++ $(DRMDRVDIR)/psb_intel_modes.o \
++ $(DRMDRVDIR)/psb_intel_sdvo.o \
++ $(DRMDRVDIR)/psb_mmu.o \
++ $(DRMDRVDIR)/psb_msvdx.o \
++ $(DRMDRVDIR)/msvdx_power.o \
++ $(DRMDRVDIR)/psb_msvdxinit.o \
++ $(DRMDRVDIR)/psb_reset.o \
++ $(DRMDRVDIR)/psb_schedule.o \
++ $(DRMDRVDIR)/psb_sgx.o \
++ $(DRMDRVDIR)/psb_socket.o \
++ $(DRMDRVDIR)/psb_ttm_glue.o \
++ $(DRMDRVDIR)/psb_pvr_glue.o \
++ $(DRMDRVDIR)/psb_umevents.o \
++ $(DRMDRVDIR)/ttm/ttm_agp_backend.o \
++ $(DRMDRVDIR)/ttm/ttm_bo.o \
++ $(DRMDRVDIR)/ttm/ttm_bo_util.o \
++ $(DRMDRVDIR)/ttm/ttm_bo_vm.o \
++ $(DRMDRVDIR)/ttm/ttm_execbuf_util.o \
++ $(DRMDRVDIR)/ttm/ttm_fence.o \
++ $(DRMDRVDIR)/ttm/ttm_fence_user.o \
++ $(DRMDRVDIR)/ttm/ttm_lock.o \
++ $(DRMDRVDIR)/ttm/ttm_memory.o \
++ $(DRMDRVDIR)/ttm/ttm_object.o \
++ $(DRMDRVDIR)/ttm/ttm_pat_compat.o \
++ $(DRMDRVDIR)/ttm/ttm_placement_user.o \
++ $(DRMDRVDIR)/ttm/ttm_tt.o
++
++mrst-objs += $(ENV_OBJS) $(COMMON_OBJS) $(BRIDGED_OBJS) $(SYSCONFIG_OBJS) $(SGX_OBJS) $(FB_OBJS) $(DRV_OBJS)
++
++obj-$(CONFIG_DRM_MRST) += mrst.o
++obj-$(CONFIG_DRM_MRST_AAVA) += $(DRMDRVDIR)/psb_intel_dsi_aava.o
++obj-$(CONFIG_DRM_MRST_CDK) += $(DRMDRVDIR)/psb_intel_dsi.o
+diff --git a/drivers/gpu/drm/mrst/drv/lnc_topaz.c b/drivers/gpu/drm/mrst/drv/lnc_topaz.c
+new file mode 100644
+index 0000000..04c42f8
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/lnc_topaz.c
+@@ -0,0 +1,714 @@
++/**
++ * file lnc_topaz.c
++ * TOPAZ I/O operations and IRQ handling
++ *
++ */
++
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++/* include headers */
++/* #define DRM_DEBUG_CODE 2 */
++#include <drm/drmP.h>
++#include <drm/drm_os_linux.h>
++
++#include "psb_drv.h"
++#include "psb_drm.h"
++#include "lnc_topaz.h"
++#include "ospm_power.h"
++
++#include <linux/io.h>
++#include <linux/delay.h>
++
++#define TOPAZ_RM_MULTI_MTX_WRITE
++
++/* static function define */
++static int lnc_topaz_deliver_command(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset,
++ unsigned long cmd_size,
++ void **topaz_cmd, uint32_t sequence,
++ int copy_cmd);
++static int lnc_topaz_send(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size, uint32_t sync_seq);
++static int lnc_mtx_send(struct drm_psb_private *dev_priv, const void *cmd);
++static int lnc_topaz_dequeue_send(struct drm_device *dev);
++static int lnc_topaz_save_command(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size, uint32_t sequence);
++
++IMG_BOOL lnc_topaz_interrupt(IMG_VOID *pvData)
++{
++ struct drm_device *dev;
++ struct drm_psb_private *dev_priv;
++ uint32_t clr_flag;
++ struct topaz_private *topaz_priv;
++ uint32_t topaz_stat;
++ uint32_t cur_seq;
++
++ if (pvData == IMG_NULL) {
++ DRM_ERROR("ERROR: TOPAZ %s, Invalid params\n", __func__);
++ return IMG_FALSE;
++ }
++
++ if (!ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND)) {
++ DRM_ERROR("ERROR: interrupt arrived but HW is power off\n");
++ return IMG_FALSE;
++ }
++
++ dev = (struct drm_device *)pvData;
++ dev_priv = (struct drm_psb_private *) dev->dev_private;
++ topaz_priv = dev_priv->topaz_private;
++
++ topaz_priv->topaz_hw_busy = REG_READ(0x20D0) & (0x1 << 11);
++
++ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT, &topaz_stat);
++ clr_flag = lnc_topaz_queryirq(dev);
++
++ lnc_topaz_clearirq(dev, clr_flag);
++
++ /* ignore non-SYNC interrupts */
++ if ((CCB_CTRL_SEQ(dev_priv) & 0x8000) == 0)
++ return IMG_TRUE;
++
++ cur_seq = *(uint32_t *)topaz_priv->topaz_sync_addr;
++
++ PSB_DEBUG_IRQ("TOPAZ:Got SYNC IRQ,sync seq:0x%08x (MTX) vs 0x%08x\n",
++ cur_seq, dev_priv->sequence[LNC_ENGINE_ENCODE]);
++
++ psb_fence_handler(dev, LNC_ENGINE_ENCODE);
++
++ /* save frame skip flag for query */
++ topaz_priv->frame_skip = CCB_CTRL_FRAMESKIP(dev_priv);
++
++ topaz_priv->topaz_busy = 1;
++ lnc_topaz_dequeue_send(dev);
++
++ if (drm_topaz_pmpolicy != PSB_PMPOLICY_NOPM)
++ schedule_delayed_work(&dev_priv->scheduler.topaz_suspend_wq, 0);
++
++ return IMG_TRUE;
++}
++
++static int lnc_submit_encode_cmdbuf(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset, unsigned long cmd_size,
++ struct ttm_fence_object *fence)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ unsigned long irq_flags;
++ int ret = 0;
++ void *cmd;
++ uint32_t tmp;
++ uint32_t sequence = dev_priv->sequence[LNC_ENGINE_ENCODE];
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++ uint32_t ui32_reg_value = 0;
++
++ PSB_DEBUG_GENERAL("TOPAZ: command submit\n");
++
++ PSB_DEBUG_GENERAL("TOPAZ: topaz busy = %d\n", topaz_priv->topaz_busy);
++
++ /* FIXME: workaround for HSD 3469585
++ * disable DRAM Self Refresh Mode
++ * by resetting DUNIT.DPMC0
++ */
++ ui32_reg_value = MSG_READ32(0x1, 0x4);
++ MSG_WRITE32(0x1, 0x4, (ui32_reg_value & (~(0x1 << 7))));
++
++ if (topaz_priv->topaz_fw_loaded == 0) {
++ /* #.# load fw to driver */
++ PSB_DEBUG_INIT("TOPAZ: load /lib/firmware/topaz_fw.bin\n");
++ ret = topaz_init_fw(dev);
++ if (ret != 0) {
++ /* FIXME: find a proper return value */
++ DRM_ERROR("TOPAX:load /lib/firmware/topaz_fw.bin fail,"
++ "ensure udevd is configured correctly!\n");
++
++ return -EFAULT;
++ }
++ topaz_priv->topaz_fw_loaded = 1;
++ }
++
++ tmp = atomic_cmpxchg(&dev_priv->topaz_mmu_invaldc, 1, 0);
++ if (tmp == 1)
++ topaz_mmu_flushcache(dev_priv);
++
++ /* # schedule watchdog */
++ /* psb_schedule_watchdog(dev_priv); */
++
++ /* # spin lock irq save [msvdx_lock] */
++ spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
++
++ /* # if topaz need to reset, reset it */
++ if (topaz_priv->topaz_needs_reset) {
++ /* #.# reset it */
++ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
++ PSB_DEBUG_GENERAL("TOPAZ: needs reset.\n");
++
++ if (lnc_topaz_reset(dev_priv)) {
++ ret = -EBUSY;
++ DRM_ERROR("TOPAZ: reset failed.\n");
++ return ret;
++ }
++
++ PSB_DEBUG_GENERAL("TOPAZ: reset ok.\n");
++
++ /* #.# upload firmware */
++ if (topaz_setup_fw(dev, topaz_priv->topaz_cur_codec)) {
++ DRM_ERROR("TOPAZ: upload FW to HW failed\n");
++ return -EBUSY;
++ }
++
++ spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
++ }
++
++ if (!topaz_priv->topaz_busy) {
++ /* # direct map topaz command if topaz is free */
++ PSB_DEBUG_GENERAL("TOPAZ:direct send command,sequence %08x \n",
++ sequence);
++
++ topaz_priv->topaz_busy = 1;
++ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
++
++ ret = lnc_topaz_deliver_command(dev, cmd_buffer, cmd_offset,
++ cmd_size, NULL, sequence, 0);
++
++ if (ret) {
++ DRM_ERROR("TOPAZ: failed to extract cmd...\n");
++ return ret;
++ }
++ } else {
++ PSB_DEBUG_GENERAL("TOPAZ: queue command,sequence %08x \n",
++ sequence);
++ cmd = NULL;
++
++ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
++
++ ret = lnc_topaz_deliver_command(dev, cmd_buffer, cmd_offset,
++ cmd_size, &cmd, sequence, 1);
++ if (cmd == NULL || ret) {
++ DRM_ERROR("TOPAZ: map command for save fialed\n");
++ return ret;
++ }
++
++ ret = lnc_topaz_save_command(dev, cmd, cmd_size, sequence);
++ if (ret)
++ DRM_ERROR("TOPAZ: save command failed\n");
++ }
++
++ return ret;
++}
++
++static int lnc_topaz_save_command(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size, uint32_t sequence)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct lnc_topaz_cmd_queue *topaz_cmd;
++ unsigned long irq_flags;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ PSB_DEBUG_GENERAL("TOPAZ: queue command,sequence: %08x..\n",
++ sequence);
++
++ topaz_cmd = kzalloc(sizeof(struct lnc_topaz_cmd_queue),
++ GFP_KERNEL);
++ if (topaz_cmd == NULL) {
++ mutex_unlock(&topaz_priv->topaz_mutex);
++ DRM_ERROR("TOPAZ: out of memory....\n");
++ return -ENOMEM;
++ }
++
++ topaz_cmd->cmd = cmd;
++ topaz_cmd->cmd_size = cmd_size;
++ topaz_cmd->sequence = sequence;
++
++ spin_lock_irqsave(&topaz_priv->topaz_lock, irq_flags);
++ list_add_tail(&topaz_cmd->head, &topaz_priv->topaz_queue);
++ if (!topaz_priv->topaz_busy) {
++ /* topaz_priv->topaz_busy = 1; */
++ PSB_DEBUG_GENERAL("TOPAZ: need immediate dequeue...\n");
++ lnc_topaz_dequeue_send(dev);
++ PSB_DEBUG_GENERAL("TOPAZ: after dequeue command\n");
++ }
++
++ spin_unlock_irqrestore(&topaz_priv->topaz_lock, irq_flags);
++
++ return 0;
++}
++
++
++int lnc_cmdbuf_video(struct drm_file *priv,
++ struct list_head *validate_list,
++ uint32_t fence_type,
++ struct drm_psb_cmdbuf_arg *arg,
++ struct ttm_buffer_object *cmd_buffer,
++ struct psb_ttm_fence_rep *fence_arg)
++{
++ struct drm_device *dev = priv->minor->dev;
++ struct ttm_fence_object *fence = NULL;
++ int ret;
++
++ ret = lnc_submit_encode_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
++ arg->cmdbuf_size, fence);
++ if (ret)
++ return ret;
++
++ /* workaround for interrupt issue */
++ psb_fence_or_sync(priv, LNC_ENGINE_ENCODE, fence_type, arg->fence_flags,
++ validate_list, fence_arg, &fence);
++
++ if (fence)
++ ttm_fence_object_unref(&fence);
++
++ mutex_lock(&cmd_buffer->mutex);
++ if (cmd_buffer->sync_obj != NULL)
++ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
++ mutex_unlock(&cmd_buffer->mutex);
++
++ return 0;
++}
++
++static int lnc_topaz_sync(struct drm_device *dev, uint32_t sync_seq)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t sync_cmd[3];
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++#if 0
++ struct ttm_fence_device *fdev = &dev_priv->fdev;
++ struct ttm_fence_class_manager *fc =
++ &fdev->fence_class[LNC_ENGINE_ENCODE];
++ unsigned long irq_flags;
++#endif
++#if LNC_TOPAZ_NO_IRQ
++ uint32_t *sync_p = (uint32_t *)topaz_priv->topaz_sync_addr;
++ int count = 10000;
++ uint32_t cur_seq;
++#endif
++
++ /* insert a SYNC command here */
++ topaz_priv->topaz_sync_cmd_seq = (1 << 15) |
++ topaz_priv->topaz_cmd_seq++;
++ sync_cmd[0] = 1 | (MTX_CMDID_SYNC << 1) | (3 << 8) |
++ (topaz_priv->topaz_sync_cmd_seq << 16);
++ sync_cmd[1] = topaz_priv->topaz_sync_offset;
++ sync_cmd[2] = sync_seq;
++
++ PSB_DEBUG_GENERAL("TOPAZ:MTX_CMDID_SYNC: size(3),cmd seq (0x%04x),"
++ "sync_seq (0x%08x)\n",
++ topaz_priv->topaz_sync_cmd_seq, sync_seq);
++
++ if (drm_topaz_sbuswa)
++ TOPAZ_WAIT_UNTIL_IDLE;
++
++ lnc_mtx_send(dev_priv, sync_cmd);
++
++#if LNC_TOPAZ_NO_IRQ /* workaround for interrupt issue */
++ /* # poll topaz register for certain times */
++ while (count && *sync_p != sync_seq) {
++ DRM_UDELAY(100);
++ --count;
++ }
++ if ((count == 0) && (*sync_p != sync_seq)) {
++ DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x),actual 0x%08x\n",
++ sync_seq, *sync_p);
++ return -EBUSY;
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *sync_p);
++
++ topaz_priv->topaz_busy = 0;
++
++ /* XXX: check psb_fence_handler is suitable for topaz */
++ cur_seq = *sync_p;
++#if 0
++ write_lock_irqsave(&fc->lock, irq_flags);
++ ttm_fence_handler(fdev, LNC_ENGINE_ENCODE,
++ cur_seq,
++ _PSB_FENCE_TYPE_EXE, 0);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++#endif
++#endif
++ return 0;
++}
++
++int
++lnc_topaz_deliver_command(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset, unsigned long cmd_size,
++ void **topaz_cmd, uint32_t sequence,
++ int copy_cmd)
++{
++ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
++ struct ttm_bo_kmap_obj cmd_kmap;
++ bool is_iomem;
++ int ret;
++ unsigned char *cmd_start, *tmp;
++
++ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, 2,
++ &cmd_kmap);
++ if (ret) {
++ DRM_ERROR("TOPAZ: drm_bo_kmap failed: %d\n", ret);
++ return ret;
++ }
++ cmd_start = (unsigned char *) ttm_kmap_obj_virtual(&cmd_kmap,
++ &is_iomem) + cmd_page_offset;
++
++ if (copy_cmd) {
++ PSB_DEBUG_GENERAL("TOPAZ: queue commands\n");
++ tmp = kzalloc(cmd_size, GFP_KERNEL);
++ if (tmp == NULL) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ memcpy(tmp, cmd_start, cmd_size);
++ *topaz_cmd = tmp;
++ } else {
++ PSB_DEBUG_GENERAL("TOPAZ: directly send the command\n");
++ ret = lnc_topaz_send(dev, cmd_start, cmd_size, sequence);
++ if (ret) {
++ DRM_ERROR("TOPAZ: commit commands failed.\n");
++ ret = -EINVAL;
++ }
++ }
++
++out:
++ PSB_DEBUG_GENERAL("TOPAZ:cmd_size(%ld), sequence(%d) copy_cmd(%d)\n",
++ cmd_size, sequence, copy_cmd);
++
++ ttm_bo_kunmap(&cmd_kmap);
++
++ return ret;
++}
++
++int
++lnc_topaz_send(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size, uint32_t sync_seq)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int ret = 0;
++ unsigned char *command = (unsigned char *) cmd;
++ struct topaz_cmd_header *cur_cmd_header;
++ uint32_t cur_cmd_size, cur_cmd_id;
++ uint32_t codec;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ PSB_DEBUG_GENERAL("TOPAZ: send the command in the buffer one by one\n");
++
++ while (cmd_size > 0) {
++ cur_cmd_header = (struct topaz_cmd_header *) command;
++ cur_cmd_size = cur_cmd_header->size * 4;
++ cur_cmd_id = cur_cmd_header->id;
++
++ switch (cur_cmd_id) {
++ case MTX_CMDID_SW_NEW_CODEC:
++ codec = *((uint32_t *) cmd + 1);
++
++ PSB_DEBUG_GENERAL("TOPAZ: setup new codec %s (%d)\n",
++ codec_to_string(codec), codec);
++ if (topaz_setup_fw(dev, codec)) {
++ DRM_ERROR("TOPAZ: upload FW to HW failed\n");
++ return -EBUSY;
++ }
++
++ topaz_priv->topaz_cur_codec = codec;
++ break;
++
++ case MTX_CMDID_SW_ENTER_LOWPOWER:
++ PSB_DEBUG_GENERAL("TOPAZ: enter lowpower.... \n");
++ PSB_DEBUG_GENERAL("XXX: implement it\n");
++ break;
++
++ case MTX_CMDID_SW_LEAVE_LOWPOWER:
++ PSB_DEBUG_GENERAL("TOPAZ: leave lowpower... \n");
++ PSB_DEBUG_GENERAL("XXX: implement it\n");
++ break;
++
++ /* ordinary commmand */
++ case MTX_CMDID_START_PIC:
++ /* XXX: specially handle START_PIC hw command */
++ CCB_CTRL_SET_QP(dev_priv,
++ *(command + cur_cmd_size - 4));
++ /* strip the QP parameter (it's software arg) */
++ cur_cmd_header->size--;
++ default:
++ cur_cmd_header->seq = 0x7fff &
++ topaz_priv->topaz_cmd_seq++;
++
++ PSB_DEBUG_GENERAL("TOPAZ: %s: size(%d),"
++ " seq (0x%04x)\n",
++ cmd_to_string(cur_cmd_id),
++ cur_cmd_size, cur_cmd_header->seq);
++
++ if (drm_topaz_sbuswa && cur_cmd_id != \
++ MTX_CMDID_START_PIC)
++ TOPAZ_WAIT_UNTIL_IDLE;
++
++ ret = lnc_mtx_send(dev_priv, command);
++ if (ret) {
++ DRM_ERROR("TOPAZ: error -- ret(%d)\n", ret);
++ goto out;
++ }
++ break;
++ }
++
++ command += cur_cmd_size;
++ cmd_size -= cur_cmd_size;
++ }
++ lnc_topaz_sync(dev, sync_seq);
++out:
++ return ret;
++}
++
++static int lnc_mtx_send(struct drm_psb_private *dev_priv, const void *cmd)
++{
++ struct topaz_cmd_header *cur_cmd_header =
++ (struct topaz_cmd_header *) cmd;
++ uint32_t cmd_size = cur_cmd_header->size;
++ uint32_t read_index, write_index;
++ const uint32_t *cmd_pointer = (uint32_t *) cmd;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ int ret = 0;
++
++ /* <msvdx does> # enable all clock */
++
++ write_index = topaz_priv->topaz_cmd_windex;
++ if (write_index + cmd_size + 1 > topaz_priv->topaz_ccb_size) {
++ int free_space = topaz_priv->topaz_ccb_size - write_index;
++
++ PSB_DEBUG_GENERAL("TOPAZ: -------will wrap CCB write point.\n");
++ if (free_space > 0) {
++ struct topaz_cmd_header pad_cmd;
++
++ pad_cmd.id = MTX_CMDID_NULL;
++ pad_cmd.size = free_space;
++ pad_cmd.seq = 0x7fff & topaz_priv->topaz_cmd_seq;
++
++ PSB_DEBUG_GENERAL("TOPAZ: MTX_CMDID_NULL:"
++ " size(%d),seq (0x%04x)\n",
++ pad_cmd.size, pad_cmd.seq);
++
++#ifndef TOPAZ_RM_MULTI_MTX_WRITE
++ TOPAZ_BEGIN_CCB(dev_priv);
++ TOPAZ_OUT_CCB(dev_priv, pad_cmd.val);
++#else
++ topaz_write_mtx_mem(dev_priv,
++ topaz_priv->topaz_ccb_buffer_addr
++ + topaz_priv->topaz_cmd_windex * 4,
++ pad_cmd.val);
++ topaz_priv->topaz_cmd_windex++;
++#endif
++ TOPAZ_END_CCB(dev_priv, 1);
++
++ POLL_WB_SEQ(dev_priv, pad_cmd.seq);
++ ++topaz_priv->topaz_cmd_seq;
++ }
++ POLL_WB_RINDEX(dev_priv, 0);
++ if (ret == 0)
++ topaz_priv->topaz_cmd_windex = 0;
++ else {
++ DRM_ERROR("TOPAZ: poll rindex timeout\n");
++ return ret; /* HW may hang, need reset */
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: -------wrap CCB was done.\n");
++ }
++
++ read_index = CCB_CTRL_RINDEX(dev_priv);/* temperily use CCB CTRL */
++ write_index = topaz_priv->topaz_cmd_windex;
++
++ PSB_DEBUG_GENERAL("TOPAZ: write index(%d), read index(%d,WB=%d)\n",
++ write_index, read_index, WB_CCB_CTRL_RINDEX(dev_priv));
++
++#ifndef TOPAZ_RM_MULTI_MTX_WRITE
++ TOPAZ_BEGIN_CCB(dev_priv);
++ while (cmd_size > 0) {
++ TOPAZ_OUT_CCB(dev_priv, *cmd_pointer++);
++ --cmd_size;
++ }
++#else
++ while (cmd_size > 0) {
++ topaz_write_mtx_mem(
++ dev_priv,
++ topaz_priv->topaz_ccb_buffer_addr
++ + topaz_priv->topaz_cmd_windex * 4,
++ *cmd_pointer++);
++ topaz_priv->topaz_cmd_windex++;
++ --cmd_size;
++ }
++#endif
++ TOPAZ_END_CCB(dev_priv, 1);
++
++#if 0
++ DRM_UDELAY(1000);
++ lnc_topaz_clearirq(dev,
++ lnc_topaz_queryirq(dev));
++ LNC_TRACEL("TOPAZ: after clear, query again\n");
++ lnc_topaz_queryirq(dev_priv);
++#endif
++
++ return ret;
++}
++
++int lnc_topaz_dequeue_send(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct lnc_topaz_cmd_queue *topaz_cmd = NULL;
++ int ret;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ PSB_DEBUG_GENERAL("TOPAZ: dequeue command and send it to topaz\n");
++
++ if (list_empty(&topaz_priv->topaz_queue)) {
++ topaz_priv->topaz_busy = 0;
++ return 0;
++ }
++
++ topaz_cmd = list_first_entry(&topaz_priv->topaz_queue,
++ struct lnc_topaz_cmd_queue, head);
++
++ PSB_DEBUG_GENERAL("TOPAZ: queue has id %08x\n", topaz_cmd->sequence);
++ ret = lnc_topaz_send(dev, topaz_cmd->cmd, topaz_cmd->cmd_size,
++ topaz_cmd->sequence);
++ if (ret) {
++ DRM_ERROR("TOPAZ: lnc_topaz_send failed.\n");
++ ret = -EINVAL;
++ }
++
++ list_del(&topaz_cmd->head);
++ kfree(topaz_cmd->cmd);
++ kfree(topaz_cmd
++ );
++
++ return ret;
++}
++
++void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t kick_count)
++{
++ PSB_DEBUG_GENERAL("TOPAZ: kick mtx count(%d).\n", kick_count);
++ MTX_WRITE32(MTX_CR_MTX_KICK, kick_count);
++}
++
++int lnc_check_topaz_idle(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ if (topaz_priv->topaz_fw_loaded == 0)
++ return 0;
++
++ if (topaz_priv->topaz_busy)
++ return -EBUSY;
++
++ if (topaz_priv->topaz_hw_busy) {
++ PSB_DEBUG_PM("TOPAZ: %s, HW is busy\n", __func__);
++ return -EBUSY;
++ }
++
++ return 0; /* we think it is idle */
++}
++
++int lnc_video_frameskip(struct drm_device *dev, uint64_t user_pointer)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++ int ret;
++
++ ret = copy_to_user((void __user *) ((unsigned long)user_pointer),
++ &topaz_priv->frame_skip, sizeof(topaz_priv->frame_skip));
++
++ if (ret)
++ return -EFAULT;
++
++ return 0;
++}
++
++static void lnc_topaz_flush_cmd_queue(struct topaz_private *topaz_priv)
++{
++ struct lnc_topaz_cmd_queue *entry, *next;
++
++ /* remind to reset topaz */
++ topaz_priv->topaz_needs_reset = 1;
++
++ if (list_empty(&topaz_priv->topaz_queue)) {
++ topaz_priv->topaz_busy = 0;
++ return;
++ }
++
++ /* flush all command in queue */
++ list_for_each_entry_safe(entry, next,
++ &topaz_priv->topaz_queue,
++ head) {
++ list_del(&entry->head);
++ kfree(entry->cmd);
++ kfree(entry);
++ }
++
++ return;
++}
++
++void lnc_topaz_handle_timeout(struct ttm_fence_device *fdev)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(fdev, struct drm_psb_private, fdev);
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ lnc_topaz_flush_cmd_queue(topaz_priv);
++}
++
++inline int psb_try_power_down_topaz(struct drm_device *dev)
++{
++ ospm_apm_power_down_topaz(dev);
++ return 0;
++}
++
++void lnc_map_topaz_reg(struct drm_device *dev)
++{
++ unsigned long resource_start;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++
++ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
++
++ if (IS_MRST(dev) && !dev_priv->topaz_disabled) {
++ dev_priv->topaz_reg =
++ ioremap(resource_start + LNC_TOPAZ_OFFSET,
++ LNC_TOPAZ_SIZE);
++ if (!dev_priv->topaz_reg)
++ DRM_ERROR("failed to map TOPAZ register address\n");
++ }
++
++ return;
++}
++
++void lnc_unmap_topaz_reg(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++
++ if (IS_MRST(dev)) {
++ if (dev_priv->topaz_reg) {
++ iounmap(dev_priv->topaz_reg);
++ dev_priv->topaz_reg = NULL;
++ }
++ }
++
++ return;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/lnc_topaz.h b/drivers/gpu/drm/mrst/drv/lnc_topaz.h
+new file mode 100644
+index 0000000..7511c32
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/lnc_topaz.h
+@@ -0,0 +1,925 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef _LNC_TOPAZ_H_
++#define _LNC_TOPAZ_H_
++
++#include "psb_drv.h"
++#include "img_types.h"
++
++#define LNC_TOPAZ_NO_IRQ 0
++#define TOPAZ_MTX_REG_SIZE (34 * 4 + 183 * 4)
++
++extern int drm_topaz_pmpolicy;
++
++/*
++ * MACROS to insert values into fields within a word. The basename of the
++ * field must have MASK_BASENAME and SHIFT_BASENAME constants.
++ */
++#define MM_WRITE32(base, offset, value) \
++do { \
++ *((unsigned long *)((unsigned char *)(dev_priv->topaz_reg) \
++ + base + offset)) = value; \
++} while (0)
++
++#define MM_READ32(base, offset, pointer) \
++do { \
++ *(pointer) = *((unsigned long *)((unsigned char *)(dev_priv->topaz_reg)\
++ + base + offset)); \
++} while (0)
++
++#define F_MASK(basename) (MASK_##basename)
++#define F_SHIFT(basename) (SHIFT_##basename)
++
++#define F_ENCODE(val, basename) \
++ (((val) << (F_SHIFT(basename))) & (F_MASK(basename)))
++
++/* MVEA macro */
++#define MVEA_START 0x03000
++
++#define MVEA_WRITE32(offset, value) MM_WRITE32(MVEA_START, offset, value)
++#define MVEA_READ32(offset, pointer) MM_READ32(MVEA_START, offset, pointer);
++
++#define F_MASK_MVEA(basename) (MASK_MVEA_##basename) /* MVEA */
++#define F_SHIFT_MVEA(basename) (SHIFT_MVEA_##basename) /* MVEA */
++#define F_ENCODE_MVEA(val, basename) \
++ (((val)<<(F_SHIFT_MVEA(basename)))&(F_MASK_MVEA(basename)))
++
++/* VLC macro */
++#define TOPAZ_VLC_START 0x05000
++
++/* TOPAZ macro */
++#define TOPAZ_START 0x02000
++
++#define TOPAZ_WRITE32(offset, value) MM_WRITE32(TOPAZ_START, offset, value)
++#define TOPAZ_READ32(offset, pointer) MM_READ32(TOPAZ_START, offset, pointer)
++
++#define F_MASK_TOPAZ(basename) (MASK_TOPAZ_##basename)
++#define F_SHIFT_TOPAZ(basename) (SHIFT_TOPAZ_##basename)
++#define F_ENCODE_TOPAZ(val, basename) \
++ (((val)<<(F_SHIFT_TOPAZ(basename)))&(F_MASK_TOPAZ(basename)))
++
++/* MTX macro */
++#define MTX_START 0x0
++
++#define MTX_WRITE32(offset, value) MM_WRITE32(MTX_START, offset, value)
++#define MTX_READ32(offset, pointer) MM_READ32(MTX_START, offset, pointer)
++
++/* DMAC macro */
++#define DMAC_START 0x0f000
++
++#define DMAC_WRITE32(offset, value) MM_WRITE32(DMAC_START, offset, value)
++#define DMAC_READ32(offset, pointer) MM_READ32(DMAC_START, offset, pointer)
++
++#define F_MASK_DMAC(basename) (MASK_DMAC_##basename)
++#define F_SHIFT_DMAC(basename) (SHIFT_DMAC_##basename)
++#define F_ENCODE_DMAC(val, basename) \
++ (((val)<<(F_SHIFT_DMAC(basename)))&(F_MASK_DMAC(basename)))
++
++
++/* Register CR_IMG_TOPAZ_INTENAB */
++#define TOPAZ_CR_IMG_TOPAZ_INTENAB 0x0008
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x00000001
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA 0x0008
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x80000000
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 31
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MAS_INTEN 0x0008
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x00000008
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 3
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT 0x0008
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x00000002
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 1
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX 0x0008
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x00000004
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 2
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT 0x0008
++
++#define TOPAZ_CR_IMG_TOPAZ_INTCLEAR 0x000C
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x00000001
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA 0x000C
++
++#define TOPAZ_CR_IMG_TOPAZ_INTSTAT 0x0004
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x00000001
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MVEA 0x0004
++
++#define MTX_CCBCTRL_ROFF 0
++#define MTX_CCBCTRL_COMPLETE 4
++#define MTX_CCBCTRL_CCBSIZE 8
++#define MTX_CCBCTRL_QP 12
++#define MTX_CCBCTRL_FRAMESKIP 20
++#define MTX_CCBCTRL_INITQP 24
++
++#define TOPAZ_CR_MMU_STATUS 0x001C
++#define MASK_TOPAZ_CR_MMU_PF_N_RW 0x00000001
++#define SHIFT_TOPAZ_CR_MMU_PF_N_RW 0
++#define REGNUM_TOPAZ_CR_MMU_PF_N_RW 0x001C
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x00000008
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 3
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT 0x000C
++
++#define TOPAZ_CR_MMU_MEM_REQ 0x0020
++#define MASK_TOPAZ_CR_MEM_REQ_STAT_READS 0x000000FF
++#define SHIFT_TOPAZ_CR_MEM_REQ_STAT_READS 0
++#define REGNUM_TOPAZ_CR_MEM_REQ_STAT_READS 0x0020
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x00000002
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 1
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX 0x000C
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x00000004
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 2
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT 0x000C
++
++#define MTX_CR_MTX_KICK 0x0080
++#define MASK_MTX_MTX_KICK 0x0000FFFF
++#define SHIFT_MTX_MTX_KICK 0
++#define REGNUM_MTX_MTX_KICK 0x0080
++
++#define MTX_DATA_MEM_BASE 0x82880000
++
++#define MTX_CR_MTX_RAM_ACCESS_CONTROL 0x0108
++#define MASK_MTX_MTX_MCMR 0x00000001
++#define SHIFT_MTX_MTX_MCMR 0
++#define REGNUM_MTX_MTX_MCMR 0x0108
++
++#define MASK_MTX_MTX_MCMID 0x0FF00000
++#define SHIFT_MTX_MTX_MCMID 20
++#define REGNUM_MTX_MTX_MCMID 0x0108
++
++#define MASK_MTX_MTX_MCM_ADDR 0x000FFFFC
++#define SHIFT_MTX_MTX_MCM_ADDR 2
++#define REGNUM_MTX_MTX_MCM_ADDR 0x0108
++
++#define MTX_CR_MTX_RAM_ACCESS_STATUS 0x010C
++#define MASK_MTX_MTX_MTX_MCM_STAT 0x00000001
++#define SHIFT_MTX_MTX_MTX_MCM_STAT 0
++#define REGNUM_MTX_MTX_MTX_MCM_STAT 0x010C
++
++#define MASK_MTX_MTX_MCMAI 0x00000002
++#define SHIFT_MTX_MTX_MCMAI 1
++#define REGNUM_MTX_MTX_MCMAI 0x0108
++
++#define MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER 0x0104
++
++#define MVEA_CR_MVEA_BUSY 0x0018
++#define MVEA_CR_MVEA_DMACMDFIFO_WAIT 0x001C
++#define MVEA_CR_MVEA_DMACMDFIFO_STATUS 0x0020
++
++#define MVEA_CR_IMG_MVEA_SRST 0x0000
++#define MASK_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x00000001
++#define SHIFT_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0
++#define REGNUM_MVEA_CR_IMG_MVEA_SPE_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x00000002
++#define SHIFT_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 1
++#define REGNUM_MVEA_CR_IMG_MVEA_IPE_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x00000004
++#define SHIFT_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 2
++#define REGNUM_MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x00000008
++#define SHIFT_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 3
++#define REGNUM_MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x00000010
++#define SHIFT_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 4
++#define REGNUM_MVEA_CR_IMG_MVEA_CMC_SOFT_RESET 0x0000
++
++#define MASK_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x00000020
++#define SHIFT_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 5
++#define REGNUM_MVEA_CR_IMG_MVEA_DCF_SOFT_RESET 0x0000
++
++#define TOPAZ_CR_IMG_TOPAZ_CORE_ID 0x03C0
++#define TOPAZ_CR_IMG_TOPAZ_CORE_REV 0x03D0
++
++#define TOPAZ_MTX_PC (0x00000005)
++#define PC_START_ADDRESS (0x80900000)
++
++#define TOPAZ_CR_TOPAZ_AUTO_CLK_GATE 0x0014
++#define MASK_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x00000001
++#define SHIFT_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0
++#define REGNUM_TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE 0x0014
++
++#define MASK_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x00000002
++#define SHIFT_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 1
++#define REGNUM_TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE 0x0014
++
++#define MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x00000002
++#define SHIFT_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 1
++#define REGNUM_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE 0x0010
++
++#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET 0x000000F8
++#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET 0x000000FC
++#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK 0x00010000
++#define MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK 0x80000000
++
++#define TOPAZ_CORE_CR_MTX_DEBUG_OFFSET 0x0000003C
++
++#define MASK_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x00000004
++#define SHIFT_TOPAZ_CR_MTX_DBG_IS_SLAVE 2
++#define REGNUM_TOPAZ_CR_MTX_DBG_IS_SLAVE 0x003C
++
++#define MASK_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x00000018
++#define SHIFT_TOPAZ_CR_MTX_DBG_GPIO_OUT 3
++#define REGNUM_TOPAZ_CR_MTX_DBG_GPIO_OUT 0x003C
++
++#define MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET 0x00000108
++
++#define TOPAZ_CR_MMU_CONTROL0 0x0024
++#define MASK_TOPAZ_CR_MMU_BYPASS 0x00000800
++#define SHIFT_TOPAZ_CR_MMU_BYPASS 11
++#define REGNUM_TOPAZ_CR_MMU_BYPASS 0x0024
++
++#define TOPAZ_CR_MMU_DIR_LIST_BASE(X) (0x0030 + (4 * (X)))
++#define MASK_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0xFFFFF000
++#define SHIFT_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 12
++#define REGNUM_TOPAZ_CR_MMU_DIR_LIST_BASE_ADDR 0x0030
++
++#define MASK_TOPAZ_CR_MMU_INVALDC 0x00000008
++#define SHIFT_TOPAZ_CR_MMU_INVALDC 3
++#define REGNUM_TOPAZ_CR_MMU_INVALDC 0x0024
++
++#define MASK_TOPAZ_CR_MMU_FLUSH 0x00000004
++#define SHIFT_TOPAZ_CR_MMU_FLUSH 2
++#define REGNUM_TOPAZ_CR_MMU_FLUSH 0x0024
++
++#define TOPAZ_CR_MMU_BANK_INDEX 0x0038
++#define MASK_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (0x00000003 << (8 + ((i) * 2)))
++#define SHIFT_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) (8 + ((i) * 2))
++#define REGNUM_TOPAZ_CR_MMU_BANK_N_INDEX_M(i) 0x0038
++
++#define TOPAZ_CR_TOPAZ_MAN_CLK_GATE 0x0010
++#define MASK_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0x00000001
++#define SHIFT_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0
++#define REGNUM_TOPAZ_CR_TOPAZ_MVEA_MAN_CLK_GATE 0x0010
++
++#define MTX_CORE_CR_MTX_TXRPT_OFFSET 0x0000000c
++#define TXRPT_WAITONKICK_VALUE 0x8ade0000
++
++#define MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK 0x00000002
++
++#define MTX_CORE_CR_MTX_ENABLE_OFFSET 0x00000000
++#define MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK 0x00000001
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x00000002
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 1
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_INTS_MTX 0x0004
++
++#define MTX_CORE_CR_MTX_SOFT_RESET_OFFSET 0x00000200
++#define MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK 0x00000001
++
++#define MTX_CR_MTX_SYSC_CDMAA 0x0344
++#define MASK_MTX_CDMAA_ADDRESS 0x03FFFFFC
++#define SHIFT_MTX_CDMAA_ADDRESS 2
++#define REGNUM_MTX_CDMAA_ADDRESS 0x0344
++
++#define MTX_CR_MTX_SYSC_CDMAC 0x0340
++#define MASK_MTX_LENGTH 0x0000FFFF
++#define SHIFT_MTX_LENGTH 0
++#define REGNUM_MTX_LENGTH 0x0340
++
++#define MASK_MTX_BURSTSIZE 0x07000000
++#define SHIFT_MTX_BURSTSIZE 24
++#define REGNUM_MTX_BURSTSIZE 0x0340
++
++#define MASK_MTX_RNW 0x00020000
++#define SHIFT_MTX_RNW 17
++#define REGNUM_MTX_RNW 0x0340
++
++#define MASK_MTX_ENABLE 0x00010000
++#define SHIFT_MTX_ENABLE 16
++#define REGNUM_MTX_ENABLE 0x0340
++
++#define MASK_MTX_LENGTH 0x0000FFFF
++#define SHIFT_MTX_LENGTH 0
++#define REGNUM_MTX_LENGTH 0x0340
++
++#define TOPAZ_CR_IMG_TOPAZ_SRST 0x0000
++#define MASK_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x00000001
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET 0x0000
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x00000008
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 3
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET 0x0000
++
++#define MASK_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x00000002
++#define SHIFT_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 1
++#define REGNUM_TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET 0x0000
++
++#define MVEA_CR_MVEA_AUTO_CLOCK_GATING 0x0024
++#define MASK_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x00000001
++#define SHIFT_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0
++#define REGNUM_MVEA_CR_MVEA_SPE_AUTO_CLK_GATE 0x0024
++
++#define MASK_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x00000002
++#define SHIFT_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 1
++#define REGNUM_MVEA_CR_MVEA_IPE_AUTO_CLK_GATE 0x0024
++
++#define MASK_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x00000004
++#define SHIFT_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 2
++#define REGNUM_MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE 0x0024
++
++#define MASK_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x00000008
++#define SHIFT_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 3
++#define REGNUM_MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE 0x0024
++
++#define TOPAZ_CR_IMG_TOPAZ_DMAC_MODE 0x0040
++#define MASK_TOPAZ_CR_DMAC_MASTER_MODE 0x00000001
++#define SHIFT_TOPAZ_CR_DMAC_MASTER_MODE 0
++#define REGNUM_TOPAZ_CR_DMAC_MASTER_MODE 0x0040
++
++#define MTX_CR_MTX_SYSC_CDMAT 0x0350
++#define MASK_MTX_TRANSFERDATA 0xFFFFFFFF
++#define SHIFT_MTX_TRANSFERDATA 0
++#define REGNUM_MTX_TRANSFERDATA 0x0350
++
++#define IMG_SOC_DMAC_IRQ_STAT(X) (0x000C + (32 * (X)))
++#define MASK_IMG_SOC_TRANSFER_FIN 0x00020000
++#define SHIFT_IMG_SOC_TRANSFER_FIN 17
++#define REGNUM_IMG_SOC_TRANSFER_FIN 0x000C
++
++#define IMG_SOC_DMAC_COUNT(X) (0x0004 + (32 * (X)))
++#define MASK_IMG_SOC_CNT 0x0000FFFF
++#define SHIFT_IMG_SOC_CNT 0
++#define REGNUM_IMG_SOC_CNT 0x0004
++
++#define MASK_IMG_SOC_EN 0x00010000
++#define SHIFT_IMG_SOC_EN 16
++#define REGNUM_IMG_SOC_EN 0x0004
++
++#define MASK_IMG_SOC_LIST_EN 0x00040000
++#define SHIFT_IMG_SOC_LIST_EN 18
++#define REGNUM_IMG_SOC_LIST_EN 0x0004
++
++#define IMG_SOC_DMAC_PER_HOLD(X) (0x0018 + (32 * (X)))
++#define MASK_IMG_SOC_PER_HOLD 0x0000007F
++#define SHIFT_IMG_SOC_PER_HOLD 0
++#define REGNUM_IMG_SOC_PER_HOLD 0x0018
++
++#define IMG_SOC_DMAC_SETUP(X) (0x0000 + (32 * (X)))
++#define MASK_IMG_SOC_START_ADDRESS 0xFFFFFFF
++#define SHIFT_IMG_SOC_START_ADDRESS 0
++#define REGNUM_IMG_SOC_START_ADDRESS 0x0000
++
++#define MASK_IMG_SOC_BSWAP 0x40000000
++#define SHIFT_IMG_SOC_BSWAP 30
++#define REGNUM_IMG_SOC_BSWAP 0x0004
++
++#define MASK_IMG_SOC_PW 0x18000000
++#define SHIFT_IMG_SOC_PW 27
++#define REGNUM_IMG_SOC_PW 0x0004
++
++#define MASK_IMG_SOC_DIR 0x04000000
++#define SHIFT_IMG_SOC_DIR 26
++#define REGNUM_IMG_SOC_DIR 0x0004
++
++#define MASK_IMG_SOC_PI 0x03000000
++#define SHIFT_IMG_SOC_PI 24
++#define REGNUM_IMG_SOC_PI 0x0004
++#define IMG_SOC_PI_1 0x00000002
++#define IMG_SOC_PI_2 0x00000001
++#define IMG_SOC_PI_4 0x00000000
++
++#define MASK_IMG_SOC_TRANSFER_IEN 0x20000000
++#define SHIFT_IMG_SOC_TRANSFER_IEN 29
++#define REGNUM_IMG_SOC_TRANSFER_IEN 0x0004
++
++#define DMAC_VALUE_COUNT(BSWAP, PW, DIR, PERIPH_INCR, COUNT) \
++ ((((BSWAP) << SHIFT_IMG_SOC_BSWAP) & MASK_IMG_SOC_BSWAP)| \
++ (((PW) << SHIFT_IMG_SOC_PW) & MASK_IMG_SOC_PW)| \
++ (((DIR) << SHIFT_IMG_SOC_DIR) & MASK_IMG_SOC_DIR)| \
++ (((PERIPH_INCR) << SHIFT_IMG_SOC_PI) & MASK_IMG_SOC_PI)| \
++ (((COUNT) << SHIFT_IMG_SOC_CNT) & MASK_IMG_SOC_CNT))
++
++#define IMG_SOC_DMAC_PERIPH(X) (0x0008 + (32 * (X)))
++#define MASK_IMG_SOC_EXT_SA 0x0000000F
++#define SHIFT_IMG_SOC_EXT_SA 0
++#define REGNUM_IMG_SOC_EXT_SA 0x0008
++
++#define MASK_IMG_SOC_ACC_DEL 0xE0000000
++#define SHIFT_IMG_SOC_ACC_DEL 29
++#define REGNUM_IMG_SOC_ACC_DEL 0x0008
++
++#define MASK_IMG_SOC_INCR 0x08000000
++#define SHIFT_IMG_SOC_INCR 27
++#define REGNUM_IMG_SOC_INCR 0x0008
++
++#define MASK_IMG_SOC_BURST 0x07000000
++#define SHIFT_IMG_SOC_BURST 24
++#define REGNUM_IMG_SOC_BURST 0x0008
++
++#define DMAC_VALUE_PERIPH_PARAM(ACC_DEL, INCR, BURST) \
++((((ACC_DEL) << SHIFT_IMG_SOC_ACC_DEL) & MASK_IMG_SOC_ACC_DEL)| \
++(((INCR) << SHIFT_IMG_SOC_INCR) & MASK_IMG_SOC_INCR)| \
++(((BURST) << SHIFT_IMG_SOC_BURST) & MASK_IMG_SOC_BURST))
++
++#define IMG_SOC_DMAC_PERIPHERAL_ADDR(X) (0x0014 + (32 * (X)))
++#define MASK_IMG_SOC_ADDR 0x007FFFFF
++#define SHIFT_IMG_SOC_ADDR 0
++#define REGNUM_IMG_SOC_ADDR 0x0014
++
++#define SHIFT_TOPAZ_VEC_BUSY 11
++#define MASK_TOPAZ_VEC_BUSY (0x1<<SHIFT_TOPAZ_VEC_BUSY)
++
++#define TOPAZ_MTX_TXRPT_OFFSET 0xc
++#define TOPAZ_GUNIT_GVD_PSMI_GFX_OFFSET 0x20D0
++
++#define TOPAZ_GUNIT_READ32(offset) ioread32(dev_priv->vdc_reg + offset)
++#define TOPAZ_READ_BITS(val, basename) \
++ (((val)&MASK_TOPAZ_##basename)>>SHIFT_TOPAZ_##basename)
++
++#define TOPAZ_WAIT_UNTIL_IDLE \
++ do { \
++ uint8_t tmp_poll_number = 0;\
++ uint32_t tmp_reg; \
++ if (topaz_priv->topaz_cmd_windex == WB_CCB_CTRL_RINDEX(dev_priv)) { \
++ tmp_reg = TOPAZ_GUNIT_READ32(TOPAZ_GUNIT_GVD_PSMI_GFX_OFFSET);\
++ if (0 != TOPAZ_READ_BITS(tmp_reg, VEC_BUSY)) { \
++ MTX_READ32(TOPAZ_MTX_TXRPT_OFFSET, &tmp_reg);\
++ while ((tmp_reg != 0x8ade0000) && \
++ (tmp_poll_number++ < 10)) \
++ MTX_READ32(0xc, &tmp_reg); \
++ PSB_DEBUG_GENERAL( \
++ "TOPAZ: TXRPT reg remain: %x,poll %d times.\n",\
++ tmp_reg, tmp_poll_number);\
++ } \
++ } \
++ } while (0)
++
++/* **************** DMAC define **************** */
++enum DMAC_eBSwap {
++ DMAC_BSWAP_NO_SWAP = 0x0,/* !< No byte swapping will be performed. */
++ DMAC_BSWAP_REVERSE = 0x1,/* !< Byte order will be reversed. */
++};
++
++enum DMAC_ePW {
++ DMAC_PWIDTH_32_BIT = 0x0,/* !< Peripheral width 32-bit. */
++ DMAC_PWIDTH_16_BIT = 0x1,/* !< Peripheral width 16-bit. */
++ DMAC_PWIDTH_8_BIT = 0x2,/* !< Peripheral width 8-bit. */
++};
++
++enum DMAC_eAccDel {
++ DMAC_ACC_DEL_0 = 0x0, /* !< Access delay zero clock cycles */
++ DMAC_ACC_DEL_256 = 0x1, /* !< Access delay 256 clock cycles */
++ DMAC_ACC_DEL_512 = 0x2, /* !< Access delay 512 clock cycles */
++ DMAC_ACC_DEL_768 = 0x3, /* !< Access delay 768 clock cycles */
++ DMAC_ACC_DEL_1024 = 0x4,/* !< Access delay 1024 clock cycles */
++ DMAC_ACC_DEL_1280 = 0x5,/* !< Access delay 1280 clock cycles */
++ DMAC_ACC_DEL_1536 = 0x6,/* !< Access delay 1536 clock cycles */
++ DMAC_ACC_DEL_1792 = 0x7,/* !< Access delay 1792 clock cycles */
++};
++
++enum DMAC_eBurst {
++ DMAC_BURST_0 = 0x0, /* !< burst size of 0 */
++ DMAC_BURST_1 = 0x1, /* !< burst size of 1 */
++ DMAC_BURST_2 = 0x2, /* !< burst size of 2 */
++ DMAC_BURST_3 = 0x3, /* !< burst size of 3 */
++ DMAC_BURST_4 = 0x4, /* !< burst size of 4 */
++ DMAC_BURST_5 = 0x5, /* !< burst size of 5 */
++ DMAC_BURST_6 = 0x6, /* !< burst size of 6 */
++ DMAC_BURST_7 = 0x7, /* !< burst size of 7 */
++};
++
++/* commands for topaz,shared with user space driver */
++enum drm_lnc_topaz_cmd {
++ MTX_CMDID_NULL = 0,
++ MTX_CMDID_DO_HEADER = 1,
++ MTX_CMDID_ENCODE_SLICE = 2,
++ MTX_CMDID_WRITEREG = 3,
++ MTX_CMDID_START_PIC = 4,
++ MTX_CMDID_END_PIC = 5,
++ MTX_CMDID_SYNC = 6,
++ MTX_CMDID_ENCODE_ONE_ROW = 7,
++ MTX_CMDID_FLUSH = 8,
++ MTX_CMDID_SW_LEAVE_LOWPOWER = 0x7c,
++ MTX_CMDID_SW_ENTER_LOWPOWER = 0x7e,
++ MTX_CMDID_SW_NEW_CODEC = 0x7f
++};
++
++/* codecs topaz supports,shared with user space driver */
++enum drm_lnc_topaz_codec {
++ IMG_CODEC_JPEG = 0,
++ IMG_CODEC_H264_NO_RC,
++ IMG_CODEC_H264_VBR,
++ IMG_CODEC_H264_CBR,
++ IMG_CODEC_H263_NO_RC,
++ IMG_CODEC_H263_VBR,
++ IMG_CODEC_H263_CBR,
++ IMG_CODEC_MPEG4_NO_RC,
++ IMG_CODEC_MPEG4_VBR,
++ IMG_CODEC_MPEG4_CBR,
++ IMG_CODEC_NUM
++};
++
++/* XXX: it's a copy of msvdx cmd queue. should have some change? */
++struct lnc_topaz_cmd_queue {
++ struct list_head head;
++ void *cmd;
++ unsigned long cmd_size;
++ uint32_t sequence;
++};
++
++
++struct topaz_cmd_header {
++ union {
++ struct {
++ unsigned long enable_interrupt:1;
++ unsigned long id:7;
++ unsigned long size:8;
++ unsigned long seq:16;
++ };
++ uint32_t val;
++ };
++};
++
++/* define structure */
++/* firmware file's info head */
++struct topaz_fwinfo {
++ unsigned int ver:16;
++ unsigned int codec:16;
++
++ unsigned int text_size;
++ unsigned int data_size;
++ unsigned int data_location;
++};
++
++/* firmware data array define */
++struct topaz_codec_fw {
++ uint32_t ver;
++ uint32_t codec;
++
++ uint32_t text_size;
++ uint32_t data_size;
++ uint32_t data_location;
++
++ struct ttm_buffer_object *text;
++ struct ttm_buffer_object *data;
++};
++
++struct topaz_private {
++ /* current video task */
++ unsigned int pmstate;
++ struct sysfs_dirent *sysfs_pmstate;
++ int frame_skip;
++
++ void *topaz_mtx_reg_state;
++ struct ttm_buffer_object *topaz_mtx_data_mem;
++ uint32_t topaz_cur_codec;
++ uint32_t cur_mtx_data_size;
++ int topaz_needs_reset;
++
++ /*
++ *topaz command queue
++ */
++ spinlock_t topaz_lock;
++ struct mutex topaz_mutex;
++ struct list_head topaz_queue;
++ int topaz_busy; /* 0 means topaz is free */
++ int topaz_fw_loaded;
++
++ /* topaz ccb data */
++ /* XXX: should the addr stored by 32 bits? more compatible way?? */
++ uint32_t topaz_ccb_buffer_addr;
++ uint32_t topaz_ccb_ctrl_addr;
++ uint32_t topaz_ccb_size;
++ uint32_t topaz_cmd_windex;
++ uint16_t topaz_cmd_seq;
++
++ uint32_t stored_initial_qp;
++ uint32_t topaz_dash_access_ctrl;
++
++ struct ttm_buffer_object *topaz_bo; /* 4K->2K/2K for writeback/sync */
++ struct ttm_bo_kmap_obj topaz_bo_kmap;
++ void *topaz_ccb_wb;
++ uint32_t topaz_wb_offset;
++ uint32_t *topaz_sync_addr;
++ uint32_t topaz_sync_offset;
++ uint32_t topaz_sync_cmd_seq;
++ uint32_t topaz_mtx_saved;
++
++ /* firmware */
++ struct topaz_codec_fw topaz_fw[IMG_CODEC_NUM];
++
++ uint32_t topaz_hw_busy;
++};
++
++/* external function declare */
++/* lnc_topazinit.c */
++int lnc_topaz_init(struct drm_device *dev);
++int lnc_topaz_uninit(struct drm_device *dev);
++int lnc_topaz_reset(struct drm_psb_private *dev_priv);
++int topaz_init_fw(struct drm_device *dev);
++int topaz_setup_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec);
++int topaz_wait_for_register(struct drm_psb_private *dev_priv,
++ uint32_t addr, uint32_t value,
++ uint32_t enable);
++void topaz_write_mtx_mem(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr, uint32_t val);
++uint32_t topaz_read_mtx_mem(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr);
++void topaz_write_mtx_mem_multiple_setup(struct drm_psb_private *dev_priv,
++ uint32_t addr);
++void topaz_write_mtx_mem_multiple(struct drm_psb_private *dev_priv,
++ uint32_t val);
++void topaz_mmu_flushcache(struct drm_psb_private *dev_priv);
++int lnc_topaz_save_mtx_state(struct drm_device *dev);
++int lnc_topaz_restore_mtx_state(struct drm_device *dev);
++
++/* lnc_topaz.c */
++IMG_BOOL lnc_topaz_interrupt(IMG_VOID *pvData);
++
++int lnc_cmdbuf_video(struct drm_file *priv,
++ struct list_head *validate_list,
++ uint32_t fence_type,
++ struct drm_psb_cmdbuf_arg *arg,
++ struct ttm_buffer_object *cmd_buffer,
++ struct psb_ttm_fence_rep *fence_arg);
++
++void topaz_mtx_kick(struct drm_psb_private *dev_priv, uint32_t kick_cout);
++void lnc_topaz_handle_timeout(struct ttm_fence_device *fdev);
++
++uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver);
++int lnc_wait_topaz_idle(struct drm_device *dev);
++int lnc_check_topaz_idle(struct drm_device *dev);
++void lnc_unmap_topaz_reg(struct drm_device *dev);
++void lnc_map_topaz_reg(struct drm_device *dev);
++
++/* macros to get/set CCB control data */
++#define WB_CCB_CTRL_RINDEX(dev_priv) \
++(*((uint32_t *)((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_wb))
++
++#define WB_CCB_CTRL_SEQ(dev_priv) \
++(*((uint32_t *)((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_wb\
++ + 1))
++
++#define POLL_WB_RINDEX(dev_priv, value) \
++do { \
++ int i; \
++ for (i = 0; i < 10000; i++) { \
++ if (WB_CCB_CTRL_RINDEX(dev_priv) == value) \
++ break; \
++ else \
++ DRM_UDELAY(100); \
++ } \
++ if (WB_CCB_CTRL_RINDEX(dev_priv) != value) { \
++ DRM_ERROR("TOPAZ: poll rindex timeout\n"); \
++ ret = -EBUSY; \
++ } \
++} while (0)
++
++#define POLL_WB_SEQ(dev_priv, value) \
++do { \
++ int i; \
++ for (i = 0; i < 10000; i++) { \
++ if (CCB_CTRL_SEQ(dev_priv) == value) \
++ break; \
++ else \
++ DRM_UDELAY(1000); \
++ } \
++ if (CCB_CTRL_SEQ(dev_priv) != value) { \
++ DRM_ERROR("TOPAZ:poll mtxseq timeout,0x%08x(mtx) vs 0x%08x\n",\
++ WB_CCB_CTRL_SEQ(dev_priv), value); \
++ ret = -EBUSY; \
++ } \
++} while (0)
++
++#define CCB_CTRL_RINDEX(dev_priv) \
++ topaz_read_mtx_mem(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
++ + MTX_CCBCTRL_ROFF)
++
++#define CCB_CTRL_RINDEX(dev_priv) \
++ topaz_read_mtx_mem(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
++ + MTX_CCBCTRL_ROFF)
++
++#define CCB_CTRL_QP(dev_priv) \
++ topaz_read_mtx_mem(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
++ + MTX_CCBCTRL_QP)
++
++#define CCB_CTRL_SEQ(dev_priv) \
++ topaz_read_mtx_mem(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
++ + MTX_CCBCTRL_COMPLETE)
++
++#define CCB_CTRL_FRAMESKIP(dev_priv) \
++ topaz_read_mtx_mem(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
++ + MTX_CCBCTRL_FRAMESKIP)
++
++#define CCB_CTRL_SET_QP(dev_priv, qp) \
++ topaz_write_mtx_mem(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
++ + MTX_CCBCTRL_QP, qp)
++
++#define CCB_CTRL_SET_INITIALQP(dev_priv, qp) \
++ topaz_write_mtx_mem(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_ctrl_addr \
++ + MTX_CCBCTRL_INITQP, qp)
++
++
++#define TOPAZ_BEGIN_CCB(dev_priv) \
++ topaz_write_mtx_mem_multiple_setup(dev_priv, \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_ccb_buffer_addr + \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_cmd_windex * 4)
++
++#define TOPAZ_OUT_CCB(dev_priv, cmd) \
++do { \
++ topaz_write_mtx_mem_multiple(dev_priv, cmd); \
++ ((struct topaz_private *)dev_priv->topaz_private)->topaz_cmd_windex++; \
++} while (0)
++
++#define TOPAZ_END_CCB(dev_priv, kick_count) \
++ topaz_mtx_kick(dev_priv, 1);
++
++static inline char *cmd_to_string(int cmd_id)
++{
++ switch (cmd_id) {
++ case MTX_CMDID_START_PIC:
++ return "MTX_CMDID_START_PIC";
++ case MTX_CMDID_END_PIC:
++ return "MTX_CMDID_END_PIC";
++ case MTX_CMDID_DO_HEADER:
++ return "MTX_CMDID_DO_HEADER";
++ case MTX_CMDID_ENCODE_SLICE:
++ return "MTX_CMDID_ENCODE_SLICE";
++ case MTX_CMDID_SYNC:
++ return "MTX_CMDID_SYNC";
++
++ default:
++ return "Undefined command";
++
++ }
++}
++
++static inline char *codec_to_string(int codec)
++{
++ switch (codec) {
++ case IMG_CODEC_H264_NO_RC:
++ return "H264_NO_RC";
++ case IMG_CODEC_H264_VBR:
++ return "H264_VBR";
++ case IMG_CODEC_H264_CBR:
++ return "H264_CBR";
++ case IMG_CODEC_H263_NO_RC:
++ return "H263_NO_RC";
++ case IMG_CODEC_H263_VBR:
++ return "H263_VBR";
++ case IMG_CODEC_H263_CBR:
++ return "H263_CBR";
++ case IMG_CODEC_MPEG4_NO_RC:
++ return "MPEG4_NO_RC";
++ case IMG_CODEC_MPEG4_VBR:
++ return "MPEG4_VBR";
++ case IMG_CODEC_MPEG4_CBR:
++ return "MPEG4_CBR";
++ default:
++ return "Undefined codec";
++ }
++}
++
++
++static inline void lnc_topaz_enableirq(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ /* uint32_t ier = dev_priv->vdc_irq_mask | _LNC_IRQ_TOPAZ_FLAG; */
++
++ PSB_DEBUG_IRQ("TOPAZ: enable IRQ\n");
++
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MAS_INTEN) |
++ /* F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MVEA) | */
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MMU_FAULT) |
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX) |
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTEN_MTX_HALT));
++
++ /* write in sysirq.c */
++ /* PSB_WVDC32(ier, PSB_INT_ENABLE_R); /\* essential *\/ */
++}
++
++static inline void lnc_topaz_disableirq(struct drm_device *dev)
++{
++
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ /* uint32_t ier = dev_priv->vdc_irq_mask & (~_LNC_IRQ_TOPAZ_FLAG); */
++
++ PSB_DEBUG_INIT("TOPAZ: disable IRQ\n");
++
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTENAB, 0);
++
++ /* write in sysirq.c */
++ /* PSB_WVDC32(ier, PSB_INT_ENABLE_R); /\* essential *\/ */
++}
++
++
++static inline void lnc_topaz_clearirq(struct drm_device *dev,
++ uint32_t clear_topaz)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ PSB_DEBUG_INIT("TOPAZ: clear IRQ\n");
++ if (clear_topaz != 0)
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR, clear_topaz);
++
++ /* PSB_WVDC32(_LNC_IRQ_TOPAZ_FLAG, PSB_INT_IDENTITY_R); */
++}
++
++static inline uint32_t lnc_topaz_queryirq(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t val, /* iir, */ clear = 0;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_INTSTAT, &val);
++ /* iir = PSB_RVDC32(PSB_INT_IDENTITY_R); */
++
++ (void) topaz_priv;
++
++ if ((val == 0) /* && (iir == 0) */) {/* no interrupt */
++ PSB_DEBUG_GENERAL("TOPAZ: no interrupt,IIR=TOPAZ_INTSTAT=0\n");
++ return 0;
++ }
++
++ PSB_DEBUG_IRQ("TOPAZ:TOPAZ_INTSTAT=0x%08x\n", val);
++
++ if (val & (1<<31))
++ PSB_DEBUG_IRQ("TOPAZ:IRQ pin activated,cmd seq=0x%04x,"
++ "sync seq: 0x%08x vs 0x%08x (MTX)\n",
++ CCB_CTRL_SEQ(dev_priv),
++ dev_priv->sequence[LNC_ENGINE_ENCODE],
++ *(uint32_t *)topaz_priv->topaz_sync_addr);
++ else
++ PSB_DEBUG_IRQ("TOPAZ:IRQ pin not activated,cmd seq=0x%04x,"
++ "sync seq: 0x%08x vs 0x%08x (MTX)\n",
++ CCB_CTRL_SEQ(dev_priv),
++ dev_priv->sequence[LNC_ENGINE_ENCODE],
++ *(uint32_t *)topaz_priv->topaz_sync_addr);
++
++ if (val & 0x8) {
++ uint32_t mmu_status, mmu_req;
++
++ TOPAZ_READ32(TOPAZ_CR_MMU_STATUS, &mmu_status);
++ TOPAZ_READ32(TOPAZ_CR_MMU_MEM_REQ, &mmu_req);
++
++ PSB_DEBUG_IRQ("TOPAZ: detect a page fault interrupt, "
++ "address=0x%08x,mem req=0x%08x\n",
++ mmu_status, mmu_req);
++ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MMU_FAULT);
++ }
++
++ if (val & 0x4) {
++ PSB_DEBUG_IRQ("TOPAZ: detect a MTX_HALT interrupt\n");
++ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX_HALT);
++ }
++
++ if (val & 0x2) {
++ PSB_DEBUG_IRQ("TOPAZ: detect a MTX interrupt\n");
++ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX);
++ }
++
++ if (val & 0x1) {
++ PSB_DEBUG_IRQ("TOPAZ: detect a MVEA interrupt\n");
++ clear |= F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MVEA);
++ }
++
++ return clear;
++}
++
++
++#define TOPAZ_NEW_PMSTATE(drm_dev, topaz_priv, new_state) \
++do { \
++ topaz_priv->pmstate = new_state; \
++ sysfs_notify_dirent(topaz_priv->sysfs_pmstate); \
++ PSB_DEBUG_PM("TOPAZ: %s\n", \
++ (new_state == PSB_PMSTATE_POWERUP) ? "powerup": "powerdown"); \
++} while (0)
++
++#endif /* _LNC_TOPAZ_H_ */
+diff --git a/drivers/gpu/drm/mrst/drv/lnc_topazinit.c b/drivers/gpu/drm/mrst/drv/lnc_topazinit.c
+new file mode 100644
+index 0000000..f968d5b
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/lnc_topazinit.c
+@@ -0,0 +1,2051 @@
++/**
++ * file lnc_topazinit.c
++ * TOPAZ initialization and mtx-firmware upload
++ *
++ */
++
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++/* NOTE: (READ BEFORE REFINE CODE)
++ * 1. The FIRMWARE's SIZE is measured by byte, we have to pass the size
++ * measured by word to DMAC.
++ *
++ *
++ *
++ */
++
++/* include headers */
++
++/* #define DRM_DEBUG_CODE 2 */
++
++#include <linux/firmware.h>
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++
++#include "psb_drv.h"
++#include "lnc_topaz.h"
++#include "ospm_power.h"
++#include "sysirq.h"
++
++/* WARNING: this define is very important */
++#define RAM_SIZE (1024 * 24)
++
++/* register default values
++ * THIS HEADER IS ONLY INCLUDE ONCE*/
++static unsigned long topaz_default_regs[183][3] = {
++ {MVEA_START, 0x00000000, 0x00000000},
++ {MVEA_START, 0x00000004, 0x00000400},
++ {MVEA_START, 0x00000008, 0x00000000},
++ {MVEA_START, 0x0000000C, 0x00000000},
++ {MVEA_START, 0x00000010, 0x00000000},
++ {MVEA_START, 0x00000014, 0x00000000},
++ {MVEA_START, 0x00000018, 0x00000000},
++ {MVEA_START, 0x0000001C, 0x00000000},
++ {MVEA_START, 0x00000020, 0x00000120},
++ {MVEA_START, 0x00000024, 0x00000000},
++ {MVEA_START, 0x00000028, 0x00000000},
++ {MVEA_START, 0x00000100, 0x00000000},
++ {MVEA_START, 0x00000104, 0x00000000},
++ {MVEA_START, 0x00000108, 0x00000000},
++ {MVEA_START, 0x0000010C, 0x00000000},
++ {MVEA_START, 0x0000011C, 0x00000001},
++ {MVEA_START, 0x0000012C, 0x00000000},
++ {MVEA_START, 0x00000180, 0x00000000},
++ {MVEA_START, 0x00000184, 0x00000000},
++ {MVEA_START, 0x00000188, 0x00000000},
++ {MVEA_START, 0x0000018C, 0x00000000},
++ {MVEA_START, 0x00000190, 0x00000000},
++ {MVEA_START, 0x00000194, 0x00000000},
++ {MVEA_START, 0x00000198, 0x00000000},
++ {MVEA_START, 0x0000019C, 0x00000000},
++ {MVEA_START, 0x000001A0, 0x00000000},
++ {MVEA_START, 0x000001A4, 0x00000000},
++ {MVEA_START, 0x000001A8, 0x00000000},
++ {MVEA_START, 0x000001AC, 0x00000000},
++ {MVEA_START, 0x000001B0, 0x00000000},
++ {MVEA_START, 0x000001B4, 0x00000000},
++ {MVEA_START, 0x000001B8, 0x00000000},
++ {MVEA_START, 0x000001BC, 0x00000000},
++ {MVEA_START, 0x000001F8, 0x00000000},
++ {MVEA_START, 0x000001FC, 0x00000000},
++ {MVEA_START, 0x00000200, 0x00000000},
++ {MVEA_START, 0x00000204, 0x00000000},
++ {MVEA_START, 0x00000208, 0x00000000},
++ {MVEA_START, 0x0000020C, 0x00000000},
++ {MVEA_START, 0x00000210, 0x00000000},
++ {MVEA_START, 0x00000220, 0x00000001},
++ {MVEA_START, 0x00000224, 0x0000001F},
++ {MVEA_START, 0x00000228, 0x00000100},
++ {MVEA_START, 0x0000022C, 0x00001F00},
++ {MVEA_START, 0x00000230, 0x00000101},
++ {MVEA_START, 0x00000234, 0x00001F1F},
++ {MVEA_START, 0x00000238, 0x00001F01},
++ {MVEA_START, 0x0000023C, 0x0000011F},
++ {MVEA_START, 0x00000240, 0x00000200},
++ {MVEA_START, 0x00000244, 0x00001E00},
++ {MVEA_START, 0x00000248, 0x00000002},
++ {MVEA_START, 0x0000024C, 0x0000001E},
++ {MVEA_START, 0x00000250, 0x00000003},
++ {MVEA_START, 0x00000254, 0x0000001D},
++ {MVEA_START, 0x00000258, 0x00001F02},
++ {MVEA_START, 0x0000025C, 0x00000102},
++ {MVEA_START, 0x00000260, 0x0000011E},
++ {MVEA_START, 0x00000264, 0x00000000},
++ {MVEA_START, 0x00000268, 0x00000000},
++ {MVEA_START, 0x0000026C, 0x00000000},
++ {MVEA_START, 0x00000270, 0x00000000},
++ {MVEA_START, 0x00000274, 0x00000000},
++ {MVEA_START, 0x00000278, 0x00000000},
++ {MVEA_START, 0x00000280, 0x00008000},
++ {MVEA_START, 0x00000284, 0x00000000},
++ {MVEA_START, 0x00000288, 0x00000000},
++ {MVEA_START, 0x0000028C, 0x00000000},
++ {MVEA_START, 0x00000314, 0x00000000},
++ {MVEA_START, 0x00000318, 0x00000000},
++ {MVEA_START, 0x0000031C, 0x00000000},
++ {MVEA_START, 0x00000320, 0x00000000},
++ {MVEA_START, 0x00000324, 0x00000000},
++ {MVEA_START, 0x00000348, 0x00000000},
++ {MVEA_START, 0x00000380, 0x00000000},
++ {MVEA_START, 0x00000384, 0x00000000},
++ {MVEA_START, 0x00000388, 0x00000000},
++ {MVEA_START, 0x0000038C, 0x00000000},
++ {MVEA_START, 0x00000390, 0x00000000},
++ {MVEA_START, 0x00000394, 0x00000000},
++ {MVEA_START, 0x00000398, 0x00000000},
++ {MVEA_START, 0x0000039C, 0x00000000},
++ {MVEA_START, 0x000003A0, 0x00000000},
++ {MVEA_START, 0x000003A4, 0x00000000},
++ {MVEA_START, 0x000003A8, 0x00000000},
++ {MVEA_START, 0x000003B0, 0x00000000},
++ {MVEA_START, 0x000003B4, 0x00000000},
++ {MVEA_START, 0x000003B8, 0x00000000},
++ {MVEA_START, 0x000003BC, 0x00000000},
++ {MVEA_START, 0x000003D4, 0x00000000},
++ {MVEA_START, 0x000003D8, 0x00000000},
++ {MVEA_START, 0x000003DC, 0x00000000},
++ {MVEA_START, 0x000003E0, 0x00000000},
++ {MVEA_START, 0x000003E4, 0x00000000},
++ {MVEA_START, 0x000003EC, 0x00000000},
++ {MVEA_START, 0x000002D0, 0x00000000},
++ {MVEA_START, 0x000002D4, 0x00000000},
++ {MVEA_START, 0x000002D8, 0x00000000},
++ {MVEA_START, 0x000002DC, 0x00000000},
++ {MVEA_START, 0x000002E0, 0x00000000},
++ {MVEA_START, 0x000002E4, 0x00000000},
++ {MVEA_START, 0x000002E8, 0x00000000},
++ {MVEA_START, 0x000002EC, 0x00000000},
++ {MVEA_START, 0x000002F0, 0x00000000},
++ {MVEA_START, 0x000002F4, 0x00000000},
++ {MVEA_START, 0x000002F8, 0x00000000},
++ {MVEA_START, 0x000002FC, 0x00000000},
++ {MVEA_START, 0x00000300, 0x00000000},
++ {MVEA_START, 0x00000304, 0x00000000},
++ {MVEA_START, 0x00000308, 0x00000000},
++ {MVEA_START, 0x0000030C, 0x00000000},
++ {MVEA_START, 0x00000290, 0x00000000},
++ {MVEA_START, 0x00000294, 0x00000000},
++ {MVEA_START, 0x00000298, 0x00000000},
++ {MVEA_START, 0x0000029C, 0x00000000},
++ {MVEA_START, 0x000002A0, 0x00000000},
++ {MVEA_START, 0x000002A4, 0x00000000},
++ {MVEA_START, 0x000002A8, 0x00000000},
++ {MVEA_START, 0x000002AC, 0x00000000},
++ {MVEA_START, 0x000002B0, 0x00000000},
++ {MVEA_START, 0x000002B4, 0x00000000},
++ {MVEA_START, 0x000002B8, 0x00000000},
++ {MVEA_START, 0x000002BC, 0x00000000},
++ {MVEA_START, 0x000002C0, 0x00000000},
++ {MVEA_START, 0x000002C4, 0x00000000},
++ {MVEA_START, 0x000002C8, 0x00000000},
++ {MVEA_START, 0x000002CC, 0x00000000},
++ {MVEA_START, 0x00000080, 0x00000000},
++ {MVEA_START, 0x00000084, 0x80705700},
++ {MVEA_START, 0x00000088, 0x00000000},
++ {MVEA_START, 0x0000008C, 0x00000000},
++ {MVEA_START, 0x00000090, 0x00000000},
++ {MVEA_START, 0x00000094, 0x00000000},
++ {MVEA_START, 0x00000098, 0x00000000},
++ {MVEA_START, 0x0000009C, 0x00000000},
++ {MVEA_START, 0x000000A0, 0x00000000},
++ {MVEA_START, 0x000000A4, 0x00000000},
++ {MVEA_START, 0x000000A8, 0x00000000},
++ {MVEA_START, 0x000000AC, 0x00000000},
++ {MVEA_START, 0x000000B0, 0x00000000},
++ {MVEA_START, 0x000000B4, 0x00000000},
++ {MVEA_START, 0x000000B8, 0x00000000},
++ {MVEA_START, 0x000000BC, 0x00000000},
++ {MVEA_START, 0x000000C0, 0x00000000},
++ {MVEA_START, 0x000000C4, 0x00000000},
++ {MVEA_START, 0x000000C8, 0x00000000},
++ {MVEA_START, 0x000000CC, 0x00000000},
++ {MVEA_START, 0x000000D0, 0x00000000},
++ {MVEA_START, 0x000000D4, 0x00000000},
++ {MVEA_START, 0x000000D8, 0x00000000},
++ {MVEA_START, 0x000000DC, 0x00000000},
++ {MVEA_START, 0x000000E0, 0x00000000},
++ {MVEA_START, 0x000000E4, 0x00000000},
++ {MVEA_START, 0x000000E8, 0x00000000},
++ {MVEA_START, 0x000000EC, 0x00000000},
++ {MVEA_START, 0x000000F0, 0x00000000},
++ {MVEA_START, 0x000000F4, 0x00000000},
++ {MVEA_START, 0x000000F8, 0x00000000},
++ {MVEA_START, 0x000000FC, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000000, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000004, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000008, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000000C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000010, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000014, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000001C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000020, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000024, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000002C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000034, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000038, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000003C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000040, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000044, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000048, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000004C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000050, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000054, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000058, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000005C, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000060, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000064, 0x00000000},
++ {TOPAZ_VLC_START, 0x00000068, 0x00000000},
++ {TOPAZ_VLC_START, 0x0000006C, 0x00000000}
++};
++
++#define FIRMWARE_NAME "topaz_fw.bin"
++
++/* static function define */
++static int topaz_upload_fw(struct drm_device *dev,
++ enum drm_lnc_topaz_codec codec);
++static inline void topaz_set_default_regs(struct drm_psb_private
++ *dev_priv);
++
++#define UPLOAD_FW_BY_DMA 1
++
++#if UPLOAD_FW_BY_DMA
++static void topaz_dma_transfer(struct drm_psb_private *dev_priv,
++ uint32_t channel, uint32_t src_phy_addr,
++ uint32_t offset, uint32_t dst_addr,
++ uint32_t byte_num, uint32_t is_increment,
++ uint32_t is_write);
++#else
++static void topaz_mtx_upload_by_register(struct drm_device *dev,
++ uint32_t mtx_mem, uint32_t addr,
++ uint32_t size,
++ struct ttm_buffer_object *buf);
++#endif
++
++static void topaz_write_core_reg(struct drm_psb_private *dev_priv,
++ uint32_t reg, const uint32_t val);
++static void topaz_read_core_reg(struct drm_psb_private *dev_priv,
++ uint32_t reg, uint32_t *ret_val);
++static void get_mtx_control_from_dash(struct drm_psb_private *dev_priv);
++static void release_mtx_control_from_dash(struct drm_psb_private
++ *dev_priv);
++static void topaz_mmu_hwsetup(struct drm_psb_private *dev_priv);
++static void mtx_dma_read(struct drm_device *dev, uint32_t source_addr,
++ uint32_t size);
++static void mtx_dma_write(struct drm_device *dev);
++
++
++#define DEBUG_FUNCTION 0
++
++#if DEBUG_FUNCTION
++static int topaz_test_null(struct drm_device *dev, uint32_t seq);
++static int topaz_test_sync(struct drm_device *dev, uint32_t seq,
++ uint32_t sync_seq);
++static void topaz_mmu_test(struct drm_device *dev, uint32_t sync_value);
++static void topaz_save_default_regs(struct drm_psb_private *dev_priv,
++ uint32_t *data);
++static void topaz_restore_default_regs(struct drm_psb_private *dev_priv,
++ uint32_t *data);
++static int topaz_test_sync_manual_alloc_page(struct drm_device *dev,
++ uint32_t seq,
++ uint32_t sync_seq,
++ uint32_t offset);
++static int topaz_test_sync_tt_test(struct drm_device *dev,
++ uint32_t seq,
++ uint32_t sync_seq);
++#endif
++
++uint32_t topaz_read_mtx_mem(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr)
++{
++ uint32_t read_val;
++ uint32_t reg, bank_size, ram_bank_size, ram_id;
++
++ TOPAZ_READ32(0x3c, &reg);
++ reg = 0x0a0a0606;
++ bank_size = (reg & 0xF0000) >> 16;
++
++ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
++ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
++
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
++ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
++ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR) |
++ F_ENCODE(1, MTX_MTX_MCMR));
++
++ /* ?? poll this reg? */
++ topaz_wait_for_register(dev_priv,
++ MTX_START + MTX_CR_MTX_RAM_ACCESS_STATUS,
++ 1, 1);
++
++ MTX_READ32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, &read_val);
++
++ return read_val;
++}
++
++void topaz_write_mtx_mem(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr, uint32_t val)
++{
++ uint32_t ram_id = 0;
++ uint32_t reg, bank_size, ram_bank_size;
++
++ TOPAZ_READ32(0x3c, &reg);
++
++ /* PSB_DEBUG_GENERAL ("TOPAZ: DEBUG REG(%x)\n", reg); */
++ reg = 0x0a0a0606;
++
++ bank_size = (reg & 0xF0000) >> 16;
++
++ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
++ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
++
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
++ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
++ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR));
++
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, val);
++
++ /* ?? poll this reg? */
++ topaz_wait_for_register(dev_priv,
++ MTX_START + MTX_CR_MTX_RAM_ACCESS_STATUS,
++ 1, 1);
++
++ return;
++}
++
++void topaz_write_mtx_mem_multiple_setup(struct drm_psb_private *dev_priv,
++ uint32_t byte_addr)
++{
++ uint32_t ram_id = 0;
++ uint32_t reg, bank_size, ram_bank_size;
++
++ TOPAZ_READ32(0x3c, &reg);
++
++ reg = 0x0a0a0606;
++
++ bank_size = (reg & 0xF0000) >> 16;
++
++ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
++ ram_id = (byte_addr - MTX_DATA_MEM_BASE) / ram_bank_size;
++
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_CONTROL,
++ F_ENCODE(0x18 + ram_id, MTX_MTX_MCMID) |
++ F_ENCODE(1, MTX_MTX_MCMAI) |
++ F_ENCODE(byte_addr >> 2, MTX_MTX_MCM_ADDR));
++}
++
++void topaz_write_mtx_mem_multiple(struct drm_psb_private *dev_priv,
++ uint32_t val)
++{
++ MTX_WRITE32(MTX_CR_MTX_RAM_ACCESS_DATA_TRANSFER, val);
++}
++
++
++int topaz_wait_for_register(struct drm_psb_private *dev_priv,
++ uint32_t addr, uint32_t value, uint32_t mask)
++{
++ uint32_t tmp;
++ uint32_t count = 10000;
++
++ /* # poll topaz register for certain times */
++ while (count) {
++ /* #.# read */
++ MM_READ32(addr, 0, &tmp);
++
++ if (value == (tmp & mask))
++ return 0;
++
++ /* #.# delay and loop */
++ DRM_UDELAY(100);
++ --count;
++ }
++
++ /* # now waiting is timeout, return 1 indicat failed */
++ /* XXX: testsuit means a timeout 10000 */
++
++ DRM_ERROR("TOPAZ:time out to poll addr(0x%x) expected value(0x%08x), "
++ "actual 0x%08x (0x%08x & 0x%08x)\n",
++ addr, value, tmp & mask, tmp, mask);
++
++ return -EBUSY;
++
++}
++
++static ssize_t psb_topaz_pmstate_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct drm_device *drm_dev = dev_get_drvdata(dev);
++ struct drm_psb_private *dev_priv;
++ struct topaz_private *topaz_priv;
++ unsigned int pmstate;
++ unsigned long flags;
++ int ret = -EINVAL;
++
++ if (drm_dev == NULL)
++ return 0;
++
++ dev_priv = drm_dev->dev_private;
++ topaz_priv = dev_priv->topaz_private;
++ pmstate = topaz_priv->pmstate;
++
++ pmstate = topaz_priv->pmstate;
++ spin_lock_irqsave(&topaz_priv->topaz_lock, flags);
++ ret = sprintf(buf, "%s\n",
++ (pmstate == PSB_PMSTATE_POWERUP) ? "powerup" : "powerdown");
++ spin_unlock_irqrestore(&topaz_priv->topaz_lock, flags);
++
++ return ret;
++}
++
++static DEVICE_ATTR(topaz_pmstate, 0444, psb_topaz_pmstate_show, NULL);
++
++
++/* this function finish the first part of initialization, the rest
++ * should be done in topaz_setup_fw
++ */
++int lnc_topaz_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ uint32_t core_id, core_rev;
++ int ret = 0, n;
++ bool is_iomem;
++ struct topaz_private *topaz_priv;
++ void *topaz_bo_virt;
++
++ PSB_DEBUG_GENERAL("TOPAZ: init topaz data structures\n");
++ topaz_priv = kmalloc(sizeof(struct topaz_private), GFP_KERNEL);
++ if (topaz_priv == NULL)
++ return -1;
++
++ dev_priv->topaz_private = topaz_priv;
++ memset(topaz_priv, 0, sizeof(struct topaz_private));
++
++ /* get device --> drm_device --> drm_psb_private --> topaz_priv
++ * for psb_topaz_pmstate_show: topaz_pmpolicy
++ * if not pci_set_drvdata, can't get drm_device from device
++ */
++ pci_set_drvdata(dev->pdev, dev);
++ if (device_create_file(&dev->pdev->dev,
++ &dev_attr_topaz_pmstate))
++ DRM_ERROR("TOPAZ: could not create sysfs file\n");
++ topaz_priv->sysfs_pmstate = sysfs_get_dirent(
++ dev->pdev->dev.kobj.sd, "topaz_pmstate");
++
++ topaz_priv = dev_priv->topaz_private;
++
++ /* # initialize comand topaz queueing [msvdx_queue] */
++ INIT_LIST_HEAD(&topaz_priv->topaz_queue);
++ /* # init mutex? CHECK: mutex usage [msvdx_mutex] */
++ mutex_init(&topaz_priv->topaz_mutex);
++ /* # spin lock init? CHECK spin lock usage [msvdx_lock] */
++ spin_lock_init(&topaz_priv->topaz_lock);
++
++ /* # topaz status init. [msvdx_busy] */
++ topaz_priv->topaz_busy = 0;
++ topaz_priv->topaz_cmd_seq = 0;
++ topaz_priv->topaz_fw_loaded = 0;
++ /* FIXME: workaround since JPEG firmware is not ready */
++ topaz_priv->topaz_cur_codec = 1;
++ topaz_priv->cur_mtx_data_size = 0;
++ topaz_priv->topaz_hw_busy = 1;
++
++ topaz_priv->topaz_mtx_reg_state = kmalloc(TOPAZ_MTX_REG_SIZE,
++ GFP_KERNEL);
++ if (topaz_priv->topaz_mtx_reg_state == NULL) {
++ DRM_ERROR("TOPAZ: failed to allocate space "
++ "for mtx register\n");
++ return -1;
++ }
++
++ /* # gain write back structure,we may only need 32+4=40DW */
++ ret = ttm_buffer_object_create(bdev, 4096,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL, &(topaz_priv->topaz_bo));
++ if (ret != 0) {
++ DRM_ERROR("TOPAZ: failed to allocate topaz BO.\n");
++ return ret;
++ }
++
++ ret = ttm_bo_kmap(topaz_priv->topaz_bo, 0,
++ topaz_priv->topaz_bo->num_pages,
++ &topaz_priv->topaz_bo_kmap);
++ if (ret) {
++ DRM_ERROR("TOPAZ: map topaz BO bo failed......\n");
++ ttm_bo_unref(&topaz_priv->topaz_bo);
++ return ret;
++ }
++
++ topaz_bo_virt = ttm_kmap_obj_virtual(&topaz_priv->topaz_bo_kmap,
++ &is_iomem);
++ topaz_priv->topaz_ccb_wb = (void *) topaz_bo_virt;
++ topaz_priv->topaz_wb_offset = topaz_priv->topaz_bo->offset;
++ topaz_priv->topaz_sync_addr = (uint32_t *) (topaz_bo_virt
++ + 2048);
++ topaz_priv->topaz_sync_offset = topaz_priv->topaz_wb_offset
++ + 2048;
++ PSB_DEBUG_GENERAL("TOPAZ: alloc BO for WriteBack and SYNC\n");
++ PSB_DEBUG_GENERAL("TOPAZ: WB offset=0x%08x\n",
++ topaz_priv->topaz_wb_offset);
++ PSB_DEBUG_GENERAL("TOPAZ: SYNC offset=0x%08x\n",
++ topaz_priv->topaz_sync_offset);
++
++ *(topaz_priv->topaz_sync_addr) = ~0; /* reset sync seq */
++
++ /* # reset topaz */
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
++
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
++
++ /* # set up MMU */
++ topaz_mmu_hwsetup(dev_priv);
++
++ PSB_DEBUG_GENERAL("TOPAZ: defer firmware loading to the place"
++ "when receiving user space commands\n");
++
++#if 0 /* can't load FW here */
++ /* #.# load fw to driver */
++ PSB_DEBUG_GENERAL("TOPAZ: will init firmware\n");
++ ret = topaz_init_fw(dev);
++ if (ret != 0)
++ return -1;
++
++ topaz_setup_fw(dev, IMG_CODEC_MPEG4_NO_RC);/* just for test */
++#endif
++ /* <msvdx does> # minimal clock */
++
++ /* <msvdx does> # return 0 */
++ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_ID, &core_id);
++ TOPAZ_READ32(TOPAZ_CR_IMG_TOPAZ_CORE_REV, &core_rev);
++
++ PSB_DEBUG_GENERAL("TOPAZ: core_id(%x) core_rev(%x)\n",
++ core_id, core_rev);
++
++ /* create firmware storage */
++ for (n = 1; n < IMG_CODEC_NUM; ++n) {
++ /* #.# malloc DRM object for fw storage */
++ ret = ttm_buffer_object_create(bdev, 12 * 4096,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL, &topaz_priv->topaz_fw[n].text);
++ if (ret) {
++ DRM_ERROR("Failed to allocate firmware.\n");
++ goto out;
++ }
++
++ /* #.# malloc DRM object for fw storage */
++ ret = ttm_buffer_object_create(bdev, 12 * 4096,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL, &topaz_priv->topaz_fw[n].data);
++ if (ret) {
++ DRM_ERROR("Failed to allocate firmware.\n");
++ goto out;
++ }
++ }
++
++ ret = ttm_buffer_object_create(bdev,
++ 12 * 4096,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU |
++ TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL,
++ &topaz_priv->topaz_mtx_data_mem);
++ if (ret) {
++ DRM_ERROR("TOPAZ: failed to allocate ttm buffer for "
++ "mtx data save\n");
++ goto out;
++ }
++ topaz_priv->cur_mtx_data_size = 0;
++
++ PSB_DEBUG_INIT("TOPAZ:old clock gating disable = 0x%08x\n",
++ PSB_RVDC32(PSB_TOPAZ_CLOCKGATING));
++
++ PSB_DEBUG_INIT("TOPAZ:rest MSDVX to disable clock gating\n");
++
++ PSB_WVDC32(0x00011fff, PSB_TOPAZ_CLOCKGATING);
++
++ PSB_DEBUG_INIT("MSDVX:new clock gating disable = 0x%08x\n",
++ PSB_RVDC32(PSB_TOPAZ_CLOCKGATING));
++
++ return 0;
++
++out:
++ for (n = 1; n < IMG_CODEC_NUM; ++n) {
++ if (topaz_priv->topaz_fw[n].text != NULL)
++ ttm_bo_unref(&topaz_priv->topaz_fw[n].text);
++ if (topaz_priv->topaz_fw[n].data != NULL)
++ ttm_bo_unref(&topaz_priv->topaz_fw[n].data);
++ }
++
++ if (topaz_priv->topaz_mtx_data_mem != NULL)
++ ttm_bo_unref(&topaz_priv->topaz_mtx_data_mem);
++
++ return ret;
++}
++
++int lnc_topaz_uninit(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++ int n;
++
++ /* flush MMU */
++ PSB_DEBUG_GENERAL("XXX: need to flush mmu cache here??\n");
++ /* topaz_mmu_flushcache (dev_priv); */
++
++ /* # reset TOPAZ chip */
++ lnc_topaz_reset(dev_priv);
++
++ /* release resources */
++ /* # release write back memory */
++ topaz_priv->topaz_ccb_wb = NULL;
++
++ /* release mtx register save space */
++ kfree(topaz_priv->topaz_mtx_reg_state);
++
++ /* release mtx data memory save space */
++ if (topaz_priv->topaz_mtx_data_mem)
++ ttm_bo_unref(&topaz_priv->topaz_mtx_data_mem);
++
++ /* # release firmware storage */
++ for (n = 1; n < IMG_CODEC_NUM; ++n) {
++ if (topaz_priv->topaz_fw[n].text != NULL)
++ ttm_bo_unref(&topaz_priv->topaz_fw[n].text);
++ if (topaz_priv->topaz_fw[n].data != NULL)
++ ttm_bo_unref(&topaz_priv->topaz_fw[n].data);
++ }
++
++ ttm_bo_kunmap(&topaz_priv->topaz_bo_kmap);
++ ttm_bo_unref(&topaz_priv->topaz_bo);
++
++ if (topaz_priv) {
++ pci_set_drvdata(dev->pdev, NULL);
++ device_remove_file(&dev->pdev->dev, &dev_attr_topaz_pmstate);
++ sysfs_put(topaz_priv->sysfs_pmstate);
++ topaz_priv->sysfs_pmstate = NULL;
++
++ kfree(topaz_priv);
++ dev_priv->topaz_private = NULL;
++ }
++
++ return 0;
++}
++
++int lnc_topaz_reset(struct drm_psb_private *dev_priv)
++{
++ struct topaz_private *topaz_priv;
++
++ topaz_priv = dev_priv->topaz_private;
++ topaz_priv->topaz_busy = 0;
++ topaz_priv->topaz_cmd_seq = 0;
++ topaz_priv->cur_mtx_data_size = 0;
++ topaz_priv->topaz_cmd_windex = 0;
++ topaz_priv->topaz_needs_reset = 0;
++
++ /* # reset topaz */
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
++
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
++
++ /* # set up MMU */
++ topaz_mmu_hwsetup(dev_priv);
++
++ return 0;
++}
++
++/* read firmware bin file and load all data into driver */
++int topaz_init_fw(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ const struct firmware *raw = NULL;
++ unsigned char *ptr;
++ int ret = 0;
++ int n;
++ struct topaz_fwinfo *cur_fw;
++ int cur_size;
++ struct topaz_codec_fw *cur_codec;
++ struct ttm_buffer_object **cur_drm_obj;
++ struct ttm_bo_kmap_obj tmp_kmap;
++ bool is_iomem;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ topaz_priv->stored_initial_qp = 0;
++
++ /* # get firmware */
++ ret = request_firmware(&raw, FIRMWARE_NAME, &dev->pdev->dev);
++ if (ret != 0) {
++ DRM_ERROR("TOPAZ: request_firmware failed: %d\n", ret);
++ return ret;
++ }
++
++ PSB_DEBUG_GENERAL("TOPAZ: opened firmware\n");
++
++ if (raw && (raw->size < sizeof(struct topaz_fwinfo))) {
++ DRM_ERROR("TOPAZ: firmware file is not correct size.\n");
++ goto out;
++ }
++
++ ptr = (unsigned char *) raw->data;
++
++ if (!ptr) {
++ DRM_ERROR("TOPAZ: failed to load firmware.\n");
++ goto out;
++ }
++
++ /* # load fw from file */
++ PSB_DEBUG_GENERAL("TOPAZ: load firmware.....\n");
++ cur_fw = NULL;
++ /* didn't use the first element */
++ for (n = 1; n < IMG_CODEC_NUM; ++n) {
++ cur_fw = (struct topaz_fwinfo *) ptr;
++
++ cur_codec = &topaz_priv->topaz_fw[cur_fw->codec];
++ cur_codec->ver = cur_fw->ver;
++ cur_codec->codec = cur_fw->codec;
++ cur_codec->text_size = cur_fw->text_size;
++ cur_codec->data_size = cur_fw->data_size;
++ cur_codec->data_location = cur_fw->data_location;
++
++ PSB_DEBUG_GENERAL("TOPAZ: load firemware %s.\n",
++ codec_to_string(cur_fw->codec));
++
++ /* #.# handle text section */
++ ptr += sizeof(struct topaz_fwinfo);
++ cur_drm_obj = &cur_codec->text;
++ cur_size = cur_fw->text_size;
++
++ /* #.# fill DRM object with firmware data */
++ ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages,
++ &tmp_kmap);
++ if (ret) {
++ PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret);
++ ttm_bo_unref(cur_drm_obj);
++ *cur_drm_obj = NULL;
++ goto out;
++ }
++
++ memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr,
++ cur_size);
++
++ ttm_bo_kunmap(&tmp_kmap);
++
++ /* #.# handle data section */
++ ptr += cur_fw->text_size;
++ cur_drm_obj = &cur_codec->data;
++ cur_size = cur_fw->data_size;
++
++ /* #.# fill DRM object with firmware data */
++ ret = ttm_bo_kmap(*cur_drm_obj, 0, (*cur_drm_obj)->num_pages,
++ &tmp_kmap);
++ if (ret) {
++ PSB_DEBUG_GENERAL("drm_bo_kmap failed: %d\n", ret);
++ ttm_bo_unref(cur_drm_obj);
++ *cur_drm_obj = NULL;
++ goto out;
++ }
++
++ memcpy(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), ptr,
++ cur_size);
++
++ ttm_bo_kunmap(&tmp_kmap);
++
++ /* #.# validate firmware */
++
++ /* #.# update ptr */
++ ptr += cur_fw->data_size;
++ }
++
++ release_firmware(raw);
++
++ PSB_DEBUG_GENERAL("TOPAZ: return from firmware init\n");
++
++ return 0;
++
++out:
++ if (raw) {
++ PSB_DEBUG_GENERAL("release firmware....\n");
++ release_firmware(raw);
++ }
++
++ return -1;
++}
++
++/* setup fw when start a new context */
++int topaz_setup_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t mem_size = RAM_SIZE; /* follow DDK */
++ uint32_t verify_pc;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++#if 0
++ if (codec == topaz_priv->topaz_current_codec) {
++ LNC_TRACEL("TOPAZ: reuse previous codec\n");
++ return 0;
++ }
++#endif
++
++ /* XXX: need to rest topaz? */
++ PSB_DEBUG_GENERAL("XXX: should reset topaz when context change?\n");
++
++ /* XXX: interrupt enable shouldn't be enable here,
++ * this funtion is called when interrupt is enable,
++ * but here, we've no choice since we have to call setup_fw by
++ * manual */
++ /* # upload firmware, clear interruputs and start the firmware
++ * -- from hostutils.c in TestSuits*/
++
++ /* # reset MVEA */
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(1, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
++
++ MVEA_WRITE32(MVEA_CR_IMG_MVEA_SRST,
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_SPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_IPE_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMPRS_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_JMCOMP_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_CMC_SOFT_RESET) |
++ F_ENCODE(0, MVEA_CR_IMG_MVEA_DCF_SOFT_RESET));
++
++
++ topaz_mmu_hwsetup(dev_priv);
++
++#if !LNC_TOPAZ_NO_IRQ
++ sysirq_uninstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
++#endif
++
++ PSB_DEBUG_GENERAL("TOPAZ: will setup firmware....\n");
++
++ topaz_set_default_regs(dev_priv);
++
++ /* # reset mtx */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MVEA_SOFT_RESET) |
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_MTX_SOFT_RESET) |
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_VLC_SOFT_RESET));
++
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_SRST, 0x0);
++
++ /* # upload fw by drm */
++ PSB_DEBUG_GENERAL("TOPAZ: will upload firmware\n");
++
++ topaz_upload_fw(dev, codec);
++#if 0
++ /* allocate the space for context save & restore if needed */
++ if (topaz_priv->topaz_mtx_data_mem == NULL) {
++ ret = ttm_buffer_object_create(bdev,
++ topaz_priv->cur_mtx_data_size * 4,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU |
++ TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL,
++ &topaz_priv->topaz_mtx_data_mem);
++ if (ret) {
++ DRM_ERROR("TOPAZ: failed to allocate ttm buffer for "
++ "mtx data save\n");
++ return -1;
++ }
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: after upload fw ....\n");
++#endif
++
++ /* XXX: In power save mode, need to save the complete data memory
++ * and restore it. MTX_FWIF.c record the data size */
++ PSB_DEBUG_GENERAL("TOPAZ:in power save mode need to save memory?\n");
++
++ PSB_DEBUG_GENERAL("TOPAZ: setting up pc address\n");
++ topaz_write_core_reg(dev_priv, TOPAZ_MTX_PC, PC_START_ADDRESS);
++
++ PSB_DEBUG_GENERAL("TOPAZ: verify pc address\n");
++
++ topaz_read_core_reg(dev_priv, TOPAZ_MTX_PC, &verify_pc);
++
++ /* enable auto clock is essential for this driver */
++ TOPAZ_WRITE32(TOPAZ_CR_TOPAZ_AUTO_CLK_GATE,
++ F_ENCODE(1, TOPAZ_CR_TOPAZ_VLC_AUTO_CLK_GATE) |
++ F_ENCODE(1, TOPAZ_CR_TOPAZ_DB_AUTO_CLK_GATE));
++ MVEA_WRITE32(MVEA_CR_MVEA_AUTO_CLOCK_GATING,
++ F_ENCODE(1, MVEA_CR_MVEA_IPE_AUTO_CLK_GATE) |
++ F_ENCODE(1, MVEA_CR_MVEA_SPE_AUTO_CLK_GATE) |
++ F_ENCODE(1, MVEA_CR_MVEA_CMPRS_AUTO_CLK_GATE) |
++ F_ENCODE(1, MVEA_CR_MVEA_JMCOMP_AUTO_CLK_GATE));
++
++ PSB_DEBUG_GENERAL("TOPAZ: current pc(%08X) vs %08X\n",
++ verify_pc, PC_START_ADDRESS);
++
++ /* # turn on MTX */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX));
++
++ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
++ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK);
++
++ /* # poll on the interrupt which the firmware will generate */
++ topaz_wait_for_register(dev_priv,
++ TOPAZ_START + TOPAZ_CR_IMG_TOPAZ_INTSTAT,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTS_MTX),
++ F_MASK(TOPAZ_CR_IMG_TOPAZ_INTS_MTX));
++
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_INTCLEAR,
++ F_ENCODE(1, TOPAZ_CR_IMG_TOPAZ_INTCLR_MTX));
++
++ PSB_DEBUG_GENERAL("TOPAZ: after topaz mtx setup ....\n");
++
++ /* # get ccb buffer addr -- file hostutils.c */
++ topaz_priv->topaz_ccb_buffer_addr =
++ topaz_read_mtx_mem(dev_priv,
++ MTX_DATA_MEM_BASE + mem_size - 4);
++ topaz_priv->topaz_ccb_ctrl_addr =
++ topaz_read_mtx_mem(dev_priv,
++ MTX_DATA_MEM_BASE + mem_size - 8);
++ topaz_priv->topaz_ccb_size =
++ topaz_read_mtx_mem(dev_priv,
++ topaz_priv->topaz_ccb_ctrl_addr +
++ MTX_CCBCTRL_CCBSIZE);
++
++ topaz_priv->topaz_cmd_windex = 0;
++
++ PSB_DEBUG_GENERAL("TOPAZ:ccb_buffer_addr(%x),ctrl_addr(%x) size(%d)\n",
++ topaz_priv->topaz_ccb_buffer_addr,
++ topaz_priv->topaz_ccb_ctrl_addr,
++ topaz_priv->topaz_ccb_size);
++
++ /* # write back the initial QP Value */
++ topaz_write_mtx_mem(dev_priv,
++ topaz_priv->topaz_ccb_ctrl_addr + MTX_CCBCTRL_INITQP,
++ topaz_priv->stored_initial_qp);
++
++ PSB_DEBUG_GENERAL("TOPAZ: write WB mem address 0x%08x\n",
++ topaz_priv->topaz_wb_offset);
++ topaz_write_mtx_mem(dev_priv, MTX_DATA_MEM_BASE + mem_size - 12,
++ topaz_priv->topaz_wb_offset);
++
++ /* this kick is essential for mtx.... */
++ *((uint32_t *) topaz_priv->topaz_ccb_wb) = 0x01020304;
++ topaz_mtx_kick(dev_priv, 1);
++ DRM_UDELAY(1000);
++ PSB_DEBUG_GENERAL("TOPAZ: DDK expected 0x12345678 in WB memory,"
++ " and here it is 0x%08x\n",
++ *((uint32_t *) topaz_priv->topaz_ccb_wb));
++
++ *((uint32_t *) topaz_priv->topaz_ccb_wb) = 0x0;/* reset it to 0 */
++ PSB_DEBUG_GENERAL("TOPAZ: firmware uploaded.\n");
++
++ /* XXX: is there any need to record next cmd num??
++ * we use fence seqence number to record it
++ */
++ topaz_priv->topaz_busy = 0;
++ topaz_priv->topaz_cmd_seq = 0;
++
++#if !LNC_TOPAZ_NO_IRQ
++ sysirq_preinstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
++ sysirq_postinstall_islands(dev, OSPM_VIDEO_ENC_ISLAND);
++ lnc_topaz_enableirq(dev);
++#endif
++
++#if 0
++ topaz_mmu_flushcache(dev_priv);
++ topaz_test_null(dev, 0xe1e1);
++ topaz_test_null(dev, 0xe2e2);
++ topaz_test_sync(dev, 0xe2e2, 0x87654321);
++
++ topaz_mmu_test(dev, 0x12345678);
++ topaz_test_null(dev, 0xe3e3);
++ topaz_mmu_test(dev, 0x8764321);
++
++ topaz_test_null(dev, 0xe4e4);
++ topaz_test_null(dev, 0xf3f3);
++#endif
++
++ return 0;
++}
++
++#if UPLOAD_FW_BY_DMA
++int topaz_upload_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ const struct topaz_codec_fw *cur_codec_fw;
++ uint32_t text_size, data_size;
++ uint32_t data_location;
++ uint32_t cur_mtx_data_size;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ /* # refer HLD document */
++
++ /* # MTX reset */
++ PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n");
++ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
++ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
++
++ DRM_UDELAY(6000);
++
++ /* # upload the firmware by DMA */
++ cur_codec_fw = &topaz_priv->topaz_fw[codec];
++
++ PSB_DEBUG_GENERAL("Topaz:upload codec %s(%d) text sz=%d data sz=%d"
++ " data location(%d)\n", codec_to_string(codec), codec,
++ cur_codec_fw->text_size, cur_codec_fw->data_size,
++ cur_codec_fw->data_location);
++
++ /* # upload text */
++ text_size = cur_codec_fw->text_size / 4;
++
++ /* setup the MTX to start recieving data:
++ use a register for the transfer which will point to the source
++ (MTX_CR_MTX_SYSC_CDMAT) */
++ /* #.# fill the dst addr */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000);
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(2, MTX_BURSTSIZE) |
++ F_ENCODE(0, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(text_size, MTX_LENGTH));
++
++ /* #.# set DMAC access to host memory via BIF */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
++
++ /* #.# transfer the codec */
++ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0,
++ MTX_CR_MTX_SYSC_CDMAT, text_size, 0, 0);
++
++ /* #.# wait dma finish */
++ topaz_wait_for_register(dev_priv,
++ DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++
++ /* #.# clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++
++ /* # return access to topaz core */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
++
++ /* # upload data */
++ data_size = cur_codec_fw->data_size / 4;
++ data_location = cur_codec_fw->data_location;
++
++ /* #.# fill the dst addr */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA,
++ 0x80900000 + (data_location - 0x82880000));
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(2, MTX_BURSTSIZE) |
++ F_ENCODE(0, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(data_size, MTX_LENGTH));
++
++ /* #.# set DMAC access to host memory via BIF */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
++
++ /* #.# transfer the codec */
++ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->data->offset, 0,
++ MTX_CR_MTX_SYSC_CDMAT, data_size, 0, 0);
++
++ /* #.# wait dma finish */
++ topaz_wait_for_register(dev_priv,
++ DMAC_START + IMG_SOC_DMAC_IRQ_STAT(0),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++
++ /* #.# clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++
++ /* # return access to topaz core */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
++
++ /* record this codec's mtx data size for
++ * context save & restore */
++ /* FIXME: since non-root sighting fixed by pre allocated,
++ * only need to correct the buffer size
++ */
++ cur_mtx_data_size = data_size;
++ if (topaz_priv->cur_mtx_data_size != cur_mtx_data_size)
++ topaz_priv->cur_mtx_data_size = cur_mtx_data_size;
++
++ return 0;
++}
++
++#else
++
++void topaz_mtx_upload_by_register(struct drm_device *dev, uint32_t mtx_mem,
++ uint32_t addr, uint32_t size,
++ struct ttm_buffer_object *buf)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t *buf_p;
++ uint32_t debug_reg, bank_size, bank_ram_size, bank_count;
++ uint32_t cur_ram_id, ram_addr , ram_id;
++ int map_ret, lp;
++ struct ttm_bo_kmap_obj bo_kmap;
++ bool is_iomem;
++ uint32_t cur_addr;
++
++ get_mtx_control_from_dash(dev_priv);
++
++ map_ret = ttm_bo_kmap(buf, 0, buf->num_pages, &bo_kmap);
++ if (map_ret) {
++ DRM_ERROR("TOPAZ: drm_bo_kmap failed: %d\n", map_ret);
++ return;
++ }
++ buf_p = (uint32_t *) ttm_kmap_obj_virtual(&bo_kmap, &is_iomem);
++
++
++ TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET, &debug_reg);
++ debug_reg = 0x0a0a0606;
++ bank_size = (debug_reg & 0xf0000) >> 16;
++ bank_ram_size = 1 << (bank_size + 2);
++
++ bank_count = (debug_reg & 0xf00) >> 8;
++
++ topaz_wait_for_register(dev_priv,
++ MTX_START+MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_OFFSET,
++ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK,
++ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK);
++
++ cur_ram_id = -1;
++ cur_addr = addr;
++ for (lp = 0; lp < size / 4; ++lp) {
++ ram_id = mtx_mem + (cur_addr / bank_ram_size);
++
++ if (cur_ram_id != ram_id) {
++ ram_addr = cur_addr >> 2;
++
++ MTX_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
++ F_ENCODE(ram_id, MTX_MTX_MCMID) |
++ F_ENCODE(ram_addr, MTX_MTX_MCM_ADDR) |
++ F_ENCODE(1, MTX_MTX_MCMAI));
++
++ cur_ram_id = ram_id;
++ }
++ cur_addr += 4;
++
++ MTX_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_DATA_TRANSFER_OFFSET,
++ *(buf_p + lp));
++
++ topaz_wait_for_register(dev_priv,
++ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_OFFSET + MTX_START,
++ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK,
++ MTX_CORE_CR_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK);
++ }
++
++ ttm_bo_kunmap(&bo_kmap);
++
++ PSB_DEBUG_GENERAL("TOPAZ: register data upload done\n");
++ return;
++}
++
++int topaz_upload_fw(struct drm_device *dev, enum drm_lnc_topaz_codec codec)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ const struct topaz_codec_fw *cur_codec_fw;
++ uint32_t text_size, data_size;
++ uint32_t data_location;
++
++ /* # refer HLD document */
++ /* # MTX reset */
++ PSB_DEBUG_GENERAL("TOPAZ: mtx reset.\n");
++ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
++ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
++
++ DRM_UDELAY(6000);
++
++ /* # upload the firmware by DMA */
++ cur_codec_fw = &topaz_priv->topaz_fw[codec];
++
++ PSB_DEBUG_GENERAL("Topaz: upload codec %s text size(%d) data size(%d)"
++ " data location(0x%08x)\n", codec_to_string(codec),
++ cur_codec_fw->text_size, cur_codec_fw->data_size,
++ cur_codec_fw->data_location);
++
++ /* # upload text */
++ text_size = cur_codec_fw->text_size;
++
++ topaz_mtx_upload_by_register(dev, LNC_MTX_CORE_CODE_MEM,
++ PC_START_ADDRESS - MTX_MEMORY_BASE,
++ text_size, cur_codec_fw->text);
++
++ /* # upload data */
++ data_size = cur_codec_fw->data_size;
++ data_location = cur_codec_fw->data_location;
++
++ topaz_mtx_upload_by_register(dev, LNC_MTX_CORE_DATA_MEM,
++ data_location - 0x82880000, data_size,
++ cur_codec_fw->data);
++
++ return 0;
++}
++
++#endif /* UPLOAD_FW_BY_DMA */
++
++void
++topaz_dma_transfer(struct drm_psb_private *dev_priv, uint32_t channel,
++ uint32_t src_phy_addr, uint32_t offset,
++ uint32_t soc_addr, uint32_t byte_num,
++ uint32_t is_increment, uint32_t is_write)
++{
++ uint32_t dmac_count;
++ uint32_t irq_stat;
++ uint32_t count;
++
++ PSB_DEBUG_GENERAL("TOPAZ: using dma to transfer firmware\n");
++ /* # check that no transfer is currently in progress and no
++ interrupts are outstanding ?? (why care interrupt) */
++ DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &dmac_count);
++ if (0 != (dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN)))
++ DRM_ERROR("TOPAZ: there is tranfer in progress\n");
++
++ /* assert(0==(dmac_count & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN)));*/
++
++ /* no hold off period */
++ DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 0);
++ /* clear previous interrupts */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);
++ /* check irq status */
++ DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_stat);
++ /* assert(0 == irq_stat); */
++ if (0 != irq_stat)
++ DRM_ERROR("TOPAZ: there is hold up\n");
++
++ DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel),
++ (src_phy_addr + offset));
++ count = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP, DMAC_PWIDTH_32_BIT,
++ is_write, DMAC_PWIDTH_32_BIT, byte_num);
++ /* generate an interrupt at the end of transfer */
++ count |= MASK_IMG_SOC_TRANSFER_IEN;
++ count |= F_ENCODE(is_write, IMG_SOC_DIR);
++ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count);
++
++ DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel),
++ DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0,
++ is_increment, DMAC_BURST_2));
++
++ DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel), soc_addr);
++
++ /* Finally, rewrite the count register with
++ * the enable bit set to kick off the transfer
++ */
++ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count | MASK_IMG_SOC_EN);
++
++ PSB_DEBUG_GENERAL("TOPAZ: dma transfer started.\n");
++
++ return;
++}
++
++void topaz_set_default_regs(struct drm_psb_private *dev_priv)
++{
++ int n;
++ int count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
++
++ for (n = 0; n < count; n++)
++ MM_WRITE32(topaz_default_regs[n][0],
++ topaz_default_regs[n][1],
++ topaz_default_regs[n][2]);
++
++}
++
++void topaz_write_core_reg(struct drm_psb_private *dev_priv, uint32_t reg,
++ const uint32_t val)
++{
++ uint32_t tmp;
++ get_mtx_control_from_dash(dev_priv);
++
++ /* put data into MTX_RW_DATA */
++ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET, val);
++
++ /* request a write */
++ tmp = reg &
++ ~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK;
++ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET, tmp);
++
++ /* wait for operation finished */
++ topaz_wait_for_register(dev_priv,
++ MTX_START +
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
++
++ release_mtx_control_from_dash(dev_priv);
++}
++
++void topaz_read_core_reg(struct drm_psb_private *dev_priv, uint32_t reg,
++ uint32_t *ret_val)
++{
++ uint32_t tmp;
++
++ get_mtx_control_from_dash(dev_priv);
++
++ /* request a write */
++ tmp = (reg &
++ ~MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
++ MTX_WRITE32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK | tmp);
++
++ /* wait for operation finished */
++ topaz_wait_for_register(dev_priv,
++ MTX_START +
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_OFFSET,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
++ MTX_CORE_CR_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
++
++ /* read */
++ MTX_READ32(MTX_CORE_CR_MTX_REGISTER_READ_WRITE_DATA_OFFSET,
++ ret_val);
++
++ release_mtx_control_from_dash(dev_priv);
++}
++
++void get_mtx_control_from_dash(struct drm_psb_private *dev_priv)
++{
++ int debug_reg_slave_val;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ /* GetMTXControlFromDash */
++ TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
++ F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE) |
++ F_ENCODE(2, TOPAZ_CR_MTX_DBG_GPIO_OUT));
++ do {
++ TOPAZ_READ32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
++ &debug_reg_slave_val);
++ } while ((debug_reg_slave_val & 0x18) != 0);
++
++ /* save access control */
++ TOPAZ_READ32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
++ &topaz_priv->topaz_dash_access_ctrl);
++}
++
++void release_mtx_control_from_dash(struct drm_psb_private *dev_priv)
++{
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ /* restore access control */
++ TOPAZ_WRITE32(MTX_CORE_CR_MTX_RAM_ACCESS_CONTROL_OFFSET,
++ topaz_priv->topaz_dash_access_ctrl);
++
++ /* release bus */
++ TOPAZ_WRITE32(TOPAZ_CORE_CR_MTX_DEBUG_OFFSET,
++ F_ENCODE(1, TOPAZ_CR_MTX_DBG_IS_SLAVE));
++}
++
++void topaz_mmu_hwsetup(struct drm_psb_private *dev_priv)
++{
++ uint32_t pd_addr = psb_get_default_pd_addr(dev_priv->mmu);
++
++ /* bypass all request while MMU is being configured */
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0,
++ F_ENCODE(1, TOPAZ_CR_MMU_BYPASS));
++
++ /* set MMU hardware at the page table directory */
++ PSB_DEBUG_GENERAL("TOPAZ: write PD phyaddr=0x%08x "
++ "into MMU_DIR_LIST0/1\n", pd_addr);
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(0), pd_addr);
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_DIR_LIST_BASE(1), 0);
++
++ /* setup index register, all pointing to directory bank 0 */
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_BANK_INDEX, 0);
++
++ /* now enable MMU access for all requestors */
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, 0);
++}
++
++void topaz_mmu_flushcache(struct drm_psb_private *dev_priv)
++{
++ uint32_t mmu_control;
++
++ if (dev_priv->topaz_disabled)
++ return;
++
++#if 0
++ PSB_DEBUG_GENERAL("XXX: Only one PTD/PTE cache"
++ " so flush using the master core\n");
++#endif
++ /* XXX: disable interrupt */
++
++ TOPAZ_READ32(TOPAZ_CR_MMU_CONTROL0, &mmu_control);
++ mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_INVALDC);
++ mmu_control |= F_ENCODE(1, TOPAZ_CR_MMU_FLUSH);
++
++#if 0
++ PSB_DEBUG_GENERAL("Set Invalid flag (this causes a flush with MMU\n"
++ "still operating afterwards even if not cleared,\n"
++ "but may want to replace with MMU_FLUSH?\n");
++#endif
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control);
++
++ /* clear it */
++ mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_INVALDC));
++ mmu_control &= (~F_ENCODE(1, TOPAZ_CR_MMU_FLUSH));
++ TOPAZ_WRITE32(TOPAZ_CR_MMU_CONTROL0, mmu_control);
++}
++
++#if DEBUG_FUNCTION
++
++static int topaz_test_sync(struct drm_device *dev, uint32_t seq,
++ uint32_t sync_seq)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++ uint32_t sync_cmd[3];
++ struct topaz_cmd_header *cmd_hdr;
++ uint32_t *sync_p = (uint32_t *)topaz_priv->topaz_sync_addr;
++ int count = 1000;
++ uint32_t clr_flag;
++
++ cmd_hdr = (struct topaz_cmd_header *)&sync_cmd[0];
++
++ /* reset sync area */
++ *sync_p = 0;
++
++ /* insert a SYNC command here */
++ cmd_hdr->id = MTX_CMDID_SYNC;
++ cmd_hdr->size = 3;
++ cmd_hdr->seq = seq;
++
++ sync_cmd[1] = topaz_priv->topaz_sync_offset;
++ sync_cmd[2] = sync_seq;
++
++ TOPAZ_BEGIN_CCB(dev_priv);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]);
++ TOPAZ_END_CCB(dev_priv, 1);
++
++ PSB_DEBUG_GENERAL("Topaz: Sent SYNC with cmd seq=0x%08x,"
++ "sync_seq=0x%08x\n", seq, sync_seq);
++
++ while (count && *sync_p != sync_seq) {
++ DRM_UDELAY(100);
++ --count;
++ }
++ if ((count == 0) && (*sync_p != sync_seq)) {
++ DRM_ERROR("TOPAZ: wait sycn timeout, expect sync seq 0x%08x,"
++ "actual 0x%08x\n", sync_seq, *sync_p);
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: SYNC succeed, sync seq=0x%08x\n", *sync_p);
++ PSB_DEBUG_GENERAL("Topaz: after SYNC test, query IRQ and clear it\n");
++
++ clr_flag = lnc_topaz_queryirq(dev);
++ lnc_topaz_clearirq(dev, clr_flag);
++
++ return 0;
++}
++static int topaz_test_sync_tt_test(struct drm_device *dev,
++ uint32_t seq,
++ uint32_t sync_seq)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ int ret;
++ bool is_iomem;
++ struct ttm_buffer_object *test_obj;
++ struct ttm_bo_kmap_obj test_kmap;
++ unsigned int *test_adr;
++ uint32_t sync_cmd[3];
++ int count = 1000;
++ unsigned long pfn;
++
++ ret = ttm_buffer_object_create(bdev, 4096,
++ ttm_bo_type_kernel,
++ TTM_PL_FLAG_TT | TTM_PL_FLAG_NO_EVICT,
++ 0, 0, 0, NULL, &test_obj);
++ if (ret) {
++ DRM_ERROR("failed create test object buffer\n");
++ return -1;
++ }
++
++ ret = psb_mmu_virtual_to_pfn(psb_mmu_get_default_pd(dev_priv->mmu),
++ test_obj->offset, &pfn);
++ if (ret) {
++ DRM_ERROR("failed to get pfn from virtual\n");
++ return -1;
++ }
++
++ PSB_DEBUG_GENERAL("Topaz:offset %lx, pfn %lx\n", test_obj->offset, pfn);
++
++ ret = ttm_bo_kmap(test_obj, 0, test_obj->num_pages,
++ &test_kmap);
++ if (ret) {
++ DRM_ERROR("failed map buffer\n");
++ return -1;
++ }
++ test_adr = ttm_kmap_obj_virtual(&test_kmap, &is_iomem);
++ *test_adr = 0xff55;
++ ttm_bo_kunmap(&test_kmap);
++
++ /* insert a SYNC command here */
++ sync_cmd[0] = (MTX_CMDID_SYNC << 1) | (3 << 8) |
++ (seq << 16);
++ sync_cmd[1] = test_obj->offset;
++ sync_cmd[2] = sync_seq;
++
++ TOPAZ_BEGIN_CCB(dev_priv);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]);
++ TOPAZ_END_CCB(dev_priv, 1);
++
++ ret = ttm_bo_kmap(test_obj, 0, test_obj->num_pages,
++ &test_kmap);
++ if (ret) {
++ DRM_ERROR("failed map buffer\n");
++ return -1;
++ }
++ test_adr = ttm_kmap_obj_virtual(&test_kmap, &is_iomem);
++
++ while (count && *test_adr != sync_seq) {
++ DRM_UDELAY(100);
++ --count;
++ }
++ if ((count == 0) && (*test_adr != sync_seq)) {
++ DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x),"
++ "actual 0x%08x\n",
++ sync_seq, *test_adr);
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *test_adr);
++ ttm_bo_kunmap(&test_kmap);
++ ttm_bo_unref(&test_obj);
++
++ return 0;
++}
++
++static int topaz_test_sync_manual_alloc_page(struct drm_device *dev,
++ uint32_t seq,
++ uint32_t sync_seq,
++ uint32_t offset)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int ret;
++ uint32_t sync_cmd[3];
++ int count = 1000;
++ unsigned long pfn;
++
++ struct page *p;
++ uint32_t *v;
++/* uint32_t offset = 0xd0000000; */
++
++ p = alloc_page(GFP_DMA32);
++ if (!p) {
++ DRM_ERROR("Topaz:Failed allocating page\n");
++ return -1;
++ }
++
++ v = kmap(p);
++ memset(v, 0x67, PAGE_SIZE);
++ pfn = (offset >> PAGE_SHIFT);
++ kunmap(p);
++
++ ret = psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
++ &p, pfn << PAGE_SHIFT, 1, 0, 0, 0);
++ if (ret) {
++ DRM_ERROR("Topaz:Failed inserting mmu page\n");
++ return -1;
++ }
++
++ /* insert a SYNC command here */
++ sync_cmd[0] = (MTX_CMDID_SYNC << 1) | (3 << 8) |
++ (0x5b << 16);
++ sync_cmd[1] = pfn << PAGE_SHIFT;
++ sync_cmd[2] = seq;
++
++ TOPAZ_BEGIN_CCB(dev_priv);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[0]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[1]);
++ TOPAZ_OUT_CCB(dev_priv, sync_cmd[2]);
++ TOPAZ_END_CCB(dev_priv, 1);
++
++ v = kmap(p);
++ while (count && *v != sync_seq) {
++ DRM_UDELAY(100);
++ --count;
++ }
++ if ((count == 0) && (*v != sync_seq)) {
++ DRM_ERROR("TOPAZ: wait sycn timeout (0x%08x),"
++ "actual 0x%08x\n",
++ sync_seq, *v);
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: SYNC done, seq=0x%08x\n", *v);
++ kunmap(p);
++
++ return 0;
++}
++
++static int topaz_test_null(struct drm_device *dev, uint32_t seq)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct topaz_cmd_header null_cmd;
++ uint32_t clr_flag;
++
++ /* XXX: here we finished firmware setup....
++ * using a NULL command to verify the
++ * correctness of firmware
++ */
++
++ null_cmd.id = MTX_CMDID_NULL;
++ null_cmd.size = 1;
++ null_cmd.seq = seq;
++
++ TOPAZ_BEGIN_CCB(dev_priv);
++ TOPAZ_OUT_CCB(dev_priv, *((uint32_t *)&null_cmd));
++ TOPAZ_END_CCB(dev_priv, 1);
++
++ DRM_UDELAY(1000); /* wait to finish */
++
++ PSB_DEBUG_GENERAL("Topaz: Sent NULL with sequence=0x%08x,"
++ " got sequence=0x%08x (WB_seq=0x%08x,WB_roff=%d)\n",
++ seq, CCB_CTRL_SEQ(dev_priv), WB_CCB_CTRL_SEQ(dev_priv),
++ WB_CCB_CTRL_RINDEX(dev_priv));
++
++ PSB_DEBUG_GENERAL("Topaz: after NULL test, query IRQ and clear it\n");
++
++ clr_flag = lnc_topaz_queryirq(dev);
++ lnc_topaz_clearirq(dev, clr_flag);
++
++ return 0;
++}
++
++
++/*
++ * this function will test whether the mmu is correct:
++ * it get a drm_buffer_object and use CMD_SYNC to write
++ * certain value into this buffer.
++ */
++static void topaz_mmu_test(struct drm_device *dev, uint32_t sync_value)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++ unsigned long real_pfn;
++ int ret;
++
++ /* topaz_mmu_flush(dev); */
++ topaz_test_sync(dev, 0x55, sync_value);
++
++ ret = psb_mmu_virtual_to_pfn(psb_mmu_get_default_pd(dev_priv->mmu),
++ topaz_priv->topaz_sync_offset, &real_pfn);
++ if (ret != 0) {
++ PSB_DEBUG_GENERAL("psb_mmu_virtual_to_pfn failed,exit\n");
++ return;
++ }
++ PSB_DEBUG_GENERAL("TOPAZ: issued SYNC command, "
++ "BO offset=0x%08x (pfn=%lu), synch value=0x%08x\n",
++ topaz_priv->topaz_sync_offset, real_pfn, sync_value);
++}
++
++void topaz_save_default_regs(struct drm_psb_private *dev_priv, uint32_t *data)
++{
++ int n;
++ int count;
++
++ count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
++ for (n = 0; n < count; n++, ++data)
++ MM_READ32(topaz_default_regs[n][0],
++ topaz_default_regs[n][1],
++ data);
++
++}
++
++void topaz_restore_default_regs(struct drm_psb_private *dev_priv,
++ uint32_t *data)
++{
++ int n;
++ int count;
++
++ count = sizeof(topaz_default_regs) / (sizeof(unsigned long) * 3);
++ for (n = 0; n < count; n++, ++data)
++ MM_WRITE32(topaz_default_regs[n][0],
++ topaz_default_regs[n][1],
++ *data);
++
++}
++
++#endif
++
++int lnc_topaz_restore_mtx_state(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ uint32_t reg_val;
++ uint32_t *mtx_reg_state;
++ int i;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ if (!topaz_priv->topaz_mtx_saved)
++ return -1;
++
++ if (topaz_priv->topaz_mtx_data_mem == NULL) {
++ PSB_DEBUG_GENERAL("TOPAZ: try to restore context without "
++ "space allocated, return directly without restore\n");
++ return -1;
++ }
++
++ /* turn on mtx clocks */
++ MTX_READ32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE, &reg_val);
++ MTX_WRITE32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE,
++ reg_val & (~MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE));
++
++ /* reset mtx */
++ /* FIXME: should use core_write??? */
++ MTX_WRITE32(MTX_CORE_CR_MTX_SOFT_RESET_OFFSET,
++ MTX_CORE_CR_MTX_SOFT_RESET_MTX_RESET_MASK);
++ DRM_UDELAY(6000);
++
++ topaz_mmu_hwsetup(dev_priv);
++ /* upload code, restore mtx data */
++ mtx_dma_write(dev);
++
++ mtx_reg_state = topaz_priv->topaz_mtx_reg_state;
++ /* restore register */
++ /* FIXME: conside to put read/write into one function */
++ /* Saves 8 Registers of D0 Bank */
++ /* DoRe0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */
++ for (i = 0; i < 8; i++) {
++ topaz_write_core_reg(dev_priv, 0x1 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 8 Registers of D1 Bank */
++ /* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */
++ for (i = 0; i < 8; i++) {
++ topaz_write_core_reg(dev_priv, 0x2 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 4 Registers of A0 Bank */
++ /* A0StP, A0FrP, A0.2 and A0.3 */
++ for (i = 0; i < 4; i++) {
++ topaz_write_core_reg(dev_priv, 0x3 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 4 Registers of A1 Bank */
++ /* A1GbP, A1LbP, A1.2 and A1.3 */
++ for (i = 0; i < 4; i++) {
++ topaz_write_core_reg(dev_priv, 0x4 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves PC and PCX */
++ for (i = 0; i < 2; i++) {
++ topaz_write_core_reg(dev_priv, 0x5 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 8 Control Registers */
++ /* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI,
++ * TXGPIOO */
++ for (i = 0; i < 8; i++) {
++ topaz_write_core_reg(dev_priv, 0x7 | (i<<4),
++ *mtx_reg_state);
++ mtx_reg_state++;
++ }
++
++ /* turn on MTX */
++ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
++ MTX_CORE_CR_MTX_ENABLE_MTX_ENABLE_MASK);
++
++ topaz_priv->topaz_mtx_saved = 0;
++
++ return 0;
++}
++
++int lnc_topaz_save_mtx_state(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ uint32_t *mtx_reg_state;
++ int i;
++ struct topaz_codec_fw *cur_codec_fw;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ /* FIXME: make sure the topaz_mtx_data_mem is allocated */
++ if (topaz_priv->topaz_mtx_data_mem == NULL) {
++ PSB_DEBUG_GENERAL("TOPAZ: try to save context without space "
++ "allocated, return directly without save\n");
++ return -1;
++ }
++ if (topaz_priv->topaz_fw_loaded == 0) {
++ PSB_DEBUG_GENERAL("TOPAZ: try to save context without firmware "
++ "uploaded\n");
++ return -1;
++ }
++
++ topaz_wait_for_register(dev_priv,
++ MTX_START + MTX_CORE_CR_MTX_TXRPT_OFFSET,
++ TXRPT_WAITONKICK_VALUE,
++ 0xffffffff);
++
++ /* stop mtx */
++ MTX_WRITE32(MTX_CORE_CR_MTX_ENABLE_OFFSET,
++ MTX_CORE_CR_MTX_ENABLE_MTX_TOFF_MASK);
++
++ mtx_reg_state = topaz_priv->topaz_mtx_reg_state;
++
++ /* FIXME: conside to put read/write into one function */
++ /* Saves 8 Registers of D0 Bank */
++ /* DoRe0, D0Ar6, D0Ar4, D0Ar2, D0FrT, D0.5, D0.6 and D0.7 */
++ for (i = 0; i < 8; i++) {
++ topaz_read_core_reg(dev_priv, 0x1 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 8 Registers of D1 Bank */
++ /* D1Re0, D1Ar5, D1Ar3, D1Ar1, D1RtP, D1.5, D1.6 and D1.7 */
++ for (i = 0; i < 8; i++) {
++ topaz_read_core_reg(dev_priv, 0x2 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 4 Registers of A0 Bank */
++ /* A0StP, A0FrP, A0.2 and A0.3 */
++ for (i = 0; i < 4; i++) {
++ topaz_read_core_reg(dev_priv, 0x3 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 4 Registers of A1 Bank */
++ /* A1GbP, A1LbP, A1.2 and A1.3 */
++ for (i = 0; i < 4; i++) {
++ topaz_read_core_reg(dev_priv, 0x4 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves PC and PCX */
++ for (i = 0; i < 2; i++) {
++ topaz_read_core_reg(dev_priv, 0x5 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++ /* Saves 8 Control Registers */
++ /* TXSTAT, TXMASK, TXSTATI, TXMASKI, TXPOLL, TXGPIOI, TXPOLLI,
++ * TXGPIOO */
++ for (i = 0; i < 8; i++) {
++ topaz_read_core_reg(dev_priv, 0x7 | (i<<4),
++ mtx_reg_state);
++ mtx_reg_state++;
++ }
++
++ /* save mtx data memory */
++ cur_codec_fw = &topaz_priv->topaz_fw[topaz_priv->topaz_cur_codec];
++
++ mtx_dma_read(dev, cur_codec_fw->data_location + 0x80900000 - 0x82880000,
++ topaz_priv->cur_mtx_data_size);
++
++ /* turn off mtx clocks */
++ MTX_WRITE32(TOPAZ_CR_TOPAZ_MAN_CLK_GATE,
++ MASK_TOPAZ_CR_TOPAZ_MTX_MAN_CLK_GATE);
++
++ topaz_priv->topaz_mtx_saved = 1;
++
++ return 0;
++}
++
++void mtx_dma_read(struct drm_device *dev, uint32_t source_addr, uint32_t size)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct ttm_buffer_object *target;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ /* setup mtx DMAC registers to do transfer */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, source_addr);
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(2, MTX_BURSTSIZE) |
++ F_ENCODE(1, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(size, MTX_LENGTH));
++
++ /* give the DMAC access to the host memory via BIF */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
++
++ target = topaz_priv->topaz_mtx_data_mem;
++ /* transfert the data */
++ /* FIXME: size is meaured by bytes? */
++ topaz_dma_transfer(dev_priv, 0, target->offset, 0,
++ MTX_CR_MTX_SYSC_CDMAT,
++ size, 0, 1);
++
++ /* wait for it transfer */
++ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++ /* clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++ /* give access back to topaz core */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
++}
++
++void dmac_transfer(struct drm_device *dev, uint32_t channel, uint32_t dst_addr,
++ uint32_t soc_addr, uint32_t bytes_num,
++ int increment, int rnw)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ uint32_t count_reg;
++ uint32_t irq_state;
++
++ /* check no transfer is in progress */
++ DMAC_READ32(IMG_SOC_DMAC_COUNT(channel), &count_reg);
++ if (0 != (count_reg & (MASK_IMG_SOC_EN | MASK_IMG_SOC_LIST_EN))) {
++ DRM_ERROR("TOPAZ: there's transfer in progress when wanna "
++ "save mtx data\n");
++ /* FIXME: how to handle this error */
++ return;
++ }
++
++ /* no hold off period */
++ DMAC_WRITE32(IMG_SOC_DMAC_PER_HOLD(channel), 0);
++ /* cleare irq state */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(channel), 0);
++ DMAC_READ32(IMG_SOC_DMAC_IRQ_STAT(channel), &irq_state);
++ if (0 != irq_state) {
++ DRM_ERROR("TOPAZ: there's irq cann't clear\n");
++ return;
++ }
++
++ DMAC_WRITE32(IMG_SOC_DMAC_SETUP(channel), dst_addr);
++ count_reg = DMAC_VALUE_COUNT(DMAC_BSWAP_NO_SWAP,
++ DMAC_PWIDTH_32_BIT, rnw,
++ DMAC_PWIDTH_32_BIT, bytes_num);
++ /* generate an interrupt at end of transfer */
++ count_reg |= MASK_IMG_SOC_TRANSFER_IEN;
++ count_reg |= F_ENCODE(rnw, IMG_SOC_DIR);
++ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel), count_reg);
++
++ DMAC_WRITE32(IMG_SOC_DMAC_PERIPH(channel),
++ DMAC_VALUE_PERIPH_PARAM(DMAC_ACC_DEL_0, increment,
++ DMAC_BURST_2));
++ DMAC_WRITE32(IMG_SOC_DMAC_PERIPHERAL_ADDR(channel), soc_addr);
++
++ /* Finally, rewrite the count register with the enable
++ * bit set to kick off the transfer */
++ DMAC_WRITE32(IMG_SOC_DMAC_COUNT(channel),
++ count_reg | MASK_IMG_SOC_EN);
++}
++
++void mtx_dma_write(struct drm_device *dev)
++{
++ struct topaz_codec_fw *cur_codec_fw;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++
++ cur_codec_fw = &topaz_priv->topaz_fw[topaz_priv->topaz_cur_codec];
++
++ /* upload code */
++ /* setup mtx DMAC registers to recieve transfer */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000);
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(2, MTX_BURSTSIZE) |
++ F_ENCODE(0, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(cur_codec_fw->text_size / 4, MTX_LENGTH));
++
++ /* give DMAC access to host memory */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
++
++ /* transfer code */
++ topaz_dma_transfer(dev_priv, 0, cur_codec_fw->text->offset, 0,
++ MTX_CR_MTX_SYSC_CDMAT, cur_codec_fw->text_size / 4,
++ 0, 0);
++ /* wait finished */
++ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++ /* clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++
++ /* setup mtx start recieving data */
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAA, 0x80900000 +
++ (cur_codec_fw->data_location) - 0x82880000);
++
++ MTX_WRITE32(MTX_CR_MTX_SYSC_CDMAC,
++ F_ENCODE(2, MTX_BURSTSIZE) |
++ F_ENCODE(0, MTX_RNW) |
++ F_ENCODE(1, MTX_ENABLE) |
++ F_ENCODE(topaz_priv->cur_mtx_data_size, MTX_LENGTH));
++
++ /* give DMAC access to host memory */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 1);
++
++ /* transfer data */
++ topaz_dma_transfer(dev_priv, 0, topaz_priv->topaz_mtx_data_mem->offset,
++ 0, MTX_CR_MTX_SYSC_CDMAT,
++ topaz_priv->cur_mtx_data_size,
++ 0, 0);
++ /* wait finished */
++ topaz_wait_for_register(dev_priv, IMG_SOC_DMAC_IRQ_STAT(0) + DMAC_START,
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN),
++ F_ENCODE(1, IMG_SOC_TRANSFER_FIN));
++ /* clear interrupt */
++ DMAC_WRITE32(IMG_SOC_DMAC_IRQ_STAT(0), 0);
++
++ /* give access back to Topaz Core */
++ TOPAZ_WRITE32(TOPAZ_CR_IMG_TOPAZ_DMAC_MODE, 0);
++}
++
+diff --git a/drivers/gpu/drm/mrst/drv/msvdx_power.c b/drivers/gpu/drm/mrst/drv/msvdx_power.c
+new file mode 100644
+index 0000000..803e04d
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/msvdx_power.c
+@@ -0,0 +1,164 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: binglin.chen@intel.com>
++ *
++ */
++
++#include "msvdx_power.h"
++#include "psb_msvdx.h"
++#include "psb_drv.h"
++
++#include "services_headers.h"
++#include "sysconfig.h"
++
++static PVRSRV_ERROR DevInitMSVDXPart1(IMG_VOID *pvDeviceNode)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
++ PVRSRV_ERROR eError;
++ PVRSRV_DEV_POWER_STATE eDefaultPowerState;
++
++ /* register power operation function */
++ /* FIXME: this should be in part2 init function, but
++ * currently here only OSPM needs IMG device... */
++ eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF;
++ eError = PVRSRVRegisterPowerDevice(psDeviceNode->sDevId.ui32DeviceIndex,
++ MSVDXPrePowerState,
++ MSVDXPostPowerState,
++ MSVDXPreClockSpeedChange,
++ MSVDXPostClockSpeedChange,
++ (IMG_HANDLE)psDeviceNode,
++ PVRSRV_DEV_POWER_STATE_ON,
++ eDefaultPowerState);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR, "DevInitMSVDXPart1: failed to "
++ "register device with power manager"));
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR DevDeInitMSVDX(IMG_VOID *pvDeviceNode)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
++ PVRSRV_ERROR eError;
++
++ /* should deinit all resource */
++
++ eError = PVRSRVRemovePowerDevice(psDeviceNode->sDevId.ui32DeviceIndex);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MSVDXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ /* version check */
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MSVDXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_MSVDX;
++ psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_VIDEO;
++
++ psDeviceNode->pfnInitDevice = DevInitMSVDXPart1;
++ psDeviceNode->pfnDeInitDevice = DevDeInitMSVDX;
++
++ psDeviceNode->pfnInitDeviceCompatCheck = MSVDXDevInitCompatCheck;
++
++ psDeviceNode->pfnDeviceISR = psb_msvdx_interrupt;
++ psDeviceNode->pvISRData = (IMG_VOID *)gpDrmDevice;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MSVDXPrePowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ /* ask for a change not power on*/
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON)) {
++ struct drm_psb_private *dev_priv = gpDrmDevice->dev_private;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++ MSVDX_NEW_PMSTATE(gpDrmDevice, msvdx_priv, PSB_PMSTATE_POWERDOWN);
++
++ /* context save */
++ psb_msvdx_save_context(gpDrmDevice);
++
++ /* internally close the device */
++
++ /* ask for power off */
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF) {
++ /* here will deinitialize the driver if needed */
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s no action for transform from %d to %d",
++ __func__,
++ eCurrentPowerState,
++ eNewPowerState));
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MSVDXPostPowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ /* if ask for change & current status is not on */
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON)) {
++ /* internally open device */
++ struct drm_psb_private *dev_priv = gpDrmDevice->dev_private;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++ MSVDX_NEW_PMSTATE(gpDrmDevice, msvdx_priv, PSB_PMSTATE_POWERUP);
++
++ /* context restore */
++ psb_msvdx_restore_context(gpDrmDevice);
++
++ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF) {
++ /* here will initialize the driver if needed */
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s no action for transform from %d to %d",
++ __func__,
++ eCurrentPowerState,
++ eNewPowerState));
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MSVDXPreClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MSVDXPostClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ return PVRSRV_OK;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/msvdx_power.h b/drivers/gpu/drm/mrst/drv/msvdx_power.h
+new file mode 100644
+index 0000000..19a3d44
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/msvdx_power.h
+@@ -0,0 +1,48 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: binglin.chen@intel.com>
++ *
++ */
++
++#ifndef MSVDX_POWER_H_
++#define MSVDX_POWER_H_
++
++#include "services_headers.h"
++#include "sysconfig.h"
++
++extern struct drm_device *gpDrmDevice;
++
++/* function define */
++PVRSRV_ERROR MSVDXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
++PVRSRV_ERROR MSVDXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++/* power function define */
++PVRSRV_ERROR MSVDXPrePowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR MSVDXPostPowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR MSVDXPreClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR MSVDXPostClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR MSVDXInitOSPM(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++#endif /* !MSVDX_POWER_H_ */
+diff --git a/drivers/gpu/drm/mrst/drv/psb_bl.c b/drivers/gpu/drm/mrst/drv/psb_bl.c
+new file mode 100644
+index 0000000..0ef6c41
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_bl.c
+@@ -0,0 +1,260 @@
++/*
++ * psb backlight using HAL
++ *
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: Eric Knopp
++ *
++ */
++
++#include <linux/backlight.h>
++#include "psb_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_bios.h"
++#include "ospm_power.h"
++
++#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
++#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
++#define BLC_PWM_FREQ_CALC_CONSTANT 32
++#define MHz 1000000
++#define BRIGHTNESS_MIN_LEVEL 1
++#define BRIGHTNESS_MAX_LEVEL 100
++#define BRIGHTNESS_MASK 0xFF
++#define BLC_POLARITY_NORMAL 0
++#define BLC_POLARITY_INVERSE 1
++#define BLC_ADJUSTMENT_MAX 100
++
++#define PSB_BLC_PWM_PRECISION_FACTOR 10
++#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
++#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
++
++#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
++#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
++
++static int psb_brightness;
++static struct backlight_device *psb_backlight_device;
++static u8 blc_brightnesscmd;
++u8 blc_pol;
++u8 blc_type;
++
++
++int psb_set_brightness(struct backlight_device *bd)
++{
++ u32 blc_pwm_ctl;
++ u32 max_pwm_blc;
++
++ struct drm_device *dev =
++ (struct drm_device *)psb_backlight_device->priv;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++
++ int level = bd->props.brightness;
++
++ DRM_DEBUG("backlight level set to %d\n", level);
++
++ /* Perform value bounds checking */
++ if (level < BRIGHTNESS_MIN_LEVEL)
++ level = BRIGHTNESS_MIN_LEVEL;
++
++ if (IS_POULSBO(dev)) {
++ psb_intel_lvds_set_brightness(dev, level);
++ psb_brightness = level;
++ return 0;
++ }
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ /* Calculate and set the brightness value */
++ max_pwm_blc = REG_READ(BLC_PWM_CTL) >>
++ MRST_BACKLIGHT_MODULATION_FREQ_SHIFT;
++ blc_pwm_ctl = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
++
++ /* Adjust the backlight level with the percent in
++ * dev_priv->blc_adj1;
++ */
++ blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj1;
++ blc_pwm_ctl = blc_pwm_ctl / BLC_ADJUSTMENT_MAX;
++
++ /* Adjust the backlight level with the percent in
++ * dev_priv->blc_adj2;
++ */
++ blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj2;
++ blc_pwm_ctl = blc_pwm_ctl / BLC_ADJUSTMENT_MAX;
++
++
++ if (blc_pol == BLC_POLARITY_INVERSE)
++ blc_pwm_ctl = max_pwm_blc - blc_pwm_ctl;
++
++ /* force PWM bit on */
++ REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
++ REG_WRITE(BLC_PWM_CTL,
++ (max_pwm_blc << MRST_BACKLIGHT_MODULATION_FREQ_SHIFT) |
++ blc_pwm_ctl);
++
++ /* printk("***backlight brightness = %i\n", level); */
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++
++ /* cache the brightness for later use */
++ psb_brightness = level;
++ return 0;
++}
++
++int psb_get_brightness(struct backlight_device *bd)
++{
++ /* return locally cached var instead of HW read (due to DPST etc.) */
++ return psb_brightness;
++}
++
++struct backlight_ops psb_ops = {
++ .get_brightness = psb_get_brightness,
++ .update_status = psb_set_brightness,
++};
++
++int psb_backlight_init(struct drm_device *dev)
++{
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++ unsigned long CoreClock;
++ /* u32 bl_max_freq; */
++ /* unsigned long value; */
++ u16 bl_max_freq;
++ uint32_t value;
++ uint32_t clock;
++ uint32_t blc_pwm_precision_factor;
++
++ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
++
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++
++ psb_backlight_device = backlight_device_register("psb-bl",
++ NULL, NULL, &psb_ops);
++ if (IS_ERR(psb_backlight_device))
++ return PTR_ERR(psb_backlight_device);
++
++ psb_backlight_device->priv = dev;
++
++ if (IS_MRST(dev)) {
++ dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
++ dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX;
++
++ /* this needs to come from VBT when available */
++ bl_max_freq = 256;
++ /* this needs to be set elsewhere */
++ blc_pol = BLC_POLARITY_NORMAL;
++ blc_pwm_precision_factor = BLC_PWM_PRECISION_FACTOR;
++
++ if (dev_priv->sku_83)
++ CoreClock = 166;
++ else if (dev_priv->sku_100)
++ CoreClock = 200;
++ else if (dev_priv->sku_100L)
++ CoreClock = 100;
++ else
++ return 1;
++ } else {
++ /* get bl_max_freq and pol from dev_priv*/
++ if (!dev_priv->lvds_bl) {
++ DRM_ERROR("Has no valid LVDS backlight info\n");
++ return 1;
++ }
++ bl_max_freq = dev_priv->lvds_bl->freq;
++ blc_pol = dev_priv->lvds_bl->pol;
++ blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
++ blc_brightnesscmd = dev_priv->lvds_bl->brightnesscmd;
++ blc_type = dev_priv->lvds_bl->type;
++
++ /*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
++ /*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
++
++ pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
++ pci_read_config_dword(pci_root, 0xD4, &clock);
++
++ switch (clock & 0x07) {
++ case 0:
++ CoreClock = 100;
++ break;
++ case 1:
++ CoreClock = 133;
++ break;
++ case 2:
++ CoreClock = 150;
++ break;
++ case 3:
++ CoreClock = 178;
++ break;
++ case 4:
++ CoreClock = 200;
++ break;
++ case 5:
++ case 6:
++ case 7:
++ CoreClock = 266;
++ default:
++ return 1;
++ }
++ } /*end if(IS_MRST(dev))*/
++
++ value = (CoreClock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
++ value *= blc_pwm_precision_factor;
++ value /= bl_max_freq;
++ value /= blc_pwm_precision_factor;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ if (IS_MRST(dev)) {
++ if (value >
++ (unsigned long long)MRST_BLC_MAX_PWM_REG_FREQ)
++ return 2;
++ else {
++ REG_WRITE(BLC_PWM_CTL2,
++ (0x80000000 | REG_READ(BLC_PWM_CTL2)));
++ REG_WRITE(BLC_PWM_CTL, value |
++ (value <<
++ MRST_BACKLIGHT_MODULATION_FREQ_SHIFT));
++ }
++ } else {
++ if (
++ value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
++ value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
++ return 2;
++ else {
++ value &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
++ REG_WRITE(BLC_PWM_CTL,
++ (value << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
++ (value));
++ }
++ } /*end if(IS_MRST(dev))*/
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++
++ psb_backlight_device->props.brightness = BRIGHTNESS_MAX_LEVEL;
++ psb_backlight_device->props.max_brightness = BRIGHTNESS_MAX_LEVEL;
++ backlight_update_status(psb_backlight_device);
++#endif
++ return 0;
++}
++
++void psb_backlight_exit(void)
++{
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++ psb_backlight_device->props.brightness = 0;
++ backlight_update_status(psb_backlight_device);
++ backlight_device_unregister(psb_backlight_device);
++#endif
++ return;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_buffer.c b/drivers/gpu/drm/mrst/drv/psb_buffer.c
+new file mode 100644
+index 0000000..d54a429
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_buffer.c
+@@ -0,0 +1,379 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
++ */
++#include "ttm/ttm_placement_common.h"
++#include "ttm/ttm_execbuf_util.h"
++#include "ttm/ttm_fence_api.h"
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_schedule.h"
++
++#define DRM_MEM_TTM 26
++
++struct drm_psb_ttm_backend {
++ struct ttm_backend base;
++ struct page **pages;
++ unsigned int desired_tile_stride;
++ unsigned int hw_tile_stride;
++ int mem_type;
++ unsigned long offset;
++ unsigned long num_pages;
++};
++
++/*
++ * MSVDX/TOPAZ GPU virtual space looks like this
++ * (We currently use only one MMU context).
++ * PSB_MEM_MMU_START: from 0x40000000, for generic buffers
++ * TTM_PL_CI: from 0xe0000000+half GTT space, for camear/video buffer sharing
++ * TTM_PL_RAR: from TTM_PL_CI, for RAR/video buffer sharing
++ * TTM_PL_TT: from TTM_PL_RAR, for buffers need to mapping into GTT
++ */
++static int psb_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
++ struct ttm_mem_type_manager *man)
++{
++
++ struct drm_psb_private *dev_priv =
++ container_of(bdev, struct drm_psb_private, bdev);
++ struct psb_gtt *pg = dev_priv->pg;
++
++ switch (type) {
++ case TTM_PL_SYSTEM:
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
++ man->available_caching = TTM_PL_FLAG_CACHED |
++ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
++ man->default_caching = TTM_PL_FLAG_CACHED;
++ break;
++ case DRM_PSB_MEM_MMU:
++ man->io_offset = 0x00000000;
++ man->io_size = 0x00000000;
++ man->io_addr = NULL;
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_CMA;
++ man->gpu_offset = PSB_MEM_MMU_START;
++ man->available_caching = TTM_PL_FLAG_CACHED |
++ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
++ man->default_caching = TTM_PL_FLAG_WC;
++ break;
++ case TTM_PL_CI:
++ man->io_addr = NULL;
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_FIXED |
++ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
++ man->io_offset = dev_priv->ci_region_start;
++ man->io_size = pg->ci_stolen_size;
++ man->gpu_offset = pg->mmu_gatt_start;
++ man->available_caching = TTM_PL_FLAG_UNCACHED;
++ man->default_caching = TTM_PL_FLAG_UNCACHED;
++ break;
++ case TTM_PL_RAR: /* Unmappable RAR memory */
++ man->io_offset = dev_priv->rar_region_start;
++ man->io_size = pg->rar_stolen_size;
++ man->io_addr = NULL;
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_FIXED |
++ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
++ man->available_caching = TTM_PL_FLAG_UNCACHED;
++ man->default_caching = TTM_PL_FLAG_UNCACHED;
++ man->gpu_offset = pg->mmu_gatt_start;
++ break;
++ case TTM_PL_TT: /* Mappable GATT memory */
++ man->io_offset = pg->gatt_start;
++ man->io_size = pg->gatt_pages << PAGE_SHIFT;
++ man->io_addr = NULL;
++#ifdef PSB_WORKING_HOST_MMU_ACCESS
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
++#else
++ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++ TTM_MEMTYPE_FLAG_CMA;
++#endif
++ man->available_caching = TTM_PL_FLAG_CACHED |
++ TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
++ man->default_caching = TTM_PL_FLAG_WC;
++ man->gpu_offset = pg->mmu_gatt_start;
++ break;
++ default:
++ DRM_ERROR("Unsupported memory type %u\n", (unsigned) type);
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static uint32_t psb_evict_mask(struct ttm_buffer_object *bo)
++{
++ uint32_t cur_placement = bo->mem.flags & ~TTM_PL_MASK_MEM;
++
++ /* all buffers evicted to system memory */
++ return cur_placement | TTM_PL_FLAG_SYSTEM;
++}
++
++static int psb_invalidate_caches(struct ttm_bo_device *bdev,
++ uint32_t placement)
++{
++ return 0;
++}
++
++static int psb_move_blit(struct ttm_buffer_object *bo,
++ bool evict, bool no_wait,
++ struct ttm_mem_reg *new_mem)
++{
++ BUG();
++ return 0;
++}
++
++/*
++ * Flip destination ttm into GATT,
++ * then blit and subsequently move out again.
++ */
++
++static int psb_move_flip(struct ttm_buffer_object *bo,
++ bool evict, bool interruptible, bool no_wait,
++ struct ttm_mem_reg *new_mem)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_mem_reg tmp_mem;
++ int ret;
++
++ tmp_mem = *new_mem;
++ tmp_mem.mm_node = NULL;
++ tmp_mem.proposed_flags = TTM_PL_FLAG_TT;
++
++ ret = ttm_bo_mem_space(bo, &tmp_mem, interruptible, no_wait);
++ if (ret)
++ return ret;
++ ret = ttm_tt_bind(bo->ttm, &tmp_mem);
++ if (ret)
++ goto out_cleanup;
++ ret = psb_move_blit(bo, true, no_wait, &tmp_mem);
++ if (ret)
++ goto out_cleanup;
++
++ ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
++out_cleanup:
++ if (tmp_mem.mm_node) {
++ spin_lock(&bdev->lru_lock);
++ drm_mm_put_block(tmp_mem.mm_node);
++ tmp_mem.mm_node = NULL;
++ spin_unlock(&bdev->lru_lock);
++ }
++ return ret;
++}
++
++static int psb_move(struct ttm_buffer_object *bo,
++ bool evict, bool interruptible,
++ bool no_wait, struct ttm_mem_reg *new_mem)
++{
++ struct ttm_mem_reg *old_mem = &bo->mem;
++
++ if ((old_mem->mem_type == TTM_PL_RAR) ||
++ (new_mem->mem_type == TTM_PL_RAR)) {
++ ttm_bo_free_old_node(bo);
++ *old_mem = *new_mem;
++ } else if (old_mem->mem_type == TTM_PL_SYSTEM) {
++ return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
++ } else if (new_mem->mem_type == TTM_PL_SYSTEM) {
++ int ret = psb_move_flip(bo, evict, interruptible,
++ no_wait, new_mem);
++ if (unlikely(ret != 0)) {
++ if (ret == -ERESTART)
++ return ret;
++ else
++ return ttm_bo_move_memcpy(bo, evict, no_wait,
++ new_mem);
++ }
++ } else {
++ if (psb_move_blit(bo, evict, no_wait, new_mem))
++ return ttm_bo_move_memcpy(bo, evict, no_wait,
++ new_mem);
++ }
++ return 0;
++}
++
++static int drm_psb_tbe_populate(struct ttm_backend *backend,
++ unsigned long num_pages,
++ struct page **pages,
++ struct page *dummy_read_page)
++{
++ struct drm_psb_ttm_backend *psb_be =
++ container_of(backend, struct drm_psb_ttm_backend, base);
++
++ psb_be->pages = pages;
++ return 0;
++}
++
++static int drm_psb_tbe_unbind(struct ttm_backend *backend)
++{
++ struct ttm_bo_device *bdev = backend->bdev;
++ struct drm_psb_private *dev_priv =
++ container_of(bdev, struct drm_psb_private, bdev);
++ struct drm_psb_ttm_backend *psb_be =
++ container_of(backend, struct drm_psb_ttm_backend, base);
++ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
++ struct ttm_mem_type_manager *man = &bdev->man[psb_be->mem_type];
++
++ PSB_DEBUG_RENDER("MMU unbind.\n");
++
++ if (psb_be->mem_type == TTM_PL_TT) {
++ uint32_t gatt_p_offset =
++ (psb_be->offset - man->gpu_offset) >> PAGE_SHIFT;
++
++ (void) psb_gtt_remove_pages(dev_priv->pg, gatt_p_offset,
++ psb_be->num_pages,
++ psb_be->desired_tile_stride,
++ psb_be->hw_tile_stride);
++ }
++
++ psb_mmu_remove_pages(pd, psb_be->offset,
++ psb_be->num_pages,
++ psb_be->desired_tile_stride,
++ psb_be->hw_tile_stride);
++
++ return 0;
++}
++
++static int drm_psb_tbe_bind(struct ttm_backend *backend,
++ struct ttm_mem_reg *bo_mem)
++{
++ struct ttm_bo_device *bdev = backend->bdev;
++ struct drm_psb_private *dev_priv =
++ container_of(bdev, struct drm_psb_private, bdev);
++ struct drm_psb_ttm_backend *psb_be =
++ container_of(backend, struct drm_psb_ttm_backend, base);
++ struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
++ struct ttm_mem_type_manager *man = &bdev->man[bo_mem->mem_type];
++ int type;
++ int ret = 0;
++
++ psb_be->mem_type = bo_mem->mem_type;
++ psb_be->num_pages = bo_mem->num_pages;
++ psb_be->desired_tile_stride = 0;
++ psb_be->hw_tile_stride = 0;
++ psb_be->offset = (bo_mem->mm_node->start << PAGE_SHIFT) +
++ man->gpu_offset;
++
++ type =
++ (bo_mem->
++ flags & TTM_PL_FLAG_CACHED) ? PSB_MMU_CACHED_MEMORY : 0;
++
++ PSB_DEBUG_RENDER("MMU bind.\n");
++ if (psb_be->mem_type == TTM_PL_TT) {
++ uint32_t gatt_p_offset =
++ (psb_be->offset - man->gpu_offset) >> PAGE_SHIFT;
++
++ ret = psb_gtt_insert_pages(dev_priv->pg, psb_be->pages,
++ gatt_p_offset,
++ psb_be->num_pages,
++ psb_be->desired_tile_stride,
++ psb_be->hw_tile_stride, type);
++ }
++
++ ret = psb_mmu_insert_pages(pd, psb_be->pages,
++ psb_be->offset, psb_be->num_pages,
++ psb_be->desired_tile_stride,
++ psb_be->hw_tile_stride, type);
++ if (ret)
++ goto out_err;
++
++ return 0;
++out_err:
++ drm_psb_tbe_unbind(backend);
++ return ret;
++
++}
++
++static void drm_psb_tbe_clear(struct ttm_backend *backend)
++{
++ struct drm_psb_ttm_backend *psb_be =
++ container_of(backend, struct drm_psb_ttm_backend, base);
++
++ psb_be->pages = NULL;
++ return;
++}
++
++static void drm_psb_tbe_destroy(struct ttm_backend *backend)
++{
++ struct drm_psb_ttm_backend *psb_be =
++ container_of(backend, struct drm_psb_ttm_backend, base);
++
++ if (backend)
++ kfree(psb_be);
++}
++
++static struct ttm_backend_func psb_ttm_backend = {
++ .populate = drm_psb_tbe_populate,
++ .clear = drm_psb_tbe_clear,
++ .bind = drm_psb_tbe_bind,
++ .unbind = drm_psb_tbe_unbind,
++ .destroy = drm_psb_tbe_destroy,
++};
++
++static struct ttm_backend *drm_psb_tbe_init(struct ttm_bo_device *bdev)
++{
++ struct drm_psb_ttm_backend *psb_be;
++
++ psb_be = kzalloc(sizeof(*psb_be), GFP_KERNEL);
++ if (!psb_be)
++ return NULL;
++ psb_be->pages = NULL;
++ psb_be->base.func = &psb_ttm_backend;
++ psb_be->base.bdev = bdev;
++ return &psb_be->base;
++}
++
++/*
++ * Use this memory type priority if no eviction is needed.
++ */
++static uint32_t psb_mem_prios[] = {
++ TTM_PL_CI,
++ TTM_PL_RAR,
++ TTM_PL_TT,
++ DRM_PSB_MEM_MMU,
++ TTM_PL_SYSTEM
++};
++
++/*
++ * Use this memory type priority if need to evict.
++ */
++static uint32_t psb_busy_prios[] = {
++ TTM_PL_TT,
++ TTM_PL_CI,
++ TTM_PL_RAR,
++ DRM_PSB_MEM_MMU,
++ TTM_PL_SYSTEM
++};
++
++
++struct ttm_bo_driver psb_ttm_bo_driver = {
++ .mem_type_prio = psb_mem_prios,
++ .mem_busy_prio = psb_busy_prios,
++ .num_mem_type_prio = ARRAY_SIZE(psb_mem_prios),
++ .num_mem_busy_prio = ARRAY_SIZE(psb_busy_prios),
++ .create_ttm_backend_entry = &drm_psb_tbe_init,
++ .invalidate_caches = &psb_invalidate_caches,
++ .init_mem_type = &psb_init_mem_type,
++ .evict_flags = &psb_evict_mask,
++ .move = &psb_move,
++ .verify_access = &psb_verify_access,
++ .sync_obj_signaled = &ttm_fence_sync_obj_signaled,
++ .sync_obj_wait = &ttm_fence_sync_obj_wait,
++ .sync_obj_flush = &ttm_fence_sync_obj_flush,
++ .sync_obj_unref = &ttm_fence_sync_obj_unref,
++ .sync_obj_ref = &ttm_fence_sync_obj_ref
++};
+diff --git a/drivers/gpu/drm/mrst/drv/psb_dpst.c b/drivers/gpu/drm/mrst/drv/psb_dpst.c
+new file mode 100644
+index 0000000..c16c982
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_dpst.c
+@@ -0,0 +1,254 @@
++/*
++ * Copyright © 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * James C. Gualario <james.c.gualario@intel.com>
++ *
++ */
++
++#include "psb_umevents.h"
++#include "psb_dpst.h"
++/**
++ * inform the kernel of the work to be performed and related function.
++ *
++ */
++DECLARE_WORK(dpst_dev_change_work, &psb_dpst_dev_change_wq);
++/**
++ * psb_dpst_notify_change_um - notify user mode of hotplug changes
++ *
++ * @name: name of event to notify user mode of change to
++ * @state: dpst state struct to get workqueue from
++ *
++ */
++int psb_dpst_notify_change_um(enum dpst_event_enum event,
++ struct dpst_state *state)
++{
++ if (state == NULL)
++ return IRQ_HANDLED;
++
++ state->dpst_change_wq_data.dev_name_arry_rw_status
++ [state->dpst_change_wq_data.dev_name_write] =
++ DRM_DPST_READY_TO_READ;
++ state->dpst_change_wq_data.dpst_events
++ [state->dpst_change_wq_data.dev_name_write] =
++ event;
++ if (state->dpst_change_wq_data.dev_name_read_write_wrap_ack == 1)
++ state->dpst_change_wq_data.dev_name_read_write_wrap_ack = 0;
++ state->dpst_change_wq_data.dev_name_write++;
++ if (state->dpst_change_wq_data.dev_name_write ==
++ state->dpst_change_wq_data.dev_name_read) {
++ state->dpst_change_wq_data.dev_name_write--;
++ return IRQ_NONE;
++ }
++ if (state->dpst_change_wq_data.dev_name_write >
++ DRM_DPST_RING_DEPTH_MAX) {
++ state->dpst_change_wq_data.dev_name_write = 0;
++ state->dpst_change_wq_data.dev_name_write_wrap = 1;
++ }
++ state->dpst_change_wq_data.hotplug_dev_list = state->list;
++ queue_work(state->dpst_wq, &(state->dpst_change_wq_data.work));
++ return IRQ_HANDLED;
++}
++EXPORT_SYMBOL(psb_dpst_notify_change_um);
++/**
++ *
++ * psb_dpst_create_and_notify_um - create and notify user mode of new dev
++ *
++ * @name: name to give for new event / device
++ * @state: dpst state instaces to associate event with
++ *
++ */
++struct umevent_obj *psb_dpst_create_and_notify_um(const char *name,
++ struct dpst_state *state)
++{
++ return psb_create_umevent_obj(name, state->list);
++
++}
++EXPORT_SYMBOL(psb_dpst_create_and_notify_um);
++/**
++ * psb_dpst_device_pool_create_and_init - make new hotplug device pool
++ *
++ * @parent_kobj - parent kobject to associate dpst kset with
++ * @state - dpst state instance to associate list with
++ *
++ */
++struct umevent_list *psb_dpst_device_pool_create_and_init(
++ struct kobject *parent_kobj,
++ struct dpst_state *state)
++{
++ struct umevent_list *new_hotplug_dev_list = NULL;
++ new_hotplug_dev_list = psb_umevent_create_list();
++ if (new_hotplug_dev_list)
++ psb_umevent_init(parent_kobj, new_hotplug_dev_list,
++ "psb_dpst");
++
++ state->dpst_wq = create_singlethread_workqueue("dpst-wq");
++
++ if (!state->dpst_wq)
++ return NULL;
++
++ INIT_WORK(&state->dpst_change_wq_data.work, psb_dpst_dev_change_wq);
++
++ state->dpst_change_wq_data.dev_name_read = 0;
++ state->dpst_change_wq_data.dev_name_write = 0;
++ state->dpst_change_wq_data.dev_name_write_wrap = 0;
++ state->dpst_change_wq_data.dev_name_read_write_wrap_ack = 0;
++
++ memset(&(state->dpst_change_wq_data.dev_name_arry_rw_status[0]),
++ 0, sizeof(int)*DRM_DPST_RING_DEPTH);
++
++ return new_hotplug_dev_list;
++}
++EXPORT_SYMBOL(psb_dpst_device_pool_create_and_init);
++/**
++ * psb_dpst_init - init dpst subsystem
++ * @parent_kobj - parent kobject to associate dpst state with
++ *
++ */
++struct dpst_state *psb_dpst_init(struct kobject *parent_kobj)
++{
++ struct dpst_state *state;
++ struct umevent_obj *working_umevent;
++
++ state = kzalloc(sizeof(struct dpst_state), GFP_KERNEL);
++ printk(KERN_ALERT "after kzalloc\n");
++ state->list = NULL;
++ state->list = psb_dpst_device_pool_create_and_init(
++ parent_kobj,
++ state);
++ working_umevent =
++ psb_dpst_create_and_notify_um("init",
++ state);
++ state->dpst_change_wq_data.dev_umevent_arry
++ [DPST_EVENT_INIT_COMPLETE] = &(working_umevent->head);
++ working_umevent =
++ psb_dpst_create_and_notify_um("hist_int",
++ state);
++ state->dpst_change_wq_data.dev_umevent_arry
++ [DPST_EVENT_HIST_INTERRUPT] = &(working_umevent->head);
++ working_umevent =
++ psb_dpst_create_and_notify_um("term",
++ state);
++ state->dpst_change_wq_data.dev_umevent_arry
++ [DPST_EVENT_TERMINATE] = &(working_umevent->head);
++ working_umevent =
++ psb_dpst_create_and_notify_um("phase_done",
++ state);
++ state->dpst_change_wq_data.dev_umevent_arry
++ [DPST_EVENT_PHASE_COMPLETE] = &(working_umevent->head);
++
++ return state;
++}
++EXPORT_SYMBOL(psb_dpst_init);
++/**
++ * psb_dpst_device_pool_destroy - destroy all dpst related resources
++ *
++ * @state: dpst state instance to destroy
++ *
++ */
++void psb_dpst_device_pool_destroy(struct dpst_state *state)
++{
++ int i;
++ struct umevent_list *list;
++ struct umevent_obj *umevent_test;
++ list = state->list;
++ flush_workqueue(state->dpst_wq);
++ destroy_workqueue(state->dpst_wq);
++ for (i = 0; i < DRM_DPST_MAX_NUM_EVENTS; i++) {
++ umevent_test = list_entry(
++ (state->dpst_change_wq_data.dev_umevent_arry[i]),
++ struct umevent_obj, head);
++ state->dpst_change_wq_data.dev_umevent_arry[i] = NULL;
++ }
++ psb_umevent_cleanup(list);
++ kfree(state);
++}
++EXPORT_SYMBOL(psb_dpst_device_pool_destroy);
++/**
++ * psb_dpst_dev_change_wq - change workqueue implementation
++ *
++ * @work: work struct to use for kernel scheduling
++ *
++ */
++void psb_dpst_dev_change_wq(struct work_struct *work)
++{
++ struct dpst_disp_workqueue_data *wq_data;
++ int curr_event_index;
++ wq_data = to_dpst_disp_workqueue_data(work);
++ if (wq_data->dev_name_write_wrap == 1) {
++ wq_data->dev_name_read_write_wrap_ack = 1;
++ wq_data->dev_name_write_wrap = 0;
++ while (wq_data->dev_name_read != DRM_DPST_RING_DEPTH_MAX) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_DPST_READY_TO_READ) {
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_DPST_READ_COMPLETE;
++ curr_event_index = wq_data->dpst_events
++ [wq_data->dev_name_read];
++ psb_umevent_notify_change_gfxsock
++ (list_entry(
++ (wq_data->dev_umevent_arry
++ [curr_event_index]),
++ struct umevent_obj, head),
++ DRM_DPST_SOCKET_GROUP_ID);
++ }
++ wq_data->dev_name_read++;
++ }
++ wq_data->dev_name_read = 0;
++ while (wq_data->dev_name_read < wq_data->dev_name_write-1) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_DPST_READY_TO_READ) {
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_DPST_READ_COMPLETE;
++ curr_event_index = wq_data->dpst_events
++ [wq_data->dev_name_read];
++ psb_umevent_notify_change_gfxsock
++ (list_entry(
++ (wq_data->dev_umevent_arry
++ [curr_event_index]),
++ struct umevent_obj, head),
++ DRM_DPST_SOCKET_GROUP_ID);
++ }
++ wq_data->dev_name_read++;
++ }
++ } else {
++ while (wq_data->dev_name_read < wq_data->dev_name_write) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_DPST_READY_TO_READ) {
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_DPST_READ_COMPLETE;
++ curr_event_index = wq_data->dpst_events
++ [wq_data->dev_name_read];
++ psb_umevent_notify_change_gfxsock
++ (list_entry(
++ (wq_data->dev_umevent_arry
++ [curr_event_index]),
++ struct umevent_obj, head),
++ DRM_DPST_SOCKET_GROUP_ID);
++ }
++ wq_data->dev_name_read++;
++ }
++ }
++ if (wq_data->dev_name_read > DRM_DPST_RING_DEPTH_MAX)
++ wq_data->dev_name_read = 0;
++}
++EXPORT_SYMBOL(psb_dpst_dev_change_wq);
+diff --git a/drivers/gpu/drm/mrst/drv/psb_dpst.h b/drivers/gpu/drm/mrst/drv/psb_dpst.h
+new file mode 100644
+index 0000000..6f24a05
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_dpst.h
+@@ -0,0 +1,98 @@
++/*
++ * Copyright © 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * James C. Gualario <james.c.gualario@intel.com>
++ *
++ */
++
++#ifndef _PSB_DPST_H_
++#define _PSB_DPST_H_
++/**
++ * required includes
++ *
++ */
++#include "psb_umevents.h"
++/**
++ * dpst event enumeration
++ *
++ */
++enum dpst_event_enum {
++ DPST_EVENT_INIT_COMPLETE,
++ DPST_EVENT_HIST_INTERRUPT,
++ DPST_EVENT_TERMINATE,
++ DPST_EVENT_PHASE_COMPLETE,
++ DPST_MAX_EVENT
++};
++/**
++ * dpst specific defines
++ *
++ */
++#define DRM_DPST_RING_DEPTH 256
++#define DRM_DPST_RING_DEPTH_MAX (DRM_DPST_RING_DEPTH-1)
++#define DRM_DPST_READY_TO_READ 1
++#define DRM_DPST_READ_COMPLETE 2
++#define DRM_DPST_MAX_NUM_EVENTS (DPST_MAX_EVENT)
++/**
++ * dpst workqueue data struct.
++ */
++struct dpst_disp_workqueue_data {
++ struct work_struct work;
++ const char *dev_name;
++ int dev_name_write;
++ int dev_name_read;
++ int dev_name_write_wrap;
++ int dev_name_read_write_wrap_ack;
++ enum dpst_event_enum dpst_events[DRM_DPST_RING_DEPTH];
++ int dev_name_arry_rw_status[DRM_DPST_RING_DEPTH];
++ struct umevent_list *hotplug_dev_list;
++ struct list_head *dev_umevent_arry[DRM_DPST_MAX_NUM_EVENTS];
++};
++/**
++ * dpst state structure
++ *
++ */
++struct dpst_state {
++ struct workqueue_struct *dpst_wq;
++ struct dpst_disp_workqueue_data dpst_change_wq_data;
++ struct umevent_list *list;
++};
++/**
++ * main interface function prototytpes for dpst support.
++ *
++ */
++extern struct dpst_state *psb_dpst_init(struct kobject *parent_kobj);
++extern int psb_dpst_notify_change_um(enum dpst_event_enum event,
++ struct dpst_state *state);
++extern struct umevent_obj *psb_dpst_create_and_notify_um(const char *name,
++ struct dpst_state *state);
++extern struct umevent_list *psb_dpst_device_pool_create_and_init(
++ struct kobject *parent_kobj,
++ struct dpst_state *state);
++extern void psb_dpst_device_pool_destroy(struct dpst_state *state);
++/**
++ * to go back and forth between work struct and workqueue data
++ *
++ */
++#define to_dpst_disp_workqueue_data(x) \
++ container_of(x, struct dpst_disp_workqueue_data, work)
++
++/**
++ * function prototypes for workqueue implementation
++ *
++ */
++extern void psb_dpst_dev_change_wq(struct work_struct *work);
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_drm.h b/drivers/gpu/drm/mrst/drv/psb_drm.h
+new file mode 100644
+index 0000000..f23afd0
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_drm.h
+@@ -0,0 +1,634 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef _PSB_DRM_H_
++#define _PSB_DRM_H_
++
++#if defined(__linux__) && !defined(__KERNEL__)
++#include<stdint.h>
++#include <linux/types.h>
++#include "drm_mode.h"
++#endif
++
++#include "ttm/ttm_fence_user.h"
++#include "ttm/ttm_placement_user.h"
++
++/*
++ * Menlow/MRST graphics driver package version
++ * a.b.c.xxxx
++ * a - Product Family: 5 - Linux
++ * b - Major Release Version: 0 - non-Gallium (Unbuntu);
++ * 1 - Gallium (Moblin2)
++ * c - Hotfix Release
++ * xxxx - Graphics internal build #
++ */
++#define PSB_PACKAGE_VERSION "5.3.0.32L.0007"
++
++#define DRM_PSB_SAREA_MAJOR 0
++#define DRM_PSB_SAREA_MINOR 2
++#define PSB_FIXED_SHIFT 16
++
++
++#define PSB_NUM_PIPE 2
++
++/*
++ * Public memory types.
++ */
++
++#define DRM_PSB_MEM_MMU TTM_PL_PRIV1
++#define DRM_PSB_FLAG_MEM_MMU TTM_PL_FLAG_PRIV1
++
++typedef int32_t psb_fixed;
++typedef uint32_t psb_ufixed;
++
++static inline int32_t psb_int_to_fixed(int a)
++{
++ return a * (1 << PSB_FIXED_SHIFT);
++}
++
++static inline uint32_t psb_unsigned_to_ufixed(unsigned int a)
++{
++ return a << PSB_FIXED_SHIFT;
++}
++
++/*Status of the command sent to the gfx device.*/
++typedef enum {
++ DRM_CMD_SUCCESS,
++ DRM_CMD_FAILED,
++ DRM_CMD_HANG
++} drm_cmd_status_t;
++
++struct drm_psb_scanout {
++ uint32_t buffer_id; /* DRM buffer object ID */
++ uint32_t rotation; /* Rotation as in RR_rotation definitions */
++ uint32_t stride; /* Buffer stride in bytes */
++ uint32_t depth; /* Buffer depth in bits (NOT) bpp */
++ uint32_t width; /* Buffer width in pixels */
++ uint32_t height; /* Buffer height in lines */
++ int32_t transform[3][3]; /* Buffer composite transform */
++ /* (scaling, rot, reflect) */
++};
++
++#define DRM_PSB_SAREA_OWNERS 16
++#define DRM_PSB_SAREA_OWNER_2D 0
++#define DRM_PSB_SAREA_OWNER_3D 1
++
++#define DRM_PSB_SAREA_SCANOUTS 3
++
++struct drm_psb_sarea {
++ /* Track changes of this data structure */
++
++ uint32_t major;
++ uint32_t minor;
++
++ /* Last context to touch part of hw */
++ uint32_t ctx_owners[DRM_PSB_SAREA_OWNERS];
++
++ /* Definition of front- and rotated buffers */
++ uint32_t num_scanouts;
++ struct drm_psb_scanout scanouts[DRM_PSB_SAREA_SCANOUTS];
++
++ int planeA_x;
++ int planeA_y;
++ int planeA_w;
++ int planeA_h;
++ int planeB_x;
++ int planeB_y;
++ int planeB_w;
++ int planeB_h;
++ /* Number of active scanouts */
++ uint32_t num_active_scanouts;
++};
++
++#define PSB_RELOC_MAGIC 0x67676767
++#define PSB_RELOC_SHIFT_MASK 0x0000FFFF
++#define PSB_RELOC_SHIFT_SHIFT 0
++#define PSB_RELOC_ALSHIFT_MASK 0xFFFF0000
++#define PSB_RELOC_ALSHIFT_SHIFT 16
++
++#define PSB_RELOC_OP_OFFSET 0 /* Offset of the indicated
++ * buffer
++ */
++
++struct drm_psb_reloc {
++ uint32_t reloc_op;
++ uint32_t where; /* offset in destination buffer */
++ uint32_t buffer; /* Buffer reloc applies to */
++ uint32_t mask; /* Destination format: */
++ uint32_t shift; /* Destination format: */
++ uint32_t pre_add; /* Destination format: */
++ uint32_t background; /* Destination add */
++ uint32_t dst_buffer; /* Destination buffer. Index into buffer_list */
++ uint32_t arg0; /* Reloc-op dependant */
++ uint32_t arg1;
++};
++
++
++#define PSB_GPU_ACCESS_READ (1ULL << 32)
++#define PSB_GPU_ACCESS_WRITE (1ULL << 33)
++#define PSB_GPU_ACCESS_MASK (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)
++
++#define PSB_BO_FLAG_COMMAND (1ULL << 52)
++
++#define PSB_ENGINE_2D 0
++#define PSB_ENGINE_VIDEO 1
++#define LNC_ENGINE_ENCODE 5
++
++/*
++ * For this fence class we have a couple of
++ * fence types.
++ */
++
++#define _PSB_FENCE_EXE_SHIFT 0
++#define _PSB_FENCE_FEEDBACK_SHIFT 4
++
++#define _PSB_FENCE_TYPE_EXE (1 << _PSB_FENCE_EXE_SHIFT)
++#define _PSB_FENCE_TYPE_FEEDBACK (1 << _PSB_FENCE_FEEDBACK_SHIFT)
++
++#define PSB_NUM_ENGINES 6
++
++
++#define PSB_FEEDBACK_OP_VISTEST (1 << 0)
++
++struct drm_psb_extension_rep {
++ int32_t exists;
++ uint32_t driver_ioctl_offset;
++ uint32_t sarea_offset;
++ uint32_t major;
++ uint32_t minor;
++ uint32_t pl;
++};
++
++#define DRM_PSB_EXT_NAME_LEN 128
++
++union drm_psb_extension_arg {
++ char extension[DRM_PSB_EXT_NAME_LEN];
++ struct drm_psb_extension_rep rep;
++};
++
++struct psb_validate_req {
++ uint64_t set_flags;
++ uint64_t clear_flags;
++ uint64_t next;
++ uint64_t presumed_gpu_offset;
++ uint32_t buffer_handle;
++ uint32_t presumed_flags;
++ uint32_t group;
++ uint32_t pad64;
++};
++
++struct psb_validate_rep {
++ uint64_t gpu_offset;
++ uint32_t placement;
++ uint32_t fence_type_mask;
++};
++
++#define PSB_USE_PRESUMED (1 << 0)
++
++struct psb_validate_arg {
++ int handled;
++ int ret;
++ union {
++ struct psb_validate_req req;
++ struct psb_validate_rep rep;
++ } d;
++};
++
++
++#define DRM_PSB_FENCE_NO_USER (1 << 0)
++
++struct psb_ttm_fence_rep {
++ uint32_t handle;
++ uint32_t fence_class;
++ uint32_t fence_type;
++ uint32_t signaled_types;
++ uint32_t error;
++};
++
++typedef struct drm_psb_cmdbuf_arg {
++ uint64_t buffer_list; /* List of buffers to validate */
++ uint64_t clip_rects; /* See i915 counterpart */
++ uint64_t scene_arg;
++ uint64_t fence_arg;
++
++ uint32_t ta_flags;
++
++ uint32_t ta_handle; /* TA reg-value pairs */
++ uint32_t ta_offset;
++ uint32_t ta_size;
++
++ uint32_t oom_handle;
++ uint32_t oom_offset;
++ uint32_t oom_size;
++
++ uint32_t cmdbuf_handle; /* 2D Command buffer object or, */
++ uint32_t cmdbuf_offset; /* rasterizer reg-value pairs */
++ uint32_t cmdbuf_size;
++
++ uint32_t reloc_handle; /* Reloc buffer object */
++ uint32_t reloc_offset;
++ uint32_t num_relocs;
++
++ int32_t damage; /* Damage front buffer with cliprects */
++ /* Not implemented yet */
++ uint32_t fence_flags;
++ uint32_t engine;
++
++ /*
++ * Feedback;
++ */
++
++ uint32_t feedback_ops;
++ uint32_t feedback_handle;
++ uint32_t feedback_offset;
++ uint32_t feedback_breakpoints;
++ uint32_t feedback_size;
++} drm_psb_cmdbuf_arg_t;
++
++typedef struct drm_psb_pageflip_arg {
++ uint32_t flip_offset;
++ uint32_t stride;
++} drm_psb_pageflip_arg_t;
++
++typedef enum {
++ LNC_VIDEO_DEVICE_INFO,
++ LNC_VIDEO_GETPARAM_RAR_INFO,
++ LNC_VIDEO_GETPARAM_CI_INFO,
++ LNC_VIDEO_GETPARAM_RAR_HANDLER_OFFSET,
++ LNC_VIDEO_FRAME_SKIP
++} lnc_getparam_key_t;
++
++struct drm_lnc_video_getparam_arg {
++ lnc_getparam_key_t key;
++ uint64_t arg; /* argument pointer */
++ uint64_t value; /* feed back pointer */
++};
++
++
++/*
++ * Feedback components:
++ */
++
++/*
++ * Vistest component. The number of these in the feedback buffer
++ * equals the number of vistest breakpoints + 1.
++ * This is currently the only feedback component.
++ */
++
++struct drm_psb_vistest {
++ uint32_t vt[8];
++};
++
++struct drm_psb_sizes_arg {
++ uint32_t ta_mem_size;
++ uint32_t mmu_size;
++ uint32_t pds_size;
++ uint32_t rastgeom_size;
++ uint32_t tt_size;
++ uint32_t vram_size;
++};
++
++struct drm_psb_hist_status_arg {
++ uint32_t buf[32];
++};
++
++struct drm_psb_dpst_lut_arg {
++ uint8_t lut[256];
++ int output_id;
++};
++
++struct mrst_timing_info {
++ uint16_t pixel_clock;
++ uint8_t hactive_lo;
++ uint8_t hblank_lo;
++ uint8_t hblank_hi:4;
++ uint8_t hactive_hi:4;
++ uint8_t vactive_lo;
++ uint8_t vblank_lo;
++ uint8_t vblank_hi:4;
++ uint8_t vactive_hi:4;
++ uint8_t hsync_offset_lo;
++ uint8_t hsync_pulse_width_lo;
++ uint8_t vsync_pulse_width_lo:4;
++ uint8_t vsync_offset_lo:4;
++ uint8_t vsync_pulse_width_hi:2;
++ uint8_t vsync_offset_hi:2;
++ uint8_t hsync_pulse_width_hi:2;
++ uint8_t hsync_offset_hi:2;
++ uint8_t width_mm_lo;
++ uint8_t height_mm_lo;
++ uint8_t height_mm_hi:4;
++ uint8_t width_mm_hi:4;
++ uint8_t hborder;
++ uint8_t vborder;
++ uint8_t unknown0:1;
++ uint8_t hsync_positive:1;
++ uint8_t vsync_positive:1;
++ uint8_t separate_sync:2;
++ uint8_t stereo:1;
++ uint8_t unknown6:1;
++ uint8_t interlaced:1;
++} __attribute__((packed));
++
++struct mrst_panel_descriptor_v1{
++ uint32_t Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
++ /* 0x61190 if MIPI */
++ uint32_t Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
++ uint32_t Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
++ uint32_t Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 dword */
++ /* Register 0x61210 */
++ struct mrst_timing_info DTD;/*18 bytes, Standard definition */
++ uint16_t Panel_Backlight_Inverter_Descriptor;/* 16 bits, as follows */
++ /* Bit 0, Frequency, 15 bits,0 - 32767Hz */
++ /* Bit 15, Polarity, 1 bit, 0: Normal, 1: Inverted */
++ uint16_t Panel_MIPI_Display_Descriptor;
++ /*16 bits, Defined as follows: */
++ /* if MIPI, 0x0000 if LVDS */
++ /* Bit 0, Type, 2 bits, */
++ /* 0: Type-1, */
++ /* 1: Type-2, */
++ /* 2: Type-3, */
++ /* 3: Type-4 */
++ /* Bit 2, Pixel Format, 4 bits */
++ /* Bit0: 16bpp (not supported in LNC), */
++ /* Bit1: 18bpp loosely packed, */
++ /* Bit2: 18bpp packed, */
++ /* Bit3: 24bpp */
++ /* Bit 6, Reserved, 2 bits, 00b */
++ /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
++ /* Bit 14, Reserved, 2 bits, 00b */
++} __attribute__ ((packed));
++
++struct mrst_panel_descriptor_v2{
++ uint32_t Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
++ /* 0x61190 if MIPI */
++ uint32_t Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
++ uint32_t Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
++ uint8_t Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 byte */
++ /* Register 0x61210 */
++ struct mrst_timing_info DTD;/*18 bytes, Standard definition */
++ uint16_t Panel_Backlight_Inverter_Descriptor;/*16 bits, as follows*/
++ /*Bit 0, Frequency, 16 bits, 0 - 32767Hz*/
++ uint8_t Panel_Initial_Brightness;/* [7:0] 0 - 100% */
++ /*Bit 7, Polarity, 1 bit,0: Normal, 1: Inverted*/
++ uint16_t Panel_MIPI_Display_Descriptor;
++ /*16 bits, Defined as follows: */
++ /* if MIPI, 0x0000 if LVDS */
++ /* Bit 0, Type, 2 bits, */
++ /* 0: Type-1, */
++ /* 1: Type-2, */
++ /* 2: Type-3, */
++ /* 3: Type-4 */
++ /* Bit 2, Pixel Format, 4 bits */
++ /* Bit0: 16bpp (not supported in LNC), */
++ /* Bit1: 18bpp loosely packed, */
++ /* Bit2: 18bpp packed, */
++ /* Bit3: 24bpp */
++ /* Bit 6, Reserved, 2 bits, 00b */
++ /* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
++ /* Bit 14, Reserved, 2 bits, 00b */
++} __attribute__ ((packed));
++
++union mrst_panel_rx{
++ struct{
++ uint16_t NumberOfLanes:2; /*Num of Lanes, 2 bits,0 = 1 lane,*/
++ /* 1 = 2 lanes, 2 = 3 lanes, 3 = 4 lanes. */
++ uint16_t MaxLaneFreq:3; /* 0: 100MHz, 1: 200MHz, 2: 300MHz, */
++ /*3: 400MHz, 4: 500MHz, 5: 600MHz, 6: 700MHz, 7: 800MHz.*/
++ uint16_t SupportedVideoTransferMode:2; /*0: Non-burst only */
++ /* 1: Burst and non-burst */
++ /* 2/3: Reserved */
++ uint16_t HSClkBehavior:1; /*0: Continuous, 1: Non-continuous*/
++ uint16_t DuoDisplaySupport:1; /*1 bit,0: No, 1: Yes*/
++ uint16_t ECC_ChecksumCapabilities:1;/*1 bit,0: No, 1: Yes*/
++ uint16_t BidirectionalCommunication:1;/*1 bit,0: No, 1: Yes */
++ uint16_t Rsvd:5;/*5 bits,00000b */
++ } panelrx;
++ uint16_t panel_receiver;
++} __attribute__ ((packed));
++
++struct gct_ioctl_arg{
++ uint8_t bpi; /* boot panel index, number of panel used during boot */
++ uint8_t pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */
++ struct mrst_timing_info DTD; /* timing info for the selected panel */
++ uint32_t Panel_Port_Control;
++ uint32_t PP_On_Sequencing;/*1 dword,Register 0x61208,*/
++ uint32_t PP_Off_Sequencing;/*1 dword,Register 0x6120C,*/
++ uint32_t PP_Cycle_Delay;
++ uint16_t Panel_Backlight_Inverter_Descriptor;
++ uint16_t Panel_MIPI_Display_Descriptor;
++} __attribute__ ((packed));
++
++struct mrst_vbt{
++ char Signature[4]; /*4 bytes,"$GCT" */
++ uint8_t Revision; /*1 byte */
++ uint8_t Size; /*1 byte */
++ uint8_t Checksum; /*1 byte,Calculated*/
++ void *mrst_gct;
++} __attribute__ ((packed));
++
++struct mrst_gct_v1{ /* expect this table to change per customer request*/
++ union{ /*8 bits,Defined as follows: */
++ struct{
++ uint8_t PanelType:4; /*4 bits, Bit field for panels*/
++ /* 0 - 3: 0 = LVDS, 1 = MIPI*/
++ /*2 bits,Specifies which of the*/
++ uint8_t BootPanelIndex:2;
++ /* 4 panels to use by default*/
++ uint8_t BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
++ /* the 4 MIPI DSI receivers to use*/
++ } PD;
++ uint8_t PanelDescriptor;
++ };
++ struct mrst_panel_descriptor_v1 panel[4];/*panel descrs,38 bytes each*/
++ union mrst_panel_rx panelrx[4]; /* panel receivers*/
++} __attribute__ ((packed));
++
++struct mrst_gct_v2{ /* expect this table to change per customer request*/
++ union{ /*8 bits,Defined as follows: */
++ struct{
++ uint8_t PanelType:4; /*4 bits, Bit field for panels*/
++ /* 0 - 3: 0 = LVDS, 1 = MIPI*/
++ /*2 bits,Specifies which of the*/
++ uint8_t BootPanelIndex:2;
++ /* 4 panels to use by default*/
++ uint8_t BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
++ /* the 4 MIPI DSI receivers to use*/
++ } PD;
++ uint8_t PanelDescriptor;
++ };
++ struct mrst_panel_descriptor_v2 panel[4];/*panel descrs,38 bytes each*/
++ union mrst_panel_rx panelrx[4]; /* panel receivers*/
++} __attribute__ ((packed));
++
++#define PSB_DC_CRTC_SAVE 0x01
++#define PSB_DC_CRTC_RESTORE 0x02
++#define PSB_DC_OUTPUT_SAVE 0x04
++#define PSB_DC_OUTPUT_RESTORE 0x08
++#define PSB_DC_CRTC_MASK 0x03
++#define PSB_DC_OUTPUT_MASK 0x0C
++
++struct drm_psb_dc_state_arg {
++ uint32_t flags;
++ uint32_t obj_id;
++};
++
++struct drm_psb_mode_operation_arg {
++ uint32_t obj_id;
++ uint16_t operation;
++ struct drm_mode_modeinfo mode;
++ void *data;
++};
++
++struct drm_psb_stolen_memory_arg {
++ uint32_t base;
++ uint32_t size;
++};
++
++/*Display Register Bits*/
++#define REGRWBITS_PFIT_CONTROLS (1 << 0)
++#define REGRWBITS_PFIT_AUTOSCALE_RATIOS (1 << 1)
++#define REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS (1 << 2)
++#define REGRWBITS_PIPEASRC (1 << 3)
++#define REGRWBITS_PIPEBSRC (1 << 4)
++#define REGRWBITS_VTOTAL_A (1 << 5)
++#define REGRWBITS_VTOTAL_B (1 << 6)
++
++/*Overlay Register Bits*/
++#define OV_REGRWBITS_OVADD (1 << 0)
++#define OV_REGRWBITS_OGAM_ALL (1 << 1)
++
++struct drm_psb_register_rw_arg {
++ uint32_t b_force_hw_on;
++
++ uint32_t display_read_mask;
++ uint32_t display_write_mask;
++
++ struct {
++ uint32_t pfit_controls;
++ uint32_t pfit_autoscale_ratios;
++ uint32_t pfit_programmed_scale_ratios;
++ uint32_t pipeasrc;
++ uint32_t pipebsrc;
++ uint32_t vtotal_a;
++ uint32_t vtotal_b;
++ } display;
++
++ uint32_t overlay_read_mask;
++ uint32_t overlay_write_mask;
++
++ struct {
++ uint32_t OVADD;
++ uint32_t OGAMC0;
++ uint32_t OGAMC1;
++ uint32_t OGAMC2;
++ uint32_t OGAMC3;
++ uint32_t OGAMC4;
++ uint32_t OGAMC5;
++ } overlay;
++
++ uint32_t sprite_enable_mask;
++ uint32_t sprite_disable_mask;
++
++ struct {
++ uint32_t dspa_control;
++ uint32_t dspa_key_value;
++ uint32_t dspa_key_mask;
++ uint32_t dspc_control;
++ uint32_t dspc_stride;
++ uint32_t dspc_position;
++ uint32_t dspc_linear_offset;
++ uint32_t dspc_size;
++ uint32_t dspc_surface;
++ } sprite;
++};
++
++struct psb_gtt_mapping_arg {
++ void *hKernelMemInfo;
++ uint32_t offset_pages;
++};
++
++struct drm_psb_getpageaddrs_arg {
++ uint32_t handle;
++ unsigned long *page_addrs;
++ unsigned long gtt_offset;
++};
++
++
++/* Controlling the kernel modesetting buffers */
++
++#define DRM_PSB_KMS_OFF 0x00
++#define DRM_PSB_KMS_ON 0x01
++#define DRM_PSB_VT_LEAVE 0x02
++#define DRM_PSB_VT_ENTER 0x03
++#define DRM_PSB_EXTENSION 0x06
++#define DRM_PSB_SIZES 0x07
++#define DRM_PSB_FUSE_REG 0x08
++#define DRM_PSB_VBT 0x09
++#define DRM_PSB_DC_STATE 0x0A
++#define DRM_PSB_ADB 0x0B
++#define DRM_PSB_MODE_OPERATION 0x0C
++#define DRM_PSB_STOLEN_MEMORY 0x0D
++#define DRM_PSB_REGISTER_RW 0x0E
++#define DRM_PSB_GTT_MAP 0x0F
++#define DRM_PSB_GTT_UNMAP 0x10
++#define DRM_PSB_GETPAGEADDRS 0x11
++/**
++ * NOTE: Add new commands here, but increment
++ * the values below and increment their
++ * corresponding defines where they're
++ * defined elsewhere.
++ */
++#define DRM_PVR_RESERVED1 0x12
++#define DRM_PVR_RESERVED2 0x13
++#define DRM_PVR_RESERVED3 0x14
++#define DRM_PVR_RESERVED4 0x15
++#define DRM_PVR_RESERVED5 0x16
++
++#define DRM_PSB_HIST_ENABLE 0x17
++#define DRM_PSB_HIST_STATUS 0x18
++#define DRM_PSB_UPDATE_GUARD 0x19
++#define DRM_PSB_INIT_COMM 0x1A
++#define DRM_PSB_DPST 0x1B
++#define DRM_PSB_GAMMA 0x1C
++#define DRM_PSB_DPST_BL 0x1D
++
++#define DRM_PVR_RESERVED6 0x1E
++
++#define DRM_PSB_GET_PIPE_FROM_CRTC_ID 0x1F
++
++struct drm_psb_dev_info_arg {
++ uint32_t num_use_attribute_registers;
++};
++#define DRM_PSB_DEVINFO 0x01
++
++#define PSB_MODE_OPERATION_MODE_VALID 0x01
++#define PSB_MODE_OPERATION_SET_DC_BASE 0x02
++
++struct drm_psb_get_pipe_from_crtc_id_arg {
++ /** ID of CRTC being requested **/
++ uint32_t crtc_id;
++
++ /** pipe of requested CRTC **/
++ uint32_t pipe;
++};
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_drv.c b/drivers/gpu/drm/mrst/drv/psb_drv.c
+new file mode 100644
+index 0000000..dbc4327
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_drv.c
+@@ -0,0 +1,2218 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include "psb_drm.h"
++#include "psb_drv.h"
++#include "psb_fb.h"
++#include "psb_reg.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_bios.h"
++#include "psb_msvdx.h"
++#include "lnc_topaz.h"
++#include <drm/drm_pciids.h>
++#include "ospm_power.h"
++#include "pvr_drm_shared.h"
++#include "img_types.h"
++#include <linux/cpu.h>
++#include <linux/notifier.h>
++#include <linux/spinlock.h>
++#include <linux/rar/rar_register.h>
++#include <linux/rar/memrar.h>
++
++/*IMG headers*/
++#include "pvr_drm_shared.h"
++#include "img_types.h"
++#include "pvr_bridge.h"
++#include "linkage.h"
++#include "sysirq.h"
++
++int drm_psb_debug;
++EXPORT_SYMBOL(drm_psb_debug);
++static int drm_psb_trap_pagefaults;
++
++int drm_psb_no_fb;
++int drm_psb_force_pipeb;
++int drm_idle_check_interval = 5;
++int drm_msvdx_pmpolicy = PSB_PMPOLICY_POWERDOWN;
++int drm_topaz_pmpolicy = PSB_PMPOLICY_NOPM;
++int drm_topaz_sbuswa;
++int drm_psb_ospm = 1;
++
++static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++
++MODULE_PARM_DESC(debug, "Enable debug output");
++MODULE_PARM_DESC(no_fb, "Disable FBdev");
++MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
++MODULE_PARM_DESC(disable_vsync, "Disable vsync interrupts");
++MODULE_PARM_DESC(force_pipeb, "Forces PIPEB to become primary fb");
++MODULE_PARM_DESC(ta_mem_size, "TA memory size in kiB");
++MODULE_PARM_DESC(ospm, "switch for ospm support");
++MODULE_PARM_DESC(msvdx_pmpolicy, "msvdx power management policy btw frames");
++MODULE_PARM_DESC(topaz_pmpolicy, "topaz power managerment policy btw frames");
++MODULE_PARM_DESC(topaz_sbuswa, "WA for topaz sysbus write");
++module_param_named(debug, drm_psb_debug, int, 0600);
++module_param_named(no_fb, drm_psb_no_fb, int, 0600);
++module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
++module_param_named(force_pipeb, drm_psb_force_pipeb, int, 0600);
++module_param_named(msvdx_pmpolicy, drm_msvdx_pmpolicy, int, 0600);
++module_param_named(topaz_pmpolicy, drm_topaz_pmpolicy, int, 0600);
++module_param_named(topaz_sbuswa, drm_topaz_sbuswa, int, 0600);
++module_param_named(ospm, drm_psb_ospm, int, 0600);
++
++#if 0
++#ifndef CONFIG_X86_PAT
++#warning "Don't build this driver without PAT support!!!"
++#endif
++#endif
++#define psb_PCI_IDS \
++ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
++ {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
++ {0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0, 0, 0}
++
++static struct pci_device_id pciidlist[] = {
++ psb_PCI_IDS
++};
++
++/*
++ * Standard IOCTLs.
++ */
++
++#define DRM_IOCTL_PSB_KMS_OFF \
++ DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_KMS_ON \
++ DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_VT_LEAVE \
++ DRM_IO(DRM_PSB_VT_LEAVE + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_VT_ENTER \
++ DRM_IO(DRM_PSB_VT_ENTER + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_EXTENSION \
++ DRM_IOWR(DRM_PSB_EXTENSION + DRM_COMMAND_BASE, \
++ union drm_psb_extension_arg)
++#define DRM_IOCTL_PSB_SIZES \
++ DRM_IOR(DRM_PSB_SIZES + DRM_COMMAND_BASE, \
++ struct drm_psb_sizes_arg)
++#define DRM_IOCTL_PSB_FUSE_REG \
++ DRM_IOWR(DRM_PSB_FUSE_REG + DRM_COMMAND_BASE, uint32_t)
++#define DRM_IOCTL_PSB_VBT \
++ DRM_IOWR(DRM_PSB_VBT + DRM_COMMAND_BASE, \
++ struct gct_ioctl_arg)
++#define DRM_IOCTL_PSB_DC_STATE \
++ DRM_IOW(DRM_PSB_DC_STATE + DRM_COMMAND_BASE, \
++ struct drm_psb_dc_state_arg)
++#define DRM_IOCTL_PSB_ADB \
++ DRM_IOWR(DRM_PSB_ADB + DRM_COMMAND_BASE, uint32_t)
++#define DRM_IOCTL_PSB_MODE_OPERATION \
++ DRM_IOWR(DRM_PSB_MODE_OPERATION + DRM_COMMAND_BASE, \
++ struct drm_psb_mode_operation_arg)
++#define DRM_IOCTL_PSB_STOLEN_MEMORY \
++ DRM_IOWR(DRM_PSB_STOLEN_MEMORY + DRM_COMMAND_BASE, \
++ struct drm_psb_stolen_memory_arg)
++#define DRM_IOCTL_PSB_REGISTER_RW \
++ DRM_IOWR(DRM_PSB_REGISTER_RW + DRM_COMMAND_BASE, \
++ struct drm_psb_register_rw_arg)
++#define DRM_IOCTL_PSB_GTT_MAP \
++ DRM_IOWR(DRM_PSB_GTT_MAP + DRM_COMMAND_BASE, \
++ struct psb_gtt_mapping_arg)
++#define DRM_IOCTL_PSB_GTT_UNMAP \
++ DRM_IOW(DRM_PSB_GTT_UNMAP + DRM_COMMAND_BASE, \
++ struct psb_gtt_mapping_arg)
++#define DRM_IOCTL_PSB_GETPAGEADDRS \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_GETPAGEADDRS,\
++ struct drm_psb_getpageaddrs_arg)
++#define DRM_IOCTL_PSB_HIST_ENABLE \
++ DRM_IOWR(DRM_PSB_HIST_ENABLE + DRM_COMMAND_BASE, \
++ uint32_t)
++#define DRM_IOCTL_PSB_HIST_STATUS \
++ DRM_IOWR(DRM_PSB_HIST_STATUS + DRM_COMMAND_BASE, \
++ struct drm_psb_hist_status_arg)
++#define DRM_IOCTL_PSB_UPDATE_GUARD \
++ DRM_IOWR(DRM_PSB_UPDATE_GUARD + DRM_COMMAND_BASE, \
++ uint32_t)
++#define DRM_IOCTL_PSB_INIT_COMM \
++ DRM_IOWR(DRM_PSB_INIT_COMM + DRM_COMMAND_BASE, \
++ uint32_t)
++#define DRM_IOCTL_PSB_DPST \
++ DRM_IOWR(DRM_PSB_DPST + DRM_COMMAND_BASE, \
++ uint32_t)
++#define DRM_IOCTL_PSB_GAMMA \
++ DRM_IOWR(DRM_PSB_GAMMA + DRM_COMMAND_BASE, \
++ struct drm_psb_dpst_lut_arg)
++#define DRM_IOCTL_PSB_DPST_BL \
++ DRM_IOWR(DRM_PSB_DPST_BL + DRM_COMMAND_BASE, \
++ uint32_t)
++#define DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID \
++ DRM_IOWR(DRM_PSB_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \
++ struct drm_psb_get_pipe_from_crtc_id_arg)
++
++
++/*pvr ioctls*/
++#define PVR_DRM_SRVKM_IOCTL \
++ DRM_IOW(DRM_COMMAND_BASE + PVR_DRM_SRVKM_CMD, \
++ PVRSRV_BRIDGE_PACKAGE)
++#define PVR_DRM_DISP_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_DISP_CMD)
++#define PVR_DRM_BC_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_BC_CMD)
++#define PVR_DRM_IS_MASTER_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_IS_MASTER_CMD)
++#define PVR_DRM_UNPRIV_IOCTL \
++ DRM_IOWR(DRM_COMMAND_BASE + PVR_DRM_UNPRIV_CMD, \
++ IMG_UINT32)
++#define PVR_DRM_DBGDRV_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_DBGDRV_CMD)
++
++/*
++ * TTM execbuf extension.
++ */
++#if defined(PDUMP)
++#define DRM_PSB_CMDBUF (PVR_DRM_DBGDRV_CMD + 1)
++#else
++#define DRM_PSB_CMDBUF (DRM_PSB_DPST_BL + 1)
++#endif
++
++#define DRM_PSB_SCENE_UNREF (DRM_PSB_CMDBUF + 1)
++#define DRM_IOCTL_PSB_CMDBUF \
++ DRM_IOW(DRM_PSB_CMDBUF + DRM_COMMAND_BASE, \
++ struct drm_psb_cmdbuf_arg)
++#define DRM_IOCTL_PSB_SCENE_UNREF \
++ DRM_IOW(DRM_PSB_SCENE_UNREF + DRM_COMMAND_BASE, \
++ struct drm_psb_scene)
++#define DRM_IOCTL_PSB_KMS_OFF DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_KMS_ON DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE)
++#define DRM_IOCTL_PSB_EXTENSION \
++ DRM_IOWR(DRM_PSB_EXTENSION + DRM_COMMAND_BASE, \
++ union drm_psb_extension_arg)
++/*
++ * TTM placement user extension.
++ */
++
++#define DRM_PSB_PLACEMENT_OFFSET (DRM_PSB_SCENE_UNREF + 1)
++
++#define DRM_PSB_TTM_PL_CREATE (TTM_PL_CREATE + DRM_PSB_PLACEMENT_OFFSET)
++#define DRM_PSB_TTM_PL_REFERENCE (TTM_PL_REFERENCE + DRM_PSB_PLACEMENT_OFFSET)
++#define DRM_PSB_TTM_PL_UNREF (TTM_PL_UNREF + DRM_PSB_PLACEMENT_OFFSET)
++#define DRM_PSB_TTM_PL_SYNCCPU (TTM_PL_SYNCCPU + DRM_PSB_PLACEMENT_OFFSET)
++#define DRM_PSB_TTM_PL_WAITIDLE (TTM_PL_WAITIDLE + DRM_PSB_PLACEMENT_OFFSET)
++#define DRM_PSB_TTM_PL_SETSTATUS (TTM_PL_SETSTATUS + DRM_PSB_PLACEMENT_OFFSET)
++
++/*
++ * TTM fence extension.
++ */
++
++#define DRM_PSB_FENCE_OFFSET (DRM_PSB_TTM_PL_SETSTATUS + 1)
++#define DRM_PSB_TTM_FENCE_SIGNALED (TTM_FENCE_SIGNALED + DRM_PSB_FENCE_OFFSET)
++#define DRM_PSB_TTM_FENCE_FINISH (TTM_FENCE_FINISH + DRM_PSB_FENCE_OFFSET)
++#define DRM_PSB_TTM_FENCE_UNREF (TTM_FENCE_UNREF + DRM_PSB_FENCE_OFFSET)
++
++#define DRM_PSB_FLIP (DRM_PSB_TTM_FENCE_UNREF + 1) /*20*/
++/* PSB video extension */
++#define DRM_LNC_VIDEO_GETPARAM (DRM_PSB_FLIP + 1)
++
++#define DRM_IOCTL_PSB_TTM_PL_CREATE \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_CREATE,\
++ union ttm_pl_create_arg)
++#define DRM_IOCTL_PSB_TTM_PL_REFERENCE \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_REFERENCE,\
++ union ttm_pl_reference_arg)
++#define DRM_IOCTL_PSB_TTM_PL_UNREF \
++ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_UNREF,\
++ struct ttm_pl_reference_req)
++#define DRM_IOCTL_PSB_TTM_PL_SYNCCPU \
++ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SYNCCPU,\
++ struct ttm_pl_synccpu_arg)
++#define DRM_IOCTL_PSB_TTM_PL_WAITIDLE \
++ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_WAITIDLE,\
++ struct ttm_pl_waitidle_arg)
++#define DRM_IOCTL_PSB_TTM_PL_SETSTATUS \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_PL_SETSTATUS,\
++ union ttm_pl_setstatus_arg)
++#define DRM_IOCTL_PSB_TTM_FENCE_SIGNALED \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_SIGNALED, \
++ union ttm_fence_signaled_arg)
++#define DRM_IOCTL_PSB_TTM_FENCE_FINISH \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_FINISH, \
++ union ttm_fence_finish_arg)
++#define DRM_IOCTL_PSB_TTM_FENCE_UNREF \
++ DRM_IOW(DRM_COMMAND_BASE + DRM_PSB_TTM_FENCE_UNREF, \
++ struct ttm_fence_unref_arg)
++#define DRM_IOCTL_PSB_FLIP \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_FLIP, \
++ struct drm_psb_pageflip_arg)
++#define DRM_IOCTL_LNC_VIDEO_GETPARAM \
++ DRM_IOWR(DRM_COMMAND_BASE + DRM_LNC_VIDEO_GETPARAM, \
++ struct drm_lnc_video_getparam_arg)
++
++static int psb_vt_leave_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_vt_enter_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_sizes_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_fuse_reg_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_vbt_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_dc_state_ioctl(struct drm_device *dev, void * data,
++ struct drm_file *file_priv);
++static int psb_adb_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_register_rw_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_hist_enable_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_hist_status_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_update_guard_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_init_comm_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_dpst_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_gamma_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++
++#define PSB_IOCTL_DEF(ioctl, func, flags) \
++ [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
++
++static struct drm_ioctl_desc psb_ioctls[] = {
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_OFF, psbfb_kms_off_ioctl,
++ DRM_ROOT_ONLY),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_ON,
++ psbfb_kms_on_ioctl,
++ DRM_ROOT_ONLY),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_LEAVE, psb_vt_leave_ioctl,
++ DRM_ROOT_ONLY),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_VT_ENTER,
++ psb_vt_enter_ioctl,
++ DRM_ROOT_ONLY),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_EXTENSION, psb_extension_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_SIZES, psb_sizes_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_FUSE_REG, psb_fuse_reg_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_VBT, psb_vbt_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_DC_STATE, psb_dc_state_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_ADB, psb_adb_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_MODE_OPERATION, psb_mode_operation_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_STOLEN_MEMORY, psb_stolen_memory_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_REGISTER_RW, psb_register_rw_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GTT_MAP,
++ psb_gtt_map_meminfo_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GTT_UNMAP,
++ psb_gtt_unmap_meminfo_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GETPAGEADDRS,
++ psb_getpageaddrs_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(PVR_DRM_SRVKM_IOCTL, PVRSRV_BridgeDispatchKM, 0),
++ PSB_IOCTL_DEF(PVR_DRM_DISP_IOCTL, PVRDRM_Dummy_ioctl, 0),
++ PSB_IOCTL_DEF(PVR_DRM_BC_IOCTL, PVRDRM_Dummy_ioctl, 0),
++ PSB_IOCTL_DEF(PVR_DRM_IS_MASTER_IOCTL, PVRDRMIsMaster, DRM_MASTER),
++ PSB_IOCTL_DEF(PVR_DRM_UNPRIV_IOCTL, PVRDRMUnprivCmd, 0),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_HIST_ENABLE,
++ psb_hist_enable_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_HIST_STATUS,
++ psb_hist_status_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_UPDATE_GUARD, psb_update_guard_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_INIT_COMM, psb_init_comm_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST, psb_dpst_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GAMMA, psb_gamma_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID, psb_intel_get_pipe_from_crtc_id, 0),
++#if defined(PDUMP)
++ PSB_IOCTL_DEF(PVR_DRM_DBGDRV_IOCTL, dbgdrv_ioctl, 0),
++#endif
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_CMDBUF, psb_cmdbuf_ioctl, DRM_AUTH),
++ /*to be removed later*/
++ /*PSB_IOCTL_DEF(DRM_IOCTL_PSB_SCENE_UNREF, drm_psb_scene_unref_ioctl,
++ DRM_AUTH),*/
++
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_CREATE, psb_pl_create_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_REFERENCE, psb_pl_reference_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_UNREF, psb_pl_unref_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SYNCCPU, psb_pl_synccpu_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_WAITIDLE, psb_pl_waitidle_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_PL_SETSTATUS, psb_pl_setstatus_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_SIGNALED,
++ psb_fence_signaled_ioctl, DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_FINISH, psb_fence_finish_ioctl,
++ DRM_AUTH),
++ PSB_IOCTL_DEF(DRM_IOCTL_PSB_TTM_FENCE_UNREF, psb_fence_unref_ioctl,
++ DRM_AUTH),
++ /*to be removed later */
++ /*PSB_IOCTL_DEF(DRM_IOCTL_PSB_FLIP, psb_page_flip, DRM_AUTH),*/
++ PSB_IOCTL_DEF(DRM_IOCTL_LNC_VIDEO_GETPARAM,
++ lnc_video_getparam, DRM_AUTH)
++};
++
++static int psb_max_ioctl = DRM_ARRAY_SIZE(psb_ioctls);
++
++static void get_ci_info(struct drm_psb_private *dev_priv)
++{
++ struct pci_dev *pdev;
++
++ pdev = pci_get_subsys(0x8086, 0x080b, 0, 0, NULL);
++ if (pdev == NULL) {
++ /* IF no pci_device we set size & addr to 0, no ci
++ * share buffer can be created */
++ dev_priv->ci_region_start = 0;
++ dev_priv->ci_region_size = 0;
++ printk(KERN_ERR "can't find CI device, no ci share buffer\n");
++ return;
++ }
++
++ dev_priv->ci_region_start = pci_resource_start(pdev, 1);
++ dev_priv->ci_region_size = pci_resource_len(pdev, 1);
++
++ printk(KERN_INFO "ci_region_start %x ci_region_size %d\n",
++ dev_priv->ci_region_start, dev_priv->ci_region_size);
++
++ pci_dev_put(pdev);
++
++ return;
++}
++
++static void get_rar_info(struct drm_psb_private *dev_priv)
++{
++#if defined(CONFIG_RAR_REGISTER)
++ int ret;
++ u32 start_addr, end_addr;
++
++ dev_priv->rar_region_start = 0;
++ dev_priv->rar_region_size = 0;
++ end_addr = 0;
++ ret = 0;
++
++ ret = rar_get_address(RAR_TYPE_VIDEO, &start_addr,
++ &end_addr);
++ if (ret) {
++ printk(KERN_ERR "failed to get rar region info\n");
++ return;
++ }
++ dev_priv->rar_region_start = (uint32_t) start_addr;
++ if (!ret)
++ dev_priv->rar_region_size =
++ end_addr - dev_priv->rar_region_start + 1;
++
++#endif
++ return;
++}
++
++static void psb_set_uopt(struct drm_psb_uopt *uopt)
++{
++ return;
++}
++
++static void psb_lastclose(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++
++ return;
++
++ if (!dev->dev_private)
++ return;
++
++ mutex_lock(&dev_priv->cmdbuf_mutex);
++ if (dev_priv->context.buffers) {
++ vfree(dev_priv->context.buffers);
++ dev_priv->context.buffers = NULL;
++ }
++ mutex_unlock(&dev_priv->cmdbuf_mutex);
++}
++
++static void psb_do_takedown(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++
++
++ if (dev_priv->have_mem_mmu) {
++ ttm_bo_clean_mm(bdev, DRM_PSB_MEM_MMU);
++ dev_priv->have_mem_mmu = 0;
++ }
++
++ if (dev_priv->have_tt) {
++ ttm_bo_clean_mm(bdev, TTM_PL_TT);
++ dev_priv->have_tt = 0;
++ }
++
++ if (dev_priv->have_camera) {
++ ttm_bo_clean_mm(bdev, TTM_PL_CI);
++ dev_priv->have_camera = 0;
++ }
++ if (dev_priv->have_rar) {
++ ttm_bo_clean_mm(bdev, TTM_PL_RAR);
++ dev_priv->have_rar = 0;
++ }
++
++ psb_msvdx_uninit(dev);
++
++ if (IS_MRST(dev))
++ if (!dev_priv->topaz_disabled)
++ lnc_topaz_uninit(dev);
++}
++
++#define FB_REG06 0xD0810600
++#define FB_TOPAZ_DISABLE BIT0
++#define PCI_ID_TOPAZ_DISABLED 0x4101
++#define FB_MIPI_DISABLE BIT11
++#define FB_REG09 0xD0810900
++#define FB_SKU_MASK (BIT12|BIT13|BIT14)
++#define FB_SKU_SHIFT 12
++#define FB_SKU_100 0
++#define FB_SKU_100L 1
++#define FB_SKU_83 2
++#if 1 /* FIXME remove it after PO */
++#define FB_GFX_CLK_DIVIDE_MASK (BIT20|BIT21|BIT22)
++#define FB_GFX_CLK_DIVIDE_SHIFT 20
++#define FB_VED_CLK_DIVIDE_MASK (BIT23|BIT24)
++#define FB_VED_CLK_DIVIDE_SHIFT 23
++#define FB_VEC_CLK_DIVIDE_MASK (BIT25|BIT26)
++#define FB_VEC_CLK_DIVIDE_SHIFT 25
++#endif /* FIXME remove it after PO */
++
++
++void mrst_get_fuse_settings(struct drm_psb_private *dev_priv)
++{
++ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
++ uint32_t fuse_value = 0;
++ uint32_t fuse_value_tmp = 0;
++
++ pci_write_config_dword(pci_root, 0xD0, FB_REG06);
++ pci_read_config_dword(pci_root, 0xD4, &fuse_value);
++
++ dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
++
++ DRM_INFO("internal display is %s\n",
++ dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");
++
++ if (dev_priv->dev->pci_device == PCI_ID_TOPAZ_DISABLED)
++ dev_priv->topaz_disabled = 1;
++ else
++ dev_priv->topaz_disabled = 0;
++
++ dev_priv->video_device_fuse = fuse_value;
++
++ DRM_INFO("topaz is %s\n",
++ dev_priv->topaz_disabled ? "disabled" : "enabled");
++
++ pci_write_config_dword(pci_root, 0xD0, FB_REG09);
++ pci_read_config_dword(pci_root, 0xD4, &fuse_value);
++
++ DRM_INFO("SKU values is 0x%x. \n", fuse_value);
++ fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
++
++ dev_priv->fuse_reg_value = fuse_value;
++
++ switch (fuse_value_tmp) {
++ case FB_SKU_100:
++ DRM_INFO("SKU values is SKU_100. LNC core clk is 200MHz.\n");
++ dev_priv->sku_100 = true;
++ break;
++ case FB_SKU_100L:
++ DRM_INFO("SKU values is SKU_100L. LNC core clk is 100MHz.\n");
++ dev_priv->sku_100L = true;
++ break;
++ case FB_SKU_83:
++ DRM_INFO("SKU values is SKU_83. LNC core clk is 166MHz.\n");
++ dev_priv->sku_83 = true;
++ break;
++ default:
++ DRM_ERROR("Invalid SKU values, SKU value = 0x%08x\n",
++ fuse_value_tmp);
++ }
++
++#if 1 /* FIXME remove it after PO */
++ fuse_value_tmp =
++ (fuse_value & FB_GFX_CLK_DIVIDE_MASK) >> FB_GFX_CLK_DIVIDE_SHIFT;
++
++ switch (fuse_value_tmp) {
++ case 0:
++ DRM_INFO("Gfx clk : core clk = 1:1. \n");
++ break;
++ case 1:
++ DRM_INFO("Gfx clk : core clk = 4:3. \n");
++ break;
++ case 2:
++ DRM_INFO("Gfx clk : core clk = 8:5. \n");
++ break;
++ case 3:
++ DRM_INFO("Gfx clk : core clk = 2:1. \n");
++ break;
++ case 4:
++ DRM_INFO("Gfx clk : core clk = 16:7. \n");
++ break;
++ case 5:
++ DRM_INFO("Gfx clk : core clk = 8:3. \n");
++ break;
++ case 6:
++ DRM_INFO("Gfx clk : core clk = 16:5. \n");
++ break;
++ case 7:
++ DRM_INFO("Gfx clk : core clk = 4:1. \n");
++ break;
++ default:
++ DRM_ERROR("Invalid GFX CLK DIVIDE values, value = 0x%08x\n",
++ fuse_value_tmp);
++ }
++
++ fuse_value_tmp =
++ (fuse_value & FB_VED_CLK_DIVIDE_MASK) >> FB_VED_CLK_DIVIDE_SHIFT;
++
++ switch (fuse_value_tmp) {
++ case 0:
++ DRM_INFO("Ved clk : core clk = 1:1. \n");
++ break;
++ case 1:
++ DRM_INFO("Ved clk : core clk = 4:3. \n");
++ break;
++ case 2:
++ DRM_INFO("Ved clk : core clk = 8:5. \n");
++ break;
++ case 3:
++ DRM_INFO("Ved clk : core clk = 2:1. \n");
++ break;
++ default:
++ DRM_ERROR("Invalid VED CLK DIVIDE values, value = 0x%08x\n",
++ fuse_value_tmp);
++ }
++
++ fuse_value_tmp =
++ (fuse_value & FB_VEC_CLK_DIVIDE_MASK) >> FB_VEC_CLK_DIVIDE_SHIFT;
++
++ switch (fuse_value_tmp) {
++ case 0:
++ DRM_INFO("Vec clk : core clk = 1:1. \n");
++ break;
++ case 1:
++ DRM_INFO("Vec clk : core clk = 4:3. \n");
++ break;
++ case 2:
++ DRM_INFO("Vec clk : core clk = 8:5. \n");
++ break;
++ case 3:
++ DRM_INFO("Vec clk : core clk = 2:1. \n");
++ break;
++ default:
++ DRM_ERROR("Invalid VEC CLK DIVIDE values, value = 0x%08x\n",
++ fuse_value_tmp);
++ }
++#endif /* FIXME remove it after PO */
++
++ return;
++}
++
++bool mrst_get_vbt_data(struct drm_psb_private *dev_priv)
++{
++ struct mrst_vbt *pVBT = &dev_priv->vbt_data;
++ u32 platform_config_address;
++ u8 *pVBT_virtual;
++ u8 bpi;
++ void *pGCT;
++ struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
++
++ /*get the address of the platform config vbt, B0:D2:F0;0xFC */
++ pci_read_config_dword(pci_gfx_root, 0xFC, &platform_config_address);
++ DRM_INFO("drm platform config address is %x\n",
++ platform_config_address);
++
++ /* check for platform config address == 0. */
++ /* this means fw doesn't support vbt */
++
++ if (platform_config_address == 0) {
++ pVBT->Size = 0;
++ return false;
++ }
++
++ /* get the virtual address of the vbt */
++ pVBT_virtual = ioremap(platform_config_address, sizeof(*pVBT));
++
++ memcpy(pVBT, pVBT_virtual, sizeof(*pVBT));
++ iounmap(pVBT_virtual); /* Free virtual address space */
++
++ printk(KERN_ALERT "GCT Revision is %x\n", pVBT->Revision);
++ pVBT->mrst_gct = NULL;
++ pVBT->mrst_gct = ioremap(platform_config_address + sizeof(*pVBT) - 4,
++ pVBT->Size - sizeof(*pVBT) + 4);
++ pGCT = pVBT->mrst_gct;
++
++ switch (pVBT->Revision) {
++ case 0:
++ bpi = ((struct mrst_gct_v1 *)pGCT)->PD.BootPanelIndex;
++ dev_priv->gct_data.bpi = bpi;
++ dev_priv->gct_data.pt =
++ ((struct mrst_gct_v1 *)pGCT)->PD.PanelType;
++ memcpy(&dev_priv->gct_data.DTD,
++ &((struct mrst_gct_v1 *)pGCT)->panel[bpi].DTD,
++ sizeof(struct mrst_timing_info));
++ dev_priv->gct_data.Panel_Port_Control =
++ ((struct mrst_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
++ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
++ ((struct mrst_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
++ break;
++ case 1:
++ bpi = ((struct mrst_gct_v2 *)pGCT)->PD.BootPanelIndex;
++ dev_priv->gct_data.bpi = bpi;
++ dev_priv->gct_data.pt =
++ ((struct mrst_gct_v2 *)pGCT)->PD.PanelType;
++ memcpy(&dev_priv->gct_data.DTD,
++ &((struct mrst_gct_v2 *)pGCT)->panel[bpi].DTD,
++ sizeof(struct mrst_timing_info));
++ dev_priv->gct_data.Panel_Port_Control =
++ ((struct mrst_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
++ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
++ ((struct mrst_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
++ break;
++ default:
++ printk(KERN_ALERT "Unknown revision of GCT!\n");
++ pVBT->Size = 0;
++ return false;
++ }
++
++ return true;
++}
++
++static int psb_do_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ struct psb_gtt *pg = dev_priv->pg;
++
++ uint32_t stolen_gtt;
++ uint32_t tt_start;
++ uint32_t tt_pages;
++
++ int ret = -ENOMEM;
++
++
++ /*
++ * Initialize sequence numbers for the different command
++ * submission mechanisms.
++ */
++
++ dev_priv->sequence[PSB_ENGINE_2D] = 0;
++ dev_priv->sequence[PSB_ENGINE_VIDEO] = 0;
++ dev_priv->sequence[LNC_ENGINE_ENCODE] = 0;
++
++ if (pg->mmu_gatt_start & 0x0FFFFFFF) {
++ DRM_ERROR("Gatt must be 256M aligned. This is a bug.\n");
++ ret = -EINVAL;
++ goto out_err;
++ }
++
++ stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
++ stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ stolen_gtt =
++ (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
++
++ dev_priv->gatt_free_offset = pg->mmu_gatt_start +
++ (stolen_gtt << PAGE_SHIFT) * 1024;
++
++ if (1 || drm_debug) {
++ uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
++ uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
++ DRM_INFO("SGX core id = 0x%08x\n", core_id);
++ DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
++ (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
++ _PSB_CC_REVISION_MAJOR_SHIFT,
++ (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
++ _PSB_CC_REVISION_MINOR_SHIFT);
++ DRM_INFO
++ ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
++ (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
++ _PSB_CC_REVISION_MAINTENANCE_SHIFT,
++ (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
++ _PSB_CC_REVISION_DESIGNER_SHIFT);
++ }
++
++ spin_lock_init(&dev_priv->irqmask_lock);
++
++ tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
++ pg->gatt_pages : PSB_TT_PRIV0_PLIMIT;
++ tt_start = dev_priv->gatt_free_offset - pg->mmu_gatt_start;
++ tt_pages -= tt_start >> PAGE_SHIFT;
++ dev_priv->sizes.ta_mem_size = 0;
++
++
++ if (IS_MRST(dev) &&
++ (dev_priv->ci_region_size != 0) &&
++ !ttm_bo_init_mm(bdev, TTM_PL_CI, pg->ci_start >> PAGE_SHIFT,
++ dev_priv->ci_region_size >> PAGE_SHIFT)) {
++ dev_priv->have_camera = 1;
++ }
++
++ /* since there is always rar region for video, it is ok */
++ if (IS_MRST(dev) &&
++ (dev_priv->rar_region_size != 0) &&
++ !ttm_bo_init_mm(bdev, TTM_PL_RAR, pg->rar_start >> PAGE_SHIFT,
++ dev_priv->rar_region_size >> PAGE_SHIFT)) {
++ dev_priv->have_rar = 1;
++ }
++
++ /* TT region managed by TTM. */
++ if (!ttm_bo_init_mm(bdev, TTM_PL_TT,
++ (pg->rar_start + dev_priv->rar_region_size) >> PAGE_SHIFT,
++ pg->gatt_pages -
++ (pg->ci_start >> PAGE_SHIFT) -
++ ((dev_priv->ci_region_size + dev_priv->rar_region_size)
++ >> PAGE_SHIFT))) {
++
++ dev_priv->have_tt = 1;
++ dev_priv->sizes.tt_size =
++ (tt_pages << PAGE_SHIFT) / (1024 * 1024) / 2;
++ }
++
++ if (!ttm_bo_init_mm(bdev,
++ DRM_PSB_MEM_MMU,
++ 0x00000000,
++ (pg->gatt_start - PSB_MEM_MMU_START) >> PAGE_SHIFT)) {
++ dev_priv->have_mem_mmu = 1;
++ dev_priv->sizes.mmu_size =
++ (pg->gatt_start - PSB_MEM_MMU_START) /
++ (1024*1024);
++ }
++
++
++ PSB_DEBUG_INIT("Init MSVDX\n");
++ psb_msvdx_init(dev);
++
++ if (IS_MRST(dev)) {
++ PSB_DEBUG_INIT("Init Topaz\n");
++ /* for sku100L and sku100M, VEC is disabled in fuses */
++ if (!dev_priv->topaz_disabled)
++ lnc_topaz_init(dev);
++ else
++ ospm_power_island_down(OSPM_VIDEO_ENC_ISLAND);
++ }
++
++ return 0;
++out_err:
++ psb_do_takedown(dev);
++ return ret;
++}
++
++static int psb_intel_opregion_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ /*struct psb_intel_opregion * opregion = &dev_priv->opregion;*/
++ u32 opregion_phy;
++ void *base;
++ u32 *lid_state;
++
++ dev_priv->lid_state = NULL;
++
++ pci_read_config_dword(dev->pdev, 0xfc, &opregion_phy);
++ if (opregion_phy == 0) {
++ DRM_DEBUG("Opregion not supported, won't support lid-switch\n");
++ return -ENOTSUPP;
++ }
++ DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy);
++
++ base = ioremap(opregion_phy, 8*1024);
++ if (!base)
++ return -ENOMEM;
++
++ lid_state = base + 0x01ac;
++
++ DRM_DEBUG("Lid switch state 0x%08x\n", *lid_state);
++
++ dev_priv->lid_state = lid_state;
++ dev_priv->lid_last_state = *lid_state;
++ return 0;
++}
++
++static int psb_driver_unload(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++
++ /*Fristly, unload pvr driver*/
++ PVRSRVDrmUnload(dev);
++
++ psb_backlight_exit(); /*writes minimum value to backlight HW reg */
++
++ if (drm_psb_no_fb == 0)
++ psb_modeset_cleanup(dev);
++
++ if (dev_priv) {
++ if (IS_POULSBO(dev))
++ psb_lid_timer_takedown(dev_priv);
++
++ /* psb_watchdog_takedown(dev_priv); */
++ psb_do_takedown(dev);
++
++
++ if (dev_priv->pf_pd) {
++ psb_mmu_free_pagedir(dev_priv->pf_pd);
++ dev_priv->pf_pd = NULL;
++ }
++ if (dev_priv->mmu) {
++ struct psb_gtt *pg = dev_priv->pg;
++
++ down_read(&pg->sem);
++ psb_mmu_remove_pfn_sequence(
++ psb_mmu_get_default_pd
++ (dev_priv->mmu),
++ pg->mmu_gatt_start,
++ pg->vram_stolen_size >> PAGE_SHIFT);
++ if (pg->ci_stolen_size != 0)
++ psb_mmu_remove_pfn_sequence(
++ psb_mmu_get_default_pd
++ (dev_priv->mmu),
++ pg->ci_start,
++ pg->ci_stolen_size >> PAGE_SHIFT);
++ if (pg->rar_stolen_size != 0)
++ psb_mmu_remove_pfn_sequence(
++ psb_mmu_get_default_pd
++ (dev_priv->mmu),
++ pg->rar_start,
++ pg->rar_stolen_size >> PAGE_SHIFT);
++ up_read(&pg->sem);
++ psb_mmu_driver_takedown(dev_priv->mmu);
++ dev_priv->mmu = NULL;
++ }
++ psb_gtt_takedown(dev_priv->pg, 1);
++ if (dev_priv->scratch_page) {
++ __free_page(dev_priv->scratch_page);
++ dev_priv->scratch_page = NULL;
++ }
++ if (dev_priv->has_bo_device) {
++ ttm_bo_device_release(&dev_priv->bdev);
++ dev_priv->has_bo_device = 0;
++ }
++ if (dev_priv->has_fence_device) {
++ ttm_fence_device_release(&dev_priv->fdev);
++ dev_priv->has_fence_device = 0;
++ }
++ if (dev_priv->vdc_reg) {
++ iounmap(dev_priv->vdc_reg);
++ dev_priv->vdc_reg = NULL;
++ }
++ if (dev_priv->sgx_reg) {
++ iounmap(dev_priv->sgx_reg);
++ dev_priv->sgx_reg = NULL;
++ }
++ if (dev_priv->msvdx_reg) {
++ iounmap(dev_priv->msvdx_reg);
++ dev_priv->msvdx_reg = NULL;
++ }
++
++ if (IS_MRST(dev)) {
++ if (dev_priv->topaz_reg) {
++ iounmap(dev_priv->topaz_reg);
++ dev_priv->topaz_reg = NULL;
++ }
++ }
++
++ if (dev_priv->tdev)
++ ttm_object_device_release(&dev_priv->tdev);
++
++ if (dev_priv->has_global)
++ psb_ttm_global_release(dev_priv);
++
++ kfree(dev_priv);
++ dev->dev_private = NULL;
++
++ /*destory VBT data*/
++ if (IS_POULSBO(dev))
++ psb_intel_destory_bios(dev);
++ }
++
++ ospm_power_uninit();
++
++ return 0;
++}
++
++
++static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
++{
++ struct drm_psb_private *dev_priv;
++ struct ttm_bo_device *bdev;
++ unsigned long resource_start;
++ struct psb_gtt *pg;
++ int ret = -ENOMEM;
++ uint32_t tt_pages;
++
++ DRM_INFO("psb - %s\n", PSB_PACKAGE_VERSION);
++
++ if (IS_MRST(dev))
++ DRM_INFO("Run drivers on Moorestown platform!\n");
++ else
++ DRM_INFO("Run drivers on Poulsbo platform!\n");
++
++ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
++ if (dev_priv == NULL)
++ return -ENOMEM;
++
++ /*init DPST umcomm to NULL*/
++ dev_priv->psb_dpst_state = NULL;
++ dev_priv->psb_hotplug_state = NULL;
++
++ dev_priv->dev = dev;
++ bdev = &dev_priv->bdev;
++
++ ret = psb_ttm_global_init(dev_priv);
++ if (unlikely(ret != 0))
++ goto out_err;
++ dev_priv->has_global = 1;
++
++ dev_priv->tdev = ttm_object_device_init
++ (dev_priv->mem_global_ref.object, PSB_OBJECT_HASH_ORDER);
++ if (unlikely(dev_priv->tdev == NULL))
++ goto out_err;
++
++ mutex_init(&dev_priv->temp_mem);
++ mutex_init(&dev_priv->cmdbuf_mutex);
++ mutex_init(&dev_priv->reset_mutex);
++ INIT_LIST_HEAD(&dev_priv->context.validate_list);
++ INIT_LIST_HEAD(&dev_priv->context.kern_validate_list);
++
++
++ spin_lock_init(&dev_priv->reloc_lock);
++
++ DRM_INIT_WAITQUEUE(&dev_priv->rel_mapped_queue);
++
++ dev->dev_private = (void *) dev_priv;
++ dev_priv->chipset = chipset;
++ psb_set_uopt(&dev_priv->uopt);
++
++ PSB_DEBUG_GENERAL("Init watchdog and scheduler\n");
++ /* psb_watchdog_init(dev_priv); */
++ psb_scheduler_init(dev, &dev_priv->scheduler);
++
++
++ PSB_DEBUG_INIT("Mapping MMIO\n");
++ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
++
++ if (IS_MRST(dev))
++ dev_priv->msvdx_reg =
++ ioremap(resource_start + MRST_MSVDX_OFFSET,
++ PSB_MSVDX_SIZE);
++ else
++ dev_priv->msvdx_reg =
++ ioremap(resource_start + PSB_MSVDX_OFFSET,
++ PSB_MSVDX_SIZE);
++
++ if (!dev_priv->msvdx_reg)
++ goto out_err;
++
++ if (IS_MRST(dev) && !dev_priv->topaz_disabled) {
++ dev_priv->topaz_reg =
++ ioremap(resource_start + LNC_TOPAZ_OFFSET,
++ LNC_TOPAZ_SIZE);
++ if (!dev_priv->topaz_reg)
++ goto out_err;
++ }
++
++ dev_priv->vdc_reg =
++ ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
++ if (!dev_priv->vdc_reg)
++ goto out_err;
++
++ if (IS_MRST(dev))
++ dev_priv->sgx_reg =
++ ioremap(resource_start + MRST_SGX_OFFSET,
++ PSB_SGX_SIZE);
++ else
++ dev_priv->sgx_reg =
++ ioremap(resource_start + PSB_SGX_OFFSET, PSB_SGX_SIZE);
++
++ if (!dev_priv->sgx_reg)
++ goto out_err;
++
++ if (IS_MRST(dev)) {
++ mrst_get_fuse_settings(dev_priv);
++ mrst_get_vbt_data(dev_priv);
++ } else {
++ psb_intel_opregion_init(dev);
++ psb_intel_init_bios(dev);
++ }
++
++ PSB_DEBUG_INIT("Init TTM fence and BO driver\n");
++
++ if (IS_MRST(dev)) {
++ get_ci_info(dev_priv);
++ get_rar_info(dev_priv);
++ }
++
++ /* Init OSPM support */
++ ospm_power_init(dev);
++
++ ret = psb_ttm_fence_device_init(&dev_priv->fdev);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ dev_priv->has_fence_device = 1;
++ ret = ttm_bo_device_init(bdev,
++ dev_priv->mem_global_ref.object,
++ &psb_ttm_bo_driver,
++ DRM_PSB_FILE_PAGE_OFFSET);
++ if (unlikely(ret != 0))
++ goto out_err;
++ dev_priv->has_bo_device = 1;
++ ttm_lock_init(&dev_priv->ttm_lock);
++
++ ret = -ENOMEM;
++
++ dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
++ if (!dev_priv->scratch_page)
++ goto out_err;
++
++ set_pages_uc(dev_priv->scratch_page, 1);
++
++ dev_priv->pg = psb_gtt_alloc(dev);
++ if (!dev_priv->pg)
++ goto out_err;
++
++ ret = psb_gtt_init(dev_priv->pg, 0);
++ if (ret)
++ goto out_err;
++
++ ret = psb_gtt_mm_init(dev_priv->pg);
++ if (ret)
++ goto out_err;
++
++ dev_priv->mmu = psb_mmu_driver_init((void *)0,
++ drm_psb_trap_pagefaults, 0,
++ dev_priv);
++ if (!dev_priv->mmu)
++ goto out_err;
++
++ pg = dev_priv->pg;
++
++ tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
++ (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
++
++ /* CI/RAR use the lower half of TT. */
++ pg->ci_start = (tt_pages / 2) << PAGE_SHIFT;
++ pg->rar_start = pg->ci_start + pg->ci_stolen_size;
++
++
++ /*
++ * Make MSVDX/TOPAZ MMU aware of the CI stolen memory area.
++ */
++ if (dev_priv->pg->ci_stolen_size != 0) {
++ down_read(&pg->sem);
++ ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd
++ (dev_priv->mmu),
++ dev_priv->ci_region_start >> PAGE_SHIFT,
++ pg->mmu_gatt_start + pg->ci_start,
++ pg->ci_stolen_size >> PAGE_SHIFT, 0);
++ up_read(&pg->sem);
++ if (ret)
++ goto out_err;
++ }
++
++ /*
++ * Make MSVDX/TOPAZ MMU aware of the rar stolen memory area.
++ */
++ if (dev_priv->pg->rar_stolen_size != 0) {
++ down_read(&pg->sem);
++ ret = psb_mmu_insert_pfn_sequence(
++ psb_mmu_get_default_pd(dev_priv->mmu),
++ dev_priv->rar_region_start >> PAGE_SHIFT,
++ pg->mmu_gatt_start + pg->rar_start,
++ pg->rar_stolen_size >> PAGE_SHIFT, 0);
++ up_read(&pg->sem);
++ if (ret)
++ goto out_err;
++ }
++
++ dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
++ if (!dev_priv->pf_pd)
++ goto out_err;
++
++ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
++ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
++
++
++ spin_lock_init(&dev_priv->sequence_lock);
++
++
++ PSB_DEBUG_INIT("Begin to init SGX/MSVDX/Topaz\n");
++
++ ret = psb_do_init(dev);
++ if (ret)
++ return ret;
++
++ /**
++ * Init lid switch timer.
++ * NOTE: must do this after psb_intel_opregion_init
++ * and psb_backlight_init
++ */
++ if (IS_POULSBO(dev) && dev_priv->lid_state)
++ psb_lid_timer_init(dev_priv);
++
++ /*initialize the MSI for MRST*/
++ if (IS_MRST(dev)) {
++ if (pci_enable_msi(dev->pdev)) {
++ DRM_ERROR("Enable MSI for MRST failed!\n");
++ } else {
++ PSB_DEBUG_INIT("Enabled MSI IRQ (%d)\n",
++ dev->pdev->irq);
++ /* pci_write_config_word(pdev, 0x04, 0x07); */
++ }
++ }
++
++
++ if (drm_psb_no_fb == 0) {
++ psb_modeset_init(dev);
++ drm_helper_initial_config(dev);
++ }
++
++ /*must be after mrst_get_fuse_settings()*/
++ ret = psb_backlight_init(dev);
++ if (ret)
++ return ret;
++
++
++ /*Intel drm driver load is done, continue doing pvr load*/
++ DRM_DEBUG("Pvr driver load\n");
++
++ return PVRSRVDrmLoad(dev, chipset);
++out_err:
++ psb_driver_unload(dev);
++ return ret;
++}
++
++int psb_driver_device_is_agp(struct drm_device *dev)
++{
++ return 0;
++}
++
++int psb_extension_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ union drm_psb_extension_arg *arg = data;
++ struct drm_psb_extension_rep *rep = &arg->rep;
++
++ if (strcmp(arg->extension, "psb_ttm_placement_alphadrop") == 0) {
++ rep->exists = 1;
++ rep->driver_ioctl_offset = DRM_PSB_PLACEMENT_OFFSET;
++ rep->sarea_offset = 0;
++ rep->major = 1;
++ rep->minor = 0;
++ rep->pl = 0;
++ return 0;
++ }
++ if (strcmp(arg->extension, "psb_ttm_fence_alphadrop") == 0) {
++ rep->exists = 1;
++ rep->driver_ioctl_offset = DRM_PSB_FENCE_OFFSET;
++ rep->sarea_offset = 0;
++ rep->major = 1;
++ rep->minor = 0;
++ rep->pl = 0;
++ return 0;
++ }
++ if (strcmp(arg->extension, "psb_ttm_execbuf_alphadrop") == 0) {
++ rep->exists = 1;
++ rep->driver_ioctl_offset = DRM_PSB_CMDBUF;
++ rep->sarea_offset = 0;
++ rep->major = 1;
++ rep->minor = 0;
++ rep->pl = 0;
++ return 0;
++ }
++
++ /*return the page flipping ioctl offset*/
++ if (strcmp(arg->extension, "psb_page_flipping_alphadrop") == 0) {
++ rep->exists = 1;
++ rep->driver_ioctl_offset = DRM_PSB_FLIP;
++ rep->sarea_offset = 0;
++ rep->major = 1;
++ rep->minor = 0;
++ rep->pl = 0;
++ return 0;
++ }
++
++ /* return the video rar offset */
++ if (strcmp(arg->extension, "lnc_video_getparam") == 0) {
++ rep->exists = 1;
++ rep->driver_ioctl_offset = DRM_LNC_VIDEO_GETPARAM;
++ rep->sarea_offset = 0;
++ rep->major = 1;
++ rep->minor = 0;
++ rep->pl = 0;
++ return 0;
++ }
++
++ rep->exists = 0;
++ return 0;
++}
++
++static int psb_vt_leave_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ struct ttm_mem_type_manager *man;
++ int clean;
++ int ret;
++
++ ret = ttm_write_lock(&dev_priv->ttm_lock, 1,
++ psb_fpriv(file_priv)->tfile);
++ if (unlikely(ret != 0))
++ return ret;
++
++ ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_TT);
++ if (unlikely(ret != 0))
++ goto out_unlock;
++
++ man = &bdev->man[TTM_PL_TT];
++ spin_lock(&bdev->lru_lock);
++ clean = drm_mm_clean(&man->manager);
++ spin_unlock(&bdev->lru_lock);
++ if (unlikely(!clean))
++ DRM_INFO("Warning: GATT was not clean after VT switch.\n");
++
++ ttm_bo_swapout_all(&dev_priv->bdev);
++
++ return 0;
++out_unlock:
++ (void) ttm_write_unlock(&dev_priv->ttm_lock,
++ psb_fpriv(file_priv)->tfile);
++ return ret;
++}
++
++static int psb_vt_enter_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ return ttm_write_unlock(&dev_priv->ttm_lock,
++ psb_fpriv(file_priv)->tfile);
++}
++
++static int psb_sizes_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct drm_psb_sizes_arg *arg =
++ (struct drm_psb_sizes_arg *) data;
++
++ *arg = dev_priv->sizes;
++ return 0;
++}
++
++static int psb_fuse_reg_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ uint32_t *arg = data;
++
++ *arg = dev_priv->fuse_reg_value;
++ return 0;
++}
++static int psb_vbt_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct gct_ioctl_arg *pGCT = data;
++
++ memcpy(pGCT, &dev_priv->gct_data, sizeof(*pGCT));
++
++ return 0;
++}
++
++static int psb_dc_state_ioctl(struct drm_device *dev, void * data,
++ struct drm_file *file_priv)
++{
++ uint32_t flags;
++ uint32_t obj_id;
++ struct drm_mode_object *obj;
++ struct drm_connector *connector;
++ struct drm_crtc *crtc;
++ struct drm_psb_dc_state_arg *arg =
++ (struct drm_psb_dc_state_arg *)data;
++
++ if (IS_MRST(dev))
++ return 0;
++
++ flags = arg->flags;
++ obj_id = arg->obj_id;
++
++ if (flags & PSB_DC_CRTC_MASK) {
++ obj = drm_mode_object_find(dev, obj_id,
++ DRM_MODE_OBJECT_CRTC);
++ if (!obj) {
++ DRM_DEBUG("Invalid CRTC object.\n");
++ return -EINVAL;
++ }
++
++ crtc = obj_to_crtc(obj);
++
++ mutex_lock(&dev->mode_config.mutex);
++ if (drm_helper_crtc_in_use(crtc)) {
++ if (flags & PSB_DC_CRTC_SAVE)
++ crtc->funcs->save(crtc);
++ else
++ crtc->funcs->restore(crtc);
++ }
++ mutex_unlock(&dev->mode_config.mutex);
++
++ return 0;
++ } else if (flags & PSB_DC_OUTPUT_MASK) {
++ obj = drm_mode_object_find(dev, obj_id,
++ DRM_MODE_OBJECT_CONNECTOR);
++ if (!obj) {
++ DRM_DEBUG("Invalid connector id.\n");
++ return -EINVAL;
++ }
++
++ connector = obj_to_connector(obj);
++ if (flags & PSB_DC_OUTPUT_SAVE)
++ connector->funcs->save(connector);
++ else
++ connector->funcs->restore(connector);
++
++ return 0;
++ }
++
++ DRM_DEBUG("Bad flags 0x%x\n", flags);
++ return -EINVAL;
++}
++
++static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ uint32_t *arg = data;
++ struct backlight_device bd;
++ dev_priv->blc_adj2 = *arg;
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++ bd.props.brightness = psb_get_brightness(&bd);
++ psb_set_brightness(&bd);
++#endif
++ return 0;
++}
++
++static int psb_adb_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ uint32_t *arg = data;
++ struct backlight_device bd;
++ dev_priv->blc_adj1 = *arg;
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++ bd.props.brightness = psb_get_brightness(&bd);
++ psb_set_brightness(&bd);
++#endif
++ return 0;
++}
++
++static int psb_hist_enable_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ u32 irqCtrl = 0;
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct dpst_guardband guardband_reg;
++ struct dpst_ie_histogram_control ie_hist_cont_reg;
++ uint32_t *enable = data;
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ return 0;
++ }
++
++ if (*enable == 1) {
++ ie_hist_cont_reg.data = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
++ ie_hist_cont_reg.ie_pipe_assignment = 0;
++ ie_hist_cont_reg.histogram_mode_select = DPST_YUV_LUMA_MODE;
++ ie_hist_cont_reg.ie_histogram_enable = 1;
++ PSB_WVDC32(ie_hist_cont_reg.data, HISTOGRAM_LOGIC_CONTROL);
++
++ guardband_reg.data = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++ guardband_reg.interrupt_enable = 1;
++ guardband_reg.interrupt_status = 1;
++ PSB_WVDC32(guardband_reg.data, HISTOGRAM_INT_CONTROL);
++
++ irqCtrl = PSB_RVDC32(PIPEASTAT);
++ PSB_WVDC32(irqCtrl | PIPE_DPST_EVENT_ENABLE, PIPEASTAT);
++ /* Wait for two vblanks */
++ } else {
++ guardband_reg.data = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++ guardband_reg.interrupt_enable = 0;
++ guardband_reg.interrupt_status = 1;
++ PSB_WVDC32(guardband_reg.data, HISTOGRAM_INT_CONTROL);
++
++ ie_hist_cont_reg.data = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
++ ie_hist_cont_reg.ie_histogram_enable = 0;
++ PSB_WVDC32(ie_hist_cont_reg.data, HISTOGRAM_LOGIC_CONTROL);
++
++ irqCtrl = PSB_RVDC32(PIPEASTAT);
++ irqCtrl &= ~PIPE_DPST_EVENT_ENABLE;
++ PSB_WVDC32(irqCtrl, PIPEASTAT);
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return 0;
++}
++
++static int psb_hist_status_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct drm_psb_hist_status_arg *hist_status = data;
++ uint32_t *arg = hist_status->buf;
++ u32 iedbr_reg_data = 0;
++ struct dpst_ie_histogram_control ie_hist_cont_reg;
++ u32 i;
++ int dpst3_bin_threshold_count = 0;
++ uint32_t blm_hist_ctl = HISTOGRAM_LOGIC_CONTROL;
++ uint32_t iebdr_reg = HISTOGRAM_BIN_DATA;
++ uint32_t segvalue_max_22_bit = 0x3fffff;
++ uint32_t iedbr_busy_bit = 0x80000000;
++ int dpst3_bin_count = 32;
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ return 0;
++ }
++
++ ie_hist_cont_reg.data = PSB_RVDC32(blm_hist_ctl);
++ ie_hist_cont_reg.bin_reg_func_select = dpst3_bin_threshold_count;
++ ie_hist_cont_reg.bin_reg_index = 0;
++
++ PSB_WVDC32(ie_hist_cont_reg.data, blm_hist_ctl);
++
++ for (i = 0; i < dpst3_bin_count; i++) {
++ iedbr_reg_data = PSB_RVDC32(iebdr_reg);
++
++ if (!(iedbr_reg_data & iedbr_busy_bit)) {
++ arg[i] = iedbr_reg_data & segvalue_max_22_bit;
++ } else {
++ i = 0;
++ ie_hist_cont_reg.data = PSB_RVDC32(blm_hist_ctl);
++ ie_hist_cont_reg.bin_reg_index = 0;
++ PSB_WVDC32(ie_hist_cont_reg.data, blm_hist_ctl);
++ }
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return 0;
++}
++
++static int psb_init_comm_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct pci_dev *pdev = NULL;
++ struct device *ddev = NULL;
++ struct kobject *kobj = NULL;
++ uint32_t *arg = data;
++
++ if (*arg == 1) {
++ /*find handle to drm kboject*/
++ pdev = dev->pdev;
++ ddev = &pdev->dev;
++ kobj = &ddev->kobj;
++
++ if (dev_priv->psb_dpst_state == NULL) {
++ /*init dpst kmum comms*/
++ dev_priv->psb_dpst_state = psb_dpst_init(kobj);
++ } else {
++ printk(KERN_ALERT "DPST already initialized\n");
++ }
++
++ sysirq_enable_dpst(dev);
++ psb_dpst_notify_change_um(DPST_EVENT_INIT_COMPLETE,
++ dev_priv->psb_dpst_state);
++ } else {
++ /*hotplug and dpst destroy examples*/
++ sysirq_disable_dpst(dev);
++ psb_dpst_notify_change_um(DPST_EVENT_TERMINATE,
++ dev_priv->psb_dpst_state);
++ psb_dpst_device_pool_destroy(dev_priv->psb_dpst_state);
++ dev_priv->psb_dpst_state = NULL;
++ }
++ return 0;
++}
++
++/* return the current mode to the dpst module */
++static int psb_dpst_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ uint32_t *arg = data;
++ uint32_t x;
++ uint32_t y;
++ uint32_t reg;
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ return 0;
++ }
++
++ reg = PSB_RVDC32(PIPEASRC);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ /* horizontal is the left 16 bits */
++ x = reg >> 16;
++ /* vertical is the right 16 bits */
++ y = reg & 0x0000ffff;
++
++ /* the values are the image size minus one */
++ x+=1;
++ y+=1;
++
++ *arg = (x << 16) | y;
++
++ return 0;
++}
++static int psb_gamma_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_dpst_lut_arg *lut_arg = data;
++ struct drm_mode_object *obj;
++ struct drm_crtc *crtc;
++ struct drm_connector *connector;
++ struct psb_intel_crtc *psb_intel_crtc;
++ int i = 0;
++ int32_t obj_id;
++
++ obj_id = lut_arg->output_id;
++ obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
++ if (!obj) {
++ DRM_DEBUG("Invalid Connector object.\n");
++ return -EINVAL;
++ }
++
++ connector = obj_to_connector(obj);
++ crtc = connector->encoder->crtc;
++ psb_intel_crtc = to_psb_intel_crtc(crtc);
++
++ for (i = 0; i < 256; i++)
++ psb_intel_crtc->lut_adj[i] = lut_arg->lut[i];
++
++ psb_intel_crtc_load_lut(crtc);
++
++ return 0;
++}
++
++static int psb_update_guard_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct dpst_guardband* input = (struct dpst_guardband*) data;
++ struct dpst_guardband reg_data;
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ return 0;
++ }
++
++ reg_data.data = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++ reg_data.guardband = input->guardband;
++ reg_data.guardband_interrupt_delay = input->guardband_interrupt_delay;
++ /* printk(KERN_ALERT "guardband = %u\ninterrupt delay = %u\n",
++ reg_data.guardband, reg_data.guardband_interrupt_delay); */
++ PSB_WVDC32(reg_data.data, HISTOGRAM_INT_CONTROL);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return 0;
++}
++
++static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ uint32_t obj_id;
++ uint16_t op;
++ struct drm_mode_modeinfo *umode;
++ struct drm_display_mode *mode = NULL;
++ struct drm_psb_mode_operation_arg *arg;
++ struct drm_mode_object *obj;
++ struct drm_connector *connector;
++ struct drm_framebuffer * drm_fb;
++ struct psb_framebuffer * psb_fb;
++ struct drm_connector_helper_funcs *connector_funcs;
++ int ret = 0;
++ int resp = MODE_OK;
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++
++ arg = (struct drm_psb_mode_operation_arg *)data;
++ obj_id = arg->obj_id;
++ op = arg->operation;
++
++ switch(op) {
++ case PSB_MODE_OPERATION_SET_DC_BASE:
++ obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_FB);
++ if(!obj) {
++ DRM_ERROR("Invalid FB id %d\n", obj_id);
++ return -EINVAL;
++ }
++
++ drm_fb = obj_to_fb(obj);
++ psb_fb = to_psb_fb(drm_fb);
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ REG_WRITE(DSPASURF, psb_fb->offset);
++ REG_READ(DSPASURF);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ dev_priv->saveDSPASURF = psb_fb->offset;
++ }
++
++ return 0;
++ case PSB_MODE_OPERATION_MODE_VALID:
++ umode = &arg->mode;
++
++ mutex_lock(&dev->mode_config.mutex);
++
++ obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
++ if (!obj) {
++ ret = -EINVAL;
++ goto mode_op_out;
++ }
++
++ connector = obj_to_connector(obj);
++
++ mode = drm_mode_create(dev);
++ if (!mode) {
++ ret = -ENOMEM;
++ goto mode_op_out;
++ }
++
++ /* drm_crtc_convert_umode(mode, umode); */
++ {
++ mode->clock = umode->clock;
++ mode->hdisplay = umode->hdisplay;
++ mode->hsync_start = umode->hsync_start;
++ mode->hsync_end = umode->hsync_end;
++ mode->htotal = umode->htotal;
++ mode->hskew = umode->hskew;
++ mode->vdisplay = umode->vdisplay;
++ mode->vsync_start = umode->vsync_start;
++ mode->vsync_end = umode->vsync_end;
++ mode->vtotal = umode->vtotal;
++ mode->vscan = umode->vscan;
++ mode->vrefresh = umode->vrefresh;
++ mode->flags = umode->flags;
++ mode->type = umode->type;
++ strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN);
++ mode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
++ }
++
++ connector_funcs = (struct drm_connector_helper_funcs *)
++ connector->helper_private;
++
++ if (connector_funcs->mode_valid) {
++ resp = connector_funcs->mode_valid(connector, mode);
++ arg->data = (void *)resp;
++ }
++
++ /*do some clean up work*/
++ if(mode) {
++ drm_mode_destroy(dev, mode);
++ }
++mode_op_out:
++ mutex_unlock(&dev->mode_config.mutex);
++ return ret;
++
++ default:
++ DRM_DEBUG("Unsupported psb mode operation");
++ return -EOPNOTSUPP;
++ }
++
++ return 0;
++}
++
++static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct drm_psb_stolen_memory_arg *arg = data;
++
++ arg->base = dev_priv->pg->stolen_base;
++ arg->size = dev_priv->pg->vram_stolen_size;
++
++ return 0;
++}
++
++static int psb_register_rw_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct drm_psb_register_rw_arg *arg = data;
++ UHBUsage usage =
++ arg->b_force_hw_on ? OSPM_UHB_FORCE_POWER_ON : OSPM_UHB_ONLY_IF_ON;
++
++ if (arg->display_write_mask != 0) {
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
++ if (arg->display_write_mask & REGRWBITS_PFIT_CONTROLS)
++ PSB_WVDC32(arg->display.pfit_controls,
++ PFIT_CONTROL);
++ if (arg->display_write_mask &
++ REGRWBITS_PFIT_AUTOSCALE_RATIOS)
++ PSB_WVDC32(arg->display.pfit_autoscale_ratios,
++ PFIT_AUTO_RATIOS);
++ if (arg->display_write_mask &
++ REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
++ PSB_WVDC32(
++ arg->display.pfit_programmed_scale_ratios,
++ PFIT_PGM_RATIOS);
++ if (arg->display_write_mask & REGRWBITS_PIPEASRC)
++ PSB_WVDC32(arg->display.pipeasrc,
++ PIPEASRC);
++ if (arg->display_write_mask & REGRWBITS_PIPEBSRC)
++ PSB_WVDC32(arg->display.pipebsrc,
++ PIPEBSRC);
++ if (arg->display_write_mask & REGRWBITS_VTOTAL_A)
++ PSB_WVDC32(arg->display.vtotal_a,
++ VTOTAL_A);
++ if (arg->display_write_mask & REGRWBITS_VTOTAL_B)
++ PSB_WVDC32(arg->display.vtotal_b,
++ VTOTAL_B);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ if (arg->display_write_mask & REGRWBITS_PFIT_CONTROLS)
++ dev_priv->savePFIT_CONTROL =
++ arg->display.pfit_controls;
++ if (arg->display_write_mask &
++ REGRWBITS_PFIT_AUTOSCALE_RATIOS)
++ dev_priv->savePFIT_AUTO_RATIOS =
++ arg->display.pfit_autoscale_ratios;
++ if (arg->display_write_mask &
++ REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
++ dev_priv->savePFIT_PGM_RATIOS =
++ arg->display.pfit_programmed_scale_ratios;
++ if (arg->display_write_mask & REGRWBITS_PIPEASRC)
++ dev_priv->savePIPEASRC = arg->display.pipeasrc;
++ if (arg->display_write_mask & REGRWBITS_PIPEBSRC)
++ dev_priv->savePIPEBSRC = arg->display.pipebsrc;
++ if (arg->display_write_mask & REGRWBITS_VTOTAL_A)
++ dev_priv->saveVTOTAL_A = arg->display.vtotal_a;
++ if (arg->display_write_mask & REGRWBITS_VTOTAL_B)
++ dev_priv->saveVTOTAL_B = arg->display.vtotal_b;
++ }
++ }
++
++ if (arg->display_read_mask != 0) {
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
++ if (arg->display_read_mask &
++ REGRWBITS_PFIT_CONTROLS)
++ arg->display.pfit_controls =
++ PSB_RVDC32(PFIT_CONTROL);
++ if (arg->display_read_mask &
++ REGRWBITS_PFIT_AUTOSCALE_RATIOS)
++ arg->display.pfit_autoscale_ratios =
++ PSB_RVDC32(PFIT_AUTO_RATIOS);
++ if (arg->display_read_mask &
++ REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
++ arg->display.pfit_programmed_scale_ratios =
++ PSB_RVDC32(PFIT_PGM_RATIOS);
++ if (arg->display_read_mask & REGRWBITS_PIPEASRC)
++ arg->display.pipeasrc = PSB_RVDC32(PIPEASRC);
++ if (arg->display_read_mask & REGRWBITS_PIPEBSRC)
++ arg->display.pipebsrc = PSB_RVDC32(PIPEBSRC);
++ if (arg->display_read_mask & REGRWBITS_VTOTAL_A)
++ arg->display.vtotal_a = PSB_RVDC32(VTOTAL_A);
++ if (arg->display_read_mask & REGRWBITS_VTOTAL_B)
++ arg->display.vtotal_b = PSB_RVDC32(VTOTAL_B);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ if (arg->display_read_mask &
++ REGRWBITS_PFIT_CONTROLS)
++ arg->display.pfit_controls =
++ dev_priv->savePFIT_CONTROL;
++ if (arg->display_read_mask &
++ REGRWBITS_PFIT_AUTOSCALE_RATIOS)
++ arg->display.pfit_autoscale_ratios =
++ dev_priv->savePFIT_AUTO_RATIOS;
++ if (arg->display_read_mask &
++ REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
++ arg->display.pfit_programmed_scale_ratios =
++ dev_priv->savePFIT_PGM_RATIOS;
++ if (arg->display_read_mask & REGRWBITS_PIPEASRC)
++ arg->display.pipeasrc = dev_priv->savePIPEASRC;
++ if (arg->display_read_mask & REGRWBITS_PIPEBSRC)
++ arg->display.pipebsrc = dev_priv->savePIPEBSRC;
++ if (arg->display_read_mask & REGRWBITS_VTOTAL_A)
++ arg->display.vtotal_a = dev_priv->saveVTOTAL_A;
++ if (arg->display_read_mask & REGRWBITS_VTOTAL_B)
++ arg->display.vtotal_b = dev_priv->saveVTOTAL_B;
++ }
++ }
++
++ if (arg->overlay_write_mask != 0) {
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
++ if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL) {
++ PSB_WVDC32(arg->overlay.OGAMC5, OV_OGAMC5);
++ PSB_WVDC32(arg->overlay.OGAMC4, OV_OGAMC4);
++ PSB_WVDC32(arg->overlay.OGAMC3, OV_OGAMC3);
++ PSB_WVDC32(arg->overlay.OGAMC2, OV_OGAMC2);
++ PSB_WVDC32(arg->overlay.OGAMC1, OV_OGAMC1);
++ PSB_WVDC32(arg->overlay.OGAMC0, OV_OGAMC0);
++ }
++
++ if (arg->overlay_write_mask & OV_REGRWBITS_OVADD)
++ PSB_WVDC32(arg->overlay.OVADD, OV_OVADD);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL) {
++ dev_priv->saveOV_OGAMC5 = arg->overlay.OGAMC5;
++ dev_priv->saveOV_OGAMC4 = arg->overlay.OGAMC4;
++ dev_priv->saveOV_OGAMC3 = arg->overlay.OGAMC3;
++ dev_priv->saveOV_OGAMC2 = arg->overlay.OGAMC2;
++ dev_priv->saveOV_OGAMC1 = arg->overlay.OGAMC1;
++ dev_priv->saveOV_OGAMC0 = arg->overlay.OGAMC0;
++ }
++ if (arg->overlay_write_mask & OV_REGRWBITS_OVADD)
++ dev_priv->saveOV_OVADD = arg->overlay.OVADD;
++ }
++ }
++
++ if (arg->overlay_read_mask != 0) {
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
++ if (arg->overlay_read_mask & OV_REGRWBITS_OGAM_ALL) {
++ arg->overlay.OGAMC5 = PSB_RVDC32(OV_OGAMC5);
++ arg->overlay.OGAMC4 = PSB_RVDC32(OV_OGAMC4);
++ arg->overlay.OGAMC3 = PSB_RVDC32(OV_OGAMC3);
++ arg->overlay.OGAMC2 = PSB_RVDC32(OV_OGAMC2);
++ arg->overlay.OGAMC1 = PSB_RVDC32(OV_OGAMC1);
++ arg->overlay.OGAMC0 = PSB_RVDC32(OV_OGAMC0);
++ }
++ if (arg->overlay_read_mask & OV_REGRWBITS_OVADD)
++ arg->overlay.OVADD = PSB_RVDC32(OV_OVADD);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ if (arg->overlay_read_mask & OV_REGRWBITS_OGAM_ALL) {
++ arg->overlay.OGAMC5 = dev_priv->saveOV_OGAMC5;
++ arg->overlay.OGAMC4 = dev_priv->saveOV_OGAMC4;
++ arg->overlay.OGAMC3 = dev_priv->saveOV_OGAMC3;
++ arg->overlay.OGAMC2 = dev_priv->saveOV_OGAMC2;
++ arg->overlay.OGAMC1 = dev_priv->saveOV_OGAMC1;
++ arg->overlay.OGAMC0 = dev_priv->saveOV_OGAMC0;
++ }
++ if (arg->overlay_read_mask & OV_REGRWBITS_OVADD)
++ arg->overlay.OVADD = dev_priv->saveOV_OVADD;
++ }
++ }
++
++ if (arg->sprite_enable_mask != 0) {
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
++ PSB_WVDC32(0x1F3E, DSPARB);
++ PSB_WVDC32(arg->sprite.dspa_control | PSB_RVDC32(DSPACNTR), DSPACNTR);
++ PSB_WVDC32(arg->sprite.dspa_key_value, DSPAKEYVAL);
++ PSB_WVDC32(arg->sprite.dspa_key_mask, DSPAKEYMASK);
++ PSB_WVDC32(PSB_RVDC32(DSPASURF), DSPASURF);
++ PSB_RVDC32(DSPASURF);
++ PSB_WVDC32(arg->sprite.dspc_control, DSPCCNTR);
++ PSB_WVDC32(arg->sprite.dspc_stride, DSPCSTRIDE);
++ PSB_WVDC32(arg->sprite.dspc_position, DSPCPOS);
++ PSB_WVDC32(arg->sprite.dspc_linear_offset, DSPCLINOFF);
++ PSB_WVDC32(arg->sprite.dspc_size, DSPCSIZE);
++ PSB_WVDC32(arg->sprite.dspc_surface, DSPCSURF);
++ PSB_RVDC32(DSPCSURF);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ }
++
++ if (arg->sprite_disable_mask != 0) {
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, usage)) {
++ PSB_WVDC32(0x3F3E, DSPARB);
++ PSB_WVDC32(0x0, DSPCCNTR);
++ PSB_WVDC32(arg->sprite.dspc_surface, DSPCSURF);
++ PSB_RVDC32(DSPCSURF);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ }
++
++
++ return 0;
++}
++
++/* always available as we are SIGIO'd */
++static unsigned int psb_poll(struct file *filp,
++ struct poll_table_struct *wait)
++{
++ return POLLIN | POLLRDNORM;
++}
++
++int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
++{
++ DRM_DEBUG("\n");
++ return PVRSRVOpen(dev, priv);
++}
++
++static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
++ unsigned long arg)
++{
++ struct drm_file *file_priv = filp->private_data;
++ struct drm_device *dev = file_priv->minor->dev;
++ unsigned int nr = DRM_IOCTL_NR(cmd);
++ long ret;
++
++ DRM_DEBUG("cmd = %x, nr = %x\n", cmd, nr);
++
++ /*
++ * The driver private ioctls and TTM ioctls should be
++ * thread-safe.
++ */
++
++ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
++ && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
++ struct drm_ioctl_desc *ioctl =
++ &psb_ioctls[nr - DRM_COMMAND_BASE];
++
++ if (unlikely(ioctl->cmd != cmd)) {
++ DRM_ERROR(
++ "Invalid drm cmnd %d ioctl->cmd %x, cmd %x\n",
++ nr - DRM_COMMAND_BASE, ioctl->cmd, cmd);
++ return -EINVAL;
++ }
++
++ return drm_unlocked_ioctl(filp, cmd, arg);
++ }
++ /*
++ * Not all old drm ioctls are thread-safe.
++ */
++
++ lock_kernel();
++ ret = drm_unlocked_ioctl(filp, cmd, arg);
++ unlock_kernel();
++ return ret;
++}
++
++static int psb_blc_read(char *buf, char **start, off_t offset, int request,
++ int *eof, void *data)
++{
++ struct drm_minor *minor = (struct drm_minor *) data;
++ struct drm_device *dev = minor->dev;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct backlight_device bd;
++ int user_brightness = 0;
++ int final_brightness = 0;
++ int len = 0;
++
++ *start = &buf[offset];
++ *eof = 0;
++
++ user_brightness = psb_get_brightness(&bd);
++ final_brightness = (user_brightness * dev_priv->blc_adj1) / 100;
++ final_brightness = (final_brightness * dev_priv->blc_adj2) / 100;
++
++ DRM_INFO("%i\n", final_brightness);
++
++ if (len > request + offset)
++ return request;
++ *eof = 1;
++ return len - offset;
++}
++
++static int psb_ospm_read(char *buf, char **start, off_t offset, int request,
++ int *eof, void *data)
++{
++ struct drm_minor *minor = (struct drm_minor *) data;
++ struct drm_device *dev = minor->dev;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ int len = 0;
++#ifdef OSPM_STAT
++ unsigned long on_time = 0;
++ unsigned long off_time = 0;
++#endif
++
++ *start = &buf[offset];
++ *eof = 0;
++
++#ifdef SUPPORT_ACTIVE_POWER_MANAGEMENT
++ DRM_INFO("GFX D0i3: enabled ");
++#else
++ DRM_INFO("GFX D0i3: disabled ");
++#endif
++
++#ifdef OSPM_STAT
++ switch (dev_priv->graphics_state) {
++ case PSB_PWR_STATE_ON:
++ DRM_INFO("GFX state:%s\n", "on");
++ break;
++ case PSB_PWR_STATE_OFF:
++ DRM_INFO("GFX state:%s\n", "off");
++ break;
++ default:
++ DRM_INFO("GFX state:%s\n", "unknown");
++ }
++
++ on_time = dev_priv->gfx_on_time * 1000 / HZ;
++ off_time = dev_priv->gfx_off_time * 1000 / HZ;
++ switch (dev_priv->graphics_state) {
++ case PSB_PWR_STATE_ON:
++ on_time += (jiffies - dev_priv->gfx_last_mode_change) * \
++ 1000 / HZ;
++ break;
++ case PSB_PWR_STATE_OFF:
++ off_time += (jiffies - dev_priv->gfx_last_mode_change) * \
++ 1000 / HZ;
++ break;
++ }
++ DRM_INFO("GFX(count/ms):\n");
++ DRM_INFO("on:%lu/%lu, off:%lu/%lu \n",
++ dev_priv->gfx_on_cnt, on_time, dev_priv->gfx_off_cnt, off_time);
++#endif
++ if (len > request + offset)
++ return request;
++ *eof = 1;
++ return len - offset;
++}
++
++/* When a client dies:
++ * - Check for and clean up flipped page state
++ */
++void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
++{
++}
++
++static void psb_remove(struct pci_dev *pdev)
++{
++ struct drm_device *dev = pci_get_drvdata(pdev);
++ drm_put_dev(dev);
++}
++
++static int psb_proc_init(struct drm_minor *minor)
++{
++ struct proc_dir_entry *ent;
++ struct proc_dir_entry *ent1;
++ ent = create_proc_read_entry(OSPM_PROC_ENTRY, 0, minor->proc_root,
++ psb_ospm_read, minor);
++ ent1 = create_proc_read_entry(BLC_PROC_ENTRY, 0, minor->proc_root,
++ psb_blc_read, minor);
++
++ if (!ent || !ent1)
++ return -1;
++
++ return 0;
++}
++
++static void psb_proc_cleanup(struct drm_minor *minor)
++{
++ remove_proc_entry(OSPM_PROC_ENTRY, minor->proc_root);
++ remove_proc_entry(BLC_PROC_ENTRY, minor->proc_root);
++ return;
++}
++
++static struct drm_driver driver = {
++ .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
++ DRIVER_IRQ_VBL | DRIVER_MODESET,
++ .load = psb_driver_load,
++ .unload = psb_driver_unload,
++
++ .ioctls = psb_ioctls,
++ .device_is_agp = psb_driver_device_is_agp,
++ .irq_preinstall = sysirq_preinstall,
++ .irq_postinstall = sysirq_postinstall,
++ .irq_uninstall = sysirq_uninstall,
++ .irq_handler = sysirq_handler,
++ .enable_vblank = sysirq_enable_vblank,
++ .disable_vblank = sysirq_disable_vblank,
++ .get_vblank_counter = sysirq_get_vblank_counter,
++ .firstopen = NULL,
++ .lastclose = psb_lastclose,
++ .open = psb_driver_open,
++ .postclose = PVRSRVDrmPostClose,
++ .suspend = PVRSRVDriverSuspend,
++ .resume = PVRSRVDriverResume,
++ .get_map_ofs = drm_core_get_map_ofs,
++ .get_reg_ofs = drm_core_get_reg_ofs,
++ .proc_init = psb_proc_init,
++ .proc_cleanup = psb_proc_cleanup,
++ .preclose = psb_driver_preclose,
++ .fops = {
++ .owner = THIS_MODULE,
++ .open = psb_open,
++ .release = psb_release,
++ .unlocked_ioctl = psb_unlocked_ioctl,
++ .mmap = psb_mmap,
++ .poll = psb_poll,
++ .fasync = drm_fasync,
++ .read = drm_read,
++ },
++ .pci_driver = {
++ .name = DRIVER_NAME,
++ .id_table = pciidlist,
++ .resume = ospm_power_resume,
++ .suspend = ospm_power_suspend,
++ .probe = psb_probe,
++ .remove = psb_remove,
++ },
++ .name = DRIVER_NAME,
++ .desc = DRIVER_DESC,
++ .date = PSB_DRM_DRIVER_DATE,
++ .major = PSB_DRM_DRIVER_MAJOR,
++ .minor = PSB_DRM_DRIVER_MINOR,
++ .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
++};
++
++static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++ return drm_get_dev(pdev, ent, &driver);
++}
++
++static int __init psb_init(void)
++{
++ driver.num_ioctls = psb_max_ioctl;
++
++ PVRDPFInit();
++
++ return drm_init(&driver);
++}
++
++static void __exit psb_exit(void)
++{
++ drm_exit(&driver);
++}
++
++late_initcall(psb_init);
++module_exit(psb_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL");
+diff --git a/drivers/gpu/drm/mrst/drv/psb_drv.h b/drivers/gpu/drm/mrst/drv/psb_drv.h
+new file mode 100644
+index 0000000..2ac7934
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_drv.h
+@@ -0,0 +1,1025 @@
++/**************************************************************************
++ * Copyright (c) 2007-2008, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef _PSB_DRV_H_
++#define _PSB_DRV_H_
++
++#include <drm/drmP.h>
++#include "psb_drm.h"
++#include "psb_reg.h"
++#include "psb_schedule.h"
++#include "psb_intel_drv.h"
++#include "psb_hotplug.h"
++#include "psb_dpst.h"
++#include "psb_gtt.h"
++#include "ttm/ttm_object.h"
++#include "ttm/ttm_fence_driver.h"
++#include "ttm/ttm_bo_driver.h"
++#include "ttm/ttm_lock.h"
++
++/*IMG headers*/
++#include "private_data.h"
++#include "pvr_drm.h"
++
++extern struct ttm_bo_driver psb_ttm_bo_driver;
++
++enum {
++ CHIP_PSB_8108 = 0,
++ CHIP_PSB_8109 = 1,
++ CHIP_MRST_4100 = 2
++};
++
++/*
++ *Hardware bugfixes
++ */
++
++#define FIX_TG_16
++#define FIX_TG_2D_CLOCKGATE
++#define OSPM_STAT
++
++#define DRIVER_NAME "pvrsrvkm"
++#define DRIVER_DESC "drm driver for the Intel GMA500"
++#define DRIVER_AUTHOR "Tungsten Graphics Inc."
++#define OSPM_PROC_ENTRY "ospm"
++#define BLC_PROC_ENTRY "mrst_blc"
++
++#define PSB_DRM_DRIVER_DATE "2009-03-10"
++#define PSB_DRM_DRIVER_MAJOR 8
++#define PSB_DRM_DRIVER_MINOR 1
++#define PSB_DRM_DRIVER_PATCHLEVEL 0
++
++/*
++ *TTM driver private offsets.
++ */
++
++#define DRM_PSB_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
++
++#define PSB_OBJECT_HASH_ORDER 13
++#define PSB_FILE_OBJECT_HASH_ORDER 12
++#define PSB_BO_HASH_ORDER 12
++
++#define PSB_VDC_OFFSET 0x00000000
++#define PSB_VDC_SIZE 0x000080000
++#define MRST_MMIO_SIZE 0x0000C0000
++#define PSB_SGX_SIZE 0x8000
++#define PSB_SGX_OFFSET 0x00040000
++#define MRST_SGX_OFFSET 0x00080000
++#define PSB_MMIO_RESOURCE 0
++#define PSB_GATT_RESOURCE 2
++#define PSB_GTT_RESOURCE 3
++#define PSB_GMCH_CTRL 0x52
++#define PSB_BSM 0x5C
++#define _PSB_GMCH_ENABLED 0x4
++#define PSB_PGETBL_CTL 0x2020
++#define _PSB_PGETBL_ENABLED 0x00000001
++#define PSB_SGX_2D_SLAVE_PORT 0x4000
++#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
++#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
++#define PSB_NUM_VALIDATE_BUFFERS 2048
++
++#define PSB_MEM_MMU_START 0x40000000
++
++/*
++ *Flags for external memory type field.
++ */
++
++#define MRST_MSVDX_OFFSET 0x90000 /*MSVDX Base offset */
++#define PSB_MSVDX_OFFSET 0x50000 /*MSVDX Base offset */
++/* MSVDX MMIO region is 0x50000 - 0x57fff ==> 32KB */
++#define PSB_MSVDX_SIZE 0x10000
++
++#define LNC_TOPAZ_OFFSET 0xA0000
++#define LNC_TOPAZ_SIZE 0x10000
++
++#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
++#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
++#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
++
++/*
++ *PTE's and PDE's
++ */
++
++#define PSB_PDE_MASK 0x003FFFFF
++#define PSB_PDE_SHIFT 22
++#define PSB_PTE_SHIFT 12
++
++#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
++#define PSB_PTE_WO 0x0002 /* Write only */
++#define PSB_PTE_RO 0x0004 /* Read only */
++#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
++
++/*
++ *VDC registers and bits
++ */
++#define PSB_MSVDX_CLOCKGATING 0x2064
++#define PSB_TOPAZ_CLOCKGATING 0x2068
++#define PSB_HWSTAM 0x2098
++#define PSB_INSTPM 0x20C0
++#define _PSB_VSYNC_PIPEB_FLAG (1<<5)
++#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
++#define _PSB_DPST_PIPEA_FLAG (1<<6)
++#define _PSB_DPST_PIPEB_FLAG (1<<4)
++#define _PSB_IRQ_SGX_FLAG (1<<18)
++#define _PSB_IRQ_MSVDX_FLAG (1<<19)
++#define _LNC_IRQ_TOPAZ_FLAG (1<<20)
++#define PSB_INT_IDENTITY_R 0x20A4
++#define PSB_INT_MASK_R 0x20A8
++#define PSB_INT_ENABLE_R 0x20A0
++
++#define _PSB_MMU_ER_MASK 0x0001FF00
++#define _PSB_MMU_ER_HOST (1 << 16)
++#define GPIOA 0x5010
++#define GPIOB 0x5014
++#define GPIOC 0x5018
++#define GPIOD 0x501c
++#define GPIOE 0x5020
++#define GPIOF 0x5024
++#define GPIOG 0x5028
++#define GPIOH 0x502c
++#define GPIO_CLOCK_DIR_MASK (1 << 0)
++#define GPIO_CLOCK_DIR_IN (0 << 1)
++#define GPIO_CLOCK_DIR_OUT (1 << 1)
++#define GPIO_CLOCK_VAL_MASK (1 << 2)
++#define GPIO_CLOCK_VAL_OUT (1 << 3)
++#define GPIO_CLOCK_VAL_IN (1 << 4)
++#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
++#define GPIO_DATA_DIR_MASK (1 << 8)
++#define GPIO_DATA_DIR_IN (0 << 9)
++#define GPIO_DATA_DIR_OUT (1 << 9)
++#define GPIO_DATA_VAL_MASK (1 << 10)
++#define GPIO_DATA_VAL_OUT (1 << 11)
++#define GPIO_DATA_VAL_IN (1 << 12)
++#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
++
++#define VCLK_DIVISOR_VGA0 0x6000
++#define VCLK_DIVISOR_VGA1 0x6004
++#define VCLK_POST_DIV 0x6010
++
++#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
++#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
++#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
++#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
++#define PSB_COMM_USER_IRQ (1024 >> 2)
++#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
++#define PSB_COMM_FW (2048 >> 2)
++
++#define PSB_UIRQ_VISTEST 1
++#define PSB_UIRQ_OOM_REPLY 2
++#define PSB_UIRQ_FIRE_TA_REPLY 3
++#define PSB_UIRQ_FIRE_RASTER_REPLY 4
++
++#define PSB_2D_SIZE (256*1024*1024)
++#define PSB_MAX_RELOC_PAGES 1024
++
++#define PSB_LOW_REG_OFFS 0x0204
++#define PSB_HIGH_REG_OFFS 0x0600
++
++#define PSB_NUM_VBLANKS 2
++
++
++#define PSB_2D_SIZE (256*1024*1024)
++#define PSB_MAX_RELOC_PAGES 1024
++
++#define PSB_LOW_REG_OFFS 0x0204
++#define PSB_HIGH_REG_OFFS 0x0600
++
++#define PSB_NUM_VBLANKS 2
++#define PSB_WATCHDOG_DELAY (DRM_HZ * 2)
++#define PSB_LID_DELAY (DRM_HZ / 10)
++
++#define PSB_PWR_STATE_ON 1
++#define PSB_PWR_STATE_OFF 2
++
++#define PSB_PMPOLICY_NOPM 0
++#define PSB_PMPOLICY_POWERDOWN 2
++
++#define PSB_PMSTATE_POWERUP 0
++#define PSB_PMSTATE_POWERDOWN 2
++
++/*
++ *User options.
++ */
++
++struct drm_psb_uopt {
++ int pad; /*keep it here in case we use it in future*/
++};
++
++/**
++ *struct psb_context
++ *
++ *@buffers: array of pre-allocated validate buffers.
++ *@used_buffers: number of buffers in @buffers array currently in use.
++ *@validate_buffer: buffers validated from user-space.
++ *@kern_validate_buffers : buffers validated from kernel-space.
++ *@fence_flags : Fence flags to be used for fence creation.
++ *
++ *This structure is used during execbuf validation.
++ */
++
++struct psb_context {
++ struct psb_validate_buffer *buffers;
++ uint32_t used_buffers;
++ struct list_head validate_list;
++ struct list_head kern_validate_list;
++ uint32_t fence_types;
++ uint32_t val_seq;
++};
++
++struct psb_validate_buffer;
++
++struct psb_msvdx_cmd_queue {
++ struct list_head head;
++ void *cmd;
++ unsigned long cmd_size;
++ uint32_t sequence;
++};
++
++struct drm_psb_private {
++
++ /*
++ *TTM Glue.
++ */
++
++ struct drm_global_reference mem_global_ref;
++ int has_global;
++
++ struct drm_device *dev;
++ struct ttm_object_device *tdev;
++ struct ttm_fence_device fdev;
++ struct ttm_bo_device bdev;
++ struct ttm_lock ttm_lock;
++ struct vm_operations_struct *ttm_vm_ops;
++ int has_fence_device;
++ int has_bo_device;
++
++ unsigned long chipset;
++
++ struct drm_psb_dev_info_arg dev_info;
++ struct drm_psb_uopt uopt;
++
++ struct psb_gtt *pg;
++
++ /*GTT Memory manager*/
++ struct psb_gtt_mm *gtt_mm;
++
++ struct page *scratch_page;
++ uint32_t sequence[PSB_NUM_ENGINES];
++ uint32_t last_sequence[PSB_NUM_ENGINES];
++ uint32_t last_submitted_seq[PSB_NUM_ENGINES];
++
++ struct psb_mmu_driver *mmu;
++ struct psb_mmu_pd *pf_pd;
++
++ uint8_t *sgx_reg;
++ uint8_t *vdc_reg;
++ uint32_t gatt_free_offset;
++
++ /*
++ *MSVDX
++ */
++ uint8_t *msvdx_reg;
++ atomic_t msvdx_mmu_invaldc;
++ void *msvdx_private;
++
++ /*
++ *TOPAZ
++ */
++ uint8_t *topaz_reg;
++ void *topaz_private;
++ uint8_t topaz_disabled;
++ uint32_t video_device_fuse;
++ atomic_t topaz_mmu_invaldc;
++
++ /*
++ *Fencing / irq.
++ */
++
++ uint32_t vdc_irq_mask;
++ u32 pipestat[2];
++ bool vblanksEnabledForFlips;
++
++ spinlock_t irqmask_lock;
++ spinlock_t sequence_lock;
++
++ /*
++ *Modesetting
++ */
++ struct psb_intel_mode_device mode_dev;
++
++ struct drm_crtc *plane_to_crtc_mapping[2];
++ struct drm_crtc *pipe_to_crtc_mapping[2];
++
++ /*
++ * CI share buffer
++ */
++ unsigned int ci_region_start;
++ unsigned int ci_region_size;
++
++ /*
++ * RAR share buffer;
++ */
++ unsigned int rar_region_start;
++ unsigned int rar_region_size;
++
++ /*
++ *Memory managers
++ */
++
++ int have_camera;
++ int have_rar;
++ int have_tt;
++ int have_mem_mmu;
++ struct mutex temp_mem;
++
++ /*
++ *Relocation buffer mapping.
++ */
++
++ spinlock_t reloc_lock;
++ unsigned int rel_mapped_pages;
++ wait_queue_head_t rel_mapped_queue;
++
++ /*
++ *SAREA
++ */
++ struct drm_psb_sarea *sarea_priv;
++
++ /*
++ *OSPM info
++ */
++ uint32_t ospm_base;
++
++ /*
++ * Sizes info
++ */
++
++ struct drm_psb_sizes_arg sizes;
++
++ uint32_t fuse_reg_value;
++
++ /* vbt (gct) header information*/
++ struct mrst_vbt vbt_data;
++ /* info that is stored from the gct */
++ struct gct_ioctl_arg gct_data;
++
++ /*
++ *LVDS info
++ */
++ int backlight_duty_cycle; /* restore backlight to this value */
++ bool panel_wants_dither;
++ struct drm_display_mode *panel_fixed_mode;
++ struct drm_display_mode *lfp_lvds_vbt_mode;
++ struct drm_display_mode *sdvo_lvds_vbt_mode;
++
++ struct bdb_lvds_backlight *lvds_bl; /*LVDS backlight info from VBT*/
++ struct psb_intel_i2c_chan *lvds_i2c_bus;
++
++ /* Feature bits from the VBIOS*/
++ unsigned int int_tv_support:1;
++ unsigned int lvds_dither:1;
++ unsigned int lvds_vbt:1;
++ unsigned int int_crt_support:1;
++ unsigned int lvds_use_ssc:1;
++ int lvds_ssc_freq;
++
++/* MRST private date start */
++/*FIXME JLIU7 need to revisit */
++ bool sku_83;
++ bool sku_100;
++ bool sku_100L;
++ bool sku_bypass;
++ uint32_t iLVDS_enable;
++
++ /* pipe config register value */
++ uint32_t pipeconf;
++
++ /* plane control register value */
++ uint32_t dspcntr;
++
++/* MRST_DSI private date start */
++ /*
++ *MRST DSI info
++ */
++ /* The DSI device ready */
++ bool dsi_device_ready;
++
++ /* The DPI panel power on */
++ bool dpi_panel_on;
++
++ /* The DBI panel power on */
++ bool dbi_panel_on;
++
++ /* The DPI display */
++ bool dpi;
++
++ enum mipi_panel_type panel_make;
++
++ /* status */
++ uint32_t videoModeFormat:2;
++ uint32_t laneCount:3;
++ uint32_t status_reserved:27;
++
++ /* dual display - DPI & DBI */
++ bool dual_display;
++
++ /* HS or LP transmission */
++ bool lp_transmission;
++
++ /* configuration phase */
++ bool config_phase;
++
++ /* DSI clock */
++ uint32_t RRate;
++ uint32_t DDR_Clock;
++ uint32_t DDR_Clock_Calculated;
++ uint32_t ClockBits;
++
++ /* DBI Buffer pointer */
++ u8 *p_DBI_commandBuffer_orig;
++ u8 *p_DBI_commandBuffer;
++ uint32_t DBI_CB_pointer;
++ u8 *p_DBI_dataBuffer_orig;
++ u8 *p_DBI_dataBuffer;
++ uint32_t DBI_DB_pointer;
++
++ /* DPI panel spec */
++ uint32_t pixelClock;
++ uint32_t HsyncWidth;
++ uint32_t HbackPorch;
++ uint32_t HfrontPorch;
++ uint32_t HactiveArea;
++ uint32_t VsyncWidth;
++ uint32_t VbackPorch;
++ uint32_t VfrontPorch;
++ uint32_t VactiveArea;
++ uint32_t bpp:5;
++ uint32_t Reserved:27;
++
++ /* DBI panel spec */
++ uint32_t dbi_pixelClock;
++ uint32_t dbi_HsyncWidth;
++ uint32_t dbi_HbackPorch;
++ uint32_t dbi_HfrontPorch;
++ uint32_t dbi_HactiveArea;
++ uint32_t dbi_VsyncWidth;
++ uint32_t dbi_VbackPorch;
++ uint32_t dbi_VfrontPorch;
++ uint32_t dbi_VactiveArea;
++ uint32_t dbi_bpp:5;
++ uint32_t dbi_Reserved:27;
++
++/* MRST_DSI private date end */
++
++ /*
++ *Register state
++ */
++ uint32_t saveDSPACNTR;
++ uint32_t saveDSPBCNTR;
++ uint32_t savePIPEACONF;
++ uint32_t savePIPEBCONF;
++ uint32_t savePIPEASRC;
++ uint32_t savePIPEBSRC;
++ uint32_t saveFPA0;
++ uint32_t saveFPA1;
++ uint32_t saveDPLL_A;
++ uint32_t saveDPLL_A_MD;
++ uint32_t saveHTOTAL_A;
++ uint32_t saveHBLANK_A;
++ uint32_t saveHSYNC_A;
++ uint32_t saveVTOTAL_A;
++ uint32_t saveVBLANK_A;
++ uint32_t saveVSYNC_A;
++ uint32_t saveDSPASTRIDE;
++ uint32_t saveDSPASIZE;
++ uint32_t saveDSPAPOS;
++ uint32_t saveDSPABASE;
++ uint32_t saveDSPASURF;
++ uint32_t saveFPB0;
++ uint32_t saveFPB1;
++ uint32_t saveDPLL_B;
++ uint32_t saveDPLL_B_MD;
++ uint32_t saveHTOTAL_B;
++ uint32_t saveHBLANK_B;
++ uint32_t saveHSYNC_B;
++ uint32_t saveVTOTAL_B;
++ uint32_t saveVBLANK_B;
++ uint32_t saveVSYNC_B;
++ uint32_t saveDSPBSTRIDE;
++ uint32_t saveDSPBSIZE;
++ uint32_t saveDSPBPOS;
++ uint32_t saveDSPBBASE;
++ uint32_t saveDSPBSURF;
++ uint32_t saveVCLK_DIVISOR_VGA0;
++ uint32_t saveVCLK_DIVISOR_VGA1;
++ uint32_t saveVCLK_POST_DIV;
++ uint32_t saveVGACNTRL;
++ uint32_t saveADPA;
++ uint32_t saveLVDS;
++ uint32_t saveDVOA;
++ uint32_t saveDVOB;
++ uint32_t saveDVOC;
++ uint32_t savePP_ON;
++ uint32_t savePP_OFF;
++ uint32_t savePP_CONTROL;
++ uint32_t savePP_CYCLE;
++ uint32_t savePFIT_CONTROL;
++ uint32_t savePaletteA[256];
++ uint32_t savePaletteB[256];
++ uint32_t saveBLC_PWM_CTL2;
++ uint32_t saveBLC_PWM_CTL;
++ uint32_t saveCLOCKGATING;
++ uint32_t saveDSPARB;
++ uint32_t saveDSPATILEOFF;
++ uint32_t saveDSPBTILEOFF;
++ uint32_t saveDSPAADDR;
++ uint32_t saveDSPBADDR;
++ uint32_t savePFIT_AUTO_RATIOS;
++ uint32_t savePFIT_PGM_RATIOS;
++ uint32_t savePP_ON_DELAYS;
++ uint32_t savePP_OFF_DELAYS;
++ uint32_t savePP_DIVISOR;
++ uint32_t saveBCLRPAT_A;
++ uint32_t saveBCLRPAT_B;
++ uint32_t saveDSPALINOFF;
++ uint32_t saveDSPBLINOFF;
++ uint32_t savePERF_MODE;
++ uint32_t saveDSPFW1;
++ uint32_t saveDSPFW2;
++ uint32_t saveDSPFW3;
++ uint32_t saveDSPFW4;
++ uint32_t saveDSPFW5;
++ uint32_t saveDSPFW6;
++ uint32_t saveCHICKENBIT;
++ uint32_t saveDSPACURSOR_CTRL;
++ uint32_t saveDSPBCURSOR_CTRL;
++ uint32_t saveDSPACURSOR_BASE;
++ uint32_t saveDSPBCURSOR_BASE;
++ uint32_t saveDSPACURSOR_POS;
++ uint32_t saveDSPBCURSOR_POS;
++ uint32_t save_palette_a[256];
++ uint32_t save_palette_b[256];
++ uint32_t saveOV_OVADD;
++ uint32_t saveOV_OGAMC0;
++ uint32_t saveOV_OGAMC1;
++ uint32_t saveOV_OGAMC2;
++ uint32_t saveOV_OGAMC3;
++ uint32_t saveOV_OGAMC4;
++ uint32_t saveOV_OGAMC5;
++
++ /* DSI reg save */
++ uint32_t saveDEVICE_READY_REG;
++ uint32_t saveINTR_EN_REG;
++ uint32_t saveDSI_FUNC_PRG_REG;
++ uint32_t saveHS_TX_TIMEOUT_REG;
++ uint32_t saveLP_RX_TIMEOUT_REG;
++ uint32_t saveTURN_AROUND_TIMEOUT_REG;
++ uint32_t saveDEVICE_RESET_REG;
++ uint32_t saveDPI_RESOLUTION_REG;
++ uint32_t saveHORIZ_SYNC_PAD_COUNT_REG;
++ uint32_t saveHORIZ_BACK_PORCH_COUNT_REG;
++ uint32_t saveHORIZ_FRONT_PORCH_COUNT_REG;
++ uint32_t saveHORIZ_ACTIVE_AREA_COUNT_REG;
++ uint32_t saveVERT_SYNC_PAD_COUNT_REG;
++ uint32_t saveVERT_BACK_PORCH_COUNT_REG;
++ uint32_t saveVERT_FRONT_PORCH_COUNT_REG;
++ uint32_t saveHIGH_LOW_SWITCH_COUNT_REG;
++ uint32_t saveINIT_COUNT_REG;
++ uint32_t saveMAX_RET_PAK_REG;
++ uint32_t saveVIDEO_FMT_REG;
++ uint32_t saveEOT_DISABLE_REG;
++ uint32_t saveLP_BYTECLK_REG;
++ uint32_t saveHS_LS_DBI_ENABLE_REG;
++ uint32_t saveTXCLKESC_REG;
++ uint32_t saveDPHY_PARAM_REG;
++ uint32_t saveMIPI_CONTROL_REG;
++ uint32_t saveMIPI;
++ void (*init_drvIC)(struct drm_device *dev);
++
++ /* DPST Register Save */
++ uint32_t saveHISTOGRAM_INT_CONTROL_REG;
++ uint32_t saveHISTOGRAM_LOGIC_CONTROL_REG;
++
++ /*
++ *Scheduling.
++ */
++
++ struct mutex reset_mutex;
++ struct psb_scheduler scheduler;
++ struct mutex cmdbuf_mutex;
++ /*uint32_t ta_mem_pages;
++ struct psb_ta_mem *ta_mem;
++ int force_ta_mem_load;*/
++ atomic_t val_seq;
++
++ /*
++ *TODO: change this to be per drm-context.
++ */
++
++ struct psb_context context;
++
++ /*
++ * LID-Switch
++ */
++ spinlock_t lid_lock;
++ struct timer_list lid_timer;
++ struct psb_intel_opregion opregion;
++ u32 *lid_state;
++ u32 lid_last_state;
++
++ /*
++ *Watchdog
++ */
++
++ spinlock_t watchdog_lock;
++ struct timer_list watchdog_timer;
++ struct work_struct watchdog_wq;
++ struct work_struct msvdx_watchdog_wq;
++ struct work_struct topaz_watchdog_wq;
++ int timer_available;
++
++ uint32_t apm_reg;
++ uint16_t apm_base;
++#ifdef OSPM_STAT
++ unsigned char graphics_state;
++ unsigned long gfx_on_time;
++ unsigned long gfx_off_time;
++ unsigned long gfx_last_mode_change;
++ unsigned long gfx_on_cnt;
++ unsigned long gfx_off_cnt;
++#endif
++
++ /*to be removed later*/
++ /*int dri_page_flipping;
++ int current_page;
++ int pipe_active[2];
++ int saved_start[2];
++ int saved_offset[2];
++ int saved_stride[2];
++
++ int flip_start[2];
++ int flip_offset[2];
++ int flip_stride[2];*/
++
++
++ /*
++ * Used for modifying backlight from
++ * xrandr -- consider removing and using HAL instead
++ */
++ struct drm_property *backlight_property;
++ uint32_t blc_adj1;
++ uint32_t blc_adj2;
++
++ /*
++ * DPST and Hotplug state
++ */
++
++ struct dpst_state *psb_dpst_state;
++ struct hotplug_state *psb_hotplug_state;
++
++};
++
++struct psb_fpriv {
++ struct ttm_object_file *tfile;
++};
++
++struct psb_mmu_driver;
++
++extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
++extern int drm_pick_crtcs(struct drm_device *dev);
++
++
++static inline struct psb_fpriv *psb_fpriv(struct drm_file *file_priv)
++{
++ PVRSRV_FILE_PRIVATE_DATA *pvr_file_priv
++ = (PVRSRV_FILE_PRIVATE_DATA *)file_priv->driver_priv;
++ return (struct psb_fpriv *) pvr_file_priv->pPriv;
++}
++
++static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
++{
++ return (struct drm_psb_private *) dev->dev_private;
++}
++
++/*
++ *TTM glue. psb_ttm_glue.c
++ */
++
++extern int psb_open(struct inode *inode, struct file *filp);
++extern int psb_release(struct inode *inode, struct file *filp);
++extern int psb_mmap(struct file *filp, struct vm_area_struct *vma);
++
++extern int psb_fence_signaled_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_verify_access(struct ttm_buffer_object *bo,
++ struct file *filp);
++extern ssize_t psb_ttm_read(struct file *filp, char __user *buf,
++ size_t count, loff_t *f_pos);
++extern ssize_t psb_ttm_write(struct file *filp, const char __user *buf,
++ size_t count, loff_t *f_pos);
++extern int psb_fence_finish_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_fence_unref_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_setstatus_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_unref_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_reference_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_pl_create_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_extension_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_ttm_global_init(struct drm_psb_private *dev_priv);
++extern void psb_ttm_global_release(struct drm_psb_private *dev_priv);
++extern int psb_getpageaddrs_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++/*
++ *MMU stuff.
++ */
++
++extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
++ int trap_pagefaults,
++ int invalid_type,
++ struct drm_psb_private *dev_priv);
++extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
++extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
++ *driver);
++extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
++ uint32_t gtt_start, uint32_t gtt_pages);
++extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
++ int trap_pagefaults,
++ int invalid_type);
++extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
++extern void psb_mmu_flush(struct psb_mmu_driver *driver);
++extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
++ unsigned long address,
++ uint32_t num_pages);
++extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
++ uint32_t start_pfn,
++ unsigned long address,
++ uint32_t num_pages, int type);
++extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
++ unsigned long *pfn);
++
++/*
++ *Enable / disable MMU for different requestors.
++ */
++
++
++extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
++extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
++ unsigned long address, uint32_t num_pages,
++ uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride, int type);
++extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
++ unsigned long address, uint32_t num_pages,
++ uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride);
++/*
++ *psb_sgx.c
++ */
++
++
++
++extern int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_reg_submit(struct drm_psb_private *dev_priv,
++ uint32_t *regs, unsigned int cmds);
++
++
++extern void psb_fence_or_sync(struct drm_file *file_priv,
++ uint32_t engine,
++ uint32_t fence_types,
++ uint32_t fence_flags,
++ struct list_head *list,
++ struct psb_ttm_fence_rep *fence_arg,
++ struct ttm_fence_object **fence_p);
++extern int psb_validate_kernel_buffer(struct psb_context *context,
++ struct ttm_buffer_object *bo,
++ uint32_t fence_class,
++ uint64_t set_flags,
++ uint64_t clr_flags);
++
++
++/*
++ *psb_fence.c
++ */
++
++extern void psb_fence_handler(struct drm_device *dev, uint32_t class);
++
++extern int psb_fence_emit_sequence(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t flags, uint32_t *sequence,
++ unsigned long *timeout_jiffies);
++extern void psb_fence_error(struct drm_device *dev,
++ uint32_t class,
++ uint32_t sequence, uint32_t type, int error);
++extern int psb_ttm_fence_device_init(struct ttm_fence_device *fdev);
++
++/* MSVDX/Topaz stuff */
++extern int lnc_video_frameskip(struct drm_device *dev,
++ uint64_t user_pointer);
++extern int lnc_video_getparam(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_try_power_down_topaz(struct drm_device *dev);
++extern int psb_try_power_down_msvdx(struct drm_device *dev);
++
++
++/*
++ *psb_fb.c
++ */
++extern int psbfb_probed(struct drm_device *dev);
++extern int psbfb_remove(struct drm_device *dev,
++ struct drm_framebuffer *fb);
++extern int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++
++/*
++ *psb_reset.c
++ */
++
++extern void psb_schedule_watchdog(struct drm_psb_private *dev_priv);
++extern void psb_watchdog_init(struct drm_psb_private *dev_priv);
++extern void psb_watchdog_takedown(struct drm_psb_private *dev_priv);
++extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
++extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
++extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
++
++
++
++/* modesetting */
++extern void psb_modeset_init(struct drm_device *dev);
++extern void psb_modeset_cleanup(struct drm_device *dev);
++
++/* psb_bl.c */
++int psb_backlight_init(struct drm_device *dev);
++void psb_backlight_exit(void);
++int psb_set_brightness(struct backlight_device *bd);
++int psb_get_brightness(struct backlight_device *bd);
++
++/*
++ *Utilities
++ */
++#define DRM_DRIVER_PRIVATE_T struct drm_psb_private
++
++static inline u32 MSG_READ32(uint port, uint offset)
++{
++ int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
++ outl(0x800000D0, 0xCF8);
++ outl(mcr, 0xCFC);
++ outl(0x800000D4, 0xCF8);
++ return inl(0xcfc);
++}
++static inline void MSG_WRITE32(uint port, uint offset, u32 value)
++{
++ int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
++ outl(0x800000D4, 0xCF8);
++ outl(value, 0xcfc);
++ outl(0x800000D0, 0xCF8);
++ outl(mcr, 0xCFC);
++}
++
++static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ return ioread32(dev_priv->vdc_reg + (reg));
++}
++
++#define REG_READ(reg) REGISTER_READ(dev, (reg))
++static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
++ uint32_t val)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ iowrite32((val), dev_priv->vdc_reg + (reg));
++}
++
++#define REG_WRITE(reg, val) REGISTER_WRITE(dev, (reg), (val))
++
++static inline void REGISTER_WRITE16(struct drm_device *dev,
++ uint32_t reg, uint32_t val)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ iowrite16((val), dev_priv->vdc_reg + (reg));
++}
++
++#define REG_WRITE16(reg, val) REGISTER_WRITE16(dev, (reg), (val))
++
++static inline void REGISTER_WRITE8(struct drm_device *dev,
++ uint32_t reg, uint32_t val)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ iowrite8((val), dev_priv->vdc_reg + (reg));
++}
++
++#define REG_WRITE8(reg, val) REGISTER_WRITE8(dev, (reg), (val))
++
++#define PSB_ALIGN_TO(_val, _align) \
++ (((_val) + ((_align) - 1)) & ~((_align) - 1))
++#define PSB_WVDC32(_val, _offs) \
++ iowrite32(_val, dev_priv->vdc_reg + (_offs))
++#define PSB_RVDC32(_offs) \
++ ioread32(dev_priv->vdc_reg + (_offs))
++
++/* #define TRAP_SGX_PM_FAULT 1 */
++#ifdef TRAP_SGX_PM_FAULT
++#define PSB_RSGX32(_offs) \
++({ \
++ if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) { \
++ printk(KERN_ERR "access sgx when it's off!! (READ) %s, %d\n", \
++ __FILE__, __LINE__); \
++ mdelay(1000); \
++ } \
++ ioread32(dev_priv->sgx_reg + (_offs)); \
++})
++#else
++#define PSB_RSGX32(_offs) \
++ ioread32(dev_priv->sgx_reg + (_offs))
++#endif
++
++#define PSB_WMSVDX32(_val, _offs) \
++ iowrite32(_val, dev_priv->msvdx_reg + (_offs))
++#define PSB_RMSVDX32(_offs) \
++ ioread32(dev_priv->msvdx_reg + (_offs))
++
++#define PSB_ALPL(_val, _base) \
++ (((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT))
++#define PSB_ALPLM(_val, _base) \
++ ((((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) & (_base ## _MASK))
++
++#define PSB_D_RENDER (1 << 16)
++
++#define PSB_D_GENERAL (1 << 0)
++#define PSB_D_INIT (1 << 1)
++#define PSB_D_IRQ (1 << 2)
++#define PSB_D_FW (1 << 3)
++#define PSB_D_PERF (1 << 4)
++#define PSB_D_TMP (1 << 5)
++#define PSB_D_PM (1 << 6)
++
++extern int drm_psb_debug;
++extern int drm_psb_no_fb;
++extern int drm_idle_check_interval;
++extern int drm_topaz_sbuswa;
++
++#define PSB_DEBUG_FW(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_FW, _fmt, ##_arg)
++#define PSB_DEBUG_GENERAL(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_GENERAL, _fmt, ##_arg)
++#define PSB_DEBUG_INIT(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_INIT, _fmt, ##_arg)
++#define PSB_DEBUG_IRQ(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_IRQ, _fmt, ##_arg)
++#define PSB_DEBUG_RENDER(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_RENDER, _fmt, ##_arg)
++#define PSB_DEBUG_PERF(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_PERF, _fmt, ##_arg)
++#define PSB_DEBUG_TMP(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_TMP, _fmt, ##_arg)
++#define PSB_DEBUG_PM(_fmt, _arg...) \
++ PSB_DEBUG(PSB_D_PM, _fmt, ##_arg)
++
++#if DRM_DEBUG_CODE
++#define PSB_DEBUG(_flag, _fmt, _arg...) \
++ do { \
++ if (unlikely((_flag) & drm_psb_debug)) \
++ printk(KERN_DEBUG \
++ "[psb:0x%02x:%s] " _fmt , _flag, \
++ __func__ , ##_arg); \
++ } while (0)
++#else
++#define PSB_DEBUG(_fmt, _arg...) do { } while (0)
++#endif
++
++#define IS_POULSBO(dev) (((dev)->pci_device == 0x8108) || \
++ ((dev)->pci_device == 0x8109))
++
++#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_fb.c b/drivers/gpu/drm/mrst/drv/psb_fb.c
+new file mode 100644
+index 0000000..addec23
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_fb.c
+@@ -0,0 +1,1817 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/tty.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/fb.h>
++#include <linux/init.h>
++#include <linux/console.h>
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include <drm/drm_crtc.h>
++
++#include "psb_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_drv.h"
++#include "ttm/ttm_userobj_api.h"
++#include "psb_fb.h"
++#include "psb_sgx.h"
++#include "psb_pvr_glue.h"
++
++static int fill_fb_bitfield(struct fb_var_screeninfo *var, int depth)
++{
++ switch (depth) {
++ case 8:
++ var->red.offset = 0;
++ var->green.offset = 0;
++ var->blue.offset = 0;
++ var->red.length = 8;
++ var->green.length = 8;
++ var->blue.length = 8;
++ var->transp.length = 0;
++ var->transp.offset = 0;
++ break;
++ case 15:
++ var->red.offset = 10;
++ var->green.offset = 5;
++ var->blue.offset = 0;
++ var->red.length = 5;
++ var->green.length = 5;
++ var->blue.length = 5;
++ var->transp.length = 1;
++ var->transp.offset = 15;
++ break;
++ case 16:
++ var->red.offset = 11;
++ var->green.offset = 5;
++ var->blue.offset = 0;
++ var->red.length = 5;
++ var->green.length = 6;
++ var->blue.length = 5;
++ var->transp.length = 0;
++ var->transp.offset = 0;
++ break;
++ case 24:
++ var->red.offset = 16;
++ var->green.offset = 8;
++ var->blue.offset = 0;
++ var->red.length = 8;
++ var->green.length = 8;
++ var->blue.length = 8;
++ var->transp.length = 0;
++ var->transp.offset = 0;
++ break;
++ case 32:
++ var->red.offset = 16;
++ var->green.offset = 8;
++ var->blue.offset = 0;
++ var->red.length = 8;
++ var->green.length = 8;
++ var->blue.length = 8;
++ var->transp.length = 8;
++ var->transp.offset = 24;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
++static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
++ struct drm_file *file_priv,
++ unsigned int *handle);
++
++static const struct drm_framebuffer_funcs psb_fb_funcs = {
++ .destroy = psb_user_framebuffer_destroy,
++ .create_handle = psb_user_framebuffer_create_handle,
++};
++
++struct psbfb_par {
++ struct drm_device *dev;
++ struct psb_framebuffer *psbfb;
++
++ int dpms_state;
++
++ int crtc_count;
++ /* crtc currently bound to this */
++ uint32_t crtc_ids[2];
++};
++
++#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
++
++void *psbfb_vdc_reg(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv;
++ dev_priv = (struct drm_psb_private *) dev->dev_private;
++ return dev_priv->vdc_reg;
++}
++EXPORT_SYMBOL(psbfb_vdc_reg);
++
++static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
++ unsigned blue, unsigned transp,
++ struct fb_info *info)
++{
++ struct psbfb_par *par = info->par;
++ struct drm_framebuffer *fb = &par->psbfb->base;
++ uint32_t v;
++
++ if (!fb)
++ return -ENOMEM;
++
++ if (regno > 255)
++ return 1;
++
++#if 0 /* JB: not drop, check that this works */
++ if (fb->bits_per_pixel == 8) {
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
++ head) {
++ for (i = 0; i < par->crtc_count; i++)
++ if (crtc->base.id == par->crtc_ids[i])
++ break;
++
++ if (i == par->crtc_count)
++ continue;
++
++ if (crtc->funcs->gamma_set)
++ crtc->funcs->gamma_set(crtc, red, green,
++ blue, regno);
++ }
++ return 0;
++ }
++#endif
++
++ red = CMAP_TOHW(red, info->var.red.length);
++ blue = CMAP_TOHW(blue, info->var.blue.length);
++ green = CMAP_TOHW(green, info->var.green.length);
++ transp = CMAP_TOHW(transp, info->var.transp.length);
++
++ v = (red << info->var.red.offset) |
++ (green << info->var.green.offset) |
++ (blue << info->var.blue.offset) |
++ (transp << info->var.transp.offset);
++
++ if (regno < 16) {
++ switch (fb->bits_per_pixel) {
++ case 16:
++ ((uint32_t *) info->pseudo_palette)[regno] = v;
++ break;
++ case 24:
++ case 32:
++ ((uint32_t *) info->pseudo_palette)[regno] = v;
++ break;
++ }
++ }
++
++ return 0;
++}
++
++static struct drm_display_mode *psbfb_find_first_mode(struct
++ fb_var_screeninfo
++ *var,
++ struct fb_info *info,
++ struct drm_crtc
++ *crtc)
++{
++ struct psbfb_par *par = info->par;
++ struct drm_device *dev = par->dev;
++ struct drm_display_mode *drm_mode;
++ struct drm_display_mode *preferred_mode = NULL;
++ struct drm_display_mode *last_mode = NULL;
++ struct drm_connector *connector;
++ int found;
++
++ found = 0;
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ if (connector->encoder && connector->encoder->crtc == crtc) {
++ found = 1;
++ break;
++ }
++ }
++
++ /* found no connector, bail */
++ if (!found)
++ return NULL;
++
++ found = 0;
++ list_for_each_entry(drm_mode, &connector->modes, head) {
++ if (drm_mode->hdisplay == var->xres &&
++ drm_mode->vdisplay == var->yres
++ && drm_mode->clock != 0) {
++ found = 1;
++ last_mode = drm_mode;
++ if (IS_POULSBO(dev)) {
++ if (last_mode->type & DRM_MODE_TYPE_PREFERRED)
++ preferred_mode = last_mode;
++ }
++ }
++ }
++
++ /* No mode matching mode found */
++ if (!found)
++ return NULL;
++
++ if (IS_POULSBO(dev)) {
++ if (preferred_mode)
++ return preferred_mode;
++ else
++ return last_mode;
++ } else {
++ return last_mode;
++ }
++}
++
++static int psbfb_check_var(struct fb_var_screeninfo *var,
++ struct fb_info *info)
++{
++ struct psbfb_par *par = info->par;
++ struct psb_framebuffer *psbfb = par->psbfb;
++ struct drm_device *dev = par->dev;
++ int ret;
++ int depth;
++ int pitch;
++ int bpp = var->bits_per_pixel;
++
++ if (!psbfb)
++ return -ENOMEM;
++
++ if (!var->pixclock)
++ return -EINVAL;
++
++ /* don't support virtuals for now */
++ if (var->xres_virtual > var->xres)
++ return -EINVAL;
++
++ if (var->yres_virtual > var->yres)
++ return -EINVAL;
++
++ switch (bpp) {
++#if 0 /* JB: for now only support true color */
++ case 8:
++ depth = 8;
++ break;
++#endif
++ case 16:
++ depth = (var->green.length == 6) ? 16 : 15;
++ break;
++ case 24: /* assume this is 32bpp / depth 24 */
++ bpp = 32;
++ /* fallthrough */
++ case 32:
++ depth = (var->transp.length > 0) ? 32 : 24;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
++
++ /* Check that we can resize */
++ if ((pitch * var->yres) > psbfb->size) {
++#if 1
++ /* Need to resize the fb object.
++ * But the generic fbdev code doesn't really understand
++ * that we can do this. So disable for now.
++ */
++ DRM_INFO("Can't support requested size, too big!\n");
++ return -EINVAL;
++#endif
++ }
++
++ ret = fill_fb_bitfield(var, depth);
++ if (ret)
++ return ret;
++
++#if 1
++ /* Here we walk the output mode list and look for modes. If we haven't
++ * got it, then bail. Not very nice, so this is disabled.
++ * In the set_par code, we create our mode based on the incoming
++ * parameters. Nicer, but may not be desired by some.
++ */
++ {
++ struct drm_crtc *crtc;
++ int i;
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
++ head) {
++ struct psb_intel_crtc *psb_intel_crtc =
++ to_psb_intel_crtc(crtc);
++
++ for (i = 0; i < par->crtc_count; i++)
++ if (crtc->base.id == par->crtc_ids[i])
++ break;
++
++ if (i == par->crtc_count)
++ continue;
++
++ if (psb_intel_crtc->mode_set.num_connectors == 0)
++ continue;
++
++ if (!psbfb_find_first_mode(&info->var, info, crtc))
++ return -EINVAL;
++ }
++ }
++#else
++ (void) i;
++ (void) dev; /* silence warnings */
++ (void) crtc;
++ (void) drm_mode;
++ (void) connector;
++#endif
++
++ return 0;
++}
++
++/* this will let fbcon do the mode init */
++static int psbfb_set_par(struct fb_info *info)
++{
++ struct psbfb_par *par = info->par;
++ struct psb_framebuffer *psbfb = par->psbfb;
++ struct drm_framebuffer *fb = &psbfb->base;
++ struct drm_device *dev = par->dev;
++ struct fb_var_screeninfo *var = &info->var;
++ /* struct drm_psb_private *dev_priv = dev->dev_private; */
++ struct drm_display_mode *drm_mode;
++ int pitch;
++ int depth;
++ int bpp = var->bits_per_pixel;
++
++ if (!fb)
++ return -ENOMEM;
++
++ switch (bpp) {
++ case 8:
++ depth = 8;
++ break;
++ case 16:
++ depth = (var->green.length == 6) ? 16 : 15;
++ break;
++ case 24: /* assume this is 32bpp / depth 24 */
++ bpp = 32;
++ /* fallthrough */
++ case 32:
++ depth = (var->transp.length > 0) ? 32 : 24;
++ break;
++ default:
++ DRM_ERROR("Illegal BPP\n");
++ return -EINVAL;
++ }
++
++ pitch = ((var->xres * ((bpp + 1) / 8)) + 0x3f) & ~0x3f;
++
++ if ((pitch * var->yres) > (psbfb->size)) {
++#if 1
++ /* Need to resize the fb object.
++ * But the generic fbdev code doesn't really understand
++ * that we can do this. So disable for now.
++ */
++ DRM_INFO("Can't support requested size, too big!\n");
++ return -EINVAL;
++#endif
++ }
++
++ psbfb->offset = 0;
++ fb->width = var->xres;
++ fb->height = var->yres;
++ fb->bits_per_pixel = bpp;
++ fb->pitch = pitch;
++ fb->depth = depth;
++
++ info->fix.line_length = psbfb->base.pitch;
++ info->fix.visual =
++ (psbfb->base.depth ==
++ 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
++
++ /* some fbdev's apps don't want these to change */
++ info->fix.smem_start = dev->mode_config.fb_base + psbfb->offset;
++
++#if 0
++ /* relates to resize - disable */
++ info->fix.smem_len = info->fix.line_length * var->yres;
++ info->screen_size = info->fix.smem_len; /* ??? */
++#endif
++
++ /* Should we walk the output's modelist or just create our own ???
++ * For now, we create and destroy a mode based on the incoming
++ * parameters. But there's commented out code below which scans
++ * the output list too.
++ */
++#if 1
++ /* This code is now in the for loop futher down. */
++#endif
++
++ {
++ struct drm_crtc *crtc;
++ int ret;
++ int i;
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
++ head) {
++ struct psb_intel_crtc *psb_intel_crtc =
++ to_psb_intel_crtc(crtc);
++
++ for (i = 0; i < par->crtc_count; i++)
++ if (crtc->base.id == par->crtc_ids[i])
++ break;
++
++ if (i == par->crtc_count)
++ continue;
++
++ if (psb_intel_crtc->mode_set.num_connectors == 0)
++ continue;
++
++#if 1
++ drm_mode =
++ psbfb_find_first_mode(&info->var, info, crtc);
++ if (!drm_mode)
++ DRM_ERROR("No matching mode found\n");
++ psb_intel_crtc->mode_set.mode = drm_mode;
++#endif
++
++#if 0 /* FIXME: TH */
++ if (crtc->fb == psb_intel_crtc->mode_set.fb) {
++#endif
++ DRM_DEBUG
++ ("setting mode on crtc %p with id %u\n",
++ crtc, crtc->base.id);
++ ret =
++ crtc->funcs->
++ set_config(&psb_intel_crtc->mode_set);
++ if (ret) {
++ DRM_ERROR("Failed setting mode\n");
++ return ret;
++ }
++#if 0
++ }
++#endif
++ }
++ DRM_DEBUG("Set par returned OK.\n");
++ return 0;
++ }
++
++ return 0;
++}
++#if 0
++static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
++ unsigned size)
++{
++ int ret = 0;
++ int i;
++ unsigned submit_size;
++
++ while (size > 0) {
++ submit_size = (size < 0x60) ? size : 0x60;
++ size -= submit_size;
++ ret = psb_2d_wait_available(dev_priv, submit_size);
++ if (ret)
++ return ret;
++
++ submit_size <<= 2;
++ for (i = 0; i < submit_size; i += 4)
++ PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
++
++ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
++ }
++ return 0;
++}
++
++static int psb_accel_2d_fillrect(struct drm_psb_private *dev_priv,
++ uint32_t dst_offset, uint32_t dst_stride,
++ uint32_t dst_format, uint16_t dst_x,
++ uint16_t dst_y, uint16_t size_x,
++ uint16_t size_y, uint32_t fill)
++{
++ uint32_t buffer[10];
++ uint32_t *buf;
++
++ buf = buffer;
++
++ *buf++ = PSB_2D_FENCE_BH;
++
++ *buf++ =
++ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
++ PSB_2D_DST_STRIDE_SHIFT);
++ *buf++ = dst_offset;
++
++ *buf++ =
++ PSB_2D_BLIT_BH |
++ PSB_2D_ROT_NONE |
++ PSB_2D_COPYORDER_TL2BR |
++ PSB_2D_DSTCK_DISABLE |
++ PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
++
++ *buf++ = fill << PSB_2D_FILLCOLOUR_SHIFT;
++ *buf++ =
++ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
++ PSB_2D_DST_YSTART_SHIFT);
++ *buf++ =
++ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
++ PSB_2D_DST_YSIZE_SHIFT);
++ *buf++ = PSB_2D_FLUSH_BH;
++
++ return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
++}
++
++static void psbfb_fillrect_accel(struct fb_info *info,
++ const struct fb_fillrect *r)
++{
++ struct psbfb_par *par = info->par;
++ struct psb_framebuffer *psbfb = par->psbfb;
++ struct drm_framebuffer *fb = &psbfb->base;
++ struct drm_psb_private *dev_priv = par->dev->dev_private;
++ uint32_t offset;
++ uint32_t stride;
++ uint32_t format;
++
++ if (!fb)
++ return;
++
++ offset = psbfb->offset;
++ stride = fb->pitch;
++
++ switch (fb->depth) {
++ case 8:
++ format = PSB_2D_DST_332RGB;
++ break;
++ case 15:
++ format = PSB_2D_DST_555RGB;
++ break;
++ case 16:
++ format = PSB_2D_DST_565RGB;
++ break;
++ case 24:
++ case 32:
++ /* this is wrong but since we don't do blending its okay */
++ format = PSB_2D_DST_8888ARGB;
++ break;
++ default:
++ /* software fallback */
++ cfb_fillrect(info, r);
++ return;
++ }
++
++ psb_accel_2d_fillrect(dev_priv,
++ offset, stride, format,
++ r->dx, r->dy, r->width, r->height, r->color);
++}
++
++static void psbfb_fillrect(struct fb_info *info,
++ const struct fb_fillrect *rect)
++{
++ struct psbfb_par *par = info->par;
++ struct drm_device *dev = par->dev;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ if (unlikely(info->state != FBINFO_STATE_RUNNING))
++ return;
++
++ if (info->flags & FBINFO_HWACCEL_DISABLED)
++ return cfb_fillrect(info, rect);
++ /*
++ * psbfb_fillrect is atomic so need to do instantaneous check of
++ * power on
++ */
++ if (powermgmt_is_suspend_in_progress(PSB_GRAPHICS_ISLAND) ||
++ powermgmt_is_resume_in_progress(PSB_GRAPHICS_ISLAND) ||
++ !powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND))
++ return cfb_fillrect(info, rect);
++
++ if (psb_2d_trylock(dev_priv)) {
++ psbfb_fillrect_accel(info, rect);
++ psb_2d_unlock(dev_priv);
++ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
++ } else
++ cfb_fillrect(info, rect);
++}
++
++uint32_t psb_accel_2d_copy_direction(int xdir, int ydir)
++{
++ if (xdir < 0)
++ return (ydir <
++ 0) ? PSB_2D_COPYORDER_BR2TL :
++ PSB_2D_COPYORDER_TR2BL;
++ else
++ return (ydir <
++ 0) ? PSB_2D_COPYORDER_BL2TR :
++ PSB_2D_COPYORDER_TL2BR;
++}
++
++/*
++ * @srcOffset in bytes
++ * @srcStride in bytes
++ * @srcFormat psb 2D format defines
++ * @dstOffset in bytes
++ * @dstStride in bytes
++ * @dstFormat psb 2D format defines
++ * @srcX offset in pixels
++ * @srcY offset in pixels
++ * @dstX offset in pixels
++ * @dstY offset in pixels
++ * @sizeX of the copied area
++ * @sizeY of the copied area
++ */
++static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
++ uint32_t src_offset, uint32_t src_stride,
++ uint32_t src_format, uint32_t dst_offset,
++ uint32_t dst_stride, uint32_t dst_format,
++ uint16_t src_x, uint16_t src_y,
++ uint16_t dst_x, uint16_t dst_y,
++ uint16_t size_x, uint16_t size_y)
++{
++ uint32_t blit_cmd;
++ uint32_t buffer[10];
++ uint32_t *buf;
++ uint32_t direction;
++
++ buf = buffer;
++
++ direction =
++ psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
++
++ if (direction == PSB_2D_COPYORDER_BR2TL ||
++ direction == PSB_2D_COPYORDER_TR2BL) {
++ src_x += size_x - 1;
++ dst_x += size_x - 1;
++ }
++ if (direction == PSB_2D_COPYORDER_BR2TL ||
++ direction == PSB_2D_COPYORDER_BL2TR) {
++ src_y += size_y - 1;
++ dst_y += size_y - 1;
++ }
++
++ blit_cmd =
++ PSB_2D_BLIT_BH |
++ PSB_2D_ROT_NONE |
++ PSB_2D_DSTCK_DISABLE |
++ PSB_2D_SRCCK_DISABLE |
++ PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
++
++ *buf++ = PSB_2D_FENCE_BH;
++ *buf++ =
++ PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
++ PSB_2D_DST_STRIDE_SHIFT);
++ *buf++ = dst_offset;
++ *buf++ =
++ PSB_2D_SRC_SURF_BH | src_format | (src_stride <<
++ PSB_2D_SRC_STRIDE_SHIFT);
++ *buf++ = src_offset;
++ *buf++ =
++ PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) |
++ (src_y << PSB_2D_SRCOFF_YSTART_SHIFT);
++ *buf++ = blit_cmd;
++ *buf++ =
++ (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
++ PSB_2D_DST_YSTART_SHIFT);
++ *buf++ =
++ (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
++ PSB_2D_DST_YSIZE_SHIFT);
++ *buf++ = PSB_2D_FLUSH_BH;
++
++ return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
++}
++
++static void psbfb_copyarea_accel(struct fb_info *info,
++ const struct fb_copyarea *a)
++{
++ struct psbfb_par *par = info->par;
++ struct psb_framebuffer *psbfb = par->psbfb;
++ struct drm_framebuffer *fb = &psbfb->base;
++ struct drm_psb_private *dev_priv = par->dev->dev_private;
++ uint32_t offset;
++ uint32_t stride;
++ uint32_t src_format;
++ uint32_t dst_format;
++
++ if (!fb)
++ return;
++
++ offset = psbfb->offset;
++ stride = fb->pitch;
++
++ switch (fb->depth) {
++ case 8:
++ src_format = PSB_2D_SRC_332RGB;
++ dst_format = PSB_2D_DST_332RGB;
++ break;
++ case 15:
++ src_format = PSB_2D_SRC_555RGB;
++ dst_format = PSB_2D_DST_555RGB;
++ break;
++ case 16:
++ src_format = PSB_2D_SRC_565RGB;
++ dst_format = PSB_2D_DST_565RGB;
++ break;
++ case 24:
++ case 32:
++ /* this is wrong but since we don't do blending its okay */
++ src_format = PSB_2D_SRC_8888ARGB;
++ dst_format = PSB_2D_DST_8888ARGB;
++ break;
++ default:
++ /* software fallback */
++ cfb_copyarea(info, a);
++ return;
++ }
++
++ psb_accel_2d_copy(dev_priv,
++ offset, stride, src_format,
++ offset, stride, dst_format,
++ a->sx, a->sy, a->dx, a->dy, a->width, a->height);
++}
++
++static void psbfb_copyarea(struct fb_info *info,
++ const struct fb_copyarea *region)
++{
++ struct psbfb_par *par = info->par;
++ struct drm_device *dev = par->dev;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ if (unlikely(info->state != FBINFO_STATE_RUNNING))
++ return;
++
++ if (info->flags & FBINFO_HWACCEL_DISABLED)
++ return cfb_copyarea(info, region);
++ /*
++ * psbfb_copyarea is atomic so need to do instantaneous check of
++ * power on
++ */
++ if (powermgmt_is_suspend_in_progress(PSB_GRAPHICS_ISLAND) ||
++ powermgmt_is_resume_in_progress(PSB_GRAPHICS_ISLAND) ||
++ !powermgmt_is_hw_on(dev->pdev, PSB_GRAPHICS_ISLAND))
++ return cfb_copyarea(info, region);
++
++ if (psb_2d_trylock(dev_priv)) {
++ psbfb_copyarea_accel(info, region);
++ psb_2d_unlock(dev_priv);
++ schedule_delayed_work(&dev_priv->scheduler.wq, 1);
++ } else
++ cfb_copyarea(info, region);
++}
++#endif
++void psbfb_imageblit(struct fb_info *info, const struct fb_image *image)
++{
++ if (unlikely(info->state != FBINFO_STATE_RUNNING))
++ return;
++
++ cfb_imageblit(info, image);
++}
++
++static void psbfb_onoff(struct fb_info *info, int dpms_mode)
++{
++ struct psbfb_par *par = info->par;
++ struct drm_device *dev = par->dev;
++ struct drm_crtc *crtc;
++ struct drm_encoder *encoder;
++ int i;
++
++ /*
++ * For each CRTC in this fb, find all associated encoders
++ * and turn them off, then turn off the CRTC.
++ */
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ struct drm_crtc_helper_funcs *crtc_funcs =
++ crtc->helper_private;
++
++ for (i = 0; i < par->crtc_count; i++)
++ if (crtc->base.id == par->crtc_ids[i])
++ break;
++
++ if (i == par->crtc_count)
++ continue;
++
++ if (dpms_mode == DRM_MODE_DPMS_ON)
++ crtc_funcs->dpms(crtc, dpms_mode);
++
++ /* Found a CRTC on this fb, now find encoders */
++ list_for_each_entry(encoder,
++ &dev->mode_config.encoder_list, head) {
++ if (encoder->crtc == crtc) {
++ struct drm_encoder_helper_funcs
++ *encoder_funcs;
++ encoder_funcs = encoder->helper_private;
++ encoder_funcs->dpms(encoder, dpms_mode);
++ }
++ }
++
++ if (dpms_mode == DRM_MODE_DPMS_OFF)
++ crtc_funcs->dpms(crtc, dpms_mode);
++ }
++}
++
++static int psbfb_blank(int blank_mode, struct fb_info *info)
++{
++ struct psbfb_par *par = info->par;
++
++ par->dpms_state = blank_mode;
++ PSB_DEBUG_PM("psbfb_blank \n");
++ switch (blank_mode) {
++ case FB_BLANK_UNBLANK:
++ psbfb_onoff(info, DRM_MODE_DPMS_ON);
++ break;
++ case FB_BLANK_NORMAL:
++ psbfb_onoff(info, DRM_MODE_DPMS_STANDBY);
++ break;
++ case FB_BLANK_HSYNC_SUSPEND:
++ psbfb_onoff(info, DRM_MODE_DPMS_STANDBY);
++ break;
++ case FB_BLANK_VSYNC_SUSPEND:
++ psbfb_onoff(info, DRM_MODE_DPMS_SUSPEND);
++ break;
++ case FB_BLANK_POWERDOWN:
++ psbfb_onoff(info, DRM_MODE_DPMS_OFF);
++ break;
++ }
++
++ return 0;
++}
++
++
++static int psbfb_kms_off(struct drm_device *dev, int suspend)
++{
++ struct drm_framebuffer *fb = 0;
++ DRM_DEBUG("psbfb_kms_off_ioctl\n");
++
++ mutex_lock(&dev->mode_config.mutex);
++ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
++ struct fb_info *info = fb->fbdev;
++
++ if (suspend) {
++ fb_set_suspend(info, 1);
++ psbfb_blank(FB_BLANK_POWERDOWN, info);
++ }
++ }
++ mutex_unlock(&dev->mode_config.mutex);
++ return 0;
++}
++
++int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ int ret;
++
++ if (drm_psb_no_fb)
++ return 0;
++ acquire_console_sem();
++ ret = psbfb_kms_off(dev, 0);
++ release_console_sem();
++
++ return ret;
++}
++
++static int psbfb_kms_on(struct drm_device *dev, int resume)
++{
++ struct drm_framebuffer *fb = 0;
++
++ DRM_DEBUG("psbfb_kms_on_ioctl\n");
++
++ mutex_lock(&dev->mode_config.mutex);
++ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
++ struct fb_info *info = fb->fbdev;
++
++ if (resume) {
++ fb_set_suspend(info, 0);
++ psbfb_blank(FB_BLANK_UNBLANK, info);
++ }
++
++ }
++ mutex_unlock(&dev->mode_config.mutex);
++
++ return 0;
++}
++
++int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ int ret;
++
++ if (drm_psb_no_fb)
++ return 0;
++ acquire_console_sem();
++ ret = psbfb_kms_on(dev, 0);
++ release_console_sem();
++ drm_helper_disable_unused_functions(dev);
++ return ret;
++}
++
++void psbfb_suspend(struct drm_device *dev)
++{
++ acquire_console_sem();
++ psbfb_kms_off(dev, 1);
++ release_console_sem();
++}
++
++void psbfb_resume(struct drm_device *dev)
++{
++ acquire_console_sem();
++ psbfb_kms_on(dev, 1);
++ release_console_sem();
++ drm_helper_disable_unused_functions(dev);
++}
++
++static int psbfb_vm_fault(struct vm_area_struct * vma, struct vm_fault * vmf)
++{
++ int page_num = 0;
++ int i;
++ unsigned long address = 0;
++ int ret;
++ unsigned long pfn;
++ struct psb_framebuffer *psbfb = (struct psb_framebuffer *)vma->vm_private_data;
++ struct drm_device * dev = psbfb->base.dev;
++ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
++ struct psb_gtt *pg = dev_priv->pg;
++ unsigned long phys_addr = (unsigned long)pg->stolen_base;;
++
++ page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++
++ address = (unsigned long)vmf->virtual_address;
++
++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++
++ for(i=0; i<page_num; i++) {
++ pfn = (phys_addr >> PAGE_SHIFT); //phys_to_pfn(phys_addr);
++
++ ret = vm_insert_mixed(vma, address, pfn);
++ if(unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
++ break;
++ else if(unlikely(ret != 0)) {
++ ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
++ return ret;
++ }
++
++ address += PAGE_SIZE;
++ phys_addr += PAGE_SIZE;
++ }
++
++ return VM_FAULT_NOPAGE;
++}
++
++static void psbfb_vm_open(struct vm_area_struct * vma)
++{
++ DRM_DEBUG("vm_open\n");
++}
++
++static void psbfb_vm_close(struct vm_area_struct * vma)
++{
++ DRM_DEBUG("vm_close\n");
++}
++
++static struct vm_operations_struct psbfb_vm_ops = {
++ .fault = psbfb_vm_fault,
++ .open = psbfb_vm_open,
++ .close = psbfb_vm_close
++};
++
++static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
++{
++ struct psbfb_par *par = info->par;
++ struct psb_framebuffer *psbfb = par->psbfb;
++ char * fb_screen_base = NULL;
++ struct drm_device * dev = psbfb->base.dev;
++ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
++ struct psb_gtt *pg = dev_priv->pg;
++
++ if (vma->vm_pgoff != 0)
++ return -EINVAL;
++ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
++ return -EINVAL;
++
++ if (!psbfb->addr_space)
++ psbfb->addr_space = vma->vm_file->f_mapping;
++
++ fb_screen_base = (char *)info->screen_base;
++
++ DRM_DEBUG("vm_pgoff 0x%lx, screen base %p vram_addr %p\n", vma->vm_pgoff, fb_screen_base, pg->vram_addr);
++
++ /*if using stolen memory, */
++ if(fb_screen_base == pg->vram_addr) {
++ vma->vm_ops = &psbfb_vm_ops;
++ vma->vm_private_data = (void *)psbfb;
++ vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
++ } else {
++ /*using IMG meminfo, can I use pvrmmap to map it?*/
++
++
++ }
++
++ return 0;
++}
++
++
++static struct fb_ops psbfb_ops = {
++ .owner = THIS_MODULE,
++ .fb_check_var = psbfb_check_var,
++ .fb_set_par = psbfb_set_par,
++ .fb_setcolreg = psbfb_setcolreg,
++ .fb_fillrect = cfb_fillrect,
++ .fb_copyarea = cfb_copyarea,
++ .fb_imageblit = cfb_imageblit,
++ .fb_mmap = psbfb_mmap,
++ .fb_blank = psbfb_blank,
++};
++
++static struct drm_mode_set panic_mode;
++
++int psbfb_panic(struct notifier_block *n, unsigned long ununsed,
++ void *panic_str)
++{
++ DRM_ERROR("panic occurred, switching back to text console\n");
++ drm_crtc_helper_set_config(&panic_mode);
++
++ return 0;
++}
++EXPORT_SYMBOL(psbfb_panic);
++
++static struct notifier_block paniced = {
++ .notifier_call = psbfb_panic,
++};
++
++
++static struct drm_framebuffer *psb_framebuffer_create
++ (struct drm_device *dev, struct drm_mode_fb_cmd *r,
++ void *mm_private)
++{
++ struct psb_framebuffer *fb;
++ int ret;
++
++ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
++ if (!fb)
++ return NULL;
++
++ ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
++
++ if (ret)
++ goto err;
++
++ drm_helper_mode_fill_fb_struct(&fb->base, r);
++
++ fb->pvrBO = mm_private;
++
++ return &fb->base;
++
++err:
++ kfree(fb);
++ return NULL;
++}
++
++static struct drm_framebuffer *psb_user_framebuffer_create
++ (struct drm_device *dev, struct drm_file *filp,
++ struct drm_mode_fb_cmd *r)
++{
++ struct psb_framebuffer *psbfb;
++ struct drm_framebuffer *fb;
++ struct fb_info *info;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = IMG_NULL;
++ IMG_HANDLE hKernelMemInfo = (IMG_HANDLE)r->handle;
++ struct drm_psb_private *dev_priv
++ = (struct drm_psb_private *) dev->dev_private;
++ struct psb_gtt *pg = dev_priv->pg;
++ int ret;
++ uint32_t offset;
++ uint64_t size;
++
++ ret = psb_get_meminfo_by_handle(hKernelMemInfo, &psKernelMemInfo);
++ if (ret) {
++ DRM_ERROR("Cannot get meminfo for handle %lx\n",
++ (IMG_UINT32)hKernelMemInfo);
++
++ return NULL;
++ }
++
++ DRM_DEBUG("Got Kernel MemInfo for handle %lx\n",
++ (IMG_UINT32)hKernelMemInfo);
++
++ /* JB: TODO not drop, make smarter */
++ size = psKernelMemInfo->ui32AllocSize;
++ if (size < r->height * r->pitch)
++ return NULL;
++
++ /* JB: TODO not drop, refcount buffer */
++ /* return psb_framebuffer_create(dev, r, bo); */
++
++ fb = psb_framebuffer_create(dev, r, (void *)psKernelMemInfo);
++ if (!fb) {
++ DRM_ERROR("failed to allocate fb.\n");
++ return NULL;
++ }
++
++ psbfb = to_psb_fb(fb);
++ psbfb->size = size;
++ psbfb->hKernelMemInfo = hKernelMemInfo;
++
++ DRM_DEBUG("Mapping to gtt..., KernelMemInfo %p\n", psKernelMemInfo);
++
++ /*if not VRAM, map it into tt aperture*/
++ if (psKernelMemInfo->pvLinAddrKM != pg->vram_addr) {
++ ret = psb_gtt_map_meminfo(dev, hKernelMemInfo, &offset);
++ if (ret) {
++ DRM_ERROR("map meminfo for %lx failed\n",
++ (IMG_UINT32)hKernelMemInfo);
++ return NULL;
++ }
++ psbfb->offset = (offset << PAGE_SHIFT);
++ } else {
++ psbfb->offset = 0;
++ }
++
++ info = framebuffer_alloc(sizeof(struct psbfb_par), &dev->pdev->dev);
++ if (!info)
++ return NULL;
++
++ strcpy(info->fix.id, "psbfb");
++ info->fix.type = FB_TYPE_PACKED_PIXELS;
++ info->fix.visual = FB_VISUAL_TRUECOLOR;
++ info->fix.type_aux = 0;
++ info->fix.xpanstep = 1; /* doing it in hw */
++ info->fix.ypanstep = 1; /* doing it in hw */
++ info->fix.ywrapstep = 0;
++ info->fix.accel = FB_ACCEL_I830;
++ info->fix.type_aux = 0;
++
++ info->flags = FBINFO_DEFAULT;
++
++ info->fbops = &psbfb_ops;
++
++ info->fix.line_length = fb->pitch;
++ info->fix.smem_start = dev->mode_config.fb_base;
++ info->fix.smem_len = size;
++
++ info->flags = FBINFO_DEFAULT;
++
++ info->screen_base = psKernelMemInfo->pvLinAddrKM;
++ info->screen_size = size;
++
++ /* it is called for kms flip, the back buffer has been rendered,
++ * then we should not clear it*/
++#if 0
++ if (is_iomem)
++ memset_io(info->screen_base, 0, size);
++ else
++ memset(info->screen_base, 0, size);
++#endif
++ info->pseudo_palette = fb->pseudo_palette;
++ info->var.xres_virtual = fb->width;
++ info->var.yres_virtual = fb->height;
++ info->var.bits_per_pixel = fb->bits_per_pixel;
++ info->var.xoffset = 0;
++ info->var.yoffset = 0;
++ info->var.activate = FB_ACTIVATE_NOW;
++ info->var.height = -1;
++ info->var.width = -1;
++
++ info->var.xres = r->width;
++ info->var.yres = r->height;
++
++ info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
++ info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
++
++ info->pixmap.size = 64 * 1024;
++ info->pixmap.buf_align = 8;
++ info->pixmap.access_align = 32;
++ info->pixmap.flags = FB_PIXMAP_SYSTEM;
++ info->pixmap.scan_align = 1;
++
++ fill_fb_bitfield(&info->var, fb->depth);
++
++ register_framebuffer(info);
++
++ fb->fbdev = info;
++
++ return fb;
++}
++
++int psbfb_create(struct drm_device *dev, uint32_t fb_width,
++ uint32_t fb_height, uint32_t surface_width,
++ uint32_t surface_height, struct psb_framebuffer **psbfb_p)
++{
++ struct fb_info *info;
++ struct psbfb_par *par;
++ struct drm_framebuffer *fb;
++ struct psb_framebuffer *psbfb;
++ struct drm_mode_fb_cmd mode_cmd;
++ struct device *device = &dev->pdev->dev;
++ struct drm_psb_private *dev_priv
++ = (struct drm_psb_private *)dev->dev_private;
++ struct psb_gtt *pg = dev_priv->pg;
++ int size, aligned_size, ret;
++
++ mode_cmd.width = surface_width; /* crtc->desired_mode->hdisplay; */
++ mode_cmd.height = surface_height; /* crtc->desired_mode->vdisplay; */
++
++ mode_cmd.bpp = 32;
++ //HW requires pitch to be 64 byte aligned
++ mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
++ mode_cmd.depth = 24;
++
++ size = mode_cmd.pitch * mode_cmd.height;
++ aligned_size = ALIGN(size, PAGE_SIZE);
++
++ mutex_lock(&dev->struct_mutex);
++ fb = psb_framebuffer_create(dev, &mode_cmd, NULL);
++ if (!fb) {
++
++ DRM_ERROR("failed to allocate fb.\n");
++ ret = -ENOMEM;
++ goto out_err0;
++ }
++ psbfb = to_psb_fb(fb);
++ psbfb->size = size;
++
++ list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
++ info = framebuffer_alloc(sizeof(struct psbfb_par), device);
++ if (!info) {
++ ret = -ENOMEM;
++ goto out_err1;
++ }
++
++ par = info->par;
++ par->psbfb = psbfb;
++
++ strcpy(info->fix.id, "psbfb");
++ info->fix.type = FB_TYPE_PACKED_PIXELS;
++ info->fix.visual = FB_VISUAL_TRUECOLOR;
++ info->fix.type_aux = 0;
++ info->fix.xpanstep = 1; /* doing it in hw */
++ info->fix.ypanstep = 1; /* doing it in hw */
++ info->fix.ywrapstep = 0;
++ info->fix.accel = FB_ACCEL_I830;
++ info->fix.type_aux = 0;
++
++ info->flags = FBINFO_DEFAULT;
++
++ info->fbops = &psbfb_ops;
++
++ info->fix.line_length = fb->pitch;
++ info->fix.smem_start = dev->mode_config.fb_base;
++ info->fix.smem_len = size;
++ info->flags = FBINFO_DEFAULT;
++ info->screen_base = (char *)pg->vram_addr;
++ info->screen_size = size;
++ memset(info->screen_base, 0, size);
++
++ info->pseudo_palette = fb->pseudo_palette;
++ info->var.xres_virtual = fb->width;
++ info->var.yres_virtual = fb->height;
++ info->var.bits_per_pixel = fb->bits_per_pixel;
++ info->var.xoffset = 0;
++ info->var.yoffset = 0;
++ info->var.activate = FB_ACTIVATE_NOW;
++ info->var.height = -1;
++ info->var.width = -1;
++
++ info->var.xres = fb_width;
++ info->var.yres = fb_height;
++
++ info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
++ info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
++
++ info->pixmap.size = 64 * 1024;
++ info->pixmap.buf_align = 8;
++ info->pixmap.access_align = 32;
++ info->pixmap.flags = FB_PIXMAP_SYSTEM;
++ info->pixmap.scan_align = 1;
++
++ DRM_DEBUG("fb depth is %d\n", fb->depth);
++ DRM_DEBUG(" pitch is %d\n", fb->pitch);
++ fill_fb_bitfield(&info->var, fb->depth);
++
++ fb->fbdev = info;
++
++ par->dev = dev;
++
++ /* To allow resizing without swapping buffers */
++ printk(KERN_INFO"allocated %dx%d fb\n",
++ psbfb->base.width,
++ psbfb->base.height);
++
++ if (psbfb_p)
++ *psbfb_p = psbfb;
++
++ mutex_unlock(&dev->struct_mutex);
++
++ return 0;
++out_err1:
++ fb->funcs->destroy(fb);
++out_err0:
++ mutex_unlock(&dev->struct_mutex);
++ return ret;
++}
++
++static int psbfb_multi_fb_probe_crtc(struct drm_device *dev,
++ struct drm_crtc *crtc)
++{
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct drm_framebuffer *fb = crtc->fb;
++ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
++ struct drm_connector *connector;
++ struct fb_info *info;
++ struct psbfb_par *par;
++ struct drm_mode_set *modeset;
++ unsigned int width, height;
++ int new_fb = 0;
++ int ret, i, conn_count;
++
++ if (!drm_helper_crtc_in_use(crtc))
++ return 0;
++
++ if (!crtc->desired_mode)
++ return 0;
++
++ width = crtc->desired_mode->hdisplay;
++ height = crtc->desired_mode->vdisplay;
++
++ /* is there an fb bound to this crtc already */
++ if (!psb_intel_crtc->mode_set.fb) {
++ ret =
++ psbfb_create(dev, width, height, width, height,
++ &psbfb);
++ if (ret)
++ return -EINVAL;
++ new_fb = 1;
++ } else {
++ fb = psb_intel_crtc->mode_set.fb;
++ if ((fb->width < width) || (fb->height < height))
++ return -EINVAL;
++ }
++
++ info = fb->fbdev;
++ par = info->par;
++
++ modeset = &psb_intel_crtc->mode_set;
++ modeset->fb = fb;
++ conn_count = 0;
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ if (connector->encoder)
++ if (connector->encoder->crtc == modeset->crtc) {
++ modeset->connectors[conn_count] =
++ connector;
++ conn_count++;
++ if (conn_count > INTELFB_CONN_LIMIT)
++ BUG();
++ }
++ }
++
++ for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
++ modeset->connectors[i] = NULL;
++
++ par->crtc_ids[0] = crtc->base.id;
++
++ modeset->num_connectors = conn_count;
++ if (modeset->mode != modeset->crtc->desired_mode)
++ modeset->mode = modeset->crtc->desired_mode;
++
++ par->crtc_count = 1;
++
++ if (new_fb) {
++ info->var.pixclock = -1;
++ if (register_framebuffer(info) < 0)
++ return -EINVAL;
++ } else
++ psbfb_set_par(info);
++
++ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
++ info->fix.id);
++
++ /* Switch back to kernel console on panic */
++ panic_mode = *modeset;
++ atomic_notifier_chain_register(&panic_notifier_list, &paniced);
++ printk(KERN_INFO "registered panic notifier\n");
++
++ return 0;
++}
++
++static int psbfb_multi_fb_probe(struct drm_device *dev)
++{
++
++ struct drm_crtc *crtc;
++ int ret = 0;
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ ret = psbfb_multi_fb_probe_crtc(dev, crtc);
++ if (ret)
++ return ret;
++ }
++ return ret;
++}
++
++static int psbfb_single_fb_probe(struct drm_device *dev)
++{
++ struct drm_crtc *crtc;
++ struct drm_connector *connector;
++ unsigned int fb_width = (unsigned) -1, fb_height = (unsigned) -1;
++ unsigned int surface_width = 0, surface_height = 0;
++ int new_fb = 0;
++ int crtc_count = 0;
++ int ret, i, conn_count = 0;
++ struct fb_info *info;
++ struct psbfb_par *par;
++ struct drm_mode_set *modeset = NULL;
++ struct drm_framebuffer *fb = NULL;
++ struct psb_framebuffer *psbfb = NULL;
++
++ /* first up get a count of crtcs now in use and
++ * new min/maxes width/heights */
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ if (drm_helper_crtc_in_use(crtc)) {
++ if (crtc->desired_mode) {
++ fb = crtc->fb;
++ if (crtc->desired_mode->hdisplay <
++ fb_width)
++ fb_width =
++ crtc->desired_mode->hdisplay;
++
++ if (crtc->desired_mode->vdisplay <
++ fb_height)
++ fb_height =
++ crtc->desired_mode->vdisplay;
++
++ if (crtc->desired_mode->hdisplay >
++ surface_width)
++ surface_width =
++ crtc->desired_mode->hdisplay;
++
++ if (crtc->desired_mode->vdisplay >
++ surface_height)
++ surface_height =
++ crtc->desired_mode->vdisplay;
++
++ }
++ crtc_count++;
++ }
++ }
++
++ if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
++ /* hmm everyone went away - assume VGA cable just fell out
++ and will come back later. */
++ return 0;
++ }
++
++ /* do we have an fb already? */
++ if (list_empty(&dev->mode_config.fb_kernel_list)) {
++ /* create an fb if we don't have one */
++ ret =
++ psbfb_create(dev, fb_width, fb_height, surface_width,
++ surface_height, &psbfb);
++ if (ret)
++ return -EINVAL;
++ new_fb = 1;
++ fb = &psbfb->base;
++ } else {
++ fb = list_first_entry(&dev->mode_config.fb_kernel_list,
++ struct drm_framebuffer, filp_head);
++
++ /* if someone hotplugs something bigger than we have already
++ * allocated, we are pwned. As really we can't resize an
++ * fbdev that is in the wild currently due to fbdev not really
++ * being designed for the lower layers moving stuff around
++ * under it. - so in the grand style of things - punt. */
++ if ((fb->width < surface_width)
++ || (fb->height < surface_height)) {
++ DRM_ERROR
++ ("Framebuffer not large enough to scale"
++ " console onto.\n");
++ return -EINVAL;
++ }
++ }
++
++ info = fb->fbdev;
++ par = info->par;
++
++ crtc_count = 0;
++ /* okay we need to setup new connector sets in the crtcs */
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ modeset = &psb_intel_crtc->mode_set;
++ modeset->fb = fb;
++ conn_count = 0;
++ list_for_each_entry(connector,
++ &dev->mode_config.connector_list,
++ head) {
++ if (connector->encoder)
++ if (connector->encoder->crtc ==
++ modeset->crtc) {
++ modeset->connectors[conn_count] =
++ connector;
++ conn_count++;
++ if (conn_count >
++ INTELFB_CONN_LIMIT)
++ BUG();
++ }
++ }
++
++ for (i = conn_count; i < INTELFB_CONN_LIMIT; i++)
++ modeset->connectors[i] = NULL;
++
++ par->crtc_ids[crtc_count++] = crtc->base.id;
++
++ modeset->num_connectors = conn_count;
++ if (modeset->mode != modeset->crtc->desired_mode)
++ modeset->mode = modeset->crtc->desired_mode;
++ }
++ par->crtc_count = crtc_count;
++
++ if (new_fb) {
++ info->var.pixclock = -1;
++ if (register_framebuffer(info) < 0)
++ return -EINVAL;
++ } else
++ psbfb_set_par(info);
++
++ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
++ info->fix.id);
++
++ /* Switch back to kernel console on panic */
++ panic_mode = *modeset;
++ atomic_notifier_chain_register(&panic_notifier_list, &paniced);
++ printk(KERN_INFO "registered panic notifier\n");
++
++ return 0;
++}
++
++int psbfb_probe(struct drm_device *dev)
++{
++ int ret = 0;
++
++ DRM_DEBUG("\n");
++
++ /* something has changed in the lower levels of hell - deal with it
++ here */
++
++ /* two modes : a) 1 fb to rule all crtcs.
++ b) one fb per crtc.
++ two actions 1) new connected device
++ 2) device removed.
++ case a/1 : if the fb surface isn't big enough -
++ resize the surface fb.
++ if the fb size isn't big enough - resize fb into surface.
++ if everything big enough configure the new crtc/etc.
++ case a/2 : undo the configuration
++ possibly resize down the fb to fit the new configuration.
++ case b/1 : see if it is on a new crtc - setup a new fb and add it.
++ case b/2 : teardown the new fb.
++ */
++
++ /* mode a first */
++ /* search for an fb */
++ if (0 /*i915_fbpercrtc == 1 */)
++ ret = psbfb_multi_fb_probe(dev);
++ else
++ ret = psbfb_single_fb_probe(dev);
++
++ return ret;
++}
++EXPORT_SYMBOL(psbfb_probe);
++
++int psbfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
++{
++ struct fb_info *info;
++
++ if (drm_psb_no_fb)
++ return 0;
++
++ info = fb->fbdev;
++
++ if (info) {
++ unregister_framebuffer(info);
++ framebuffer_release(info);
++ }
++
++ atomic_notifier_chain_unregister(&panic_notifier_list, &paniced);
++ memset(&panic_mode, 0, sizeof(struct drm_mode_set));
++ return 0;
++}
++EXPORT_SYMBOL(psbfb_remove);
++
++static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
++ struct drm_file *file_priv,
++ unsigned int *handle)
++{
++ /* JB: TODO currently we can't go from a bo to a handle with ttm */
++ (void) file_priv;
++ *handle = 0;
++ return 0;
++}
++
++static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
++{
++ struct drm_device *dev = fb->dev;
++ struct psb_framebuffer *psbfb = to_psb_fb(fb);
++
++ /*ummap gtt pages*/
++ psb_gtt_unmap_meminfo(dev, psbfb->hKernelMemInfo);
++
++ if (fb->fbdev)
++ psbfb_remove(dev, fb);
++
++ /* JB: TODO not drop, refcount buffer */
++ drm_framebuffer_cleanup(fb);
++
++ kfree(fb);
++}
++
++static const struct drm_mode_config_funcs psb_mode_funcs = {
++ .fb_create = psb_user_framebuffer_create,
++ .fb_changed = psbfb_probe,
++};
++
++static int psb_create_backlight_property(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv
++ = (struct drm_psb_private *) dev->dev_private;
++ struct drm_property *backlight;
++
++ if (dev_priv->backlight_property)
++ return 0;
++
++ backlight = drm_property_create(dev,
++ DRM_MODE_PROP_RANGE,
++ "backlight",
++ 2);
++ backlight->values[0] = 0;
++ backlight->values[1] = 100;
++
++ dev_priv->backlight_property = backlight;
++
++ return 0;
++}
++
++static void psb_setup_outputs(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct drm_connector *connector;
++
++ drm_mode_create_scaling_mode_property(dev);
++
++ psb_create_backlight_property(dev);
++
++ if (IS_MRST(dev)) {
++ if (dev_priv->iLVDS_enable)
++ /* Set up integrated LVDS for MRST */
++ mrst_lvds_init(dev, &dev_priv->mode_dev);
++ else {
++ /* Set up integrated MIPI for MRST */
++ mrst_dsi_init(dev, &dev_priv->mode_dev);
++ }
++ } else {
++ psb_intel_lvds_init(dev, &dev_priv->mode_dev);
++ psb_intel_sdvo_init(dev, SDVOB);
++ }
++
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct drm_encoder *encoder = &psb_intel_output->enc;
++ int crtc_mask = 0, clone_mask = 0;
++
++ /* valid crtcs */
++ switch (psb_intel_output->type) {
++ case INTEL_OUTPUT_SDVO:
++ crtc_mask = ((1 << 0) | (1 << 1));
++ clone_mask = (1 << INTEL_OUTPUT_SDVO);
++ break;
++ case INTEL_OUTPUT_LVDS:
++ if (IS_MRST(dev))
++ crtc_mask = (1 << 0);
++ else
++ crtc_mask = (1 << 1);
++
++ clone_mask = (1 << INTEL_OUTPUT_LVDS);
++ break;
++ case INTEL_OUTPUT_MIPI:
++ crtc_mask = (1 << 0);
++ clone_mask = (1 << INTEL_OUTPUT_MIPI);
++ break;
++ }
++ encoder->possible_crtcs = crtc_mask;
++ encoder->possible_clones =
++ psb_intel_connector_clones(dev, clone_mask);
++ }
++}
++
++static void *psb_bo_from_handle(struct drm_device *dev,
++ struct drm_file *file_priv,
++ unsigned int handle)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = IMG_NULL;
++ IMG_HANDLE hKernelMemInfo = (IMG_HANDLE)handle;
++ int ret;
++
++ ret = psb_get_meminfo_by_handle(hKernelMemInfo, &psKernelMemInfo);
++ if (ret) {
++ DRM_ERROR("Cannot get meminfo for handle %lx\n",
++ (IMG_UINT32)hKernelMemInfo);
++ return NULL;
++ }
++
++ return (void *)psKernelMemInfo;
++}
++
++static size_t psb_bo_size(struct drm_device *dev, void *bof)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = (PVRSRV_KERNEL_MEM_INFO *)bof;
++ return (size_t)psKernelMemInfo->ui32AllocSize;
++}
++
++static size_t psb_bo_offset(struct drm_device *dev, void *bof)
++{
++ struct psb_framebuffer *psbfb
++ = (struct psb_framebuffer *)bof;
++
++ return (size_t)psbfb->offset;
++}
++
++static int psb_bo_pin_for_scanout(struct drm_device *dev, void *bo)
++{
++#if 0 /* JB: Not used for the drop */
++ struct ttm_buffer_object *bo = bof;
++ We should do things like check if
++ the buffer is in a scanout : able
++ place.And make sure that its pinned.
++#endif
++ return 0;
++ }
++
++ static int psb_bo_unpin_for_scanout(struct drm_device *dev,
++ void *bo) {
++#if 0 /* JB: Not used for the drop */
++ struct ttm_buffer_object *bo = bof;
++#endif
++ return 0;
++ }
++
++ void psb_modeset_init(struct drm_device *dev)
++ {
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
++ int i;
++ int num_pipe;
++
++ /* Init mm functions */
++ mode_dev->bo_from_handle = psb_bo_from_handle;
++ mode_dev->bo_size = psb_bo_size;
++ mode_dev->bo_offset = psb_bo_offset;
++ mode_dev->bo_pin_for_scanout = psb_bo_pin_for_scanout;
++ mode_dev->bo_unpin_for_scanout = psb_bo_unpin_for_scanout;
++
++ drm_mode_config_init(dev);
++
++ dev->mode_config.min_width = 0;
++ dev->mode_config.min_height = 0;
++
++ dev->mode_config.funcs = (void *) &psb_mode_funcs;
++
++ dev->mode_config.max_width = 2048;
++ dev->mode_config.max_height = 2048;
++
++ /* set memory base */
++ /* MRST and PSB should use BAR 2*/
++ dev->mode_config.fb_base =
++ pci_resource_start(dev->pdev, 2);
++
++ if (IS_MRST(dev))
++ num_pipe = 1;
++ else
++ num_pipe = 2;
++
++
++ for (i = 0; i < num_pipe; i++)
++ psb_intel_crtc_init(dev, i, mode_dev);
++
++ psb_setup_outputs(dev);
++
++ /* setup fbs */
++ /* drm_initial_config(dev); */
++ }
++
++ void psb_modeset_cleanup(struct drm_device *dev)
++ {
++ drm_mode_config_cleanup(dev);
++ }
+diff --git a/drivers/gpu/drm/mrst/drv/psb_fb.h b/drivers/gpu/drm/mrst/drv/psb_fb.h
+new file mode 100644
+index 0000000..1986eca
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_fb.h
+@@ -0,0 +1,49 @@
++/*
++ * Copyright (c) 2008, Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ *
++ */
++
++#ifndef _PSB_FB_H_
++#define _PSB_FB_H_
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++
++/*IMG Headers*/
++#include "servicesint.h"
++
++struct psb_framebuffer {
++ struct drm_framebuffer base;
++ struct address_space *addr_space;
++ struct ttm_buffer_object *bo;
++ /* struct ttm_bo_kmap_obj kmap; */
++ PVRSRV_KERNEL_MEM_INFO *pvrBO;
++ IMG_HANDLE hKernelMemInfo;
++ uint32_t size;
++ uint32_t offset;
++};
++
++#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
++
++
++extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask);
++
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/drv/psb_fence.c b/drivers/gpu/drm/mrst/drv/psb_fence.c
+new file mode 100644
+index 0000000..b630fc2
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_fence.c
+@@ -0,0 +1,158 @@
++/*
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ *
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_msvdx.h"
++#include "lnc_topaz.h"
++
++static void psb_fence_poll(struct ttm_fence_device *fdev,
++ uint32_t fence_class, uint32_t waiting_types)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(fdev, struct drm_psb_private, fdev);
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++ uint32_t sequence = 0;
++
++ if (unlikely(!dev_priv))
++ return;
++
++ if (waiting_types) {
++ switch (fence_class) {
++ case PSB_ENGINE_VIDEO:
++ sequence = msvdx_priv->msvdx_current_sequence;
++ break;
++ case LNC_ENGINE_ENCODE:
++ sequence = *((uint32_t *)topaz_priv->topaz_sync_addr);
++ break;
++ default:
++ break;
++ }
++
++ ttm_fence_handler(fdev, fence_class, sequence,
++ _PSB_FENCE_TYPE_EXE, 0);
++
++ }
++}
++
++void psb_fence_error(struct drm_device *dev,
++ uint32_t fence_class,
++ uint32_t sequence, uint32_t type, int error)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct ttm_fence_device *fdev = &dev_priv->fdev;
++ unsigned long irq_flags;
++ struct ttm_fence_class_manager *fc =
++ &fdev->fence_class[fence_class];
++
++ BUG_ON(fence_class >= PSB_NUM_ENGINES);
++ write_lock_irqsave(&fc->lock, irq_flags);
++ ttm_fence_handler(fdev, fence_class, sequence, type, error);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++}
++
++int psb_fence_emit_sequence(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t flags, uint32_t *sequence,
++ unsigned long *timeout_jiffies)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(fdev, struct drm_psb_private, fdev);
++ uint32_t seq = 0;
++
++ if (!dev_priv)
++ return -EINVAL;
++
++ if (fence_class >= PSB_NUM_ENGINES)
++ return -EINVAL;
++
++ spin_lock(&dev_priv->sequence_lock);
++ seq = dev_priv->sequence[fence_class]++;
++ spin_unlock(&dev_priv->sequence_lock);
++
++ *sequence = seq;
++ *timeout_jiffies = jiffies + DRM_HZ * 3;
++
++ return 0;
++}
++
++static void psb_fence_lockup(struct ttm_fence_object *fence,
++ uint32_t fence_types)
++{
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++
++ if (fence->fence_class == LNC_ENGINE_ENCODE) {
++ DRM_ERROR("TOPAZ timeout (probable lockup)\n");
++
++ write_lock(&fc->lock);
++ lnc_topaz_handle_timeout(fence->fdev);
++ ttm_fence_handler(fence->fdev, fence->fence_class,
++ fence->sequence, fence_types, -EBUSY);
++ write_unlock(&fc->lock);
++ } else {
++ DRM_ERROR("MSVDX timeout (probable lockup)\n");
++ write_lock(&fc->lock);
++ ttm_fence_handler(fence->fdev, fence->fence_class,
++ fence->sequence, fence_types, -EBUSY);
++ write_unlock(&fc->lock);
++ }
++}
++
++void psb_fence_handler(struct drm_device *dev, uint32_t fence_class)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct ttm_fence_device *fdev = &dev_priv->fdev;
++ struct ttm_fence_class_manager *fc =
++ &fdev->fence_class[fence_class];
++ unsigned long irq_flags;
++
++ write_lock_irqsave(&fc->lock, irq_flags);
++ psb_fence_poll(fdev, fence_class, fc->waiting_types);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++}
++
++
++static struct ttm_fence_driver psb_ttm_fence_driver = {
++ .has_irq = NULL,
++ .emit = psb_fence_emit_sequence,
++ .flush = NULL,
++ .poll = psb_fence_poll,
++ .needed_flush = NULL,
++ .wait = NULL,
++ .signaled = NULL,
++ .lockup = psb_fence_lockup,
++};
++
++int psb_ttm_fence_device_init(struct ttm_fence_device *fdev)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(fdev, struct drm_psb_private, fdev);
++ struct ttm_fence_class_init fci = {.wrap_diff = (1 << 30),
++ .flush_diff = (1 << 29),
++ .sequence_mask = 0xFFFFFFFF
++ };
++
++ return ttm_fence_device_init(PSB_NUM_ENGINES,
++ dev_priv->mem_global_ref.object,
++ fdev, &fci, 1,
++ &psb_ttm_fence_driver);
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_gtt.c b/drivers/gpu/drm/mrst/drv/psb_gtt.c
+new file mode 100644
+index 0000000..5f66e75
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_gtt.c
+@@ -0,0 +1,1040 @@
++/*
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
++ */
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_pvr_glue.h"
++
++static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
++{
++ uint32_t mask = PSB_PTE_VALID;
++
++ if (type & PSB_MMU_CACHED_MEMORY)
++ mask |= PSB_PTE_CACHED;
++ if (type & PSB_MMU_RO_MEMORY)
++ mask |= PSB_PTE_RO;
++ if (type & PSB_MMU_WO_MEMORY)
++ mask |= PSB_PTE_WO;
++
++ return (pfn << PAGE_SHIFT) | mask;
++}
++
++struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
++{
++ struct psb_gtt *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
++
++ if (!tmp)
++ return NULL;
++
++ init_rwsem(&tmp->sem);
++ tmp->dev = dev;
++
++ return tmp;
++}
++
++void psb_gtt_takedown(struct psb_gtt *pg, int free)
++{
++ struct drm_psb_private *dev_priv = pg->dev->dev_private;
++
++ if (!pg)
++ return;
++
++ if (pg->gtt_map) {
++ iounmap(pg->gtt_map);
++ pg->gtt_map = NULL;
++ }
++ if (pg->initialized) {
++ pci_write_config_word(pg->dev->pdev, PSB_GMCH_CTRL,
++ pg->gmch_ctrl);
++ PSB_WVDC32(pg->pge_ctl, PSB_PGETBL_CTL);
++ (void) PSB_RVDC32(PSB_PGETBL_CTL);
++ }
++ if (free)
++ kfree(pg);
++}
++
++int psb_gtt_init(struct psb_gtt *pg, int resume)
++{
++ struct drm_device *dev = pg->dev;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ unsigned gtt_pages;
++ unsigned long stolen_size, vram_stolen_size, ci_stolen_size;
++ unsigned long rar_stolen_size;
++ unsigned i, num_pages;
++ unsigned pfn_base;
++ uint32_t ci_pages, vram_pages;
++ uint32_t tt_pages;
++ uint32_t *ttm_gtt_map;
++ uint32_t dvmt_mode = 0;
++
++ int ret = 0;
++ uint32_t pte;
++
++ pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &pg->gmch_ctrl);
++ pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
++ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
++
++ pg->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
++ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
++ (void) PSB_RVDC32(PSB_PGETBL_CTL);
++
++ pg->initialized = 1;
++
++ pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK;
++
++ pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
++ /* fix me: video mmu has hw bug to access 0x0D0000000,
++ * then make gatt start at 0x0e000,0000 */
++ pg->mmu_gatt_start = 0xE0000000;
++ pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
++ gtt_pages =
++ pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;
++ pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
++ >> PAGE_SHIFT;
++
++ pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base);
++ vram_stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE;
++
++ /* CI is not included in the stolen size since the TOPAZ MMU bug */
++ ci_stolen_size = dev_priv->ci_region_size;
++ /* Don't add CI & RAR share buffer space
++ * managed by TTM to stolen_size */
++ stolen_size = vram_stolen_size;
++
++ rar_stolen_size = dev_priv->rar_region_size;
++
++ printk(KERN_INFO"GMMADR(region 0) start: 0x%08x (%dM).\n",
++ pg->gatt_start, pg->gatt_pages/256);
++ printk(KERN_INFO"GTTADR(region 3) start: 0x%08x (can map %dM RAM), and actual RAM base 0x%08x.\n",
++ pg->gtt_start, gtt_pages * 4, pg->gtt_phys_start);
++ printk(KERN_INFO"Stole memory information \n");
++ printk(KERN_INFO" base in RAM: 0x%x \n", pg->stolen_base);
++ printk(KERN_INFO" size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
++ vram_stolen_size/1024);
++ dvmt_mode = (pg->gmch_ctrl >> 4) & 0x7;
++ printk(KERN_INFO" the correct size should be: %dM(dvmt mode=%d) \n",
++ (dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
++
++ if (ci_stolen_size > 0)
++ printk(KERN_INFO"CI Stole memory: RAM base = 0x%08x, size = %lu M \n",
++ dev_priv->ci_region_start,
++ ci_stolen_size / 1024 / 1024);
++ if (rar_stolen_size > 0)
++ printk(KERN_INFO"RAR Stole memory: RAM base = 0x%08x, size = %lu M \n",
++ dev_priv->rar_region_start,
++ rar_stolen_size / 1024 / 1024);
++
++ if (resume && (gtt_pages != pg->gtt_pages) &&
++ (stolen_size != pg->stolen_size)) {
++ DRM_ERROR("GTT resume error.\n");
++ ret = -EINVAL;
++ goto out_err;
++ }
++
++ pg->gtt_pages = gtt_pages;
++ pg->stolen_size = stolen_size;
++ pg->vram_stolen_size = vram_stolen_size;
++ pg->ci_stolen_size = ci_stolen_size;
++ pg->rar_stolen_size = rar_stolen_size;
++ pg->gtt_map =
++ ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
++ if (!pg->gtt_map) {
++ DRM_ERROR("Failure to map gtt.\n");
++ ret = -ENOMEM;
++ goto out_err;
++ }
++
++ pg->vram_addr = ioremap_wc(pg->stolen_base, stolen_size);
++ if (!pg->vram_addr) {
++ DRM_ERROR("Failure to map stolen base.\n");
++ ret = -ENOMEM;
++ goto out_err;
++ }
++
++ DRM_DEBUG("%s: vram kernel virtual address %p\n", pg->vram_addr);
++
++ tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
++ (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
++
++ ttm_gtt_map = pg->gtt_map + tt_pages / 2;
++
++ /*
++ * insert vram stolen pages.
++ */
++
++ pfn_base = pg->stolen_base >> PAGE_SHIFT;
++ vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
++ printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
++ num_pages, pfn_base, 0);
++ for (i = 0; i < num_pages; ++i) {
++ pte = psb_gtt_mask_pte(pfn_base + i, 0);
++ iowrite32(pte, pg->gtt_map + i);
++ }
++
++ /*
++ * Init rest of gtt managed by IMG.
++ */
++ pfn_base = page_to_pfn(dev_priv->scratch_page);
++ pte = psb_gtt_mask_pte(pfn_base, 0);
++ for (; i < tt_pages / 2 - 1; ++i)
++ iowrite32(pte, pg->gtt_map + i);
++
++ /*
++ * insert CI stolen pages
++ */
++
++ pfn_base = dev_priv->ci_region_start >> PAGE_SHIFT;
++ ci_pages = num_pages = ci_stolen_size >> PAGE_SHIFT;
++ printk(KERN_INFO"Set up %d CI stolen pages starting at 0x%08x, GTT offset %dK\n",
++ num_pages, pfn_base, (ttm_gtt_map - pg->gtt_map) * 4);
++ for (i = 0; i < num_pages; ++i) {
++ pte = psb_gtt_mask_pte(pfn_base + i, 0);
++ iowrite32(pte, ttm_gtt_map + i);
++ }
++
++ /*
++ * insert RAR stolen pages
++ */
++ if (rar_stolen_size != 0) {
++ pfn_base = dev_priv->rar_region_start >> PAGE_SHIFT;
++ num_pages = rar_stolen_size >> PAGE_SHIFT;
++ printk(KERN_INFO"Set up %d RAR stolen pages starting at 0x%08x, GTT offset %dK\n",
++ num_pages, pfn_base,
++ (ttm_gtt_map - pg->gtt_map + i) * 4);
++ for (; i < num_pages + ci_pages; ++i) {
++ pte = psb_gtt_mask_pte(pfn_base + i - ci_pages, 0);
++ iowrite32(pte, ttm_gtt_map + i);
++ }
++ }
++ /*
++ * Init rest of gtt managed by TTM.
++ */
++
++ pfn_base = page_to_pfn(dev_priv->scratch_page);
++ pte = psb_gtt_mask_pte(pfn_base, 0);
++ PSB_DEBUG_INIT("Initializing the rest of a total "
++ "of %d gtt pages.\n", pg->gatt_pages);
++
++ for (; i < pg->gatt_pages - tt_pages / 2; ++i)
++ iowrite32(pte, ttm_gtt_map + i);
++ (void) ioread32(pg->gtt_map + i - 1);
++
++ return 0;
++
++out_err:
++ psb_gtt_takedown(pg, 0);
++ return ret;
++}
++
++int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
++ unsigned offset_pages, unsigned num_pages,
++ unsigned desired_tile_stride,
++ unsigned hw_tile_stride, int type)
++{
++ unsigned rows = 1;
++ unsigned add;
++ unsigned row_add;
++ unsigned i;
++ unsigned j;
++ uint32_t *cur_page = NULL;
++ uint32_t pte;
++
++ if (hw_tile_stride)
++ rows = num_pages / desired_tile_stride;
++ else
++ desired_tile_stride = num_pages;
++
++ add = desired_tile_stride;
++ row_add = hw_tile_stride;
++
++ down_read(&pg->sem);
++ for (i = 0; i < rows; ++i) {
++ cur_page = pg->gtt_map + offset_pages;
++ for (j = 0; j < desired_tile_stride; ++j) {
++ pte =
++ psb_gtt_mask_pte(page_to_pfn(*pages++), type);
++ iowrite32(pte, cur_page++);
++ }
++ offset_pages += add;
++ }
++ (void) ioread32(cur_page - 1);
++ up_read(&pg->sem);
++
++ return 0;
++}
++
++int psb_gtt_insert_phys_addresses(struct psb_gtt *pg, IMG_CPU_PHYADDR *pPhysFrames,
++ unsigned offset_pages, unsigned num_pages, int type)
++{
++ unsigned j;
++ uint32_t *cur_page = NULL;
++ uint32_t pte;
++
++ //printk("Allocatng IMG GTT mem at %x (pages %d)\n",offset_pages,num_pages);
++ down_read(&pg->sem);
++
++ cur_page = pg->gtt_map + offset_pages;
++ for (j = 0; j < num_pages; ++j)
++ {
++ pte = psb_gtt_mask_pte( (pPhysFrames++)->uiAddr >> PAGE_SHIFT, type);
++ iowrite32(pte, cur_page++);
++ //printk("PTE %d: %x/%x\n",j,(pPhysFrames-1)->uiAddr,pte);
++ }
++ (void) ioread32(cur_page - 1);
++
++ up_read(&pg->sem);
++
++ return 0;
++}
++
++int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
++ unsigned num_pages, unsigned desired_tile_stride,
++ unsigned hw_tile_stride)
++{
++ struct drm_psb_private *dev_priv = pg->dev->dev_private;
++ unsigned rows = 1;
++ unsigned add;
++ unsigned row_add;
++ unsigned i;
++ unsigned j;
++ uint32_t *cur_page = NULL;
++ unsigned pfn_base = page_to_pfn(dev_priv->scratch_page);
++ uint32_t pte = psb_gtt_mask_pte(pfn_base, 0);
++
++ if (hw_tile_stride)
++ rows = num_pages / desired_tile_stride;
++ else
++ desired_tile_stride = num_pages;
++
++ add = desired_tile_stride;
++ row_add = hw_tile_stride;
++
++ down_read(&pg->sem);
++ for (i = 0; i < rows; ++i) {
++ cur_page = pg->gtt_map + offset_pages;
++ for (j = 0; j < desired_tile_stride; ++j)
++ iowrite32(pte, cur_page++);
++
++ offset_pages += add;
++ }
++ (void) ioread32(cur_page - 1);
++ up_read(&pg->sem);
++
++ return 0;
++}
++
++int psb_gtt_mm_init(struct psb_gtt *pg)
++{
++ struct psb_gtt_mm *gtt_mm;
++ struct drm_psb_private *dev_priv = pg->dev->dev_private;
++ struct drm_open_hash *ht;
++ struct drm_mm *mm;
++ int ret;
++ uint32_t tt_start;
++ uint32_t tt_size;
++
++ if (!pg || !pg->initialized) {
++ DRM_DEBUG("Invalid gtt struct\n");
++ return -EINVAL;
++ }
++
++ gtt_mm = kzalloc(sizeof(struct psb_gtt_mm), GFP_KERNEL);
++ if (!gtt_mm)
++ return -ENOMEM;
++
++ spin_lock_init(&gtt_mm->lock);
++
++ ht = &gtt_mm->hash;
++ ret = drm_ht_create(ht, 20);
++ if (ret) {
++ DRM_DEBUG("Create hash table failed(%d)\n", ret);
++ goto err_free;
++ }
++
++ tt_start = (pg->stolen_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ tt_start = (tt_start < pg->gatt_pages) ? tt_start : pg->gatt_pages;
++ tt_size = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
++ (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
++
++ mm = &gtt_mm->base;
++
++ /*will use tt_start ~ 128M for IMG TT buffers*/
++ ret = drm_mm_init(mm, tt_start, ((tt_size / 2) - tt_start));
++ if (ret) {
++ DRM_DEBUG("drm_mm_int error(%d)\n", ret);
++ goto err_mm_init;
++ }
++
++ gtt_mm->count = 0;
++
++ dev_priv->gtt_mm = gtt_mm;
++
++ DRM_INFO("PSB GTT mem manager ready, tt_start %ld, tt_size %ld pages\n",
++ (unsigned long)tt_start,
++ (unsigned long)((tt_size / 2) - tt_start));
++ return 0;
++err_mm_init:
++ drm_ht_remove(ht);
++
++err_free:
++ kfree(gtt_mm);
++ return ret;
++}
++
++/**
++ * Delete all hash entries;
++ */
++void psb_gtt_mm_takedown(void)
++{
++ return;
++}
++
++static int psb_gtt_mm_get_ht_by_pid_locked(struct psb_gtt_mm *mm,
++ u32 tgid,
++ struct psb_gtt_hash_entry **hentry)
++{
++ struct drm_hash_item *entry;
++ struct psb_gtt_hash_entry *psb_entry;
++ int ret;
++
++ ret = drm_ht_find_item(&mm->hash, tgid, &entry);
++ if (ret) {
++ DRM_DEBUG("Cannot find entry pid=%ld\n", tgid);
++ return ret;
++ }
++
++ psb_entry = container_of(entry, struct psb_gtt_hash_entry, item);
++ if (!psb_entry) {
++ DRM_DEBUG("Invalid entry");
++ return -EINVAL;
++ }
++
++ *hentry = psb_entry;
++ return 0;
++}
++
++
++static int psb_gtt_mm_insert_ht_locked(struct psb_gtt_mm *mm,
++ u32 tgid,
++ struct psb_gtt_hash_entry *hentry)
++{
++ struct drm_hash_item *item;
++ int ret;
++
++ if (!hentry) {
++ DRM_DEBUG("Invalid parameters\n");
++ return -EINVAL;
++ }
++
++ item = &hentry->item;
++ item->key = tgid;
++
++ /**
++ * NOTE: drm_ht_insert_item will perform such a check
++ ret = psb_gtt_mm_get_ht_by_pid(mm, tgid, &tmp);
++ if (!ret) {
++ DRM_DEBUG("Entry already exists for pid %ld\n", tgid);
++ return -EAGAIN;
++ }
++ */
++
++ /*Insert the given entry*/
++ ret = drm_ht_insert_item(&mm->hash, item);
++ if (ret) {
++ DRM_DEBUG("Insert failure\n");
++ return ret;
++ }
++
++ mm->count++;
++
++ return 0;
++}
++
++static int psb_gtt_mm_alloc_insert_ht(struct psb_gtt_mm *mm,
++ u32 tgid,
++ struct psb_gtt_hash_entry **entry)
++{
++ struct psb_gtt_hash_entry *hentry;
++ int ret;
++
++ /*if the hentry for this tgid exists, just get it and return*/
++ spin_lock(&mm->lock);
++ ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &hentry);
++ if (!ret) {
++ DRM_DEBUG("Entry for tgid %ld exist, hentry %p\n",
++ tgid, hentry);
++ *entry = hentry;
++ spin_unlock(&mm->lock);
++ return 0;
++ }
++ spin_unlock(&mm->lock);
++
++ DRM_DEBUG("Entry for tgid %ld doesn't exist, will create it\n", tgid);
++
++ hentry = kzalloc(sizeof(struct psb_gtt_hash_entry), GFP_KERNEL);
++ if (!hentry) {
++ DRM_DEBUG("Kmalloc failled\n");
++ return -ENOMEM;
++ }
++
++ ret = drm_ht_create(&hentry->ht, 20);
++ if (ret) {
++ DRM_DEBUG("Create hash table failed\n");
++ return ret;
++ }
++
++ spin_lock(&mm->lock);
++ ret = psb_gtt_mm_insert_ht_locked(mm, tgid, hentry);
++ spin_unlock(&mm->lock);
++
++ if (!ret)
++ *entry = hentry;
++
++ return ret;
++}
++
++static struct psb_gtt_hash_entry *
++psb_gtt_mm_remove_ht_locked(struct psb_gtt_mm *mm, u32 tgid)
++{
++ struct psb_gtt_hash_entry *tmp;
++ int ret;
++
++ ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &tmp);
++ if (ret) {
++ DRM_DEBUG("Cannot find entry pid %ld\n", tgid);
++ return NULL;
++ }
++
++ /*remove it from ht*/
++ drm_ht_remove_item(&mm->hash, &tmp->item);
++
++ mm->count--;
++
++ return tmp;
++}
++
++static int psb_gtt_mm_remove_free_ht_locked(struct psb_gtt_mm *mm, u32 tgid)
++{
++ struct psb_gtt_hash_entry *entry;
++
++ entry = psb_gtt_mm_remove_ht_locked(mm, tgid);
++
++ if (!entry) {
++ DRM_DEBUG("Invalid entry");
++ return -EINVAL;
++ }
++
++ /*delete ht*/
++ drm_ht_remove(&entry->ht);
++
++ /*free this entry*/
++ kfree(entry);
++ return 0;
++}
++
++static int
++psb_gtt_mm_get_mem_mapping_locked(struct drm_open_hash *ht,
++ u32 key,
++ struct psb_gtt_mem_mapping **hentry)
++{
++ struct drm_hash_item *entry;
++ struct psb_gtt_mem_mapping *mapping;
++ int ret;
++
++ ret = drm_ht_find_item(ht, key, &entry);
++ if (ret) {
++ DRM_DEBUG("Cannot find key %ld\n", key);
++ return ret;
++ }
++
++ mapping = container_of(entry, struct psb_gtt_mem_mapping, item);
++ if (!mapping) {
++ DRM_DEBUG("Invalid entry\n");
++ return -EINVAL;
++ }
++
++ *hentry = mapping;
++ return 0;
++}
++
++static int
++psb_gtt_mm_insert_mem_mapping_locked(struct drm_open_hash *ht,
++ u32 key,
++ struct psb_gtt_mem_mapping *hentry)
++{
++ struct drm_hash_item *item;
++ struct psb_gtt_hash_entry *entry;
++ int ret;
++
++ if (!hentry) {
++ DRM_DEBUG("hentry is NULL\n");
++ return -EINVAL;
++ }
++
++ item = &hentry->item;
++ item->key = key;
++
++ ret = drm_ht_insert_item(ht, item);
++ if (ret) {
++ DRM_DEBUG("insert_item failed\n");
++ return ret;
++ }
++
++ entry = container_of(ht, struct psb_gtt_hash_entry, ht);
++ if (entry)
++ entry->count++;
++
++ return 0;
++}
++
++static int
++psb_gtt_mm_alloc_insert_mem_mapping(struct psb_gtt_mm *mm,
++ struct drm_open_hash *ht,
++ u32 key,
++ struct drm_mm_node *node,
++ struct psb_gtt_mem_mapping **entry)
++{
++ struct psb_gtt_mem_mapping *mapping;
++ int ret;
++
++ if (!node || !ht) {
++ DRM_DEBUG("parameter error\n");
++ return -EINVAL;
++ }
++
++ /*try to get this mem_map */
++ spin_lock(&mm->lock);
++ ret = psb_gtt_mm_get_mem_mapping_locked(ht, key, &mapping);
++ if (!ret) {
++ DRM_DEBUG("mapping entry for key %ld exists, entry %p\n",
++ key, mapping);
++ *entry = mapping;
++ spin_unlock(&mm->lock);
++ return 0;
++ }
++ spin_unlock(&mm->lock);
++
++ DRM_DEBUG("Mapping entry for key %ld doesn't exist, will create it\n",
++ key);
++
++ mapping = kzalloc(sizeof(struct psb_gtt_mem_mapping), GFP_KERNEL);
++ if (!mapping) {
++ DRM_DEBUG("kmalloc failed\n");
++ return -ENOMEM;
++ }
++
++ mapping->node = node;
++
++ spin_lock(&mm->lock);
++ ret = psb_gtt_mm_insert_mem_mapping_locked(ht, key, mapping);
++ spin_unlock(&mm->lock);
++
++ if (!ret)
++ *entry = mapping;
++
++ return ret;
++}
++
++static struct psb_gtt_mem_mapping *
++psb_gtt_mm_remove_mem_mapping_locked(struct drm_open_hash *ht, u32 key)
++{
++ struct psb_gtt_mem_mapping *tmp;
++ struct psb_gtt_hash_entry *entry;
++ int ret;
++
++ ret = psb_gtt_mm_get_mem_mapping_locked(ht, key, &tmp);
++ if (ret) {
++ DRM_DEBUG("Cannot find key %ld\n", key);
++ return NULL;
++ }
++
++ drm_ht_remove_item(ht, &tmp->item);
++
++ entry = container_of(ht, struct psb_gtt_hash_entry, ht);
++ if (entry)
++ entry->count--;
++
++ return tmp;
++}
++
++static int psb_gtt_mm_remove_free_mem_mapping_locked(struct drm_open_hash *ht,
++ u32 key,
++ struct drm_mm_node **node)
++{
++ struct psb_gtt_mem_mapping *entry;
++
++ entry = psb_gtt_mm_remove_mem_mapping_locked(ht, key);
++ if (!entry) {
++ DRM_DEBUG("entry is NULL\n");
++ return -EINVAL;
++ }
++
++ *node = entry->node;
++
++ kfree(entry);
++ return 0;
++}
++
++static int psb_gtt_add_node(struct psb_gtt_mm *mm,
++ u32 tgid,
++ u32 key,
++ struct drm_mm_node *node,
++ struct psb_gtt_mem_mapping **entry)
++{
++ struct psb_gtt_hash_entry *hentry;
++ struct psb_gtt_mem_mapping *mapping;
++ int ret;
++
++ ret = psb_gtt_mm_alloc_insert_ht(mm, tgid, &hentry);
++ if (ret) {
++ DRM_DEBUG("alloc_insert failed\n");
++ return ret;
++ }
++
++ ret = psb_gtt_mm_alloc_insert_mem_mapping(mm,
++ &hentry->ht,
++ key,
++ node,
++ &mapping);
++ if (ret) {
++ DRM_DEBUG("mapping alloc_insert failed\n");
++ return ret;
++ }
++
++ *entry = mapping;
++
++ return 0;
++}
++
++static int psb_gtt_remove_node(struct psb_gtt_mm *mm,
++ u32 tgid,
++ u32 key,
++ struct drm_mm_node **node)
++{
++ struct psb_gtt_hash_entry *hentry;
++ struct drm_mm_node *tmp;
++ int ret;
++
++ spin_lock(&mm->lock);
++ ret = psb_gtt_mm_get_ht_by_pid_locked(mm, tgid, &hentry);
++ if (ret) {
++ DRM_DEBUG("Cannot find entry for pid %ld\n", tgid);
++ spin_unlock(&mm->lock);
++ return ret;
++ }
++ spin_unlock(&mm->lock);
++
++ /*remove mapping entry*/
++ spin_lock(&mm->lock);
++ ret = psb_gtt_mm_remove_free_mem_mapping_locked(&hentry->ht,
++ key,
++ &tmp);
++ if (ret) {
++ DRM_DEBUG("remove_free failed\n");
++ spin_unlock(&mm->lock);
++ return ret;
++ }
++
++ *node = tmp;
++
++ /*check the count of mapping entry*/
++ if (!hentry->count) {
++ DRM_DEBUG("count of mapping entry is zero, tgid=%ld\n", tgid);
++ psb_gtt_mm_remove_free_ht_locked(mm, tgid);
++ }
++
++ spin_unlock(&mm->lock);
++
++ return 0;
++}
++
++static int psb_gtt_mm_alloc_mem(struct psb_gtt_mm *mm,
++ uint32_t pages,
++ uint32_t align,
++ struct drm_mm_node **node)
++{
++ struct drm_mm_node *tmp_node;
++ int ret;
++
++ do {
++ ret = drm_mm_pre_get(&mm->base);
++ if (unlikely(ret)) {
++ DRM_DEBUG("drm_mm_pre_get error\n");
++ return ret;
++ }
++
++ spin_lock(&mm->lock);
++ tmp_node = drm_mm_search_free(&mm->base, pages, align, 1);
++ if (unlikely(!tmp_node)) {
++ DRM_DEBUG("No free node found\n");
++ spin_unlock(&mm->lock);
++ break;
++ }
++
++ tmp_node = drm_mm_get_block_atomic(tmp_node, pages, align);
++ spin_unlock(&mm->lock);
++ } while (!tmp_node);
++
++ if (!tmp_node) {
++ DRM_DEBUG("Node allocation failed\n");
++ return -ENOMEM;
++ }
++
++ *node = tmp_node;
++ return 0;
++}
++
++static void psb_gtt_mm_free_mem(struct psb_gtt_mm *mm, struct drm_mm_node *node)
++{
++ spin_lock(&mm->lock);
++ drm_mm_put_block(node);
++ spin_unlock(&mm->lock);
++}
++
++int psb_gtt_map_meminfo(struct drm_device *dev,
++ IMG_HANDLE hKernelMemInfo,
++ uint32_t *offset)
++{
++ struct drm_psb_private *dev_priv
++ = (struct drm_psb_private *)dev->dev_private;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ struct psb_gtt_mm *mm = dev_priv->gtt_mm;
++ struct psb_gtt *pg = dev_priv->pg;
++ uint32_t size, pages, offset_pages;
++ void *kmem;
++ struct drm_mm_node *node;
++ struct page **page_list;
++ struct psb_gtt_mem_mapping *mapping = NULL;
++ int ret;
++
++ ret = psb_get_meminfo_by_handle(hKernelMemInfo, &psKernelMemInfo);
++ if (ret) {
++ DRM_DEBUG("Cannot find kernelMemInfo handle %ld\n",
++ hKernelMemInfo);
++ return -EINVAL;
++ }
++
++ DRM_DEBUG("Got psKernelMemInfo %p for handle %lx\n",
++ psKernelMemInfo, (u32)hKernelMemInfo);
++
++ size = psKernelMemInfo->ui32AllocSize;
++ kmem = psKernelMemInfo->pvLinAddrKM;
++ pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++
++ DRM_DEBUG("KerMemInfo size %ld, cpuVadr %lx, pages %ld, osMemHdl %lx\n",
++ size, kmem, pages, psKernelMemInfo->sMemBlk.hOSMemHandle);
++
++ if (!kmem)
++ DRM_DEBUG("kmem is NULL");
++
++ /*get pages*/
++ ret = psb_get_pages_by_mem_handle(psKernelMemInfo->sMemBlk.hOSMemHandle,
++ &page_list);
++ if (ret) {
++ DRM_DEBUG("get pages error\n");
++ return ret;
++ }
++
++ DRM_DEBUG("get %ld pages\n", pages);
++
++ /*alloc memory in TT apeture*/
++ ret = psb_gtt_mm_alloc_mem(mm, pages, 0, &node);
++ if (ret) {
++ DRM_DEBUG("alloc TT memory error\n");
++ goto failed_pages_alloc;
++ }
++
++ /*update psb_gtt_mm*/
++ ret = psb_gtt_add_node(mm,
++ (u32)psb_get_tgid(),
++ (u32)hKernelMemInfo,
++ node,
++ &mapping);
++ if (ret) {
++ DRM_DEBUG("add_node failed");
++ goto failed_add_node;
++ }
++
++ node = mapping->node;
++ offset_pages = node->start;
++
++ DRM_DEBUG("get free node for %ld pages, offset %ld pages",
++ pages, offset_pages);
++
++ /*update gtt*/
++ psb_gtt_insert_pages(pg, page_list,
++ (unsigned)offset_pages,
++ (unsigned)pages,
++ 0,
++ 0,
++ 0);
++
++ *offset = offset_pages;
++ return 0;
++
++failed_add_node:
++ psb_gtt_mm_free_mem(mm, node);
++failed_pages_alloc:
++ kfree(page_list);
++ return ret;
++}
++
++int psb_gtt_unmap_meminfo(struct drm_device *dev, IMG_HANDLE hKernelMemInfo)
++{
++ struct drm_psb_private *dev_priv
++ = (struct drm_psb_private *)dev->dev_private;
++ struct psb_gtt_mm *mm = dev_priv->gtt_mm;
++ struct psb_gtt *pg = dev_priv->pg;
++ uint32_t pages, offset_pages;
++ struct drm_mm_node *node;
++ int ret;
++
++ ret = psb_gtt_remove_node(mm,
++ (u32)psb_get_tgid(),
++ (u32)hKernelMemInfo,
++ &node);
++ if (ret) {
++ DRM_DEBUG("remove node failed\n");
++ return ret;
++ }
++
++ /*remove gtt entries*/
++ offset_pages = node->start;
++ pages = node->size;
++
++ psb_gtt_remove_pages(pg, offset_pages, pages, 0, 0);
++
++
++ /*free tt node*/
++
++ psb_gtt_mm_free_mem(mm, node);
++ return 0;
++}
++
++int psb_gtt_map_meminfo_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct psb_gtt_mapping_arg *arg
++ = (struct psb_gtt_mapping_arg *)data;
++ uint32_t *offset_pages = &arg->offset_pages;
++
++ DRM_DEBUG("\n");
++
++ return psb_gtt_map_meminfo(dev, arg->hKernelMemInfo, offset_pages);
++}
++
++int psb_gtt_unmap_meminfo_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++
++ struct psb_gtt_mapping_arg *arg
++ = (struct psb_gtt_mapping_arg *)data;
++
++ DRM_DEBUG("\n");
++
++ return psb_gtt_unmap_meminfo(dev, arg->hKernelMemInfo);
++}
++
++int psb_gtt_map_pvr_memory(struct drm_device *dev,
++ unsigned int hHandle,
++ unsigned int ui32TaskId,
++ IMG_CPU_PHYADDR *pPages,
++ unsigned int ui32PagesNum,
++ unsigned int *ui32Offset)
++{
++ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
++ struct psb_gtt_mm * mm = dev_priv->gtt_mm;
++ struct psb_gtt * pg = dev_priv->pg;
++
++ uint32_t size, pages, offset_pages;
++ struct drm_mm_node * node = NULL;
++ struct psb_gtt_mem_mapping * mapping = NULL;
++ int ret;
++
++ size = ui32PagesNum * PAGE_SIZE;
++ pages = 0;
++
++ /*alloc memory in TT apeture*/
++ ret = psb_gtt_mm_alloc_mem(mm, ui32PagesNum, 0, &node);
++ if(ret)
++ {
++ DRM_DEBUG("alloc TT memory error\n");
++ goto failed_pages_alloc;
++ }
++
++ /*update psb_gtt_mm*/
++ ret = psb_gtt_add_node(mm,
++ (u32)ui32TaskId,
++ (u32)hHandle,
++ node,
++ &mapping);
++ if(ret)
++ {
++ DRM_DEBUG("add_node failed");
++ goto failed_add_node;
++ }
++
++ node = mapping->node;
++ offset_pages = node->start;
++
++ DRM_DEBUG("get free node for %ld pages, offset %ld pages", pages, offset_pages);
++
++ /*update gtt*/
++ psb_gtt_insert_phys_addresses( pg, pPages, (unsigned)offset_pages, (unsigned)ui32PagesNum, 0 );
++
++ *ui32Offset = offset_pages;
++ return 0;
++
++failed_add_node:
++ psb_gtt_mm_free_mem(mm, node);
++failed_pages_alloc:
++ return ret;
++}
++
++
++int psb_gtt_unmap_pvr_memory(struct drm_device *dev, unsigned int hHandle, unsigned int ui32TaskId)
++{
++ struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
++ struct psb_gtt_mm * mm = dev_priv->gtt_mm;
++ struct psb_gtt * pg = dev_priv->pg;
++ uint32_t pages, offset_pages;
++ struct drm_mm_node * node;
++ int ret;
++
++ ret = psb_gtt_remove_node(mm,
++ (u32)ui32TaskId,
++ (u32)hHandle,
++ &node);
++ if(ret)
++ {
++ printk("remove node failed\n");
++ return ret;
++ }
++
++ /*remove gtt entries*/
++ offset_pages = node->start;
++ pages = node->size;
++
++ psb_gtt_remove_pages(pg, offset_pages, pages, 0, 0);
++
++ /*free tt node*/
++ psb_gtt_mm_free_mem(mm, node);
++ return 0;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_gtt.h b/drivers/gpu/drm/mrst/drv/psb_gtt.h
+new file mode 100644
+index 0000000..ab19989
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_gtt.h
+@@ -0,0 +1,111 @@
++/**************************************************************************
++ * Copyright (c) 2007-2008, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef _PSB_GTT_H_
++#define _PSB_GTT_H_
++
++#include <drm/drmP.h>
++
++#include "img_types.h"
++
++struct psb_gtt {
++ struct drm_device *dev;
++ int initialized;
++ uint32_t gatt_start;
++ uint32_t mmu_gatt_start;
++ uint32_t ci_start;
++ uint32_t rar_start;
++ uint32_t gtt_start;
++ uint32_t gtt_phys_start;
++ unsigned gtt_pages;
++ unsigned gatt_pages;
++ uint32_t stolen_base;
++ void *vram_addr;
++ uint32_t pge_ctl;
++ u16 gmch_ctrl;
++ unsigned long stolen_size;
++ unsigned long vram_stolen_size;
++ unsigned long ci_stolen_size;
++ unsigned long rar_stolen_size;
++ uint32_t *gtt_map;
++ struct rw_semaphore sem;
++};
++
++struct psb_gtt_mm {
++ struct drm_mm base;
++ struct drm_open_hash hash;
++ uint32_t count;
++ spinlock_t lock;
++};
++
++struct psb_gtt_hash_entry {
++ struct drm_open_hash ht;
++ uint32_t count;
++ struct drm_hash_item item;
++};
++
++struct psb_gtt_mem_mapping {
++ struct drm_mm_node *node;
++ struct drm_hash_item item;
++};
++
++#if 0
++/*Ioctl args*/
++struct psb_gtt_mapping_arg {
++ IMG_HANDLE hKernelMemInfo;
++};
++#endif
++
++/*Exported functions*/
++extern int psb_gtt_init(struct psb_gtt *pg, int resume);
++extern int psb_gtt_insert_pages(struct psb_gtt *pg, struct page **pages,
++ unsigned offset_pages, unsigned num_pages,
++ unsigned desired_tile_stride,
++ unsigned hw_tile_stride, int type);
++extern int psb_gtt_remove_pages(struct psb_gtt *pg, unsigned offset_pages,
++ unsigned num_pages,
++ unsigned desired_tile_stride,
++ unsigned hw_tile_stride);
++
++extern struct psb_gtt *psb_gtt_alloc(struct drm_device *dev);
++extern void psb_gtt_takedown(struct psb_gtt *pg, int free);
++extern int psb_gtt_map_meminfo(struct drm_device *dev,
++ IMG_HANDLE hKernelMemInfo,
++ uint32_t *offset);
++extern int psb_gtt_unmap_meminfo(struct drm_device *dev,
++ IMG_HANDLE hKernelMemInfo);
++extern int psb_gtt_map_meminfo_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_gtt_unmap_meminfo_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern int psb_gtt_mm_init(struct psb_gtt *pg);
++extern void psb_gtt_mm_takedown(void);
++
++extern int psb_gtt_map_pvr_memory(struct drm_device *dev,
++ unsigned int hHandle,
++ unsigned int ui32TaskId,
++ IMG_CPU_PHYADDR *pPages,
++ unsigned int ui32PagesNum,
++ unsigned int *ui32Offset);
++
++extern int psb_gtt_unmap_pvr_memory(struct drm_device *dev,
++ unsigned int hHandle,
++ unsigned int ui32TaskId);
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_hotplug.c b/drivers/gpu/drm/mrst/drv/psb_hotplug.c
+new file mode 100644
+index 0000000..d50fd83
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_hotplug.c
+@@ -0,0 +1,425 @@
++/*
++ * Copyright © 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * James C. Gualario <james.c.gualario@intel.com>
++ *
++ */
++
++#include "psb_umevents.h"
++#include "psb_hotplug.h"
++/**
++ * inform the kernel of the work to be performed and related function.
++ *
++ */
++DECLARE_WORK(hotplug_dev_create_work, &psb_hotplug_dev_create_wq);
++DECLARE_WORK(hotplug_dev_remove_work, &psb_hotplug_dev_remove_wq);
++DECLARE_WORK(hotplug_dev_change_work, &psb_hotplug_dev_change_wq);
++/**
++ * psb_hotplug_notify_change_um - notify user mode of hotplug changes
++ *
++ * @name: name of event to notify user mode of change to
++ * @state: hotplug state to search for event object in
++ *
++ */
++int psb_hotplug_notify_change_um(const char *name,
++ struct hotplug_state *state)
++{
++ strcpy(&(state->hotplug_change_wq_data.dev_name_arry
++ [state->hotplug_change_wq_data.dev_name_write][0]), name);
++ state->hotplug_change_wq_data.dev_name_arry_rw_status
++ [state->hotplug_change_wq_data.dev_name_write] =
++ DRM_HOTPLUG_READY_TO_READ;
++ if (state->hotplug_change_wq_data.dev_name_read_write_wrap_ack == 1)
++ state->hotplug_change_wq_data.dev_name_read_write_wrap_ack = 0;
++ state->hotplug_change_wq_data.dev_name_write++;
++ if (state->hotplug_change_wq_data.dev_name_write ==
++ state->hotplug_change_wq_data.dev_name_read) {
++ state->hotplug_change_wq_data.dev_name_write--;
++ return IRQ_NONE;
++ }
++ if (state->hotplug_change_wq_data.dev_name_write >
++ DRM_HOTPLUG_RING_DEPTH_MAX) {
++ state->hotplug_change_wq_data.dev_name_write = 0;
++ state->hotplug_change_wq_data.dev_name_write_wrap = 1;
++ }
++ state->hotplug_change_wq_data.hotplug_dev_list = state->list;
++ queue_work(state->hotplug_wq, &(state->hotplug_change_wq_data.work));
++ return IRQ_HANDLED;
++}
++/**
++ *
++ * psb_hotplug_create_and_notify_um - create and notify user mode of new dev
++ *
++ * @name: name to give for new event / device
++ * @state: hotplug state to track new event /device in
++ *
++ */
++int psb_hotplug_create_and_notify_um(const char *name,
++ struct hotplug_state *state)
++{
++ strcpy(&(state->hotplug_create_wq_data.dev_name_arry
++ [state->hotplug_create_wq_data.dev_name_write][0]), name);
++ state->hotplug_create_wq_data.dev_name_arry_rw_status
++ [state->hotplug_create_wq_data.dev_name_write] =
++ DRM_HOTPLUG_READY_TO_READ;
++ if (state->hotplug_create_wq_data.dev_name_read_write_wrap_ack == 1)
++ state->hotplug_create_wq_data.dev_name_read_write_wrap_ack = 0;
++ state->hotplug_create_wq_data.dev_name_write++;
++ if (state->hotplug_create_wq_data.dev_name_write ==
++ state->hotplug_create_wq_data.dev_name_read) {
++ state->hotplug_create_wq_data.dev_name_write--;
++ return IRQ_NONE;
++ }
++ if (state->hotplug_create_wq_data.dev_name_write >
++ DRM_HOTPLUG_RING_DEPTH_MAX) {
++ state->hotplug_create_wq_data.dev_name_write = 0;
++ state->hotplug_create_wq_data.dev_name_write_wrap = 1;
++ }
++ state->hotplug_create_wq_data.hotplug_dev_list = state->list;
++ queue_work(state->hotplug_wq, &(state->hotplug_create_wq_data.work));
++ return IRQ_HANDLED;
++}
++EXPORT_SYMBOL(psb_hotplug_create_and_notify_um);
++/**
++ * psb_hotplug_remove_and_notify_um - remove device and notify user mode
++ *
++ * @name: name of event / device to remove
++ * @state: hotplug state to remove event / device from
++ *
++ */
++int psb_hotplug_remove_and_notify_um(const char *name,
++ struct hotplug_state *state)
++{
++ strcpy(&(state->hotplug_remove_wq_data.dev_name_arry
++ [state->hotplug_remove_wq_data.dev_name_write][0]), name);
++ state->hotplug_remove_wq_data.dev_name_arry_rw_status
++ [state->hotplug_remove_wq_data.dev_name_write] =
++ DRM_HOTPLUG_READY_TO_READ;
++ if (state->hotplug_remove_wq_data.dev_name_read_write_wrap_ack == 1)
++ state->hotplug_remove_wq_data.dev_name_read_write_wrap_ack = 0;
++ state->hotplug_remove_wq_data.dev_name_write++;
++ if (state->hotplug_remove_wq_data.dev_name_write ==
++ state->hotplug_remove_wq_data.dev_name_read) {
++ state->hotplug_remove_wq_data.dev_name_write--;
++ return IRQ_NONE;
++ }
++ if (state->hotplug_remove_wq_data.dev_name_write >
++ DRM_HOTPLUG_RING_DEPTH_MAX) {
++ state->hotplug_remove_wq_data.dev_name_write = 0;
++ state->hotplug_remove_wq_data.dev_name_write_wrap = 1;
++ }
++ state->hotplug_remove_wq_data.hotplug_dev_list = state->list;
++ queue_work(state->hotplug_wq, &(state->hotplug_remove_wq_data.work));
++ return IRQ_HANDLED;
++}
++EXPORT_SYMBOL(psb_hotplug_remove_and_notify_um);
++/**
++ * psb_hotplug_device_pool_create_and_init - make new hotplug device pool
++ *
++ * @parent_kobj: parent kobject to associate hotplug kset with
++ * @state: hotplug state to assocaite workqueues with
++ *
++ */
++struct umevent_list *psb_hotplug_device_pool_create_and_init(
++ struct kobject *parent_kobj,
++ struct hotplug_state *state)
++{
++ struct umevent_list *new_hotplug_dev_list = NULL;
++
++ new_hotplug_dev_list = psb_umevent_create_list();
++ if (new_hotplug_dev_list)
++ psb_umevent_init(parent_kobj, new_hotplug_dev_list,
++ "psb_hotplug");
++
++ state->hotplug_wq = create_singlethread_workqueue("hotplug-wq");
++ if (!state->hotplug_wq)
++ return NULL;
++
++ INIT_WORK(&state->hotplug_create_wq_data.work,
++ psb_hotplug_dev_create_wq);
++ INIT_WORK(&state->hotplug_remove_wq_data.work,
++ psb_hotplug_dev_remove_wq);
++ INIT_WORK(&state->hotplug_change_wq_data.work,
++ psb_hotplug_dev_change_wq);
++
++ state->hotplug_create_wq_data.dev_name_read = 0;
++ state->hotplug_create_wq_data.dev_name_write = 0;
++ state->hotplug_create_wq_data.dev_name_write_wrap = 0;
++ state->hotplug_create_wq_data.dev_name_read_write_wrap_ack = 0;
++ memset(&(state->hotplug_create_wq_data.dev_name_arry_rw_status[0]),
++ 0, sizeof(int)*DRM_HOTPLUG_RING_DEPTH);
++
++ state->hotplug_remove_wq_data.dev_name_read = 0;
++ state->hotplug_remove_wq_data.dev_name_write = 0;
++ state->hotplug_remove_wq_data.dev_name_write_wrap = 0;
++ state->hotplug_remove_wq_data.dev_name_read_write_wrap_ack = 0;
++ memset(&(state->hotplug_remove_wq_data.dev_name_arry_rw_status[0]),
++ 0, sizeof(int)*DRM_HOTPLUG_RING_DEPTH);
++
++ state->hotplug_change_wq_data.dev_name_read = 0;
++ state->hotplug_change_wq_data.dev_name_write = 0;
++ state->hotplug_change_wq_data.dev_name_write_wrap = 0;
++ state->hotplug_change_wq_data.dev_name_read_write_wrap_ack = 0;
++ memset(&(state->hotplug_change_wq_data.dev_name_arry_rw_status[0]),
++ 0, sizeof(int)*DRM_HOTPLUG_RING_DEPTH);
++
++ return new_hotplug_dev_list;
++}
++EXPORT_SYMBOL(psb_hotplug_device_pool_create_and_init);
++/**
++ *
++ * psb_hotplug_init - init hotplug subsystem
++ *
++ * @parent_kobj: parent kobject to associate hotplug state with
++ *
++ */
++struct hotplug_state *psb_hotplug_init(struct kobject *parent_kobj)
++{
++ struct hotplug_state *state;
++ state = kzalloc(sizeof(struct hotplug_state), GFP_KERNEL);
++ state->list = NULL;
++ state->list = psb_hotplug_device_pool_create_and_init(
++ parent_kobj,
++ state);
++ return state;
++}
++/**
++ * psb_hotplug_device_pool_destroy - destroy all hotplug related resources
++ *
++ * @state: hotplug state to destroy
++ *
++ */
++void psb_hotplug_device_pool_destroy(struct hotplug_state *state)
++{
++ flush_workqueue(state->hotplug_wq);
++ destroy_workqueue(state->hotplug_wq);
++ psb_umevent_cleanup(state->list);
++ kfree(state);
++}
++EXPORT_SYMBOL(psb_hotplug_device_pool_destroy);
++/**
++ * psb_hotplug_dev_create_wq - create workqueue implementation
++ *
++ * @work: work struct to use for kernel scheduling
++ *
++ */
++void psb_hotplug_dev_create_wq(struct work_struct *work)
++{
++ struct hotplug_disp_workqueue_data *wq_data;
++ struct umevent_obj *wq_working_hotplug_disp_obj;
++ wq_data = to_hotplug_disp_workqueue_data(work);
++ if (wq_data->dev_name_write_wrap == 1) {
++ wq_data->dev_name_read_write_wrap_ack = 1;
++ wq_data->dev_name_write_wrap = 0;
++ while (wq_data->dev_name_read != DRM_HOTPLUG_RING_DEPTH_MAX) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ wq_working_hotplug_disp_obj =
++ psb_create_umevent_obj(
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0],
++ wq_data->hotplug_dev_list);
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++ psb_umevent_notify
++ (wq_working_hotplug_disp_obj);
++ }
++ wq_data->dev_name_read++;
++ }
++ wq_data->dev_name_read = 0;
++ while (wq_data->dev_name_read < wq_data->dev_name_write-1) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ wq_working_hotplug_disp_obj =
++ psb_create_umevent_obj(
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0],
++ wq_data->hotplug_dev_list);
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++ psb_umevent_notify
++ (wq_working_hotplug_disp_obj);
++ }
++ wq_data->dev_name_read++;
++ }
++ } else {
++ while (wq_data->dev_name_read < wq_data->dev_name_write) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ wq_working_hotplug_disp_obj =
++ psb_create_umevent_obj(
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0],
++ wq_data->hotplug_dev_list);
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++ psb_umevent_notify
++ (wq_working_hotplug_disp_obj);
++ }
++ wq_data->dev_name_read++;
++ }
++ }
++ if (wq_data->dev_name_read > DRM_HOTPLUG_RING_DEPTH_MAX)
++ wq_data->dev_name_read = 0;
++}
++EXPORT_SYMBOL(psb_hotplug_dev_create_wq);
++/**
++ * psb_hotplug_dev_remove_wq - remove workqueue implementation
++ *
++ * @work: work struct to use for kernel scheduling
++ *
++ */
++void psb_hotplug_dev_remove_wq(struct work_struct *work)
++{
++ struct hotplug_disp_workqueue_data *wq_data;
++ wq_data = to_hotplug_disp_workqueue_data(work);
++ if (wq_data->dev_name_write_wrap == 1) {
++ wq_data->dev_name_read_write_wrap_ack = 1;
++ wq_data->dev_name_write_wrap = 0;
++ while (wq_data->dev_name_read != DRM_HOTPLUG_RING_DEPTH_MAX) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ psb_umevent_remove_from_list(
++ wq_data->hotplug_dev_list,
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0]);
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++ }
++ wq_data->dev_name_read++;
++ }
++ wq_data->dev_name_read = 0;
++ while (wq_data->dev_name_read < wq_data->dev_name_write-1) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ psb_umevent_remove_from_list(
++ wq_data->hotplug_dev_list,
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0]);
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++ }
++ wq_data->dev_name_read++;
++ }
++ } else {
++ while (wq_data->dev_name_read < wq_data->dev_name_write) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ psb_umevent_remove_from_list(
++ wq_data->hotplug_dev_list,
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0]);
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++ }
++ wq_data->dev_name_read++;
++ }
++ }
++ if (wq_data->dev_name_read > DRM_HOTPLUG_RING_DEPTH_MAX)
++ wq_data->dev_name_read = 0;
++}
++EXPORT_SYMBOL(psb_hotplug_dev_remove_wq);
++/**
++ * psb_hotplug_dev_change_wq - change workqueue implementation
++ *
++ * @work: work struct to use for kernel scheduling
++ *
++ */
++void psb_hotplug_dev_change_wq(struct work_struct *work)
++{
++ struct hotplug_disp_workqueue_data *wq_data;
++ struct umevent_obj *wq_working_hotplug_disp_obj;
++ wq_data = to_hotplug_disp_workqueue_data(work);
++ if (wq_data->dev_name_write_wrap == 1) {
++ wq_data->dev_name_read_write_wrap_ack = 1;
++ wq_data->dev_name_write_wrap = 0;
++ while (wq_data->dev_name_read != DRM_HOTPLUG_RING_DEPTH_MAX) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++
++ wq_working_hotplug_disp_obj =
++ psb_umevent_find_obj(
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0],
++ wq_data->hotplug_dev_list);
++ psb_umevent_notify_change_gfxsock
++ (wq_working_hotplug_disp_obj,
++ DRM_HOTPLUG_SOCKET_GROUP_ID);
++ }
++ wq_data->dev_name_read++;
++ }
++ wq_data->dev_name_read = 0;
++ while (wq_data->dev_name_read < wq_data->dev_name_write-1) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++
++ wq_working_hotplug_disp_obj =
++ psb_umevent_find_obj(
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0],
++ wq_data->hotplug_dev_list);
++ psb_umevent_notify_change_gfxsock
++ (wq_working_hotplug_disp_obj,
++ DRM_HOTPLUG_SOCKET_GROUP_ID);
++ }
++ wq_data->dev_name_read++;
++ }
++ } else {
++ while (wq_data->dev_name_read < wq_data->dev_name_write) {
++ if (wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] ==
++ DRM_HOTPLUG_READY_TO_READ) {
++ wq_data->dev_name_arry_rw_status
++ [wq_data->dev_name_read] =
++ DRM_HOTPLUG_READ_COMPLETE;
++
++ wq_working_hotplug_disp_obj =
++ psb_umevent_find_obj(
++ &wq_data->dev_name_arry
++ [wq_data->dev_name_read][0],
++ wq_data->hotplug_dev_list);
++ psb_umevent_notify_change_gfxsock
++ (wq_working_hotplug_disp_obj,
++ DRM_HOTPLUG_SOCKET_GROUP_ID);
++ }
++ wq_data->dev_name_read++;
++ }
++ }
++ if (wq_data->dev_name_read > DRM_HOTPLUG_RING_DEPTH_MAX)
++ wq_data->dev_name_read = 0;
++}
++EXPORT_SYMBOL(psb_hotplug_dev_change_wq);
+diff --git a/drivers/gpu/drm/mrst/drv/psb_hotplug.h b/drivers/gpu/drm/mrst/drv/psb_hotplug.h
+new file mode 100644
+index 0000000..b6e42a4
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_hotplug.h
+@@ -0,0 +1,90 @@
++/*
++ * Copyright © 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * James C. Gualario <james.c.gualario@intel.com>
++ *
++ */
++#ifndef _PSB_HOTPLUG_H_
++#define _PSB_HOTPLUG_H_
++/**
++ * required includes
++ *
++ */
++#include "psb_umevents.h"
++/**
++ * hotplug specific defines
++ *
++ */
++#define DRM_HOTPLUG_RING_DEPTH 256
++#define DRM_HOTPLUG_RING_DEPTH_MAX (DRM_HOTPLUG_RING_DEPTH-1)
++#define DRM_HOTPLUG_READY_TO_READ 1
++#define DRM_HOTPLUG_READ_COMPLETE 2
++/**
++ * hotplug workqueue data struct.
++ */
++struct hotplug_disp_workqueue_data {
++ struct work_struct work;
++ const char *dev_name;
++ int dev_name_write;
++ int dev_name_read;
++ int dev_name_write_wrap;
++ int dev_name_read_write_wrap_ack;
++ char dev_name_arry[DRM_HOTPLUG_RING_DEPTH][24];
++ int dev_name_arry_rw_status[DRM_HOTPLUG_RING_DEPTH];
++ struct umevent_list *hotplug_dev_list;
++};
++/**
++ * hotplug state structure
++ *
++ */
++struct hotplug_state {
++ struct workqueue_struct *hotplug_wq;
++ struct hotplug_disp_workqueue_data hotplug_remove_wq_data;
++ struct hotplug_disp_workqueue_data hotplug_create_wq_data;
++ struct hotplug_disp_workqueue_data hotplug_change_wq_data;
++ struct umevent_list *list;
++};
++/**
++ * main interface function prototypes for hotplug support.
++ *
++ */
++struct hotplug_state *psb_hotplug_init(struct kobject *parent_kobj);
++extern int psb_hotplug_notify_change_um(const char *name,
++ struct hotplug_state *state);
++extern int psb_hotplug_create_and_notify_um(const char *name,
++ struct hotplug_state *state);
++extern int psb_hotplug_remove_and_notify_um(const char *name,
++ struct hotplug_state *state);
++extern struct umevent_list *psb_hotplug_device_pool_create_and_init(
++ struct kobject *parent_kobj,
++ struct hotplug_state *state);
++extern void psb_hotplug_device_pool_destroy(struct hotplug_state *state);
++/**
++ * to go back and forth between work struct and workqueue data
++ *
++ */
++#define to_hotplug_disp_workqueue_data(x) \
++ container_of(x, struct hotplug_disp_workqueue_data, work)
++
++/**
++ * function prototypes for workqueue implementation
++ *
++ */
++extern void psb_hotplug_dev_create_wq(struct work_struct *work);
++extern void psb_hotplug_dev_remove_wq(struct work_struct *work);
++extern void psb_hotplug_dev_change_wq(struct work_struct *work);
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_bios.c b/drivers/gpu/drm/mrst/drv/psb_intel_bios.c
+new file mode 100644
+index 0000000..e752bde
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_bios.c
+@@ -0,0 +1,305 @@
++/*
++ * Copyright (c) 2006 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ *
++ */
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include "psb_drm.h"
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_bios.h"
++
++
++static void *find_section(struct bdb_header *bdb, int section_id)
++{
++ u8 *base = (u8 *)bdb;
++ int index = 0;
++ u16 total, current_size;
++ u8 current_id;
++
++ /* skip to first section */
++ index += bdb->header_size;
++ total = bdb->bdb_size;
++
++ /* walk the sections looking for section_id */
++ while (index < total) {
++ current_id = *(base + index);
++ index++;
++ current_size = *((u16 *)(base + index));
++ index += 2;
++ if (current_id == section_id)
++ return base + index;
++ index += current_size;
++ }
++
++ return NULL;
++}
++
++static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
++ struct lvds_dvo_timing *dvo_timing)
++{
++ panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
++ dvo_timing->hactive_lo;
++ panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
++ ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
++ panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
++ dvo_timing->hsync_pulse_width;
++ panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
++ ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
++
++ panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
++ dvo_timing->vactive_lo;
++ panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
++ dvo_timing->vsync_off;
++ panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
++ dvo_timing->vsync_pulse_width;
++ panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
++ ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
++ panel_fixed_mode->clock = dvo_timing->clock * 10;
++ panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
++
++ /* Some VBTs have bogus h/vtotal values */
++ if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
++ panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
++ if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
++ panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
++
++ drm_mode_set_name(panel_fixed_mode);
++}
++
++static void parse_backlight_data(struct drm_psb_private *dev_priv,
++ struct bdb_header *bdb)
++{
++ struct bdb_lvds_backlight *vbt_lvds_bl = NULL;
++ struct bdb_lvds_backlight *lvds_bl;
++ u8 p_type = 0;
++ void *bl_start = NULL;
++ struct bdb_lvds_options *lvds_opts
++ = find_section(bdb, BDB_LVDS_OPTIONS);
++
++ dev_priv->lvds_bl = NULL;
++
++ if (lvds_opts) {
++ DRM_DEBUG("lvds_options found at %p\n", lvds_opts);
++ p_type = lvds_opts->panel_type;
++ } else {
++ DRM_DEBUG("no lvds_options\n");
++ return;
++ }
++
++ bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT);
++ vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;
++
++ lvds_bl = kzalloc(sizeof(*vbt_lvds_bl), GFP_KERNEL);
++ if (!lvds_bl) {
++ DRM_DEBUG("No memory\n");
++ return;
++ }
++
++ memcpy(lvds_bl, vbt_lvds_bl, sizeof(*vbt_lvds_bl));
++
++ dev_priv->lvds_bl = lvds_bl;
++}
++
++/* Try to find integrated panel data */
++static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
++ struct bdb_header *bdb)
++{
++ struct bdb_lvds_options *lvds_options;
++ struct bdb_lvds_lfp_data *lvds_lfp_data;
++ struct bdb_lvds_lfp_data_entry *entry;
++ struct lvds_dvo_timing *dvo_timing;
++ struct drm_display_mode *panel_fixed_mode;
++
++ /* Defaults if we can't find VBT info */
++ dev_priv->lvds_dither = 0;
++ dev_priv->lvds_vbt = 0;
++
++ lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
++ if (!lvds_options)
++ return;
++
++ dev_priv->lvds_dither = lvds_options->pixel_dither;
++ if (lvds_options->panel_type == 0xff)
++ return;
++
++ lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
++ if (!lvds_lfp_data)
++ return;
++
++ dev_priv->lvds_vbt = 1;
++
++ entry = &lvds_lfp_data->data[lvds_options->panel_type];
++ dvo_timing = &entry->dvo_timing;
++
++ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode),
++ GFP_KERNEL);
++
++ fill_detail_timing_data(panel_fixed_mode, dvo_timing);
++
++ dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
++
++ DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
++ drm_mode_debug_printmodeline(panel_fixed_mode);
++
++ return;
++}
++
++/* Try to find sdvo panel data */
++static void parse_sdvo_panel_data(struct drm_psb_private *dev_priv,
++ struct bdb_header *bdb)
++{
++ struct bdb_sdvo_lvds_options *sdvo_lvds_options;
++ struct lvds_dvo_timing *dvo_timing;
++ struct drm_display_mode *panel_fixed_mode;
++
++ dev_priv->sdvo_lvds_vbt_mode = NULL;
++
++ sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
++ if (!sdvo_lvds_options)
++ return;
++
++ dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
++ if (!dvo_timing)
++ return;
++
++ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
++
++ if (!panel_fixed_mode)
++ return;
++
++ fill_detail_timing_data(panel_fixed_mode,
++ dvo_timing + sdvo_lvds_options->panel_type);
++
++ dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
++
++ return;
++}
++
++static void parse_general_features(struct drm_psb_private *dev_priv,
++ struct bdb_header *bdb)
++{
++ struct bdb_general_features *general;
++
++ /* Set sensible defaults in case we can't find the general block */
++ dev_priv->int_tv_support = 1;
++ dev_priv->int_crt_support = 1;
++
++ general = find_section(bdb, BDB_GENERAL_FEATURES);
++ if (general) {
++ dev_priv->int_tv_support = general->int_tv_support;
++ dev_priv->int_crt_support = general->int_crt_support;
++ dev_priv->lvds_use_ssc = general->enable_ssc;
++
++ if (dev_priv->lvds_use_ssc) {
++ if (IS_I855(dev_priv->dev))
++ dev_priv->lvds_ssc_freq
++ = general->ssc_freq ? 66 : 48;
++ else
++ dev_priv->lvds_ssc_freq
++ = general->ssc_freq ? 100 : 96;
++ }
++ }
++}
++
++/**
++ * psb_intel_init_bios - initialize VBIOS settings & find VBT
++ * @dev: DRM device
++ *
++ * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
++ * to appropriate values.
++ *
++ * VBT existence is a sanity check that is relied on by other i830_bios.c code.
++ * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
++ * feed an updated VBT back through that, compared to what we'll fetch using
++ * this method of groping around in the BIOS data.
++ *
++ * Returns 0 on success, nonzero on failure.
++ */
++bool psb_intel_init_bios(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct pci_dev *pdev = dev->pdev;
++ struct vbt_header *vbt = NULL;
++ struct bdb_header *bdb;
++ u8 __iomem *bios;
++ size_t size;
++ int i;
++
++ bios = pci_map_rom(pdev, &size);
++ if (!bios)
++ return -1;
++
++ /* Scour memory looking for the VBT signature */
++ for (i = 0; i + 4 < size; i++) {
++ if (!memcmp(bios + i, "$VBT", 4)) {
++ vbt = (struct vbt_header *)(bios + i);
++ break;
++ }
++ }
++
++ if (!vbt) {
++ DRM_ERROR("VBT signature missing\n");
++ pci_unmap_rom(pdev, bios);
++ return -1;
++ }
++
++ bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
++
++ /* Grab useful general definitions */
++ parse_general_features(dev_priv, bdb);
++ parse_lfp_panel_data(dev_priv, bdb);
++ parse_sdvo_panel_data(dev_priv, bdb);
++ parse_backlight_data(dev_priv, bdb);
++
++ pci_unmap_rom(pdev, bios);
++
++ return 0;
++}
++
++/**
++ * Destroy and free VBT data
++ */
++void psb_intel_destory_bios(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct drm_display_mode *sdvo_lvds_vbt_mode =
++ dev_priv->sdvo_lvds_vbt_mode;
++ struct drm_display_mode *lfp_lvds_vbt_mode =
++ dev_priv->lfp_lvds_vbt_mode;
++ struct bdb_lvds_backlight *lvds_bl =
++ dev_priv->lvds_bl;
++
++ /*free sdvo panel mode*/
++ if (sdvo_lvds_vbt_mode) {
++ dev_priv->sdvo_lvds_vbt_mode = NULL;
++ kfree(sdvo_lvds_vbt_mode);
++ }
++
++ if (lfp_lvds_vbt_mode) {
++ dev_priv->lfp_lvds_vbt_mode = NULL;
++ kfree(lfp_lvds_vbt_mode);
++ }
++
++ if (lvds_bl) {
++ dev_priv->lvds_bl = NULL;
++ kfree(lvds_bl);
++ }
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_bios.h b/drivers/gpu/drm/mrst/drv/psb_intel_bios.h
+new file mode 100644
+index 0000000..dfcae62
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_bios.h
+@@ -0,0 +1,430 @@
++/*
++ * Copyright (c) 2006 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ *
++ */
++
++#ifndef _I830_BIOS_H_
++#define _I830_BIOS_H_
++
++#include <drm/drmP.h>
++
++struct vbt_header {
++ u8 signature[20]; /**< Always starts with 'VBT$' */
++ u16 version; /**< decimal */
++ u16 header_size; /**< in bytes */
++ u16 vbt_size; /**< in bytes */
++ u8 vbt_checksum;
++ u8 reserved0;
++ u32 bdb_offset; /**< from beginning of VBT */
++ u32 aim_offset[4]; /**< from beginning of VBT */
++} __attribute__((packed));
++
++
++struct bdb_header {
++ u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
++ u16 version; /**< decimal */
++ u16 header_size; /**< in bytes */
++ u16 bdb_size; /**< in bytes */
++};
++
++/* strictly speaking, this is a "skip" block, but it has interesting info */
++struct vbios_data {
++ u8 type; /* 0 == desktop, 1 == mobile */
++ u8 relstage;
++ u8 chipset;
++ u8 lvds_present:1;
++ u8 tv_present:1;
++ u8 rsvd2:6; /* finish byte */
++ u8 rsvd3[4];
++ u8 signon[155];
++ u8 copyright[61];
++ u16 code_segment;
++ u8 dos_boot_mode;
++ u8 bandwidth_percent;
++ u8 rsvd4; /* popup memory size */
++ u8 resize_pci_bios;
++ u8 rsvd5; /* is crt already on ddc2 */
++} __attribute__((packed));
++
++/*
++ * There are several types of BIOS data blocks (BDBs), each block has
++ * an ID and size in the first 3 bytes (ID in first, size in next 2).
++ * Known types are listed below.
++ */
++#define BDB_GENERAL_FEATURES 1
++#define BDB_GENERAL_DEFINITIONS 2
++#define BDB_OLD_TOGGLE_LIST 3
++#define BDB_MODE_SUPPORT_LIST 4
++#define BDB_GENERIC_MODE_TABLE 5
++#define BDB_EXT_MMIO_REGS 6
++#define BDB_SWF_IO 7
++#define BDB_SWF_MMIO 8
++#define BDB_DOT_CLOCK_TABLE 9
++#define BDB_MODE_REMOVAL_TABLE 10
++#define BDB_CHILD_DEVICE_TABLE 11
++#define BDB_DRIVER_FEATURES 12
++#define BDB_DRIVER_PERSISTENCE 13
++#define BDB_EXT_TABLE_PTRS 14
++#define BDB_DOT_CLOCK_OVERRIDE 15
++#define BDB_DISPLAY_SELECT 16
++/* 17 rsvd */
++#define BDB_DRIVER_ROTATION 18
++#define BDB_DISPLAY_REMOVE 19
++#define BDB_OEM_CUSTOM 20
++#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
++#define BDB_SDVO_LVDS_OPTIONS 22
++#define BDB_SDVO_PANEL_DTDS 23
++#define BDB_SDVO_LVDS_PNP_IDS 24
++#define BDB_SDVO_LVDS_POWER_SEQ 25
++#define BDB_TV_OPTIONS 26
++#define BDB_LVDS_OPTIONS 40
++#define BDB_LVDS_LFP_DATA_PTRS 41
++#define BDB_LVDS_LFP_DATA 42
++#define BDB_LVDS_BACKLIGHT 43
++#define BDB_LVDS_POWER 44
++#define BDB_SKIP 254 /* VBIOS private block, ignore */
++
++struct bdb_general_features {
++ /* bits 1 */
++ u8 panel_fitting:2;
++ u8 flexaim:1;
++ u8 msg_enable:1;
++ u8 clear_screen:3;
++ u8 color_flip:1;
++
++ /* bits 2 */
++ u8 download_ext_vbt:1;
++ u8 enable_ssc:1;
++ u8 ssc_freq:1;
++ u8 enable_lfp_on_override:1;
++ u8 disable_ssc_ddt:1;
++ u8 rsvd8:3; /* finish byte */
++
++ /* bits 3 */
++ u8 disable_smooth_vision:1;
++ u8 single_dvi:1;
++ u8 rsvd9:6; /* finish byte */
++
++ /* bits 4 */
++ u8 legacy_monitor_detect;
++
++ /* bits 5 */
++ u8 int_crt_support:1;
++ u8 int_tv_support:1;
++ u8 rsvd11:6; /* finish byte */
++} __attribute__((packed));
++
++struct bdb_general_definitions {
++ /* DDC GPIO */
++ u8 crt_ddc_gmbus_pin;
++
++ /* DPMS bits */
++ u8 dpms_acpi:1;
++ u8 skip_boot_crt_detect:1;
++ u8 dpms_aim:1;
++ u8 rsvd1:5; /* finish byte */
++
++ /* boot device bits */
++ u8 boot_display[2];
++ u8 child_dev_size;
++
++ /* device info */
++ u8 tv_or_lvds_info[33];
++ u8 dev1[33];
++ u8 dev2[33];
++ u8 dev3[33];
++ u8 dev4[33];
++ /* may be another device block here on some platforms */
++};
++
++struct bdb_lvds_options {
++ u8 panel_type;
++ u8 rsvd1;
++ /* LVDS capabilities, stored in a dword */
++ u8 pfit_mode:2;
++ u8 pfit_text_mode_enhanced:1;
++ u8 pfit_gfx_mode_enhanced:1;
++ u8 pfit_ratio_auto:1;
++ u8 pixel_dither:1;
++ u8 lvds_edid:1;
++ u8 rsvd2:1;
++ u8 rsvd4;
++} __attribute__((packed));
++
++struct bdb_lvds_backlight {
++ u8 type:2;
++ u8 pol:1;
++ u8 gpio:3;
++ u8 gmbus:2;
++ u16 freq;
++ u8 minbrightness;
++ u8 i2caddr;
++ u8 brightnesscmd;
++ /*FIXME: more...*/
++} __attribute__((packed));
++
++/* LFP pointer table contains entries to the struct below */
++struct bdb_lvds_lfp_data_ptr {
++ u16 fp_timing_offset; /* offsets are from start of bdb */
++ u8 fp_table_size;
++ u16 dvo_timing_offset;
++ u8 dvo_table_size;
++ u16 panel_pnp_id_offset;
++ u8 pnp_table_size;
++} __attribute__((packed));
++
++struct bdb_lvds_lfp_data_ptrs {
++ u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
++ struct bdb_lvds_lfp_data_ptr ptr[16];
++} __attribute__((packed));
++
++/* LFP data has 3 blocks per entry */
++struct lvds_fp_timing {
++ u16 x_res;
++ u16 y_res;
++ u32 lvds_reg;
++ u32 lvds_reg_val;
++ u32 pp_on_reg;
++ u32 pp_on_reg_val;
++ u32 pp_off_reg;
++ u32 pp_off_reg_val;
++ u32 pp_cycle_reg;
++ u32 pp_cycle_reg_val;
++ u32 pfit_reg;
++ u32 pfit_reg_val;
++ u16 terminator;
++} __attribute__((packed));
++
++struct lvds_dvo_timing {
++ u16 clock; /**< In 10khz */
++ u8 hactive_lo;
++ u8 hblank_lo;
++ u8 hblank_hi:4;
++ u8 hactive_hi:4;
++ u8 vactive_lo;
++ u8 vblank_lo;
++ u8 vblank_hi:4;
++ u8 vactive_hi:4;
++ u8 hsync_off_lo;
++ u8 hsync_pulse_width;
++ u8 vsync_pulse_width:4;
++ u8 vsync_off:4;
++ u8 rsvd0:6;
++ u8 hsync_off_hi:2;
++ u8 h_image;
++ u8 v_image;
++ u8 max_hv;
++ u8 h_border;
++ u8 v_border;
++ u8 rsvd1:3;
++ u8 digital:2;
++ u8 vsync_positive:1;
++ u8 hsync_positive:1;
++ u8 rsvd2:1;
++} __attribute__((packed));
++
++struct lvds_pnp_id {
++ u16 mfg_name;
++ u16 product_code;
++ u32 serial;
++ u8 mfg_week;
++ u8 mfg_year;
++} __attribute__((packed));
++
++struct bdb_lvds_lfp_data_entry {
++ struct lvds_fp_timing fp_timing;
++ struct lvds_dvo_timing dvo_timing;
++ struct lvds_pnp_id pnp_id;
++} __attribute__((packed));
++
++struct bdb_lvds_lfp_data {
++ struct bdb_lvds_lfp_data_entry data[16];
++} __attribute__((packed));
++
++struct aimdb_header {
++ char signature[16];
++ char oem_device[20];
++ u16 aimdb_version;
++ u16 aimdb_header_size;
++ u16 aimdb_size;
++} __attribute__((packed));
++
++struct aimdb_block {
++ u8 aimdb_id;
++ u16 aimdb_size;
++} __attribute__((packed));
++
++struct vch_panel_data {
++ u16 fp_timing_offset;
++ u8 fp_timing_size;
++ u16 dvo_timing_offset;
++ u8 dvo_timing_size;
++ u16 text_fitting_offset;
++ u8 text_fitting_size;
++ u16 graphics_fitting_offset;
++ u8 graphics_fitting_size;
++} __attribute__((packed));
++
++struct vch_bdb_22 {
++ struct aimdb_block aimdb_block;
++ struct vch_panel_data panels[16];
++} __attribute__((packed));
++
++struct bdb_sdvo_lvds_options {
++ u8 panel_backlight;
++ u8 h40_set_panel_type;
++ u8 panel_type;
++ u8 ssc_clk_freq;
++ u16 als_low_trip;
++ u16 als_high_trip;
++ u8 sclalarcoeff_tab_row_num;
++ u8 sclalarcoeff_tab_row_size;
++ u8 coefficient[8];
++ u8 panel_misc_bits_1;
++ u8 panel_misc_bits_2;
++ u8 panel_misc_bits_3;
++ u8 panel_misc_bits_4;
++} __attribute__((packed));
++
++
++extern bool psb_intel_init_bios(struct drm_device *dev);
++extern void psb_intel_destory_bios(struct drm_device *dev);
++
++/*
++ * Driver<->VBIOS interaction occurs through scratch bits in
++ * GR18 & SWF*.
++ */
++
++/* GR18 bits are set on display switch and hotkey events */
++#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
++#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
++#define GR18_HK_NONE (0x0<<3)
++#define GR18_HK_LFP_STRETCH (0x1<<3)
++#define GR18_HK_TOGGLE_DISP (0x2<<3)
++#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
++#define GR18_HK_POPUP_DISABLED (0x6<<3)
++#define GR18_HK_POPUP_ENABLED (0x7<<3)
++#define GR18_HK_PFIT (0x8<<3)
++#define GR18_HK_APM_CHANGE (0xa<<3)
++#define GR18_HK_MULTIPLE (0xc<<3)
++#define GR18_USER_INT_EN (1<<2)
++#define GR18_A0000_FLUSH_EN (1<<1)
++#define GR18_SMM_EN (1<<0)
++
++/* Set by driver, cleared by VBIOS */
++#define SWF00_YRES_SHIFT 16
++#define SWF00_XRES_SHIFT 0
++#define SWF00_RES_MASK 0xffff
++
++/* Set by VBIOS at boot time and driver at runtime */
++#define SWF01_TV2_FORMAT_SHIFT 8
++#define SWF01_TV1_FORMAT_SHIFT 0
++#define SWF01_TV_FORMAT_MASK 0xffff
++
++#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
++#define SWF10_GTT_OVERRIDE_EN (1<<28)
++#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
++#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
++#define SWF10_OLD_TOGGLE 0x0
++#define SWF10_TOGGLE_LIST_1 0x1
++#define SWF10_TOGGLE_LIST_2 0x2
++#define SWF10_TOGGLE_LIST_3 0x3
++#define SWF10_TOGGLE_LIST_4 0x4
++#define SWF10_PANNING_EN (1<<23)
++#define SWF10_DRIVER_LOADED (1<<22)
++#define SWF10_EXTENDED_DESKTOP (1<<21)
++#define SWF10_EXCLUSIVE_MODE (1<<20)
++#define SWF10_OVERLAY_EN (1<<19)
++#define SWF10_PLANEB_HOLDOFF (1<<18)
++#define SWF10_PLANEA_HOLDOFF (1<<17)
++#define SWF10_VGA_HOLDOFF (1<<16)
++#define SWF10_ACTIVE_DISP_MASK 0xffff
++#define SWF10_PIPEB_LFP2 (1<<15)
++#define SWF10_PIPEB_EFP2 (1<<14)
++#define SWF10_PIPEB_TV2 (1<<13)
++#define SWF10_PIPEB_CRT2 (1<<12)
++#define SWF10_PIPEB_LFP (1<<11)
++#define SWF10_PIPEB_EFP (1<<10)
++#define SWF10_PIPEB_TV (1<<9)
++#define SWF10_PIPEB_CRT (1<<8)
++#define SWF10_PIPEA_LFP2 (1<<7)
++#define SWF10_PIPEA_EFP2 (1<<6)
++#define SWF10_PIPEA_TV2 (1<<5)
++#define SWF10_PIPEA_CRT2 (1<<4)
++#define SWF10_PIPEA_LFP (1<<3)
++#define SWF10_PIPEA_EFP (1<<2)
++#define SWF10_PIPEA_TV (1<<1)
++#define SWF10_PIPEA_CRT (1<<0)
++
++#define SWF11_MEMORY_SIZE_SHIFT 16
++#define SWF11_SV_TEST_EN (1<<15)
++#define SWF11_IS_AGP (1<<14)
++#define SWF11_DISPLAY_HOLDOFF (1<<13)
++#define SWF11_DPMS_REDUCED (1<<12)
++#define SWF11_IS_VBE_MODE (1<<11)
++#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
++#define SWF11_DPMS_MASK 0x07
++#define SWF11_DPMS_OFF (1<<2)
++#define SWF11_DPMS_SUSPEND (1<<1)
++#define SWF11_DPMS_STANDBY (1<<0)
++#define SWF11_DPMS_ON 0
++
++#define SWF14_GFX_PFIT_EN (1<<31)
++#define SWF14_TEXT_PFIT_EN (1<<30)
++#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
++#define SWF14_POPUP_EN (1<<28)
++#define SWF14_DISPLAY_HOLDOFF (1<<27)
++#define SWF14_DISP_DETECT_EN (1<<26)
++#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
++#define SWF14_DRIVER_STATUS (1<<24)
++#define SWF14_OS_TYPE_WIN9X (1<<23)
++#define SWF14_OS_TYPE_WINNT (1<<22)
++/* 21:19 rsvd */
++#define SWF14_PM_TYPE_MASK 0x00070000
++#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
++#define SWF14_PM_ACPI (0x3 << 16)
++#define SWF14_PM_APM_12 (0x2 << 16)
++#define SWF14_PM_APM_11 (0x1 << 16)
++#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
++ /* if GR18 indicates a display switch */
++#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
++#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
++#define SWF14_DS_PIPEB_TV2_EN (1<<13)
++#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
++#define SWF14_DS_PIPEB_LFP_EN (1<<11)
++#define SWF14_DS_PIPEB_EFP_EN (1<<10)
++#define SWF14_DS_PIPEB_TV_EN (1<<9)
++#define SWF14_DS_PIPEB_CRT_EN (1<<8)
++#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
++#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
++#define SWF14_DS_PIPEA_TV2_EN (1<<5)
++#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
++#define SWF14_DS_PIPEA_LFP_EN (1<<3)
++#define SWF14_DS_PIPEA_EFP_EN (1<<2)
++#define SWF14_DS_PIPEA_TV_EN (1<<1)
++#define SWF14_DS_PIPEA_CRT_EN (1<<0)
++ /* if GR18 indicates a panel fitting request */
++#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
++ /* if GR18 indicates an APM change request */
++#define SWF14_APM_HIBERNATE 0x4
++#define SWF14_APM_SUSPEND 0x3
++#define SWF14_APM_STANDBY 0x1
++#define SWF14_APM_RESTORE 0x0
++
++#endif /* _I830_BIOS_H_ */
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_display.c b/drivers/gpu/drm/mrst/drv/psb_intel_display.c
+new file mode 100644
+index 0000000..10c6dec
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_display.c
+@@ -0,0 +1,2538 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ */
++
++#include <linux/i2c.h>
++
++#include <drm/drmP.h>
++#include "psb_fb.h"
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_display.h"
++#include "ospm_power.h"
++
++struct psb_intel_clock_t {
++ /* given values */
++ int n;
++ int m1, m2;
++ int p1, p2;
++ /* derived values */
++ int dot;
++ int vco;
++ int m;
++ int p;
++};
++
++struct psb_intel_range_t {
++ int min, max;
++};
++
++struct psb_intel_p2_t {
++ int dot_limit;
++ int p2_slow, p2_fast;
++};
++
++#define INTEL_P2_NUM 2
++
++struct psb_intel_limit_t {
++ struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
++ struct psb_intel_p2_t p2;
++};
++
++#define I8XX_DOT_MIN 25000
++#define I8XX_DOT_MAX 350000
++#define I8XX_VCO_MIN 930000
++#define I8XX_VCO_MAX 1400000
++#define I8XX_N_MIN 3
++#define I8XX_N_MAX 16
++#define I8XX_M_MIN 96
++#define I8XX_M_MAX 140
++#define I8XX_M1_MIN 18
++#define I8XX_M1_MAX 26
++#define I8XX_M2_MIN 6
++#define I8XX_M2_MAX 16
++#define I8XX_P_MIN 4
++#define I8XX_P_MAX 128
++#define I8XX_P1_MIN 2
++#define I8XX_P1_MAX 33
++#define I8XX_P1_LVDS_MIN 1
++#define I8XX_P1_LVDS_MAX 6
++#define I8XX_P2_SLOW 4
++#define I8XX_P2_FAST 2
++#define I8XX_P2_LVDS_SLOW 14
++#define I8XX_P2_LVDS_FAST 14 /* No fast option */
++#define I8XX_P2_SLOW_LIMIT 165000
++
++#define I9XX_DOT_MIN 20000
++#define I9XX_DOT_MAX 400000
++#define I9XX_VCO_MIN 1400000
++#define I9XX_VCO_MAX 2800000
++#define I9XX_N_MIN 3
++#define I9XX_N_MAX 8
++#define I9XX_M_MIN 70
++#define I9XX_M_MAX 120
++#define I9XX_M1_MIN 10
++#define I9XX_M1_MAX 20
++#define I9XX_M2_MIN 5
++#define I9XX_M2_MAX 9
++#define I9XX_P_SDVO_DAC_MIN 5
++#define I9XX_P_SDVO_DAC_MAX 80
++#define I9XX_P_LVDS_MIN 7
++#define I9XX_P_LVDS_MAX 98
++#define I9XX_P1_MIN 1
++#define I9XX_P1_MAX 8
++#define I9XX_P2_SDVO_DAC_SLOW 10
++#define I9XX_P2_SDVO_DAC_FAST 5
++#define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000
++#define I9XX_P2_LVDS_SLOW 14
++#define I9XX_P2_LVDS_FAST 7
++#define I9XX_P2_LVDS_SLOW_LIMIT 112000
++
++#define INTEL_LIMIT_I8XX_DVO_DAC 0
++#define INTEL_LIMIT_I8XX_LVDS 1
++#define INTEL_LIMIT_I9XX_SDVO_DAC 2
++#define INTEL_LIMIT_I9XX_LVDS 3
++
++static const struct psb_intel_limit_t psb_intel_limits[] = {
++ { /* INTEL_LIMIT_I8XX_DVO_DAC */
++ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
++ .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
++ .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
++ .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
++ .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
++ .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
++ .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
++ .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX},
++ .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
++ .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST},
++ },
++ { /* INTEL_LIMIT_I8XX_LVDS */
++ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
++ .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
++ .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
++ .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
++ .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
++ .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
++ .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
++ .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX},
++ .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
++ .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST},
++ },
++ { /* INTEL_LIMIT_I9XX_SDVO_DAC */
++ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
++ .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
++ .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
++ .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
++ .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
++ .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
++ .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX},
++ .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
++ .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
++ .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast =
++ I9XX_P2_SDVO_DAC_FAST},
++ },
++ { /* INTEL_LIMIT_I9XX_LVDS */
++ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
++ .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
++ .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
++ .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
++ .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
++ .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
++ .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX},
++ .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
++ /* The single-channel range is 25-112Mhz, and dual-channel
++ * is 80-224Mhz. Prefer single channel as much as possible.
++ */
++ .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
++ .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST},
++ },
++};
++
++static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ const struct psb_intel_limit_t *limit;
++
++ if (IS_I9XX(dev)) {
++ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
++ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
++ else
++ limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
++ } else {
++ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
++ limit = &psb_intel_limits[INTEL_LIMIT_I8XX_LVDS];
++ else
++ limit = &psb_intel_limits[INTEL_LIMIT_I8XX_DVO_DAC];
++ }
++ return limit;
++}
++
++/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
++
++static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
++{
++ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
++ clock->p = clock->p1 * clock->p2;
++ clock->vco = refclk * clock->m / (clock->n + 2);
++ clock->dot = clock->vco / clock->p;
++}
++
++/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
++
++static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock)
++{
++ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
++ clock->p = clock->p1 * clock->p2;
++ clock->vco = refclk * clock->m / (clock->n + 2);
++ clock->dot = clock->vco / clock->p;
++}
++
++static void psb_intel_clock(struct drm_device *dev, int refclk,
++ struct psb_intel_clock_t *clock)
++{
++ if (IS_I9XX(dev))
++ return i9xx_clock(refclk, clock);
++ else
++ return i8xx_clock(refclk, clock);
++}
++
++/**
++ * Returns whether any output on the specified pipe is of the specified type
++ */
++bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_mode_config *mode_config = &dev->mode_config;
++ struct drm_connector *l_entry;
++
++ list_for_each_entry(l_entry, &mode_config->connector_list, head) {
++ if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(l_entry);
++ if (psb_intel_output->type == type)
++ return true;
++ }
++ }
++ return false;
++}
++
++#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
++/**
++ * Returns whether the given set of divisors are valid for a given refclk with
++ * the given connectors.
++ */
++
++static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
++ struct psb_intel_clock_t *clock)
++{
++ const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
++
++ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
++ INTELPllInvalid("p1 out of range\n");
++ if (clock->p < limit->p.min || limit->p.max < clock->p)
++ INTELPllInvalid("p out of range\n");
++ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
++ INTELPllInvalid("m2 out of range\n");
++ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
++ INTELPllInvalid("m1 out of range\n");
++ if (clock->m1 <= clock->m2)
++ INTELPllInvalid("m1 <= m2\n");
++ if (clock->m < limit->m.min || limit->m.max < clock->m)
++ INTELPllInvalid("m out of range\n");
++ if (clock->n < limit->n.min || limit->n.max < clock->n)
++ INTELPllInvalid("n out of range\n");
++ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
++ INTELPllInvalid("vco out of range\n");
++ /* XXX: We may need to be checking "Dot clock"
++ * depending on the multiplier, connector, etc.,
++ * rather than just a single range.
++ */
++ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
++ INTELPllInvalid("dot out of range\n");
++
++ return true;
++}
++
++/**
++ * Returns a set of divisors for the desired target clock with the given
++ * refclk, or FALSE. The returned values represent the clock equation:
++ * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
++ */
++static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
++ int refclk,
++ struct psb_intel_clock_t *best_clock)
++{
++ struct drm_device *dev = crtc->dev;
++ struct psb_intel_clock_t clock;
++ const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
++ int err = target;
++
++ if (IS_I9XX(dev) && psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
++ (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
++ /*
++ * For LVDS, if the panel is on, just rely on its current
++ * settings for dual-channel. We haven't figured out how to
++ * reliably set up different single/dual channel state, if we
++ * even can.
++ */
++ if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
++ LVDS_CLKB_POWER_UP)
++ clock.p2 = limit->p2.p2_fast;
++ else
++ clock.p2 = limit->p2.p2_slow;
++ } else {
++ if (target < limit->p2.dot_limit)
++ clock.p2 = limit->p2.p2_slow;
++ else
++ clock.p2 = limit->p2.p2_fast;
++ }
++
++ memset(best_clock, 0, sizeof(*best_clock));
++
++ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
++ clock.m1++) {
++ for (clock.m2 = limit->m2.min;
++ clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
++ clock.m2++) {
++ for (clock.n = limit->n.min;
++ clock.n <= limit->n.max; clock.n++) {
++ for (clock.p1 = limit->p1.min;
++ clock.p1 <= limit->p1.max;
++ clock.p1++) {
++ int this_err;
++
++ psb_intel_clock(dev, refclk, &clock);
++
++ if (!psb_intel_PLL_is_valid
++ (crtc, &clock))
++ continue;
++
++ this_err = abs(clock.dot - target);
++ if (this_err < err) {
++ *best_clock = clock;
++ err = this_err;
++ }
++ }
++ }
++ }
++ }
++
++ return err != target;
++}
++
++void psb_intel_wait_for_vblank(struct drm_device *dev)
++{
++ /* Wait for 20ms, i.e. one cycle at 50hz. */
++ udelay(20000);
++}
++
++int psb_intel_pipe_set_base(struct drm_crtc *crtc,
++ int x, int y, struct drm_framebuffer *old_fb)
++{
++ struct drm_device *dev = crtc->dev;
++ /* struct drm_i915_master_private *master_priv; */
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
++ struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
++ int pipe = psb_intel_crtc->pipe;
++ unsigned long Start, Offset;
++ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
++ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
++ int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
++ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++ u32 dspcntr;
++ int ret = 0;
++
++ /* no fb bound */
++ if (!crtc->fb) {
++ DRM_DEBUG("No FB bound\n");
++ return 0;
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return 0;
++
++ if (IS_MRST(dev) && (pipe == 0))
++ dspbase = MRST_DSPABASE;
++
++ Start = mode_dev->bo_offset(dev, psbfb);
++ Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
++
++ REG_WRITE(dspstride, crtc->fb->pitch);
++
++ dspcntr = REG_READ(dspcntr_reg);
++ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
++
++ switch (crtc->fb->bits_per_pixel) {
++ case 8:
++ dspcntr |= DISPPLANE_8BPP;
++ break;
++ case 16:
++ if (crtc->fb->depth == 15)
++ dspcntr |= DISPPLANE_15_16BPP;
++ else
++ dspcntr |= DISPPLANE_16BPP;
++ break;
++ case 24:
++ case 32:
++ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
++ break;
++ default:
++ DRM_ERROR("Unknown color depth\n");
++ ret = -EINVAL;
++ goto psb_intel_pipe_set_base_exit;
++ }
++ REG_WRITE(dspcntr_reg, dspcntr);
++
++ DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
++ if (IS_I965G(dev) || IS_MRST(dev)) {
++ REG_WRITE(dspbase, Offset);
++ REG_READ(dspbase);
++ REG_WRITE(dspsurf, Start);
++ REG_READ(dspsurf);
++ } else {
++ REG_WRITE(dspbase, Start + Offset);
++ REG_READ(dspbase);
++ }
++
++psb_intel_pipe_set_base_exit:
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return ret;
++}
++
++/**
++ * Sets the power management mode of the pipe and plane.
++ *
++ * This code should probably grow support for turning the cursor off and back
++ * on appropriately at the same time as we're turning the pipe off/on.
++ */
++static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
++{
++ struct drm_device *dev = crtc->dev;
++ /* struct drm_i915_master_private *master_priv; */
++ /* struct drm_i915_private *dev_priv = dev->dev_private; */
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
++ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++ int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
++ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++ u32 temp;
++ bool enabled;
++
++ /* XXX: When our outputs are all unaware of DPMS modes other than off
++ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
++ */
++ switch (mode) {
++ case DRM_MODE_DPMS_ON:
++ case DRM_MODE_DPMS_STANDBY:
++ case DRM_MODE_DPMS_SUSPEND:
++ /* Enable the DPLL */
++ temp = REG_READ(dpll_reg);
++ if ((temp & DPLL_VCO_ENABLE) == 0) {
++ REG_WRITE(dpll_reg, temp);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ }
++
++ /* Enable the pipe */
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_ENABLE) == 0)
++ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
++
++ /* Enable the plane */
++ temp = REG_READ(dspcntr_reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
++ REG_WRITE(dspcntr_reg,
++ temp | DISPLAY_PLANE_ENABLE);
++ /* Flush the plane changes */
++ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++ }
++
++ psb_intel_crtc_load_lut(crtc);
++
++ /* Give the overlay scaler a chance to enable
++ * if it's on this pipe */
++ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
++ break;
++ case DRM_MODE_DPMS_OFF:
++ /* Give the overlay scaler a chance to disable
++ * if it's on this pipe */
++ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
++
++ /* Disable the VGA plane that we never use */
++ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
++
++ /* Disable display plane */
++ temp = REG_READ(dspcntr_reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
++ REG_WRITE(dspcntr_reg,
++ temp & ~DISPLAY_PLANE_ENABLE);
++ /* Flush the plane changes */
++ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++ REG_READ(dspbase_reg);
++ }
++
++ if (!IS_I9XX(dev)) {
++ /* Wait for vblank for the disable to take effect */
++ psb_intel_wait_for_vblank(dev);
++ }
++
++ /* Next, disable display pipes */
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_ENABLE) != 0) {
++ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
++ REG_READ(pipeconf_reg);
++ }
++
++ /* Wait for vblank for the disable to take effect. */
++ psb_intel_wait_for_vblank(dev);
++
++ temp = REG_READ(dpll_reg);
++ if ((temp & DPLL_VCO_ENABLE) != 0) {
++ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ }
++
++ /* Wait for the clocks to turn off. */
++ udelay(150);
++ break;
++ }
++
++ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
++
++#if 0 /* JB: Add vblank support later */
++ if (enabled)
++ dev_priv->vblank_pipe |= (1 << pipe);
++ else
++ dev_priv->vblank_pipe &= ~(1 << pipe);
++#endif
++
++#if 0 /* JB: Add sarea support later */
++ if (!dev->primary->master)
++ return 0;
++
++ master_priv = dev->primary->master->driver_priv;
++ if (!master_priv->sarea_priv)
++ return 0;
++
++ switch (pipe) {
++ case 0:
++ master_priv->sarea_priv->planeA_w =
++ enabled ? crtc->mode.hdisplay : 0;
++ master_priv->sarea_priv->planeA_h =
++ enabled ? crtc->mode.vdisplay : 0;
++ break;
++ case 1:
++ master_priv->sarea_priv->planeB_w =
++ enabled ? crtc->mode.hdisplay : 0;
++ master_priv->sarea_priv->planeB_h =
++ enabled ? crtc->mode.vdisplay : 0;
++ break;
++ default:
++ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
++ break;
++ }
++#endif
++
++ /*Set FIFO Watermarks*/
++ REG_WRITE(DSPARB, 0x3F3E);
++}
++
++static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
++{
++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
++}
++
++static void psb_intel_crtc_commit(struct drm_crtc *crtc)
++{
++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
++}
++
++void psb_intel_encoder_prepare(struct drm_encoder *encoder)
++{
++ struct drm_encoder_helper_funcs *encoder_funcs =
++ encoder->helper_private;
++ /* lvds has its own version of prepare see psb_intel_lvds_prepare */
++ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
++}
++
++void psb_intel_encoder_commit(struct drm_encoder *encoder)
++{
++ struct drm_encoder_helper_funcs *encoder_funcs =
++ encoder->helper_private;
++ /* lvds has its own version of commit see psb_intel_lvds_commit */
++ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
++}
++
++static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ return true;
++}
++
++
++/**
++ * Return the pipe currently connected to the panel fitter,
++ * or -1 if the panel fitter is not present or not in use
++ */
++static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
++{
++ u32 pfit_control;
++
++ /* i830 doesn't have a panel fitter */
++ if (IS_I830(dev))
++ return -1;
++
++ pfit_control = REG_READ(PFIT_CONTROL);
++
++ /* See if the panel fitter is in use */
++ if ((pfit_control & PFIT_ENABLE) == 0)
++ return -1;
++
++ /* 965 can place panel fitter on either pipe */
++ if (IS_I965G(dev) || IS_MRST(dev))
++ return (pfit_control >> 29) & 0x3;
++
++ /* older chips can only use pipe 1 */
++ return 1;
++}
++
++static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode,
++ int x, int y,
++ struct drm_framebuffer *old_fb)
++{
++ struct drm_device *dev = crtc->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ int fp_reg = (pipe == 0) ? FPA0 : FPB0;
++ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
++ int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
++ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
++ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
++ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
++ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
++ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
++ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
++ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
++ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
++ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
++ int refclk;
++ struct psb_intel_clock_t clock;
++ u32 dpll = 0, fp = 0, dspcntr, pipeconf;
++ bool ok, is_sdvo = false, is_dvo = false;
++ bool is_crt = false, is_lvds = false, is_tv = false;
++ struct drm_mode_config *mode_config = &dev->mode_config;
++ struct drm_connector *connector;
++
++ list_for_each_entry(connector, &mode_config->connector_list, head) {
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++
++ if (!connector->encoder
++ || connector->encoder->crtc != crtc)
++ continue;
++
++ switch (psb_intel_output->type) {
++ case INTEL_OUTPUT_LVDS:
++ is_lvds = true;
++ break;
++ case INTEL_OUTPUT_SDVO:
++ is_sdvo = true;
++ break;
++ case INTEL_OUTPUT_DVO:
++ is_dvo = true;
++ break;
++ case INTEL_OUTPUT_TVOUT:
++ is_tv = true;
++ break;
++ case INTEL_OUTPUT_ANALOG:
++ is_crt = true;
++ break;
++ }
++ }
++
++ if (IS_I9XX(dev))
++ refclk = 96000;
++ else
++ refclk = 48000;
++
++ ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
++ &clock);
++ if (!ok) {
++ DRM_ERROR("Couldn't find PLL settings for mode!\n");
++ return 0;
++ }
++
++ fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
++
++ dpll = DPLL_VGA_MODE_DIS;
++ if (IS_I9XX(dev)) {
++ if (is_lvds) {
++ dpll |= DPLLB_MODE_LVDS;
++ if (IS_POULSBO(dev))
++ dpll |= DPLL_DVO_HIGH_SPEED;
++ } else
++ dpll |= DPLLB_MODE_DAC_SERIAL;
++ if (is_sdvo) {
++ dpll |= DPLL_DVO_HIGH_SPEED;
++ if (IS_I945G(dev) ||
++ IS_I945GM(dev) ||
++ IS_POULSBO(dev)) {
++ int sdvo_pixel_multiply =
++ adjusted_mode->clock / mode->clock;
++ dpll |=
++ (sdvo_pixel_multiply -
++ 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
++ }
++ }
++
++ /* compute bitmask from p1 value */
++ dpll |= (1 << (clock.p1 - 1)) << 16;
++ switch (clock.p2) {
++ case 5:
++ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
++ break;
++ case 7:
++ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
++ break;
++ case 10:
++ dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
++ break;
++ case 14:
++ dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
++ break;
++ }
++ if (IS_I965G(dev))
++ dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
++ } else {
++ if (is_lvds) {
++ dpll |=
++ (1 << (clock.p1 - 1)) <<
++ DPLL_FPA01_P1_POST_DIV_SHIFT;
++ } else {
++ if (clock.p1 == 2)
++ dpll |= PLL_P1_DIVIDE_BY_TWO;
++ else
++ dpll |=
++ (clock.p1 -
++ 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
++ if (clock.p2 == 4)
++ dpll |= PLL_P2_DIVIDE_BY_4;
++ }
++ }
++
++ if (is_tv) {
++ /* XXX: just matching BIOS for now */
++/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
++ dpll |= 3;
++ }
++#if 0
++ else if (is_lvds)
++ dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
++#endif
++ else
++ dpll |= PLL_REF_INPUT_DREFCLK;
++
++ /* setup pipeconf */
++ pipeconf = REG_READ(pipeconf_reg);
++
++ /* Set up the display plane register */
++ dspcntr = DISPPLANE_GAMMA_ENABLE;
++
++ if (pipe == 0)
++ dspcntr |= DISPPLANE_SEL_PIPE_A;
++ else
++ dspcntr |= DISPPLANE_SEL_PIPE_B;
++
++ dspcntr |= DISPLAY_PLANE_ENABLE;
++ pipeconf |= PIPEACONF_ENABLE;
++ dpll |= DPLL_VCO_ENABLE;
++
++
++ /* Disable the panel fitter if it was on our pipe */
++ if (psb_intel_panel_fitter_pipe(dev) == pipe)
++ REG_WRITE(PFIT_CONTROL, 0);
++
++ DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
++ drm_mode_debug_printmodeline(mode);
++
++ if (dpll & DPLL_VCO_ENABLE) {
++ REG_WRITE(fp_reg, fp);
++ REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ udelay(150);
++ }
++
++ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
++ * This is an exception to the general rule that mode_set doesn't turn
++ * things on.
++ */
++ if (is_lvds) {
++ u32 lvds = REG_READ(LVDS);
++
++ lvds |=
++ LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
++ LVDS_PIPEB_SELECT;
++ /* Set the B0-B3 data pairs corresponding to
++ * whether we're going to
++ * set the DPLLs for dual-channel mode or not.
++ */
++ if (clock.p2 == 7)
++ lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
++ else
++ lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
++
++ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
++ * appropriately here, but we need to look more
++ * thoroughly into how panels behave in the two modes.
++ */
++
++ REG_WRITE(LVDS, lvds);
++ REG_READ(LVDS);
++ }
++
++ REG_WRITE(fp_reg, fp);
++ REG_WRITE(dpll_reg, dpll);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++
++ if (IS_I965G(dev)) {
++ int sdvo_pixel_multiply =
++ adjusted_mode->clock / mode->clock;
++ REG_WRITE(dpll_md_reg,
++ (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
++ ((sdvo_pixel_multiply -
++ 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
++ } else {
++ /* write it again -- the BIOS does, after all */
++ REG_WRITE(dpll_reg, dpll);
++ }
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++
++ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
++ ((adjusted_mode->crtc_htotal - 1) << 16));
++ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
++ ((adjusted_mode->crtc_hblank_end - 1) << 16));
++ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
++ ((adjusted_mode->crtc_hsync_end - 1) << 16));
++ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
++ ((adjusted_mode->crtc_vtotal - 1) << 16));
++ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
++ ((adjusted_mode->crtc_vblank_end - 1) << 16));
++ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
++ ((adjusted_mode->crtc_vsync_end - 1) << 16));
++ /* pipesrc and dspsize control the size that is scaled from,
++ * which should always be the user's requested size.
++ */
++ REG_WRITE(dspsize_reg,
++ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
++ REG_WRITE(dsppos_reg, 0);
++ REG_WRITE(pipesrc_reg,
++ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
++ REG_WRITE(pipeconf_reg, pipeconf);
++ REG_READ(pipeconf_reg);
++
++ psb_intel_wait_for_vblank(dev);
++
++ REG_WRITE(dspcntr_reg, dspcntr);
++
++ /* Flush the plane changes */
++ {
++ struct drm_crtc_helper_funcs *crtc_funcs =
++ crtc->helper_private;
++ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
++ }
++
++ psb_intel_wait_for_vblank(dev);
++
++ return 0;
++}
++
++/** Loads the palette/gamma unit for the CRTC with the prepared values */
++void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int palreg = (psb_intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
++ int i;
++
++ /* The clocks have to be on to load the palette. */
++ if (!crtc->enabled)
++ return;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ for (i = 0; i < 256; i++) {
++ REG_WRITE(palreg + 4 * i,
++ ((psb_intel_crtc->lut_r[i] +
++ psb_intel_crtc->lut_adj[i]) << 16) |
++ ((psb_intel_crtc->lut_g[i] +
++ psb_intel_crtc->lut_adj[i]) << 8) |
++ (psb_intel_crtc->lut_b[i] +
++ psb_intel_crtc->lut_adj[i]));
++ }
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ for (i = 0; i < 256; i++) {
++ dev_priv->save_palette_a[i] =
++ ((psb_intel_crtc->lut_r[i] +
++ psb_intel_crtc->lut_adj[i]) << 16) |
++ ((psb_intel_crtc->lut_g[i] +
++ psb_intel_crtc->lut_adj[i]) << 8) |
++ (psb_intel_crtc->lut_b[i] +
++ psb_intel_crtc->lut_adj[i]);
++ }
++
++ }
++}
++
++#ifndef CONFIG_X86_MRST
++/**
++ * Save HW states of giving crtc
++ */
++static void psb_intel_crtc_save(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ /* struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private; */
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
++ int pipeA = (psb_intel_crtc->pipe == 0);
++ uint32_t paletteReg;
++ int i;
++
++ DRM_DEBUG("\n");
++
++ if (!crtc_state) {
++ DRM_DEBUG("No CRTC state found\n");
++ return;
++ }
++
++ crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
++ crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
++ crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
++ crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
++ crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
++ crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
++ crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
++ crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
++ crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
++ crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
++ crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
++ crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
++ crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
++
++ /*NOTE: DSPSIZE DSPPOS only for psb*/
++ crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
++ crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
++
++ crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
++
++ DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
++ crtc_state->saveDSPCNTR,
++ crtc_state->savePIPECONF,
++ crtc_state->savePIPESRC,
++ crtc_state->saveFP0,
++ crtc_state->saveFP1,
++ crtc_state->saveDPLL,
++ crtc_state->saveHTOTAL,
++ crtc_state->saveHBLANK,
++ crtc_state->saveHSYNC,
++ crtc_state->saveVTOTAL,
++ crtc_state->saveVBLANK,
++ crtc_state->saveVSYNC,
++ crtc_state->saveDSPSTRIDE,
++ crtc_state->saveDSPSIZE,
++ crtc_state->saveDSPPOS,
++ crtc_state->saveDSPBASE
++ );
++
++ paletteReg = pipeA ? PALETTE_A : PALETTE_B;
++ for (i = 0; i < 256; ++i)
++ crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
++}
++
++/**
++ * Restore HW states of giving crtc
++ */
++static void psb_intel_crtc_restore(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ /* struct drm_psb_private * dev_priv =
++ (struct drm_psb_private *)dev->dev_private; */
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
++ /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
++ int pipeA = (psb_intel_crtc->pipe == 0);
++ uint32_t paletteReg;
++ int i;
++
++ DRM_DEBUG("\n");
++
++ if (!crtc_state) {
++ DRM_DEBUG("No crtc state\n");
++ return;
++ }
++
++ DRM_DEBUG(
++ "current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
++ REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
++ REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
++ REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
++ REG_READ(pipeA ? FPA0 : FPB0),
++ REG_READ(pipeA ? FPA1 : FPB1),
++ REG_READ(pipeA ? DPLL_A : DPLL_B),
++ REG_READ(pipeA ? HTOTAL_A : HTOTAL_B),
++ REG_READ(pipeA ? HBLANK_A : HBLANK_B),
++ REG_READ(pipeA ? HSYNC_A : HSYNC_B),
++ REG_READ(pipeA ? VTOTAL_A : VTOTAL_B),
++ REG_READ(pipeA ? VBLANK_A : VBLANK_B),
++ REG_READ(pipeA ? VSYNC_A : VSYNC_B),
++ REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE),
++ REG_READ(pipeA ? DSPASIZE : DSPBSIZE),
++ REG_READ(pipeA ? DSPAPOS : DSPBPOS),
++ REG_READ(pipeA ? DSPABASE : DSPBBASE)
++ );
++
++ DRM_DEBUG(
++ "saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
++ crtc_state->saveDSPCNTR,
++ crtc_state->savePIPECONF,
++ crtc_state->savePIPESRC,
++ crtc_state->saveFP0,
++ crtc_state->saveFP1,
++ crtc_state->saveDPLL,
++ crtc_state->saveHTOTAL,
++ crtc_state->saveHBLANK,
++ crtc_state->saveHSYNC,
++ crtc_state->saveVTOTAL,
++ crtc_state->saveVBLANK,
++ crtc_state->saveVSYNC,
++ crtc_state->saveDSPSTRIDE,
++ crtc_state->saveDSPSIZE,
++ crtc_state->saveDSPPOS,
++ crtc_state->saveDSPBASE
++ );
++
++
++#if 0
++ if (drm_helper_crtc_in_use(crtc))
++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
++
++
++ if (psb_intel_panel_fitter_pipe(dev) == psb_intel_crtc->pipe) {
++ REG_WRITE(PFIT_CONTROL, crtc_state->savePFITCTRL);
++ DRM_DEBUG("write pfit_controle: %x\n", REG_READ(PFIT_CONTROL));
++ }
++#endif
++
++ if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
++ REG_WRITE(pipeA ? DPLL_A : DPLL_B,
++ crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
++ REG_READ(pipeA ? DPLL_A : DPLL_B);
++ DRM_DEBUG("write dpll: %x\n",
++ REG_READ(pipeA ? DPLL_A : DPLL_B));
++ udelay(150);
++ }
++
++ REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
++ REG_READ(pipeA ? FPA0 : FPB0);
++
++ REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
++ REG_READ(pipeA ? FPA1 : FPB1);
++
++ REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
++ REG_READ(pipeA ? DPLL_A : DPLL_B);
++ udelay(150);
++
++ REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
++ REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
++ REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
++ REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
++ REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
++ REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
++ REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
++
++ REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
++ REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
++
++ REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
++ REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
++ REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
++
++ psb_intel_wait_for_vblank(dev);
++
++ REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
++ REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
++
++ psb_intel_wait_for_vblank(dev);
++
++ paletteReg = pipeA ? PALETTE_A : PALETTE_B;
++ for (i = 0; i < 256; ++i)
++ REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
++}
++#endif
++
++static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
++ struct drm_file *file_priv,
++ uint32_t handle,
++ uint32_t width, uint32_t height)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct psb_gtt *pg = dev_priv->pg;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct psb_intel_mode_device *mode_dev = psb_intel_crtc->mode_dev;
++ int pipe = psb_intel_crtc->pipe;
++ uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
++ uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
++ uint32_t temp;
++ size_t addr = 0;
++ uint32_t page_offset;
++ size_t size;
++ void *bo;
++ int ret;
++
++ DRM_DEBUG("\n");
++
++ /* if we want to turn of the cursor ignore width and height */
++ if (!handle) {
++ DRM_DEBUG("cursor off\n");
++ /* turn of the cursor */
++ temp = 0;
++ temp |= CURSOR_MODE_DISABLE;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ REG_WRITE(control, temp);
++ REG_WRITE(base, 0);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++
++ /* unpin the old bo */
++ if (psb_intel_crtc->cursor_bo) {
++ mode_dev->bo_unpin_for_scanout(dev,
++ psb_intel_crtc->
++ cursor_bo);
++ psb_intel_crtc->cursor_bo = NULL;
++ }
++
++ return 0;
++ }
++
++ /* Currently we only support 64x64 cursors */
++ if (width != 64 || height != 64) {
++ DRM_ERROR("we currently only support 64x64 cursors\n");
++ return -EINVAL;
++ }
++
++ bo = mode_dev->bo_from_handle(dev, file_priv, handle);
++ if (!bo)
++ return -ENOENT;
++
++ ret = mode_dev->bo_pin_for_scanout(dev, bo);
++ if (ret)
++ return ret;
++ size = mode_dev->bo_size(dev, bo);
++ if (size < width * height * 4) {
++ DRM_ERROR("buffer is to small\n");
++ return -ENOMEM;
++ }
++
++ /*insert this bo into gtt*/
++ DRM_DEBUG("%s: map meminfo for hw cursor. handle %x\n",
++ __func__, handle);
++
++ ret = psb_gtt_map_meminfo(dev, (IMG_HANDLE)handle, &page_offset);
++ if (ret) {
++ DRM_ERROR("Can not map meminfo to GTT. handle 0x%x\n", handle);
++ return ret;
++ }
++
++ addr = page_offset << PAGE_SHIFT;
++
++ if (IS_POULSBO(dev))
++ addr += pg->stolen_base;
++
++ psb_intel_crtc->cursor_addr = addr;
++
++ temp = 0;
++ /* set the pipe for the cursor */
++ temp |= (pipe << 28);
++ temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ REG_WRITE(control, temp);
++ REG_WRITE(base, addr);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++
++ /* unpin the old bo */
++ if (psb_intel_crtc->cursor_bo && psb_intel_crtc->cursor_bo != bo) {
++ mode_dev->bo_unpin_for_scanout(dev, psb_intel_crtc->cursor_bo);
++ psb_intel_crtc->cursor_bo = bo;
++ }
++
++ return 0;
++}
++
++static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
++{
++ struct drm_device *dev = crtc->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ uint32_t temp = 0;
++ uint32_t adder;
++
++ if (x < 0) {
++ temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
++ x = -x;
++ }
++ if (y < 0) {
++ temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
++ y = -y;
++ }
++
++ temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
++ temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
++
++ adder = psb_intel_crtc->cursor_addr;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
++ REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ return 0;
++}
++
++static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
++ u16 *green, u16 *blue, uint32_t size)
++{
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int i;
++
++ if (size != 256)
++ return;
++
++ for (i = 0; i < 256; i++) {
++ psb_intel_crtc->lut_r[i] = red[i] >> 8;
++ psb_intel_crtc->lut_g[i] = green[i] >> 8;
++ psb_intel_crtc->lut_b[i] = blue[i] >> 8;
++ }
++
++ psb_intel_crtc_load_lut(crtc);
++}
++
++/* Returns the clock of the currently programmed mode of the given pipe. */
++static int psb_intel_crtc_clock_get(struct drm_device *dev,
++ struct drm_crtc *crtc)
++{
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ u32 dpll;
++ u32 fp;
++ struct psb_intel_clock_t clock;
++ bool is_lvds;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
++ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
++ fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
++ else
++ fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
++ is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ dpll = (pipe == 0) ?
++ dev_priv->saveDPLL_A : dev_priv->saveDPLL_B;
++
++ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
++ fp = (pipe == 0) ?
++ dev_priv->saveFPA0 :
++ dev_priv->saveFPB0;
++ else
++ fp = (pipe == 0) ?
++ dev_priv->saveFPA1 :
++ dev_priv->saveFPB1;
++
++ is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN);
++ }
++
++ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
++ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
++ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
++
++ if (is_lvds) {
++ clock.p1 =
++ ffs((dpll &
++ DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
++ DPLL_FPA01_P1_POST_DIV_SHIFT);
++ clock.p2 = 14;
++
++ if ((dpll & PLL_REF_INPUT_MASK) ==
++ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
++ /* XXX: might not be 66MHz */
++ i8xx_clock(66000, &clock);
++ } else
++ i8xx_clock(48000, &clock);
++ } else {
++ if (dpll & PLL_P1_DIVIDE_BY_TWO)
++ clock.p1 = 2;
++ else {
++ clock.p1 =
++ ((dpll &
++ DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
++ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
++ }
++ if (dpll & PLL_P2_DIVIDE_BY_4)
++ clock.p2 = 4;
++ else
++ clock.p2 = 2;
++
++ i8xx_clock(48000, &clock);
++ }
++
++ /* XXX: It would be nice to validate the clocks, but we can't reuse
++ * i830PllIsValid() because it relies on the xf86_config connector
++ * configuration being accurate, which it isn't necessarily.
++ */
++
++ return clock.dot;
++}
++
++/** Returns the currently programmed mode of the given pipe. */
++struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
++ struct drm_crtc *crtc)
++{
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ struct drm_display_mode *mode;
++ int htot;
++ int hsync;
++ int vtot;
++ int vsync;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
++ hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
++ vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
++ vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ htot = (pipe == 0) ?
++ dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B;
++ hsync = (pipe == 0) ?
++ dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B;
++ vtot = (pipe == 0) ?
++ dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B;
++ vsync = (pipe == 0) ?
++ dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B;
++ }
++
++ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++ if (!mode)
++ return NULL;
++
++ mode->clock = psb_intel_crtc_clock_get(dev, crtc);
++ mode->hdisplay = (htot & 0xffff) + 1;
++ mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
++ mode->hsync_start = (hsync & 0xffff) + 1;
++ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
++ mode->vdisplay = (vtot & 0xffff) + 1;
++ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
++ mode->vsync_start = (vsync & 0xffff) + 1;
++ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
++
++ drm_mode_set_name(mode);
++ drm_mode_set_crtcinfo(mode, 0);
++
++ return mode;
++}
++
++static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
++{
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++
++#ifndef CONFIG_X86_MRST
++ kfree(psb_intel_crtc->crtc_state);
++#endif
++ drm_crtc_cleanup(crtc);
++ kfree(psb_intel_crtc);
++}
++
++static const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
++ .dpms = psb_intel_crtc_dpms,
++ .mode_fixup = psb_intel_crtc_mode_fixup,
++ .mode_set = psb_intel_crtc_mode_set,
++ .mode_set_base = psb_intel_pipe_set_base,
++ .prepare = psb_intel_crtc_prepare,
++ .commit = psb_intel_crtc_commit,
++};
++
++static const struct drm_crtc_helper_funcs mrst_helper_funcs;
++
++const struct drm_crtc_funcs psb_intel_crtc_funcs = {
++#ifndef CONFIG_X86_MRST
++ .save = psb_intel_crtc_save,
++ .restore = psb_intel_crtc_restore,
++#endif
++ .cursor_set = psb_intel_crtc_cursor_set,
++ .cursor_move = psb_intel_crtc_cursor_move,
++ .gamma_set = psb_intel_crtc_gamma_set,
++ .set_config = drm_crtc_helper_set_config,
++ .destroy = psb_intel_crtc_destroy,
++};
++
++
++void psb_intel_crtc_init(struct drm_device *dev, int pipe,
++ struct psb_intel_mode_device *mode_dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct psb_intel_crtc *psb_intel_crtc;
++ int i;
++ uint16_t *r_base, *g_base, *b_base;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter psb_intel_crtc_init \n");
++#endif /* PRINT_JLIU7 */
++
++ /* We allocate a extra array of drm_connector pointers
++ * for fbdev after the crtc */
++ psb_intel_crtc =
++ kzalloc(sizeof(struct psb_intel_crtc) +
++ (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
++ GFP_KERNEL);
++ if (psb_intel_crtc == NULL)
++ return;
++
++#ifndef CONFIG_X86_MRST
++ psb_intel_crtc->crtc_state =
++ kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL);
++ if (!psb_intel_crtc->crtc_state) {
++ DRM_INFO("Crtc state error: No memory\n");
++ kfree(psb_intel_crtc);
++ return;
++ }
++#endif
++
++ drm_crtc_init(dev, &psb_intel_crtc->base, &psb_intel_crtc_funcs);
++
++ drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256);
++ psb_intel_crtc->pipe = pipe;
++ psb_intel_crtc->plane = pipe;
++
++ r_base = psb_intel_crtc->base.gamma_store;
++ g_base = r_base + 256;
++ b_base = g_base + 256;
++ for (i = 0; i < 256; i++) {
++ psb_intel_crtc->lut_r[i] = i;
++ psb_intel_crtc->lut_g[i] = i;
++ psb_intel_crtc->lut_b[i] = i;
++ r_base[i] = i << 8;
++ g_base[i] = i << 8;
++ b_base[i] = i << 8;
++
++ psb_intel_crtc->lut_adj[i] = 0;
++ }
++
++ psb_intel_crtc->mode_dev = mode_dev;
++ psb_intel_crtc->cursor_addr = 0;
++
++ if (IS_MRST(dev)) {
++ drm_crtc_helper_add(&psb_intel_crtc->base, &mrst_helper_funcs);
++ } else {
++ drm_crtc_helper_add(&psb_intel_crtc->base,
++ &psb_intel_helper_funcs);
++ }
++
++ /* Setup the array of drm_connector pointer array */
++ psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base;
++ BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
++ dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] != NULL);
++ dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] = &psb_intel_crtc->base;
++ dev_priv->pipe_to_crtc_mapping[psb_intel_crtc->pipe] = &psb_intel_crtc->base;
++ psb_intel_crtc->mode_set.connectors =
++ (struct drm_connector **) (psb_intel_crtc + 1);
++ psb_intel_crtc->mode_set.num_connectors = 0;
++}
++
++int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data;
++ struct drm_mode_object *drmmode_obj;
++ struct psb_intel_crtc *crtc;
++
++ if (!dev_priv) {
++ DRM_ERROR("called with no initialization\n");
++ return -EINVAL;
++ }
++
++ drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
++ DRM_MODE_OBJECT_CRTC);
++
++ if (!drmmode_obj) {
++ DRM_ERROR("no such CRTC id\n");
++ return -EINVAL;
++ }
++
++ crtc = to_psb_intel_crtc(obj_to_crtc(drmmode_obj));
++ pipe_from_crtc_id->pipe = crtc->pipe;
++
++ return 0;
++}
++
++struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
++{
++ struct drm_crtc *crtc = NULL;
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ if (psb_intel_crtc->pipe == pipe)
++ break;
++ }
++ return crtc;
++}
++
++int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
++{
++ int index_mask = 0;
++ struct drm_connector *connector;
++ int entry = 0;
++
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ if (type_mask & (1 << psb_intel_output->type))
++ index_mask |= (1 << entry);
++ entry++;
++ }
++ return index_mask;
++}
++
++#if 0 /* JB: Should be per device */
++static void psb_intel_setup_outputs(struct drm_device *dev)
++{
++ struct drm_connector *connector;
++
++ psb_intel_crt_init(dev);
++
++ /* Set up integrated LVDS */
++ if (IS_MOBILE(dev) && !IS_I830(dev))
++ psb_intel_lvds_init(dev);
++
++ if (IS_I9XX(dev)) {
++ psb_intel_sdvo_init(dev, SDVOB);
++ psb_intel_sdvo_init(dev, SDVOC);
++ } else
++ psb_intel_dvo_init(dev);
++
++ if (IS_I9XX(dev) && !IS_I915G(dev))
++ psb_intel_tv_init(dev);
++
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct drm_encoder *encoder = &psb_intel_output->enc;
++ int crtc_mask = 0, clone_mask = 0;
++
++ /* valid crtcs */
++ switch (psb_intel_output->type) {
++ case INTEL_OUTPUT_DVO:
++ case INTEL_OUTPUT_SDVO:
++ crtc_mask = ((1 << 0) | (1 << 1));
++ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
++ (1 << INTEL_OUTPUT_DVO) |
++ (1 << INTEL_OUTPUT_SDVO));
++ break;
++ case INTEL_OUTPUT_ANALOG:
++ crtc_mask = ((1 << 0) | (1 << 1));
++ clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
++ (1 << INTEL_OUTPUT_DVO) |
++ (1 << INTEL_OUTPUT_SDVO));
++ break;
++ case INTEL_OUTPUT_LVDS:
++ crtc_mask = (1 << 1);
++ clone_mask = (1 << INTEL_OUTPUT_LVDS);
++ break;
++ case INTEL_OUTPUT_TVOUT:
++ crtc_mask = ((1 << 0) | (1 << 1));
++ clone_mask = (1 << INTEL_OUTPUT_TVOUT);
++ break;
++ }
++ encoder->possible_crtcs = crtc_mask;
++ encoder->possible_clones =
++ psb_intel_connector_clones(dev, clone_mask);
++ }
++}
++#endif
++
++#if 0 /* JB: Rework framebuffer code into something none device specific */
++static void psb_intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
++{
++ struct psb_intel_framebuffer *psb_intel_fb =
++ to_psb_intel_framebuffer(fb);
++ struct drm_device *dev = fb->dev;
++
++ if (fb->fbdev)
++ intelfb_remove(dev, fb);
++
++ drm_framebuffer_cleanup(fb);
++ drm_gem_object_unreference(fb->mm_private);
++
++ kfree(psb_intel_fb);
++}
++
++static int psb_intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
++ struct drm_file *file_priv,
++ unsigned int *handle)
++{
++ struct drm_gem_object *object = fb->mm_private;
++
++ return drm_gem_handle_create(file_priv, object, handle);
++}
++
++static const struct drm_framebuffer_funcs psb_intel_fb_funcs = {
++ .destroy = psb_intel_user_framebuffer_destroy,
++ .create_handle = psb_intel_user_framebuffer_create_handle,
++};
++
++struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device *dev,
++ struct drm_mode_fb_cmd
++ *mode_cmd,
++ void *mm_private)
++{
++ struct psb_intel_framebuffer *psb_intel_fb;
++
++ psb_intel_fb = kzalloc(sizeof(*psb_intel_fb), GFP_KERNEL);
++ if (!psb_intel_fb)
++ return NULL;
++
++ if (!drm_framebuffer_init(dev,
++ &psb_intel_fb->base,
++ &psb_intel_fb_funcs))
++ return NULL;
++
++ drm_helper_mode_fill_fb_struct(&psb_intel_fb->base, mode_cmd);
++
++ return &psb_intel_fb->base;
++}
++
++
++static struct drm_framebuffer *psb_intel_user_framebuffer_create(struct
++ drm_device
++ *dev,
++ struct
++ drm_file
++ *filp,
++ struct
++ drm_mode_fb_cmd
++ *mode_cmd)
++{
++ struct drm_gem_object *obj;
++
++ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
++ if (!obj)
++ return NULL;
++
++ return psb_intel_framebuffer_create(dev, mode_cmd, obj);
++}
++
++static int psb_intel_insert_new_fb(struct drm_device *dev,
++ struct drm_file *file_priv,
++ struct drm_framebuffer *fb,
++ struct drm_mode_fb_cmd *mode_cmd)
++{
++ struct psb_intel_framebuffer *psb_intel_fb;
++ struct drm_gem_object *obj;
++ struct drm_crtc *crtc;
++
++ psb_intel_fb = to_psb_intel_framebuffer(fb);
++
++ mutex_lock(&dev->struct_mutex);
++ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
++
++ if (!obj) {
++ mutex_unlock(&dev->struct_mutex);
++ return -EINVAL;
++ }
++ drm_gem_object_unreference(psb_intel_fb->base.mm_private);
++ drm_helper_mode_fill_fb_struct(fb, mode_cmd, obj);
++ mutex_unlock(&dev->struct_mutex);
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++ if (crtc->fb == fb) {
++ struct drm_crtc_helper_funcs *crtc_funcs =
++ crtc->helper_private;
++ crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y);
++ }
++ }
++ return 0;
++}
++
++static const struct drm_mode_config_funcs psb_intel_mode_funcs = {
++ .resize_fb = psb_intel_insert_new_fb,
++ .fb_create = psb_intel_user_framebuffer_create,
++ .fb_changed = intelfb_probe,
++};
++#endif
++
++#if 0 /* Should be per device */
++void psb_intel_modeset_init(struct drm_device *dev)
++{
++ int num_pipe;
++ int i;
++
++ drm_mode_config_init(dev);
++
++ dev->mode_config.min_width = 0;
++ dev->mode_config.min_height = 0;
++
++ dev->mode_config.funcs = (void *) &psb_intel_mode_funcs;
++
++ if (IS_I965G(dev)) {
++ dev->mode_config.max_width = 8192;
++ dev->mode_config.max_height = 8192;
++ } else {
++ dev->mode_config.max_width = 2048;
++ dev->mode_config.max_height = 2048;
++ }
++
++ /* set memory base */
++ /* MRST and PSB should use BAR 2*/
++ dev->mode_config.fb_base =
++ pci_resource_start(dev->pdev, 2);
++
++ if (IS_MOBILE(dev) || IS_I9XX(dev))
++ num_pipe = 2;
++ else
++ num_pipe = 1;
++ DRM_DEBUG("%d display pipe%s available.\n",
++ num_pipe, num_pipe > 1 ? "s" : "");
++
++ for (i = 0; i < num_pipe; i++)
++ psb_intel_crtc_init(dev, i);
++
++ psb_intel_setup_outputs(dev);
++
++ /* setup fbs */
++ /* drm_initial_config(dev); */
++}
++#endif
++
++void psb_intel_modeset_cleanup(struct drm_device *dev)
++{
++ drm_mode_config_cleanup(dev);
++}
++
++
++/* current intel driver doesn't take advantage of encoders
++ always give back the encoder for the connector
++*/
++struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
++{
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++
++ return &psb_intel_output->enc;
++}
++
++/* MRST_PLATFORM start */
++
++#if DUMP_REGISTER
++void dump_dc_registers(struct drm_device *dev)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ unsigned int i = 0;
++
++ DRM_INFO("jliu7 dump_dc_registers\n");
++
++
++ if (0x80000000 & REG_READ(0x70008)) {
++ for (i = 0x20a0; i < 0x20af; i += 4) {
++ DRM_INFO("jliu7 interrupt register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0xf014; i < 0xf047; i += 4) {
++ DRM_INFO("jliu7 pipe A dpll register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x60000; i < 0x6005f; i += 4) {
++ DRM_INFO
++ ("jliu7 pipe A timing register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x61140; i < 0x61143; i += 4) {
++ DRM_INFO("jliu7 SDBOB register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x61180; i < 0x6123F; i += 4) {
++ DRM_INFO
++ ("jliu7 LVDS PORT register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x61254; i < 0x612AB; i += 4) {
++ DRM_INFO("jliu7 BLC register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x70000; i < 0x70047; i += 4) {
++ DRM_INFO
++ ("jliu7 PIPE A control register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x70180; i < 0x7020b; i += 4) {
++ DRM_INFO("jliu7 display A control register=0x%x,"
++ "value=%x\n", i,
++ (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x71400; i < 0x71403; i += 4) {
++ DRM_INFO
++ ("jliu7 VGA Display Plane Control register=0x%x,"
++ "value=%x\n", i, (unsigned int) REG_READ(i));
++ }
++ }
++
++ if (0x80000000 & REG_READ(0x71008)) {
++ for (i = 0x61000; i < 0x6105f; i += 4) {
++ DRM_INFO
++ ("jliu7 pipe B timing register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x71000; i < 0x71047; i += 4) {
++ DRM_INFO
++ ("jliu7 PIPE B control register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++
++ for (i = 0x71180; i < 0x7120b; i += 4) {
++ DRM_INFO("jliu7 display B control register=0x%x,"
++ "value=%x\n", i,
++ (unsigned int) REG_READ(i));
++ }
++ }
++#if 0
++ for (i = 0x70080; i < 0x700df; i += 4) {
++ DRM_INFO("jliu7 cursor A & B register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++ }
++#endif
++
++}
++
++void dump_dsi_registers(struct drm_device *dev)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ unsigned int i = 0;
++
++ DRM_INFO("jliu7 dump_dsi_registers\n");
++
++ for (i = 0xb000; i < 0xb064; i += 4) {
++ DRM_INFO("jliu7 MIPI IP register=0x%x, value=%x\n", i,
++ (unsigned int) REG_READ(i));
++ }
++
++ i = 0xb104;
++ DRM_INFO("jliu7 MIPI control register=0x%x, value=%x\n",
++ i, (unsigned int) REG_READ(i));
++}
++#endif /* DUMP_REGISTER */
++
++
++struct mrst_limit_t {
++ struct psb_intel_range_t dot, m, p1;
++};
++
++struct mrst_clock_t {
++ /* derived values */
++ int dot;
++ int m;
++ int p1;
++};
++
++#define MRST_LIMIT_LVDS_100L 0
++#define MRST_LIMIT_LVDS_83 1
++#define MRST_LIMIT_LVDS_100 2
++
++#define MRST_DOT_MIN 19750
++#define MRST_DOT_MAX 120000
++#define MRST_M_MIN_100L 20
++#define MRST_M_MIN_100 10
++#define MRST_M_MIN_83 12
++#define MRST_M_MAX_100L 34
++#define MRST_M_MAX_100 17
++#define MRST_M_MAX_83 20
++#define MRST_P1_MIN 2
++#define MRST_P1_MAX_0 7
++#define MRST_P1_MAX_1 8
++
++static const struct mrst_limit_t mrst_limits[] = {
++ { /* MRST_LIMIT_LVDS_100L */
++ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
++ .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
++ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
++ },
++ { /* MRST_LIMIT_LVDS_83L */
++ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
++ .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
++ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
++ },
++ { /* MRST_LIMIT_LVDS_100 */
++ .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
++ .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
++ .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
++ },
++};
++
++#define MRST_M_MIN 10
++static const u32 mrst_m_converts[] = {
++ 0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
++ 0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
++ 0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
++};
++
++#define COUNT_MAX 0x10000000
++void mrstWaitForPipeDisable(struct drm_device *dev)
++{
++ int count, temp;
++
++ /* FIXME JLIU7_PO */
++ psb_intel_wait_for_vblank(dev);
++ return;
++
++ /* Wait for for the pipe disable to take effect. */
++ for (count = 0; count < COUNT_MAX; count++) {
++ temp = REG_READ(PIPEACONF);
++ if ((temp & PIPEACONF_PIPE_STATE) == 0)
++ break;
++ }
++
++ if (count == COUNT_MAX) {
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 mrstWaitForPipeDisable time out. \n");
++#endif /* PRINT_JLIU7 */
++ } else {
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 mrstWaitForPipeDisable cout = %d. \n",
++ count);
++#endif /* PRINT_JLIU7 */
++ }
++}
++
++void mrstWaitForPipeEnable(struct drm_device *dev)
++{
++ int count, temp;
++
++ /* FIXME JLIU7_PO */
++ psb_intel_wait_for_vblank(dev);
++ return;
++
++ /* Wait for for the pipe disable to take effect. */
++ for (count = 0; count < COUNT_MAX; count++) {
++ temp = REG_READ(PIPEACONF);
++ if ((temp & PIPEACONF_PIPE_STATE) == 1)
++ break;
++ }
++
++ if (count == COUNT_MAX) {
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 mrstWaitForPipeEnable time out. \n");
++#endif /* PRINT_JLIU7 */
++ } else {
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 mrstWaitForPipeEnable cout = %d. \n",
++ count);
++#endif /* PRINT_JLIU7 */
++ }
++}
++
++static const struct mrst_limit_t *mrst_limit(struct drm_crtc *crtc)
++{
++ const struct mrst_limit_t *limit;
++ struct drm_device *dev = crtc->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
++ || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
++ if (dev_priv->sku_100L)
++ limit = &mrst_limits[MRST_LIMIT_LVDS_100L];
++ if (dev_priv->sku_83)
++ limit = &mrst_limits[MRST_LIMIT_LVDS_83];
++ if (dev_priv->sku_100)
++ limit = &mrst_limits[MRST_LIMIT_LVDS_100];
++ } else {
++ limit = NULL;
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 jliu7 mrst_limit Wrong display type. \n");
++#endif /* PRINT_JLIU7 */
++ }
++
++ return limit;
++}
++
++/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
++static void mrst_clock(int refclk, struct mrst_clock_t *clock)
++{
++ clock->dot = (refclk * clock->m) / (14 * clock->p1);
++}
++
++void mrstPrintPll(char *prefix, struct mrst_clock_t *clock)
++{
++#if PRINT_JLIU7
++ DRM_INFO
++ ("JLIU7 mrstPrintPll %s: dotclock = %d, m = %d, p1 = %d. \n",
++ prefix, clock->dot, clock->m, clock->p1);
++#endif /* PRINT_JLIU7 */
++}
++
++/**
++ * Returns a set of divisors for the desired target clock with the given refclk,
++ * or FALSE. Divisor values are the actual divisors for
++ */
++static bool
++mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
++ struct mrst_clock_t *best_clock)
++{
++ struct mrst_clock_t clock;
++ const struct mrst_limit_t *limit = mrst_limit(crtc);
++ int err = target;
++
++ memset(best_clock, 0, sizeof(*best_clock));
++
++ for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
++ for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
++ clock.p1++) {
++ int this_err;
++
++ mrst_clock(refclk, &clock);
++
++ this_err = abs(clock.dot - target);
++ if (this_err < err) {
++ *best_clock = clock;
++ err = this_err;
++ }
++ }
++ }
++ DRM_DEBUG("mrstFindBestPLL err = %d.\n", err);
++
++ return err != target;
++}
++
++/**
++ * Sets the power management mode of the pipe and plane.
++ *
++ * This code should probably grow support for turning the cursor off and back
++ * on appropriately at the same time as we're turning the pipe off/on.
++ */
++static void mrst_crtc_dpms(struct drm_crtc *crtc, int mode)
++{
++ struct drm_device *dev = crtc->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ int pipe = psb_intel_crtc->pipe;
++ int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
++ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++ int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
++ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++ u32 temp;
++ bool enabled;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_crtc_dpms, mode = %d, pipe = %d \n",
++ mode, pipe);
++#endif /* PRINT_JLIU7 */
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ /* XXX: When our outputs are all unaware of DPMS modes other than off
++ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
++ */
++ switch (mode) {
++ case DRM_MODE_DPMS_ON:
++ case DRM_MODE_DPMS_STANDBY:
++ case DRM_MODE_DPMS_SUSPEND:
++ /* Enable the DPLL */
++ temp = REG_READ(dpll_reg);
++ if ((temp & DPLL_VCO_ENABLE) == 0) {
++ REG_WRITE(dpll_reg, temp);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++ }
++
++ /* Enable the pipe */
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_ENABLE) == 0)
++ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
++
++ /* Enable the plane */
++ temp = REG_READ(dspcntr_reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
++ REG_WRITE(dspcntr_reg,
++ temp | DISPLAY_PLANE_ENABLE);
++ /* Flush the plane changes */
++ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++ }
++
++ psb_intel_crtc_load_lut(crtc);
++
++ /* Give the overlay scaler a chance to enable
++ if it's on this pipe */
++ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
++ break;
++ case DRM_MODE_DPMS_OFF:
++ /* Give the overlay scaler a chance to disable
++ * if it's on this pipe */
++ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
++
++ /* Disable the VGA plane that we never use */
++ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
++
++ /* Disable display plane */
++ temp = REG_READ(dspcntr_reg);
++ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
++ REG_WRITE(dspcntr_reg,
++ temp & ~DISPLAY_PLANE_ENABLE);
++ /* Flush the plane changes */
++ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++ REG_READ(dspbase_reg);
++ }
++
++ if (!IS_I9XX(dev)) {
++ /* Wait for vblank for the disable to take effect */
++ psb_intel_wait_for_vblank(dev);
++ }
++
++ /* Next, disable display pipes */
++ temp = REG_READ(pipeconf_reg);
++ if ((temp & PIPEACONF_ENABLE) != 0) {
++ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
++ REG_READ(pipeconf_reg);
++ }
++
++ /* Wait for for the pipe disable to take effect. */
++ mrstWaitForPipeDisable(dev);
++
++ temp = REG_READ(dpll_reg);
++ if ((temp & DPLL_VCO_ENABLE) != 0) {
++ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++ }
++
++ /* Wait for the clocks to turn off. */
++ udelay(150);
++ break;
++ }
++
++#if DUMP_REGISTER
++ dump_dc_registers(dev);
++#endif /* DUMP_REGISTER */
++
++ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
++
++#if 0 /* JB: Add vblank support later */
++ if (enabled)
++ dev_priv->vblank_pipe |= (1 << pipe);
++ else
++ dev_priv->vblank_pipe &= ~(1 << pipe);
++#endif
++
++#if 0 /* JB: Add sarea support later */
++ if (!dev->primary->master)
++ return;
++
++ master_priv = dev->primary->master->driver_priv;
++ if (!master_priv->sarea_priv)
++ return;
++
++ switch (pipe) {
++ case 0:
++ master_priv->sarea_priv->planeA_w =
++ enabled ? crtc->mode.hdisplay : 0;
++ master_priv->sarea_priv->planeA_h =
++ enabled ? crtc->mode.vdisplay : 0;
++ break;
++ case 1:
++ master_priv->sarea_priv->planeB_w =
++ enabled ? crtc->mode.hdisplay : 0;
++ master_priv->sarea_priv->planeB_h =
++ enabled ? crtc->mode.vdisplay : 0;
++ break;
++ default:
++ DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
++ break;
++ }
++#endif
++
++ /*Set FIFO Watermarks*/
++ REG_WRITE(DSPARB, 0x3FFF);
++ REG_WRITE(DSPFW1, 0x3F88080A);
++ REG_WRITE(DSPFW2, 0x0b060808);
++ REG_WRITE(DSPFW3, 0x0);
++ REG_WRITE(DSPFW4, 0x08030404);
++ REG_WRITE(DSPFW5, 0x04040404);
++ REG_WRITE(DSPFW6, 0x78);
++ REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000);
++ /* Must write Bit 14 of the Chicken Bit Register */
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++static int mrst_crtc_mode_set(struct drm_crtc *crtc,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode,
++ int x, int y,
++ struct drm_framebuffer *old_fb)
++{
++ struct drm_device *dev = crtc->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ int pipe = psb_intel_crtc->pipe;
++ int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
++ int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
++ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
++ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
++ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
++ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
++ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
++ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
++ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
++ int refclk = 0;
++ struct mrst_clock_t clock;
++ u32 dpll = 0, fp = 0, dspcntr, pipeconf, lvdsport;
++ bool ok, is_sdvo = false;
++ bool is_crt = false, is_lvds = false, is_tv = false;
++ bool is_mipi = false;
++ struct drm_mode_config *mode_config = &dev->mode_config;
++ struct psb_intel_output *psb_intel_output = NULL;
++ uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
++ struct drm_encoder *encoder;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_crtc_mode_set \n");
++#endif /* PRINT_JLIU7 */
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return 0;
++
++ memcpy(&psb_intel_crtc->saved_mode,
++ mode,
++ sizeof(struct drm_display_mode));
++ memcpy(&psb_intel_crtc->saved_adjusted_mode,
++ adjusted_mode,
++ sizeof(struct drm_display_mode));
++
++ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
++
++ if (encoder->crtc != crtc)
++ continue;
++
++ psb_intel_output = enc_to_psb_intel_output(encoder);
++ switch (psb_intel_output->type) {
++ case INTEL_OUTPUT_LVDS:
++ is_lvds = true;
++ break;
++ case INTEL_OUTPUT_SDVO:
++ is_sdvo = true;
++ break;
++ case INTEL_OUTPUT_TVOUT:
++ is_tv = true;
++ break;
++ case INTEL_OUTPUT_ANALOG:
++ is_crt = true;
++ break;
++ case INTEL_OUTPUT_MIPI:
++ is_mipi = true;
++ break;
++ }
++ }
++
++ if (is_lvds | is_mipi) {
++ /*FIXME JLIU7 Get panel power delay parameters from
++ config data */
++ REG_WRITE(0x61208, 0x25807d0);
++ REG_WRITE(0x6120c, 0x1f407d0);
++ REG_WRITE(0x61210, 0x270f04);
++ }
++
++ /* Disable the VGA plane that we never use */
++ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
++
++ /* Disable the panel fitter if it was on our pipe */
++ if (psb_intel_panel_fitter_pipe(dev) == pipe)
++ REG_WRITE(PFIT_CONTROL, 0);
++
++ REG_WRITE(pipesrc_reg,
++ ((mode->crtc_hdisplay - 1) << 16) |
++ (mode->crtc_vdisplay - 1));
++
++ if (psb_intel_output)
++ drm_connector_property_get_value(&psb_intel_output->base,
++ dev->mode_config.scaling_mode_property, &scalingType);
++
++ if (scalingType == DRM_MODE_SCALE_CENTER) {
++ /* Moorestown doesn't have register support for centering so
++ * we need to mess with the h/vblank and h/vsync start and
++ * ends to get centering */
++ int offsetX = 0, offsetY = 0;
++
++ offsetX = (adjusted_mode->crtc_hdisplay -
++ mode->crtc_hdisplay) / 2;
++ offsetY = (adjusted_mode->crtc_vdisplay -
++ mode->crtc_vdisplay) / 2;
++
++ REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
++ ((adjusted_mode->crtc_htotal - 1) << 16));
++ REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
++ ((adjusted_mode->crtc_vtotal - 1) << 16));
++ REG_WRITE(hblank_reg,
++ (adjusted_mode->crtc_hblank_start - offsetX - 1) |
++ ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
++ REG_WRITE(hsync_reg,
++ (adjusted_mode->crtc_hsync_start - offsetX - 1) |
++ ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
++ REG_WRITE(vblank_reg,
++ (adjusted_mode->crtc_vblank_start - offsetY - 1) |
++ ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
++ REG_WRITE(vsync_reg,
++ (adjusted_mode->crtc_vsync_start - offsetY - 1) |
++ ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
++ } else {
++ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
++ ((adjusted_mode->crtc_htotal - 1) << 16));
++ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
++ ((adjusted_mode->crtc_vtotal - 1) << 16));
++ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
++ ((adjusted_mode->crtc_hblank_end - 1) << 16));
++ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
++ ((adjusted_mode->crtc_hsync_end - 1) << 16));
++ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
++ ((adjusted_mode->crtc_vblank_end - 1) << 16));
++ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
++ ((adjusted_mode->crtc_vsync_end - 1) << 16));
++ }
++
++ /* Flush the plane changes */
++ {
++ struct drm_crtc_helper_funcs *crtc_funcs =
++ crtc->helper_private;
++ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
++ }
++
++ /* setup pipeconf */
++ pipeconf = REG_READ(pipeconf_reg);
++
++ /* Set up the display plane register */
++ dspcntr = REG_READ(dspcntr_reg);
++ dspcntr |= DISPPLANE_GAMMA_ENABLE;
++
++ if (pipe == 0)
++ dspcntr |= DISPPLANE_SEL_PIPE_A;
++ else
++ dspcntr |= DISPPLANE_SEL_PIPE_B;
++
++ dev_priv->dspcntr = dspcntr |= DISPLAY_PLANE_ENABLE;
++ dev_priv->pipeconf = pipeconf |= PIPEACONF_ENABLE;
++
++ if (is_mipi)
++ goto mrst_crtc_mode_set_exit;
++
++ if (dev_priv->sku_100L)
++ refclk = 100000;
++ else if (dev_priv->sku_83)
++ refclk = 166000;
++ else if (dev_priv->sku_100)
++ refclk = 200000;
++
++ dpll = 0; /*BIT16 = 0 for 100MHz reference */
++
++ ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
++
++ if (!ok) {
++#if PRINT_JLIU7
++ DRM_INFO
++ ("JLIU7 mrstFindBestPLL fail in mrst_crtc_mode_set. \n");
++#endif /* PRINT_JLIU7 */
++ } else {
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 mrst_crtc_mode_set pixel clock = %d,"
++ "m = %x, p1 = %x. \n", clock.dot, clock.m,
++ clock.p1);
++#endif /* PRINT_JLIU7 */
++ }
++
++ fp = mrst_m_converts[(clock.m - MRST_M_MIN)] << 8;
++
++ dpll |= DPLL_VGA_MODE_DIS;
++
++
++ dpll |= DPLL_VCO_ENABLE;
++
++ if (is_lvds)
++ dpll |= DPLLA_MODE_LVDS;
++ else
++ dpll |= DPLLB_MODE_DAC_SERIAL;
++
++ if (is_sdvo) {
++ int sdvo_pixel_multiply =
++ adjusted_mode->clock / mode->clock;
++
++ dpll |= DPLL_DVO_HIGH_SPEED;
++ dpll |=
++ (sdvo_pixel_multiply -
++ 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
++ }
++
++
++ /* compute bitmask from p1 value */
++ dpll |= (1 << (clock.p1 - 2)) << 17;
++
++ dpll |= DPLL_VCO_ENABLE;
++
++#if PRINT_JLIU7
++ mrstPrintPll("chosen", &clock);
++#endif /* PRINT_JLIU7 */
++
++#if 0
++ if (!xf86ModesEqual(mode, adjusted_mode)) {
++ xf86DrvMsg(pScrn->scrnIndex, X_INFO,
++ "Adjusted mode for pipe %c:\n",
++ pipe == 0 ? 'A' : 'B');
++ xf86PrintModeline(pScrn->scrnIndex, mode);
++ }
++ i830PrintPll("chosen", &clock);
++#endif
++
++ if (dpll & DPLL_VCO_ENABLE) {
++ REG_WRITE(fp_reg, fp);
++ REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
++ REG_READ(dpll_reg);
++/* FIXME jliu7 check the DPLLA lock bit PIPEACONF[29] */
++ udelay(150);
++ }
++
++ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
++ * This is an exception to the general rule that mode_set doesn't turn
++ * things on.
++ */
++ if (is_lvds) {
++
++ /*lvdsport = 0x803003c0;*/
++ /*lvdsport = 0x813003c0;*/
++ lvdsport = dev_priv->gct_data.Panel_Port_Control;
++
++ REG_WRITE(LVDS, lvdsport);
++ }
++
++ REG_WRITE(fp_reg, fp);
++ REG_WRITE(dpll_reg, dpll);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++
++ /* write it again -- the BIOS does, after all */
++ REG_WRITE(dpll_reg, dpll);
++ REG_READ(dpll_reg);
++ /* Wait for the clocks to stabilize. */
++ udelay(150);
++
++ REG_WRITE(pipeconf_reg, pipeconf);
++ REG_READ(pipeconf_reg);
++
++ /* Wait for for the pipe enable to take effect. */
++ mrstWaitForPipeEnable(dev);
++
++ REG_WRITE(dspcntr_reg, dspcntr);
++ psb_intel_wait_for_vblank(dev);
++
++mrst_crtc_mode_set_exit:
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++
++ return 0;
++}
++
++
++static const struct drm_crtc_helper_funcs mrst_helper_funcs = {
++ .dpms = mrst_crtc_dpms,
++ .mode_fixup = psb_intel_crtc_mode_fixup,
++ .mode_set = mrst_crtc_mode_set,
++ .mode_set_base = psb_intel_pipe_set_base,
++ .prepare = psb_intel_crtc_prepare,
++ .commit = psb_intel_crtc_commit,
++};
++
++/* MRST_PLATFORM end */
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_display.h b/drivers/gpu/drm/mrst/drv/psb_intel_display.h
+new file mode 100644
+index 0000000..74e3b5e
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_display.h
+@@ -0,0 +1,25 @@
++/* copyright (c) 2008, Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ */
++
++#ifndef _INTEL_DISPLAY_H_
++#define _INTEL_DISPLAY_H_
++
++bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_drv.h b/drivers/gpu/drm/mrst/drv/psb_intel_drv.h
+new file mode 100644
+index 0000000..9e77cce
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_drv.h
+@@ -0,0 +1,283 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef __INTEL_DRV_H__
++#define __INTEL_DRV_H__
++
++#include <linux/i2c.h>
++#include <linux/i2c-id.h>
++#include <linux/i2c-algo-bit.h>
++#include <drm/drm_crtc.h>
++
++#include <drm/drm_crtc_helper.h>
++
++/*
++ * MOORESTOWN defines
++ */
++#define MRST_I2C 0
++
++#define DUMP_REGISTER 0
++#define MRST_24BIT_LVDS 1
++#define MRST_24BIT_DOT_1 0
++#define MRST_24BIT_WA 0
++
++#define PRINT_JLIU7 0
++#define DELAY_TIME1 2000 /* 1000 = 1ms */
++
++/*
++ * Display related stuff
++ */
++
++/* store information about an Ixxx DVO */
++/* The i830->i865 use multiple DVOs with multiple i2cs */
++/* the i915, i945 have a single sDVO i2c bus - which is different */
++#define MAX_OUTPUTS 6
++/* maximum connectors per crtcs in the mode set */
++#define INTELFB_CONN_LIMIT 4
++
++#define INTEL_I2C_BUS_DVO 1
++#define INTEL_I2C_BUS_SDVO 2
++
++/* these are outputs from the chip - integrated only
++ * external chips are via DVO or SDVO output */
++#define INTEL_OUTPUT_UNUSED 0
++#define INTEL_OUTPUT_ANALOG 1
++#define INTEL_OUTPUT_DVO 2
++#define INTEL_OUTPUT_SDVO 3
++#define INTEL_OUTPUT_LVDS 4
++#define INTEL_OUTPUT_TVOUT 5
++#define INTEL_OUTPUT_MIPI 6
++
++#define INTEL_DVO_CHIP_NONE 0
++#define INTEL_DVO_CHIP_LVDS 1
++#define INTEL_DVO_CHIP_TMDS 2
++#define INTEL_DVO_CHIP_TVOUT 4
++
++enum mipi_panel_type {
++ NSC_800X480 = 1,
++ LGE_480X1024 = 2,
++ TPO_864X480 = 3
++};
++
++struct opregion_header {
++ u8 signature[16];
++ u32 size;
++ u32 opregion_ver;
++ u8 bios_ver[32];
++ u8 vbios_ver[16];
++ u8 driver_ver[16];
++ u32 mboxes;
++ u8 reserved[164];
++} __attribute__((packed));
++
++struct opregion_apci {
++ /*FIXME: add it later*/
++} __attribute__((packed));
++
++struct opregion_swsci {
++ /*FIXME: add it later*/
++} __attribute__((packed));
++
++struct opregion_acpi {
++ /*FIXME: add it later*/
++} __attribute__((packed));
++
++struct psb_intel_opregion {
++ struct opregion_header *header;
++ struct opregion_acpi *acpi;
++ struct opregion_swsci *swsci;
++ struct opregion_asle *asle;
++ int enabled;
++};
++
++/**
++ * Hold information usually put on the device driver privates here,
++ * since it needs to be shared across multiple device drivers' privates.
++*/
++struct psb_intel_mode_device {
++
++ /*
++ * Abstracted memory manager operations
++ */
++ void *(*bo_from_handle) (struct drm_device *dev,
++ struct drm_file *file_priv,
++ unsigned int handle);
++ size_t(*bo_size) (struct drm_device *dev, void *bo);
++ size_t(*bo_offset) (struct drm_device *dev, void *bo);
++ int (*bo_pin_for_scanout) (struct drm_device *dev, void *bo);
++ int (*bo_unpin_for_scanout) (struct drm_device *dev, void *bo);
++
++ /*
++ * Cursor
++ */
++ int cursor_needs_physical;
++
++ /*
++ * LVDS info
++ */
++ int backlight_duty_cycle; /* restore backlight to this value */
++ bool panel_wants_dither;
++ struct drm_display_mode *panel_fixed_mode;
++ struct drm_display_mode *vbt_mode; /* if any */
++
++ uint32_t saveBLC_PWM_CTL;
++};
++
++struct psb_intel_i2c_chan {
++ /* for getting at dev. private (mmio etc.) */
++ struct drm_device *drm_dev;
++ u32 reg; /* GPIO reg */
++ struct i2c_adapter adapter;
++ struct i2c_algo_bit_data algo;
++ u8 slave_addr;
++};
++
++struct psb_intel_output {
++ struct drm_connector base;
++
++ struct drm_encoder enc;
++ int type;
++ struct psb_intel_i2c_chan *i2c_bus; /* for control functions */
++ struct psb_intel_i2c_chan *ddc_bus; /* for DDC only stuff */
++ bool load_detect_temp;
++ void *dev_priv;
++
++ struct psb_intel_mode_device *mode_dev;
++
++};
++
++struct psb_intel_crtc_state {
++ uint32_t saveDSPCNTR;
++ uint32_t savePIPECONF;
++ uint32_t savePIPESRC;
++ uint32_t saveDPLL;
++ uint32_t saveFP0;
++ uint32_t saveFP1;
++ uint32_t saveHTOTAL;
++ uint32_t saveHBLANK;
++ uint32_t saveHSYNC;
++ uint32_t saveVTOTAL;
++ uint32_t saveVBLANK;
++ uint32_t saveVSYNC;
++ uint32_t saveDSPSTRIDE;
++ uint32_t saveDSPSIZE;
++ uint32_t saveDSPPOS;
++ uint32_t saveDSPBASE;
++ uint32_t savePalette[256];
++};
++
++struct psb_intel_crtc {
++ struct drm_crtc base;
++ int pipe;
++ int plane;
++ uint32_t cursor_addr;
++ u8 lut_r[256], lut_g[256], lut_b[256];
++ u8 lut_adj[256];
++ struct psb_intel_framebuffer *fbdev_fb;
++ /* a mode_set for fbdev users on this crtc */
++ struct drm_mode_set mode_set;
++
++ /* current bo we scanout from */
++ void *scanout_bo;
++
++ /* current bo we cursor from */
++ void *cursor_bo;
++
++ struct drm_display_mode saved_mode;
++ struct drm_display_mode saved_adjusted_mode;
++
++ struct psb_intel_mode_device *mode_dev;
++
++/*FIXME: Workaround to avoid MRST block.*/
++#ifndef CONFIG_X86_MRST
++ /* Saved Crtc HW states */
++ struct psb_intel_crtc_state *crtc_state;
++#endif
++};
++
++#define to_psb_intel_crtc(x) \
++ container_of(x, struct psb_intel_crtc, base)
++#define to_psb_intel_output(x) \
++ container_of(x, struct psb_intel_output, base)
++#define enc_to_psb_intel_output(x) \
++ container_of(x, struct psb_intel_output, enc)
++#define to_psb_intel_framebuffer(x) \
++ container_of(x, struct psb_framebuffer, base)
++
++struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
++ const u32 reg, const char *name);
++void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan);
++int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output);
++extern bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output);
++
++extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
++ struct psb_intel_mode_device *mode_dev);
++extern void psb_intel_crt_init(struct drm_device *dev);
++extern void psb_intel_sdvo_init(struct drm_device *dev, int output_device);
++extern void psb_intel_dvo_init(struct drm_device *dev);
++extern void psb_intel_tv_init(struct drm_device *dev);
++extern void psb_intel_lvds_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev);
++extern void psb_intel_lvds_set_brightness(struct drm_device *dev, int level);
++extern void mrst_lvds_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev);
++extern void mrst_dsi_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev);
++
++extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc);
++extern void psb_intel_encoder_prepare(struct drm_encoder *encoder);
++extern void psb_intel_encoder_commit(struct drm_encoder *encoder);
++
++extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
++ *connector);
++
++extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
++ struct drm_crtc *crtc);
++extern void psb_intel_wait_for_vblank(struct drm_device *dev);
++extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
++ struct drm_file *file_priv);
++extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
++ int pipe);
++extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
++ int sdvoB);
++extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector);
++extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector,
++ int enable);
++extern int intelfb_probe(struct drm_device *dev);
++extern int intelfb_remove(struct drm_device *dev,
++ struct drm_framebuffer *fb);
++extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
++ *dev, struct
++ drm_mode_fb_cmd
++ *mode_cmd,
++ void *mm_private);
++extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode);
++extern int psb_intel_lvds_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode);
++extern int psb_intel_lvds_set_property(struct drm_connector *connector,
++ struct drm_property *property,
++ uint64_t value);
++extern void psb_intel_lvds_destroy(struct drm_connector *connector);
++extern const struct drm_encoder_funcs psb_intel_lvds_enc_funcs;
++
++extern uint8_t blc_pol;
++extern uint8_t blc_freq;
++
++#endif /* __INTEL_DRV_H__ */
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_dsi.c b/drivers/gpu/drm/mrst/drv/psb_intel_dsi.c
+new file mode 100644
+index 0000000..3d45df8
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_dsi.c
+@@ -0,0 +1,2450 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * jim liu <jim.liu@intel.com>
++ */
++
++#include <linux/backlight.h>
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_edid.h>
++#include <asm/ipc_defs.h>
++
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "ospm_power.h"
++
++#define DRM_MODE_ENCODER_MIPI 5
++
++#define BRIGHTNESS_MAX_LEVEL 100
++#define BLC_POLARITY_NORMAL 0
++
++#if DUMP_REGISTER
++extern void dump_dsi_registers(struct drm_device *dev);
++#endif /* DUMP_REGISTER */
++void mrst_init_TPO_MIPI(struct drm_device *dev);
++
++int dsi_backlight; /* restore backlight to this value */
++
++/**
++ * Returns the maximum level of the backlight duty cycle field.
++ */
++static u32 mrst_dsi_get_max_backlight(struct drm_device *dev)
++{
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_get_max_backlight \n");
++#endif /* PRINT_JLIU7 */
++
++ return BRIGHTNESS_MAX_LEVEL;
++
++/* FIXME jliu7 need to revisit */
++}
++
++/**
++ * Sets the power state for the panel.
++ */
++static void mrst_dsi_set_power(struct drm_device *dev,
++ struct psb_intel_output *output, bool on)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ DRM_INFO("Enter mrst_dsi_set_power \n");
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ if (on) {
++ /* program MIPI DSI controller and Display Controller
++ * set the device ready bit + set 'turn on' bit b048
++ * wait for 100 ms ??
++ * set pipe enable bit */
++ REG_WRITE(DPI_CONTROL_REG, 2);
++ msleep(100);
++ if (dev_priv->panel_make == TPO_864X480)
++ dev_priv->init_drvIC(dev); /* initialize the panel */
++ /* Turn on backlight */
++ REG_WRITE(BLC_PWM_CTL, 0x2faf1fc9);
++ } else {
++ /* set the shutdown bit b048h
++ * de-assert pipe enable
++ * clear device ready bit unless DBI is to be left on */
++ REG_WRITE(BLC_PWM_CTL, 0x2faf0000); /* turn off backlight */
++ REG_WRITE(DPI_CONTROL_REG, 1); /* send shut down message */
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++static void mrst_dsi_dpms(struct drm_encoder *encoder, int mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_dpms \n");
++#endif /* PRINT_JLIU7 */
++
++ if (mode == DRM_MODE_DPMS_ON)
++ mrst_dsi_set_power(dev, output, true);
++ else
++ mrst_dsi_set_power(dev, output, false);
++
++ /* XXX: We never power down the DSI pairs. */
++}
++
++static void mrst_dsi_save(struct drm_connector *connector)
++{
++#if 0 /* JB: Disable for drop */
++ struct drm_device *dev = connector->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_save \n");
++#endif /* PRINT_JLIU7 */
++
++ dev_priv->savePP_ON = REG_READ(LVDSPP_ON);
++ dev_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
++ dev_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
++ dev_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
++ dev_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
++ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
++ BACKLIGHT_DUTY_CYCLE_MASK);
++
++ /*
++ * make backlight to full brightness
++ */
++ dsi_backlight = mrst_dsi_get_max_backlight(dev);
++#endif
++}
++
++static void mrst_dsi_restore(struct drm_connector *connector)
++{
++#if 0 /* JB: Disable for drop */
++ struct drm_device *dev = connector->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_restore \n");
++#endif /* PRINT_JLIU7 */
++
++ REG_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
++ REG_WRITE(LVDSPP_ON, dev_priv->savePP_ON);
++ REG_WRITE(LVDSPP_OFF, dev_priv->savePP_OFF);
++ REG_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
++ *REG_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
++ if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
++ mrst_dsi_set_power(dev, true);
++ else
++ mrst_dsi_set_power(dev, false);
++#endif
++}
++
++static void mrst_dsi_prepare(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++ struct psb_intel_mode_device *mode_dev = output->mode_dev;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_prepare \n");
++#endif /* PRINT_JLIU7 */
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
++ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
++ BACKLIGHT_DUTY_CYCLE_MASK);
++
++ mrst_dsi_set_power(dev, output, false);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++static void mrst_dsi_commit(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++ struct psb_intel_mode_device *mode_dev = output->mode_dev;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_commit \n");
++#endif /* PRINT_JLIU7 */
++
++ if (mode_dev->backlight_duty_cycle == 0)
++ mode_dev->backlight_duty_cycle =
++ mrst_dsi_get_max_backlight(dev);
++
++ mrst_dsi_set_power(dev, output, true);
++
++#if DUMP_REGISTER
++ dump_dsi_registers(dev);
++#endif /* DUMP_REGISTER */
++}
++
++#if 0
++/* ************************************************************************* *\
++FUNCTION: GetHS_TX_timeoutCount
++DESCRIPTION: In burst mode, value greater than one DPI line Time in byte clock
++ (txbyteclkhs). To timeout this timer 1+ of the
++ above said value is recommended.
++
++ In non-burst mode, Value greater than one DPI frame time
++ in byte clock(txbyteclkhs).
++
++ To timeout this timer 1+ of the above said value is recommended.
++
++\* ************************************************************************* */
++static u32 GetHS_TX_timeoutCount(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++
++ u32 timeoutCount = 0, HTOT_count = 0, VTOT_count = 0, HTotalPixel = 0;
++
++ /* Total pixels need to be transfer per line*/
++ HTotalPixel = (dev_priv->HsyncWidth +
++ dev_priv->HbackPorch +
++ dev_priv->HfrontPorch) *
++ dev_priv->laneCount +
++ dev_priv->HactiveArea;
++
++ /* byte count = (pixel count * bits per pixel) / 8 */
++ HTOT_count = (HTotalPixel * dev_priv->bpp) / 8;
++
++ if (dev_priv->videoModeFormat == BURST_MODE) {
++ timeoutCount = HTOT_count + 1;
++#if 1 /*FIXME remove it after power-on */
++ VTOT_count = dev_priv->VactiveArea +
++ dev_priv->VbackPorch +
++ dev_priv->VfrontPorch + dev_priv->VsyncWidth;
++
++ /* timeoutCount = (HTOT_count * VTOT_count) + 1; */
++ timeoutCount = (HTOT_count * VTOT_count) + 1;
++#endif
++ } else {
++ VTOT_count = dev_priv->VactiveArea +
++ dev_priv->VbackPorch +
++ dev_priv->VfrontPorch +
++ dev_priv->VsyncWidth;
++ /* timeoutCount = (HTOT_count * VTOT_count) + 1; */
++ timeoutCount = (HTOT_count * VTOT_count) + 1;
++ }
++
++ return timeoutCount & 0xFFFF;
++}
++
++/* ************************************************************************* *\
++FUNCTION: GetLP_RX_timeoutCount
++
++DESCRIPTION: The timeout value is protocol specific. Time out value is
++ calculated from txclkesc(50ns).
++
++ Minimum value =
++ Time to send one Trigger message = 4 X txclkesc
++ [Escape mode entry sequence)
++ + 8-bit trigger message (2x8xtxclkesc)
++ +1 txclksesc [stop_state]
++ = 21 X txclkesc [ 15h]
++
++ Maximum Value =
++ Time to send a long packet with maximum payload data
++ = 4 X txclkesc [Escape mode entry sequence)
++ + 8-bit Low power data transmission Command (2x8xtxclkesc)
++ + packet header [ 4X8X2X txclkesc]
++ +payload [ nX8X2Xtxclkesc]
++ +CRC[2X8X2txclkesc]
++ +1 txclksesc [stop_state]
++ = 117 txclkesc +n[payload in terms of bytes]X16txclkesc.
++
++\* ************************************************************************* */
++static u32 GetLP_RX_timeoutCount(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++
++ u32 timeoutCount = 0;
++
++ if (dev_priv->config_phase) {
++ /* Assuming 256 byte DDB data.*/
++ timeoutCount = 117 + 256 * 16;
++ } else {
++ /* For DPI video only mode use the minimum value.*/
++ timeoutCount = 0x15;
++#if 1 /*FIXME remove it after power-on */
++ /* Assuming 256 byte DDB data.*/
++ timeoutCount = 117 + 256 * 16;
++#endif
++ }
++
++ return timeoutCount;
++}
++#endif /* #if 0 - to avoid warnings */
++
++/* ************************************************************************* *\
++FUNCTION: GetHSA_Count
++
++DESCRIPTION: Shows the horizontal sync value in terms of byte clock
++ (txbyteclkhs)
++ Minimum HSA period should be sufficient to transmit a hsync start short
++ packet(4 bytes)
++ i) For Non-burst Mode with sync pulse, Min value 4 in decimal
++ [plus an optional 6 bytes for a zero payload blanking
++ packet]. But if the value is less than 10 but more
++ than 4, then this count will be added to the HBP s
++ count for one lane.
++ ii) For Non-Burst Sync Event & Burst Mode, there is no HSA,
++ so you can program this to zero. If you program this
++ register, these byte values will be added to HBP.
++ iii) For Burst mode of operation, normally the values
++ programmed in terms of byte clock are based on the
++ principle - time for transferring
++ HSA in Burst mode is the same as in non-burst mode.
++\* ************************************************************************* */
++static u32 GetHSA_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 HSA_count;
++ u32 HSA_countX8;
++
++ /* byte clock count = (pixel clock count * bits per pixel) /8 */
++ /*HSA_countX8 = dev_priv->HsyncWidth * dev_priv->bpp;
++
++ if (dev_priv->videoModeFormat == BURST_MODE) {
++ HSA_countX8 *= dev_priv->DDR_Clock /
++ dev_priv->DDR_Clock_Calculated;
++ }
++
++ HSA_count = HSA_countX8 / 8;*/
++
++ /* since mode_set already computed Display Controller timings,
++ * read the register and compute mipi timings.
++ */
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ HSA_countX8 = REG_READ(HSYNC_A);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else
++ HSA_countX8 = dev_priv->saveHSYNC_A;
++
++ /* Get the hsync pulse width */
++ HSA_count = ((HSA_countX8 & 0xffff0000)>>16) - (HSA_countX8 & 0xffff);
++ /* compute HSA according to equation:
++ (hsync_end - hsync_start) * 24 bpp / (2 * 8 bits per lane * 2 lanes)*/
++ HSA_count = (HSA_count * dev_priv->bpp)/(2 * 8 * 2);
++ if (HSA_count < 4) /* minimum value of 4 */
++ HSA_count = 4;
++
++ return HSA_count;
++}
++
++/* ************************************************************************* *\
++FUNCTION: GetHBP_Count
++
++DESCRIPTION: Shows the horizontal back porch value in terms of txbyteclkhs.
++ Minimum HBP period should be sufficient to transmit a "hsync end short
++ packet(4 bytes) + Blanking packet overhead(6 bytes) +
++ RGB packet header(4 bytes)"
++ For Burst mode of operation, normally the values programmed in terms of
++ byte clock are based on the principle - time for transferring HBP
++ in Burst mode is the same as in non-burst mode.
++
++ Min value - 14 in decimal
++ [accounted with zero payload for blanking packet] for one lane.
++ Max value - any value greater than 14 based on DPI resolution
++\* ************************************************************************* */
++static u32 GetHBP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 HBP_count;
++ u32 HBE, HSE;
++
++ /* byte clock count = (pixel clock count * bits per pixel) /8 */
++ /*HBP_countX8 = dev_priv->HbackPorch * dev_priv->bpp;
++
++ if (dev_priv->videoModeFormat == BURST_MODE) {
++ HBP_countX8 *= dev_priv->DDR_Clock /
++ dev_priv->DDR_Clock_Calculated;
++ }
++
++ HBP_count = HBP_countX8 / 8;*/
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ HBE = (REG_READ(HBLANK_A) & 0xffff0000) >> 16;
++ HSE = (REG_READ(HSYNC_A) & 0xffff0000) >> 16;
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ HBE = (dev_priv->saveHBLANK_A & 0xffff0000) >> 16;
++ HSE = (dev_priv->saveHSYNC_A & 0xffff0000) >> 16;
++ }
++
++ /* Get the hsync pulse width */
++ HBP_count = HBE - HSE;
++ /*compute HSA according to equation:
++ *(hblank_end - hsync_end) * 24 bpp / (2 * 8 bits per lane * 2 lanes)*/
++ HBP_count = (HBP_count * dev_priv->bpp)/(2 * 8 * 2);
++ if (HBP_count < 8) /* minimum value of 8 */
++ HBP_count = 8;
++
++ return HBP_count;
++}
++
++/* ************************************************************************* *\
++FUNCTION: GetHFP_Count
++
++DESCRIPTION: Shows the horizontal front porch value in terms of txbyteclkhs.
++Minimum HFP period should be sufficient to transmit "RGB Data packet
++footer(2 bytes) + Blanking packet overhead(6 bytes)" for non burst mode.
++
++For burst mode, Minimum HFP period should be sufficient to transmit
++"Blanking packet overhead(6 bytes)".
++
++For Burst mode of operation, normally the values programmed in terms of
++ byte clock are based on the principle - time for transferring HFP
++ in Burst mode is the same as in non-burst mode.
++
++Min value - 8 in decimal for non-burst mode [accounted with zero payload
++ for blanking packet] for one lane.
++Min value - 6 in decimal for burst mode for one lane.
++
++Max value - any value greater than the minimum value based on DPI resolution
++\* ************************************************************************* */
++static u32 GetHFP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 HFP_count;
++ u32 HBS, HSS;
++
++ /* byte clock count = (pixel clock count * bits per pixel) /8 */
++ /*HFP_countX8 = dev_priv->HfrontPorch * dev_priv->bpp;
++
++ if (dev_priv->videoModeFormat == BURST_MODE) {
++ HFP_countX8 *= dev_priv->DDR_Clock /
++ dev_priv->DDR_Clock_Calculated;
++ }
++
++ HFP_count = HFP_countX8 / 8;*/
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ HBS = REG_READ(HBLANK_A) & 0xffff;
++ HSS = REG_READ(HSYNC_A) & 0xffff;
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ HBS = dev_priv->saveHBLANK_A & 0xffff;
++ HSS = dev_priv->saveHSYNC_A & 0xffff;
++ }
++
++ /* Get the hsync pulse width */
++ HFP_count = HSS - HBS;
++ /*compute HSA according to equation:
++ *(hblank_end - hsync_end) * 24 bpp / (2 * 8 bits per lane * 2 lanes)*/
++ HFP_count = (HFP_count * dev_priv->bpp)/(2 * 8 * 2);
++ if (HFP_count < 8) /* minimum value of 8 */
++ HFP_count = 8;
++
++ return HFP_count;
++}
++
++/* ************************************************************************* *\
++FUNCTION: GetHAdr_Count
++
++DESCRIPTION: Shows the horizontal active area value in terms of txbyteclkhs.
++ In Non Burst Mode, Count equal to RGB word count value
++
++In Burst Mode, RGB pixel packets are time-compressed, leaving more time
++ during a scan line for LP mode (saving power) or for multiplexing
++ other transmissions onto the DSI link. Hence, the count equals the
++ time in txbyteclkhs for sending time compressed RGB pixels plus
++ the time needed for moving to power save mode or the time needed
++ for secondary channel to use the DSI link.
++
++But if the left out time for moving to low power mode is less than
++ 8 txbyteclkhs [2txbyteclkhs for RGB data packet footer and
++ 6txbyteclkhs for a blanking packet with zero payload], then
++ this count will be added to the HFP's count for one lane.
++
++Min value - 8 in decimal for non-burst mode [accounted with zero payload
++ for blanking packet] for one lane.
++Min value - 6 in decimal for burst mode for one lane.
++
++Max value - any value greater than the minimum value based on DPI resolution
++\* ************************************************************************* */
++static u32 GetHAdr_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 HAdr_count;
++ u32 Hactive;
++
++ /* byte clock count = (pixel clock count * bits per pixel) /8 */
++ /*HAdr_countX8 = dev_priv->HactiveArea * dev_priv->bpp;
++
++ if (dev_priv->videoModeFormat == BURST_MODE) {
++ HAdr_countX8 *= dev_priv->DDR_Clock /
++ dev_priv->DDR_Clock_Calculated;
++ }
++
++ HAdr_count = HAdr_countX8 / 8;*/
++
++ /* use HactiveArea instead of H_TOTAL register or else panel
++ centering won't work.*/
++ Hactive = dev_priv->HactiveArea;
++
++ /* compute HAdr according to equation:
++ * (hactive * 24 bpp/8) / 2 lanes)*/
++
++ HAdr_count = (Hactive * dev_priv->bpp/8) / 2;
++
++ return HAdr_count;
++}
++
++/* ************************************************************************* *\
++FUNCTION: GetVSA_Count
++
++DESCRIPTION: Shows the vertical sync value in terms of lines
++
++\* ************************************************************************* */
++static u32 GetVSA_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 VSA_count;
++ u32 VSA_countX8;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ VSA_countX8 = REG_READ(VSYNC_A);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else
++ VSA_countX8 = dev_priv->saveVSYNC_A;
++
++ /* Get the vsync pulse width */
++ VSA_count = ((VSA_countX8 & 0xffff0000)>>16) - (VSA_countX8 & 0xffff);
++
++ if (VSA_count < 2) /* minimum value of 2 */
++ VSA_count = 2;
++
++ return VSA_count;
++}
++
++/* ************************************************************************* *\
++ * FUNCTION: GetVBP_Count
++ *
++ * DESCRIPTION: Shows the vertical back porch value in lines.
++ *
++\* ************************************************************************* */
++static u32 GetVBP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 VBP_count;
++ u32 VBE, VSE;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ VBE = (REG_READ(VBLANK_A) & 0xffff0000) >> 16;
++ VSE = (REG_READ(VSYNC_A) & 0xffff0000) >> 16;
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ VBE = (dev_priv->saveVBLANK_A & 0xffff0000) >> 16;
++ VSE = (dev_priv->saveVSYNC_A & 0xffff0000) >> 16;
++ }
++
++	/* Get the vertical back porch */
++ VBP_count = VBE - VSE;
++
++ if (VBP_count < 2) /* minimum value of 2 */
++ VBP_count = 2;
++
++ return VBP_count;
++}
++/* ************************************************************************* *\
++ * FUNCTION: GetVFP_Count
++ *
++ * DESCRIPTION: Shows the vertical front porch value in terms of lines.
++ *
++\* ************************************************************************* */
++static u32 GetVFP_Count(struct drm_device *dev, DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 VFP_count;
++ u32 VBS, VSS;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ VBS = REG_READ(VBLANK_A) & 0xffff;
++ VSS = REG_READ(VSYNC_A) & 0xffff;
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ VBS = dev_priv->saveVBLANK_A & 0xffff;
++ VSS = dev_priv->saveVSYNC_A & 0xffff;
++ }
++
++	/* Get the vertical front porch */
++ VFP_count = VSS - VBS;
++
++ if (VFP_count < 2) /* minimum value of 2 */
++ VFP_count = 2;
++
++ return VFP_count;
++}
++
++#if 0
++/* ************************************************************************* *\
++FUNCTION: GetHighLowSwitchCount
++
++DESCRIPTION: High speed to low power or Low power to high speed switching time
++ in terms byte clock (txbyteclkhs). This value is based on the
++ byte clock (txbyteclkhs) and low power clock frequency (txclkesc)
++
++Typical value - Number of byte clocks required to switch from low power mode
++ to high speed mode after "txrequesths" is asserted.
++
++The worst count value among the low to high or high to low switching time
++ in terms of txbyteclkhs has to be programmed in this register.
++
++Useful Formulae:
++ DDR clock period = 2 times UI
++ txbyteclkhs clock = 8 times UI
++ Tlpx = 1 / txclkesc
++ CALCULATION OF LOW POWER TO HIGH SPEED SWITCH COUNT VALUE
++ (from Standard D-PHY spec)
++
++ LP01 + LP00 + HS0 = 1Tlpx + 1Tlpx + 3Tlpx [Approx] +
++ 1DDR clock [2UI] + 1txbyteclkhs clock [8UI]
++
++ CALCULATION OF HIGH SPEED TO LOW POWER SWITCH COUNT VALUE
++ (from Standard D-PHY spec)
++
++ Ths-trail = 1txbyteclkhs clock [8UI] +
++ 5DDR clock [10UI] + 4 Tlpx [Approx]
++\* ************************************************************************* */
++static u32 GetHighLowSwitchCount(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 HighLowSwitchCount, HighToLowSwitchCount, LowToHighSwitchCount;
++
++/* ************************************************************************* *\
++CALCULATION OF HIGH SPEED TO LOW POWER SWITCH COUNT VALUE
++(from Standard D-PHY spec)
++
++Ths-trail = 1txbyteclkhs clock [8UI] + 5DDR clock [10UI] + 4 Tlpx [Approx]
++
++Tlpx = 50 ns, Using max txclkesc (20MHz)
++
++txbyteclkhs_period = 4000 / dev_priv->DDR_Clock; in ns
++UI_period = 500 / dev_priv->DDR_Clock; in ns
++
++HS_to_LP = Ths-trail = 18 * UI_period + 4 * Tlpx
++ = 9000 / dev_priv->DDR_Clock + 200;
++
++HighToLowSwitchCount = HS_to_LP / txbyteclkhs_period
++ = (9000 / dev_priv->DDR_Clock + 200) / (4000 / dev_priv->DDR_Clock)
++ = (9000 + (200 * dev_priv->DDR_Clock)) / 4000
++
++\* ************************************************************************* */
++ HighToLowSwitchCount = (9000 + (200 * dev_priv->DDR_Clock)) / 4000 + 1;
++
++/* ************************************************************************* *\
++CALCULATION OF LOW POWER TO HIGH SPEED SWITCH COUNT VALUE
++(from Standard D-PHY spec)
++
++LP01 + LP00 + HS0 = 1Tlpx + 1Tlpx + 3Tlpx [Approx] +
++1DDR clock [2UI] + 1txbyteclkhs clock [8UI]
++
++ LP_to_HS = 10 * UI_period + 5 * Tlpx =
++ = 5000 / dev_priv->DDR_Clock + 250;
++
++ LowToHighSwitchCount = LP_to_HS / txbyteclkhs_period
++ = (5000 / dev_priv->DDR_Clock + 250) /
++ (4000 / dev_priv->DDR_Clock)
++
++ = (5000 + (250 * dev_priv->DDR_Clock)) / 4000
++
++\* ************************************************************************* */
++ LowToHighSwitchCount = (5000 + (250 * dev_priv->DDR_Clock)) / 4000 + 1;
++
++ if (HighToLowSwitchCount > LowToHighSwitchCount)
++ HighLowSwitchCount = HighToLowSwitchCount;
++ else
++ HighLowSwitchCount = LowToHighSwitchCount;
++
++ /* FIXME jliu need to fine tune the above formulae and remove the
++ * following after power on */
++ if (HighLowSwitchCount < 0x1f)
++ HighLowSwitchCount = 0x1f;
++
++ return HighLowSwitchCount;
++}
++
++/* ************************************************************************* *\
++FUNCTION: mrst_gen_long_write
++DESCRIPTION:
++\* ************************************************************************* */
++static void mrst_gen_long_write(struct drm_device *dev,
++ u32 *data,
++ u16 wc,
++ u8 vc)
++{
++ u32 gen_data_reg = HS_GEN_DATA_REG;
++ u32 gen_ctrl_reg = HS_GEN_CTRL_REG;
++ u32 date_full_bit = HS_DATA_FIFO_FULL;
++ u32 control_full_bit = HS_CTRL_FIFO_FULL;
++ u16 wc_saved = wc;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_gen_long_write \n");
++#endif /* PRINT_JLIU7 */
++
++ /* sanity check */
++ if (vc > 4) {
++ DRM_ERROR
++ (KERN_ERR "MIPI Virtual channel Can't greater than 4.\n");
++ return;
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ if (0) { /* FIXME JLIU7 check if it is in LP*/
++ gen_data_reg = LP_GEN_DATA_REG;
++ gen_ctrl_reg = LP_GEN_CTRL_REG;
++ date_full_bit = LP_DATA_FIFO_FULL;
++ control_full_bit = LP_CTRL_FIFO_FULL;
++ }
++
++ while (wc >= 4) {
++ /* Check if MIPI IP generic data fifo is not full */
++ while ((REG_READ(GEN_FIFO_STAT_REG) & date_full_bit)
++ == date_full_bit) {
++ /* Do Nothing Here */
++ /* This will make checkpatch work */
++ }
++
++ /* write to data buffer */
++ REG_WRITE(gen_data_reg, *data);
++
++ wc -= 4;
++ data++;
++ }
++
++ switch (wc) {
++ case 1:
++ REG_WRITE8(gen_data_reg, *((u8 *)data));
++ break;
++ case 2:
++ REG_WRITE16(gen_data_reg, *((u16 *)data));
++ break;
++ case 3:
++ REG_WRITE16(gen_data_reg, *((u16 *)data));
++ data = (u32 *)((u8 *) data + 2);
++ REG_WRITE8(gen_data_reg, *((u8 *)data));
++ break;
++ }
++
++ /* Check if MIPI IP generic control fifo is not full */
++ while ((REG_READ(GEN_FIFO_STAT_REG) & control_full_bit)
++ == control_full_bit) {
++ /* Do Nothing Here */
++ /* This will make Checkpatch work */
++ }
++ /* write to control buffer */
++ REG_WRITE(gen_ctrl_reg, 0x29 | (wc_saved << 8) | (vc << 6));
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++/* ************************************************************************* *\
++FUNCTION: mrst_init_HIMAX_MIPI_bridge
++DESCRIPTION:
++\* ************************************************************************* */
++static void mrst_init_HIMAX_MIPI_bridge(struct drm_device *dev)
++{
++ u32 gen_data[2];
++ u16 wc = 0;
++ u8 vc = 0;
++ u32 gen_data_intel = 0x200105;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_init_HIMAX_MIPI_bridge \n");
++#endif /* PRINT_JLIU7 */
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ /* exit sleep mode */
++ wc = 0x5;
++ gen_data[0] = gen_data_intel | (0x11 << 24);
++ gen_data[1] = 0;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_pixel_format */
++ gen_data[0] = gen_data_intel | (0x3A << 24);
++ gen_data[1] = 0x77;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* Set resolution for (800X480) */
++ wc = 0x8;
++ gen_data[0] = gen_data_intel | (0x2A << 24);
++ gen_data[1] = 0x1F030000;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[0] = gen_data_intel | (0x2B << 24);
++ gen_data[1] = 0xDF010000;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* System control */
++ wc = 0x6;
++ gen_data[0] = gen_data_intel | (0xEE << 24);
++ gen_data[1] = 0x10FA;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* INPUT TIMING FOR TEST PATTERN(800X480) */
++ /* H-size */
++ gen_data[1] = 0x2000;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0301;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* V-size */
++ gen_data[1] = 0xE002;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0103;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* H-total */
++ gen_data[1] = 0x2004;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0405;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* V-total */
++ gen_data[1] = 0x0d06;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0207;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* H-blank */
++ gen_data[1] = 0x0308;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0009;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* H-blank */
++ gen_data[1] = 0x030A;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x000B;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* H-start */
++ gen_data[1] = 0xD80C;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x000D;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* V-start */
++ gen_data[1] = 0x230E;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x000F;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* RGB domain */
++ gen_data[1] = 0x0027;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* INP_FORM Setting */
++ /* set_1 */
++ gen_data[1] = 0x1C10;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_2 */
++ gen_data[1] = 0x0711;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_3 */
++ gen_data[1] = 0x0012;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_4 */
++ gen_data[1] = 0x0013;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_5 */
++ gen_data[1] = 0x2314;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_6 */
++ gen_data[1] = 0x0015;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_7 */
++ gen_data[1] = 0x2316;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_8 */
++ gen_data[1] = 0x0017;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_1 */
++ gen_data[1] = 0x0330;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC Setting */
++ /* FRC_set_2 */
++ gen_data[1] = 0x237A;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC_set_3 */
++ gen_data[1] = 0x4C7B;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC_set_4 */
++ gen_data[1] = 0x037C;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC_set_5 */
++ gen_data[1] = 0x3482;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC_set_7 */
++ gen_data[1] = 0x1785;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++#if 0
++ /* FRC_set_8 */
++ gen_data[1] = 0xD08F;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++#endif
++
++ /* OUTPUT TIMING FOR TEST PATTERN (800X480) */
++ /* out_htotal */
++ gen_data[1] = 0x2090;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0491;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_hsync */
++ gen_data[1] = 0x0392;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0093;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_hstart */
++ gen_data[1] = 0xD894;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0095;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_hsize */
++ gen_data[1] = 0x2096;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0397;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_vtotal */
++ gen_data[1] = 0x0D98;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x0299;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_vsync */
++ gen_data[1] = 0x039A;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x009B;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_vstart */
++ gen_data[1] = 0x239C;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x009D;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* out_vsize */
++ gen_data[1] = 0xE09E;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x019F;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* FRC_set_6 */
++ gen_data[1] = 0x9084;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* Other setting */
++ gen_data[1] = 0x0526;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* RBG domain */
++ gen_data[1] = 0x1177;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* rgbw */
++ /* set_1 */
++ gen_data[1] = 0xD28F;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_2 */
++ gen_data[1] = 0x02D0;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_3 */
++ gen_data[1] = 0x08D1;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_4 */
++ gen_data[1] = 0x05D2;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_5 */
++ gen_data[1] = 0x24D4;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ /* set_6 */
++ gen_data[1] = 0x00D5;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x02D7;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x00D8;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ gen_data[1] = 0x48F3;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0xD4F2;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x3D8E;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x60FD;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x00B5;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++ gen_data[1] = 0x48F4;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++	/* inside pattern */
++ gen_data[1] = 0x0060;
++ mrst_gen_long_write(dev, gen_data, wc, vc);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++#endif
++
++static void mrst_wait_for_LP_CTRL_FIFO(struct drm_device *dev)
++{
++ int timeout = 0;
++ udelay(500);
++
++ /* This will time out after approximately 2+ seconds */
++ while ((timeout < 20000) && (REG_READ(GEN_FIFO_STAT_REG) &
++ LP_CTRL_FIFO_FULL)) {
++ udelay(100);
++ timeout++;
++ }
++
++ if (timeout == 20000)
++ DRM_INFO("MIPI: LP CMD FIFO was never cleared!\n");
++}
++
++static void mrst_wait_for_HS_DATA_FIFO(struct drm_device *dev)
++{
++ int timeout = 0;
++ udelay(500);
++
++ /* This will time out after approximately 2+ seconds */
++ while ((timeout < 20000) && (REG_READ(GEN_FIFO_STAT_REG) &
++ HS_DATA_FIFO_FULL)) {
++ udelay(100);
++ timeout++;
++ }
++
++ if (timeout == 20000)
++ DRM_INFO("MIPI: HS Data FIFO was never cleared!\n");
++}
++
++static void mrst_wait_for_HS_CTRL_FIFO(struct drm_device *dev)
++{
++ int timeout = 0;
++ udelay(500);
++
++ /* This will time out after approximately 2+ seconds */
++ while ((timeout < 20000) && (REG_READ(GEN_FIFO_STAT_REG) &
++ HS_CTRL_FIFO_FULL)) {
++ udelay(100);
++ timeout++;
++ }
++ if (timeout == 20000)
++ DRM_INFO("MIPI: HS CMD FIFO was never cleared!\n");
++}
++
++/* ************************************************************************* *\
++FUNCTION: mrst_init_NSC_MIPI_bridge
++DESCRIPTION: This function is called only by mrst_dsi_mode_set and
++ restore_display_registers. since this function does not
++ acquire the mutex, it is important that the calling function
++ does!
++\* ************************************************************************* */
++void mrst_init_NSC_MIPI_bridge(struct drm_device *dev)
++{
++
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++
++ DRM_INFO("Enter mrst_init_NSC_MIPI_bridge.\n");
++
++ /* Program MIPI IP to 100MHz DSI, Non-Burst mode with sync event,
++ 2 Data Lanes */
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* enable RGB24*/
++ REG_WRITE(LP_GEN_CTRL_REG, 0x003205e3);
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* enable all error reporting*/
++ REG_WRITE(LP_GEN_CTRL_REG, 0x000040e3);
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ REG_WRITE(LP_GEN_CTRL_REG, 0x000041e3);
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* enable 2 data lane; video shaping & error reporting */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x00a842e3); /* 0x006842e3 for 1 data lane */
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* HS timeout */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x009243e3);
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* setle = 6h; low power timeout = ((2^21)-1)*4TX_esc_clks. */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x00e645e3);
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* enable all virtual channels */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x000f46e3);
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* set output strength to low-drive */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x00007de3);
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ if (dev_priv->sku_83) {
++		/* set escape clock to divide by 8 */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x000044e3);
++ } else if (dev_priv->sku_100L) {
++		/* set escape clock to divide by 16 */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x001044e3);
++ } else if (dev_priv->sku_100) {
++		/* set escape clock to divide by 32*/
++ /*REG_WRITE(LP_GEN_CTRL_REG, 0x003044e3);*/
++ REG_WRITE(LP_GEN_CTRL_REG, 0x001044e3);
++
++ /*mrst_wait_for_LP_CTRL_FIFO(dev);*/
++ /* setle = 6h; low power timeout = ((2^21)-1)*4TX_esc_clks. */
++ /*REG_WRITE(LP_GEN_CTRL_REG, 0x00ec45e3);*/
++ }
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ /* CFG_VALID=1; RGB_CLK_EN=1. */
++ REG_WRITE(LP_GEN_CTRL_REG, 0x00057fe3);
++
++ /*ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);*/
++}
++
++static int mrst_check_mipi_error(struct drm_device *dev)
++{
++ u32 int_status_reg = 0;
++ u32 relevant_error_bits = 0x0fff; /* only care about error bits 0-11 */
++ u32 reported_errors = 0;
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ REG_WRITE(LP_GEN_CTRL_REG, 0x010524); /* 2-parameter gen short read */
++
++ /* sleep 100 microseconds */
++ udelay(100);
++
++ int_status_reg = REG_READ(INTR_STAT_REG);
++ printk(KERN_ALERT "MIPI Intr Status Reg: 0x%X\n", int_status_reg);
++
++ reported_errors = int_status_reg & relevant_error_bits;
++ if (reported_errors) {
++ printk(KERN_ALERT "MIPI Init sequence reported errs: 0x%X\n",
++ reported_errors);
++ /* Clear the error bits */
++ REG_WRITE(INTR_STAT_REG, reported_errors);
++ return reported_errors;
++ }
++
++ return 0;
++}
++
++/* ************************************************************************* *\
++ * FUNCTION: mrst_init_TPO_MIPI
++ *
++ * DESCRIPTION: This function is called only by mrst_dsi_mode_set and
++ * restore_display_registers. since this function does not
++ * acquire the mutex, it is important that the calling function
++ * does!
++\* ************************************************************************* */
++void mrst_init_TPO_MIPI(struct drm_device *dev)
++{
++ /*DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;*/
++
++ DRM_INFO("Enter mrst init TPO MIPI display.\n");
++
++ /* Flip page order */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x00008036);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000229);
++
++ /* 0xF0 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x005a5af0);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000329);
++
++ /* Write protection key */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x005a5af1);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000329);
++
++ /* 0xFC */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x005a5afc);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000329);
++
++ /* 0xB7 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x770000b7);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x00000044);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000529);
++
++ /* 0xB6 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x000a0ab6);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000329);
++
++ /* 0xF2 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x081010f2);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x4a070708);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x000000c5);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000929);
++
++ /* 0xF8 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x024003f8);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x01030a04);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x0e020220);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x00000004);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000d29);
++
++ /* 0xE2 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x398fc3e2);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x0000916f);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000629);
++
++ /* 0xB0 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x000000b0);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000229);
++
++ /* 0xF4 */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x240242f4);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x78ee2002);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x2a071050);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x507fee10);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x10300710);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00001429);
++
++ /* 0xBA */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x19fe07ba);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x101c0a31);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x00000010);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000929);
++
++ /* 0xBB */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x28ff07bb);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x24280a31);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x00000034);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000929);
++
++ /* 0xFB */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x535d05fb);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x1b1a2130);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x221e180e);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x131d2120);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x535d0508);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x1c1a2131);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x231f160d);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x111b2220);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x535c2008);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x1f1d2433);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x2c251a10);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x2c34372d);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x00000023);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00003129);
++
++ /* 0xFA */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x525c0bfa);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x1c1c232f);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x2623190e);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x18212625);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x545d0d0e);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x1e1d2333);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x26231a10);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x1a222725);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x545d280f);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x21202635);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x31292013);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x31393d33);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x00000029);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00003129);
++
++ /* Set DM */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(0xb068, 0x000100f7);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(0xb070, 0x00000329);
++}
++
++static void panel_reset_on(void)
++{
++ struct ipc_pmic_reg_data tmp_reg = {0};
++
++ tmp_reg.ioc = 1;
++ tmp_reg.num_entries = 1;
++
++ tmp_reg.pmic_reg_data[0].register_address = 0xf4;
++ if (ipc_pmic_register_read(&tmp_reg)) {
++ printk(KERN_WARNING "pnl_rst_on: fail to read pmic 0xf4!\n");
++ return;
++ }
++ tmp_reg.pmic_reg_data[0].value &= 0xbf;
++
++ if (ipc_pmic_register_write(&tmp_reg, TRUE))
++ printk(KERN_WARNING "pnl_rst_on: fail to write pmic 0xe6!\n");
++}
++
++static void panel_reset_off(void)
++{
++ struct ipc_pmic_reg_data tmp_reg = {0};
++
++ printk(KERN_INFO "panel_reset_off\n");
++
++ tmp_reg.ioc = 1;
++ tmp_reg.num_entries = 1;
++
++ tmp_reg.pmic_reg_data[0].register_address = 0xf4;
++ if (ipc_pmic_register_read(&tmp_reg)) {
++ printk(KERN_WARNING "pnl_rst_off: fail to read pmic 0xf4!\n");
++ return;
++ }
++ tmp_reg.pmic_reg_data[0].value |= 0x40;
++
++ if (ipc_pmic_register_write(&tmp_reg, TRUE))
++ printk(KERN_WARNING "pnl_rst_off: fail to write pmic 0xe6!\n");
++}
++
++static void panel_reset(void)
++{
++ printk(KERN_INFO "panel_reset\n");
++
++ panel_reset_on();
++ msleep(20);
++ panel_reset_off();
++ msleep(20);
++}
++
++/* ************************************************************************* *\
++ * FUNCTION: mrst_init_LGE_MIPI
++ *
++ * DESCRIPTION: This function is called only by mrst_dsi_mode_set and
++ * restore_display_registers. since this function does not
++ * acquire the mutex, it is important that the calling function
++ * does!
++\* ************************************************************************* */
++void mrst_init_LGE_MIPI(struct drm_device *dev)
++{
++ /*DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;*/
++ int i = 0;
++
++ DRM_INFO("Enter mrst init LGE MIPI display.\n");
++
++ mrst_wait_for_LP_CTRL_FIFO(dev);
++ REG_WRITE(0xb06c, 0x00870123);
++
++ /* LGE 480x1024 Panel Initialization sequence */
++ for (i = 0; i < 10; i++) {
++ /* Panel Characteristics Settings */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xb2200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x0ec820);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x7 << 8 | 0x0 << 6);
++
++ /* Panel Driver Setting */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xb3200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x02);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x5 << 8 | 0x0 << 6);
++
++ /* Display Mode Control */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xb4200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x5 << 8 | 0x0 << 6);
++
++ /* Display Mode and Frame Memory write Mode Setting */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xb5200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x000f0f12);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x9 << 8 | 0x0 << 6);
++
++ /* Display Control (GIP Specific) */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xb6200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x40021803);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x3010);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xa << 8 | 0x0 << 6);
++
++ /* Power Setting */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xc0200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x1f01);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x6 << 8 | 0x0 << 6);
++
++ /* Power Setting */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xc3200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x03040407);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x07);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x9 << 8 | 0x0 << 6);
++
++ /* */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xc4200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x15154412);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x6d04);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xa << 8 | 0x0 << 6);
++
++ /* */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xc5200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x64);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x5 << 8 | 0x0 << 6);
++
++ /* */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xc6200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x004024);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x7 << 8 | 0x0 << 6);
++
++ /* red */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xd0200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x06774701);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00200000);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x02);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xd << 8 | 0x0 << 6);
++
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xd1200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x06774701);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00200000);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x02);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xd << 8 | 0x0 << 6);
++
++ /* green */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xd2200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x06774701);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00200000);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x02);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xd << 8 | 0x0 << 6);
++
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xd3200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x06774701);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00200000);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x02);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xd << 8 | 0x0 << 6);
++
++ /* blue */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xd4200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x06774701);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00200000);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x02);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xd << 8 | 0x0 << 6);
++
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0xd5200105);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x06774701);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x00200000);
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x02);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0xd << 8 | 0x0 << 6);
++
++ if (!mrst_check_mipi_error(dev)) {
++ i = 0;
++ break;
++ }
++ }
++
++ for (i = 0; i < 10; i++) {
++ /* Sleep Out */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x11200105);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x4 << 8 | 0x0 << 6);
++
++ if (!mrst_check_mipi_error(dev)) {
++ i = 0;
++ break;
++ }
++ }
++
++ udelay(10000);
++
++ for (i = 0; i < 10; i++) {
++ /* Display On */
++ mrst_wait_for_HS_DATA_FIFO(dev);
++ REG_WRITE(HS_GEN_DATA_REG, 0x29200105);
++ mrst_wait_for_HS_CTRL_FIFO(dev);
++ REG_WRITE(HS_GEN_CTRL_REG, 0x29 | 0x4 << 8 | 0x0 << 6);
++
++ if (!mrst_check_mipi_error(dev)) {
++ i = 0;
++ break;
++ }
++ }
++
++ /*ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);*/
++}
++
++/*enum mipi_panel_type {
++ NSC_800X480 = 0,
++ LGE_480X1024 = 1,
++ TPO_864X480 = 2
++};*/
++
++static void mrst_dsi_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct psb_intel_mode_device *mode_dev =
++ enc_to_psb_intel_output(encoder)->mode_dev;
++ struct drm_device *dev = encoder->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 dsiFuncPrgValue = 0;
++ u32 SupportedFormat = 0;
++ u32 channelNumber = 0;
++ u32 DBI_dataWidth = 0;
++ u32 resolution = 0;
++ u32 mipi_control_val = 0;
++ u32 intr_en_val = 0;
++ u32 turnaround_timeout_val = 0;
++ u32 device_reset_val = 0;
++ u32 init_count_val = 0;
++ u32 hs_tx_timeout_val = 0;
++ u32 lp_rx_timeout_val = 0;
++ u32 high_low_switch_count_val = 0;
++ u32 eot_disable_val = 0;
++ u32 lp_byteclk_val = 0;
++ u32 device_ready_val = 0;
++ /*u32 dpi_control_val = 0;*/
++ u32 vsa_count = 0;
++ u32 vbp_count = 0;
++ u32 vfp_count = 0;
++ u32 hsa_count = 0;
++ u32 hbp_count = 0;
++ u32 hfp_count = 0;
++ u32 haa_count = 0;
++ u32 video_mode_format = 0;
++ u32 max_ret_packet_size = 0;
++ uint64_t curValue = DRM_MODE_SCALE_FULLSCREEN;
++ /*enum mipi_panel_type panel_make;*/
++ u32 mipi_port, tmp_VAL;
++
++ DRM_INFO("enter mrst_dsi_mode_set \n");
++#if 0
++ switch (dev_priv->gct_data.bpi) {
++ case 1:
++ panel_make = NSC_800X480;
++ break;
++ case 2:
++ panel_make = TPO_864X480;
++ break;
++ case 3:
++ panel_make = LGE_480X1024;
++ break;
++ default:
++ DRM_INFO("MIPI: unknown panel type! Setting NSC.\n");
++ panel_make = NSC_800X480; /* assume NSC */
++ }
++
++ /* Force TPO for Aava testing */
++ panel_make = TPO_864X480;
++#endif
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ switch (dev_priv->bpp) {
++ case 16:
++ SupportedFormat = RGB_565_FMT;
++ break;
++ case 18:
++ SupportedFormat = RGB_666_FMT;
++ break;
++ case 24:
++ SupportedFormat = RGB_888_FMT;
++ break;
++ default:
++ DRM_INFO("mrst_dsi_mode_set, invalid bpp \n");
++ break;
++ }
++
++
++ if (dev_priv->dpi) {
++ drm_connector_property_get_value(
++ &enc_to_psb_intel_output(encoder)->base,
++ dev->mode_config.scaling_mode_property,
++ &curValue);
++
++ if (curValue == DRM_MODE_SCALE_CENTER)
++ REG_WRITE(PFIT_CONTROL, 0);
++ else if (curValue == DRM_MODE_SCALE_ASPECT) {
++ if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) ||
++ (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
++ if ((adjusted_mode->crtc_hdisplay *
++ mode->vdisplay) == (mode->hdisplay *
++ adjusted_mode->crtc_vdisplay))
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++ else if ((adjusted_mode->crtc_hdisplay *
++ mode->vdisplay) > (mode->hdisplay *
++ adjusted_mode->crtc_vdisplay))
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
++ PFIT_SCALING_MODE_PILLARBOX);
++ else
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
++ PFIT_SCALING_MODE_LETTERBOX);
++ } else
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++ } else /*(curValue == DRM_MODE_SCALE_FULLSCREEN)*/
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++
++ switch (dev_priv->panel_make) {
++ case NSC_800X480:
++ intr_en_val = 0xffffffff;
++ turnaround_timeout_val = 0x00000001;
++ device_reset_val = 0x000000ff;
++ init_count_val = 0x00000fff;
++ resolution = dev_priv->HactiveArea |
++ (dev_priv->VactiveArea << RES_V_POS);
++ SupportedFormat <<= FMT_DPI_POS;
++ dsiFuncPrgValue = dev_priv->laneCount | SupportedFormat;
++ vsa_count = GetVSA_Count(dev, dev_priv);
++ vbp_count = GetVBP_Count(dev, dev_priv);
++ vfp_count = GetVFP_Count(dev, dev_priv);
++ hsa_count = GetHSA_Count(dev, dev_priv);
++ hbp_count = GetHBP_Count(dev, dev_priv);
++ hfp_count = GetHFP_Count(dev, dev_priv);
++ haa_count = GetHAdr_Count(dev, dev_priv);
++ video_mode_format = dev_priv->videoModeFormat;
++ hs_tx_timeout_val = 0x00001000;
++ lp_rx_timeout_val = 0x0000ffff;
++ high_low_switch_count_val = 0x46;
++ eot_disable_val = 0x00000000;
++ lp_byteclk_val = 0x00000004;
++ device_ready_val = 0x00000001;
++ max_ret_packet_size = 0x40;
++ break;
++ case TPO_864X480:
++ intr_en_val = 0xffffffff;
++ turnaround_timeout_val = 0x0000000a;
++ device_reset_val = 0x000000ff;
++ init_count_val = 0x00000fff;
++ resolution = 0x01e00360;
++ dsiFuncPrgValue = 0x00000202;
++ vsa_count = 0x00000004;
++ vbp_count = 0x00000008;
++ vfp_count = 0x00000008;
++ hsa_count = 0x00000006;
++ hbp_count = 0x0000000f;
++ hfp_count = 0x0000000f;
++ haa_count = 0x00000510;
++ video_mode_format = 0x00000003;
++ hs_tx_timeout_val = 0x00090000;
++ lp_rx_timeout_val = 0x0000ffff;
++ high_low_switch_count_val = 0x00000046;
++ eot_disable_val = 0x00000000;
++ lp_byteclk_val = 0x00000004;
++ device_ready_val = 0x00000001;
++ max_ret_packet_size = 0x40;
++ break;
++ case LGE_480X1024:
++ intr_en_val = 0xffffffff;
++ turnaround_timeout_val = 0x00000012;
++ device_reset_val = 0x000000ff;
++ init_count_val = 0x00000fff;
++ resolution = 0x040001e0;
++ dsiFuncPrgValue = 0x00000202;
++ vsa_count = 0x00000005;
++ vbp_count = 0x0000000f;
++ vfp_count = 0x0000000f;
++ hsa_count = 0x00000008;
++ hbp_count = 0x00000018;
++ hfp_count = 0x0000000f;
++ haa_count = 0x00000320;
++ video_mode_format = 0x00000003;
++ hs_tx_timeout_val = 0x00ffffff;
++ lp_rx_timeout_val = 0x0000ffff;
++ high_low_switch_count_val = 0x00000016;
++ eot_disable_val = 0x00000000;
++ lp_byteclk_val = 0x00000004;
++ device_ready_val = 0x00000001;
++ max_ret_packet_size = 0x40;
++ break;
++ }
++
++ /* set 100 mhz dsi clk based on sku */
++ if (dev_priv->sku_83)
++ mipi_control_val = 0x0018; /* 100 mhz * 1 = 100 mhz */
++ else if (dev_priv->sku_100L)
++ mipi_control_val = 0x0019; /* 50 mhz * 2 = 100 mhz */
++ else if (dev_priv->sku_100)
++ mipi_control_val = 0x0018; /* 100 mhz * 1 = 100 mhz */
++
++ /* wait for PIPE A to disable */
++ while (REG_READ(0x70008) & 0x40000000) {
++ /* Do Nothing Here */
++ /* This should make checkpatch work */
++ }
++
++ /* wait for DPI FIFO to clear */
++ while ((REG_READ(GEN_FIFO_STAT_REG) & DPI_FIFO_EMPTY)
++ != DPI_FIFO_EMPTY) {
++ /* Do Nothing Here */
++ /* This should make checkpatch work */
++ }
++
++ /* Clear Device Ready Bit */
++ REG_WRITE(DEVICE_READY_REG, 0x00000000);
++
++ /* clear intr status register */
++ tmp_VAL = REG_READ(INTR_STAT_REG);
++ REG_WRITE(INTR_STAT_REG, tmp_VAL);
++
++ /* Reset Aava panel */
++ if (dev_priv->panel_make == TPO_864X480) {
++ panel_reset();
++ msleep(1000);
++ }
++
++ /* Enable MIPI Port */
++ mipi_port = MIPI_PORT_EN | MIPI_BORDER_EN;
++
++ /* Enable dithering if required */
++ if (mode_dev->panel_wants_dither)
++ mipi_port |= MRST_PANEL_8TO6_DITHER_ENABLE;
++
++ REG_WRITE(MIPI, mipi_port);
++
++ /* set the lane speed */
++ REG_WRITE(MIPI_CONTROL_REG, mipi_control_val);
++
++ /* Enable all the error interrupt */
++ REG_WRITE(INTR_EN_REG, intr_en_val);
++ REG_WRITE(TURN_AROUND_TIMEOUT_REG, turnaround_timeout_val);
++ REG_WRITE(DEVICE_RESET_REG, device_reset_val);
++ REG_WRITE(INIT_COUNT_REG, init_count_val);
++
++ REG_WRITE(DSI_FUNC_PRG_REG, dsiFuncPrgValue);
++
++ REG_WRITE(DPI_RESOLUTION_REG, resolution);
++ /*REG_WRITE(DBI_RESOLUTION_REG, 0x00000000);*/
++
++ REG_WRITE(VERT_SYNC_PAD_COUNT_REG, vsa_count);
++ REG_WRITE(VERT_BACK_PORCH_COUNT_REG, vbp_count);
++ REG_WRITE(VERT_FRONT_PORCH_COUNT_REG, vfp_count);
++
++ REG_WRITE(HORIZ_SYNC_PAD_COUNT_REG, hsa_count);
++ REG_WRITE(HORIZ_BACK_PORCH_COUNT_REG, hbp_count);
++ REG_WRITE(HORIZ_FRONT_PORCH_COUNT_REG, hfp_count);
++ REG_WRITE(HORIZ_ACTIVE_AREA_COUNT_REG, haa_count);
++
++ REG_WRITE(VIDEO_FMT_REG, video_mode_format);
++
++ REG_WRITE(HS_TX_TIMEOUT_REG, hs_tx_timeout_val);
++ REG_WRITE(LP_RX_TIMEOUT_REG, lp_rx_timeout_val);
++
++ REG_WRITE(HIGH_LOW_SWITCH_COUNT_REG,
++ high_low_switch_count_val);
++
++ REG_WRITE(EOT_DISABLE_REG, eot_disable_val);
++
++ REG_WRITE(LP_BYTECLK_REG, lp_byteclk_val);
++ REG_WRITE(MAX_RET_PAK_REG, max_ret_packet_size);
++
++ REG_WRITE(DEVICE_READY_REG, device_ready_val);
++ REG_WRITE(DPI_CONTROL_REG, DPI_TURN_ON);
++ } else {
++ /* JLIU7 FIXME VIRTUAL_CHANNEL_NUMBER_1 or
++ * VIRTUAL_CHANNEL_NUMBER_0*/
++ channelNumber =
++ VIRTUAL_CHANNEL_NUMBER_1 << DBI_CHANNEL_NUMBER_POS;
++ DBI_dataWidth = DBI_DATA_WIDTH_16BIT << DBI_DATA_WIDTH_POS;
++ dsiFuncPrgValue =
++ dev_priv->laneCount | channelNumber | DBI_dataWidth;
++ /* JLIU7 FIXME */
++ SupportedFormat <<= FMT_DBI_POS;
++ dsiFuncPrgValue |= SupportedFormat;
++ REG_WRITE(DSI_FUNC_PRG_REG, dsiFuncPrgValue);
++
++ REG_WRITE(DPI_RESOLUTION_REG, 0x00000000);
++ REG_WRITE(DBI_RESOLUTION_REG, resolution);
++ }
++
++ dev_priv->dsi_device_ready = true;
++
++ if ((dev_priv->panel_make == NSC_800X480) || (dev_priv->panel_make == LGE_480X1024))
++ dev_priv->init_drvIC(dev); /* initialize the mipi panel */
++
++ /* set the dphy settings for 100 mhz */
++ REG_WRITE(0xb080, 0x0b061c04);
++
++ REG_WRITE(PIPEACONF, dev_priv->pipeconf);
++ /* REG_READ(PIPEACONF); */
++
++ /* Wait for 20ms for the pipe enable to take effect. */
++ /*udelay(20000);*/
++
++ REG_WRITE(DSPACNTR, dev_priv->dspcntr);
++
++ /* Wait for 20ms for the plane enable to take effect. */
++ /*udelay(20000);*/
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++/**
++ * Detect the MIPI connection.
++ *
++ * This always returns CONNECTOR_STATUS_CONNECTED.
++ * This connector should only have
++ * been set up if the MIPI was actually connected anyway.
++ */
++static enum drm_connector_status mrst_dsi_detect(struct drm_connector
++ *connector)
++{
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_dsi_detect \n");
++#endif /* PRINT_JLIU7 */
++
++ return connector_status_connected;
++}
++
++/**
++ * Return the list of MIPI DDB modes if available.
++ */
++static int mrst_dsi_get_modes(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct psb_intel_mode_device *mode_dev = psb_intel_output->mode_dev;
++
++/* FIXME get the MIPI DDB modes */
++
++ /* Didn't get an DDB, so
++ * Set wide sync ranges so we get all modes
++ * handed to valid_mode for checking
++ */
++ connector->display_info.min_vfreq = 0;
++ connector->display_info.max_vfreq = 200;
++ connector->display_info.min_hfreq = 0;
++ connector->display_info.max_hfreq = 200;
++
++ if (mode_dev->panel_fixed_mode != NULL) {
++ struct drm_display_mode *mode =
++ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
++ drm_mode_probed_add(connector, mode);
++ return 1;
++ }
++
++ return 0;
++}
++
++static const struct drm_encoder_helper_funcs mrst_dsi_helper_funcs = {
++ .dpms = mrst_dsi_dpms,
++ .mode_fixup = psb_intel_lvds_mode_fixup,
++ .prepare = mrst_dsi_prepare,
++ .mode_set = mrst_dsi_mode_set,
++ .commit = mrst_dsi_commit,
++};
++
++static const struct drm_connector_helper_funcs
++ mrst_dsi_connector_helper_funcs = {
++ .get_modes = mrst_dsi_get_modes,
++ .mode_valid = psb_intel_lvds_mode_valid,
++ .best_encoder = psb_intel_best_encoder,
++};
++
++static const struct drm_connector_funcs mrst_dsi_connector_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .save = mrst_dsi_save,
++ .restore = mrst_dsi_restore,
++ .detect = mrst_dsi_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .set_property = psb_intel_lvds_set_property,
++ .destroy = psb_intel_lvds_destroy,
++};
++
++/** Returns the panel fixed mode from configuration. */
++/** FIXME JLIU7 need to revist it. */
++struct drm_display_mode *mrst_dsi_get_configuration_mode(struct drm_device *dev)
++{
++ struct drm_display_mode *mode;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ u8 panel_index = dev_priv->gct_data.bpi;
++ u8 panel_type = dev_priv->gct_data.pt;
++ struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
++ bool use_gct = false;
++
++ DRM_INFO("Enter mrst_dsi_get_configuration_mode\n");
++
++ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++ if (!mode)
++ return NULL;
++
++ if (dev_priv->vbt_data.Size != 0x00) /*if non-zero, vbt is present*/
++ if ((1<<panel_index) & panel_type) /* if non-zero,*/
++ use_gct = true; /*then mipi panel.*/
++
++ if (use_gct) {
++ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
++ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
++ mode->hsync_start = mode->hdisplay + \
++ ((ti->hsync_offset_hi << 8) | \
++ ti->hsync_offset_lo);
++ mode->hsync_end = mode->hsync_start + \
++ ((ti->hsync_pulse_width_hi << 8) | \
++ ti->hsync_pulse_width_lo);
++ mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
++ ti->hblank_lo);
++ mode->vsync_start = \
++ mode->vdisplay + ((ti->vsync_offset_hi << 4) | \
++ ti->vsync_offset_lo);
++ mode->vsync_end = \
++ mode->vsync_start + ((ti->vsync_pulse_width_hi << 4) | \
++ ti->vsync_pulse_width_lo);
++ mode->vtotal = mode->vdisplay + \
++ ((ti->vblank_hi << 8) | ti->vblank_lo);
++ mode->clock = ti->pixel_clock * 10;
++#if 1
++ printk(KERN_INFO "hdisplay is %d\n", mode->hdisplay);
++ printk(KERN_INFO "vdisplay is %d\n", mode->vdisplay);
++ printk(KERN_INFO "HSS is %d\n", mode->hsync_start);
++ printk(KERN_INFO "HSE is %d\n", mode->hsync_end);
++ printk(KERN_INFO "htotal is %d\n", mode->htotal);
++ printk(KERN_INFO "VSS is %d\n", mode->vsync_start);
++ printk(KERN_INFO "VSE is %d\n", mode->vsync_end);
++ printk(KERN_INFO "vtotal is %d\n", mode->vtotal);
++ printk(KERN_INFO "clock is %d\n", mode->clock);
++#endif
++
++ } else {
++
++#if 0 /* LGE 480x1024 tentative timings */
++ mode->hdisplay = 480;
++ mode->vdisplay = 1024;
++ mode->hsync_start = 499;
++ mode->hsync_end = 506;
++ mode->htotal = 517;
++ mode->vsync_start = 1039;
++ mode->vsync_end = 1041;
++ mode->vtotal = 1047;
++ mode->clock = 33264;
++#endif
++#if 1 /*FIXME jliu7 remove it later */
++ /* copy from SV - hard coded fixed mode for
++ * DSI TPO TD043MTEA2 LCD panel */
++ mode->hdisplay = 864;
++ mode->vdisplay = 480;
++ mode->hsync_start = 873;
++ mode->hsync_end = 876;
++ mode->htotal = 887;
++ mode->vsync_start = 487;
++ mode->vsync_end = 490;
++ mode->vtotal = 499;
++ mode->clock = 33264;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for DSI TPO TD043MTEA2 LCD panel */
++ mode->hdisplay = 800;
++ mode->vdisplay = 480;
++ mode->hsync_start = 836;
++ mode->hsync_end = 846;
++ mode->htotal = 1056;
++ mode->vsync_start = 489;
++ mode->vsync_end = 491;
++ mode->vtotal = 525;
++ mode->clock = 33264;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 800x480 */
++ mode->hdisplay = 800;
++ mode->vdisplay = 480;
++ mode->hsync_start = 801;
++ mode->hsync_end = 802;
++ mode->htotal = 1024;
++ mode->vsync_start = 481;
++ mode->vsync_end = 482;
++ mode->vtotal = 525;
++ mode->clock = 30994;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later, jliu7 modify it according to the spec*/
++ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1072;
++ mode->hsync_end = 1104;
++ mode->htotal = 1184;
++ mode->vsync_start = 603;
++ mode->vsync_end = 604;
++ mode->vtotal = 608;
++ mode->clock = 53990;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it, it is copied from SBIOS */
++ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1104;
++ mode->hsync_end = 1136;
++ mode->htotal = 1184;
++ mode->vsync_start = 603;
++ mode->vsync_end = 604;
++ mode->vtotal = 608;
++ mode->clock = 53990;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1124;
++ mode->hsync_end = 1204;
++ mode->htotal = 1312;
++ mode->vsync_start = 607;
++ mode->vsync_end = 610;
++ mode->vtotal = 621;
++ mode->clock = 48885;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 1024x768 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 768;
++ mode->hsync_start = 1048;
++ mode->hsync_end = 1184;
++ mode->htotal = 1344;
++ mode->vsync_start = 771;
++ mode->vsync_end = 777;
++ mode->vtotal = 806;
++ mode->clock = 65000;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 1366x768 */
++ mode->hdisplay = 1366;
++ mode->vdisplay = 768;
++ mode->hsync_start = 1430;
++ mode->hsync_end = 1558;
++ mode->htotal = 1664;
++ mode->vsync_start = 769;
++ mode->vsync_end = 770;
++ mode->vtotal = 776;
++ mode->clock = 77500;
++#endif /*FIXME jliu7 remove it later */
++ }
++ drm_mode_set_name(mode);
++ drm_mode_set_crtcinfo(mode, 0);
++
++ return mode;
++}
++
++/* ************************************************************************* *\
++FUNCTION: mrstDSI_clockInit
++DESCRIPTION:
++
++\* ************************************************************************* */
++static u32 sku_83_mipi_2xclk[4] = {166667, 333333, 444444, 666667};
++static u32 sku_100_mipi_2xclk[4] = {200000, 400000, 533333, 800000};
++static u32 sku_100L_mipi_2xclk[4] = {100000, 200000, 266667, 400000};
++#define MIPI_2XCLK_COUNT 0x04
++
++static bool mrstDSI_clockInit(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ u32 Htotal = 0, Vtotal = 0, RRate = 0, mipi_2xclk = 0;
++ u32 i = 0;
++ u32 *p_mipi_2xclk = NULL;
++
++#if 0 /* JLIU7_PO old values */
++ /* FIXME jliu7 DPI hard coded for TPO TD043MTEA2 LCD panel */
++ dev_priv->pixelClock = 33264; /*KHz*/
++ dev_priv->HsyncWidth = 10;
++ dev_priv->HbackPorch = 210;
++ dev_priv->HfrontPorch = 36;
++ dev_priv->HactiveArea = 800;
++ dev_priv->VsyncWidth = 2;
++ dev_priv->VbackPorch = 34;
++ dev_priv->VfrontPorch = 9;
++ dev_priv->VactiveArea = 480;
++ dev_priv->bpp = 24;
++
++ /* FIXME jliu7 DBI hard coded for TPO TD043MTEA2 LCD panel */
++ dev_priv->dbi_pixelClock = 33264; /*KHz*/
++ dev_priv->dbi_HsyncWidth = 10;
++ dev_priv->dbi_HbackPorch = 210;
++ dev_priv->dbi_HfrontPorch = 36;
++ dev_priv->dbi_HactiveArea = 800;
++ dev_priv->dbi_VsyncWidth = 2;
++ dev_priv->dbi_VbackPorch = 34;
++ dev_priv->dbi_VfrontPorch = 9;
++ dev_priv->dbi_VactiveArea = 480;
++ dev_priv->dbi_bpp = 24;
++#else /* JLIU7_PO old values */
++ /* FIXME jliu7 DPI hard coded for TPO TD043MTEA2 LCD panel */
++ /* FIXME Pre-Si value, 1 or 2 lanes; 50MHz; Non-Burst w/ sync event */
++ dev_priv->pixelClock = 33264; /*KHz*/
++ dev_priv->HsyncWidth = 10;
++ dev_priv->HbackPorch = 8;
++ dev_priv->HfrontPorch = 3;
++ dev_priv->HactiveArea = 800;
++ dev_priv->VsyncWidth = 2;
++ dev_priv->VbackPorch = 3;
++ dev_priv->VfrontPorch = 2;
++ dev_priv->VactiveArea = 480;
++ dev_priv->bpp = 24;
++
++ /* FIXME jliu7 DBI hard coded for TPO TD043MTEA2 LCD panel */
++ dev_priv->dbi_pixelClock = 33264; /*KHz*/
++ dev_priv->dbi_HsyncWidth = 10;
++ dev_priv->dbi_HbackPorch = 8;
++ dev_priv->dbi_HfrontPorch = 3;
++ dev_priv->dbi_HactiveArea = 800;
++ dev_priv->dbi_VsyncWidth = 2;
++ dev_priv->dbi_VbackPorch = 3;
++ dev_priv->dbi_VfrontPorch = 2;
++ dev_priv->dbi_VactiveArea = 480;
++ dev_priv->dbi_bpp = 24;
++#endif /* JLIU7_PO old values */
++
++ Htotal = dev_priv->HsyncWidth
++ + dev_priv->HbackPorch
++ + dev_priv->HfrontPorch
++ + dev_priv->HactiveArea;
++ Vtotal = dev_priv->VsyncWidth
++ + dev_priv->VbackPorch
++ + dev_priv->VfrontPorch
++ + dev_priv->VactiveArea;
++
++ RRate = ((dev_priv->pixelClock * 1000) / (Htotal * Vtotal)) + 1;
++
++ dev_priv->RRate = RRate;
++
++ /* ddr clock frequence = (pixel clock frequence * bits per pixel)/2*/
++ mipi_2xclk = (dev_priv->pixelClock * dev_priv->bpp) /
++ dev_priv->laneCount; /* KHz */
++ dev_priv->DDR_Clock_Calculated = mipi_2xclk / 2; /* KHz */
++
++ DRM_DEBUG("mrstDSI_clockInit RRate = %d, mipi_2xclk = %d. \n",
++ RRate, mipi_2xclk);
++
++ if (dev_priv->sku_100)
++ p_mipi_2xclk = sku_100_mipi_2xclk;
++ else if (dev_priv->sku_100L)
++ p_mipi_2xclk = sku_100L_mipi_2xclk;
++ else
++ p_mipi_2xclk = sku_83_mipi_2xclk;
++
++ for (; i < MIPI_2XCLK_COUNT; i++) {
++ if ((dev_priv->DDR_Clock_Calculated * 2) < p_mipi_2xclk[i])
++ break;
++ }
++
++ if (i == MIPI_2XCLK_COUNT) {
++ DRM_DEBUG("mrstDSI_clkInit DDR clk too big-DDR_Clk_Calcd=%d\n",
++ dev_priv->DDR_Clock_Calculated);
++ return false;
++ }
++
++ dev_priv->DDR_Clock = p_mipi_2xclk[i] / 2;
++ dev_priv->ClockBits = i;
++
++#if 1 /* FIXME remove it after power on*/
++ DRM_DEBUG("mrstDSI_clkInit mipi_2x_clk_divr=0x%x, DDR_Clk_Calcd=%d\n",
++ i,
++ dev_priv->DDR_Clock_Calculated);
++#endif /* FIXME remove it after power on*/
++
++ return true;
++}
++
++/**
++ * mrst_dsi_init - setup MIPI connectors on this device
++ * @dev: drm device
++ *
++ * Create the connector, try to figure out what
++ * modes we can display on the MIPI panel (if present).
++ */
++void mrst_dsi_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ struct psb_intel_output *psb_intel_output;
++ struct drm_connector *connector;
++ struct drm_encoder *encoder;
++
++ DRM_INFO("JLIU7 enter mrst_dsi_init \n");
++
++ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
++ if (!psb_intel_output)
++ return;
++
++ psb_intel_output->mode_dev = mode_dev;
++ connector = &psb_intel_output->base;
++ encoder = &psb_intel_output->enc;
++ drm_connector_init(dev, &psb_intel_output->base,
++ &mrst_dsi_connector_funcs,
++ DRM_MODE_CONNECTOR_MIPI);
++
++ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
++ DRM_MODE_ENCODER_MIPI);
++
++ drm_mode_connector_attach_encoder(&psb_intel_output->base,
++ &psb_intel_output->enc);
++ psb_intel_output->type = INTEL_OUTPUT_MIPI;
++
++ drm_encoder_helper_add(encoder, &mrst_dsi_helper_funcs);
++ drm_connector_helper_add(connector,
++ &mrst_dsi_connector_helper_funcs);
++ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++ connector->interlace_allowed = false;
++ connector->doublescan_allowed = false;
++
++ drm_connector_attach_property(connector,
++ dev->mode_config.scaling_mode_property,
++ DRM_MODE_SCALE_FULLSCREEN);
++ drm_connector_attach_property(connector,
++ dev_priv->backlight_property,
++ BRIGHTNESS_MAX_LEVEL);
++
++ dsi_backlight = BRIGHTNESS_MAX_LEVEL;
++ blc_pol = BLC_POLARITY_NORMAL;
++ blc_freq = 0xc8;
++
++ mode_dev->panel_wants_dither = false;
++ if (dev_priv->vbt_data.Size != 0x00) {
++ mode_dev->panel_wants_dither = (dev_priv->gct_data.Panel_MIPI_Display_Descriptor & (BIT3 | BIT4));
++ switch (dev_priv->gct_data.bpi) { /* set panel make */
++ case 1:
++ dev_priv->panel_make = NSC_800X480;
++ break;
++ case 2:
++ dev_priv->panel_make = TPO_864X480;
++ break;
++ case 3:
++ dev_priv->panel_make = LGE_480X1024;
++ break;
++ default:
++ DRM_INFO("MIPI: unknown panel type! Setting NSC.\n");
++ dev_priv->panel_make = NSC_800X480; /* assume NSC */
++ }
++ } else {
++ DRM_INFO("MIPI: No GCT! Setting NSC.\n");
++ dev_priv->panel_make = NSC_800X480;
++ }
++
++ /* set panel initialize function */
++ switch (dev_priv->panel_make) {
++ case NSC_800X480:
++ dev_priv->init_drvIC = mrst_init_NSC_MIPI_bridge;
++ break;
++ case TPO_864X480:
++ dev_priv->init_drvIC = mrst_init_TPO_MIPI;
++ break;
++ case LGE_480X1024:
++ dev_priv->init_drvIC = mrst_init_LGE_MIPI;
++ break;
++ }
++
++ /*
++ * MIPI discovery:
++ * 1) check for DDB data
++ * 2) check for VBT data
++ * 4) make sure lid is open
++ * if closed, act like it's not there for now
++ */
++
++ /* FIXME jliu7 we only support DPI */
++ dev_priv->dpi = true;
++
++ /* FIXME hard coded 4 lanes for Himax HX8858-A,
++ * 2 lanes for NSC LM2550 */
++ dev_priv->laneCount = 2;
++
++ /* FIXME hard coded for NSC PO. */
++ /* We only support BUST_MODE */
++ dev_priv->videoModeFormat = NON_BURST_MODE_SYNC_EVENTS;
++ /* FIXME change it to true if GET_DDB works */
++ dev_priv->config_phase = false;
++
++ if (!mrstDSI_clockInit(dev_priv)) {
++ DRM_DEBUG("Can't iniitialize MRST DSI clock.\n");
++#if 0 /* FIXME JLIU7 */
++ goto failed_find;
++#endif /* FIXME JLIU7 */
++ }
++
++ /*
++ * If we didn't get DDB data, try geting panel timing
++ * from configuration data
++ */
++ mode_dev->panel_fixed_mode = mrst_dsi_get_configuration_mode(dev);
++
++ if (mode_dev->panel_fixed_mode) {
++ mode_dev->panel_fixed_mode->type |=
++ DRM_MODE_TYPE_PREFERRED;
++ goto out; /* FIXME: check for quirks */
++ }
++
++ /* If we still don't have a mode after all that, give up. */
++ if (!mode_dev->panel_fixed_mode) {
++ DRM_DEBUG
++ ("Found no modes on the lvds, ignoring the LVDS\n");
++ goto failed_find;
++ }
++
++out:
++ drm_sysfs_connector_add(connector);
++ return;
++
++failed_find:
++ DRM_DEBUG("No MIIP modes found, disabling.\n");
++ drm_encoder_cleanup(encoder);
++ drm_connector_cleanup(connector);
++ kfree(connector);
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_dsi_aava.c b/drivers/gpu/drm/mrst/drv/psb_intel_dsi_aava.c
+new file mode 100644
+index 0000000..6c21480
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_dsi_aava.c
+@@ -0,0 +1,996 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * jim liu <jim.liu@intel.com>
++ */
++
++/* This enables setting backlights on with a delay at startup,
++ should be removed after resolving issue with backlights going off
++ after setting them on in initial mrst_dsi_set_power call */
++#define AAVA_BACKLIGHT_HACK
++
++#include <linux/backlight.h>
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_edid.h>
++
++#include <asm/ipc_defs.h>
++
++#ifdef AAVA_BACKLIGHT_HACK
++#include <linux/workqueue.h>
++#endif /* AAVA_BACKLIGHT_HACK */
++
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "ospm_power.h"
++
++#define DRM_MODE_ENCODER_MIPI 5
++
++//#define DBG_PRINTS 1
++#define DBG_PRINTS 0
++
++#define NEW_CRAP_SAMPLE_SETTINGS
++
++#define AAVA_EV_0_5
++
++#define VSIZE 480
++#define HSIZE 864
++#define HFP_DOTS 10
++#define HBP_DOTS 10
++#define HSYNC_DOTS 4
++#define VFP_LINES 8
++#define VBP_LINES 8
++#define VSYNC_LINES 4
++
++#define MIPI_LANES 2
++#define MIPI_HACT ((HSIZE * 3) / MIPI_LANES)
++#define MIPI_HFP ((HFP_DOTS * 3) / MIPI_LANES)
++#define MIPI_HBP ((HBP_DOTS * 3) / MIPI_LANES)
++#define MIPI_HSPAD ((HSYNC_DOTS * 3) / MIPI_LANES)
++#define MIPI_VFP VFP_LINES
++#define MIPI_VSPAD VSYNC_LINES
++#define MIPI_VBP VBP_LINES
++
++#define DISP_HPIX (HSIZE - 1)
++#define DISP_VPIX (VSIZE - 1)
++#define DISP_HBLANK_START DISP_HPIX
++#define DISP_HBLANK_END (DISP_HBLANK_START + HFP_DOTS + HSYNC_DOTS + HBP_DOTS - 1)
++#define DISP_HSYNC_START (DISP_HBLANK_START + HFP_DOTS - 1)
++#define DISP_HSYNC_END (DISP_HSYNC_START + HSYNC_DOTS - 1)
++#define DISP_VBLANK_START DISP_VPIX
++#define DISP_VBLANK_END (DISP_VBLANK_START + VFP_LINES + VSYNC_LINES + VBP_LINES - 1)
++#define DISP_VSYNC_START (DISP_VBLANK_START + VFP_LINES - 1)
++#define DISP_VSYNC_END (DISP_VSYNC_START + VSYNC_LINES - 1)
++
++#define BRIGHTNESS_MAX_LEVEL 100
++
++static unsigned int dphy_reg = 0x0d0a7f06;
++static unsigned int mipi_clock = 0x2;
++
++#ifdef AAVA_BACKLIGHT_HACK
++static void bl_work_handler(struct work_struct *work);
++DECLARE_DELAYED_WORK(bl_work, bl_work_handler);
++#endif /* AAVA_BACKLIGHT_HACK */
++
++// Temporary access from sysfs begin
++static struct drm_encoder *orig_encoder;
++static void mrst_dsi_prepare(struct drm_encoder *encoder);
++static void mrst_dsi_commit(struct drm_encoder *encoder);
++static void mrst_dsi_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode);
++static void panel_reset(void);
++
++static ssize_t dphy_store(struct class *class, const char *buf, size_t len)
++{
++ ssize_t status;
++ unsigned long value;
++
++ status = strict_strtoul(buf, 16, &value);
++ dphy_reg = value;
++ printk("!!! dphy_reg = %x, clock = %x\n", dphy_reg, mipi_clock);
++
++ return len;
++}
++
++static ssize_t clock_store(struct class *class, const char *buf, size_t len)
++{
++ ssize_t status;
++ unsigned long value;
++
++ status = strict_strtoul(buf, 0, &value);
++ mipi_clock = value;
++ printk("!!! dphy_reg = %x, clock = %x\n", dphy_reg, mipi_clock);
++
++ return len;
++}
++
++static ssize_t apply_settings(struct class *class, const char *buf, size_t len)
++{
++ ssize_t status;
++ long value;
++
++ printk("!!! dphy_reg = %x, clock = %x\n", dphy_reg, mipi_clock);
++
++ status = strict_strtoul(buf, 0, &value);
++ if (value > 0) {
++ mrst_dsi_prepare(orig_encoder);
++ msleep(500);
++ if (value > 1) {
++ panel_reset();
++ msleep(500);
++ }
++ mrst_dsi_mode_set(orig_encoder, NULL, NULL);
++ msleep(500);
++ mrst_dsi_commit(orig_encoder);
++ }
++
++ return len;
++}
++// Temporary access from sysfs end
++
++static void panel_init(struct drm_device *dev)
++{
++#if DBG_PRINTS
++ printk("panel_init\n");
++#endif /* DBG_PRINTS */
++
++ /* Flip page order */
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x00008036);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000229);
++
++#ifdef NEW_CRAP_SAMPLE_SETTINGS
++ // 0xF0, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x005a5af0);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000329);
++#endif
++
++ /* Write protection key */
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x005a5af1);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000329);
++
++#ifdef NEW_CRAP_SAMPLE_SETTINGS
++ // 0xFC, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x005a5afc);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000329);
++
++ // 0xB7, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++#ifdef DOES_NOT_WORK
++ /* Suggested by TPO, doesn't work as usual */
++ REG_WRITE(0xb068, 0x110000b7);
++ REG_WRITE(0xb068, 0x00000044);
++#else
++ REG_WRITE(0xb068, 0x770000b7);
++ REG_WRITE(0xb068, 0x00000044);
++#endif
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000529);
++
++ // 0xB6, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x000a0ab6);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000329);
++
++ // 0xF2, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x081010f2);
++ REG_WRITE(0xb068, 0x4a070708);
++ REG_WRITE(0xb068, 0x000000c5);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000929);
++
++ // 0xF8, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x024003f8);
++ REG_WRITE(0xb068, 0x01030a04);
++ REG_WRITE(0xb068, 0x0e020220);
++ REG_WRITE(0xb068, 0x00000004);
++
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000d29);
++
++ // 0xE2, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x398fc3e2);
++ REG_WRITE(0xb068, 0x0000916f);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000629);
++
++#ifdef DOES_NOT_WORK
++ /* Suggested by TPO, doesn't work as usual */
++ // 0xE3, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x20f684e3);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000429);
++
++ msleep(50);
++#endif
++
++ // 0xB0, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x000000b0);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000229);
++
++ // 0xF4, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x240242f4);
++ REG_WRITE(0xb068, 0x78ee2002);
++ REG_WRITE(0xb068, 0x2a071050);
++ REG_WRITE(0xb068, 0x507fee10);
++ REG_WRITE(0xb068, 0x10300710);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00001429);
++
++ // 0xBA, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x19fe07ba);
++ REG_WRITE(0xb068, 0x101c0a31);
++ REG_WRITE(0xb068, 0x00000010);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000929);
++
++ // 0xBB, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x28ff07bb);
++ REG_WRITE(0xb068, 0x24280a31);
++ REG_WRITE(0xb068, 0x00000034);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000929);
++
++ // 0xFB, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x535d05fb);
++ REG_WRITE(0xb068, 0x1b1a2130);
++ REG_WRITE(0xb068, 0x221e180e);
++ REG_WRITE(0xb068, 0x131d2120);
++ REG_WRITE(0xb068, 0x535d0508);
++ REG_WRITE(0xb068, 0x1c1a2131);
++ REG_WRITE(0xb068, 0x231f160d);
++ REG_WRITE(0xb068, 0x111b2220);
++ REG_WRITE(0xb068, 0x535c2008);
++ REG_WRITE(0xb068, 0x1f1d2433);
++ REG_WRITE(0xb068, 0x2c251a10);
++ REG_WRITE(0xb068, 0x2c34372d);
++ REG_WRITE(0xb068, 0x00000023);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00003129);
++
++ // 0xFA, for new crap displays
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x525c0bfa);
++ REG_WRITE(0xb068, 0x1c1c232f);
++ REG_WRITE(0xb068, 0x2623190e);
++ REG_WRITE(0xb068, 0x18212625);
++ REG_WRITE(0xb068, 0x545d0d0e);
++ REG_WRITE(0xb068, 0x1e1d2333);
++ REG_WRITE(0xb068, 0x26231a10);
++ REG_WRITE(0xb068, 0x1a222725);
++ REG_WRITE(0xb068, 0x545d280f);
++ REG_WRITE(0xb068, 0x21202635);
++ REG_WRITE(0xb068, 0x31292013);
++ REG_WRITE(0xb068, 0x31393d33);
++ REG_WRITE(0xb068, 0x00000029);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00003129);
++#endif
++
++ /* Set DM */
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_DATA_FIFO_FULL) ==
++ HS_DATA_FIFO_FULL);
++ REG_WRITE(0xb068, 0x000100f7);
++ while ((REG_READ(GEN_FIFO_STAT_REG) & HS_CTRL_FIFO_FULL) ==
++ HS_CTRL_FIFO_FULL);
++ REG_WRITE(0xb070, 0x00000329);
++}
++
++
++static void panel_reset_on(void)
++{
++ struct ipc_pmic_reg_data tmp_reg = {0};
++#if DBG_PRINTS
++ printk("panel_reset_on\n");
++#endif /* DBG_PRINTS */
++ tmp_reg.ioc = 1;
++ tmp_reg.num_entries = 1;
++#ifdef AAVA_EV_0_5
++ tmp_reg.pmic_reg_data[0].register_address = 0xe6;
++ tmp_reg.pmic_reg_data[0].value = 0x01;
++#else /* CDK */
++ tmp_reg.pmic_reg_data[0].register_address = 0xf4;
++ if (ipc_pmic_register_read(&tmp_reg)) {
++ printk("panel_reset_on: failed to read pmic reg 0xf4!\n");
++ return;
++ }
++ tmp_reg.pmic_reg_data[0].value &= 0xbf;
++#endif /* AAVA_EV_0_5 */
++ if (ipc_pmic_register_write(&tmp_reg, TRUE)) {
++ printk("panel_reset_on: failed to write pmic reg 0xe6!\n");
++ }
++}
++
++
++static void panel_reset_off(void)
++{
++ struct ipc_pmic_reg_data tmp_reg = {0};
++#if DBG_PRINTS
++ printk("panel_reset_off\n");
++#endif /* DBG_PRINTS */
++ tmp_reg.ioc = 1;
++ tmp_reg.num_entries = 1;
++#ifdef AAVA_EV_0_5
++ tmp_reg.pmic_reg_data[0].register_address = 0xe6;
++ tmp_reg.pmic_reg_data[0].value = 0x09;
++#else /* CDK */
++ tmp_reg.pmic_reg_data[0].register_address = 0xf4;
++ if (ipc_pmic_register_read(&tmp_reg)) {
++ printk("panel_reset_off: failed to read pmic reg 0xf4!\n");
++ return;
++ }
++ tmp_reg.pmic_reg_data[0].value |= 0x40;
++#endif /* AAVA_EV_0_5 */
++ if (ipc_pmic_register_write(&tmp_reg, TRUE)) {
++ printk("panel_reset_off: failed to write pmic reg 0xe6!\n");
++ }
++}
++
++
++static void panel_reset(void)
++{
++#if DBG_PRINTS
++ printk("panel_reset\n");
++#endif /* DBG_PRINTS */
++
++ panel_reset_on();
++ msleep(20);
++ panel_reset_off();
++ msleep(20);
++}
++
++
++static void backlight_state(bool on)
++{
++ struct ipc_pmic_reg_data tmp_reg;
++
++#if DBG_PRINTS
++ printk("backlight_state\n");
++#endif /* DBG_PRINTS */
++
++ tmp_reg.ioc = 1;
++ tmp_reg.num_entries = 2;
++ tmp_reg.pmic_reg_data[0].register_address = 0x2a;
++ tmp_reg.pmic_reg_data[1].register_address = 0x28;
++
++ if( on ) {
++#if DBG_PRINTS
++ printk("backlight_state: ON\n");
++#endif /* DBG_PRINTS */
++ tmp_reg.pmic_reg_data[0].value = 0xaa;
++#ifdef AAVA_EV_0_5
++ tmp_reg.pmic_reg_data[1].value = 0x30;
++#else /* CDK */
++ tmp_reg.pmic_reg_data[1].value = 0x60;
++#endif /* AAVA_EV_0_5 */
++ } else {
++#if DBG_PRINTS
++ printk("backlight_state: OFF\n");
++#endif /* DBG_PRINTS */
++ tmp_reg.pmic_reg_data[0].value = 0x00;
++ tmp_reg.pmic_reg_data[1].value = 0x00;
++ }
++
++ if (ipc_pmic_register_write(&tmp_reg, TRUE)) {
++ printk("backlight_state: failed to write pmic regs 0x2a and 0x28!\n");
++ }
++}
++
++#ifdef AAVA_BACKLIGHT_HACK
++static void bl_work_handler(struct work_struct *work)
++{
++ backlight_state(true);
++}
++#endif /* AAVA_BACKLIGHT_HACK */
++
++
++/**
++ * Sets the power state for the panel.
++ */
++static void mrst_dsi_set_power(struct drm_device *dev,
++ struct psb_intel_output *output, bool on)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 pp_status;
++
++#if DBG_PRINTS
++ printk("mrst_dsi_set_power\n");
++#endif /* DBG_PRINTS */
++
++ /*
++ * The DIS device must be ready before we can change power state.
++ */
++ if (!dev_priv->dsi_device_ready)
++ {
++#if DBG_PRINTS
++ printk("mrst_dsi_set_power: !dev_priv->dsi_device_ready!\n");
++#endif /* DBG_PRINTS */
++ return;
++ }
++
++ /*
++ * We don't support dual DSI yet. May be in POR in the future.
++ */
++ if (dev_priv->dual_display)
++ {
++#if DBG_PRINTS
++ printk("mrst_dsi_set_power: dev_priv->dual_display!\n");
++#endif /* DBG_PRINTS */
++ return;
++ }
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ if (on) {
++#if DBG_PRINTS
++ printk("mrst_dsi_set_power: on\n");
++#endif /* DBG_PRINTS */
++ if (dev_priv->dpi && !dev_priv->dpi_panel_on) {
++#if DBG_PRINTS
++ printk("mrst_dsi_set_power: dpi\n");
++#endif /* DBG_PRINTS */
++ REG_WRITE(DPI_CONTROL_REG, DPI_TURN_ON);
++ REG_WRITE(PP_CONTROL,
++ (REG_READ(PP_CONTROL) | POWER_TARGET_ON));
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
++
++ /* Run TPO display specific initialisations */
++// MiKo TBD, this delay may need to be tuned
++ msleep(50);
++ panel_init(dev);
++
++ /* Set backlights on */
++ backlight_state( true );
++ dev_priv->dpi_panel_on = true;
++ }
++ } else {
++#if DBG_PRINTS
++ printk("mrst_dsi_set_power: off\n");
++#endif /* DBG_PRINTS */
++ if (dev_priv->dpi && dev_priv->dpi_panel_on) {
++#if DBG_PRINTS
++ printk("mrst_dsi_set_power: dpi\n");
++#endif /* DBG_PRINTS */
++ /* Set backlights off */
++ backlight_state( false );
++
++// MiKo TBD, something clever could be done here to save power, for example:
++// -Set display to sleep mode, or
++// -Set display to HW reset, or
++// -Shutdown the voltages to display
++
++ REG_WRITE(PP_CONTROL,
++ (REG_READ(PP_CONTROL) & ~POWER_TARGET_ON));
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while (pp_status & PP_ON);
++
++ REG_WRITE(DPI_CONTROL_REG, DPI_SHUT_DOWN);
++
++ dev_priv->dpi_panel_on = false;
++ }
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++
++static void mrst_dsi_dpms(struct drm_encoder *encoder, int mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++
++#if DBG_PRINTS
++ printk("mrst_dsi_dpms\n");
++#endif /* DBG_PRINTS */
++
++ if (mode == DRM_MODE_DPMS_ON)
++ mrst_dsi_set_power(dev, output, true);
++ else
++ mrst_dsi_set_power(dev, output, false);
++}
++
++
++static void mrst_dsi_save(struct drm_connector *connector)
++{
++#if DBG_PRINTS
++ printk("mrst_dsi_save\n");
++#endif /* DBG_PRINTS */
++ // MiKo TBD
++}
++
++
++static void mrst_dsi_restore(struct drm_connector *connector)
++{
++#if DBG_PRINTS
++ printk("mrst_dsi_restore\n");
++#endif /* DBG_PRINTS */
++ // MiKo TBD
++}
++
++
++static void mrst_dsi_prepare(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++
++#if DBG_PRINTS
++ printk("mrst_dsi_prepare\n");
++#endif /* DBG_PRINTS */
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ mrst_dsi_set_power(dev, output, false);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++
++static void mrst_dsi_commit(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++
++#if DBG_PRINTS
++ printk("mrst_dsi_commit\n");
++#endif /* DBG_PRINTS */
++
++ mrst_dsi_set_power(dev, output, true);
++}
++
++
++static void mrst_dsi_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct drm_device *dev = encoder->dev;
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ u32 SupportedFormat = 0;
++ u32 resolution = 0;
++ uint64_t curValue = DRM_MODE_SCALE_FULLSCREEN;
++
++#if DBG_PRINTS
++ printk("mrst_dsi_mode_set\n");
++#endif /* DBG_PRINTS */
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ /* Sleep to ensure that the graphics engine is ready
++ * since its mode_set is called before ours
++ */
++ msleep(100);
++
++ switch (dev_priv->bpp)
++ {
++ case 24:
++ SupportedFormat = RGB_888_FMT;
++ break;
++ default:
++ printk("mrst_dsi_mode_set, invalid bpp!\n");
++ break;
++ }
++
++ if (dev_priv->dpi) {
++ drm_connector_property_get_value(
++ &enc_to_psb_intel_output(encoder)->base,
++ dev->mode_config.scaling_mode_property,
++ &curValue);
++ if (curValue == DRM_MODE_SCALE_CENTER) {
++ REG_WRITE(PFIT_CONTROL, 0);
++ } else if (curValue == DRM_MODE_SCALE_FULLSCREEN) {
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++ } else {
++ printk("mrst_dsi_mode_set, scaling not supported!\n");
++ REG_WRITE(PFIT_CONTROL, 0);
++ }
++
++
++ /* MIPI clock ratio 1:1 */
++ //REG_WRITE(MIPI_CONTROL_REG, 0x00000018);
++ //REG_WRITE(0xb080, 0x0b061a02);
++
++ /* MIPI clock ratio 2:1 */
++ //REG_WRITE(MIPI_CONTROL_REG, 0x00000019);
++ //REG_WRITE(0xb080, 0x3f1f1c04);
++
++ /* MIPI clock ratio 3:1 */
++ //REG_WRITE(MIPI_CONTROL_REG, 0x0000001a);
++ //REG_WRITE(0xb080, 0x091f7f08);
++
++ /* MIPI clock ratio 4:1 */
++ REG_WRITE(MIPI_CONTROL_REG, (0x00000018 | mipi_clock));
++ REG_WRITE(0xb080, dphy_reg);
++
++ /* Enable all interrupts */
++ REG_WRITE(INTR_EN_REG, 0xffffffff);
++
++ REG_WRITE(TURN_AROUND_TIMEOUT_REG, 0x0000000A);
++ REG_WRITE(DEVICE_RESET_REG, 0x000000ff);
++ REG_WRITE(INIT_COUNT_REG, 0x00000fff);
++ REG_WRITE(HS_TX_TIMEOUT_REG, 0x90000);
++ REG_WRITE(LP_RX_TIMEOUT_REG, 0xffff);
++ REG_WRITE(HIGH_LOW_SWITCH_COUNT_REG, 0x46);
++ REG_WRITE(EOT_DISABLE_REG, 0x00000000);
++ REG_WRITE(LP_BYTECLK_REG, 0x00000004);
++
++ REG_WRITE(VIDEO_FMT_REG, dev_priv->videoModeFormat);
++
++ SupportedFormat <<= FMT_DPI_POS;
++ REG_WRITE(DSI_FUNC_PRG_REG,
++ (dev_priv->laneCount | SupportedFormat));
++
++ resolution = dev_priv->HactiveArea |
++ (dev_priv->VactiveArea << RES_V_POS);
++ REG_WRITE(DPI_RESOLUTION_REG, resolution);
++
++ REG_WRITE(VERT_SYNC_PAD_COUNT_REG, dev_priv->VsyncWidth);
++ REG_WRITE(VERT_BACK_PORCH_COUNT_REG, dev_priv->VbackPorch);
++ REG_WRITE(VERT_FRONT_PORCH_COUNT_REG, dev_priv->VfrontPorch);
++
++ REG_WRITE(HORIZ_SYNC_PAD_COUNT_REG, dev_priv->HsyncWidth);
++ REG_WRITE(HORIZ_BACK_PORCH_COUNT_REG, dev_priv->HbackPorch);
++ REG_WRITE(HORIZ_FRONT_PORCH_COUNT_REG, dev_priv->HfrontPorch);
++ REG_WRITE(HORIZ_ACTIVE_AREA_COUNT_REG, MIPI_HACT);
++ }
++
++ /* Enable MIPI Port */
++ REG_WRITE(MIPI, MIPI_PORT_EN);
++
++ REG_WRITE(DEVICE_READY_REG, 0x00000001);
++ REG_WRITE(DPI_CONTROL_REG, 0x00000002); /* Turn On */
++
++ dev_priv->dsi_device_ready = true;
++
++ /* Enable pipe */
++ REG_WRITE(PIPEACONF, dev_priv->pipeconf);
++ REG_READ(PIPEACONF);
++
++ /* Wait for 20ms for the pipe enable to take effect. */
++ udelay(20000);
++
++ /* Enable plane */
++ REG_WRITE(DSPACNTR, dev_priv->dspcntr);
++
++ /* Wait for 20ms for the plane enable to take effect. */
++ udelay(20000);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++
++/**
++ * Detect the MIPI connection.
++ *
++ * This always returns CONNECTOR_STATUS_CONNECTED.
++ * This connector should only have
++ * been set up if the MIPI was actually connected anyway.
++ */
++static enum drm_connector_status mrst_dsi_detect(struct drm_connector
++ *connector)
++{
++#if DBG_PRINTS
++ printk("mrst_dsi_detect\n");
++#endif /* DBG_PRINTS */
++ return connector_status_connected;
++}
++
++
++/**
++ * Return the list of MIPI DDB modes if available.
++ */
++static int mrst_dsi_get_modes(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector);
++ struct psb_intel_mode_device *mode_dev = psb_intel_output->mode_dev;
++
++ /* Didn't get an DDB, so
++ * Set wide sync ranges so we get all modes
++ * handed to valid_mode for checking
++ */
++ connector->display_info.min_vfreq = 0;
++ connector->display_info.max_vfreq = 200;
++ connector->display_info.min_hfreq = 0;
++ connector->display_info.max_hfreq = 200;
++
++ if (mode_dev->panel_fixed_mode != NULL) {
++ struct drm_display_mode *mode =
++ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
++ drm_mode_probed_add(connector, mode);
++ return 1;
++ }
++ return 0;
++}
++
++
++static const struct drm_encoder_helper_funcs mrst_dsi_helper_funcs = {
++ .dpms = mrst_dsi_dpms,
++ .mode_fixup = psb_intel_lvds_mode_fixup,
++ .prepare = mrst_dsi_prepare,
++ .mode_set = mrst_dsi_mode_set,
++ .commit = mrst_dsi_commit,
++};
++
++
++static const struct drm_connector_helper_funcs
++ mrst_dsi_connector_helper_funcs = {
++ .get_modes = mrst_dsi_get_modes,
++ .mode_valid = psb_intel_lvds_mode_valid,
++ .best_encoder = psb_intel_best_encoder,
++};
++
++
++static const struct drm_connector_funcs mrst_dsi_connector_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .save = mrst_dsi_save,
++ .restore = mrst_dsi_restore,
++ .detect = mrst_dsi_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .set_property = psb_intel_lvds_set_property,
++ .destroy = psb_intel_lvds_destroy,
++};
++
++
++/** Returns the panel fixed mode from configuration. */
++struct drm_display_mode *mrst_dsi_get_configuration_mode(struct drm_device *dev)
++{
++ struct drm_display_mode *mode;
++
++ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++ if (!mode)
++ return NULL;
++
++ /* MiKo, fixed mode for TPO display
++ Note: Using defined values for easier match with ITP scripts
++ and adding 1 since psb_intel_display.c decreases by 1
++ */
++ mode->hdisplay = (DISP_HPIX + 1);
++ mode->vdisplay = (DISP_VPIX + 1);
++ mode->hsync_start = (DISP_HSYNC_START + 1);
++ mode->hsync_end = (DISP_HSYNC_END + 1);
++ mode->htotal = (DISP_HBLANK_END + 1);
++ mode->vsync_start = (DISP_VSYNC_START + 1);
++ mode->vsync_end = (DISP_VSYNC_END + 1);
++ mode->vtotal = (DISP_VBLANK_END + 1);
++ mode->clock = 33264;
++
++ drm_mode_set_name(mode);
++ drm_mode_set_crtcinfo(mode, 0);
++
++ return mode;
++}
++
++
++/* ************************************************************************* *\
++FUNCTION: mrst_mipi_settings_init
++ `
++DESCRIPTION:
++
++\* ************************************************************************* */
++static bool mrst_mipi_settings_init(DRM_DRIVER_PRIVATE_T *dev_priv)
++{
++ /* MiKo, fixed values for TPO display */
++ dev_priv->pixelClock = 33264;
++ dev_priv->HsyncWidth = MIPI_HSPAD;
++ dev_priv->HbackPorch = MIPI_HBP;
++ dev_priv->HfrontPorch = MIPI_HFP;
++ dev_priv->HactiveArea = HSIZE;
++ dev_priv->VsyncWidth = MIPI_VSPAD;
++ dev_priv->VbackPorch = MIPI_VBP;
++ dev_priv->VfrontPorch = MIPI_VFP;
++ dev_priv->VactiveArea = VSIZE;
++ dev_priv->bpp = 24;
++
++ /* video mode */
++ dev_priv->dpi = true;
++
++ /* MiKo, set these true by default to ensure that first mode set is done
++ cleanly
++ */
++ dev_priv->dpi_panel_on = true;
++ dev_priv->dsi_device_ready = true;
++
++ /* 2 lanes */
++ dev_priv->laneCount = MIPI_LANES;
++
++ /* Burst mode */
++ dev_priv->videoModeFormat = BURST_MODE;
++
++ return true;
++}
++
++
++/**
++ * mrst_dsi_init - setup MIPI connectors on this device
++ * @dev: drm device
++ *
++ * Create the connector, try to figure out what
++ * modes we can display on the MIPI panel (if present).
++ */
++void mrst_dsi_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev)
++{
++ DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
++ struct psb_intel_output *psb_intel_output;
++ struct drm_connector *connector;
++ struct drm_encoder *encoder;
++
++#if DBG_PRINTS
++ printk("mrst_dsi_init\n");
++#endif /* DBG_PRINTS */
++
++ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
++ if (!psb_intel_output)
++ return;
++
++ panel_reset();
++
++#ifdef AAVA_BACKLIGHT_HACK
++ schedule_delayed_work(&bl_work, 2*HZ);
++#endif /* AAVA_BACKLIGHT_HACK */
++
++ psb_intel_output->mode_dev = mode_dev;
++ connector = &psb_intel_output->base;
++ encoder = &psb_intel_output->enc;
++ drm_connector_init(dev,
++ &psb_intel_output->base,
++ &mrst_dsi_connector_funcs,
++ DRM_MODE_CONNECTOR_MIPI);
++
++ drm_encoder_init(dev,
++ &psb_intel_output->enc,
++ &psb_intel_lvds_enc_funcs,
++ DRM_MODE_ENCODER_MIPI);
++
++ drm_mode_connector_attach_encoder(&psb_intel_output->base,
++ &psb_intel_output->enc);
++ psb_intel_output->type = INTEL_OUTPUT_MIPI;
++
++ drm_encoder_helper_add(encoder, &mrst_dsi_helper_funcs);
++ drm_connector_helper_add(connector, &mrst_dsi_connector_helper_funcs);
++ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++ connector->interlace_allowed = false;
++ connector->doublescan_allowed = false;
++
++ drm_connector_attach_property(connector,
++ dev->mode_config.scaling_mode_property,
++ DRM_MODE_SCALE_FULLSCREEN);
++ drm_connector_attach_property(connector,
++ dev_priv->backlight_property,
++ BRIGHTNESS_MAX_LEVEL);
++
++ if (!mrst_mipi_settings_init(dev_priv))
++ printk("Can't initialize MIPI settings\n");
++
++ /* No config phase */
++ dev_priv->config_phase = false;
++
++ /* Get the fixed mode */
++ mode_dev->panel_fixed_mode = mrst_dsi_get_configuration_mode(dev);
++ if (mode_dev->panel_fixed_mode) {
++ mode_dev->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
++ } else {
++ printk("Found no modes for MIPI!\n");
++ goto failed_find;
++ }
++// Temporary access from sysfs begin
++ orig_encoder = encoder;
++// Temporary access from sysfs end
++ drm_sysfs_connector_add(connector);
++ return;
++
++failed_find:
++ drm_encoder_cleanup(encoder);
++ drm_connector_cleanup(connector);
++ kfree(connector);
++}
++
++// Temporary access from sysfs begin
++static struct class_attribute miko_class_attrs[] = {
++ __ATTR(dphy, 0644, NULL, dphy_store),
++ __ATTR(clock, 0644, NULL, clock_store),
++ __ATTR(apply, 0200, NULL, apply_settings),
++ __ATTR_NULL,
++};
++
++static struct class miko_class = {
++ .name = "miko",
++ .owner = THIS_MODULE,
++
++ .class_attrs = miko_class_attrs,
++};
++
++static int __init miko_sysfs_init(void)
++{
++ int status;
++
++ status = class_register(&miko_class);
++ if (status < 0)
++ return status;
++
++ return status;
++}
++postcore_initcall(miko_sysfs_init);
++// Temporary access from sysfs end
++
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_i2c.c b/drivers/gpu/drm/mrst/drv/psb_intel_i2c.c
+new file mode 100644
+index 0000000..415847d
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_i2c.c
+@@ -0,0 +1,172 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ */
++
++#include <linux/i2c.h>
++#include <linux/i2c-id.h>
++#include <linux/i2c-algo-bit.h>
++
++#include "psb_drv.h"
++#include "psb_intel_reg.h"
++
++/*
++ * Intel GPIO access functions
++ */
++
++#define I2C_RISEFALL_TIME 20
++
++static int get_clock(void *data)
++{
++ struct psb_intel_i2c_chan *chan = data;
++ struct drm_device *dev = chan->drm_dev;
++ u32 val;
++
++ val = REG_READ(chan->reg);
++ return (val & GPIO_CLOCK_VAL_IN) != 0;
++}
++
++static int get_data(void *data)
++{
++ struct psb_intel_i2c_chan *chan = data;
++ struct drm_device *dev = chan->drm_dev;
++ u32 val;
++
++ val = REG_READ(chan->reg);
++ return (val & GPIO_DATA_VAL_IN) != 0;
++}
++
++static void set_clock(void *data, int state_high)
++{
++ struct psb_intel_i2c_chan *chan = data;
++ struct drm_device *dev = chan->drm_dev;
++ u32 reserved = 0, clock_bits;
++
++ /* On most chips, these bits must be preserved in software. */
++ if (!IS_I830(dev) && !IS_845G(dev))
++ reserved =
++ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
++ GPIO_CLOCK_PULLUP_DISABLE);
++
++ if (state_high)
++ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
++ else
++ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
++ GPIO_CLOCK_VAL_MASK;
++ REG_WRITE(chan->reg, reserved | clock_bits);
++ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
++}
++
++static void set_data(void *data, int state_high)
++{
++ struct psb_intel_i2c_chan *chan = data;
++ struct drm_device *dev = chan->drm_dev;
++ u32 reserved = 0, data_bits;
++
++ /* On most chips, these bits must be preserved in software. */
++ if (!IS_I830(dev) && !IS_845G(dev))
++ reserved =
++ REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
++ GPIO_CLOCK_PULLUP_DISABLE);
++
++ if (state_high)
++ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
++ else
++ data_bits =
++ GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
++ GPIO_DATA_VAL_MASK;
++
++ REG_WRITE(chan->reg, reserved | data_bits);
++ udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
++}
++
++/**
++ * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
++ * @dev: DRM device
++ * @output: driver specific output device
++ * @reg: GPIO reg to use
++ * @name: name for this bus
++ *
++ * Creates and registers a new i2c bus with the Linux i2c layer, for use
++ * in output probing and control (e.g. DDC or SDVO control functions).
++ *
++ * Possible values for @reg include:
++ * %GPIOA
++ * %GPIOB
++ * %GPIOC
++ * %GPIOD
++ * %GPIOE
++ * %GPIOF
++ * %GPIOG
++ * %GPIOH
++ * see PRM for details on how these different busses are used.
++ */
++struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
++ const u32 reg, const char *name)
++{
++ struct psb_intel_i2c_chan *chan;
++
++ chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
++ if (!chan)
++ goto out_free;
++
++ chan->drm_dev = dev;
++ chan->reg = reg;
++ snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
++ chan->adapter.owner = THIS_MODULE;
++ chan->adapter.algo_data = &chan->algo;
++ chan->adapter.dev.parent = &dev->pdev->dev;
++ chan->algo.setsda = set_data;
++ chan->algo.setscl = set_clock;
++ chan->algo.getsda = get_data;
++ chan->algo.getscl = get_clock;
++ chan->algo.udelay = 20;
++ chan->algo.timeout = usecs_to_jiffies(2200);
++ chan->algo.data = chan;
++
++ i2c_set_adapdata(&chan->adapter, chan);
++
++ if (i2c_bit_add_bus(&chan->adapter))
++ goto out_free;
++
++ /* JJJ: raise SCL and SDA? */
++ set_data(chan, 1);
++ set_clock(chan, 1);
++ udelay(20);
++
++ return chan;
++
++out_free:
++ kfree(chan);
++ return NULL;
++}
++
++/**
++ * psb_intel_i2c_destroy - unregister and free i2c bus resources
++ * @output: channel to free
++ *
++ * Unregister the adapter from the i2c layer, then free the structure.
++ */
++void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan)
++{
++ if (!chan)
++ return;
++
++ i2c_del_adapter(&chan->adapter);
++ kfree(chan);
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_lvds.c b/drivers/gpu/drm/mrst/drv/psb_intel_lvds.c
+new file mode 100644
+index 0000000..b426b53
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_lvds.c
+@@ -0,0 +1,1385 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ * Dave Airlie <airlied@linux.ie>
++ * Jesse Barnes <jesse.barnes@intel.com>
++ */
++
++#include <linux/i2c.h>
++/* #include <drm/drm_crtc.h> */
++/* #include <drm/drm_edid.h> */
++#include <drm/drmP.h>
++
++#include "psb_intel_bios.h"
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "ospm_power.h"
++
++/* MRST defines start */
++uint8_t blc_freq;
++uint8_t blc_minbrightness;
++uint8_t blc_i2caddr;
++uint8_t blc_brightnesscmd;
++int lvds_backlight; /* restore backlight to this value */
++
++u32 CoreClock;
++u32 PWMControlRegFreq;
++
++/**
++ * LVDS I2C backlight control macros
++ */
++#define BRIGHTNESS_MAX_LEVEL 100
++#define BRIGHTNESS_MASK 0xFF
++#define BLC_I2C_TYPE 0x01
++#define BLC_PWM_TYPT 0x02
++
++#define BLC_POLARITY_NORMAL 0
++#define BLC_POLARITY_INVERSE 1
++
++#define PSB_BLC_MAX_PWM_REG_FREQ (0xFFFE)
++#define PSB_BLC_MIN_PWM_REG_FREQ (0x2)
++#define PSB_BLC_PWM_PRECISION_FACTOR (10)
++#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
++#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
++
++struct psb_intel_lvds_priv {
++ /**
++ * Saved LVDO output states
++ */
++ uint32_t savePP_ON;
++ uint32_t savePP_OFF;
++ uint32_t saveLVDS;
++ uint32_t savePP_CONTROL;
++ uint32_t savePP_CYCLE;
++ uint32_t savePFIT_CONTROL;
++ uint32_t savePFIT_PGM_RATIOS;
++ uint32_t saveBLC_PWM_CTL;
++};
++
++/* MRST defines end */
++
++/**
++ * Returns the maximum level of the backlight duty cycle field.
++ */
++static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ u32 retVal;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ retVal = ((REG_READ(BLC_PWM_CTL) &
++ BACKLIGHT_MODULATION_FREQ_MASK) >>
++ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else
++ retVal = ((dev_priv->saveBLC_PWM_CTL &
++ BACKLIGHT_MODULATION_FREQ_MASK) >>
++ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
++
++ return retVal;
++}
++
++/**
++ * Set LVDS backlight level by I2C command
++ */
++static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
++ unsigned int level)
++ {
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++
++ struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
++ u8 out_buf[2];
++ unsigned int blc_i2c_brightness;
++
++ struct i2c_msg msgs[] = {
++ {
++ .addr = lvds_i2c_bus->slave_addr,
++ .flags = 0,
++ .len = 2,
++ .buf = out_buf,
++ }
++ };
++
++ blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
++ BRIGHTNESS_MASK /
++ BRIGHTNESS_MAX_LEVEL);
++
++ if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
++ blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
++
++ out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
++ out_buf[1] = (u8)blc_i2c_brightness;
++
++ if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1) {
++ DRM_DEBUG("I2C set brightness.(command, value) (%d, %d)\n",
++ blc_brightnesscmd,
++ blc_i2c_brightness);
++ return 0;
++ }
++
++ DRM_ERROR("I2C transfer error\n");
++ return -1;
++}
++
++
++static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++
++ u32 max_pwm_blc;
++ u32 blc_pwm_duty_cycle;
++
++ max_pwm_blc = psb_intel_lvds_get_max_backlight(dev);
++
++ /*BLC_PWM_CTL Should be initiated while backlight device init*/
++ BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0);
++
++ blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
++
++ if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
++ blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
++
++ blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
++ REG_WRITE(BLC_PWM_CTL,
++ (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
++ (blc_pwm_duty_cycle));
++
++ return 0;
++}
++
++/**
++ * Set LVDS backlight level either by I2C or PWM
++ */
++void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
++{
++ /*u32 blc_pwm_ctl;*/
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++
++ DRM_DEBUG("backlight level is %d\n", level);
++
++ if (!dev_priv->lvds_bl) {
++ DRM_ERROR("NO LVDS Backlight Info\n");
++ return;
++ }
++
++ if (IS_MRST(dev)) {
++ DRM_ERROR(
++ "psb_intel_lvds_set_brightness called...not expected\n");
++ return;
++ }
++
++ if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
++ psb_lvds_i2c_set_brightness(dev, level);
++ else
++ psb_lvds_pwm_set_brightness(dev, level);
++}
++
++/**
++ * Sets the backlight level.
++ *
++ * \param level backlight level, from 0 to psb_intel_lvds_get_max_backlight().
++ */
++static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ u32 blc_pwm_ctl;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_ONLY_IF_ON)) {
++ blc_pwm_ctl =
++ REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
++ REG_WRITE(BLC_PWM_CTL,
++ (blc_pwm_ctl |
++ (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ } else {
++ blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL &
++ ~BACKLIGHT_DUTY_CYCLE_MASK;
++ dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
++ (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
++ }
++}
++
++/**
++ * Sets the power state for the panel.
++ */
++static void psb_intel_lvds_set_power(struct drm_device *dev,
++ struct psb_intel_output *output, bool on)
++{
++ u32 pp_status;
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ if (on) {
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
++ POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while ((pp_status & PP_ON) == 0);
++
++ psb_intel_lvds_set_backlight(dev,
++ output->
++ mode_dev->backlight_duty_cycle);
++ } else {
++ psb_intel_lvds_set_backlight(dev, 0);
++
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
++ ~POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while (pp_status & PP_ON);
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++static void psb_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++
++ if (mode == DRM_MODE_DPMS_ON)
++ psb_intel_lvds_set_power(dev, output, true);
++ else
++ psb_intel_lvds_set_power(dev, output, false);
++
++ /* XXX: We never power down the LVDS pairs. */
++}
++
++static void psb_intel_lvds_save(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct psb_intel_lvds_priv *lvds_priv =
++ (struct psb_intel_lvds_priv *)psb_intel_output->dev_priv;
++
++ if (IS_POULSBO(dev)) {
++ lvds_priv->savePP_ON = REG_READ(LVDSPP_ON);
++ lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
++ lvds_priv->saveLVDS = REG_READ(LVDS);
++ lvds_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
++ lvds_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
++ /*lvds_priv->savePP_DIVISOR = REG_READ(PP_DIVISOR);*/
++ lvds_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
++ lvds_priv->savePFIT_CONTROL = REG_READ(PFIT_CONTROL);
++ lvds_priv->savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);
++
++ /*TODO: move backlight_duty_cycle to psb_intel_lvds_priv*/
++ dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
++ BACKLIGHT_DUTY_CYCLE_MASK);
++
++ /*
++ * If the light is off at server startup,
++ * just make it full brightness
++ */
++ if (dev_priv->backlight_duty_cycle == 0)
++ dev_priv->backlight_duty_cycle =
++ psb_intel_lvds_get_max_backlight(dev);
++
++ DRM_DEBUG("(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
++ lvds_priv->savePP_ON,
++ lvds_priv->savePP_OFF,
++ lvds_priv->saveLVDS,
++ lvds_priv->savePP_CONTROL,
++ lvds_priv->savePP_CYCLE,
++ lvds_priv->saveBLC_PWM_CTL);
++ }
++}
++
++static void psb_intel_lvds_restore(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ u32 pp_status;
++
++ /*struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;*/
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct psb_intel_lvds_priv *lvds_priv =
++ (struct psb_intel_lvds_priv *)psb_intel_output->dev_priv;
++
++ if (IS_POULSBO(dev)) {
++ DRM_DEBUG("(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
++ lvds_priv->savePP_ON,
++ lvds_priv->savePP_OFF,
++ lvds_priv->saveLVDS,
++ lvds_priv->savePP_CONTROL,
++ lvds_priv->savePP_CYCLE,
++ lvds_priv->saveBLC_PWM_CTL);
++
++ REG_WRITE(BLC_PWM_CTL, lvds_priv->saveBLC_PWM_CTL);
++ REG_WRITE(PFIT_CONTROL, lvds_priv->savePFIT_CONTROL);
++ REG_WRITE(PFIT_PGM_RATIOS, lvds_priv->savePFIT_PGM_RATIOS);
++ REG_WRITE(LVDSPP_ON, lvds_priv->savePP_ON);
++ REG_WRITE(LVDSPP_OFF, lvds_priv->savePP_OFF);
++ /*REG_WRITE(PP_DIVISOR, lvds_priv->savePP_DIVISOR);*/
++ REG_WRITE(PP_CYCLE, lvds_priv->savePP_CYCLE);
++ REG_WRITE(PP_CONTROL, lvds_priv->savePP_CONTROL);
++ REG_WRITE(LVDS, lvds_priv->saveLVDS);
++
++ if (lvds_priv->savePP_CONTROL & POWER_TARGET_ON) {
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
++ POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while ((pp_status & PP_ON) == 0);
++ } else {
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
++ ~POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while (pp_status & PP_ON);
++ }
++ }
++}
++
++int psb_intel_lvds_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
++{
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct drm_display_mode *fixed_mode =
++ psb_intel_output->mode_dev->panel_fixed_mode;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter psb_intel_lvds_mode_valid \n");
++#endif /* PRINT_JLIU7 */
++
++ /* just in case */
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return MODE_NO_DBLESCAN;
++
++ /* just in case */
++ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
++ return MODE_NO_INTERLACE;
++
++ if (fixed_mode) {
++ if (mode->hdisplay > fixed_mode->hdisplay)
++ return MODE_PANEL;
++ if (mode->vdisplay > fixed_mode->vdisplay)
++ return MODE_PANEL;
++ }
++ return MODE_OK;
++}
++
++bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct psb_intel_mode_device *mode_dev =
++ enc_to_psb_intel_output(encoder)->mode_dev;
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_crtc *psb_intel_crtc =
++ to_psb_intel_crtc(encoder->crtc);
++ struct drm_encoder *tmp_encoder;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter psb_intel_lvds_mode_fixup \n");
++#endif /* PRINT_JLIU7 */
++
++ /* Should never happen!! */
++ if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) {
++ printk(KERN_ERR
++ "Can't support LVDS/MIPI on pipe B on MRST\n");
++ return false;
++ } else if (!IS_MRST(dev) && !IS_I965G(dev)
++ && psb_intel_crtc->pipe == 0) {
++ printk(KERN_ERR "Can't support LVDS on pipe A\n");
++ return false;
++ }
++ /* Should never happen!! */
++ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
++ head) {
++ if (tmp_encoder != encoder
++ && tmp_encoder->crtc == encoder->crtc) {
++ printk(KERN_ERR "Can't enable LVDS and another "
++ "encoder on the same pipe\n");
++ return false;
++ }
++ }
++
++ /*
++ * If we have timings from the BIOS for the panel, put them in
++ * to the adjusted mode. The CRTC will be set up for this mode,
++ * with the panel scaling set up to source from the H/VDisplay
++ * of the original mode.
++ */
++ if (mode_dev->panel_fixed_mode != NULL) {
++ adjusted_mode->hdisplay =
++ mode_dev->panel_fixed_mode->hdisplay;
++ adjusted_mode->hsync_start =
++ mode_dev->panel_fixed_mode->hsync_start;
++ adjusted_mode->hsync_end =
++ mode_dev->panel_fixed_mode->hsync_end;
++ adjusted_mode->htotal = mode_dev->panel_fixed_mode->htotal;
++ adjusted_mode->vdisplay =
++ mode_dev->panel_fixed_mode->vdisplay;
++ adjusted_mode->vsync_start =
++ mode_dev->panel_fixed_mode->vsync_start;
++ adjusted_mode->vsync_end =
++ mode_dev->panel_fixed_mode->vsync_end;
++ adjusted_mode->vtotal = mode_dev->panel_fixed_mode->vtotal;
++ adjusted_mode->clock = mode_dev->panel_fixed_mode->clock;
++ drm_mode_set_crtcinfo(adjusted_mode,
++ CRTC_INTERLACE_HALVE_V);
++ }
++
++ /*
++ * XXX: It would be nice to support lower refresh rates on the
++ * panels to reduce power consumption, and perhaps match the
++ * user's requested refresh rate.
++ */
++
++ return true;
++}
++
++static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++ struct psb_intel_mode_device *mode_dev = output->mode_dev;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter psb_intel_lvds_prepare \n");
++#endif /* PRINT_JLIU7 */
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
++ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
++ BACKLIGHT_DUTY_CYCLE_MASK);
++
++ psb_intel_lvds_set_power(dev, output, false);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++static void psb_intel_lvds_commit(struct drm_encoder *encoder)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++ struct psb_intel_mode_device *mode_dev = output->mode_dev;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter psb_intel_lvds_commit \n");
++#endif /* PRINT_JLIU7 */
++
++ if (mode_dev->backlight_duty_cycle == 0)
++ mode_dev->backlight_duty_cycle =
++ psb_intel_lvds_get_max_backlight(dev);
++
++ psb_intel_lvds_set_power(dev, output, true);
++}
++
++static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct psb_intel_mode_device *mode_dev =
++ enc_to_psb_intel_output(encoder)->mode_dev;
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(
++ encoder->crtc);
++ u32 pfit_control;
++
++ /*
++ * The LVDS pin pair will already have been turned on in the
++ * psb_intel_crtc_mode_set since it has a large impact on the DPLL
++ * settings.
++ */
++
++ /*
++ * Enable automatic panel scaling so that non-native modes fill the
++ * screen. Should be enabled before the pipe is enabled, according to
++ * register description and PRM.
++ */
++ if (mode->hdisplay != adjusted_mode->hdisplay ||
++ mode->vdisplay != adjusted_mode->vdisplay)
++ pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
++ HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
++ HORIZ_INTERP_BILINEAR);
++ else
++ pfit_control = 0;
++
++ if (!IS_I965G(dev)) {
++ if (mode_dev->panel_wants_dither)
++ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
++ } else
++ pfit_control |= psb_intel_crtc->pipe << PFIT_PIPE_SHIFT;
++
++ REG_WRITE(PFIT_CONTROL, pfit_control);
++}
++
++/**
++ * Detect the LVDS connection.
++ *
++ * This always returns CONNECTOR_STATUS_CONNECTED.
++ * This connector should only have
++ * been set up if the LVDS was actually connected anyway.
++ */
++static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector
++ *connector)
++{
++ return connector_status_connected;
++}
++
++/**
++ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
++ */
++static int psb_intel_lvds_get_modes(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct psb_intel_mode_device *mode_dev =
++ psb_intel_output->mode_dev;
++ int ret = 0;
++
++ if (!IS_MRST(dev))
++ ret = psb_intel_ddc_get_modes(psb_intel_output);
++
++ if (ret)
++ return ret;
++
++ /* Didn't get an EDID, so
++ * Set wide sync ranges so we get all modes
++ * handed to valid_mode for checking
++ */
++ connector->display_info.min_vfreq = 0;
++ connector->display_info.max_vfreq = 200;
++ connector->display_info.min_hfreq = 0;
++ connector->display_info.max_hfreq = 200;
++
++ if (mode_dev->panel_fixed_mode != NULL) {
++ struct drm_display_mode *mode =
++ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
++ drm_mode_probed_add(connector, mode);
++ return 1;
++ }
++
++ return 0;
++}
++
++/**
++ * psb_intel_lvds_destroy - unregister and free LVDS structures
++ * @connector: connector to free
++ *
++ * Unregister the DDC bus for this connector then free the driver private
++ * structure.
++ */
++void psb_intel_lvds_destroy(struct drm_connector *connector)
++{
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++
++ if (psb_intel_output->ddc_bus)
++ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
++ drm_sysfs_connector_remove(connector);
++ drm_connector_cleanup(connector);
++ kfree(connector);
++}
++
++int psb_intel_lvds_set_property(struct drm_connector *connector,
++ struct drm_property *property,
++ uint64_t value)
++{
++ struct drm_encoder *pEncoder = connector->encoder;
++
++ if (!strcmp(property->name, "scaling mode") && pEncoder) {
++ struct psb_intel_crtc *pPsbCrtc =
++ to_psb_intel_crtc(pEncoder->crtc);
++ uint64_t curValue;
++
++ if (!pPsbCrtc)
++ goto set_prop_error;
++
++ switch (value) {
++ case DRM_MODE_SCALE_FULLSCREEN:
++ break;
++ case DRM_MODE_SCALE_CENTER:
++ break;
++ case DRM_MODE_SCALE_ASPECT:
++ break;
++ default:
++ goto set_prop_error;
++ }
++
++ if (drm_connector_property_get_value(connector,
++ property,
++ &curValue))
++ goto set_prop_error;
++
++ if (curValue == value)
++ goto set_prop_done;
++
++ if (drm_connector_property_set_value(connector,
++ property,
++ value))
++ goto set_prop_error;
++
++ if (pPsbCrtc->saved_mode.hdisplay != 0 &&
++ pPsbCrtc->saved_mode.vdisplay != 0) {
++ if (!drm_crtc_helper_set_mode(pEncoder->crtc,
++ &pPsbCrtc->saved_mode,
++ pEncoder->crtc->x,
++ pEncoder->crtc->y,
++ pEncoder->crtc->fb))
++ goto set_prop_error;
++ }
++ } else if (!strcmp(property->name, "backlight") && pEncoder) {
++ if (drm_connector_property_set_value(connector,
++ property,
++ value))
++ goto set_prop_error;
++ else {
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++ struct backlight_device bd;
++ bd.props.brightness = value;
++ psb_set_brightness(&bd);
++#endif
++ }
++ }
++
++set_prop_done:
++ return 0;
++set_prop_error:
++ return -1;
++}
++
++static const struct drm_encoder_helper_funcs psb_intel_lvds_helper_funcs = {
++ .dpms = psb_intel_lvds_encoder_dpms,
++ .mode_fixup = psb_intel_lvds_mode_fixup,
++ .prepare = psb_intel_lvds_prepare,
++ .mode_set = psb_intel_lvds_mode_set,
++ .commit = psb_intel_lvds_commit,
++};
++
++static const struct drm_connector_helper_funcs
++ psb_intel_lvds_connector_helper_funcs = {
++ .get_modes = psb_intel_lvds_get_modes,
++ .mode_valid = psb_intel_lvds_mode_valid,
++ .best_encoder = psb_intel_best_encoder,
++};
++
++static const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .save = psb_intel_lvds_save,
++ .restore = psb_intel_lvds_restore,
++ .detect = psb_intel_lvds_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .set_property = psb_intel_lvds_set_property,
++ .destroy = psb_intel_lvds_destroy,
++};
++
++
++static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder)
++{
++ drm_encoder_cleanup(encoder);
++}
++
++const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
++ .destroy = psb_intel_lvds_enc_destroy,
++};
++
++
++
++/**
++ * psb_intel_lvds_init - setup LVDS connectors on this device
++ * @dev: drm device
++ *
++ * Create the connector, register the LVDS DDC bus, and try to figure out what
++ * modes we can display on the LVDS panel (if present).
++ */
++void psb_intel_lvds_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev)
++{
++ struct psb_intel_output *psb_intel_output;
++ struct psb_intel_lvds_priv *lvds_priv;
++ struct drm_connector *connector;
++ struct drm_encoder *encoder;
++ struct drm_display_mode *scan; /* *modes, *bios_mode; */
++ struct drm_crtc *crtc;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ u32 lvds;
++ int pipe;
++
++ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
++ if (!psb_intel_output)
++ return;
++
++ lvds_priv = kzalloc(sizeof(struct psb_intel_lvds_priv), GFP_KERNEL);
++ if (!lvds_priv) {
++ kfree(psb_intel_output);
++ DRM_DEBUG("LVDS private allocation error\n");
++ return;
++ }
++
++ psb_intel_output->dev_priv = lvds_priv;
++
++ psb_intel_output->mode_dev = mode_dev;
++ connector = &psb_intel_output->base;
++ encoder = &psb_intel_output->enc;
++ drm_connector_init(dev, &psb_intel_output->base,
++ &psb_intel_lvds_connector_funcs,
++ DRM_MODE_CONNECTOR_LVDS);
++
++ drm_encoder_init(dev, &psb_intel_output->enc,
++ &psb_intel_lvds_enc_funcs,
++ DRM_MODE_ENCODER_LVDS);
++
++ drm_mode_connector_attach_encoder(&psb_intel_output->base,
++ &psb_intel_output->enc);
++ psb_intel_output->type = INTEL_OUTPUT_LVDS;
++
++ drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
++ drm_connector_helper_add(connector,
++ &psb_intel_lvds_connector_helper_funcs);
++ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++ connector->interlace_allowed = false;
++ connector->doublescan_allowed = false;
++
++ /*Attach connector properties*/
++ drm_connector_attach_property(connector,
++ dev->mode_config.scaling_mode_property,
++ DRM_MODE_SCALE_FULLSCREEN);
++ drm_connector_attach_property(connector,
++ dev_priv->backlight_property,
++ BRIGHTNESS_MAX_LEVEL);
++
++ /**
++ * Set up I2C bus
++ * FIXME: distroy i2c_bus when exit
++ */
++ psb_intel_output->i2c_bus = psb_intel_i2c_create(dev,
++ GPIOB,
++ "LVDSBLC_B");
++ if (!psb_intel_output->i2c_bus) {
++ dev_printk(KERN_ERR,
++ &dev->pdev->dev, "I2C bus registration failed.\n");
++ goto failed_blc_i2c;
++ }
++ psb_intel_output->i2c_bus->slave_addr = 0x2C;
++ dev_priv->lvds_i2c_bus = psb_intel_output->i2c_bus;
++
++ /*
++ * LVDS discovery:
++ * 1) check for EDID on DDC
++ * 2) check for VBT data
++ * 3) check to see if LVDS is already on
++ * if none of the above, no panel
++ * 4) make sure lid is open
++ * if closed, act like it's not there for now
++ */
++
++ /* Set up the DDC bus. */
++ psb_intel_output->ddc_bus = psb_intel_i2c_create(dev,
++ GPIOC,
++ "LVDSDDC_C");
++ if (!psb_intel_output->ddc_bus) {
++ dev_printk(KERN_ERR, &dev->pdev->dev,
++ "DDC bus registration " "failed.\n");
++ goto failed_ddc;
++ }
++
++ /*
++ * Attempt to get the fixed panel mode from DDC. Assume that the
++ * preferred mode is the right one.
++ */
++ psb_intel_ddc_get_modes(psb_intel_output);
++ list_for_each_entry(scan, &connector->probed_modes, head) {
++ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
++ mode_dev->panel_fixed_mode =
++ drm_mode_duplicate(dev, scan);
++ goto out; /* FIXME: check for quirks */
++ }
++ }
++
++ /* Failed to get EDID, what about VBT? do we need this?*/
++ if (mode_dev->vbt_mode)
++ mode_dev->panel_fixed_mode =
++ drm_mode_duplicate(dev, mode_dev->vbt_mode);
++
++ if (!mode_dev->panel_fixed_mode)
++ if (dev_priv->lfp_lvds_vbt_mode)
++ mode_dev->panel_fixed_mode =
++ drm_mode_duplicate(dev,
++ dev_priv->lfp_lvds_vbt_mode);
++
++ /*
++ * If we didn't get EDID, try checking if the panel is already turned
++ * on. If so, assume that whatever is currently programmed is the
++ * correct mode.
++ */
++ lvds = REG_READ(LVDS);
++ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
++ crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
++
++ if (crtc && (lvds & LVDS_PORT_EN)) {
++ mode_dev->panel_fixed_mode =
++ psb_intel_crtc_mode_get(dev, crtc);
++ if (mode_dev->panel_fixed_mode) {
++ mode_dev->panel_fixed_mode->type |=
++ DRM_MODE_TYPE_PREFERRED;
++ goto out; /* FIXME: check for quirks */
++ }
++ }
++
++ /* If we still don't have a mode after all that, give up. */
++ if (!mode_dev->panel_fixed_mode) {
++ DRM_DEBUG
++ ("Found no modes on the lvds, ignoring the LVDS\n");
++ goto failed_find;
++ }
++
++ /* FIXME: detect aopen & mac mini type stuff automatically? */
++ /*
++ * Blacklist machines with BIOSes that list an LVDS panel without
++ * actually having one.
++ */
++ if (IS_I945GM(dev)) {
++ /* aopen mini pc */
++ if (dev->pdev->subsystem_vendor == 0xa0a0) {
++ DRM_DEBUG
++ ("Suspected AOpen Mini PC, ignoring the LVDS\n");
++ goto failed_find;
++ }
++
++ if ((dev->pdev->subsystem_vendor == 0x8086) &&
++ (dev->pdev->subsystem_device == 0x7270)) {
++ /* It's a Mac Mini or Macbook Pro. */
++
++ if (mode_dev->panel_fixed_mode != NULL &&
++ mode_dev->panel_fixed_mode->hdisplay == 800 &&
++ mode_dev->panel_fixed_mode->vdisplay == 600) {
++ DRM_DEBUG
++ ("Suspected Mac Mini, ignoring the LVDS\n");
++ goto failed_find;
++ }
++ }
++ }
++
++out:
++ drm_sysfs_connector_add(connector);
++
++#if PRINT_JLIU7
++ DRM_INFO("PRINT_JLIU7 hdisplay = %d\n",
++ mode_dev->panel_fixed_mode->hdisplay);
++ DRM_INFO("PRINT_JLIU7 vdisplay = %d\n",
++ mode_dev->panel_fixed_mode->vdisplay);
++ DRM_INFO("PRINT_JLIU7 hsync_start = %d\n",
++ mode_dev->panel_fixed_mode->hsync_start);
++ DRM_INFO("PRINT_JLIU7 hsync_end = %d\n",
++ mode_dev->panel_fixed_mode->hsync_end);
++ DRM_INFO("PRINT_JLIU7 htotal = %d\n",
++ mode_dev->panel_fixed_mode->htotal);
++ DRM_INFO("PRINT_JLIU7 vsync_start = %d\n",
++ mode_dev->panel_fixed_mode->vsync_start);
++ DRM_INFO("PRINT_JLIU7 vsync_end = %d\n",
++ mode_dev->panel_fixed_mode->vsync_end);
++ DRM_INFO("PRINT_JLIU7 vtotal = %d\n",
++ mode_dev->panel_fixed_mode->vtotal);
++ DRM_INFO("PRINT_JLIU7 clock = %d\n",
++ mode_dev->panel_fixed_mode->clock);
++#endif /* PRINT_JLIU7 */
++ return;
++
++failed_find:
++ if (psb_intel_output->ddc_bus)
++ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
++failed_ddc:
++ if (psb_intel_output->i2c_bus)
++ psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
++failed_blc_i2c:
++ drm_encoder_cleanup(encoder);
++ drm_connector_cleanup(connector);
++ kfree(connector);
++}
++
++/* MRST platform start */
++
++/*
++ * FIXME need to move to register define head file
++ */
++#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16)
++#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16)
++
++/* The max/min PWM frequency in BPCR[31:17] - */
++/* The smallest number is 1 (not 0) that can fit in the
++ * 15-bit field of the and then*/
++/* shifts to the left by one bit to get the actual 16-bit
++ * value that the 15-bits correspond to.*/
++#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
++
++#define BRIGHTNESS_MAX_LEVEL 100
++#define BLC_PWM_PRECISION_FACTOR 10 /* 10000000 */
++#define BLC_PWM_FREQ_CALC_CONSTANT 32
++#define MHz 1000000
++#define BLC_POLARITY_NORMAL 0
++#define BLC_POLARITY_INVERSE 1
++
++/**
++ * Calculate PWM control register value.
++ */
++#if 0
++static bool mrstLVDSCalculatePWMCtrlRegFreq(struct drm_device *dev)
++{
++ unsigned long value = 0;
++ if (blc_freq == 0) {
++ /* DRM_ERROR(KERN_ERR "mrstLVDSCalculatePWMCtrlRegFreq:
++ * Frequency Requested is 0.\n"); */
++ return false;
++ }
++
++ value = (CoreClock * MHz);
++ value = (value / BLC_PWM_FREQ_CALC_CONSTANT);
++ value = (value * BLC_PWM_PRECISION_FACTOR);
++ value = (value / blc_freq);
++ value = (value / BLC_PWM_PRECISION_FACTOR);
++
++ if (value > (unsigned long) MRST_BLC_MAX_PWM_REG_FREQ) {
++ return 0;
++ } else {
++ PWMControlRegFreq = (u32) value;
++ return 1;
++ }
++}
++#endif
++/**
++ * Sets the power state for the panel.
++ */
++static void mrst_lvds_set_power(struct drm_device *dev,
++ struct psb_intel_output *output, bool on)
++{
++ u32 pp_status;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_lvds_set_power \n");
++#endif /* PRINT_JLIU7 */
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ if (on) {
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
++ POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
++ } else {
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
++ ~POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while (pp_status & PP_ON);
++ }
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++static void mrst_lvds_dpms(struct drm_encoder *encoder, int mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_lvds_dpms \n");
++#endif /* PRINT_JLIU7 */
++
++ if (mode == DRM_MODE_DPMS_ON)
++ mrst_lvds_set_power(dev, output, true);
++ else
++ mrst_lvds_set_power(dev, output, false);
++
++ /* XXX: We never power down the LVDS pairs. */
++}
++
++static void mrst_lvds_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct psb_intel_mode_device *mode_dev =
++ enc_to_psb_intel_output(encoder)->mode_dev;
++ struct drm_device *dev = encoder->dev;
++ u32 lvds_port;
++ uint64_t curValue = DRM_MODE_SCALE_FULLSCREEN;
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_lvds_mode_set \n");
++#endif /* PRINT_JLIU7 */
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return;
++
++ /*
++ * The LVDS pin pair will already have been turned on in the
++ * psb_intel_crtc_mode_set since it has a large impact on the DPLL
++ * settings.
++ */
++ /*FIXME JLIU7 Get panel power delay parameters from config data */
++ REG_WRITE(0x61208, 0x25807d0);
++ REG_WRITE(0x6120c, 0x1f407d0);
++ REG_WRITE(0x61210, 0x270f04);
++
++ lvds_port = (REG_READ(LVDS) &
++ (~LVDS_PIPEB_SELECT)) |
++ LVDS_PORT_EN |
++ LVDS_BORDER_EN;
++
++ if (mode_dev->panel_wants_dither)
++ lvds_port |= MRST_PANEL_8TO6_DITHER_ENABLE;
++
++ REG_WRITE(LVDS, lvds_port);
++
++ drm_connector_property_get_value(
++ &enc_to_psb_intel_output(encoder)->base,
++ dev->mode_config.scaling_mode_property,
++ &curValue);
++
++ if (curValue == DRM_MODE_SCALE_CENTER)
++ REG_WRITE(PFIT_CONTROL, 0);
++ else if (curValue == DRM_MODE_SCALE_ASPECT) {
++ if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) ||
++ (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
++ if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) ==
++ (mode->hdisplay * adjusted_mode->crtc_vdisplay))
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++ else if ((adjusted_mode->crtc_hdisplay *
++ mode->vdisplay) > (mode->hdisplay *
++ adjusted_mode->crtc_vdisplay))
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
++ PFIT_SCALING_MODE_PILLARBOX);
++ else
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
++ PFIT_SCALING_MODE_LETTERBOX);
++ } else
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++ } else /*(curValue == DRM_MODE_SCALE_FULLSCREEN)*/
++ REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++}
++
++
++static const struct drm_encoder_helper_funcs mrst_lvds_helper_funcs = {
++ .dpms = mrst_lvds_dpms,
++ .mode_fixup = psb_intel_lvds_mode_fixup,
++ .prepare = psb_intel_lvds_prepare,
++ .mode_set = mrst_lvds_mode_set,
++ .commit = psb_intel_lvds_commit,
++};
++
++/** Returns the panel fixed mode from configuration. */
++/** FIXME JLIU7 need to revist it. */
++struct drm_display_mode *mrst_lvds_get_configuration_mode(struct drm_device
++ *dev)
++{
++ struct drm_display_mode *mode;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
++
++ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++ if (!mode)
++ return NULL;
++
++ if (dev_priv->vbt_data.Size != 0x00) { /*if non-zero, then use vbt*/
++
++ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
++ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
++ mode->hsync_start = mode->hdisplay + \
++ ((ti->hsync_offset_hi << 8) | \
++ ti->hsync_offset_lo);
++ mode->hsync_end = mode->hsync_start + \
++ ((ti->hsync_pulse_width_hi << 8) | \
++ ti->hsync_pulse_width_lo);
++ mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
++ ti->hblank_lo);
++ mode->vsync_start = \
++ mode->vdisplay + ((ti->vsync_offset_hi << 4) | \
++ ti->vsync_offset_lo);
++ mode->vsync_end = \
++ mode->vsync_start + ((ti->vsync_pulse_width_hi << 4) | \
++ ti->vsync_pulse_width_lo);
++ mode->vtotal = mode->vdisplay + \
++ ((ti->vblank_hi << 8) | ti->vblank_lo);
++ mode->clock = ti->pixel_clock * 10;
++#if 0
++ printk(KERN_INFO "hdisplay is %d\n", mode->hdisplay);
++ printk(KERN_INFO "vdisplay is %d\n", mode->vdisplay);
++ printk(KERN_INFO "HSS is %d\n", mode->hsync_start);
++ printk(KERN_INFO "HSE is %d\n", mode->hsync_end);
++ printk(KERN_INFO "htotal is %d\n", mode->htotal);
++ printk(KERN_INFO "VSS is %d\n", mode->vsync_start);
++ printk(KERN_INFO "VSE is %d\n", mode->vsync_end);
++ printk(KERN_INFO "vtotal is %d\n", mode->vtotal);
++ printk(KERN_INFO "clock is %d\n", mode->clock);
++#endif
++ } else {
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for TPO LTPS LPJ040K001A */
++ mode->hdisplay = 800;
++ mode->vdisplay = 480;
++ mode->hsync_start = 836;
++ mode->hsync_end = 846;
++ mode->htotal = 1056;
++ mode->vsync_start = 489;
++ mode->vsync_end = 491;
++ mode->vtotal = 525;
++ mode->clock = 33264;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 800x480 */
++ mode->hdisplay = 800;
++ mode->vdisplay = 480;
++ mode->hsync_start = 801;
++ mode->hsync_end = 802;
++ mode->htotal = 1024;
++ mode->vsync_start = 481;
++ mode->vsync_end = 482;
++ mode->vtotal = 525;
++ mode->clock = 30994;
++#endif /*FIXME jliu7 remove it later */
++
++#if 1 /*FIXME jliu7 remove it later, jliu7 modify it according to the spec*/
++ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1072;
++ mode->hsync_end = 1104;
++ mode->htotal = 1184;
++ mode->vsync_start = 603;
++ mode->vsync_end = 604;
++ mode->vtotal = 608;
++ mode->clock = 53990;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it, it is copied from SBIOS */
++ /* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1104;
++ mode->hsync_end = 1136;
++ mode->htotal = 1184;
++ mode->vsync_start = 603;
++ mode->vsync_end = 604;
++ mode->vtotal = 608;
++ mode->clock = 53990;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 600;
++ mode->hsync_start = 1124;
++ mode->hsync_end = 1204;
++ mode->htotal = 1312;
++ mode->vsync_start = 607;
++ mode->vsync_end = 610;
++ mode->vtotal = 621;
++ mode->clock = 48885;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 1024x768 */
++ mode->hdisplay = 1024;
++ mode->vdisplay = 768;
++ mode->hsync_start = 1048;
++ mode->hsync_end = 1184;
++ mode->htotal = 1344;
++ mode->vsync_start = 771;
++ mode->vsync_end = 777;
++ mode->vtotal = 806;
++ mode->clock = 65000;
++#endif /*FIXME jliu7 remove it later */
++
++#if 0 /*FIXME jliu7 remove it later */
++ /* hard coded fixed mode for LVDS 1366x768 */
++ mode->hdisplay = 1366;
++ mode->vdisplay = 768;
++ mode->hsync_start = 1430;
++ mode->hsync_end = 1558;
++ mode->htotal = 1664;
++ mode->vsync_start = 769;
++ mode->vsync_end = 770;
++ mode->vtotal = 776;
++ mode->clock = 77500;
++#endif /*FIXME jliu7 remove it later */
++ }
++ drm_mode_set_name(mode);
++ drm_mode_set_crtcinfo(mode, 0);
++
++ return mode;
++}
++
++/**
++ * mrst_lvds_init - setup LVDS connectors on this device
++ * @dev: drm device
++ *
++ * Create the connector, register the LVDS DDC bus, and try to figure out what
++ * modes we can display on the LVDS panel (if present).
++ */
++void mrst_lvds_init(struct drm_device *dev,
++ struct psb_intel_mode_device *mode_dev)
++{
++ struct psb_intel_output *psb_intel_output;
++ struct drm_connector *connector;
++ struct drm_encoder *encoder;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct edid *edid;
++ int ret = 0;
++ struct i2c_adapter *i2c_adap;
++ struct drm_display_mode *scan; /* *modes, *bios_mode; */
++
++#if PRINT_JLIU7
++ DRM_INFO("JLIU7 enter mrst_lvds_init \n");
++#endif /* PRINT_JLIU7 */
++
++ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
++ if (!psb_intel_output)
++ return;
++
++ psb_intel_output->mode_dev = mode_dev;
++ connector = &psb_intel_output->base;
++ encoder = &psb_intel_output->enc;
++ drm_connector_init(dev, &psb_intel_output->base,
++ &psb_intel_lvds_connector_funcs,
++ DRM_MODE_CONNECTOR_LVDS);
++
++ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
++ DRM_MODE_ENCODER_LVDS);
++
++ drm_mode_connector_attach_encoder(&psb_intel_output->base,
++ &psb_intel_output->enc);
++ psb_intel_output->type = INTEL_OUTPUT_LVDS;
++
++ drm_encoder_helper_add(encoder, &mrst_lvds_helper_funcs);
++ drm_connector_helper_add(connector,
++ &psb_intel_lvds_connector_helper_funcs);
++ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++ connector->interlace_allowed = false;
++ connector->doublescan_allowed = false;
++
++ drm_connector_attach_property(connector,
++ dev->mode_config.scaling_mode_property,
++ DRM_MODE_SCALE_FULLSCREEN);
++ drm_connector_attach_property(connector,
++ dev_priv->backlight_property,
++ BRIGHTNESS_MAX_LEVEL);
++
++ lvds_backlight = BRIGHTNESS_MAX_LEVEL;
++
++ mode_dev->panel_wants_dither = false;
++ if (dev_priv->vbt_data.Size != 0x00)
++ mode_dev->panel_wants_dither = (dev_priv->gct_data.Panel_Port_Control & MRST_PANEL_8TO6_DITHER_ENABLE);
++
++ /*
++ * LVDS discovery:
++ * 1) check for EDID on DDC
++ * 2) check for VBT data
++ * 3) check to see if LVDS is already on
++ * if none of the above, no panel
++ * 4) make sure lid is open
++ * if closed, act like it's not there for now
++ */
++ i2c_adap = i2c_get_adapter(2);
++ if (i2c_adap == NULL)
++ printk(KERN_ALERT "No ddc adapter available!\n");
++ /* Set up the DDC bus. */
++/* psb_intel_output->ddc_bus = psb_intel_i2c_create(dev,
++ GPIOC,
++ "LVDSDDC_C");
++ if (!psb_intel_output->ddc_bus) {
++ dev_printk(KERN_ERR, &dev->pdev->dev,
++ "DDC bus registration " "failed.\n");
++ goto failed_ddc;
++ }*/
++
++ /*
++ * Attempt to get the fixed panel mode from DDC. Assume that the
++ * preferred mode is the right one.
++ */
++ edid = drm_get_edid(connector, i2c_adap);
++ if (edid) {
++ drm_mode_connector_update_edid_property(connector, edid);
++ ret = drm_add_edid_modes(connector, edid);
++ kfree(edid);
++ }
++
++ list_for_each_entry(scan, &connector->probed_modes, head) {
++ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
++ mode_dev->panel_fixed_mode =
++ drm_mode_duplicate(dev, scan);
++ goto out; /* FIXME: check for quirks */
++ }
++ }
++
++ /*
++	 * If we didn't get EDID, try getting panel timing
++ * from configuration data
++ */
++ mode_dev->panel_fixed_mode = mrst_lvds_get_configuration_mode(dev);
++
++ if (mode_dev->panel_fixed_mode) {
++ mode_dev->panel_fixed_mode->type |=
++ DRM_MODE_TYPE_PREFERRED;
++ goto out; /* FIXME: check for quirks */
++ }
++
++ /* If we still don't have a mode after all that, give up. */
++ if (!mode_dev->panel_fixed_mode) {
++ DRM_DEBUG
++ ("Found no modes on the lvds, ignoring the LVDS\n");
++ goto failed_find;
++ }
++
++out:
++ drm_sysfs_connector_add(connector);
++ return;
++
++failed_find:
++ DRM_DEBUG("No LVDS modes found, disabling.\n");
++ if (psb_intel_output->ddc_bus)
++ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
++
++/* failed_ddc: */
++
++ drm_encoder_cleanup(encoder);
++ drm_connector_cleanup(connector);
++ kfree(connector);
++}
++
++/* MRST platform end */
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_modes.c b/drivers/gpu/drm/mrst/drv/psb_intel_modes.c
+new file mode 100644
+index 0000000..e248aed
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_modes.c
+@@ -0,0 +1,77 @@
++/*
++ * Copyright (c) 2007 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: Jesse Barnes <jesse.barnes@intel.com>
++ */
++
++#include <linux/i2c.h>
++#include <linux/fb.h>
++#include <drm/drmP.h>
++#include "psb_intel_drv.h"
++
++/**
++ * psb_intel_ddc_probe
++ *
++ */
++bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output)
++{
++ u8 out_buf[] = { 0x0, 0x0 };
++ u8 buf[2];
++ int ret;
++ struct i2c_msg msgs[] = {
++ {
++ .addr = 0x50,
++ .flags = 0,
++ .len = 1,
++ .buf = out_buf,
++ },
++ {
++ .addr = 0x50,
++ .flags = I2C_M_RD,
++ .len = 1,
++ .buf = buf,
++ }
++ };
++
++ ret = i2c_transfer(&psb_intel_output->ddc_bus->adapter, msgs, 2);
++ if (ret == 2)
++ return true;
++
++ return false;
++}
++
++/**
++ * psb_intel_ddc_get_modes - get modelist from monitor
++ * @connector: DRM connector device to use
++ *
++ * Fetch the EDID information from @connector using the DDC bus.
++ */
++int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output)
++{
++ struct edid *edid;
++ int ret = 0;
++
++ edid =
++ drm_get_edid(&psb_intel_output->base,
++ &psb_intel_output->ddc_bus->adapter);
++ if (edid) {
++ drm_mode_connector_update_edid_property(&psb_intel_output->
++ base, edid);
++ ret = drm_add_edid_modes(&psb_intel_output->base, edid);
++ kfree(edid);
++ }
++ return ret;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_reg.h b/drivers/gpu/drm/mrst/drv/psb_intel_reg.h
+new file mode 100644
+index 0000000..d6b8921
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_reg.h
+@@ -0,0 +1,1099 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#define BLC_PWM_CTL 0x61254
++#define BLC_PWM_CTL2 0x61250
++#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
++/**
++ * This is the most significant 15 bits of the number of backlight cycles in a
++ * complete cycle of the modulated backlight control.
++ *
++ * The actual value is this field multiplied by two.
++ */
++#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
++#define BLM_LEGACY_MODE (1 << 16)
++/**
++ * This is the number of cycles out of the backlight modulation cycle for which
++ * the backlight is on.
++ *
++ * This field must be no greater than the number of cycles in the complete
++ * backlight modulation cycle.
++ */
++#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
++#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
++
++#define I915_GCFGC 0xf0
++#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
++#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
++#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
++#define I915_DISPLAY_CLOCK_MASK (7 << 4)
++
++#define I855_HPLLCC 0xc0
++#define I855_CLOCK_CONTROL_MASK (3 << 0)
++#define I855_CLOCK_133_200 (0 << 0)
++#define I855_CLOCK_100_200 (1 << 0)
++#define I855_CLOCK_100_133 (2 << 0)
++#define I855_CLOCK_166_250 (3 << 0)
++
++/* I830 CRTC registers */
++#define HTOTAL_A 0x60000
++#define HBLANK_A 0x60004
++#define HSYNC_A 0x60008
++#define VTOTAL_A 0x6000c
++#define VBLANK_A 0x60010
++#define VSYNC_A 0x60014
++#define PIPEASRC 0x6001c
++#define BCLRPAT_A 0x60020
++#define VSYNCSHIFT_A 0x60028
++
++#define HTOTAL_B 0x61000
++#define HBLANK_B 0x61004
++#define HSYNC_B 0x61008
++#define VTOTAL_B 0x6100c
++#define VBLANK_B 0x61010
++#define VSYNC_B 0x61014
++#define PIPEBSRC 0x6101c
++#define BCLRPAT_B 0x61020
++#define VSYNCSHIFT_B 0x61028
++
++#define PP_STATUS 0x61200
++# define PP_ON (1 << 31)
++/**
++ * Indicates that all dependencies of the panel are on:
++ *
++ * - PLL enabled
++ * - pipe enabled
++ * - LVDS/DVOB/DVOC on
++ */
++# define PP_READY (1 << 30)
++# define PP_SEQUENCE_NONE (0 << 28)
++# define PP_SEQUENCE_ON (1 << 28)
++# define PP_SEQUENCE_OFF (2 << 28)
++# define PP_SEQUENCE_MASK 0x30000000
++#define PP_CONTROL 0x61204
++# define POWER_TARGET_ON (1 << 0)
++
++#define LVDSPP_ON 0x61208
++#define LVDSPP_OFF 0x6120c
++#define PP_CYCLE 0x61210
++
++#define PFIT_CONTROL 0x61230
++# define PFIT_ENABLE (1 << 31)
++# define PFIT_PIPE_MASK (3 << 29)
++# define PFIT_PIPE_SHIFT 29
++# define PFIT_SCALING_MODE_PILLARBOX (1 << 27)
++# define PFIT_SCALING_MODE_LETTERBOX (3 << 26)
++# define VERT_INTERP_DISABLE (0 << 10)
++# define VERT_INTERP_BILINEAR (1 << 10)
++# define VERT_INTERP_MASK (3 << 10)
++# define VERT_AUTO_SCALE (1 << 9)
++# define HORIZ_INTERP_DISABLE (0 << 6)
++# define HORIZ_INTERP_BILINEAR (1 << 6)
++# define HORIZ_INTERP_MASK (3 << 6)
++# define HORIZ_AUTO_SCALE (1 << 5)
++# define PANEL_8TO6_DITHER_ENABLE (1 << 3)
++
++#define PFIT_PGM_RATIOS 0x61234
++# define PFIT_VERT_SCALE_MASK 0xfff00000
++# define PFIT_HORIZ_SCALE_MASK 0x0000fff0
++
++#define PFIT_AUTO_RATIOS 0x61238
++
++
++#define DPLL_A 0x06014
++#define DPLL_B 0x06018
++# define DPLL_VCO_ENABLE (1 << 31)
++# define DPLL_DVO_HIGH_SPEED (1 << 30)
++# define DPLL_SYNCLOCK_ENABLE (1 << 29)
++# define DPLL_VGA_MODE_DIS (1 << 28)
++# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
++# define DPLLB_MODE_LVDS (2 << 26) /* i915 */
++# define DPLL_MODE_MASK (3 << 26)
++# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
++# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
++# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
++# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
++# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
++# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
++/**
++ * The i830 generation, in DAC/serial mode, defines p1 as two plus this
++ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
++ */
++# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
++/**
++ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
++ * this field (only one bit may be set).
++ */
++# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
++# define DPLL_FPA01_P1_POST_DIV_SHIFT 16
++# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required
++ * in DVO non-gang */
++# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
++# define PLL_REF_INPUT_DREFCLK (0 << 13)
++# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
++# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO
++ * TVCLKIN */
++# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
++# define PLL_REF_INPUT_MASK (3 << 13)
++# define PLL_LOAD_PULSE_PHASE_SHIFT 9
++/*
++ * Parallel to Serial Load Pulse phase selection.
++ * Selects the phase for the 10X DPLL clock for the PCIe
++ * digital display port. The range is 4 to 13; 10 or more
++ * is just a flip delay. The default is 6
++ */
++# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
++# define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
++
++/**
++ * SDVO multiplier for 945G/GM. Not used on 965.
++ *
++ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
++ */
++# define SDVO_MULTIPLIER_MASK 0x000000ff
++# define SDVO_MULTIPLIER_SHIFT_HIRES 4
++# define SDVO_MULTIPLIER_SHIFT_VGA 0
++
++/** @defgroup DPLL_MD
++ * @{
++ */
++/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
++#define DPLL_A_MD 0x0601c
++/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
++#define DPLL_B_MD 0x06020
++/**
++ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
++ *
++ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
++ */
++# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
++# define DPLL_MD_UDI_DIVIDER_SHIFT 24
++/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
++# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
++# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
++/**
++ * SDVO/UDI pixel multiplier.
++ *
++ * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
++ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
++ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
++ * dummy bytes in the datastream at an increased clock rate, with both sides of
++ * the link knowing how many bytes are fill.
++ *
++ * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
++ * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be
++ * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
++ * through an SDVO command.
++ *
++ * This register field has values of multiplication factor minus 1, with
++ * a maximum multiplier of 5 for SDVO.
++ */
++# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
++# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
++/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
++ * This best be set to the default value (3) or the CRT won't work. No,
++ * I don't entirely understand what this does...
++ */
++# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
++# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
++/** @} */
++
++#define DPLL_TEST 0x606c
++# define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
++# define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
++# define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
++# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
++# define DPLLB_TEST_N_BYPASS (1 << 19)
++# define DPLLB_TEST_M_BYPASS (1 << 18)
++# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
++# define DPLLA_TEST_N_BYPASS (1 << 3)
++# define DPLLA_TEST_M_BYPASS (1 << 2)
++# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
++
++#define ADPA 0x61100
++#define ADPA_DAC_ENABLE (1<<31)
++#define ADPA_DAC_DISABLE 0
++#define ADPA_PIPE_SELECT_MASK (1<<30)
++#define ADPA_PIPE_A_SELECT 0
++#define ADPA_PIPE_B_SELECT (1<<30)
++#define ADPA_USE_VGA_HVPOLARITY (1<<15)
++#define ADPA_SETS_HVPOLARITY 0
++#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
++#define ADPA_VSYNC_CNTL_ENABLE 0
++#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
++#define ADPA_HSYNC_CNTL_ENABLE 0
++#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
++#define ADPA_VSYNC_ACTIVE_LOW 0
++#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
++#define ADPA_HSYNC_ACTIVE_LOW 0
++
++#define FPA0 0x06040
++#define FPA1 0x06044
++#define FPB0 0x06048
++#define FPB1 0x0604c
++# define FP_N_DIV_MASK 0x003f0000
++# define FP_N_DIV_SHIFT 16
++# define FP_M1_DIV_MASK 0x00003f00
++# define FP_M1_DIV_SHIFT 8
++# define FP_M2_DIV_MASK 0x0000003f
++# define FP_M2_DIV_SHIFT 0
++
++
++#define PORT_HOTPLUG_EN 0x61110
++# define SDVOB_HOTPLUG_INT_EN (1 << 26)
++# define SDVOC_HOTPLUG_INT_EN (1 << 25)
++# define TV_HOTPLUG_INT_EN (1 << 18)
++# define CRT_HOTPLUG_INT_EN (1 << 9)
++# define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
++
++#define PORT_HOTPLUG_STAT 0x61114
++# define CRT_HOTPLUG_INT_STATUS (1 << 11)
++# define TV_HOTPLUG_INT_STATUS (1 << 10)
++# define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
++# define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
++# define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
++# define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
++# define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
++# define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
++
++#define SDVOB 0x61140
++#define SDVOC 0x61160
++#define SDVO_ENABLE (1 << 31)
++#define SDVO_PIPE_B_SELECT (1 << 30)
++#define SDVO_STALL_SELECT (1 << 29)
++#define SDVO_INTERRUPT_ENABLE (1 << 26)
++/**
++ * 915G/GM SDVO pixel multiplier.
++ *
++ * Programmed value is multiplier - 1, up to 5x.
++ *
++ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
++ */
++#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
++#define SDVO_PORT_MULTIPLY_SHIFT 23
++#define SDVO_PHASE_SELECT_MASK (15 << 19)
++#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
++#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
++#define SDVOC_GANG_MODE (1 << 16)
++#define SDVO_BORDER_ENABLE (1 << 7)
++#define SDVOB_PCIE_CONCURRENCY (1 << 3)
++#define SDVO_DETECTED (1 << 2)
++/* Bits to be preserved when writing */
++#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
++#define SDVOC_PRESERVE_MASK (1 << 17)
++
++/** @defgroup LVDS
++ * @{
++ */
++/**
++ * This register controls the LVDS output enable, pipe selection, and data
++ * format selection.
++ *
++ * All of the clock/data pairs are force powered down by power sequencing.
++ */
++#define LVDS 0x61180
++/**
++ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
++ * the DPLL semantics change when the LVDS is assigned to that pipe.
++ */
++# define LVDS_PORT_EN (1 << 31)
++/** Selects pipe B for LVDS data. Must be set on pre-965. */
++# define LVDS_PIPEB_SELECT (1 << 30)
++
++/** Turns on border drawing to allow centered display. */
++# define LVDS_BORDER_EN (1 << 15)
++
++/**
++ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
++ * pixel.
++ */
++# define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
++# define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
++# define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
++/**
++ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
++ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
++ * on.
++ */
++# define LVDS_A3_POWER_MASK (3 << 6)
++# define LVDS_A3_POWER_DOWN (0 << 6)
++# define LVDS_A3_POWER_UP (3 << 6)
++/**
++ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
++ * is set.
++ */
++# define LVDS_CLKB_POWER_MASK (3 << 4)
++# define LVDS_CLKB_POWER_DOWN (0 << 4)
++# define LVDS_CLKB_POWER_UP (3 << 4)
++
++/**
++ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
++ * setting for whether we are in dual-channel mode. The B3 pair will
++ * additionally only be powered up when LVDS_A3_POWER_UP is set.
++ */
++# define LVDS_B0B3_POWER_MASK (3 << 2)
++# define LVDS_B0B3_POWER_DOWN (0 << 2)
++# define LVDS_B0B3_POWER_UP (3 << 2)
++
++#define PIPEACONF 0x70008
++#define PIPEACONF_ENABLE (1<<31)
++#define PIPEACONF_DISABLE 0
++#define PIPEACONF_DOUBLE_WIDE (1<<30)
++#define I965_PIPECONF_ACTIVE (1<<30)
++#define PIPEACONF_SINGLE_WIDE 0
++#define PIPEACONF_PIPE_UNLOCKED 0
++#define PIPEACONF_PIPE_LOCKED (1<<25)
++#define PIPEACONF_PALETTE 0
++#define PIPEACONF_GAMMA (1<<24)
++#define PIPECONF_FORCE_BORDER (1<<25)
++#define PIPECONF_PROGRESSIVE (0 << 21)
++#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
++#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
++
++#define PIPEBCONF 0x71008
++#define PIPEBCONF_ENABLE (1<<31)
++#define PIPEBCONF_DISABLE 0
++#define PIPEBCONF_DOUBLE_WIDE (1<<30)
++#define PIPEBCONF_DISABLE 0
++#define PIPEBCONF_GAMMA (1<<24)
++#define PIPEBCONF_PALETTE 0
++
++#define PIPEBGCMAXRED 0x71010
++#define PIPEBGCMAXGREEN 0x71014
++#define PIPEBGCMAXBLUE 0x71018
++
++#define PIPEASTAT 0x70024
++#define PIPEBSTAT 0x71024
++#define PIPE_VBLANK_CLEAR (1 << 1)
++#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18)
++#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
++
++#define PIPE_VSYNC_ENABL (1UL<<25)
++#define PIPE_VSYNC_CLEAR (1UL<<9)
++#define HISTOGRAM_INT_CONTROL 0x61268
++#define HISTOGRAM_BIN_DATA 0X61264
++#define HISTOGRAM_LOGIC_CONTROL 0x61260
++#define PWM_CONTROL_LOGIC 0x61250
++#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
++#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
++#define PIPE_DPST_EVENT_STATUS (1UL<<7)
++#define HISTOGRAM_INTERRUPT_ENABLE (1UL<<31)
++#define HISTOGRAM_LOGIC_ENABLE (1UL<<31)
++#define PWM_LOGIC_ENABLE (1UL<<31)
++#define PWM_PHASEIN_ENABLE (1UL<<25)
++#define PWM_PHASEIN_INT_ENABLE (1UL<<24)
++#define PWM_PHASEIN_VB_COUNT 0x00001f00
++#define PWM_PHASEIN_INC 0x0000001f
++#define HISTOGRAM_INT_CTRL_CLEAR (1UL<<30)
++#define DPST_YUV_LUMA_MODE 0
++
++struct dpst_ie_histogram_control {
++ union {
++ uint32_t data;
++ struct {
++ uint32_t bin_reg_index:7;
++ uint32_t reserved:4;
++ uint32_t bin_reg_func_select:1;
++ uint32_t sync_to_phase_in:1;
++ uint32_t alt_enhancement_mode:2;
++ uint32_t reserved1:1;
++ uint32_t sync_to_phase_in_count:8;
++ uint32_t histogram_mode_select:1;
++ uint32_t reserved2:4;
++ uint32_t ie_pipe_assignment:1;
++ uint32_t ie_mode_table_enabled:1;
++ uint32_t ie_histogram_enable:1;
++ };
++ };
++};
++
++struct dpst_guardband {
++ union {
++ uint32_t data;
++ struct {
++ uint32_t guardband:22;
++ uint32_t guardband_interrupt_delay:8;
++ uint32_t interrupt_status:1;
++ uint32_t interrupt_enable:1;
++ };
++ };
++};
++
++#define PIPEAFRAMEHIGH 0x70040
++#define PIPEAFRAMEPIXEL 0x70044
++#define PIPEBFRAMEHIGH 0x71040
++#define PIPEBFRAMEPIXEL 0x71044
++#define PIPE_FRAME_HIGH_MASK 0x0000ffff
++#define PIPE_FRAME_HIGH_SHIFT 0
++#define PIPE_FRAME_LOW_MASK 0xff000000
++#define PIPE_FRAME_LOW_SHIFT 24
++#define PIPE_PIXEL_MASK 0x00ffffff
++#define PIPE_PIXEL_SHIFT 0
++
++#define DSPARB 0x70030
++#define DSPFW1 0x70034
++#define DSPFW2 0x70038
++#define DSPFW3 0x7003c
++#define DSPFW4 0x70050
++#define DSPFW5 0x70054
++#define DSPFW6 0x70058
++#define DSPCHICKENBIT 0x70400
++#define DSPACNTR 0x70180
++#define DSPBCNTR 0x71180
++#define DISPLAY_PLANE_ENABLE (1<<31)
++#define DISPLAY_PLANE_DISABLE 0
++#define DISPPLANE_GAMMA_ENABLE (1<<30)
++#define DISPPLANE_GAMMA_DISABLE 0
++#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
++#define DISPPLANE_8BPP (0x2<<26)
++#define DISPPLANE_15_16BPP (0x4<<26)
++#define DISPPLANE_16BPP (0x5<<26)
++#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
++#define DISPPLANE_32BPP (0x7<<26)
++#define DISPPLANE_STEREO_ENABLE (1<<25)
++#define DISPPLANE_STEREO_DISABLE 0
++#define DISPPLANE_SEL_PIPE_MASK (1<<24)
++#define DISPPLANE_SEL_PIPE_A 0
++#define DISPPLANE_SEL_PIPE_B (1<<24)
++#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
++#define DISPPLANE_SRC_KEY_DISABLE 0
++#define DISPPLANE_LINE_DOUBLE (1<<20)
++#define DISPPLANE_NO_LINE_DOUBLE 0
++#define DISPPLANE_STEREO_POLARITY_FIRST 0
++#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
++/* plane B only */
++#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
++#define DISPPLANE_ALPHA_TRANS_DISABLE 0
++#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
++#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
++
++#define DSPABASE 0x70184
++#define DSPALINOFF 0x70184
++#define DSPASTRIDE 0x70188
++
++#define DSPBBASE 0x71184
++#define DSPBLINOFF 0X71184
++#define DSPBADDR DSPBBASE
++#define DSPBSTRIDE 0x71188
++
++#define DSPAKEYVAL 0x70194
++#define DSPAKEYMASK 0x70198
++
++#define DSPAPOS 0x7018C /* reserved */
++#define DSPASIZE 0x70190
++#define DSPBPOS 0x7118C
++#define DSPBSIZE 0x71190
++
++#define DSPASURF 0x7019C
++#define DSPATILEOFF 0x701A4
++
++#define DSPBSURF 0x7119C
++#define DSPBTILEOFF 0x711A4
++
++/* plane C only */
++#define DSPCCNTR 0x72180
++#define DSPCLINOFF 0x72184
++#define DSPCSTRIDE 0x72188
++#define DSPCPOS 0x7218C
++#define DSPCSIZE 0x72190
++#define DSPCSURF 0x7219C
++#define DSPCKEYMAXVAL 0x721A0
++#define DSPCKEYMINVAL 0x72194
++#define DSPCKEYMSK 0x72198
++
++#define VGACNTRL 0x71400
++# define VGA_DISP_DISABLE (1 << 31)
++# define VGA_2X_MODE (1 << 30)
++# define VGA_PIPE_B_SELECT (1 << 29)
++
++/*
++ * Overlay registers
++ */
++#define OV_OVADD 0x30000
++#define OV_OGAMC5 0x30010
++#define OV_OGAMC4 0x30014
++#define OV_OGAMC3 0x30018
++#define OV_OGAMC2 0x3001C
++#define OV_OGAMC1 0x30020
++#define OV_OGAMC0 0x30024
++
++/*
++ * Some BIOS scratch area registers. The 845 (and 830?) store the amount
++ * of video memory available to the BIOS in SWF1.
++ */
++
++#define SWF0 0x71410
++#define SWF1 0x71414
++#define SWF2 0x71418
++#define SWF3 0x7141c
++#define SWF4 0x71420
++#define SWF5 0x71424
++#define SWF6 0x71428
++
++/*
++ * 855 scratch registers.
++ */
++#define SWF00 0x70410
++#define SWF01 0x70414
++#define SWF02 0x70418
++#define SWF03 0x7041c
++#define SWF04 0x70420
++#define SWF05 0x70424
++#define SWF06 0x70428
++
++#define SWF10 SWF0
++#define SWF11 SWF1
++#define SWF12 SWF2
++#define SWF13 SWF3
++#define SWF14 SWF4
++#define SWF15 SWF5
++#define SWF16 SWF6
++
++#define SWF30 0x72414
++#define SWF31 0x72418
++#define SWF32 0x7241c
++
++
++/*
++ * Palette registers
++ */
++#define PALETTE_A 0x0a000
++#define PALETTE_B 0x0a800
++
++#define IS_I830(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82830_CGC)
++#define IS_845G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82845G_IG)
++#define IS_I85X(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
++#define IS_I855(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG)
++#define IS_I865G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82865_IG)
++
++
++/* || dev->pci_device == PCI_DEVICE_ID_INTELPCI_CHIP_E7221_G) */
++#define IS_I915G(dev) (dev->pci_device == PCI_DEVICE_ID_INTEL_82915G_IG)
++#define IS_I915GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82915GM_IG)
++#define IS_I945G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945G_IG)
++#define IS_I945GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945GM_IG)
++
++#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
++ (dev)->pci_device == 0x2982 || \
++ (dev)->pci_device == 0x2992 || \
++ (dev)->pci_device == 0x29A2 || \
++ (dev)->pci_device == 0x2A02 || \
++ (dev)->pci_device == 0x2A12)
++
++#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
++
++#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
++ (dev)->pci_device == 0x29B2 || \
++ (dev)->pci_device == 0x29D2)
++
++#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
++ IS_I945GM(dev) || IS_I965G(dev) || IS_POULSBO(dev) || \
++ IS_MRST(dev))
++
++#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
++ IS_I945GM(dev) || IS_I965GM(dev) || \
++ IS_POULSBO(dev) || IS_MRST(dev))
++
++/* Cursor A & B regs */
++#define CURACNTR 0x70080
++#define CURSOR_MODE_DISABLE 0x00
++#define CURSOR_MODE_64_32B_AX 0x07
++#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
++#define MCURSOR_GAMMA_ENABLE (1 << 26)
++#define CURABASE 0x70084
++#define CURAPOS 0x70088
++#define CURSOR_POS_MASK 0x007FF
++#define CURSOR_POS_SIGN 0x8000
++#define CURSOR_X_SHIFT 0
++#define CURSOR_Y_SHIFT 16
++#define CURBCNTR 0x700c0
++#define CURBBASE 0x700c4
++#define CURBPOS 0x700c8
++
++/*
++ * Interrupt Registers
++ */
++#define IER 0x020a0
++#define IIR 0x020a4
++#define IMR 0x020a8
++#define ISR 0x020ac
++
++/*
++ * MOORESTOWN delta registers
++ */
++#define MRST_DPLL_A 0x0f014
++#define DPLLA_MODE_LVDS (2 << 26) /* mrst */
++#define MRST_FPA0 0x0f040
++#define MRST_FPA1 0x0f044
++#define MRST_PERF_MODE 0x020f4
++
++/* #define LVDS 0x61180 */
++# define MRST_PANEL_8TO6_DITHER_ENABLE (1 << 25)
++# define MRST_PANEL_24_DOT_1_FORMAT (1 << 24)
++# define LVDS_A3_POWER_UP_0_OUTPUT (1 << 6)
++
++#define MIPI 0x61190
++# define MIPI_PORT_EN (1 << 31)
++/** Turns on border drawing to allow centered display. */
++# define MIPI_BORDER_EN (1 << 15)
++
++/* #define PP_CONTROL 0x61204 */
++# define POWER_DOWN_ON_RESET (1 << 1)
++
++/* #define PFIT_CONTROL 0x61230 */
++# define PFIT_PIPE_SELECT (3 << 29)
++# define PFIT_PIPE_SELECT_SHIFT (29)
++
++/* #define BLC_PWM_CTL 0x61254 */
++#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16)
++#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16)
++
++/* #define PIPEACONF 0x70008 */
++#define PIPEACONF_PIPE_STATE (1<<30)
++/* #define DSPACNTR 0x70180 */
++#if 0 /*FIXME JLIU7 need to define the following */
++1000 = 32 - bit RGBX(10 : 10 : 10 : 2)
++pixel format.Ignore alpha.1010 = BGRX 10 : 10 : 10 : 2 1100 = 64 - bit RGBX
++(16 : 16 : 16 : 16) 16 bit floating point pixel format.
++Ignore alpha.1110 = 32 - bit RGBX(8 : 8 : 8 : 8) pixel format.
++ Ignore
++ alpha.
++#endif /*FIXME JLIU7 need to define the following */
++
++#define MRST_DSPABASE 0x7019c
++
++/*
++ * MOORESTOWN reserved registers
++ */
++#if 0
++#define DSPAPOS 0x7018C /* reserved */
++#define DSPASIZE 0x70190
++#endif
++/*
++ * Moorestown registers.
++ */
++/*===========================================================================
++; General Constants
++;--------------------------------------------------------------------------*/
++#define BIT0 0x00000001
++#define BIT1 0x00000002
++#define BIT2 0x00000004
++#define BIT3 0x00000008
++#define BIT4 0x00000010
++#define BIT5 0x00000020
++#define BIT6 0x00000040
++#define BIT7 0x00000080
++#define BIT8 0x00000100
++#define BIT9 0x00000200
++#define BIT10 0x00000400
++#define BIT11 0x00000800
++#define BIT12 0x00001000
++#define BIT13 0x00002000
++#define BIT14 0x00004000
++#define BIT15 0x00008000
++#define BIT16 0x00010000
++#define BIT17 0x00020000
++#define BIT18 0x00040000
++#define BIT19 0x00080000
++#define BIT20 0x00100000
++#define BIT21 0x00200000
++#define BIT22 0x00400000
++#define BIT23 0x00800000
++#define BIT24 0x01000000
++#define BIT25 0x02000000
++#define BIT26 0x04000000
++#define BIT27 0x08000000
++#define BIT28 0x10000000
++#define BIT29 0x20000000
++#define BIT30 0x40000000
++#define BIT31 0x80000000
++/*===========================================================================
++; MIPI IP registers
++;--------------------------------------------------------------------------*/
++#define DEVICE_READY_REG 0xb000
++#define INTR_STAT_REG 0xb004
++#define RX_SOT_ERROR BIT0
++#define RX_SOT_SYNC_ERROR BIT1
++#define RX_ESCAPE_MODE_ENTRY_ERROR BIT3
++#define RX_LP_TX_SYNC_ERROR BIT4
++#define RX_HS_RECEIVE_TIMEOUT_ERROR BIT5
++#define RX_FALSE_CONTROL_ERROR BIT6
++#define RX_ECC_SINGLE_BIT_ERROR BIT7
++#define RX_ECC_MULTI_BIT_ERROR BIT8
++#define RX_CHECKSUM_ERROR BIT9
++#define RX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT10
++#define RX_DSI_VC_ID_INVALID BIT11
++#define TX_FALSE_CONTROL_ERROR BIT12
++#define TX_ECC_SINGLE_BIT_ERROR BIT13
++#define TX_ECC_MULTI_BIT_ERROR BIT14
++#define TX_CHECKSUM_ERROR BIT15
++#define TX_DSI_DATA_TYPE_NOT_RECOGNIZED BIT16
++#define TX_DSI_VC_ID_INVALID BIT17
++#define HIGH_CONTENTION BIT18
++#define LOW_CONTENTION BIT19
++#define DPI_FIFO_UNDER_RUN BIT20
++#define HS_TX_TIMEOUT BIT21
++#define LP_RX_TIMEOUT BIT22
++#define TURN_AROUND_ACK_TIMEOUT BIT23
++#define ACK_WITH_NO_ERROR BIT24
++#define INTR_EN_REG 0xb008
++#define DSI_FUNC_PRG_REG 0xb00c
++#define DPI_CHANNEL_NUMBER_POS 0x03
++#define DBI_CHANNEL_NUMBER_POS 0x05
++#define FMT_DPI_POS 0x07
++#define FMT_DBI_POS 0x0A
++#define DBI_DATA_WIDTH_POS 0x0D
++#define HS_TX_TIMEOUT_REG 0xb010
++#define LP_RX_TIMEOUT_REG 0xb014
++#define TURN_AROUND_TIMEOUT_REG 0xb018
++#define DEVICE_RESET_REG 0xb01C
++#define DPI_RESOLUTION_REG 0xb020
++#define RES_V_POS 0x10
++#define DBI_RESOLUTION_REG 0xb024
++#define HORIZ_SYNC_PAD_COUNT_REG 0xb028
++#define HORIZ_BACK_PORCH_COUNT_REG 0xb02C
++#define HORIZ_FRONT_PORCH_COUNT_REG 0xb030
++#define HORIZ_ACTIVE_AREA_COUNT_REG 0xb034
++#define VERT_SYNC_PAD_COUNT_REG 0xb038
++#define VERT_BACK_PORCH_COUNT_REG 0xb03c
++#define VERT_FRONT_PORCH_COUNT_REG 0xb040
++#define HIGH_LOW_SWITCH_COUNT_REG 0xb044
++#define DPI_CONTROL_REG 0xb048
++#define DPI_SHUT_DOWN BIT0
++#define DPI_TURN_ON BIT1
++#define DPI_COLOR_MODE_ON BIT2
++#define DPI_COLOR_MODE_OFF BIT3
++#define DPI_BACK_LIGHT_ON BIT4
++#define DPI_BACK_LIGHT_OFF BIT5
++#define DPI_LP BIT6
++#define DPI_DATA_REG 0xb04c
++#define DPI_BACK_LIGHT_ON_DATA 0x07
++#define DPI_BACK_LIGHT_OFF_DATA 0x17
++#define INIT_COUNT_REG 0xb050
++#define MAX_RET_PAK_REG 0xb054
++#define VIDEO_FMT_REG 0xb058
++#define EOT_DISABLE_REG 0xb05c
++#define LP_BYTECLK_REG 0xb060
++#define LP_GEN_DATA_REG 0xb064
++#define HS_GEN_DATA_REG 0xb068
++#define LP_GEN_CTRL_REG 0xb06C
++#define HS_GEN_CTRL_REG 0xb070
++#define GEN_FIFO_STAT_REG 0xb074
++#define HS_DATA_FIFO_FULL BIT0
++#define HS_DATA_FIFO_HALF_EMPTY BIT1
++#define HS_DATA_FIFO_EMPTY BIT2
++#define LP_DATA_FIFO_FULL BIT8
++#define LP_DATA_FIFO_HALF_EMPTY BIT9
++#define LP_DATA_FIFO_EMPTY BIT10
++#define HS_CTRL_FIFO_FULL BIT16
++#define HS_CTRL_FIFO_HALF_EMPTY BIT17
++#define HS_CTRL_FIFO_EMPTY BIT18
++#define LP_CTRL_FIFO_FULL BIT24
++#define LP_CTRL_FIFO_HALF_EMPTY BIT25
++#define LP_CTRL_FIFO_EMPTY BIT26
++#define DBI_FIFO_EMPTY BIT27
++#define DPI_FIFO_EMPTY BIT28
++#define HS_LS_DBI_ENABLE_REG 0xb078
++#define TXCLKESC_REG 0xb07c
++#define DPHY_PARAM_REG 0xb080
++/*===========================================================================
++; MIPI Adapter registers
++;--------------------------------------------------------------------------*/
++#define MIPI_CONTROL_REG 0xb104
++#define MIPI_2X_CLOCK_BITS (BIT0 | BIT1)
++#define MIPI_DATA_ADDRESS_REG 0xb108
++#define MIPI_DATA_LENGTH_REG 0xb10C
++#define MIPI_COMMAND_ADDRESS_REG 0xb110
++#define MIPI_COMMAND_LENGTH_REG 0xb114
++#define MIPI_READ_DATA_RETURN_REG0 0xb118
++#define MIPI_READ_DATA_RETURN_REG1 0xb11C
++#define MIPI_READ_DATA_RETURN_REG2 0xb120
++#define MIPI_READ_DATA_RETURN_REG3 0xb124
++#define MIPI_READ_DATA_RETURN_REG4 0xb128
++#define MIPI_READ_DATA_RETURN_REG5 0xb12C
++#define MIPI_READ_DATA_RETURN_REG6 0xb130
++#define MIPI_READ_DATA_RETURN_REG7 0xb134
++#define MIPI_READ_DATA_VALID_REG 0xb138
++/* DBI COMMANDS */
++#define soft_reset 0x01
++/* ************************************************************************* *\
++The display module performs a software reset.
++Registers are written with their SW Reset default values.
++\* ************************************************************************* */
++#define get_power_mode 0x0a
++/* ************************************************************************* *\
++The display module returns the current power mode
++\* ************************************************************************* */
++#define get_address_mode 0x0b
++/* ************************************************************************* *\
++The display module returns the current status.
++\* ************************************************************************* */
++#define get_pixel_format 0x0c
++/* ************************************************************************* *\
++This command gets the pixel format for the RGB image data
++used by the interface.
++\* ************************************************************************* */
++#define get_display_mode 0x0d
++/* ************************************************************************* *\
++The display module returns the Display Image Mode status.
++\* ************************************************************************* */
++#define get_signal_mode 0x0e
++/* ************************************************************************* *\
++The display module returns the Display Signal Mode.
++\* ************************************************************************* */
++#define get_diagnostic_result 0x0f
++/* ************************************************************************* *\
++The display module returns the self-diagnostic results following
++a Sleep Out command.
++\* ************************************************************************* */
++#define enter_sleep_mode 0x10
++/* ************************************************************************* *\
++This command causes the display module to enter the Sleep mode.
++In this mode, all unnecessary blocks inside the display module are disabled
++except interface communication. This is the lowest power mode
++the display module supports.
++\* ************************************************************************* */
++#define exit_sleep_mode 0x11
++/* ************************************************************************* *\
++This command causes the display module to exit Sleep mode.
++All blocks inside the display module are enabled.
++\* ************************************************************************* */
++#define enter_partial_mode 0x12
++/* ************************************************************************* *\
++This command causes the display module to enter the Partial Display Mode.
++The Partial Display Mode window is described by the set_partial_area command.
++\* ************************************************************************* */
++#define enter_normal_mode 0x13
++/* ************************************************************************* *\
++This command causes the display module to enter the Normal mode.
++Normal Mode is defined as Partial Display mode and Scroll mode are off
++\* ************************************************************************* */
++#define exit_invert_mode 0x20
++/* ************************************************************************* *\
++This command causes the display module to stop inverting the image data on
++the display device. The frame memory contents remain unchanged.
++No status bits are changed.
++\* ************************************************************************* */
++#define enter_invert_mode 0x21
++/* ************************************************************************* *\
++This command causes the display module to invert the image data only on
++the display device. The frame memory contents remain unchanged.
++No status bits are changed.
++\* ************************************************************************* */
++#define set_gamma_curve 0x26
++/* ************************************************************************* *\
++This command selects the desired gamma curve for the display device.
++Four fixed gamma curves are defined in section DCS spec.
++\* ************************************************************************* */
++#define set_display_off 0x28
++/* ************************************************************************* *\
++This command causes the display module to stop displaying the image data
++on the display device. The frame memory contents remain unchanged.
++No status bits are changed.
++\* ************************************************************************* */
++#define set_display_on 0x29
++/* ************************************************************************* *\
++This command causes the display module to start displaying the image data
++on the display device. The frame memory contents remain unchanged.
++No status bits are changed.
++\* ************************************************************************* */
++#define set_column_address 0x2a
++/* ************************************************************************* *\
++This command defines the column extent of the frame memory accessed by the
++hostprocessor with the read_memory_continue and write_memory_continue commands.
++No status bits are changed.
++\* ************************************************************************* */
++#define set_page_address 0x2b
++/* ************************************************************************* *\
++This command defines the page extent of the frame memory accessed by the host
++processor with the write_memory_continue and read_memory_continue command.
++No status bits are changed.
++\* ************************************************************************* */
++#define write_mem_start 0x2c
++/* ************************************************************************* *\
++This command transfers image data from the host processor to the display
++module s frame memory starting at the pixel location specified by
++preceding set_column_address and set_page_address commands.
++\* ************************************************************************* */
++#define set_partial_area 0x30
++/* ************************************************************************* *\
++This command defines the Partial Display mode s display area.
++There are two parameters associated with
++this command, the first defines the Start Row (SR) and the second the End Row
++(ER). SR and ER refer to the Frame Memory Line Pointer.
++\* ************************************************************************* */
++#define set_scroll_area 0x33
++/* ************************************************************************* *\
++This command defines the display modules Vertical Scrolling Area.
++\* ************************************************************************* */
++#define set_tear_off 0x34
++/* ************************************************************************* *\
++This command turns off the display modules Tearing Effect output signal on
++the TE signal line.
++\* ************************************************************************* */
++#define set_tear_on 0x35
++/* ************************************************************************* *\
++This command turns on the display modules Tearing Effect output signal
++on the TE signal line.
++\* ************************************************************************* */
++#define set_address_mode 0x36
++/* ************************************************************************* *\
++This command sets the data order for transfers from the host processor to
++display modules frame memory,bits B[7:5] and B3, and from the display
++modules frame memory to the display device, bits B[2:0] and B4.
++\* ************************************************************************* */
++#define set_scroll_start 0x37
++/* ************************************************************************* *\
++This command sets the start of the vertical scrolling area in the frame memory.
++The vertical scrolling area is fully defined when this command is used with
++the set_scroll_area command The set_scroll_start command has one parameter,
++the Vertical Scroll Pointer. The VSP defines the line in the frame memory
++that is written to the display device as the first line of the vertical
++scroll area.
++\* ************************************************************************* */
++#define exit_idle_mode 0x38
++/* ************************************************************************* *\
++This command causes the display module to exit Idle mode.
++\* ************************************************************************* */
++#define enter_idle_mode 0x39
++/* ************************************************************************* *\
++This command causes the display module to enter Idle Mode.
++In Idle Mode, color expression is reduced. Colors are shown on the display
++device using the MSB of each of the R, G and B color components in the frame
++memory
++\* ************************************************************************* */
++#define set_pixel_format 0x3a
++/* ************************************************************************* *\
++This command sets the pixel format for the RGB image data used by the interface.
++Bits D[6:4] DPI Pixel Format Definition
++Bits D[2:0] DBI Pixel Format Definition
++Bits D7 and D3 are not used.
++\* ************************************************************************* */
++#define write_mem_cont 0x3c
++/* ************************************************************************* *\
++This command transfers image data from the host processor to the display
++module's frame memory continuing from the pixel location following the
++previous write_memory_continue or write_memory_start command.
++\* ************************************************************************* */
++#define set_tear_scanline 0x44
++/* ************************************************************************* *\
++This command turns on the display modules Tearing Effect output signal on the
++TE signal line when the display module reaches line N.
++\* ************************************************************************* */
++#define get_scanline 0x45
++/* ************************************************************************* *\
++The display module returns the current scanline, N, used to update the
++display device. The total number of scanlines on a display device is
++defined as VSYNC + VBP + VACT + VFP.The first scanline is defined as
++the first line of V Sync and is denoted as Line 0.
++When in Sleep Mode, the value returned by get_scanline is undefined.
++\* ************************************************************************* */
++/* DCS Interface Pixel Formats */
++#define DCS_PIXEL_FORMAT_3BPP 0x1
++#define DCS_PIXEL_FORMAT_8BPP 0x2
++#define DCS_PIXEL_FORMAT_12BPP 0x3
++#define DCS_PIXEL_FORMAT_16BPP 0x5
++#define DCS_PIXEL_FORMAT_18BPP 0x6
++#define DCS_PIXEL_FORMAT_24BPP 0x7
++/* ONE PARAMETER READ DATA */
++#define addr_mode_data 0xfc
++#define diag_res_data 0x00
++#define disp_mode_data 0x23
++#define pxl_fmt_data 0x77
++#define pwr_mode_data 0x74
++#define sig_mode_data 0x00
++/* TWO PARAMETERS READ DATA */
++#define scanline_data1 0xff
++#define scanline_data2 0xff
++/* DPI PIXEL FORMATS */
++#define RGB_565_FMT 0x01 /* RGB 565 FORMAT */
++#define RGB_666_FMT 0x02 /* RGB 666 FORMAT */
++#define LRGB_666_FMT 0x03 /* RGB LOOSELY PACKED
++ * 666 FORMAT
++ */
++#define RGB_888_FMT 0x04 /* RGB 888 FORMAT */
++#define NON_BURST_MODE_SYNC_PULSE 0x01 /* Non Burst Mode
++ * with Sync Pulse
++ */
++#define NON_BURST_MODE_SYNC_EVENTS 0x02 /* Non Burst Mode
++ * with Sync events
++ */
++#define BURST_MODE 0x03 /* Burst Mode */
++#define VIRTUAL_CHANNEL_NUMBER_0 0x00 /* Virtual channel 0 */
++#define VIRTUAL_CHANNEL_NUMBER_1 0x01 /* Virtual channel 1 */
++#define VIRTUAL_CHANNEL_NUMBER_2 0x02 /* Virtual channel 2 */
++#define VIRTUAL_CHANNEL_NUMBER_3 0x03 /* Virtual channel 3 */
++#define DBI_NOT_SUPPORTED 0x00 /* command mode
++ * is not supported
++ */
++#define DBI_DATA_WIDTH_16BIT 0x01 /* 16 bit data */
++#define DBI_DATA_WIDTH_9BIT 0x02 /* 9 bit data */
++#define DBI_DATA_WIDTH_8BIT 0x03 /* 8 bit data */
++#define DBI_COMMAND_BUFFER_SIZE 0x120 /* Allocate at least
++ * 0x100 Byte with 32
++ * byte alignment
++ */
++#define DBI_DATA_BUFFER_SIZE 0x120 /* Allocate at least
++ * 0x100 Byte with 32
++ * byte alignment
++ */
++#define ALIGNMENT_32BYTE_MASK (~(BIT0|BIT1|BIT2|BIT3|BIT4))
++#define SKU_83 0x01
++#define SKU_100 0x02
++#define SKU_100L 0x04
++#define SKU_BYPASS 0x08
++#if 0
++/* ************************************************************************* *\
++DSI command data structure
++\* ************************************************************************* */
++union DSI_LONG_PACKET_HEADER {
++ u32 DSI_longPacketHeader;
++ struct {
++ u8 dataID;
++ u16 wordCount;
++ u8 ECC;
++ };
++#if 0 /*FIXME JLIU7 */
++ struct {
++ u8 DT:6;
++ u8 VC:2;
++ };
++#endif /*FIXME JLIU7 */
++};
++
++union MIPI_ADPT_CMD_LNG_REG {
++ u32 commnadLengthReg;
++ struct {
++ u8 command0;
++ u8 command1;
++ u8 command2;
++ u8 command3;
++ };
++};
++
++struct SET_COLUMN_ADDRESS_DATA {
++ u8 command;
++ u16 SC; /* Start Column */
++ u16 EC; /* End Column */
++};
++
++struct SET_PAGE_ADDRESS_DATA {
++ u8 command;
++ u16 SP; /* Start Page */
++ u16 EP; /* End Page */
++};
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_sdvo.c b/drivers/gpu/drm/mrst/drv/psb_intel_sdvo.c
+new file mode 100644
+index 0000000..87696ed
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_sdvo.c
+@@ -0,0 +1,1408 @@
++/*
++ * Copyright (c) 2006-2007 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ */
++
++#include <linux/i2c.h>
++#include <linux/delay.h>
++/* #include <drm/drm_crtc.h> */
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_sdvo_regs.h"
++
++struct psb_intel_sdvo_priv {
++ struct psb_intel_i2c_chan *i2c_bus;
++ int slaveaddr;
++ int output_device;
++
++ u16 active_outputs;
++
++ struct psb_intel_sdvo_caps caps;
++ int pixel_clock_min, pixel_clock_max;
++
++ int save_sdvo_mult;
++ u16 save_active_outputs;
++ struct psb_intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
++ struct psb_intel_sdvo_dtd save_output_dtd[16];
++ u32 save_SDVOX;
++ u8 in_out_map[4];
++
++ u8 by_input_wiring;
++ u32 active_device;
++};
++
++/**
++ * Writes the SDVOB or SDVOC with the given value, but always writes both
++ * SDVOB and SDVOC to work around apparent hardware issues (according to
++ * comments in the BIOS).
++ */
++void psb_intel_sdvo_write_sdvox(struct psb_intel_output *psb_intel_output,
++ u32 val)
++{
++ struct drm_device *dev = psb_intel_output->base.dev;
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ u32 bval = val, cval = val;
++ int i;
++
++ if (sdvo_priv->output_device == SDVOB)
++ cval = REG_READ(SDVOC);
++ else
++ bval = REG_READ(SDVOB);
++ /*
++ * Write the registers twice for luck. Sometimes,
++ * writing them only once doesn't appear to 'stick'.
++ * The BIOS does this too. Yay, magic
++ */
++ for (i = 0; i < 2; i++) {
++ REG_WRITE(SDVOB, bval);
++ REG_READ(SDVOB);
++ REG_WRITE(SDVOC, cval);
++ REG_READ(SDVOC);
++ }
++}
++
++static bool psb_intel_sdvo_read_byte(
++ struct psb_intel_output *psb_intel_output,
++ u8 addr, u8 *ch)
++{
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ u8 out_buf[2];
++ u8 buf[2];
++ int ret;
++
++ struct i2c_msg msgs[] = {
++ {
++ .addr = sdvo_priv->i2c_bus->slave_addr,
++ .flags = 0,
++ .len = 1,
++ .buf = out_buf,
++ },
++ {
++ .addr = sdvo_priv->i2c_bus->slave_addr,
++ .flags = I2C_M_RD,
++ .len = 1,
++ .buf = buf,
++ }
++ };
++
++ out_buf[0] = addr;
++ out_buf[1] = 0;
++
++ ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2);
++ if (ret == 2) {
++ /* DRM_DEBUG("got back from addr %02X = %02x\n",
++ * out_buf[0], buf[0]);
++ */
++ *ch = buf[0];
++ return true;
++ }
++
++ DRM_DEBUG("i2c transfer returned %d\n", ret);
++ return false;
++}
++
++static bool psb_intel_sdvo_write_byte(
++ struct psb_intel_output *psb_intel_output,
++ int addr, u8 ch)
++{
++ u8 out_buf[2];
++ struct i2c_msg msgs[] = {
++ {
++ .addr = psb_intel_output->i2c_bus->slave_addr,
++ .flags = 0,
++ .len = 2,
++ .buf = out_buf,
++ }
++ };
++
++ out_buf[0] = addr;
++ out_buf[1] = ch;
++
++ if (i2c_transfer(&psb_intel_output->i2c_bus->adapter, msgs, 1) == 1)
++ return true;
++ return false;
++}
++
++#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
++/** Mapping of command numbers to names, for debug output */
++const static struct _sdvo_cmd_name {
++ u8 cmd;
++ char *name;
++} sdvo_cmd_names[] = {
++SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
++ SDVO_CMD_NAME_ENTRY
++ (SDVO_CMD_SET_TV_RESOLUTION_SUPPORT),
++ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),};
++
++#define SDVO_NAME(dev_priv) \
++ ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
++#define SDVO_PRIV(output) ((struct psb_intel_sdvo_priv *) (output)->dev_priv)
++
++static void psb_intel_sdvo_write_cmd(struct psb_intel_output *psb_intel_output,
++ u8 cmd,
++ void *args,
++ int args_len)
++{
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ int i;
++
++ if (1) {
++ DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd);
++ for (i = 0; i < args_len; i++)
++ printk(KERN_INFO"%02X ", ((u8 *) args)[i]);
++ for (; i < 8; i++)
++ printk(" ");
++ for (i = 0;
++ i <
++ sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]);
++ i++) {
++ if (cmd == sdvo_cmd_names[i].cmd) {
++ printk("(%s)", sdvo_cmd_names[i].name);
++ break;
++ }
++ }
++ if (i ==
++ sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]))
++ printk("(%02X)", cmd);
++ printk("\n");
++ }
++
++ for (i = 0; i < args_len; i++) {
++ psb_intel_sdvo_write_byte(psb_intel_output,
++ SDVO_I2C_ARG_0 - i,
++ ((u8 *) args)[i]);
++ }
++
++ psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_OPCODE, cmd);
++}
++
++static const char *cmd_status_names[] = {
++ "Power on",
++ "Success",
++ "Not supported",
++ "Invalid arg",
++ "Pending",
++ "Target not specified",
++ "Scaling not supported"
++};
++
++static u8 psb_intel_sdvo_read_response(
++ struct psb_intel_output *psb_intel_output,
++ void *response, int response_len)
++{
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ int i;
++ u8 status;
++ u8 retry = 50;
++
++ while (retry--) {
++ /* Read the command response */
++ for (i = 0; i < response_len; i++) {
++ psb_intel_sdvo_read_byte(psb_intel_output,
++ SDVO_I2C_RETURN_0 + i,
++ &((u8 *) response)[i]);
++ }
++
++ /* read the return status */
++ psb_intel_sdvo_read_byte(psb_intel_output,
++ SDVO_I2C_CMD_STATUS,
++ &status);
++
++ if (1) {
++ DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv));
++ for (i = 0; i < response_len; i++)
++ printk(KERN_INFO"%02X ", ((u8 *) response)[i]);
++ for (; i < 8; i++)
++ printk(" ");
++ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
++ printk(KERN_INFO"(%s)",
++ cmd_status_names[status]);
++ else
++ printk(KERN_INFO"(??? %d)", status);
++ printk("\n");
++ }
++
++ if (status != SDVO_CMD_STATUS_PENDING)
++ return status;
++
++ mdelay(50);
++ }
++
++ return status;
++}
++
++int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
++{
++ if (mode->clock >= 100000)
++ return 1;
++ else if (mode->clock >= 50000)
++ return 2;
++ else
++ return 4;
++}
++
++/**
++ * Don't check status code from this as it switches the bus back to the
++ * SDVO chips which defeats the purpose of doing a bus switch in the first
++ * place.
++ */
++void psb_intel_sdvo_set_control_bus_switch(
++ struct psb_intel_output *psb_intel_output,
++ u8 target)
++{
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_SET_CONTROL_BUS_SWITCH,
++ &target,
++ 1);
++}
++
++static bool psb_intel_sdvo_set_target_input(
++ struct psb_intel_output *psb_intel_output,
++ bool target_0, bool target_1)
++{
++ struct psb_intel_sdvo_set_target_input_args targets = { 0 };
++ u8 status;
++
++ if (target_0 && target_1)
++ return SDVO_CMD_STATUS_NOTSUPP;
++
++ if (target_1)
++ targets.target_1 = 1;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_INPUT,
++ &targets, sizeof(targets));
++
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++
++ return status == SDVO_CMD_STATUS_SUCCESS;
++}
++
++/**
++ * Return whether each input is trained.
++ *
++ * This function is making an assumption about the layout of the response,
++ * which should be checked against the docs.
++ */
++static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_output
++ *psb_intel_output, bool *input_1,
++ bool *input_2)
++{
++ struct psb_intel_sdvo_get_trained_inputs_response response;
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_TRAINED_INPUTS,
++ NULL, 0);
++ status =
++ psb_intel_sdvo_read_response(psb_intel_output, &response,
++ sizeof(response));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ *input_1 = response.input0_trained;
++ *input_2 = response.input1_trained;
++ return true;
++}
++
++static bool psb_intel_sdvo_get_active_outputs(struct psb_intel_output
++ *psb_intel_output, u16 *outputs)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS,
++ NULL, 0);
++ status =
++ psb_intel_sdvo_read_response(psb_intel_output, outputs,
++ sizeof(*outputs));
++
++ return status == SDVO_CMD_STATUS_SUCCESS;
++}
++
++static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_output
++ *psb_intel_output, u16 outputs)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS,
++ &outputs, sizeof(outputs));
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++ return status == SDVO_CMD_STATUS_SUCCESS;
++}
++
++static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_output
++ *psb_intel_output, int mode)
++{
++ u8 status, state = SDVO_ENCODER_STATE_ON;
++
++ switch (mode) {
++ case DRM_MODE_DPMS_ON:
++ state = SDVO_ENCODER_STATE_ON;
++ break;
++ case DRM_MODE_DPMS_STANDBY:
++ state = SDVO_ENCODER_STATE_STANDBY;
++ break;
++ case DRM_MODE_DPMS_SUSPEND:
++ state = SDVO_ENCODER_STATE_SUSPEND;
++ break;
++ case DRM_MODE_DPMS_OFF:
++ state = SDVO_ENCODER_STATE_OFF;
++ break;
++ }
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
++ sizeof(state));
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++
++ return status == SDVO_CMD_STATUS_SUCCESS;
++}
++
++static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_output
++ *psb_intel_output,
++ int *clock_min,
++ int *clock_max)
++{
++ struct psb_intel_sdvo_pixel_clock_range clocks;
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, NULL,
++ 0);
++
++ status =
++ psb_intel_sdvo_read_response(psb_intel_output, &clocks,
++ sizeof(clocks));
++
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ /* Convert the values from units of 10 kHz to kHz. */
++ *clock_min = clocks.min * 10;
++ *clock_max = clocks.max * 10;
++
++ return true;
++}
++
++static bool psb_intel_sdvo_set_target_output(
++ struct psb_intel_output *psb_intel_output,
++ u16 outputs)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_OUTPUT,
++ &outputs, sizeof(outputs));
++
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++ return status == SDVO_CMD_STATUS_SUCCESS;
++}
++
++static bool psb_intel_sdvo_get_timing(struct psb_intel_output *psb_intel_output,
++ u8 cmd, struct psb_intel_sdvo_dtd *dtd)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, cmd, NULL, 0);
++ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1,
++ sizeof(dtd->part1));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, NULL, 0);
++ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2,
++ sizeof(dtd->part2));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ return true;
++}
++
++static bool psb_intel_sdvo_get_input_timing(
++ struct psb_intel_output *psb_intel_output,
++ struct psb_intel_sdvo_dtd *dtd)
++{
++ return psb_intel_sdvo_get_timing(psb_intel_output,
++ SDVO_CMD_GET_INPUT_TIMINGS_PART1,
++ dtd);
++}
++#if 0
++static bool psb_intel_sdvo_get_output_timing(
++ struct psb_intel_output *psb_intel_output,
++ struct psb_intel_sdvo_dtd *dtd)
++{
++ return psb_intel_sdvo_get_timing(psb_intel_output,
++ SDVO_CMD_GET_OUTPUT_TIMINGS_PART1,
++ dtd);
++}
++#endif
++static bool psb_intel_sdvo_set_timing(
++ struct psb_intel_output *psb_intel_output,
++ u8 cmd,
++ struct psb_intel_sdvo_dtd *dtd)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, cmd, &dtd->part1,
++ sizeof(dtd->part1));
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, &dtd->part2,
++ sizeof(dtd->part2));
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ return true;
++}
++
++static bool psb_intel_sdvo_set_input_timing(
++ struct psb_intel_output *psb_intel_output,
++ struct psb_intel_sdvo_dtd *dtd)
++{
++ return psb_intel_sdvo_set_timing(psb_intel_output,
++ SDVO_CMD_SET_INPUT_TIMINGS_PART1,
++ dtd);
++}
++
++static bool psb_intel_sdvo_set_output_timing(
++ struct psb_intel_output *psb_intel_output,
++ struct psb_intel_sdvo_dtd *dtd)
++{
++ return psb_intel_sdvo_set_timing(psb_intel_output,
++ SDVO_CMD_SET_OUTPUT_TIMINGS_PART1,
++ dtd);
++}
++
++#if 0
++static bool psb_intel_sdvo_get_preferred_input_timing(struct psb_intel_output
++ *psb_intel_output,
++ struct psb_intel_sdvo_dtd
++ *dtd)
++{
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
++ NULL, 0);
++
++ status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1,
++ sizeof(dtd->part1));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
++ NULL, 0);
++ status =
++ psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2,
++ sizeof(dtd->part2));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ return true;
++}
++#endif
++
++static int psb_intel_sdvo_get_clock_rate_mult(struct psb_intel_output
++ *psb_intel_output)
++{
++ u8 response, status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_CLOCK_RATE_MULT,
++ NULL,
++ 0);
++
++ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 1);
++
++ if (status != SDVO_CMD_STATUS_SUCCESS) {
++ DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
++ return SDVO_CLOCK_RATE_MULT_1X;
++ } else {
++ DRM_DEBUG("Current clock rate multiplier: %d\n", response);
++ }
++
++ return response;
++}
++
++static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_output
++ *psb_intel_output, u8 val)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_SET_CLOCK_RATE_MULT,
++ &val,
++ 1);
++
++ status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ return true;
++}
++
++static bool psb_sdvo_set_current_inoutmap(struct psb_intel_output *output,
++ u32 in0outputmask,
++ u32 in1outputmask)
++{
++ u8 byArgs[4];
++ u8 status;
++ int i;
++ struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv;
++
++ /* Make all fields of the args/ret to zero */
++ memset(byArgs, 0, sizeof(byArgs));
++
++ /* Fill up the argument values; */
++ byArgs[0] = (u8) (in0outputmask & 0xFF);
++ byArgs[1] = (u8) ((in0outputmask >> 8) & 0xFF);
++ byArgs[2] = (u8) (in1outputmask & 0xFF);
++ byArgs[3] = (u8) ((in1outputmask >> 8) & 0xFF);
++
++
++ /*save inoutmap arg here*/
++ for (i = 0; i < 4; i++)
++ sdvo_priv->in_out_map[i] = byArgs[i];
++
++ psb_intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, byArgs, 4);
++ status = psb_intel_sdvo_read_response(output, NULL, 0);
++
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++ return true;
++}
++
++
++static void psb_intel_sdvo_set_iomap(struct psb_intel_output *output)
++{
++ u32 dwCurrentSDVOIn0 = 0;
++ u32 dwCurrentSDVOIn1 = 0;
++ u32 dwDevMask = 0;
++
++
++ struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv;
++
++ /* Please DO NOT change the following code. */
++ /* SDVOB_IN0 or SDVOB_IN1 ==> sdvo_in0 */
++ /* SDVOC_IN0 or SDVOC_IN1 ==> sdvo_in1 */
++ if (sdvo_priv->by_input_wiring & (SDVOB_IN0 | SDVOC_IN0)) {
++ switch (sdvo_priv->active_device) {
++ case SDVO_DEVICE_LVDS:
++ dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
++ break;
++ case SDVO_DEVICE_TMDS:
++ dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
++ break;
++ case SDVO_DEVICE_TV:
++ dwDevMask =
++ SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 |
++ SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB1 |
++ SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
++ SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
++ break;
++ case SDVO_DEVICE_CRT:
++ dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
++ break;
++ }
++ dwCurrentSDVOIn0 = (sdvo_priv->active_outputs & dwDevMask);
++ } else if (sdvo_priv->by_input_wiring & (SDVOB_IN1 | SDVOC_IN1)) {
++ switch (sdvo_priv->active_device) {
++ case SDVO_DEVICE_LVDS:
++ dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
++ break;
++ case SDVO_DEVICE_TMDS:
++ dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
++ break;
++ case SDVO_DEVICE_TV:
++ dwDevMask =
++ SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 |
++ SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB1 |
++ SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
++ SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
++ break;
++ case SDVO_DEVICE_CRT:
++ dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
++ break;
++ }
++ dwCurrentSDVOIn1 = (sdvo_priv->active_outputs & dwDevMask);
++ }
++
++ psb_sdvo_set_current_inoutmap(output, dwCurrentSDVOIn0,
++ dwCurrentSDVOIn1);
++}
++
++
++static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ /* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO
++ * device will be told of the multiplier during mode_set.
++ */
++ adjusted_mode->clock *= psb_intel_sdvo_get_pixel_multiplier(mode);
++ return true;
++}
++
++static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct drm_crtc *crtc = encoder->crtc;
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ struct psb_intel_output *psb_intel_output =
++ enc_to_psb_intel_output(encoder);
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ u16 width, height;
++ u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
++ u16 h_sync_offset, v_sync_offset;
++ u32 sdvox;
++ struct psb_intel_sdvo_dtd output_dtd;
++ int sdvo_pixel_multiply;
++
++ if (!mode)
++ return;
++
++ psb_intel_sdvo_set_target_output(psb_intel_output, 0);
++
++ width = mode->crtc_hdisplay;
++ height = mode->crtc_vdisplay;
++
++ /* do some mode translations */
++ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
++ h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
++
++ v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
++ v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
++
++ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
++ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
++
++ output_dtd.part1.clock = mode->clock / 10;
++ output_dtd.part1.h_active = width & 0xff;
++ output_dtd.part1.h_blank = h_blank_len & 0xff;
++ output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) |
++ ((h_blank_len >> 8) & 0xf);
++ output_dtd.part1.v_active = height & 0xff;
++ output_dtd.part1.v_blank = v_blank_len & 0xff;
++ output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) |
++ ((v_blank_len >> 8) & 0xf);
++
++ output_dtd.part2.h_sync_off = h_sync_offset;
++ output_dtd.part2.h_sync_width = h_sync_len & 0xff;
++ output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
++ (v_sync_len & 0xf);
++ output_dtd.part2.sync_off_width_high =
++ ((h_sync_offset & 0x300) >> 2) | ((h_sync_len & 0x300) >> 4) |
++ ((v_sync_offset & 0x30) >> 2) | ((v_sync_len & 0x30) >> 4);
++
++ output_dtd.part2.dtd_flags = 0x18;
++ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
++ output_dtd.part2.dtd_flags |= 0x2;
++ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
++ output_dtd.part2.dtd_flags |= 0x4;
++
++ output_dtd.part2.sdvo_flags = 0;
++ output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0;
++ output_dtd.part2.reserved = 0;
++
++ /* Set the output timing to the screen */
++ psb_intel_sdvo_set_target_output(psb_intel_output,
++ sdvo_priv->active_outputs);
++
++ /* Set the input timing to the screen. Assume always input 0. */
++ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
++
++ psb_intel_sdvo_set_output_timing(psb_intel_output, &output_dtd);
++
++ /* We would like to use i830_sdvo_create_preferred_input_timing() to
++ * provide the device with a timing it can support, if it supports that
++ * feature. However, presumably we would need to adjust the CRTC to
++ * output the preferred timing, and we don't support that currently.
++ */
++#if 0
++ success =
++ psb_intel_sdvo_create_preferred_input_timing(psb_intel_output,
++ clock,
++ width,
++ height);
++ if (success) {
++ struct psb_intel_sdvo_dtd *input_dtd;
++
++ psb_intel_sdvo_get_preferred_input_timing(psb_intel_output,
++ &input_dtd);
++ psb_intel_sdvo_set_input_timing(psb_intel_output, &input_dtd);
++ }
++#else
++ psb_intel_sdvo_set_input_timing(psb_intel_output, &output_dtd);
++#endif
++
++ switch (psb_intel_sdvo_get_pixel_multiplier(mode)) {
++ case 1:
++ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
++ SDVO_CLOCK_RATE_MULT_1X);
++ break;
++ case 2:
++ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
++ SDVO_CLOCK_RATE_MULT_2X);
++ break;
++ case 4:
++ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
++ SDVO_CLOCK_RATE_MULT_4X);
++ break;
++ }
++
++ /* Set the SDVO control regs. */
++ if (0 /*IS_I965GM(dev) */) {
++ sdvox = SDVO_BORDER_ENABLE;
++ } else {
++ sdvox = REG_READ(sdvo_priv->output_device);
++ switch (sdvo_priv->output_device) {
++ case SDVOB:
++ sdvox &= SDVOB_PRESERVE_MASK;
++ break;
++ case SDVOC:
++ sdvox &= SDVOC_PRESERVE_MASK;
++ break;
++ }
++ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
++ }
++ if (psb_intel_crtc->pipe == 1)
++ sdvox |= SDVO_PIPE_B_SELECT;
++
++ sdvo_pixel_multiply = psb_intel_sdvo_get_pixel_multiplier(mode);
++
++#if 0
++ if (IS_I965G(dev)) {
++ /* done in crtc_mode_set as the dpll_md reg must be written
++ * early */
++ } else if (IS_I945G(dev) || IS_I945GM(dev)) {
++ /* done in crtc_mode_set as it lives inside the
++ * dpll register */
++ } else {
++ sdvox |=
++ (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
++ }
++#endif
++
++ psb_intel_sdvo_write_sdvox(psb_intel_output, sdvox);
++
++ psb_intel_sdvo_set_iomap(psb_intel_output);
++}
++
++static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
++{
++ struct drm_device *dev = encoder->dev;
++ struct psb_intel_output *psb_intel_output =
++ enc_to_psb_intel_output(encoder);
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ u32 temp;
++
++ if (mode != DRM_MODE_DPMS_ON) {
++ psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
++ if (0)
++ psb_intel_sdvo_set_encoder_power_state(
++ psb_intel_output,
++ mode);
++
++ if (mode == DRM_MODE_DPMS_OFF) {
++ temp = REG_READ(sdvo_priv->output_device);
++ if ((temp & SDVO_ENABLE) != 0) {
++ psb_intel_sdvo_write_sdvox(psb_intel_output,
++ temp &
++ ~SDVO_ENABLE);
++ }
++ }
++ } else {
++ bool input1, input2;
++ int i;
++ u8 status;
++
++ temp = REG_READ(sdvo_priv->output_device);
++ if ((temp & SDVO_ENABLE) == 0)
++ psb_intel_sdvo_write_sdvox(psb_intel_output,
++ temp | SDVO_ENABLE);
++ for (i = 0; i < 2; i++)
++ psb_intel_wait_for_vblank(dev);
++
++ status =
++ psb_intel_sdvo_get_trained_inputs(psb_intel_output,
++ &input1,
++ &input2);
++
++
++ /* Warn if the device reported failure to sync.
++ * A lot of SDVO devices fail to notify of sync, but it's
++ * a given if the status is a success, we succeeded.
++ */
++ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
++ DRM_DEBUG
++ ("First %s output reported failure to sync\n",
++ SDVO_NAME(sdvo_priv));
++ }
++
++ if (0)
++ psb_intel_sdvo_set_encoder_power_state(
++ psb_intel_output,
++ mode);
++ psb_intel_sdvo_set_active_outputs(psb_intel_output,
++ sdvo_priv->active_outputs);
++ }
++ return;
++}
++
++static void psb_intel_sdvo_save(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ /*int o;*/
++
++ sdvo_priv->save_sdvo_mult =
++ psb_intel_sdvo_get_clock_rate_mult(psb_intel_output);
++ psb_intel_sdvo_get_active_outputs(psb_intel_output,
++ &sdvo_priv->save_active_outputs);
++
++ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
++ psb_intel_sdvo_set_target_input(psb_intel_output,
++ true,
++ false);
++ psb_intel_sdvo_get_input_timing(psb_intel_output,
++ &sdvo_priv->save_input_dtd_1);
++ }
++
++ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
++ psb_intel_sdvo_set_target_input(psb_intel_output,
++ false,
++ true);
++ psb_intel_sdvo_get_input_timing(psb_intel_output,
++ &sdvo_priv->save_input_dtd_2);
++ }
++
++#if 0
++ for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) {
++ u16 this_output = (1 << o);
++ if (sdvo_priv->caps.output_flags & this_output) {
++ psb_intel_sdvo_set_target_output(psb_intel_output,
++ this_output);
++ psb_intel_sdvo_get_output_timing(psb_intel_output,
++ &sdvo_priv->
++ save_output_dtd[o]);
++ }
++ }
++#endif
++
++ sdvo_priv->save_SDVOX = REG_READ(sdvo_priv->output_device);
++
++ /*TODO: save the in_out_map state*/
++}
++
++static void psb_intel_sdvo_restore(struct drm_connector *connector)
++{
++ struct drm_device *dev = connector->dev;
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++ /*int o;*/
++ int i;
++ bool input1, input2;
++ u8 status;
++
++ psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
++
++#if 0
++ for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++) {
++ u16 this_output = (1 << o);
++ if (sdvo_priv->caps.output_flags & this_output) {
++ psb_intel_sdvo_set_target_output(psb_intel_output,
++ this_output);
++ psb_intel_sdvo_set_output_timing(psb_intel_output,
++ &sdvo_priv->
++ save_output_dtd[o]);
++ }
++ }
++#endif
++
++ if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
++ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
++ psb_intel_sdvo_set_input_timing(psb_intel_output,
++ &sdvo_priv->save_input_dtd_1);
++ }
++
++ if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
++ psb_intel_sdvo_set_target_input(psb_intel_output, false, true);
++ psb_intel_sdvo_set_input_timing(psb_intel_output,
++ &sdvo_priv->save_input_dtd_2);
++ }
++
++ psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
++ sdvo_priv->save_sdvo_mult);
++
++ REG_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX);
++
++ if (sdvo_priv->save_SDVOX & SDVO_ENABLE) {
++ for (i = 0; i < 2; i++)
++ psb_intel_wait_for_vblank(dev);
++ status =
++ psb_intel_sdvo_get_trained_inputs(psb_intel_output,
++ &input1,
++ &input2);
++ if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
++ DRM_DEBUG
++ ("First %s output reported failure to sync\n",
++ SDVO_NAME(sdvo_priv));
++ }
++
++ psb_intel_sdvo_set_active_outputs(psb_intel_output,
++ sdvo_priv->save_active_outputs);
++
++ /*TODO: restore in_out_map*/
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_SET_IN_OUT_MAP,
++ sdvo_priv->in_out_map,
++ 4);
++
++ psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
++}
++
++static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
++ struct drm_display_mode *mode)
++{
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++ struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
++
++ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++ return MODE_NO_DBLESCAN;
++
++ if (sdvo_priv->pixel_clock_min > mode->clock)
++ return MODE_CLOCK_LOW;
++
++ if (sdvo_priv->pixel_clock_max < mode->clock)
++ return MODE_CLOCK_HIGH;
++
++ return MODE_OK;
++}
++
++static bool psb_intel_sdvo_get_capabilities(
++ struct psb_intel_output *psb_intel_output,
++ struct psb_intel_sdvo_caps *caps)
++{
++ u8 status;
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_DEVICE_CAPS,
++ NULL,
++ 0);
++ status = psb_intel_sdvo_read_response(psb_intel_output,
++ caps,
++ sizeof(*caps));
++ if (status != SDVO_CMD_STATUS_SUCCESS)
++ return false;
++
++ return true;
++}
++
++struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
++{
++ struct drm_connector *connector = NULL;
++ struct psb_intel_output *iout = NULL;
++ struct psb_intel_sdvo_priv *sdvo;
++
++ /* find the sdvo connector */
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ iout = to_psb_intel_output(connector);
++
++ if (iout->type != INTEL_OUTPUT_SDVO)
++ continue;
++
++ sdvo = iout->dev_priv;
++
++ if (sdvo->output_device == SDVOB && sdvoB)
++ return connector;
++
++ if (sdvo->output_device == SDVOC && !sdvoB)
++ return connector;
++
++ }
++
++ return NULL;
++}
++
++int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
++{
++ u8 response[2];
++ u8 status;
++ struct psb_intel_output *psb_intel_output;
++ DRM_DEBUG("\n");
++
++ if (!connector)
++ return 0;
++
++ psb_intel_output = to_psb_intel_output(connector);
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_HOT_PLUG_SUPPORT,
++ NULL,
++ 0);
++ status = psb_intel_sdvo_read_response(psb_intel_output,
++ &response,
++ 2);
++
++ if (response[0] != 0)
++ return 1;
++
++ return 0;
++}
++
++void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
++{
++ u8 response[2];
++ u8 status;
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_ACTIVE_HOT_PLUG,
++ NULL,
++ 0);
++ psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
++
++ if (on) {
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL,
++ 0);
++ status = psb_intel_sdvo_read_response(psb_intel_output,
++ &response,
++ 2);
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_SET_ACTIVE_HOT_PLUG,
++ &response, 2);
++ } else {
++ response[0] = 0;
++ response[1] = 0;
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_SET_ACTIVE_HOT_PLUG,
++ &response, 2);
++ }
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_ACTIVE_HOT_PLUG,
++ NULL,
++ 0);
++ psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
++}
++
++static enum drm_connector_status psb_intel_sdvo_detect(struct drm_connector
++ *connector)
++{
++ u8 response[2];
++ u8 status;
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++
++ psb_intel_sdvo_write_cmd(psb_intel_output,
++ SDVO_CMD_GET_ATTACHED_DISPLAYS,
++ NULL,
++ 0);
++ status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
++
++ DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]);
++ if ((response[0] != 0) || (response[1] != 0))
++ return connector_status_connected;
++ else
++ return connector_status_disconnected;
++}
++
++static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
++{
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++
++ /* set the bus switch and get the modes */
++ psb_intel_sdvo_set_control_bus_switch(psb_intel_output,
++ SDVO_CONTROL_BUS_DDC2);
++ psb_intel_ddc_get_modes(psb_intel_output);
++
++ if (list_empty(&connector->probed_modes))
++ return 0;
++ return 1;
++#if 0
++ /* Mac mini hack. On this device, I get DDC through the analog, which
++ * load-detects as disconnected. I fail to DDC through the SDVO DDC,
++ * but it does load-detect as connected. So, just steal the DDC bits
++ * from analog when we fail at finding it the right way.
++ */
++ /* TODO */
++ return NULL;
++
++ return NULL;
++#endif
++}
++
++static void psb_intel_sdvo_destroy(struct drm_connector *connector)
++{
++ struct psb_intel_output *psb_intel_output =
++ to_psb_intel_output(connector);
++
++ if (psb_intel_output->i2c_bus)
++ psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
++ drm_sysfs_connector_remove(connector);
++ drm_connector_cleanup(connector);
++ kfree(psb_intel_output);
++}
++
++static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
++ .dpms = psb_intel_sdvo_dpms,
++ .mode_fixup = psb_intel_sdvo_mode_fixup,
++ .prepare = psb_intel_encoder_prepare,
++ .mode_set = psb_intel_sdvo_mode_set,
++ .commit = psb_intel_encoder_commit,
++};
++
++static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .save = psb_intel_sdvo_save,
++ .restore = psb_intel_sdvo_restore,
++ .detect = psb_intel_sdvo_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .destroy = psb_intel_sdvo_destroy,
++};
++
++static const struct drm_connector_helper_funcs
++ psb_intel_sdvo_connector_helper_funcs = {
++ .get_modes = psb_intel_sdvo_get_modes,
++ .mode_valid = psb_intel_sdvo_mode_valid,
++ .best_encoder = psb_intel_best_encoder,
++};
++
++void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
++{
++ drm_encoder_cleanup(encoder);
++}
++
++static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
++ .destroy = psb_intel_sdvo_enc_destroy,
++};
++
++
++void psb_intel_sdvo_init(struct drm_device *dev, int output_device)
++{
++ struct drm_connector *connector;
++ struct psb_intel_output *psb_intel_output;
++ struct psb_intel_sdvo_priv *sdvo_priv;
++ struct psb_intel_i2c_chan *i2cbus = NULL;
++ int connector_type;
++ u8 ch[0x40];
++ int i;
++ int encoder_type, output_id;
++
++ psb_intel_output =
++ kcalloc(sizeof(struct psb_intel_output) +
++ sizeof(struct psb_intel_sdvo_priv), 1, GFP_KERNEL);
++ if (!psb_intel_output)
++ return;
++
++ connector = &psb_intel_output->base;
++
++ drm_connector_init(dev, connector, &psb_intel_sdvo_connector_funcs,
++ DRM_MODE_CONNECTOR_Unknown);
++ drm_connector_helper_add(connector,
++ &psb_intel_sdvo_connector_helper_funcs);
++ sdvo_priv = (struct psb_intel_sdvo_priv *) (psb_intel_output + 1);
++ psb_intel_output->type = INTEL_OUTPUT_SDVO;
++
++ connector->interlace_allowed = 0;
++ connector->doublescan_allowed = 0;
++
++ /* setup the DDC bus. */
++ if (output_device == SDVOB)
++ i2cbus =
++ psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
++ else
++ i2cbus =
++ psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
++
++ if (!i2cbus)
++ goto err_connector;
++
++ sdvo_priv->i2c_bus = i2cbus;
++
++ if (output_device == SDVOB) {
++ output_id = 1;
++ sdvo_priv->by_input_wiring = SDVOB_IN0;
++ sdvo_priv->i2c_bus->slave_addr = 0x38;
++ } else {
++ output_id = 2;
++ sdvo_priv->i2c_bus->slave_addr = 0x39;
++ }
++
++ sdvo_priv->output_device = output_device;
++ psb_intel_output->i2c_bus = i2cbus;
++ psb_intel_output->dev_priv = sdvo_priv;
++
++
++ /* Read the regs to test if we can talk to the device */
++ for (i = 0; i < 0x40; i++) {
++ if (!psb_intel_sdvo_read_byte(psb_intel_output, i, &ch[i])) {
++ DRM_DEBUG("No SDVO device found on SDVO%c\n",
++ output_device == SDVOB ? 'B' : 'C');
++ goto err_i2c;
++ }
++ }
++
++ psb_intel_sdvo_get_capabilities(psb_intel_output, &sdvo_priv->caps);
++
++ memset(&sdvo_priv->active_outputs, 0,
++ sizeof(sdvo_priv->active_outputs));
++
++ /* TODO, CVBS, SVID, YPRPB & SCART outputs. */
++ if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) {
++ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
++ sdvo_priv->active_device = SDVO_DEVICE_CRT;
++ connector->display_info.subpixel_order =
++ SubPixelHorizontalRGB;
++ encoder_type = DRM_MODE_ENCODER_DAC;
++ connector_type = DRM_MODE_CONNECTOR_VGA;
++ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) {
++ sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
++ sdvo_priv->active_device = SDVO_DEVICE_CRT;
++ connector->display_info.subpixel_order =
++ SubPixelHorizontalRGB;
++ encoder_type = DRM_MODE_ENCODER_DAC;
++ connector_type = DRM_MODE_CONNECTOR_VGA;
++ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) {
++ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
++ sdvo_priv->active_device = SDVO_DEVICE_TMDS;
++ connector->display_info.subpixel_order =
++ SubPixelHorizontalRGB;
++ encoder_type = DRM_MODE_ENCODER_TMDS;
++ connector_type = DRM_MODE_CONNECTOR_DVID;
++ } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) {
++ sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
++ sdvo_priv->active_device = SDVO_DEVICE_TMDS;
++ connector->display_info.subpixel_order =
++ SubPixelHorizontalRGB;
++ encoder_type = DRM_MODE_ENCODER_TMDS;
++ connector_type = DRM_MODE_CONNECTOR_DVID;
++ } else {
++ unsigned char bytes[2];
++
++ memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
++ DRM_DEBUG
++ ("%s: No active RGB or TMDS outputs (0x%02x%02x)\n",
++ SDVO_NAME(sdvo_priv), bytes[0], bytes[1]);
++ goto err_i2c;
++ }
++
++ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_sdvo_enc_funcs,
++ encoder_type);
++ drm_encoder_helper_add(&psb_intel_output->enc,
++ &psb_intel_sdvo_helper_funcs);
++ connector->connector_type = connector_type;
++
++ drm_mode_connector_attach_encoder(&psb_intel_output->base,
++ &psb_intel_output->enc);
++ drm_sysfs_connector_add(connector);
++
++ /* Set the input timing to the screen. Assume always input 0. */
++ psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
++
++ psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_output,
++ &sdvo_priv->pixel_clock_min,
++ &sdvo_priv->
++ pixel_clock_max);
++
++
++ DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, "
++ "clock range %dMHz - %dMHz, "
++ "input 1: %c, input 2: %c, "
++ "output 1: %c, output 2: %c\n",
++ SDVO_NAME(sdvo_priv),
++ sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
++ sdvo_priv->caps.device_rev_id,
++ sdvo_priv->pixel_clock_min / 1000,
++ sdvo_priv->pixel_clock_max / 1000,
++ (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
++ (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
++ /* check currently supported outputs */
++ sdvo_priv->caps.output_flags &
++ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
++ sdvo_priv->caps.output_flags &
++ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
++
++ psb_intel_output->ddc_bus = i2cbus;
++
++ return;
++
++err_i2c:
++ psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
++err_connector:
++ drm_connector_cleanup(connector);
++ kfree(psb_intel_output);
++
++ return;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_intel_sdvo_regs.h b/drivers/gpu/drm/mrst/drv/psb_intel_sdvo_regs.h
+new file mode 100644
+index 0000000..ed2f136
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_intel_sdvo_regs.h
+@@ -0,0 +1,338 @@
++/*
++ * SDVO command definitions and structures.
++ *
++ * Copyright (c) 2008, Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ */
++
++#define SDVO_OUTPUT_FIRST (0)
++#define SDVO_OUTPUT_TMDS0 (1 << 0)
++#define SDVO_OUTPUT_RGB0 (1 << 1)
++#define SDVO_OUTPUT_CVBS0 (1 << 2)
++#define SDVO_OUTPUT_SVID0 (1 << 3)
++#define SDVO_OUTPUT_YPRPB0 (1 << 4)
++#define SDVO_OUTPUT_SCART0 (1 << 5)
++#define SDVO_OUTPUT_LVDS0 (1 << 6)
++#define SDVO_OUTPUT_TMDS1 (1 << 8)
++#define SDVO_OUTPUT_RGB1 (1 << 9)
++#define SDVO_OUTPUT_CVBS1 (1 << 10)
++#define SDVO_OUTPUT_SVID1 (1 << 11)
++#define SDVO_OUTPUT_YPRPB1 (1 << 12)
++#define SDVO_OUTPUT_SCART1 (1 << 13)
++#define SDVO_OUTPUT_LVDS1 (1 << 14)
++#define SDVO_OUTPUT_LAST (14)
++
++struct psb_intel_sdvo_caps {
++ u8 vendor_id;
++ u8 device_id;
++ u8 device_rev_id;
++ u8 sdvo_version_major;
++ u8 sdvo_version_minor;
++ unsigned int sdvo_inputs_mask:2;
++ unsigned int smooth_scaling:1;
++ unsigned int sharp_scaling:1;
++ unsigned int up_scaling:1;
++ unsigned int down_scaling:1;
++ unsigned int stall_support:1;
++ unsigned int pad:1;
++ u16 output_flags;
++} __attribute__ ((packed));
++
++/** This matches the EDID DTD structure, more or less */
++struct psb_intel_sdvo_dtd {
++ struct {
++ u16 clock; /**< pixel clock, in 10kHz units */
++ u8 h_active; /**< lower 8 bits (pixels) */
++ u8 h_blank; /**< lower 8 bits (pixels) */
++ u8 h_high; /**< upper 4 bits each h_active, h_blank */
++ u8 v_active; /**< lower 8 bits (lines) */
++ u8 v_blank; /**< lower 8 bits (lines) */
++ u8 v_high; /**< upper 4 bits each v_active, v_blank */
++ } part1;
++
++ struct {
++ u8 h_sync_off;
++ /**< lower 8 bits, from hblank start */
++ u8 h_sync_width;/**< lower 8 bits (pixels) */
++ /** lower 4 bits each vsync offset, vsync width */
++ u8 v_sync_off_width;
++ /**
++ * 2 high bits of hsync offset, 2 high bits of hsync width,
++ * bits 4-5 of vsync offset, and 2 high bits of vsync width.
++ */
++ u8 sync_off_width_high;
++ u8 dtd_flags;
++ u8 sdvo_flags;
++ /** bits 6-7 of vsync offset at bits 6-7 */
++ u8 v_sync_off_high;
++ u8 reserved;
++ } part2;
++} __attribute__ ((packed));
++
++struct psb_intel_sdvo_pixel_clock_range {
++ u16 min; /**< pixel clock, in 10kHz units */
++ u16 max; /**< pixel clock, in 10kHz units */
++} __attribute__ ((packed));
++
++struct psb_intel_sdvo_preferred_input_timing_args {
++ u16 clock;
++ u16 width;
++ u16 height;
++} __attribute__ ((packed));
++
++/* I2C registers for SDVO */
++#define SDVO_I2C_ARG_0 0x07
++#define SDVO_I2C_ARG_1 0x06
++#define SDVO_I2C_ARG_2 0x05
++#define SDVO_I2C_ARG_3 0x04
++#define SDVO_I2C_ARG_4 0x03
++#define SDVO_I2C_ARG_5 0x02
++#define SDVO_I2C_ARG_6 0x01
++#define SDVO_I2C_ARG_7 0x00
++#define SDVO_I2C_OPCODE 0x08
++#define SDVO_I2C_CMD_STATUS 0x09
++#define SDVO_I2C_RETURN_0 0x0a
++#define SDVO_I2C_RETURN_1 0x0b
++#define SDVO_I2C_RETURN_2 0x0c
++#define SDVO_I2C_RETURN_3 0x0d
++#define SDVO_I2C_RETURN_4 0x0e
++#define SDVO_I2C_RETURN_5 0x0f
++#define SDVO_I2C_RETURN_6 0x10
++#define SDVO_I2C_RETURN_7 0x11
++#define SDVO_I2C_VENDOR_BEGIN 0x20
++
++/* Status results */
++#define SDVO_CMD_STATUS_POWER_ON 0x0
++#define SDVO_CMD_STATUS_SUCCESS 0x1
++#define SDVO_CMD_STATUS_NOTSUPP 0x2
++#define SDVO_CMD_STATUS_INVALID_ARG 0x3
++#define SDVO_CMD_STATUS_PENDING 0x4
++#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
++#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
++
++/* SDVO commands, argument/result registers */
++
++#define SDVO_CMD_RESET 0x01
++
++/** Returns a struct psb_intel_sdvo_caps */
++#define SDVO_CMD_GET_DEVICE_CAPS 0x02
++
++#define SDVO_CMD_GET_FIRMWARE_REV 0x86
++# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
++# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
++# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
++
++/**
++ * Reports which inputs are trained (managed to sync).
++ *
++ * Devices must have trained within 2 vsyncs of a mode change.
++ */
++#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
++struct psb_intel_sdvo_get_trained_inputs_response {
++ unsigned int input0_trained:1;
++ unsigned int input1_trained:1;
++ unsigned int pad:6;
++} __attribute__ ((packed));
++
++/** Returns a struct psb_intel_sdvo_output_flags of active outputs. */
++#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
++
++/**
++ * Sets the current set of active outputs.
++ *
++ * Takes a struct psb_intel_sdvo_output_flags.
++ * Must be preceded by a SET_IN_OUT_MAP
++ * on multi-output devices.
++ */
++#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
++
++/**
++ * Returns the current mapping of SDVO inputs to outputs on the device.
++ *
++ * Returns two struct psb_intel_sdvo_output_flags structures.
++ */
++#define SDVO_CMD_GET_IN_OUT_MAP 0x06
++
++/**
++ * Sets the current mapping of SDVO inputs to outputs on the device.
++ *
++ * Takes two struct psb_intel_sdvo_output_flags structures.
++ */
++#define SDVO_CMD_SET_IN_OUT_MAP 0x07
++
++/**
++ * Returns a struct psb_intel_sdvo_output_flags of attached displays.
++ */
++#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
++
++/**
++ * Returns a struct psb_intel_sdvo_output_flags of displays supporting hot plugging.
++ */
++#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
++
++/**
++ * Takes a struct psb_intel_sdvo_output_flags.
++ */
++#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
++
++/**
++ * Returns a struct psb_intel_sdvo_output_flags of displays with hot plug
++ * interrupts enabled.
++ */
++#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
++
++#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
++struct psb_intel_sdvo_get_interrupt_event_source_response {
++ u16 interrupt_status;
++ unsigned int ambient_light_interrupt:1;
++ unsigned int pad:7;
++} __attribute__ ((packed));
++
++/**
++ * Selects which input is affected by future input commands.
++ *
++ * Commands affected include SET_INPUT_TIMINGS_PART[12],
++ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
++ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
++ */
++#define SDVO_CMD_SET_TARGET_INPUT 0x10
++struct psb_intel_sdvo_set_target_input_args {
++ unsigned int target_1:1;
++ unsigned int pad:7;
++} __attribute__ ((packed));
++
++/**
++ * Takes a struct psb_intel_sdvo_output_flags of which outputs are targeted by
++ * future output commands.
++ *
++ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
++ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
++ */
++#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
++
++#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
++#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
++#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
++#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
++#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
++#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
++#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
++#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
++/* Part 1 */
++# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
++# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
++# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
++# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
++# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
++# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
++# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
++# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
++/* Part 2 */
++# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
++# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
++# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
++# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
++# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
++# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
++# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
++# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
++# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
++# define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5
++# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
++# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
++# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
++# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
++# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
++# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
++# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
++# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
++
++/**
++ * Generates a DTD based on the given width, height, and flags.
++ *
++ * This will be supported by any device supporting scaling or interlaced
++ * modes.
++ */
++#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
++# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
++# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
++# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
++# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
++# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
++# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
++# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
++# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
++# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
++
++#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
++#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
++
++/** Returns a struct psb_intel_sdvo_pixel_clock_range */
++#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
++/** Returns a struct psb_intel_sdvo_pixel_clock_range */
++#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
++
++/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
++#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
++
++/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
++#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
++/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
++#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
++# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
++# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
++# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
++
++#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
++
++#define SDVO_CMD_GET_TV_FORMAT 0x28
++
++#define SDVO_CMD_SET_TV_FORMAT 0x29
++
++#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
++#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
++#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
++# define SDVO_ENCODER_STATE_ON (1 << 0)
++# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
++# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
++# define SDVO_ENCODER_STATE_OFF (1 << 3)
++
++#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93
++
++#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
++# define SDVO_CONTROL_BUS_PROM 0x0
++# define SDVO_CONTROL_BUS_DDC1 0x1
++# define SDVO_CONTROL_BUS_DDC2 0x2
++# define SDVO_CONTROL_BUS_DDC3 0x3
++
++/* SDVO Bus & SDVO Inputs wiring details*/
++/* Bit 0: Is SDVOB connected to In0 (1 = yes, 0 = no*/
++/* Bit 1: Is SDVOB connected to In1 (1 = yes, 0 = no*/
++/* Bit 2: Is SDVOC connected to In0 (1 = yes, 0 = no*/
++/* Bit 3: Is SDVOC connected to In1 (1 = yes, 0 = no*/
++#define SDVOB_IN0 0x01
++#define SDVOB_IN1 0x02
++#define SDVOC_IN0 0x04
++#define SDVOC_IN1 0x08
++
++#define SDVO_DEVICE_NONE 0x00
++#define SDVO_DEVICE_CRT 0x01
++#define SDVO_DEVICE_TV 0x02
++#define SDVO_DEVICE_LVDS 0x04
++#define SDVO_DEVICE_TMDS 0x08
++
+diff --git a/drivers/gpu/drm/mrst/drv/psb_mmu.c b/drivers/gpu/drm/mrst/drv/psb_mmu.c
+new file mode 100644
+index 0000000..cced0a8
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_mmu.c
+@@ -0,0 +1,1010 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_reg.h"
++
++/*
++ * Code for the SGX MMU:
++ */
++
++/*
++ * clflush on one processor only:
++ * clflush should apparently flush the cache line on all processors in an
++ * SMP system.
++ */
++
++/*
++ * kmap atomic:
++ * The usage of the slots must be completely encapsulated within a spinlock, and
++ * no other functions that may be using the locks for other purposed may be
++ * called from within the locked region.
++ * Since the slots are per processor, this will guarantee that we are the only
++ * user.
++ */
++
++/*
++ * TODO: Inserting ptes from an interrupt handler:
++ * This may be desirable for some SGX functionality where the GPU can fault in
++ * needed pages. For that, we need to make an atomic insert_pages function, that
++ * may fail.
++ * If it fails, the caller need to insert the page using a workqueue function,
++ * but on average it should be fast.
++ */
++
++struct psb_mmu_driver {
++ /* protects driver- and pd structures. Always take in read mode
++ * before taking the page table spinlock.
++ */
++ struct rw_semaphore sem;
++
++ /* protects page tables, directory tables and pt tables.
++ * and pt structures.
++ */
++ spinlock_t lock;
++
++ atomic_t needs_tlbflush;
++
++ uint8_t __iomem *register_map;
++ struct psb_mmu_pd *default_pd;
++ /*uint32_t bif_ctrl;*/
++ int has_clflush;
++ int clflush_add;
++ unsigned long clflush_mask;
++
++ struct drm_psb_private *dev_priv;
++};
++
++struct psb_mmu_pd;
++
++struct psb_mmu_pt {
++ struct psb_mmu_pd *pd;
++ uint32_t index;
++ uint32_t count;
++ struct page *p;
++ uint32_t *v;
++};
++
++struct psb_mmu_pd {
++ struct psb_mmu_driver *driver;
++ int hw_context;
++ struct psb_mmu_pt **tables;
++ struct page *p;
++ struct page *dummy_pt;
++ struct page *dummy_page;
++ uint32_t pd_mask;
++ uint32_t invalid_pde;
++ uint32_t invalid_pte;
++};
++
++static inline uint32_t psb_mmu_pt_index(uint32_t offset)
++{
++ return (offset >> PSB_PTE_SHIFT) & 0x3FF;
++}
++
++static inline uint32_t psb_mmu_pd_index(uint32_t offset)
++{
++ return offset >> PSB_PDE_SHIFT;
++}
++
++#if defined(CONFIG_X86)
++static inline void psb_clflush(void *addr)
++{
++ __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
++}
++
++static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
++ void *addr)
++{
++ if (!driver->has_clflush)
++ return;
++
++ mb();
++ psb_clflush(addr);
++ mb();
++}
++#else
++
++static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
++ void *addr)
++{;
++}
++
++#endif
++
++static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
++ int force)
++{
++ if (atomic_read(&driver->needs_tlbflush) || force) {
++ if (driver->dev_priv) {
++ atomic_set(&driver->dev_priv->msvdx_mmu_invaldc, 1);
++ if (IS_MRST(driver->dev_priv->dev))
++ atomic_set( \
++ &driver->dev_priv->topaz_mmu_invaldc, 1);
++ }
++ }
++ atomic_set(&driver->needs_tlbflush, 0);
++}
++
++static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
++{
++ down_write(&driver->sem);
++ psb_mmu_flush_pd_locked(driver, force);
++ up_write(&driver->sem);
++}
++
++void psb_mmu_flush(struct psb_mmu_driver *driver)
++{
++ down_write(&driver->sem);
++ if (driver->dev_priv) {
++ atomic_set(&driver->dev_priv->msvdx_mmu_invaldc, 1);
++ if (IS_MRST(driver->dev_priv->dev))
++ atomic_set(&driver->dev_priv->topaz_mmu_invaldc, 1);
++ }
++
++ up_write(&driver->sem);
++}
++
++void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
++{
++ ttm_tt_cache_flush(&pd->p, 1);
++ down_write(&pd->driver->sem);
++ wmb();
++ psb_mmu_flush_pd_locked(pd->driver, 1);
++ pd->hw_context = hw_context;
++ up_write(&pd->driver->sem);
++
++}
++
++static inline unsigned long psb_pd_addr_end(unsigned long addr,
++ unsigned long end)
++{
++
++ addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
++ return (addr < end) ? addr : end;
++}
++
++static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
++{
++ uint32_t mask = PSB_PTE_VALID;
++
++ if (type & PSB_MMU_CACHED_MEMORY)
++ mask |= PSB_PTE_CACHED;
++ if (type & PSB_MMU_RO_MEMORY)
++ mask |= PSB_PTE_RO;
++ if (type & PSB_MMU_WO_MEMORY)
++ mask |= PSB_PTE_WO;
++
++ return (pfn << PAGE_SHIFT) | mask;
++}
++
++struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
++ int trap_pagefaults, int invalid_type)
++{
++ struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
++ uint32_t *v;
++ int i;
++
++ if (!pd)
++ return NULL;
++
++ pd->p = alloc_page(GFP_DMA32);
++ if (!pd->p)
++ goto out_err1;
++ pd->dummy_pt = alloc_page(GFP_DMA32);
++ if (!pd->dummy_pt)
++ goto out_err2;
++ pd->dummy_page = alloc_page(GFP_DMA32);
++ if (!pd->dummy_page)
++ goto out_err3;
++
++ if (!trap_pagefaults) {
++ pd->invalid_pde =
++ psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
++ invalid_type);
++ pd->invalid_pte =
++ psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
++ invalid_type);
++ } else {
++ pd->invalid_pde = 0;
++ pd->invalid_pte = 0;
++ }
++
++ v = kmap(pd->dummy_pt);
++ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
++ v[i] = pd->invalid_pte;
++
++ kunmap(pd->dummy_pt);
++
++ v = kmap(pd->p);
++ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
++ v[i] = pd->invalid_pde;
++
++ kunmap(pd->p);
++
++ clear_page(kmap(pd->dummy_page));
++ kunmap(pd->dummy_page);
++
++ pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
++ if (!pd->tables)
++ goto out_err4;
++
++ pd->hw_context = -1;
++ pd->pd_mask = PSB_PTE_VALID;
++ pd->driver = driver;
++
++ return pd;
++
++out_err4:
++ __free_page(pd->dummy_page);
++out_err3:
++ __free_page(pd->dummy_pt);
++out_err2:
++ __free_page(pd->p);
++out_err1:
++ kfree(pd);
++ return NULL;
++}
++
++void psb_mmu_free_pt(struct psb_mmu_pt *pt)
++{
++ __free_page(pt->p);
++ kfree(pt);
++}
++
++void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
++{
++ struct psb_mmu_driver *driver = pd->driver;
++ struct psb_mmu_pt *pt;
++ int i;
++
++ down_write(&driver->sem);
++ if (pd->hw_context != -1)
++ psb_mmu_flush_pd_locked(driver, 1);
++
++ /* Should take the spinlock here, but we don't need to do that
++ since we have the semaphore in write mode. */
++
++ for (i = 0; i < 1024; ++i) {
++ pt = pd->tables[i];
++ if (pt)
++ psb_mmu_free_pt(pt);
++ }
++
++ vfree(pd->tables);
++ __free_page(pd->dummy_page);
++ __free_page(pd->dummy_pt);
++ __free_page(pd->p);
++ kfree(pd);
++ up_write(&driver->sem);
++}
++
++static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
++{
++ struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
++ void *v;
++ uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
++ uint32_t clflush_count = PAGE_SIZE / clflush_add;
++ spinlock_t *lock = &pd->driver->lock;
++ uint8_t *clf;
++ uint32_t *ptes;
++ int i;
++
++ if (!pt)
++ return NULL;
++
++ pt->p = alloc_page(GFP_DMA32);
++ if (!pt->p) {
++ kfree(pt);
++ return NULL;
++ }
++
++ spin_lock(lock);
++
++ v = kmap_atomic(pt->p, KM_USER0);
++ clf = (uint8_t *) v;
++ ptes = (uint32_t *) v;
++ for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
++ *ptes++ = pd->invalid_pte;
++
++
++#if defined(CONFIG_X86)
++ if (pd->driver->has_clflush && pd->hw_context != -1) {
++ mb();
++ for (i = 0; i < clflush_count; ++i) {
++ psb_clflush(clf);
++ clf += clflush_add;
++ }
++ mb();
++ }
++#endif
++ kunmap_atomic(v, KM_USER0);
++ spin_unlock(lock);
++
++ pt->count = 0;
++ pt->pd = pd;
++ pt->index = 0;
++
++ return pt;
++}
++
++struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
++ unsigned long addr)
++{
++ uint32_t index = psb_mmu_pd_index(addr);
++ struct psb_mmu_pt *pt;
++ uint32_t *v;
++ spinlock_t *lock = &pd->driver->lock;
++
++ spin_lock(lock);
++ pt = pd->tables[index];
++ while (!pt) {
++ spin_unlock(lock);
++ pt = psb_mmu_alloc_pt(pd);
++ if (!pt)
++ return NULL;
++ spin_lock(lock);
++
++ if (pd->tables[index]) {
++ spin_unlock(lock);
++ psb_mmu_free_pt(pt);
++ spin_lock(lock);
++ pt = pd->tables[index];
++ continue;
++ }
++
++ v = kmap_atomic(pd->p, KM_USER0);
++ pd->tables[index] = pt;
++ v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
++ pt->index = index;
++ kunmap_atomic((void *) v, KM_USER0);
++
++ if (pd->hw_context != -1) {
++ psb_mmu_clflush(pd->driver, (void *) &v[index]);
++ atomic_set(&pd->driver->needs_tlbflush, 1);
++ }
++ }
++ pt->v = kmap_atomic(pt->p, KM_USER0);
++ return pt;
++}
++
++static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
++ unsigned long addr)
++{
++ uint32_t index = psb_mmu_pd_index(addr);
++ struct psb_mmu_pt *pt;
++ spinlock_t *lock = &pd->driver->lock;
++
++ spin_lock(lock);
++ pt = pd->tables[index];
++ if (!pt) {
++ spin_unlock(lock);
++ return NULL;
++ }
++ pt->v = kmap_atomic(pt->p, KM_USER0);
++ return pt;
++}
++
++static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
++{
++ struct psb_mmu_pd *pd = pt->pd;
++ uint32_t *v;
++
++ kunmap_atomic(pt->v, KM_USER0);
++ if (pt->count == 0) {
++ v = kmap_atomic(pd->p, KM_USER0);
++ v[pt->index] = pd->invalid_pde;
++ pd->tables[pt->index] = NULL;
++
++ if (pd->hw_context != -1) {
++ psb_mmu_clflush(pd->driver,
++ (void *) &v[pt->index]);
++ atomic_set(&pd->driver->needs_tlbflush, 1);
++ }
++ kunmap_atomic(pt->v, KM_USER0);
++ spin_unlock(&pd->driver->lock);
++ psb_mmu_free_pt(pt);
++ return;
++ }
++ spin_unlock(&pd->driver->lock);
++}
++
++static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
++ unsigned long addr, uint32_t pte)
++{
++ pt->v[psb_mmu_pt_index(addr)] = pte;
++}
++
++static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
++ unsigned long addr)
++{
++ pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
++}
++
++#if 0
++static uint32_t psb_mmu_check_pte_locked(struct psb_mmu_pd *pd,
++ uint32_t mmu_offset)
++{
++ uint32_t *v;
++ uint32_t pfn;
++
++ v = kmap_atomic(pd->p, KM_USER0);
++ if (!v) {
++ printk(KERN_INFO "Could not kmap pde page.\n");
++ return 0;
++ }
++ pfn = v[psb_mmu_pd_index(mmu_offset)];
++ /* printk(KERN_INFO "pde is 0x%08x\n",pfn); */
++ kunmap_atomic(v, KM_USER0);
++ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
++ printk(KERN_INFO "Strange pde at 0x%08x: 0x%08x.\n",
++ mmu_offset, pfn);
++ }
++ v = ioremap(pfn & 0xFFFFF000, 4096);
++ if (!v) {
++ printk(KERN_INFO "Could not kmap pte page.\n");
++ return 0;
++ }
++ pfn = v[psb_mmu_pt_index(mmu_offset)];
++ /* printk(KERN_INFO "pte is 0x%08x\n",pfn); */
++ iounmap(v);
++ if (((pfn & 0x0F) != PSB_PTE_VALID)) {
++ printk(KERN_INFO "Strange pte at 0x%08x: 0x%08x.\n",
++ mmu_offset, pfn);
++ }
++ return pfn >> PAGE_SHIFT;
++}
++
++static void psb_mmu_check_mirrored_gtt(struct psb_mmu_pd *pd,
++ uint32_t mmu_offset,
++ uint32_t gtt_pages)
++{
++ uint32_t start;
++ uint32_t next;
++
++ printk(KERN_INFO "Checking mirrored gtt 0x%08x %d\n",
++ mmu_offset, gtt_pages);
++ down_read(&pd->driver->sem);
++ start = psb_mmu_check_pte_locked(pd, mmu_offset);
++ mmu_offset += PAGE_SIZE;
++ gtt_pages -= 1;
++ while (gtt_pages--) {
++ next = psb_mmu_check_pte_locked(pd, mmu_offset);
++ if (next != start + 1) {
++ printk(KERN_INFO
++ "Ptes out of order: 0x%08x, 0x%08x.\n",
++ start, next);
++ }
++ start = next;
++ mmu_offset += PAGE_SIZE;
++ }
++ up_read(&pd->driver->sem);
++}
++
++#endif
++
++void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
++ uint32_t mmu_offset, uint32_t gtt_start,
++ uint32_t gtt_pages)
++{
++ uint32_t *v;
++ uint32_t start = psb_mmu_pd_index(mmu_offset);
++ struct psb_mmu_driver *driver = pd->driver;
++ int num_pages = gtt_pages;
++
++ down_read(&driver->sem);
++ spin_lock(&driver->lock);
++
++ v = kmap_atomic(pd->p, KM_USER0);
++ v += start;
++
++ while (gtt_pages--) {
++ *v++ = gtt_start | pd->pd_mask;
++ gtt_start += PAGE_SIZE;
++ }
++
++ ttm_tt_cache_flush(&pd->p, num_pages);
++ kunmap_atomic(v, KM_USER0);
++ spin_unlock(&driver->lock);
++
++ if (pd->hw_context != -1)
++ atomic_set(&pd->driver->needs_tlbflush, 1);
++
++ up_read(&pd->driver->sem);
++ psb_mmu_flush_pd(pd->driver, 0);
++}
++
++struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
++{
++ struct psb_mmu_pd *pd;
++
++ down_read(&driver->sem);
++ pd = driver->default_pd;
++ up_read(&driver->sem);
++
++ return pd;
++}
++
++/* Returns the physical address of the PD shared by sgx/msvdx */
++uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
++{
++ struct psb_mmu_pd *pd;
++
++ pd = psb_mmu_get_default_pd(driver);
++ return page_to_pfn(pd->p) << PAGE_SHIFT;
++}
++
++void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
++{
++ psb_mmu_free_pagedir(driver->default_pd);
++ kfree(driver);
++}
++
++struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
++ int trap_pagefaults,
++ int invalid_type,
++ struct drm_psb_private *dev_priv)
++{
++ struct psb_mmu_driver *driver;
++
++ driver = kmalloc(sizeof(*driver), GFP_KERNEL);
++
++ if (!driver)
++ return NULL;
++ driver->dev_priv = dev_priv;
++
++ driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
++ invalid_type);
++ if (!driver->default_pd)
++ goto out_err1;
++
++ spin_lock_init(&driver->lock);
++ init_rwsem(&driver->sem);
++ down_write(&driver->sem);
++ driver->register_map = registers;
++ atomic_set(&driver->needs_tlbflush, 1);
++
++ driver->has_clflush = 0;
++
++#if defined(CONFIG_X86)
++ if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
++ uint32_t tfms, misc, cap0, cap4, clflush_size;
++
++ /*
++ * clflush size is determined at kernel setup for x86_64
++ * but not for i386. We have to do it here.
++ */
++
++ cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
++ clflush_size = ((misc >> 8) & 0xff) * 8;
++ driver->has_clflush = 1;
++ driver->clflush_add =
++ PAGE_SIZE * clflush_size / sizeof(uint32_t);
++ driver->clflush_mask = driver->clflush_add - 1;
++ driver->clflush_mask = ~driver->clflush_mask;
++ }
++#endif
++
++ up_write(&driver->sem);
++ return driver;
++
++out_err1:
++ kfree(driver);
++ return NULL;
++}
++
++#if defined(CONFIG_X86)
++static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
++ unsigned long address, uint32_t num_pages,
++ uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride)
++{
++ struct psb_mmu_pt *pt;
++ uint32_t rows = 1;
++ uint32_t i;
++ unsigned long addr;
++ unsigned long end;
++ unsigned long next;
++ unsigned long add;
++ unsigned long row_add;
++ unsigned long clflush_add = pd->driver->clflush_add;
++ unsigned long clflush_mask = pd->driver->clflush_mask;
++
++ if (!pd->driver->has_clflush) {
++ ttm_tt_cache_flush(&pd->p, num_pages);
++ return;
++ }
++
++ if (hw_tile_stride)
++ rows = num_pages / desired_tile_stride;
++ else
++ desired_tile_stride = num_pages;
++
++ add = desired_tile_stride << PAGE_SHIFT;
++ row_add = hw_tile_stride << PAGE_SHIFT;
++ mb();
++ for (i = 0; i < rows; ++i) {
++
++ addr = address;
++ end = addr + add;
++
++ do {
++ next = psb_pd_addr_end(addr, end);
++ pt = psb_mmu_pt_map_lock(pd, addr);
++ if (!pt)
++ continue;
++ do {
++ psb_clflush(&pt->v
++ [psb_mmu_pt_index(addr)]);
++ } while (addr +=
++ clflush_add,
++ (addr & clflush_mask) < next);
++
++ psb_mmu_pt_unmap_unlock(pt);
++ } while (addr = next, next != end);
++ address += row_add;
++ }
++ mb();
++}
++#else
++static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
++ unsigned long address, uint32_t num_pages,
++ uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride)
++{
++ drm_ttm_cache_flush(&pd->p, num_pages);
++}
++#endif
++
++void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
++ unsigned long address, uint32_t num_pages)
++{
++ struct psb_mmu_pt *pt;
++ unsigned long addr;
++ unsigned long end;
++ unsigned long next;
++ unsigned long f_address = address;
++
++ down_read(&pd->driver->sem);
++
++ addr = address;
++ end = addr + (num_pages << PAGE_SHIFT);
++
++ do {
++ next = psb_pd_addr_end(addr, end);
++ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
++ if (!pt)
++ goto out;
++ do {
++ psb_mmu_invalidate_pte(pt, addr);
++ --pt->count;
++ } while (addr += PAGE_SIZE, addr < next);
++ psb_mmu_pt_unmap_unlock(pt);
++
++ } while (addr = next, next != end);
++
++out:
++ if (pd->hw_context != -1)
++ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
++
++ up_read(&pd->driver->sem);
++
++ if (pd->hw_context != -1)
++ psb_mmu_flush(pd->driver);
++
++ return;
++}
++
++void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
++ uint32_t num_pages, uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride)
++{
++ struct psb_mmu_pt *pt;
++ uint32_t rows = 1;
++ uint32_t i;
++ unsigned long addr;
++ unsigned long end;
++ unsigned long next;
++ unsigned long add;
++ unsigned long row_add;
++ unsigned long f_address = address;
++
++ if (hw_tile_stride)
++ rows = num_pages / desired_tile_stride;
++ else
++ desired_tile_stride = num_pages;
++
++ add = desired_tile_stride << PAGE_SHIFT;
++ row_add = hw_tile_stride << PAGE_SHIFT;
++
++ down_read(&pd->driver->sem);
++
++ /* Make sure we only need to flush this processor's cache */
++
++ for (i = 0; i < rows; ++i) {
++
++ addr = address;
++ end = addr + add;
++
++ do {
++ next = psb_pd_addr_end(addr, end);
++ pt = psb_mmu_pt_map_lock(pd, addr);
++ if (!pt)
++ continue;
++ do {
++ psb_mmu_invalidate_pte(pt, addr);
++ --pt->count;
++
++ } while (addr += PAGE_SIZE, addr < next);
++ psb_mmu_pt_unmap_unlock(pt);
++
++ } while (addr = next, next != end);
++ address += row_add;
++ }
++ if (pd->hw_context != -1)
++ psb_mmu_flush_ptes(pd, f_address, num_pages,
++ desired_tile_stride, hw_tile_stride);
++
++ up_read(&pd->driver->sem);
++
++ if (pd->hw_context != -1)
++ psb_mmu_flush(pd->driver);
++}
++
++int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
++ unsigned long address, uint32_t num_pages,
++ int type)
++{
++ struct psb_mmu_pt *pt;
++ uint32_t pte;
++ unsigned long addr;
++ unsigned long end;
++ unsigned long next;
++ unsigned long f_address = address;
++ int ret = 0;
++
++ down_read(&pd->driver->sem);
++
++ addr = address;
++ end = addr + (num_pages << PAGE_SHIFT);
++
++ do {
++ next = psb_pd_addr_end(addr, end);
++ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
++ if (!pt) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ do {
++ pte = psb_mmu_mask_pte(start_pfn++, type);
++ psb_mmu_set_pte(pt, addr, pte);
++ pt->count++;
++ } while (addr += PAGE_SIZE, addr < next);
++ psb_mmu_pt_unmap_unlock(pt);
++
++ } while (addr = next, next != end);
++
++out:
++ if (pd->hw_context != -1)
++ psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
++
++ up_read(&pd->driver->sem);
++
++ if (pd->hw_context != -1)
++ psb_mmu_flush(pd->driver);
++
++ return ret;
++}
++
++int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
++ unsigned long address, uint32_t num_pages,
++ uint32_t desired_tile_stride,
++ uint32_t hw_tile_stride, int type)
++{
++ struct psb_mmu_pt *pt;
++ uint32_t rows = 1;
++ uint32_t i;
++ uint32_t pte;
++ unsigned long addr;
++ unsigned long end;
++ unsigned long next;
++ unsigned long add;
++ unsigned long row_add;
++ unsigned long f_address = address;
++ int ret = 0;
++
++ if (hw_tile_stride) {
++ if (num_pages % desired_tile_stride != 0)
++ return -EINVAL;
++ rows = num_pages / desired_tile_stride;
++ } else {
++ desired_tile_stride = num_pages;
++ }
++
++ add = desired_tile_stride << PAGE_SHIFT;
++ row_add = hw_tile_stride << PAGE_SHIFT;
++
++ down_read(&pd->driver->sem);
++
++ for (i = 0; i < rows; ++i) {
++
++ addr = address;
++ end = addr + add;
++
++ do {
++ next = psb_pd_addr_end(addr, end);
++ pt = psb_mmu_pt_alloc_map_lock(pd, addr);
++ if (!pt) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ do {
++ pte =
++ psb_mmu_mask_pte(page_to_pfn(*pages++),
++ type);
++ psb_mmu_set_pte(pt, addr, pte);
++ pt->count++;
++ } while (addr += PAGE_SIZE, addr < next);
++ psb_mmu_pt_unmap_unlock(pt);
++
++ } while (addr = next, next != end);
++
++ address += row_add;
++ }
++out:
++ if (pd->hw_context != -1)
++ psb_mmu_flush_ptes(pd, f_address, num_pages,
++ desired_tile_stride, hw_tile_stride);
++
++ up_read(&pd->driver->sem);
++
++ if (pd->hw_context != -1)
++ psb_mmu_flush(pd->driver);
++
++ return ret;
++}
++#if 0 /*comented out, only used in mmu test now*/
++void psb_mmu_enable_requestor(struct psb_mmu_driver *driver, uint32_t mask)
++{
++ mask &= _PSB_MMU_ER_MASK;
++ psb_iowrite32(driver,
++ psb_ioread32(driver, PSB_CR_BIF_CTRL) & ~mask,
++ PSB_CR_BIF_CTRL);
++ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL);
++}
++
++void psb_mmu_disable_requestor(struct psb_mmu_driver *driver,
++ uint32_t mask)
++{
++ mask &= _PSB_MMU_ER_MASK;
++ psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) | mask,
++ PSB_CR_BIF_CTRL);
++ (void) psb_ioread32(driver, PSB_CR_BIF_CTRL);
++}
++#endif
++int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
++ unsigned long *pfn)
++{
++ int ret;
++ struct psb_mmu_pt *pt;
++ uint32_t tmp;
++ spinlock_t *lock = &pd->driver->lock;
++
++ down_read(&pd->driver->sem);
++ pt = psb_mmu_pt_map_lock(pd, virtual);
++ if (!pt) {
++ uint32_t *v;
++
++ spin_lock(lock);
++ v = kmap_atomic(pd->p, KM_USER0);
++ tmp = v[psb_mmu_pd_index(virtual)];
++ kunmap_atomic(v, KM_USER0);
++ spin_unlock(lock);
++
++ if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
++ !(pd->invalid_pte & PSB_PTE_VALID)) {
++ ret = -EINVAL;
++ goto out;
++ }
++ ret = 0;
++ *pfn = pd->invalid_pte >> PAGE_SHIFT;
++ goto out;
++ }
++ tmp = pt->v[psb_mmu_pt_index(virtual)];
++ if (!(tmp & PSB_PTE_VALID)) {
++ ret = -EINVAL;
++ } else {
++ ret = 0;
++ *pfn = tmp >> PAGE_SHIFT;
++ }
++ psb_mmu_pt_unmap_unlock(pt);
++out:
++ up_read(&pd->driver->sem);
++ return ret;
++}
++#if 0
++void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset)
++{
++ struct page *p;
++ unsigned long pfn;
++ int ret = 0;
++ struct psb_mmu_pd *pd;
++ uint32_t *v;
++ uint32_t *vmmu;
++
++ pd = driver->default_pd;
++ if (!pd)
++ printk(KERN_WARNING "Could not get default pd\n");
++
++
++ p = alloc_page(GFP_DMA32);
++
++ if (!p) {
++ printk(KERN_WARNING "Failed allocating page\n");
++ return;
++ }
++
++ v = kmap(p);
++ memset(v, 0x67, PAGE_SIZE);
++
++ pfn = (offset >> PAGE_SHIFT);
++
++ ret = psb_mmu_insert_pages(pd, &p, pfn << PAGE_SHIFT, 1, 0, 0, 0);
++ if (ret) {
++ printk(KERN_WARNING "Failed inserting mmu page\n");
++ goto out_err1;
++ }
++
++ /* Ioremap the page through the GART aperture */
++
++ vmmu = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
++ if (!vmmu) {
++ printk(KERN_WARNING "Failed ioremapping page\n");
++ goto out_err2;
++ }
++
++ /* Read from the page with mmu disabled. */
++ printk(KERN_INFO "Page first dword is 0x%08x\n", ioread32(vmmu));
++
++ /* Enable the mmu for host accesses and read again. */
++ psb_mmu_enable_requestor(driver, _PSB_MMU_ER_HOST);
++
++ printk(KERN_INFO "MMU Page first dword is (0x67676767) 0x%08x\n",
++ ioread32(vmmu));
++ *v = 0x15243705;
++ printk(KERN_INFO "MMU Page new dword is (0x15243705) 0x%08x\n",
++ ioread32(vmmu));
++ iowrite32(0x16243355, vmmu);
++ (void) ioread32(vmmu);
++ printk(KERN_INFO "Page new dword is (0x16243355) 0x%08x\n", *v);
++
++ printk(KERN_INFO "Int stat is 0x%08x\n",
++ psb_ioread32(driver, PSB_CR_BIF_INT_STAT));
++ printk(KERN_INFO "Fault is 0x%08x\n",
++ psb_ioread32(driver, PSB_CR_BIF_FAULT));
++
++ /* Disable MMU for host accesses and clear page fault register */
++ psb_mmu_disable_requestor(driver, _PSB_MMU_ER_HOST);
++ iounmap(vmmu);
++out_err2:
++ psb_mmu_remove_pages(pd, pfn << PAGE_SHIFT, 1, 0, 0);
++out_err1:
++ kunmap(p);
++ __free_page(p);
++}
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_msvdx.c b/drivers/gpu/drm/mrst/drv/psb_msvdx.c
+new file mode 100644
+index 0000000..4ad5b31
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_msvdx.c
+@@ -0,0 +1,1063 @@
++/**************************************************************************
++ * MSVDX I/O operations and IRQ handling
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include <drm/drmP.h>
++#include <drm/drm_os_linux.h>
++#include "psb_drv.h"
++#include "psb_drm.h"
++#include "psb_msvdx.h"
++#include "lnc_topaz.h"
++#include "ospm_power.h"
++#include <linux/io.h>
++#include <linux/delay.h>
++
++#ifndef list_first_entry
++#define list_first_entry(ptr, type, member) \
++ list_entry((ptr)->next, type, member)
++#endif
++
++
++static int psb_msvdx_send(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size);
++
++static int psb_msvdx_dequeue_send(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct psb_msvdx_cmd_queue *msvdx_cmd = NULL;
++ int ret = 0;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ if (list_empty(&msvdx_priv->msvdx_queue)) {
++ PSB_DEBUG_GENERAL("MSVDXQUE: msvdx list empty.\n");
++ msvdx_priv->msvdx_busy = 0;
++ return -EINVAL;
++ }
++ msvdx_cmd = list_first_entry(&msvdx_priv->msvdx_queue,
++ struct psb_msvdx_cmd_queue, head);
++ PSB_DEBUG_GENERAL("MSVDXQUE: Queue has id %08x\n", msvdx_cmd->sequence);
++ ret = psb_msvdx_send(dev, msvdx_cmd->cmd, msvdx_cmd->cmd_size);
++ if (ret) {
++ DRM_ERROR("MSVDXQUE: psb_msvdx_send failed\n");
++ ret = -EINVAL;
++ }
++ list_del(&msvdx_cmd->head);
++ kfree(msvdx_cmd->cmd);
++ kfree(msvdx_cmd);
++
++ return ret;
++}
++
++static int psb_msvdx_map_command(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset, unsigned long cmd_size,
++ void **msvdx_cmd, uint32_t sequence, int copy_cmd)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++ int ret = 0;
++ unsigned long cmd_page_offset = cmd_offset & ~PAGE_MASK;
++ unsigned long cmd_size_remaining;
++ struct ttm_bo_kmap_obj cmd_kmap, regio_kmap;
++ void *cmd, *tmp, *cmd_start;
++ bool is_iomem;
++
++ /* command buffers may not exceed page boundary */
++ if (cmd_size + cmd_page_offset > PAGE_SIZE)
++ return -EINVAL;
++
++ ret = ttm_bo_kmap(cmd_buffer, cmd_offset >> PAGE_SHIFT, 1, &cmd_kmap);
++ if (ret) {
++ DRM_ERROR("MSVDXQUE:ret:%d\n", ret);
++ return ret;
++ }
++
++ cmd_start = (void *)ttm_kmap_obj_virtual(&cmd_kmap, &is_iomem)
++ + cmd_page_offset;
++ cmd = cmd_start;
++ cmd_size_remaining = cmd_size;
++
++ while (cmd_size_remaining > 0) {
++ uint32_t cur_cmd_size = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_SIZE);
++ uint32_t cur_cmd_id = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_ID);
++ uint32_t mmu_ptd = 0, tmp = 0;
++ struct psb_msvdx_deblock_queue *msvdx_deblock;
++ unsigned long irq_flags;
++
++ PSB_DEBUG_GENERAL("cmd start at %08x cur_cmd_size = %d"
++ " cur_cmd_id = %02x fence = %08x\n",
++ (uint32_t) cmd, cur_cmd_size, cur_cmd_id, sequence);
++ if ((cur_cmd_size % sizeof(uint32_t))
++ || (cur_cmd_size > cmd_size_remaining)) {
++ ret = -EINVAL;
++ DRM_ERROR("MSVDX: ret:%d\n", ret);
++ goto out;
++ }
++
++ switch (cur_cmd_id) {
++ case VA_MSGID_RENDER:
++ /* Fence ID */
++ MEMIO_WRITE_FIELD(cmd, FW_VA_RENDER_FENCE_VALUE,
++ sequence);
++ mmu_ptd = psb_get_default_pd_addr(dev_priv->mmu);
++ tmp = atomic_cmpxchg(&dev_priv->msvdx_mmu_invaldc,
++ 1, 0);
++ if (tmp == 1) {
++ mmu_ptd |= 1;
++ PSB_DEBUG_GENERAL("MSVDX:Set MMU invalidate\n");
++ }
++
++ /* PTD */
++ MEMIO_WRITE_FIELD(cmd, FW_VA_RENDER_MMUPTD, mmu_ptd);
++ break;
++
++ case VA_MSGID_DEBLOCK:
++ /* Fence ID */
++ MEMIO_WRITE_FIELD(cmd, FW_DXVA_DEBLOCK_FENCE_VALUE,
++ sequence);
++ mmu_ptd = psb_get_default_pd_addr(dev_priv->mmu);
++ tmp = atomic_cmpxchg(&dev_priv->msvdx_mmu_invaldc,
++ 1, 0);
++ if (tmp == 1) {
++ mmu_ptd |= 1;
++ PSB_DEBUG_GENERAL("MSVDX:Set MMU invalidate\n");
++ }
++
++ /* PTD */
++ MEMIO_WRITE_FIELD(cmd,
++ FW_DXVA_DEBLOCK_MMUPTD,
++ mmu_ptd);
++
++ /* printk("Got deblock msg\n"); */
++ /* Deblock message is followed by 32 */
++ /* bytes of deblock params */
++ msvdx_deblock = kmalloc(
++ sizeof(struct psb_msvdx_deblock_queue),
++ GFP_KERNEL);
++
++ if (msvdx_deblock == NULL) {
++ DRM_ERROR("DEBLOCK QUE: Out of memory...\n");
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ memcpy(&msvdx_deblock->dbParams, cmd + 16, 32);
++
++ ret = ttm_bo_kmap(
++ (struct ttm_buffer_object *)
++ msvdx_deblock->dbParams.handle,
++ 0,
++ (msvdx_deblock->dbParams.buffer_size +
++ PAGE_SIZE - 1) >> PAGE_SHIFT,
++ &regio_kmap);
++
++ /* printk("deblock regio buffer size is 0x%x\n",
++ msvdx_deblock->dbParams.buffer_size); */
++
++ if (likely(!ret)) {
++ msvdx_deblock->dbParams.pPicparams = kmalloc(
++ msvdx_deblock->dbParams.buffer_size,
++ GFP_KERNEL);
++
++ if (msvdx_deblock->dbParams.pPicparams != NULL)
++ memcpy(
++ msvdx_deblock->dbParams.pPicparams,
++ regio_kmap.virtual,
++ msvdx_deblock->dbParams.buffer_size);
++ ttm_bo_kunmap(&regio_kmap);
++ }
++ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
++ list_add_tail(&msvdx_deblock->head,
++ &msvdx_priv->deblock_queue);
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock,
++ irq_flags);
++
++ cmd += 32;
++ cmd_size_remaining -= 32;
++ break;
++
++
++ default:
++ /* Msg not supported */
++ ret = -EINVAL;
++ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
++ goto out;
++ }
++
++ cmd += cur_cmd_size;
++ cmd_size_remaining -= cur_cmd_size;
++ }
++
++ if (copy_cmd) {
++ PSB_DEBUG_GENERAL("MSVDXQUE:copying command\n");
++
++ tmp = kzalloc(cmd_size, GFP_KERNEL);
++ if (tmp == NULL) {
++ ret = -ENOMEM;
++ DRM_ERROR("MSVDX: fail to callc,ret=:%d\n", ret);
++ goto out;
++ }
++ memcpy(tmp, cmd_start, cmd_size);
++ *msvdx_cmd = tmp;
++ } else {
++ PSB_DEBUG_GENERAL("MSVDXQUE:did NOT copy command\n");
++ ret = psb_msvdx_send(dev, cmd_start, cmd_size);
++ if (ret) {
++ DRM_ERROR("MSVDXQUE: psb_msvdx_send failed\n");
++ ret = -EINVAL;
++ }
++ }
++
++out:
++ ttm_bo_kunmap(&cmd_kmap);
++
++ return ret;
++}
++
++int psb_submit_video_cmdbuf(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset, unsigned long cmd_size,
++ struct ttm_fence_object *fence)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ uint32_t sequence = dev_priv->sequence[PSB_ENGINE_VIDEO];
++ unsigned long irq_flags;
++ int ret = 0;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++ int offset = 0;
++
++ /* psb_schedule_watchdog(dev_priv); */
++
++ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
++ if (msvdx_priv->msvdx_needs_reset) {
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
++ PSB_DEBUG_GENERAL("MSVDX: will reset msvdx\n");
++ if (psb_msvdx_reset(dev_priv)) {
++ ret = -EBUSY;
++ DRM_ERROR("MSVDX: Reset failed\n");
++ return ret;
++ }
++ msvdx_priv->msvdx_needs_reset = 0;
++ msvdx_priv->msvdx_busy = 0;
++
++ psb_msvdx_init(dev);
++
++ /* restore vec local mem if needed */
++ if (msvdx_priv->vec_local_mem_saved) {
++ for (offset = 0; offset < VEC_LOCAL_MEM_BYTE_SIZE / 4; ++offset)
++ PSB_WMSVDX32(msvdx_priv->vec_local_mem_data[offset],
++ VEC_LOCAL_MEM_OFFSET + offset * 4);
++
++ msvdx_priv->vec_local_mem_saved = 0;
++ }
++
++ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
++ }
++
++ if (!msvdx_priv->msvdx_fw_loaded) {
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
++ PSB_DEBUG_GENERAL("MSVDX:reload FW to MTX\n");
++
++ ret = psb_setup_fw(dev);
++ if (ret) {
++ DRM_ERROR("MSVDX:fail to load FW\n");
++ /* FIXME: find a proper return value */
++ return -EFAULT;
++ }
++ msvdx_priv->msvdx_fw_loaded = 1;
++
++ PSB_DEBUG_GENERAL("MSVDX: load firmware successfully\n");
++ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
++ }
++
++ if (!msvdx_priv->msvdx_busy) {
++ msvdx_priv->msvdx_busy = 1;
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
++ PSB_DEBUG_GENERAL("MSVDX: commit command to HW,seq=0x%08x\n",
++ sequence);
++ ret = psb_msvdx_map_command(dev, cmd_buffer, cmd_offset,
++ cmd_size, NULL, sequence, 0);
++ if (ret) {
++ DRM_ERROR("MSVDXQUE: Failed to extract cmd\n");
++ return ret;
++ }
++ } else {
++ struct psb_msvdx_cmd_queue *msvdx_cmd;
++ void *cmd = NULL;
++
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
++ /* queue the command to be sent when the h/w is ready */
++ PSB_DEBUG_GENERAL("MSVDXQUE: queueing sequence:%08x..\n",
++ sequence);
++ msvdx_cmd = kzalloc(sizeof(struct psb_msvdx_cmd_queue),
++ GFP_KERNEL);
++ if (msvdx_cmd == NULL) {
++ DRM_ERROR("MSVDXQUE: Out of memory...\n");
++ return -ENOMEM;
++ }
++
++ ret = psb_msvdx_map_command(dev, cmd_buffer, cmd_offset,
++ cmd_size, &cmd, sequence, 1);
++ if (ret) {
++ DRM_ERROR("MSVDXQUE: Failed to extract cmd\n");
++ kfree(msvdx_cmd
++ );
++ return ret;
++ }
++ msvdx_cmd->cmd = cmd;
++ msvdx_cmd->cmd_size = cmd_size;
++ msvdx_cmd->sequence = sequence;
++ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
++ list_add_tail(&msvdx_cmd->head, &msvdx_priv->msvdx_queue);
++ if (!msvdx_priv->msvdx_busy) {
++ msvdx_priv->msvdx_busy = 1;
++ PSB_DEBUG_GENERAL("MSVDXQUE: Need immediate dequeue\n");
++ psb_msvdx_dequeue_send(dev);
++ }
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
++ }
++
++ return ret;
++}
++
++int psb_cmdbuf_video(struct drm_file *priv,
++ struct list_head *validate_list,
++ uint32_t fence_type,
++ struct drm_psb_cmdbuf_arg *arg,
++ struct ttm_buffer_object *cmd_buffer,
++ struct psb_ttm_fence_rep *fence_arg)
++{
++ struct drm_device *dev = priv->minor->dev;
++ struct ttm_fence_object *fence;
++ int ret;
++
++ /*
++ * Check this. Doesn't seem right. Have fencing done AFTER command
++ * submission and make sure drm_psb_idle idles the MSVDX completely.
++ */
++ ret =
++ psb_submit_video_cmdbuf(dev, cmd_buffer, arg->cmdbuf_offset,
++ arg->cmdbuf_size, NULL);
++ if (ret)
++ return ret;
++
++
++ /* DRM_ERROR("Intel: Fix video fencing!!\n"); */
++ psb_fence_or_sync(priv, PSB_ENGINE_VIDEO, fence_type,
++ arg->fence_flags, validate_list, fence_arg,
++ &fence);
++
++ ttm_fence_object_unref(&fence);
++ mutex_lock(&cmd_buffer->mutex);
++ if (cmd_buffer->sync_obj != NULL)
++ ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
++ mutex_unlock(&cmd_buffer->mutex);
++
++ return 0;
++}
++
++
++static int psb_msvdx_send(struct drm_device *dev, void *cmd,
++ unsigned long cmd_size)
++{
++ int ret = 0;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++
++ while (cmd_size > 0) {
++ uint32_t cur_cmd_size = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_SIZE);
++ uint32_t cur_cmd_id = MEMIO_READ_FIELD(cmd, FWRK_GENMSG_ID);
++ if (cur_cmd_size > cmd_size) {
++ ret = -EINVAL;
++ DRM_ERROR("MSVDX:cmd_size %lu cur_cmd_size %lu\n",
++ cmd_size, (unsigned long)cur_cmd_size);
++ goto out;
++ }
++
++ /* Send the message to h/w */
++ ret = psb_mtx_send(dev_priv, cmd);
++ if (ret) {
++ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
++ goto out;
++ }
++ cmd += cur_cmd_size;
++ cmd_size -= cur_cmd_size;
++ if (cur_cmd_id == VA_MSGID_DEBLOCK) {
++ cmd += 32;
++ cmd_size -= 32;
++ }
++ }
++
++out:
++ PSB_DEBUG_GENERAL("MSVDX: ret:%d\n", ret);
++ return ret;
++}
++
++int psb_mtx_send(struct drm_psb_private *dev_priv, const void *msg)
++{
++ static uint32_t pad_msg[FWRK_PADMSG_SIZE];
++ const uint32_t *p_msg = (uint32_t *) msg;
++ uint32_t msg_num, words_free, ridx, widx, buf_size, buf_offset;
++ int ret = 0;
++
++ PSB_DEBUG_GENERAL("MSVDX: psb_mtx_send\n");
++
++ /* we need clocks enabled before we touch VEC local ram */
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++
++ msg_num = (MEMIO_READ_FIELD(msg, FWRK_GENMSG_SIZE) + 3) / 4;
++
++ buf_size = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_BUF_SIZE) & ((1 << 16) - 1);
++
++ if (msg_num > buf_size) {
++ ret = -EINVAL;
++ DRM_ERROR("MSVDX: message exceed maximum,ret:%d\n", ret);
++ goto out;
++ }
++
++ ridx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_RD_INDEX);
++ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
++
++
++ buf_size = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_BUF_SIZE) & ((1 << 16) - 1);
++ /*0x2000 is VEC Local Ram offset*/
++ buf_offset =
++ (PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_BUF_SIZE) >> 16) + 0x2000;
++
++ /* message would wrap, need to send a pad message */
++ if (widx + msg_num > buf_size) {
++ /* Shouldn't happen for a PAD message itself */
++ BUG_ON(MEMIO_READ_FIELD(msg, FWRK_GENMSG_ID)
++ == FWRK_MSGID_PADDING);
++
++ /* if the read pointer is at zero then we must wait for it to
++ * change otherwise the write pointer will equal the read
++ * pointer,which should only happen when the buffer is empty
++ *
++ * This will only happens if we try to overfill the queue,
++ * queue management should make
++ * sure this never happens in the first place.
++ */
++ BUG_ON(0 == ridx);
++ if (0 == ridx) {
++ ret = -EINVAL;
++ DRM_ERROR("MSVDX: RIndex=0, ret:%d\n", ret);
++ goto out;
++ }
++
++ /* Send a pad message */
++ MEMIO_WRITE_FIELD(pad_msg, FWRK_GENMSG_SIZE,
++ (buf_size - widx) << 2);
++ MEMIO_WRITE_FIELD(pad_msg, FWRK_GENMSG_ID,
++ FWRK_MSGID_PADDING);
++ psb_mtx_send(dev_priv, pad_msg);
++ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_MTX_WRT_INDEX);
++ }
++
++ if (widx >= ridx)
++ words_free = buf_size - (widx - ridx);
++ else
++ words_free = ridx - widx;
++
++ BUG_ON(msg_num > words_free);
++ if (msg_num > words_free) {
++ ret = -EINVAL;
++ DRM_ERROR("MSVDX: msg_num > words_free, ret:%d\n", ret);
++ goto out;
++ }
++ while (msg_num > 0) {
++ PSB_WMSVDX32(*p_msg++, buf_offset + (widx << 2));
++ msg_num--;
++ widx++;
++ if (buf_size == widx)
++ widx = 0;
++ }
++
++ PSB_WMSVDX32(widx, MSVDX_COMMS_TO_MTX_WRT_INDEX);
++
++ /* Make sure clocks are enabled before we kick */
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++
++ /* signal an interrupt to let the mtx know there is a new message */
++ /* PSB_WMSVDX32(1, MSVDX_MTX_KICKI); */
++ PSB_WMSVDX32(1, MSVDX_MTX_KICK);
++
++ /* Read MSVDX Register several times in case Idle signal assert */
++ PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
++ PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
++ PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
++ PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
++
++
++out:
++ return ret;
++}
++
++static int psb_msvdx_towpass_deblock(struct drm_device *dev,
++ uint32_t *pPicparams)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ uint32_t cmd_size, cmd_count = 0;
++ uint32_t cmd_id, reg, value, wait, tmp, read = 0, ret = 0;
++
++ cmd_size = *pPicparams++;
++ PSB_DEBUG_GENERAL("MSVDX DEBLOCK: deblock get cmd size %d\n", cmd_size);
++ /* printk("MSVDX DEBLOCK: deblock get cmd size %d\n", cmd_size); */
++
++ do {
++ cmd_id = (*pPicparams) & 0xf0000000;
++ reg = (*pPicparams++) & 0x0fffffff;
++ switch (cmd_id) {
++ case MSVDX_DEBLOCK_REG_SET: {
++ value = *pPicparams++;
++ PSB_WMSVDX32(value, reg);
++ cmd_count += 2;
++ break;
++ }
++ case MSVDX_DEBLOCK_REG_GET: {
++ read = PSB_RMSVDX32(reg);
++ cmd_count += 1;
++ break;
++ }
++ case MSVDX_DEBLOCK_REG_POLLn: {
++ value = *pPicparams++;
++ wait = 0;
++
++ do {
++ tmp = PSB_RMSVDX32(reg);
++ } while ((wait++ < 20000) && (value > tmp));
++
++ if (wait >= 20000) {
++ ret = 1;
++ PSB_DEBUG_GENERAL(
++ "MSVDX DEBLOCK: polln cmd space time out!\n");
++ goto finish_deblock;
++ }
++ cmd_count += 2;
++ break;
++ }
++ case MSVDX_DEBLOCK_REG_POLLx: {
++ wait = 0;
++
++ do {
++ tmp = PSB_RMSVDX32(reg);
++ } while ((wait++ < 20000) && (read > tmp));
++
++ if (wait >= 20000) {
++ ret = 1;
++ PSB_DEBUG_GENERAL(
++ "MSVDX DEBLOCK: pollx cmd space time out!\n");
++ goto finish_deblock;
++ }
++
++ cmd_count += 1;
++ break;
++ }
++ default:
++ ret = 1;
++ PSB_DEBUG_GENERAL(
++ "MSVDX DEBLOCK: get error cmd_id: 0x%x!\n",
++ cmd_id);
++ PSB_DEBUG_GENERAL(
++ "MSVDX DEBLOCK: execute cmd num is %d\n",
++ cmd_count);
++ /* printk("MSVDX DEBLOCK: get error cmd_id: 0x%x!\n",
++ cmd_id); */
++ /* printk("MSVDX DEBLOCK: execute cmd num is %d\n",
++ cmd_count); */
++ goto finish_deblock;
++ }
++ } while (cmd_count < cmd_size);
++
++
++finish_deblock:
++ PSB_DEBUG_GENERAL("MSVDX DEBLOCK: execute cmd num is %d\n", cmd_count);
++ return ret;
++}
++
++/*
++ * MSVDX MTX interrupt
++ */
++static void psb_msvdx_mtx_interrupt(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ static uint32_t buf[128]; /* message buffer */
++ uint32_t ridx, widx, buf_size, buf_offset;
++ uint32_t num, ofs; /* message num and offset */
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ PSB_DEBUG_GENERAL("MSVDX:Got a MSVDX MTX interrupt\n");
++
++ /* Are clocks enabled - If not enable before
++ * attempting to read from VLR
++ */
++ if (PSB_RMSVDX32(MSVDX_MAN_CLK_ENABLE) != (clk_enable_all)) {
++ PSB_DEBUG_GENERAL("MSVDX:Clocks disabled when Interupt set\n");
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++ }
++
++loop: /* just for coding style check */
++ ridx = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_RD_INDEX);
++ widx = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_WRT_INDEX);
++
++ /* Get out of here if nothing */
++ if (ridx == widx)
++ goto done;
++
++
++ buf_size = PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF_SIZE) & ((1 << 16) - 1);
++ /*0x2000 is VEC Local Ram offset*/
++ buf_offset =
++ (PSB_RMSVDX32(MSVDX_COMMS_TO_HOST_BUF_SIZE) >> 16) + 0x2000;
++
++ ofs = 0;
++ buf[ofs] = PSB_RMSVDX32(buf_offset + (ridx << 2));
++
++ /* round to nearest word */
++ num = (MEMIO_READ_FIELD(buf, FWRK_GENMSG_SIZE) + 3) / 4;
++
++ /* ASSERT(num <= sizeof(buf) / sizeof(uint32_t)); */
++
++ if (++ridx >= buf_size)
++ ridx = 0;
++
++ for (ofs++; ofs < num; ofs++) {
++ buf[ofs] = PSB_RMSVDX32(buf_offset + (ridx << 2));
++
++ if (++ridx >= buf_size)
++ ridx = 0;
++ }
++
++ /* Update the Read index */
++ PSB_WMSVDX32(ridx, MSVDX_COMMS_TO_HOST_RD_INDEX);
++
++ if (msvdx_priv->msvdx_needs_reset)
++ goto loop;
++
++ switch (MEMIO_READ_FIELD(buf, FWRK_GENMSG_ID)) {
++ case VA_MSGID_CMD_HW_PANIC:
++ case VA_MSGID_CMD_FAILED: {
++ uint32_t fence = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_FAILED_FENCE_VALUE);
++ uint32_t fault = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_FAILED_IRQSTATUS);
++ uint32_t msg_id = MEMIO_READ_FIELD(buf, FWRK_GENMSG_ID);
++ uint32_t diff = 0;
++
++ (void) fault;
++ if (msg_id == VA_MSGID_CMD_HW_PANIC)
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_CMD_HW_PANIC:"
++ "Fault detected"
++ " - Fence: %08x, Status: %08x"
++ " - resetting and ignoring error\n",
++ fence, fault);
++ else
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_CMD_FAILED:"
++ "Fault detected"
++ " - Fence: %08x, Status: %08x"
++ " - resetting and ignoring error\n",
++ fence, fault);
++
++ msvdx_priv->msvdx_needs_reset = 1;
++
++ if (msg_id == VA_MSGID_CMD_HW_PANIC) {
++ diff = msvdx_priv->msvdx_current_sequence
++ - dev_priv->sequence[PSB_ENGINE_VIDEO];
++
++ if (diff > 0x0FFFFFFF)
++ msvdx_priv->msvdx_current_sequence++;
++
++ PSB_DEBUG_GENERAL("MSVDX: Fence ID missing, "
++ "assuming %08x\n",
++ msvdx_priv->msvdx_current_sequence);
++ } else {
++ msvdx_priv->msvdx_current_sequence = fence;
++ }
++
++ psb_fence_error(dev, PSB_ENGINE_VIDEO,
++ msvdx_priv->msvdx_current_sequence,
++ _PSB_FENCE_TYPE_EXE, DRM_CMD_FAILED);
++
++ /* Flush the command queue */
++ psb_msvdx_flush_cmd_queue(dev);
++
++ goto done;
++ }
++ case VA_MSGID_CMD_COMPLETED: {
++ uint32_t fence = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_COMPLETED_FENCE_VALUE);
++ uint32_t flags = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_COMPLETED_FLAGS);
++
++ PSB_DEBUG_GENERAL("MSVDX:VA_MSGID_CMD_COMPLETED: "
++ "FenceID: %08x, flags: 0x%x\n",
++ fence, flags);
++
++ msvdx_priv->msvdx_current_sequence = fence;
++
++ psb_fence_handler(dev, PSB_ENGINE_VIDEO);
++
++ if (flags & FW_VA_RENDER_HOST_INT) {
++ /*Now send the next command from the msvdx cmd queue */
++ psb_msvdx_dequeue_send(dev);
++ goto done;
++ }
++
++ break;
++ }
++ case VA_MSGID_CMD_COMPLETED_BATCH: {
++ uint32_t fence = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_COMPLETED_FENCE_VALUE);
++ uint32_t tickcnt = MEMIO_READ_FIELD(buf,
++ FW_VA_CMD_COMPLETED_NO_TICKS);
++ (void)tickcnt;
++ /* we have the fence value in the message */
++ PSB_DEBUG_GENERAL("MSVDX:VA_MSGID_CMD_COMPLETED_BATCH:"
++ " FenceID: %08x, TickCount: %08x\n",
++ fence, tickcnt);
++ msvdx_priv->msvdx_current_sequence = fence;
++
++ break;
++ }
++ case VA_MSGID_ACK:
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_ACK\n");
++ break;
++
++ case VA_MSGID_TEST1:
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_TEST1\n");
++ break;
++
++ case VA_MSGID_TEST2:
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_TEST2\n");
++ break;
++ /* Don't need to do anything with these messages */
++
++ case VA_MSGID_DEBLOCK_REQUIRED: {
++ uint32_t ctxid = MEMIO_READ_FIELD(buf,
++ FW_VA_DEBLOCK_REQUIRED_CONTEXT);
++ struct psb_msvdx_deblock_queue *msvdx_deblock;
++
++ PSB_DEBUG_GENERAL("MSVDX: VA_MSGID_DEBLOCK_REQUIRED"
++ " Context=%08x\n", ctxid);
++ if (list_empty(&msvdx_priv->deblock_queue)) {
++ PSB_DEBUG_GENERAL(
++ "DEBLOCKQUE: deblock param list is empty\n");
++ PSB_WMSVDX32(0, MSVDX_CMDS_END_SLICE_PICTURE);
++ PSB_WMSVDX32(1, MSVDX_CMDS_END_SLICE_PICTURE);
++ goto done;
++ }
++ msvdx_deblock = list_first_entry(&msvdx_priv->deblock_queue,
++ struct psb_msvdx_deblock_queue, head);
++
++ if (0) {
++ PSB_DEBUG_GENERAL("MSVDX DEBLOCK: by pass \n");
++ /* try to unblock rendec */
++ PSB_WMSVDX32(0, MSVDX_CMDS_END_SLICE_PICTURE);
++ PSB_WMSVDX32(1, MSVDX_CMDS_END_SLICE_PICTURE);
++ kfree(msvdx_deblock->dbParams.pPicparams);
++ list_del(&msvdx_deblock->head);
++ goto done;
++ }
++
++
++ if (ctxid != msvdx_deblock->dbParams.ctxid) {
++ PSB_DEBUG_GENERAL("MSVDX DEBLOCK: wrong ctxid, may "
++ "caused by multiple context since "
++ "it's not supported yet\n");
++ /* try to unblock rendec */
++ PSB_WMSVDX32(0, MSVDX_CMDS_END_SLICE_PICTURE);
++ PSB_WMSVDX32(1, MSVDX_CMDS_END_SLICE_PICTURE);
++ kfree(msvdx_deblock->dbParams.pPicparams);
++ list_del(&msvdx_deblock->head);
++ goto done;
++ }
++
++ if (msvdx_deblock->dbParams.pPicparams) {
++ PSB_DEBUG_GENERAL("MSVDX DEBLOCK: start deblocking\n");
++ /* printk("MSVDX DEBLOCK: start deblocking\n"); */
++
++ if (psb_msvdx_towpass_deblock(dev,
++ msvdx_deblock->dbParams.pPicparams)) {
++
++ PSB_DEBUG_GENERAL(
++ "MSVDX DEBLOCK: deblock fail!\n");
++ PSB_WMSVDX32(0, MSVDX_CMDS_END_SLICE_PICTURE);
++ PSB_WMSVDX32(1, MSVDX_CMDS_END_SLICE_PICTURE);
++ }
++ kfree(msvdx_deblock->dbParams.pPicparams);
++ } else {
++ PSB_DEBUG_GENERAL("MSVDX DEBLOCK: deblock abort!\n");
++ /* printk("MSVDX DEBLOCK: deblock abort!\n"); */
++ PSB_WMSVDX32(0, MSVDX_CMDS_END_SLICE_PICTURE);
++ PSB_WMSVDX32(1, MSVDX_CMDS_END_SLICE_PICTURE);
++ }
++
++ list_del(&msvdx_deblock->head);
++ kfree(msvdx_deblock);
++ break;
++ }
++ default:
++ DRM_ERROR("ERROR: msvdx Unknown message from MTX \n");
++ goto done;
++ }
++
++done:
++ /* we get a frame/slice done, try to save some power*/
++ if (drm_msvdx_pmpolicy != PSB_PMPOLICY_NOPM)
++ schedule_delayed_work(&dev_priv->scheduler.msvdx_suspend_wq, 0);
++
++ DRM_MEMORYBARRIER(); /* TBD check this... */
++}
++
++
++/*
++ * MSVDX interrupt.
++ */
++IMG_BOOL psb_msvdx_interrupt(IMG_VOID *pvData)
++{
++ struct drm_device *dev;
++ struct drm_psb_private *dev_priv;
++ struct msvdx_private *msvdx_priv;
++ uint32_t msvdx_stat;
++
++ if (pvData == IMG_NULL) {
++ DRM_ERROR("ERROR: msvdx %s, Invalid params\n", __func__);
++ return IMG_FALSE;
++ }
++
++ if (!ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND)) {
++ DRM_ERROR("ERROR: interrupt arrived but HW is power off\n");
++ return IMG_FALSE;
++ }
++
++ dev = (struct drm_device *)pvData;
++ dev_priv = (struct drm_psb_private *) dev->dev_private;
++ msvdx_priv = dev_priv->msvdx_private;
++
++ msvdx_priv->msvdx_hw_busy = REG_READ(0x20D0) & (0x1 << 9);
++
++ msvdx_stat = PSB_RMSVDX32(MSVDX_INTERRUPT_STATUS);
++
++ if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK) {
++ /*Ideally we should we should never get to this */
++ PSB_DEBUG_IRQ("MSVDX:MMU Fault:0x%x\n", msvdx_stat);
++
++ /* Pause MMU */
++ PSB_WMSVDX32(MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK,
++ MSVDX_MMU_CONTROL0);
++ DRM_WRITEMEMORYBARRIER();
++
++ /* Clear this interupt bit only */
++ PSB_WMSVDX32(MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK,
++ MSVDX_INTERRUPT_CLEAR);
++ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
++ DRM_READMEMORYBARRIER();
++
++ msvdx_priv->msvdx_needs_reset = 1;
++ } else if (msvdx_stat & MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK) {
++ PSB_DEBUG_IRQ
++ ("MSVDX: msvdx_stat: 0x%x(MTX)\n", msvdx_stat);
++
++ /* Clear all interupt bits */
++ PSB_WMSVDX32(0xffff, MSVDX_INTERRUPT_CLEAR);
++ PSB_RMSVDX32(MSVDX_INTERRUPT_CLEAR);
++ DRM_READMEMORYBARRIER();
++
++ psb_msvdx_mtx_interrupt(dev);
++ }
++
++ return IMG_TRUE;
++}
++
++
++void psb_msvdx_lockup(struct drm_psb_private *dev_priv,
++ int *msvdx_lockup, int *msvdx_idle)
++{
++ int tmp;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ *msvdx_lockup = 0;
++ *msvdx_idle = 1;
++
++#if 0
++ PSB_DEBUG_GENERAL("MSVDXTimer: current_sequence:%d "
++ "last_sequence:%d and last_submitted_sequence :%d\n",
++ msvdx_priv->msvdx_current_sequence,
++ msvdx_priv->msvdx_last_sequence,
++ dev_priv->sequence[PSB_ENGINE_VIDEO]);
++#endif
++
++ tmp = msvdx_priv->msvdx_current_sequence -
++ dev_priv->sequence[PSB_ENGINE_VIDEO];
++
++ if (tmp > 0x0FFFFFFF) {
++ if (msvdx_priv->msvdx_current_sequence ==
++ msvdx_priv->msvdx_last_sequence) {
++ DRM_ERROR("MSVDXTimer:locked-up for sequence:%d\n",
++ msvdx_priv->msvdx_current_sequence);
++ *msvdx_lockup = 1;
++ } else {
++ PSB_DEBUG_GENERAL("MSVDXTimer: "
++ "msvdx responded fine so far\n");
++ msvdx_priv->msvdx_last_sequence =
++ msvdx_priv->msvdx_current_sequence;
++ *msvdx_idle = 0;
++ }
++ }
++}
++
++int psb_check_msvdx_idle(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ if (msvdx_priv->msvdx_fw_loaded == 0)
++ return 0;
++
++ if (msvdx_priv->msvdx_busy) {
++ PSB_DEBUG_PM("MSVDX: psb_check_msvdx_idle returns busy\n");
++ return -EBUSY;
++ }
++
++ if (msvdx_priv->msvdx_hw_busy) {
++ PSB_DEBUG_PM("MSVDX: %s, HW is busy\n", __func__);
++ return -EBUSY;
++ }
++
++ return 0;
++}
++
++int lnc_video_getparam(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_lnc_video_getparam_arg *arg = data;
++ int ret = 0;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)file_priv->minor->dev->dev_private;
++#if defined(CONFIG_MRST_RAR_HANDLER)
++ struct RAR_buffer rar_buf;
++ size_t rar_status;
++#endif
++ void *rar_handler;
++ uint32_t offset = 0;
++ uint32_t device_info = 0;
++ uint32_t rar_ci_info[2];
++
++ switch (arg->key) {
++ case LNC_VIDEO_GETPARAM_RAR_INFO:
++ rar_ci_info[0] = dev_priv->rar_region_start;
++ rar_ci_info[1] = dev_priv->rar_region_size;
++ ret = copy_to_user((void __user *) ((unsigned long)arg->value),
++ &rar_ci_info[0],
++ sizeof(rar_ci_info));
++ break;
++ case LNC_VIDEO_GETPARAM_CI_INFO:
++ rar_ci_info[0] = dev_priv->ci_region_start;
++ rar_ci_info[1] = dev_priv->ci_region_size;
++ ret = copy_to_user((void __user *) ((unsigned long)arg->value),
++ &rar_ci_info[0],
++ sizeof(rar_ci_info));
++ break;
++ case LNC_VIDEO_GETPARAM_RAR_HANDLER_OFFSET:
++ ret = copy_from_user(&rar_handler,
++ (void __user *)((unsigned long)arg->arg),
++ sizeof(rar_handler));
++ if (ret)
++ break;
++
++#if defined(CONFIG_MRST_RAR_HANDLER)
++ rar_buf.info.handle = (__u32)rar_handler;
++ rar_buf.bus_address = (dma_addr_t)dev_priv->rar_region_start;
++ rar_status = 1;
++
++ rar_status = rar_handle_to_bus(&rar_buf, 1);
++ if (rar_status != 1) {
++ DRM_ERROR("MSVDX:rar_handle_to_bus failed\n");
++ ret = -1;
++ break;
++ }
++ rar_status = rar_release(&rar_buf, 1);
++ if (rar_status != 1)
++ DRM_ERROR("MSVDX:rar_release failed\n");
++
++ offset = (uint32_t) rar_buf.bus_address - dev_priv->rar_region_start;
++ PSB_DEBUG_GENERAL("MSVDX:RAR handler %p, bus address=0x%08x,"
++ "RAR region=0x%08x\n",
++ rar_handler,
++ (uint32_t)rar_buf.bus_address,
++ dev_priv->rar_region_start);
++#endif
++ ret = copy_to_user((void __user *)((unsigned long)arg->value),
++ &offset,
++ sizeof(offset));
++ break;
++ case LNC_VIDEO_FRAME_SKIP:
++ ret = lnc_video_frameskip(dev, arg->value);
++ break;
++ case LNC_VIDEO_DEVICE_INFO:
++ device_info = 0xffff & dev_priv->video_device_fuse;
++ device_info |= (0xffff & dev->pci_device) << 16;
++
++ ret = copy_to_user((void __user *) ((unsigned long)arg->value),
++ &device_info, sizeof(device_info));
++ break;
++ default:
++ ret = -EFAULT;
++ break;
++ }
++
++ if (ret)
++ return -EFAULT;
++
++ return 0;
++}
++
++inline int psb_try_power_down_msvdx(struct drm_device *dev)
++{
++ ospm_apm_power_down_msvdx(dev);
++ return 0;
++}
++
++int psb_msvdx_save_context(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)dev->dev_private;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++ int offset = 0;
++
++ msvdx_priv->msvdx_needs_reset = 1;
++
++ for (offset = 0; offset < VEC_LOCAL_MEM_BYTE_SIZE / 4; ++offset)
++ msvdx_priv->vec_local_mem_data[offset] =
++ PSB_RMSVDX32(VEC_LOCAL_MEM_OFFSET + offset * 4);
++
++ msvdx_priv->vec_local_mem_saved = 1;
++
++ return 0;
++}
++
++int psb_msvdx_restore_context(struct drm_device *dev)
++{
++ return 0;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_msvdx.h b/drivers/gpu/drm/mrst/drv/psb_msvdx.h
+new file mode 100644
+index 0000000..c067203
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_msvdx.h
+@@ -0,0 +1,610 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef _PSB_MSVDX_H_
++#define _PSB_MSVDX_H_
++
++#include "psb_drv.h"
++#include "img_types.h"
++
++#if defined(CONFIG_MRST_RAR_HANDLER)
++#include "rar/memrar.h"
++#endif
++
++extern int drm_msvdx_pmpolicy;
++
++int psb_wait_for_register(struct drm_psb_private *dev_priv,
++ uint32_t offset,
++ uint32_t value,
++ uint32_t enable);
++
++IMG_BOOL psb_msvdx_interrupt(IMG_VOID *pvData);
++
++int psb_msvdx_init(struct drm_device *dev);
++int psb_msvdx_uninit(struct drm_device *dev);
++int psb_msvdx_reset(struct drm_psb_private *dev_priv);
++uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver);
++int psb_mtx_send(struct drm_psb_private *dev_priv, const void *pvMsg);
++void psb_msvdx_flush_cmd_queue(struct drm_device *dev);
++void psb_msvdx_lockup(struct drm_psb_private *dev_priv,
++ int *msvdx_lockup, int *msvdx_idle);
++int psb_setup_fw(struct drm_device *dev);
++int psb_check_msvdx_idle(struct drm_device *dev);
++int psb_wait_msvdx_idle(struct drm_device *dev);
++int psb_cmdbuf_video(struct drm_file *priv,
++ struct list_head *validate_list,
++ uint32_t fence_type,
++ struct drm_psb_cmdbuf_arg *arg,
++ struct ttm_buffer_object *cmd_buffer,
++ struct psb_ttm_fence_rep *fence_arg);
++int psb_msvdx_save_context(struct drm_device *dev);
++int psb_msvdx_restore_context(struct drm_device *dev);
++
++bool
++psb_host_second_pass(struct drm_device *dev,
++ uint32_t ui32OperatingModeCmd,
++ void *pvParamBase,
++ uint32_t PicWidthInMbs,
++ uint32_t FrameHeightInMbs,
++ uint32_t ui32DeblockSourceY,
++ uint32_t ui32DeblockSourceUV);
++
++/* Non-Optimal Invalidation is not default */
++#define MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV 2
++#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
++
++#define FW_VA_RENDER_HOST_INT 0x00004000
++#define MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION 0x00000020
++
++/* There is no work currently underway on the hardware */
++#define MSVDX_FW_STATUS_HW_IDLE 0x00000001
++#define MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE 0x00000200
++#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0 \
++ (MSVDX_DEVICE_NODE_FLAGS_MMU_NONOPT_INV | \
++ MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION | \
++ MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
++
++#define MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1 \
++ (MSVDX_DEVICE_NODE_FLAGS_MMU_HW_INVALIDATION | \
++ MSVDX_DEVICE_NODE_FLAG_BRN23154_BLOCK_ON_FE)
++
++#define POULSBO_D0 0x5
++#define POULSBO_D1 0x6
++#define PSB_REVID_OFFSET 0x8
++
++#define MTX_CODE_BASE (0x80900000)
++#define MTX_DATA_BASE (0x82880000)
++#define PC_START_ADDRESS (0x80900000)
++
++#define MTX_CORE_CODE_MEM (0x10)
++#define MTX_CORE_DATA_MEM (0x18)
++
++#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
++#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8)
++#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK \
++ (0x00010000)
++#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK \
++ (0x00100000)
++#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK \
++ (0x01000000)
++#define MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK \
++ (0x10000000)
++
++#define clk_enable_all \
++(MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK)
++
++#define clk_enable_minimal \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
++
++#define clk_enable_auto \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_AUTO_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_AUTO_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_AUTO_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_AUTO_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_AUTO_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
++
++#define msvdx_sw_reset_all \
++(MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK | \
++MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_FE_SOFT_RESET_MASK | \
++MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_BE_SOFT_RESET_MASK | \
++MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_MEMIF_SOFT_RESET_MASK | \
++MSVDX_CORE_CR_MSVDX_CONTROL_CR_MSVDX_VEC_RENDEC_DEC_SOFT_RESET_MASK)
++
++#define MTX_INTERNAL_REG(R_SPECIFIER , U_SPECIFIER) \
++ (((R_SPECIFIER)<<4) | (U_SPECIFIER))
++#define MTX_PC MTX_INTERNAL_REG(0, 5)
++
++#define RENDEC_A_SIZE (4 * 1024 * 1024)
++#define RENDEC_B_SIZE (1024 * 1024)
++
++#define MEMIO_READ_FIELD(vpMem, field) \
++ ((uint32_t)(((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) \
++ & field##_MASK) >> field##_SHIFT))
++
++#define MEMIO_WRITE_FIELD(vpMem, field, value) \
++ (*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) = \
++ ((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) \
++ & (field##_TYPE)~field##_MASK) | \
++ (field##_TYPE)(((uint32_t)(value) << field##_SHIFT) & field##_MASK);
++
++#define MEMIO_WRITE_FIELD_LITE(vpMem, field, value) \
++ (*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) = \
++ ((*((field##_TYPE*)(((uint32_t)vpMem) + field##_OFFSET))) | \
++ (field##_TYPE)(((uint32_t)(value) << field##_SHIFT)));
++
++#define REGIO_READ_FIELD(reg_val, reg, field) \
++ ((reg_val & reg##_##field##_MASK) >> reg##_##field##_SHIFT)
++
++#define REGIO_WRITE_FIELD(reg_val, reg, field, value) \
++ (reg_val) = \
++ ((reg_val) & ~(reg##_##field##_MASK)) | \
++ (((value) << (reg##_##field##_SHIFT)) & (reg##_##field##_MASK));
++
++#define REGIO_WRITE_FIELD_LITE(reg_val, reg, field, value) \
++ (reg_val) = \
++ ((reg_val) | ((value) << (reg##_##field##_SHIFT)));
++
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK \
++ (0x00000001)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK \
++ (0x00000002)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK \
++ (0x00000004)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK \
++ (0x00000008)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK \
++ (0x00000010)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK \
++ (0x00000020)
++#define MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK \
++ (0x00000040)
++
++#define clk_enable_all \
++ (MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_PROCESS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDEB_ACCESS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VDMC_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ENTDEC_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_VEC_ITRANS_MAN_CLK_ENABLE_MASK | \
++MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK)
++
++#define clk_enable_minimal \
++ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_CORE_MAN_CLK_ENABLE_MASK | \
++ MSVDX_CORE_CR_MSVDX_MAN_CLK_ENABLE_CR_MTX_MAN_CLK_ENABLE_MASK
++
++/* MTX registers */
++#define MSVDX_MTX_ENABLE (0x0000)
++#define MSVDX_MTX_KICKI (0x0088)
++#define MSVDX_MTX_KICK (0x0080)
++#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST (0x00FC)
++#define MSVDX_MTX_REGISTER_READ_WRITE_DATA (0x00F8)
++#define MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER (0x0104)
++#define MSVDX_MTX_RAM_ACCESS_CONTROL (0x0108)
++#define MSVDX_MTX_RAM_ACCESS_STATUS (0x010C)
++#define MSVDX_MTX_SOFT_RESET (0x0200)
++
++/* MSVDX registers */
++#define MSVDX_CONTROL (0x0600)
++#define MSVDX_INTERRUPT_CLEAR (0x060C)
++#define MSVDX_INTERRUPT_STATUS (0x0608)
++#define MSVDX_HOST_INTERRUPT_ENABLE (0x0610)
++#define MSVDX_MMU_CONTROL0 (0x0680)
++#define MSVDX_MTX_RAM_BANK (0x06F0)
++#define MSVDX_MAN_CLK_ENABLE (0x0620)
++
++/* RENDEC registers */
++#define MSVDX_RENDEC_CONTROL0 (0x0868)
++#define MSVDX_RENDEC_CONTROL1 (0x086C)
++#define MSVDX_RENDEC_BUFFER_SIZE (0x0870)
++#define MSVDX_RENDEC_BASE_ADDR0 (0x0874)
++#define MSVDX_RENDEC_BASE_ADDR1 (0x0878)
++#define MSVDX_RENDEC_READ_DATA (0x0898)
++#define MSVDX_RENDEC_CONTEXT0 (0x0950)
++#define MSVDX_RENDEC_CONTEXT1 (0x0954)
++#define MSVDX_RENDEC_CONTEXT2 (0x0958)
++#define MSVDX_RENDEC_CONTEXT3 (0x095C)
++#define MSVDX_RENDEC_CONTEXT4 (0x0960)
++#define MSVDX_RENDEC_CONTEXT5 (0x0964)
++
++/* CMD */
++#define MSVDX_CMDS_END_SLICE_PICTURE (0x1404)
++
++/*
++ * This defines the MSVDX communication buffer
++ */
++#define MSVDX_COMMS_SIGNATURE_VALUE (0xA5A5A5A5) /*!< Signature value */
++/*!< Host buffer size (in 32-bit words) */
++#define NUM_WORDS_HOST_BUF (100)
++/*!< MTX buffer size (in 32-bit words) */
++#define NUM_WORDS_MTX_BUF (100)
++
++/* There is no work currently underway on the hardware */
++#define MSVDX_FW_STATUS_HW_IDLE 0x00000001
++
++#define MSVDX_COMMS_AREA_ADDR (0x02fe0)
++
++#define MSVDX_COMMS_OFFSET_FLAGS (MSVDX_COMMS_AREA_ADDR + 0x18)
++#define MSVDX_COMMS_MSG_COUNTER (MSVDX_COMMS_AREA_ADDR - 0x04)
++#define MSVDX_COMMS_FW_STATUS (MSVDX_COMMS_AREA_ADDR - 0x10)
++#define MSVDX_COMMS_SIGNATURE (MSVDX_COMMS_AREA_ADDR + 0x00)
++#define MSVDX_COMMS_TO_HOST_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x04)
++#define MSVDX_COMMS_TO_HOST_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x08)
++#define MSVDX_COMMS_TO_HOST_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x0C)
++#define MSVDX_COMMS_TO_MTX_BUF_SIZE (MSVDX_COMMS_AREA_ADDR + 0x10)
++#define MSVDX_COMMS_TO_MTX_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x14)
++#define MSVDX_COMMS_TO_MTX_CB_RD_INDEX (MSVDX_COMMS_AREA_ADDR + 0x18)
++#define MSVDX_COMMS_TO_MTX_WRT_INDEX (MSVDX_COMMS_AREA_ADDR + 0x1C)
++#define MSVDX_COMMS_TO_HOST_BUF (MSVDX_COMMS_AREA_ADDR + 0x20)
++#define MSVDX_COMMS_TO_MTX_BUF \
++ (MSVDX_COMMS_TO_HOST_BUF + (NUM_WORDS_HOST_BUF << 2))
++
++/*
++#define MSVDX_COMMS_AREA_END \
++ (MSVDX_COMMS_TO_MTX_BUF + (NUM_WORDS_HOST_BUF << 2))
++*/
++#define MSVDX_COMMS_AREA_END 0x03000
++
++#if (MSVDX_COMMS_AREA_END != 0x03000)
++#error
++#endif
++
++#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK (0x80000000)
++#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_SHIFT (31)
++
++#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_MASK (0x00010000)
++#define MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_RNW_SHIFT (16)
++
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_MASK (0x0FF00000)
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_SHIFT (20)
++
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_MASK (0x000FFFFC)
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_SHIFT (2)
++
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_MASK (0x00000002)
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_SHIFT (1)
++
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_MASK (0x00000001)
++#define MSVDX_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_SHIFT (0)
++
++#define MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK (0x00000001)
++#define MSVDX_MTX_SOFT_RESET_MTX_RESET_SHIFT (0)
++
++#define MSVDX_MTX_ENABLE_MTX_ENABLE_MASK (0x00000001)
++#define MSVDX_MTX_ENABLE_MTX_ENABLE_SHIFT (0)
++
++#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK (0x00000100)
++#define MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_SHIFT (8)
++
++#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_MASK (0x00000F00)
++#define MSVDX_INTERRUPT_STATUS_CR_MMU_FAULT_IRQ_SHIFT (8)
++
++#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_MASK (0x00004000)
++#define MSVDX_INTERRUPT_STATUS_CR_MTX_IRQ_SHIFT (14)
++
++#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_MASK (0x00000002)
++#define MSVDX_MMU_CONTROL0_CR_MMU_PAUSE_SHIFT (1)
++
++#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_MASK (0x000F0000)
++#define MSVDX_MTX_RAM_BANK_CR_MTX_RAM_BANK_SIZE_SHIFT (16)
++
++#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_MASK (0x0000FFFF)
++#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE0_SHIFT (0)
++
++#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_MASK (0xFFFF0000)
++#define MSVDX_RENDEC_BUFFER_SIZE_RENDEC_BUFFER_SIZE1_SHIFT (16)
++
++#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_MASK (0x000000FF)
++#define MSVDX_RENDEC_CONTROL1_RENDEC_DECODE_START_SIZE_SHIFT (0)
++
++#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_MASK (0x000C0000)
++#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_W_SHIFT (18)
++
++#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_MASK (0x00030000)
++#define MSVDX_RENDEC_CONTROL1_RENDEC_BURST_SIZE_R_SHIFT (16)
++
++#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_MASK (0x01000000)
++#define MSVDX_RENDEC_CONTROL1_RENDEC_EXTERNAL_MEMORY_SHIFT (24)
++
++#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_MASK (0x00000001)
++#define MSVDX_RENDEC_CONTROL0_RENDEC_INITIALISE_SHIFT (0)
++
++#define VEC_SHIFTREG_CONTROL_SR_MASTER_SELECT_MASK (0x00000300)
++#define VEC_SHIFTREG_CONTROL_SR_MASTER_SELECT_SHIFT (8)
++
++/* Start of parser specific Host->MTX messages. */
++#define FWRK_MSGID_START_PSR_HOSTMTX_MSG (0x80)
++
++/* Start of parser specific MTX->Host messages. */
++#define FWRK_MSGID_START_PSR_MTXHOST_MSG (0xC0)
++
++#define FWRK_MSGID_PADDING (0)
++
++#define FWRK_GENMSG_SIZE_TYPE uint8_t
++#define FWRK_GENMSG_SIZE_MASK (0xFF)
++#define FWRK_GENMSG_SIZE_SHIFT (0)
++#define FWRK_GENMSG_SIZE_OFFSET (0x0000)
++#define FWRK_GENMSG_ID_TYPE uint8_t
++#define FWRK_GENMSG_ID_MASK (0xFF)
++#define FWRK_GENMSG_ID_SHIFT (0)
++#define FWRK_GENMSG_ID_OFFSET (0x0001)
++#define FWRK_PADMSG_SIZE (2)
++
++/* Deblock CMD_ID */
++#define MSVDX_DEBLOCK_REG_SET 0x10000000
++#define MSVDX_DEBLOCK_REG_GET 0x20000000
++#define MSVDX_DEBLOCK_REG_POLLn 0x30000000
++#define MSVDX_DEBLOCK_REG_POLLx 0x40000000
++
++/* vec local MEM save/restore */
++#define VEC_LOCAL_MEM_BYTE_SIZE (4 * 1024)
++#define VEC_LOCAL_MEM_OFFSET 0x2000
++
++/* This type defines the framework specified message ids */
++enum {
++ /* ! Sent by the DXVA driver on the host to the mtx firmware.
++ */
++ VA_MSGID_INIT = FWRK_MSGID_START_PSR_HOSTMTX_MSG,
++ VA_MSGID_RENDER,
++ VA_MSGID_DEBLOCK,
++ VA_MSGID_BUBBLE,
++
++ /* Test Messages */
++ VA_MSGID_TEST1,
++ VA_MSGID_TEST2,
++
++ /*! Sent by the mtx firmware to itself.
++ */
++ VA_MSGID_RENDER_MC_INTERRUPT,
++
++ /*! Sent by the DXVA firmware on the MTX to the host.
++ */
++ VA_MSGID_CMD_COMPLETED = FWRK_MSGID_START_PSR_MTXHOST_MSG,
++ VA_MSGID_CMD_COMPLETED_BATCH,
++ VA_MSGID_DEBLOCK_REQUIRED,
++ VA_MSGID_TEST_RESPONCE,
++ VA_MSGID_ACK,
++
++ VA_MSGID_CMD_FAILED,
++ VA_MSGID_CMD_UNSUPPORTED,
++ VA_MSGID_CMD_HW_PANIC,
++};
++
++/* Deblock parameters */
++struct DEBLOCKPARAMS {
++ uint32_t handle; /* struct ttm_buffer_object * of REGIO */
++ uint32_t buffer_size;
++ uint32_t ctxid;
++
++ uint32_t *pPicparams;
++ struct ttm_bo_kmap_obj *regio_kmap; /* virtual of regio */
++ uint32_t pad[3];
++};
++
++struct psb_msvdx_deblock_queue {
++
++ struct list_head head;
++ struct DEBLOCKPARAMS dbParams;
++};
++
++/* MSVDX private structure */
++struct msvdx_private {
++ int msvdx_needs_reset;
++
++ unsigned int pmstate;
++
++ struct sysfs_dirent *sysfs_pmstate;
++
++ uint32_t msvdx_current_sequence;
++ uint32_t msvdx_last_sequence;
++
++ /*
++ *MSVDX Rendec Memory
++ */
++ struct ttm_buffer_object *ccb0;
++ uint32_t base_addr0;
++ struct ttm_buffer_object *ccb1;
++ uint32_t base_addr1;
++
++ /*
++ *msvdx command queue
++ */
++ spinlock_t msvdx_lock;
++ struct mutex msvdx_mutex;
++ struct list_head msvdx_queue;
++ int msvdx_busy;
++ int msvdx_fw_loaded;
++ void *msvdx_fw;
++ int msvdx_fw_size;
++
++ struct list_head deblock_queue; /* deblock parameter list */
++
++ uint32_t msvdx_hw_busy;
++
++ uint32_t *vec_local_mem_data;
++ uint32_t vec_local_mem_size;
++ uint32_t vec_local_mem_saved;
++};
++
++/* MSVDX Firmware interface */
++#define FW_VA_INIT_SIZE (8)
++#define FW_VA_DEBUG_TEST2_SIZE (4)
++
++/* FW_VA_DEBUG_TEST2 MSG_SIZE */
++#define FW_VA_DEBUG_TEST2_MSG_SIZE_TYPE uint8_t
++#define FW_VA_DEBUG_TEST2_MSG_SIZE_MASK (0xFF)
++#define FW_VA_DEBUG_TEST2_MSG_SIZE_OFFSET (0x0000)
++#define FW_VA_DEBUG_TEST2_MSG_SIZE_SHIFT (0)
++
++/* FW_VA_DEBUG_TEST2 ID */
++#define FW_VA_DEBUG_TEST2_ID_TYPE uint8_t
++#define FW_VA_DEBUG_TEST2_ID_MASK (0xFF)
++#define FW_VA_DEBUG_TEST2_ID_OFFSET (0x0001)
++#define FW_VA_DEBUG_TEST2_ID_SHIFT (0)
++
++/* FW_VA_CMD_FAILED FENCE_VALUE */
++#define FW_VA_CMD_FAILED_FENCE_VALUE_TYPE uint32_t
++#define FW_VA_CMD_FAILED_FENCE_VALUE_MASK (0xFFFFFFFF)
++#define FW_VA_CMD_FAILED_FENCE_VALUE_OFFSET (0x0004)
++#define FW_VA_CMD_FAILED_FENCE_VALUE_SHIFT (0)
++
++/* FW_VA_CMD_FAILED IRQSTATUS */
++#define FW_VA_CMD_FAILED_IRQSTATUS_TYPE uint32_t
++#define FW_VA_CMD_FAILED_IRQSTATUS_MASK (0xFFFFFFFF)
++#define FW_VA_CMD_FAILED_IRQSTATUS_OFFSET (0x0008)
++#define FW_VA_CMD_FAILED_IRQSTATUS_SHIFT (0)
++
++/* FW_VA_CMD_COMPLETED FENCE_VALUE */
++#define FW_VA_CMD_COMPLETED_FENCE_VALUE_TYPE uint32_t
++#define FW_VA_CMD_COMPLETED_FENCE_VALUE_MASK (0xFFFFFFFF)
++#define FW_VA_CMD_COMPLETED_FENCE_VALUE_OFFSET (0x0004)
++#define FW_VA_CMD_COMPLETED_FENCE_VALUE_SHIFT (0)
++
++/* FW_VA_CMD_COMPLETED FLAGS */
++#define FW_VA_CMD_COMPLETED_FLAGS_ALIGNMENT (4)
++#define FW_VA_CMD_COMPLETED_FLAGS_TYPE uint32_t
++#define FW_VA_CMD_COMPLETED_FLAGS_MASK (0xFFFFFFFF)
++#define FW_VA_CMD_COMPLETED_FLAGS_LSBMASK (0xFFFFFFFF)
++#define FW_VA_CMD_COMPLETED_FLAGS_OFFSET (0x0008)
++#define FW_VA_CMD_COMPLETED_FLAGS_SHIFT (0)
++
++/* FW_VA_CMD_COMPLETED NO_TICKS */
++#define FW_VA_CMD_COMPLETED_NO_TICKS_TYPE uint16_t
++#define FW_VA_CMD_COMPLETED_NO_TICKS_MASK (0xFFFF)
++#define FW_VA_CMD_COMPLETED_NO_TICKS_OFFSET (0x0002)
++#define FW_VA_CMD_COMPLETED_NO_TICKS_SHIFT (0)
++
++/* FW_VA_DEBLOCK_REQUIRED CONTEXT */
++#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_TYPE uint32_t
++#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_MASK (0xFFFFFFFF)
++#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_OFFSET (0x0004)
++#define FW_VA_DEBLOCK_REQUIRED_CONTEXT_SHIFT (0)
++
++/* FW_VA_INIT GLOBAL_PTD */
++#define FW_VA_INIT_GLOBAL_PTD_TYPE uint32_t
++#define FW_VA_INIT_GLOBAL_PTD_MASK (0xFFFFFFFF)
++#define FW_VA_INIT_GLOBAL_PTD_OFFSET (0x0004)
++#define FW_VA_INIT_GLOBAL_PTD_SHIFT (0)
++
++/* FW_VA_RENDER FENCE_VALUE */
++#define FW_VA_RENDER_FENCE_VALUE_TYPE uint32_t
++#define FW_VA_RENDER_FENCE_VALUE_MASK (0xFFFFFFFF)
++#define FW_VA_RENDER_FENCE_VALUE_OFFSET (0x0010)
++#define FW_VA_RENDER_FENCE_VALUE_SHIFT (0)
++
++/* FW_VA_RENDER MMUPTD */
++#define FW_VA_RENDER_MMUPTD_TYPE uint32_t
++#define FW_VA_RENDER_MMUPTD_MASK (0xFFFFFFFF)
++#define FW_VA_RENDER_MMUPTD_OFFSET (0x0004)
++#define FW_VA_RENDER_MMUPTD_SHIFT (0)
++
++/* FW_VA_RENDER BUFFER_ADDRESS */
++#define FW_VA_RENDER_BUFFER_ADDRESS_TYPE uint32_t
++#define FW_VA_RENDER_BUFFER_ADDRESS_MASK (0xFFFFFFFF)
++#define FW_VA_RENDER_BUFFER_ADDRESS_OFFSET (0x0008)
++#define FW_VA_RENDER_BUFFER_ADDRESS_SHIFT (0)
++
++/* FW_VA_RENDER BUFFER_SIZE */
++#define FW_VA_RENDER_BUFFER_SIZE_TYPE uint16_t
++#define FW_VA_RENDER_BUFFER_SIZE_MASK (0x0FFF)
++#define FW_VA_RENDER_BUFFER_SIZE_OFFSET (0x0002)
++#define FW_VA_RENDER_BUFFER_SIZE_SHIFT (0)
++
++ /* FW_DXVA_DEBLOCK MSG_SIZE */
++#define FW_DXVA_DEBLOCK_MSG_SIZE_ALIGNMENT (1)
++#define FW_DXVA_DEBLOCK_MSG_SIZE_TYPE uint8_t
++#define FW_DXVA_DEBLOCK_MSG_SIZE_MASK (0xFF)
++#define FW_DXVA_DEBLOCK_MSG_SIZE_LSBMASK (0xFF)
++#define FW_DXVA_DEBLOCK_MSG_SIZE_OFFSET (0x0000)
++#define FW_DXVA_DEBLOCK_MSG_SIZE_SHIFT (0)
++
++/* FW_DXVA_DEBLOCK ID */
++#define FW_DXVA_DEBLOCK_ID_ALIGNMENT (1)
++#define FW_DXVA_DEBLOCK_ID_TYPE uint8_t
++#define FW_DXVA_DEBLOCK_ID_MASK (0xFF)
++#define FW_DXVA_DEBLOCK_ID_LSBMASK (0xFF)
++#define FW_DXVA_DEBLOCK_ID_OFFSET (0x0001)
++#define FW_DXVA_DEBLOCK_ID_SHIFT (0)
++
++/* FW_DXVA_DEBLOCK FENCE_VALUE */
++#define FW_DXVA_DEBLOCK_FENCE_VALUE_ALIGNMENT (4)
++#define FW_DXVA_DEBLOCK_FENCE_VALUE_TYPE uint32_t
++#define FW_DXVA_DEBLOCK_FENCE_VALUE_MASK (0xFFFFFFFF)
++#define FW_DXVA_DEBLOCK_FENCE_VALUE_LSBMASK (0xFFFFFFFF)
++#define FW_DXVA_DEBLOCK_FENCE_VALUE_OFFSET (0x0008)
++#define FW_DXVA_DEBLOCK_FENCE_VALUE_SHIFT (0)
++
++/* FW_DXVA_DEBLOCK MMUPTD */
++#define FW_DXVA_DEBLOCK_MMUPTD_ALIGNMENT (4)
++#define FW_DXVA_DEBLOCK_MMUPTD_TYPE uint32_t
++#define FW_DXVA_DEBLOCK_MMUPTD_MASK (0xFFFFFFFF)
++#define FW_DXVA_DEBLOCK_MMUPTD_LSBMASK (0xFFFFFFFF)
++#define FW_DXVA_DEBLOCK_MMUPTD_OFFSET (0x000C)
++#define FW_DXVA_DEBLOCK_MMUPTD_SHIFT (0)
++
++
++static inline void psb_msvdx_clearirq(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ unsigned long mtx_int = 0;
++
++ PSB_DEBUG_IRQ("MSVDX: clear IRQ\n");
++
++ /* Clear MTX interrupt */
++ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
++ 1);
++ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
++}
++
++
++static inline void psb_msvdx_disableirq(struct drm_device *dev)
++{
++ /* nothing */
++}
++
++
++static inline void psb_msvdx_enableirq(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ unsigned long enables = 0;
++
++ PSB_DEBUG_IRQ("MSVDX: enable MSVDX MTX IRQ\n");
++ REGIO_WRITE_FIELD_LITE(enables, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
++ 1);
++ PSB_WMSVDX32(enables, MSVDX_HOST_INTERRUPT_ENABLE);
++}
++
++#define MSVDX_NEW_PMSTATE(drm_dev, msvdx_priv, new_state) \
++do { \
++ msvdx_priv->pmstate = new_state; \
++ sysfs_notify_dirent(msvdx_priv->sysfs_pmstate); \
++ PSB_DEBUG_PM("MSVDX: %s\n", \
++ (new_state == PSB_PMSTATE_POWERUP) ? "powerup": "powerdown"); \
++} while (0)
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_msvdxinit.c b/drivers/gpu/drm/mrst/drv/psb_msvdxinit.c
+new file mode 100644
+index 0000000..a543778
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_msvdxinit.c
+@@ -0,0 +1,770 @@
++/**************************************************************************
++ * psb_msvdxinit.c
++ * MSVDX initialization and mtx-firmware upload
++ *
++ * Copyright (c) 2007 Intel Corporation, Hillsboro, OR, USA
++ * Copyright (c) Imagination Technologies Limited, UK
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include "psb_drv.h"
++#include "psb_msvdx.h"
++#include <linux/firmware.h>
++
++#define MSVDX_REG (dev_priv->msvdx_reg)
++uint8_t psb_rev_id;
++/*MSVDX FW header*/
++struct msvdx_fw {
++ uint32_t ver;
++ uint32_t text_size;
++ uint32_t data_size;
++ uint32_t data_location;
++};
++
++int psb_wait_for_register(struct drm_psb_private *dev_priv,
++ uint32_t offset, uint32_t value, uint32_t enable)
++{
++ uint32_t tmp;
++ uint32_t poll_cnt = 10000;
++ while (poll_cnt) {
++ tmp = PSB_RMSVDX32(offset);
++ if (value == (tmp & enable)) /* All the bits are reset */
++ return 0; /* So exit */
++
++ /* Wait a bit */
++ DRM_UDELAY(1000);
++ poll_cnt--;
++ }
++ DRM_ERROR("MSVDX: Timeout while waiting for register %08x:"
++ " expecting %08x (mask %08x), got %08x\n",
++ offset, value, enable, tmp);
++
++ return 1;
++}
++
++int psb_poll_mtx_irq(struct drm_psb_private *dev_priv)
++{
++ int ret = 0;
++ uint32_t mtx_int = 0;
++
++ REGIO_WRITE_FIELD_LITE(mtx_int, MSVDX_INTERRUPT_STATUS, CR_MTX_IRQ,
++ 1);
++
++ ret = psb_wait_for_register(dev_priv, MSVDX_INTERRUPT_STATUS,
++ /* Required value */
++ mtx_int,
++ /* Enabled bits */
++ mtx_int);
++
++ if (ret) {
++ DRM_ERROR("MSVDX: Error Mtx did not return"
++ " int within a resonable time\n");
++ return ret;
++ }
++
++ PSB_DEBUG_IRQ("MSVDX: Got MTX Int\n");
++
++ /* Got it so clear the bit */
++ PSB_WMSVDX32(mtx_int, MSVDX_INTERRUPT_CLEAR);
++
++ return ret;
++}
++
++void psb_write_mtx_core_reg(struct drm_psb_private *dev_priv,
++ const uint32_t core_reg, const uint32_t val)
++{
++ uint32_t reg = 0;
++
++ /* Put data in MTX_RW_DATA */
++ PSB_WMSVDX32(val, MSVDX_MTX_REGISTER_READ_WRITE_DATA);
++
++ /* DREADY is set to 0 and request a write */
++ reg = core_reg;
++ REGIO_WRITE_FIELD_LITE(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
++ MTX_RNW, 0);
++ REGIO_WRITE_FIELD_LITE(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
++ MTX_DREADY, 0);
++ PSB_WMSVDX32(reg, MSVDX_MTX_REGISTER_READ_WRITE_REQUEST);
++
++ psb_wait_for_register(dev_priv,
++ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST,
++ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK,
++ MSVDX_MTX_REGISTER_READ_WRITE_REQUEST_MTX_DREADY_MASK);
++}
++
++void psb_upload_fw(struct drm_psb_private *dev_priv,
++ const uint32_t data_mem, uint32_t ram_bank_size,
++ uint32_t address, const unsigned int words,
++ const uint32_t * const data)
++{
++ uint32_t loop, ctrl, ram_id, addr, cur_bank = (uint32_t) ~0;
++ uint32_t access_ctrl;
++
++ /* Save the access control register... */
++ access_ctrl = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_CONTROL);
++
++ /* Wait for MCMSTAT to become be idle 1 */
++ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
++ 1, /* Required Value */
++ 0xffffffff /* Enables */);
++
++ for (loop = 0; loop < words; loop++) {
++ ram_id = data_mem + (address / ram_bank_size);
++ if (ram_id != cur_bank) {
++ addr = address >> 2;
++ ctrl = 0;
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCMID, ram_id);
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCM_ADDR, addr);
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCMAI, 1);
++ PSB_WMSVDX32(ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
++ cur_bank = ram_id;
++ }
++ address += 4;
++
++ PSB_WMSVDX32(data[loop],
++ MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
++
++ /* Wait for MCMSTAT to become be idle 1 */
++ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
++ 1, /* Required Value */
++ 0xffffffff /* Enables */);
++ }
++ PSB_DEBUG_GENERAL("MSVDX: Upload done\n");
++
++ /* Restore the access control register... */
++ PSB_WMSVDX32(access_ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
++}
++
++static int psb_verify_fw(struct drm_psb_private *dev_priv,
++ const uint32_t ram_bank_size,
++ const uint32_t data_mem, uint32_t address,
++ const uint32_t words, const uint32_t * const data)
++{
++ uint32_t loop, ctrl, ram_id, addr, cur_bank = (uint32_t) ~0;
++ uint32_t access_ctrl;
++ int ret = 0;
++
++ /* Save the access control register... */
++ access_ctrl = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_CONTROL);
++
++ /* Wait for MCMSTAT to become be idle 1 */
++ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
++ 1, /* Required Value */
++ 0xffffffff /* Enables */);
++
++ for (loop = 0; loop < words; loop++) {
++ uint32_t tmp;
++ ram_id = data_mem + (address / ram_bank_size);
++
++ if (ram_id != cur_bank) {
++ addr = address >> 2;
++ ctrl = 0;
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCMID, ram_id);
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCM_ADDR, addr);
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCMAI, 1);
++ REGIO_WRITE_FIELD_LITE(ctrl,
++ MSVDX_MTX_RAM_ACCESS_CONTROL,
++ MTX_MCMR, 1);
++
++ PSB_WMSVDX32(ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
++
++ cur_bank = ram_id;
++ }
++ address += 4;
++
++ /* Wait for MCMSTAT to become be idle 1 */
++ psb_wait_for_register(dev_priv, MSVDX_MTX_RAM_ACCESS_STATUS,
++ 1, /* Required Value */
++ 0xffffffff /* Enables */);
++
++ tmp = PSB_RMSVDX32(MSVDX_MTX_RAM_ACCESS_DATA_TRANSFER);
++ if (data[loop] != tmp) {
++ DRM_ERROR("psb: Firmware validation fails"
++ " at index=%08x\n", loop);
++ ret = 1;
++ break;
++ }
++ }
++
++ /* Restore the access control register... */
++ PSB_WMSVDX32(access_ctrl, MSVDX_MTX_RAM_ACCESS_CONTROL);
++
++ return ret;
++}
++
++static uint32_t *msvdx_get_fw(struct drm_device *dev,
++ const struct firmware **raw, uint8_t *name)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int rc, fw_size;
++ int *ptr = NULL;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ rc = request_firmware(raw, name, &dev->pdev->dev);
++ if (rc < 0) {
++ DRM_ERROR("MSVDX: %s request_firmware failed: Reason %d\n",
++ name, rc);
++ return NULL;
++ }
++
++ if ((*raw)->size < sizeof(struct msvdx_fw)) {
++ DRM_ERROR("MSVDX: %s is is not correct size(%zd)\n",
++ name, (*raw)->size);
++ return NULL;
++ }
++
++ ptr = (int *) ((*raw))->data;
++
++ if (!ptr) {
++ DRM_ERROR("MSVDX: Failed to load %s\n", name);
++ return NULL;
++ }
++
++ /* another sanity check... */
++ fw_size = sizeof(struct msvdx_fw) +
++ sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->text_size +
++ sizeof(uint32_t) * ((struct msvdx_fw *) ptr)->data_size;
++ if ((*raw)->size != fw_size) {
++ DRM_ERROR("MSVDX: %s is is not correct size(%zd)\n",
++ name, (*raw)->size);
++ return NULL;
++ }
++ msvdx_priv->msvdx_fw = kzalloc(fw_size, GFP_KERNEL);
++ if (msvdx_priv->msvdx_fw == NULL)
++ DRM_ERROR("MSVDX: allocate FW buffer failed\n");
++ else {
++ memcpy(msvdx_priv->msvdx_fw, ptr, fw_size);
++ msvdx_priv->msvdx_fw_size = fw_size;
++ }
++
++ PSB_DEBUG_GENERAL("MSVDX: releasing firmware resouces\n");
++ release_firmware(*raw);
++
++ return msvdx_priv->msvdx_fw;
++}
++
++int psb_setup_fw(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int ret = 0;
++
++ uint32_t ram_bank_size;
++ struct msvdx_fw *fw;
++ uint32_t *fw_ptr = NULL;
++ uint32_t *text_ptr = NULL;
++ uint32_t *data_ptr = NULL;
++ const struct firmware *raw = NULL;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ /* todo : Assert the clock is on - if not turn it on to upload code */
++ PSB_DEBUG_GENERAL("MSVDX: psb_setup_fw\n");
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++
++ /* Reset MTX */
++ PSB_WMSVDX32(MSVDX_MTX_SOFT_RESET_MTX_RESET_MASK,
++ MSVDX_MTX_SOFT_RESET);
++
++ /* Initialses Communication controll area to 0 */
++/*
++ if (psb_rev_id >= POULSBO_D1) {
++ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D1"
++ " or later revision.\n");
++ PSB_WMSVDX32(MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D1,
++ MSVDX_COMMS_OFFSET_FLAGS);
++ } else {
++ PSB_DEBUG_GENERAL("MSVDX: Detected Poulsbo D0"
++ " or earlier revision.\n");
++ PSB_WMSVDX32(MSVDX_DEVICE_NODE_FLAGS_DEFAULT_D0,
++ MSVDX_COMMS_OFFSET_FLAGS);
++ }
++*/
++
++ PSB_WMSVDX32(0, MSVDX_COMMS_MSG_COUNTER);
++ PSB_WMSVDX32(0, MSVDX_COMMS_SIGNATURE);
++ PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_RD_INDEX);
++ PSB_WMSVDX32(0, MSVDX_COMMS_TO_HOST_WRT_INDEX);
++ PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_RD_INDEX);
++ PSB_WMSVDX32(0, MSVDX_COMMS_TO_MTX_WRT_INDEX);
++ PSB_WMSVDX32(0, MSVDX_COMMS_FW_STATUS);
++ PSB_WMSVDX32(0, MSVDX_COMMS_OFFSET_FLAGS);
++ PSB_WMSVDX32(0, MSVDX_COMMS_SIGNATURE);
++ /* read register bank size */
++ {
++ uint32_t bank_size, reg;
++ reg = PSB_RMSVDX32(MSVDX_MTX_RAM_BANK);
++ bank_size =
++ REGIO_READ_FIELD(reg, MSVDX_MTX_RAM_BANK,
++ CR_MTX_RAM_BANK_SIZE);
++ ram_bank_size = (uint32_t) (1 << (bank_size + 2));
++ }
++
++ PSB_DEBUG_GENERAL("MSVDX: RAM bank size = %d bytes\n",
++ ram_bank_size);
++
++ /* if FW already loaded from storage */
++ if (msvdx_priv->msvdx_fw)
++ fw_ptr = msvdx_priv->msvdx_fw;
++ else {
++ PSB_DEBUG_GENERAL("MSVDX:load msvdx_fw.bin by udevd\n");
++ fw_ptr = msvdx_get_fw(dev, &raw, "msvdx_fw.bin");
++ }
++
++ if (!fw_ptr) {
++ DRM_ERROR("MSVDX:load msvdx_fw.bin failed,is udevd running?\n");
++ ret = 1;
++ goto out;
++ }
++
++ fw = (struct msvdx_fw *) fw_ptr;
++ if (fw->ver != 0x02) {
++ DRM_ERROR("psb: msvdx_fw.bin firmware version mismatch,"
++ "got version=%02x expected version=%02x\n",
++ fw->ver, 0x02);
++ ret = 1;
++ goto out;
++ }
++
++ text_ptr =
++ (uint32_t *) ((uint8_t *) fw_ptr + sizeof(struct msvdx_fw));
++ data_ptr = text_ptr + fw->text_size;
++
++ if (fw->text_size == 2858)
++ PSB_DEBUG_GENERAL(
++ "MSVDX: FW ver 1.00.10.0187 of SliceSwitch variant\n");
++ else if (fw->text_size == 3021)
++ PSB_DEBUG_GENERAL(
++ "MSVDX: FW ver 1.00.10.0187 of FrameSwitch variant\n");
++ else if (fw->text_size == 2841)
++ PSB_DEBUG_GENERAL("MSVDX: FW ver 1.00.10.0788\n");
++ else
++ PSB_DEBUG_GENERAL("MSVDX: FW ver unknown\n");
++
++
++ PSB_DEBUG_GENERAL("MSVDX: Retrieved pointers for firmware\n");
++ PSB_DEBUG_GENERAL("MSVDX: text_size: %d\n", fw->text_size);
++ PSB_DEBUG_GENERAL("MSVDX: data_size: %d\n", fw->data_size);
++ PSB_DEBUG_GENERAL("MSVDX: data_location: 0x%x\n",
++ fw->data_location);
++ PSB_DEBUG_GENERAL("MSVDX: First 4 bytes of text: 0x%x\n",
++ *text_ptr);
++ PSB_DEBUG_GENERAL("MSVDX: First 4 bytes of data: 0x%x\n",
++ *data_ptr);
++
++ PSB_DEBUG_GENERAL("MSVDX: Uploading firmware\n");
++ psb_upload_fw(dev_priv, MTX_CORE_CODE_MEM, ram_bank_size,
++ PC_START_ADDRESS - MTX_CODE_BASE, fw->text_size,
++ text_ptr);
++ psb_upload_fw(dev_priv, MTX_CORE_DATA_MEM, ram_bank_size,
++ fw->data_location - MTX_DATA_BASE, fw->data_size,
++ data_ptr);
++
++#if 0
++ /* todo : Verify code upload possibly only in debug */
++ ret = psb_verify_fw(dev_priv, ram_bank_size,
++ MTX_CORE_CODE_MEM,
++ PC_START_ADDRESS - MTX_CODE_BASE,
++ fw->text_size, text_ptr);
++ if (ret) {
++ /* Firmware code upload failed */
++ ret = 1;
++ goto out;
++ }
++
++ ret = psb_verify_fw(dev_priv, ram_bank_size, MTX_CORE_DATA_MEM,
++ fw->data_location - MTX_DATA_BASE,
++ fw->data_size, data_ptr);
++ if (ret) {
++ /* Firmware data upload failed */
++ ret = 1;
++ goto out;
++ }
++#else
++ (void)psb_verify_fw;
++#endif
++ /* -- Set starting PC address */
++ psb_write_mtx_core_reg(dev_priv, MTX_PC, PC_START_ADDRESS);
++
++ /* -- Turn on the thread */
++ PSB_WMSVDX32(MSVDX_MTX_ENABLE_MTX_ENABLE_MASK, MSVDX_MTX_ENABLE);
++
++ /* Wait for the signature value to be written back */
++ ret = psb_wait_for_register(dev_priv, MSVDX_COMMS_SIGNATURE,
++ MSVDX_COMMS_SIGNATURE_VALUE, /*Required value*/
++ 0xffffffff /* Enabled bits */);
++ if (ret) {
++ DRM_ERROR("MSVDX: firmware fails to initialize.\n");
++ goto out;
++ }
++
++ PSB_DEBUG_GENERAL("MSVDX: MTX Initial indications OK\n");
++ PSB_DEBUG_GENERAL("MSVDX: MSVDX_COMMS_AREA_ADDR = %08x\n",
++ MSVDX_COMMS_AREA_ADDR);
++#if 0
++
++ /* Send test message */
++ {
++ uint32_t msg_buf[FW_VA_DEBUG_TEST2_SIZE >> 2];
++
++ MEMIO_WRITE_FIELD(msg_buf, FW_VA_DEBUG_TEST2_MSG_SIZE,
++ FW_VA_DEBUG_TEST2_SIZE);
++ MEMIO_WRITE_FIELD(msg_buf, FW_VA_DEBUG_TEST2_ID,
++ VA_MSGID_TEST2);
++
++ ret = psb_mtx_send(dev_priv, msg_buf);
++ if (ret) {
++ DRM_ERROR("psb: MSVDX sending fails.\n");
++ goto out;
++ }
++
++ /* Wait for Mtx to ack this message */
++ psb_poll_mtx_irq(dev_priv);
++
++ }
++#endif
++out:
++
++ return ret;
++}
++
++
++static void psb_free_ccb(struct ttm_buffer_object **ccb)
++{
++ ttm_bo_unref(ccb);
++ *ccb = NULL;
++}
++
++/**
++ * Reset chip and disable interrupts.
++ * Return 0 success, 1 failure
++ */
++int psb_msvdx_reset(struct drm_psb_private *dev_priv)
++{
++ int ret = 0;
++
++ /* Issue software reset */
++ PSB_WMSVDX32(msvdx_sw_reset_all, MSVDX_CONTROL);
++
++ ret = psb_wait_for_register(dev_priv, MSVDX_CONTROL, 0,
++ MSVDX_CONTROL_CR_MSVDX_SOFT_RESET_MASK);
++
++ if (!ret) {
++ /* Clear interrupt enabled flag */
++ PSB_WMSVDX32(0, MSVDX_HOST_INTERRUPT_ENABLE);
++
++ /* Clear any pending interrupt flags */
++ PSB_WMSVDX32(0xFFFFFFFF, MSVDX_INTERRUPT_CLEAR);
++ }
++
++ /* mutex_destroy(&msvdx_priv->msvdx_mutex); */
++
++ return ret;
++}
++
++static int psb_allocate_ccb(struct drm_device *dev,
++ struct ttm_buffer_object **ccb,
++ uint32_t *base_addr, unsigned long size)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct ttm_bo_device *bdev = &dev_priv->bdev;
++ int ret;
++ struct ttm_bo_kmap_obj tmp_kmap;
++ bool is_iomem;
++
++ PSB_DEBUG_INIT("MSVDX: allocate CCB\n");
++
++ ret = ttm_buffer_object_create(bdev, size,
++ ttm_bo_type_kernel,
++ DRM_PSB_FLAG_MEM_MMU |
++ TTM_PL_FLAG_NO_EVICT, 0, 0, 0,
++ NULL, ccb);
++ if (ret) {
++ DRM_ERROR("MSVDX:failed to allocate CCB.\n");
++ *ccb = NULL;
++ return 1;
++ }
++
++ ret = ttm_bo_kmap(*ccb, 0, (*ccb)->num_pages, &tmp_kmap);
++ if (ret) {
++ PSB_DEBUG_GENERAL("ttm_bo_kmap failed ret: %d\n", ret);
++ ttm_bo_unref(ccb);
++ *ccb = NULL;
++ return 1;
++ }
++/*
++ memset(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), 0,
++ RENDEC_A_SIZE);
++*/
++ memset(ttm_kmap_obj_virtual(&tmp_kmap, &is_iomem), 0,
++ size);
++ ttm_bo_kunmap(&tmp_kmap);
++
++ *base_addr = (*ccb)->offset;
++ return 0;
++}
++
++static ssize_t psb_msvdx_pmstate_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct drm_device *drm_dev = dev_get_drvdata(dev);
++ struct drm_psb_private *dev_priv;
++ struct msvdx_private *msvdx_priv;
++ unsigned int pmstate;
++ unsigned long flags;
++ int ret = -EINVAL;
++
++ if (drm_dev == NULL)
++ return 0;
++
++ dev_priv = drm_dev->dev_private;
++ msvdx_priv = dev_priv->msvdx_private;
++ pmstate = msvdx_priv->pmstate;
++
++ spin_lock_irqsave(&msvdx_priv->msvdx_lock, flags);
++ ret = sprintf(buf, "%s\n",
++ (pmstate == PSB_PMSTATE_POWERUP) ? "powerup" : "powerdown");
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, flags);
++
++ return ret;
++}
++
++static DEVICE_ATTR(msvdx_pmstate, 0444, psb_msvdx_pmstate_show, NULL);
++
++int psb_msvdx_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ /* uint32_t clk_gate_ctrl = clk_enable_all; */
++ uint32_t cmd;
++ int ret;
++ struct msvdx_private *msvdx_priv;
++
++ if (!dev_priv->msvdx_private) {
++ msvdx_priv = kmalloc(sizeof(struct msvdx_private), GFP_KERNEL);
++ if (msvdx_priv == NULL)
++ goto err_exit;
++
++ dev_priv->msvdx_private = msvdx_priv;
++ memset(msvdx_priv, 0, sizeof(struct msvdx_private));
++
++ /* get device --> drm_device --> drm_psb_private --> msvdx_priv
++ * for psb_msvdx_pmstate_show: msvdx_pmpolicy
++ * if not pci_set_drvdata, can't get drm_device from device
++ */
++ /* pci_set_drvdata(dev->pdev, dev); */
++ if (device_create_file(&dev->pdev->dev,
++ &dev_attr_msvdx_pmstate))
++ DRM_ERROR("MSVDX: could not create sysfs file\n");
++ msvdx_priv->sysfs_pmstate = sysfs_get_dirent(
++ dev->pdev->dev.kobj.sd, "msvdx_pmstate");
++ }
++
++ msvdx_priv = dev_priv->msvdx_private;
++ if (!msvdx_priv->ccb0) { /* one for the first time */
++ /* Initialize comand msvdx queueing */
++ INIT_LIST_HEAD(&msvdx_priv->msvdx_queue);
++ INIT_LIST_HEAD(&msvdx_priv->deblock_queue);
++ mutex_init(&msvdx_priv->msvdx_mutex);
++ spin_lock_init(&msvdx_priv->msvdx_lock);
++ /*figure out the stepping */
++ pci_read_config_byte(dev->pdev, PSB_REVID_OFFSET, &psb_rev_id);
++ }
++
++ msvdx_priv->vec_local_mem_size = VEC_LOCAL_MEM_BYTE_SIZE;
++ if (!msvdx_priv->vec_local_mem_data) {
++ msvdx_priv->vec_local_mem_data =
++ kmalloc(msvdx_priv->vec_local_mem_size, GFP_KERNEL);
++ memset(msvdx_priv->vec_local_mem_data, 0, msvdx_priv->vec_local_mem_size);
++ }
++
++ msvdx_priv->msvdx_busy = 0;
++ msvdx_priv->msvdx_hw_busy = 1;
++
++ /* Enable Clocks */
++ PSB_DEBUG_GENERAL("Enabling clocks\n");
++ PSB_WMSVDX32(clk_enable_all, MSVDX_MAN_CLK_ENABLE);
++
++
++ /* Enable MMU by removing all bypass bits */
++ PSB_WMSVDX32(0, MSVDX_MMU_CONTROL0);
++
++ /* move firmware loading to the place receiving first command buffer */
++
++ PSB_DEBUG_GENERAL("MSVDX: Setting up RENDEC,allocate CCB 0/1\n");
++ /* Allocate device virtual memory as required by rendec.... */
++ if (!msvdx_priv->ccb0) {
++ ret = psb_allocate_ccb(dev, &msvdx_priv->ccb0,
++ &msvdx_priv->base_addr0,
++ RENDEC_A_SIZE);
++ if (ret) {
++ PSB_DEBUG_GENERAL("Allocate Rendec A fail\n");
++ goto err_exit;
++ }
++ }
++
++ if (!msvdx_priv->ccb1) {
++ ret = psb_allocate_ccb(dev, &msvdx_priv->ccb1,
++ &msvdx_priv->base_addr1,
++ RENDEC_B_SIZE);
++ if (ret)
++ goto err_exit;
++ }
++
++
++ PSB_DEBUG_GENERAL("MSVDX: RENDEC A: %08x RENDEC B: %08x\n",
++ msvdx_priv->base_addr0, msvdx_priv->base_addr1);
++
++ PSB_WMSVDX32(msvdx_priv->base_addr0, MSVDX_RENDEC_BASE_ADDR0);
++ PSB_WMSVDX32(msvdx_priv->base_addr1, MSVDX_RENDEC_BASE_ADDR1);
++
++ cmd = 0;
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE,
++ RENDEC_BUFFER_SIZE0, RENDEC_A_SIZE / 4096);
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_BUFFER_SIZE,
++ RENDEC_BUFFER_SIZE1, RENDEC_B_SIZE / 4096);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_BUFFER_SIZE);
++
++
++ cmd = 0;
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
++ RENDEC_DECODE_START_SIZE, 0);
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
++ RENDEC_BURST_SIZE_W, 1);
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
++ RENDEC_BURST_SIZE_R, 1);
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL1,
++ RENDEC_EXTERNAL_MEMORY, 1);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTROL1);
++
++ cmd = 0x00101010;
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT0);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT1);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT2);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT3);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT4);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTEXT5);
++
++ cmd = 0;
++ REGIO_WRITE_FIELD(cmd, MSVDX_RENDEC_CONTROL0, RENDEC_INITIALISE,
++ 1);
++ PSB_WMSVDX32(cmd, MSVDX_RENDEC_CONTROL0);
++
++ /* PSB_WMSVDX32(clk_enable_minimal, MSVDX_MAN_CLK_ENABLE); */
++ PSB_DEBUG_INIT("MSVDX:defer firmware loading to the"
++ " place when receiving user space commands\n");
++
++ msvdx_priv->msvdx_fw_loaded = 0; /* need to load firware */
++
++ psb_msvdx_clearirq(dev);
++ psb_msvdx_enableirq(dev);
++
++ if (IS_MRST(dev)) {
++ PSB_DEBUG_INIT("MSDVX:old clock gating disable = 0x%08x\n",
++ PSB_RVDC32(PSB_MSVDX_CLOCKGATING));
++ }
++
++ {
++ cmd = 0;
++ cmd = PSB_RMSVDX32(0x818); /* VEC_SHIFTREG_CONTROL */
++ REGIO_WRITE_FIELD(cmd,
++ VEC_SHIFTREG_CONTROL,
++ SR_MASTER_SELECT,
++ 1); /* Host */
++ PSB_WMSVDX32(cmd, 0x818);
++ }
++
++#if 0
++ ret = psb_setup_fw(dev);
++ if (ret)
++ goto err_exit;
++ /* Send Initialisation message to firmware */
++ if (0) {
++ uint32_t msg_init[FW_VA_INIT_SIZE >> 2];
++ MEMIO_WRITE_FIELD(msg_init, FWRK_GENMSG_SIZE,
++ FW_VA_INIT_SIZE);
++ MEMIO_WRITE_FIELD(msg_init, FWRK_GENMSG_ID, VA_MSGID_INIT);
++
++ /* Need to set this for all but A0 */
++ MEMIO_WRITE_FIELD(msg_init, FW_VA_INIT_GLOBAL_PTD,
++ psb_get_default_pd_addr(dev_priv->mmu));
++
++ ret = psb_mtx_send(dev_priv, msg_init);
++ if (ret)
++ goto err_exit;
++
++ psb_poll_mtx_irq(dev_priv);
++ }
++#endif
++
++ return 0;
++
++err_exit:
++ DRM_ERROR("MSVDX: initialization failed\n");
++ if (msvdx_priv->ccb0)
++ psb_free_ccb(&msvdx_priv->ccb0);
++ if (msvdx_priv->ccb1)
++ psb_free_ccb(&msvdx_priv->ccb1);
++ kfree(dev_priv->msvdx_private);
++
++ return 1;
++}
++
++int psb_msvdx_uninit(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ /* Reset MSVDX chip */
++ psb_msvdx_reset(dev_priv);
++
++ /* PSB_WMSVDX32 (clk_enable_minimal, MSVDX_MAN_CLK_ENABLE); */
++ PSB_DEBUG_INIT("MSVDX:set the msvdx clock to 0\n");
++ PSB_WMSVDX32(0, MSVDX_MAN_CLK_ENABLE);
++
++ if (msvdx_priv->ccb0)
++ psb_free_ccb(&msvdx_priv->ccb0);
++ if (msvdx_priv->ccb1)
++ psb_free_ccb(&msvdx_priv->ccb1);
++ if (msvdx_priv->msvdx_fw)
++ kfree(msvdx_priv->msvdx_fw
++ );
++ if (msvdx_priv->vec_local_mem_data)
++ kfree(msvdx_priv->vec_local_mem_data);
++
++ if (msvdx_priv) {
++ /* pci_set_drvdata(dev->pdev, NULL); */
++ device_remove_file(&dev->pdev->dev, &dev_attr_msvdx_pmstate);
++ sysfs_put(msvdx_priv->sysfs_pmstate);
++ msvdx_priv->sysfs_pmstate = NULL;
++
++ kfree(msvdx_priv);
++ dev_priv->msvdx_private = NULL;
++ }
++
++ return 0;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_pvr_glue.c b/drivers/gpu/drm/mrst/drv/psb_pvr_glue.c
+new file mode 100644
+index 0000000..cb11475
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_pvr_glue.c
+@@ -0,0 +1,74 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include "psb_pvr_glue.h"
++
++/**
++ * FIXME: should NOT use these file under env/linux directly
++ */
++#include "mm.h"
++
++int psb_get_meminfo_by_handle(IMG_HANDLE hKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = IMG_NULL;
++ PVRSRV_PER_PROCESS_DATA *psPerProc = IMG_NULL;
++ PVRSRV_ERROR eError;
++
++ psPerProc = PVRSRVPerProcessData(OSGetCurrentProcessIDKM());
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID *)&psKernelMemInfo,
++ hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if (eError != PVRSRV_OK) {
++ DRM_ERROR("Cannot find kernel meminfo for handle %lx\n",
++ (IMG_UINT32)hKernelMemInfo);
++ return -EINVAL;
++ }
++
++ *ppsKernelMemInfo = psKernelMemInfo;
++
++ DRM_DEBUG("Got Kernel MemInfo for handle %lx\n",
++ (IMG_UINT32)hKernelMemInfo);
++ return 0;
++}
++
++IMG_UINT32 psb_get_tgid(void)
++{
++ return OSGetCurrentProcessIDKM();
++}
++
++int psb_get_pages_by_mem_handle(IMG_HANDLE hOSMemHandle, struct page ***pages)
++{
++ LinuxMemArea *psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++ struct page **page_list;
++
++ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_ALLOC_PAGES) {
++ DRM_ERROR("MemArea type is not LINUX_MEM_AREA_ALLOC_PAGES\n");
++ return -EINVAL;
++ }
++
++ page_list = psLinuxMemArea->uData.sPageList.pvPageList;
++ if (!page_list) {
++ DRM_DEBUG("Page List is NULL\n");
++ return -ENOMEM;
++ }
++
++ *pages = page_list;
++ return 0;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_pvr_glue.h b/drivers/gpu/drm/mrst/drv/psb_pvr_glue.h
+new file mode 100644
+index 0000000..3c2ae45
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_pvr_glue.h
+@@ -0,0 +1,26 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include "psb_drv.h"
++#include "services_headers.h"
++
++extern int psb_get_meminfo_by_handle(IMG_HANDLE hKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo);
++extern IMG_UINT32 psb_get_tgid(void);
++extern int psb_get_pages_by_mem_handle(IMG_HANDLE hOSMemHandle,
++ struct page ***pages);
+diff --git a/drivers/gpu/drm/mrst/drv/psb_reg.h b/drivers/gpu/drm/mrst/drv/psb_reg.h
+new file mode 100644
+index 0000000..ea1e812
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_reg.h
+@@ -0,0 +1,570 @@
++/**************************************************************************
++ *
++ * Copyright (c) (2005-2007) Imagination Technologies Limited.
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA..
++ *
++ **************************************************************************/
++
++#ifndef _PSB_REG_H_
++#define _PSB_REG_H_
++
++#define PSB_CR_CLKGATECTL 0x0000
++#define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24)
++#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20)
++#define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20)
++#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16)
++#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16)
++#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12)
++#define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12)
++#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8)
++#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8)
++#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4)
++#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4)
++#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0)
++#define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0)
++#define _PSB_C_CLKGATECTL_CLKG_ENABLED (0)
++#define _PSB_C_CLKGATECTL_CLKG_DISABLED (1)
++#define _PSB_C_CLKGATECTL_CLKG_AUTO (2)
++
++#define PSB_CR_CORE_ID 0x0010
++#define _PSB_CC_ID_ID_SHIFT (16)
++#define _PSB_CC_ID_ID_MASK (0xFFFF << 16)
++#define _PSB_CC_ID_CONFIG_SHIFT (0)
++#define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0)
++
++#define PSB_CR_CORE_REVISION 0x0014
++#define _PSB_CC_REVISION_DESIGNER_SHIFT (24)
++#define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24)
++#define _PSB_CC_REVISION_MAJOR_SHIFT (16)
++#define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16)
++#define _PSB_CC_REVISION_MINOR_SHIFT (8)
++#define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8)
++#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0)
++#define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0)
++
++#define PSB_CR_DESIGNER_REV_FIELD1 0x0018
++
++#define PSB_CR_SOFT_RESET 0x0080
++#define _PSB_CS_RESET_TSP_RESET (1 << 6)
++#define _PSB_CS_RESET_ISP_RESET (1 << 5)
++#define _PSB_CS_RESET_USE_RESET (1 << 4)
++#define _PSB_CS_RESET_TA_RESET (1 << 3)
++#define _PSB_CS_RESET_DPM_RESET (1 << 2)
++#define _PSB_CS_RESET_TWOD_RESET (1 << 1)
++#define _PSB_CS_RESET_BIF_RESET (1 << 0)
++
++#define PSB_CR_DESIGNER_REV_FIELD2 0x001C
++
++#define PSB_CR_EVENT_HOST_ENABLE2 0x0110
++
++#define PSB_CR_EVENT_STATUS2 0x0118
++
++#define PSB_CR_EVENT_HOST_CLEAR2 0x0114
++#define _PSB_CE2_BIF_REQUESTER_FAULT (1 << 4)
++
++#define PSB_CR_EVENT_STATUS 0x012C
++
++#define PSB_CR_EVENT_HOST_ENABLE 0x0130
++
++#define PSB_CR_EVENT_HOST_CLEAR 0x0134
++#define _PSB_CE_MASTER_INTERRUPT (1 << 31)
++#define _PSB_CE_TA_DPM_FAULT (1 << 28)
++#define _PSB_CE_TWOD_COMPLETE (1 << 27)
++#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25)
++#define _PSB_CE_DPM_TA_MEM_FREE (1 << 24)
++#define _PSB_CE_PIXELBE_END_RENDER (1 << 18)
++#define _PSB_CE_SW_EVENT (1 << 14)
++#define _PSB_CE_TA_FINISHED (1 << 13)
++#define _PSB_CE_TA_TERMINATE (1 << 12)
++#define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3)
++#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2)
++#define _PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1)
++#define _PSB_CE_DPM_3D_MEM_FREE (1 << 0)
++
++
++#define PSB_USE_OFFSET_MASK 0x0007FFFF
++#define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1)
++#define PSB_CR_USE_CODE_BASE0 0x0A0C
++#define PSB_CR_USE_CODE_BASE1 0x0A10
++#define PSB_CR_USE_CODE_BASE2 0x0A14
++#define PSB_CR_USE_CODE_BASE3 0x0A18
++#define PSB_CR_USE_CODE_BASE4 0x0A1C
++#define PSB_CR_USE_CODE_BASE5 0x0A20
++#define PSB_CR_USE_CODE_BASE6 0x0A24
++#define PSB_CR_USE_CODE_BASE7 0x0A28
++#define PSB_CR_USE_CODE_BASE8 0x0A2C
++#define PSB_CR_USE_CODE_BASE9 0x0A30
++#define PSB_CR_USE_CODE_BASE10 0x0A34
++#define PSB_CR_USE_CODE_BASE11 0x0A38
++#define PSB_CR_USE_CODE_BASE12 0x0A3C
++#define PSB_CR_USE_CODE_BASE13 0x0A40
++#define PSB_CR_USE_CODE_BASE14 0x0A44
++#define PSB_CR_USE_CODE_BASE15 0x0A48
++#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2))
++#define _PSB_CUC_BASE_DM_SHIFT (25)
++#define _PSB_CUC_BASE_DM_MASK (0x3 << 25)
++#define _PSB_CUC_BASE_ADDR_SHIFT (0) /* 1024-bit aligned address? */
++#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7)
++#define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0)
++#define _PSB_CUC_DM_VERTEX (0)
++#define _PSB_CUC_DM_PIXEL (1)
++#define _PSB_CUC_DM_RESERVED (2)
++#define _PSB_CUC_DM_EDM (3)
++
++#define PSB_CR_PDS_EXEC_BASE 0x0AB8
++#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) /* 1MB aligned address */
++#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20)
++
++#define PSB_CR_EVENT_KICKER 0x0AC4
++#define _PSB_CE_KICKER_ADDRESS_SHIFT (4) /* 128-bit aligned address */
++
++#define PSB_CR_EVENT_KICK 0x0AC8
++#define _PSB_CE_KICK_NOW (1 << 0)
++
++
++#define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38
++
++#define PSB_CR_BIF_CTRL 0x0C00
++#define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4)
++#define _PSB_CB_CTRL_INVALDC (1 << 3)
++#define _PSB_CB_CTRL_FLUSH (1 << 2)
++
++#define PSB_CR_BIF_INT_STAT 0x0C04
++
++#define PSB_CR_BIF_FAULT 0x0C08
++#define _PSB_CBI_STAT_PF_N_RW (1 << 14)
++#define _PSB_CBI_STAT_FAULT_SHIFT (0)
++#define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0)
++#define _PSB_CBI_STAT_FAULT_CACHE (1 << 1)
++#define _PSB_CBI_STAT_FAULT_TA (1 << 2)
++#define _PSB_CBI_STAT_FAULT_VDM (1 << 3)
++#define _PSB_CBI_STAT_FAULT_2D (1 << 4)
++#define _PSB_CBI_STAT_FAULT_PBE (1 << 5)
++#define _PSB_CBI_STAT_FAULT_TSP (1 << 6)
++#define _PSB_CBI_STAT_FAULT_ISP (1 << 7)
++#define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8)
++#define _PSB_CBI_STAT_FAULT_HOST (1 << 9)
++
++#define PSB_CR_BIF_BANK0 0x0C78
++
++#define PSB_CR_BIF_BANK1 0x0C7C
++
++#define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84
++
++#define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88
++#define PSB_CR_BIF_3D_REQ_BASE 0x0CAC
++
++#define PSB_CR_2D_SOCIF 0x0E18
++#define _PSB_C2_SOCIF_FREESPACE_SHIFT (0)
++#define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0)
++#define _PSB_C2_SOCIF_EMPTY (0x80 << 0)
++
++#define PSB_CR_2D_BLIT_STATUS 0x0E04
++#define _PSB_C2B_STATUS_BUSY (1 << 24)
++#define _PSB_C2B_STATUS_COMPLETE_SHIFT (0)
++#define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0)
++
++/*
++ * 2D defs.
++ */
++
++/*
++ * 2D Slave Port Data : Block Header's Object Type
++ */
++
++#define PSB_2D_CLIP_BH (0x00000000)
++#define PSB_2D_PAT_BH (0x10000000)
++#define PSB_2D_CTRL_BH (0x20000000)
++#define PSB_2D_SRC_OFF_BH (0x30000000)
++#define PSB_2D_MASK_OFF_BH (0x40000000)
++#define PSB_2D_RESERVED1_BH (0x50000000)
++#define PSB_2D_RESERVED2_BH (0x60000000)
++#define PSB_2D_FENCE_BH (0x70000000)
++#define PSB_2D_BLIT_BH (0x80000000)
++#define PSB_2D_SRC_SURF_BH (0x90000000)
++#define PSB_2D_DST_SURF_BH (0xA0000000)
++#define PSB_2D_PAT_SURF_BH (0xB0000000)
++#define PSB_2D_SRC_PAL_BH (0xC0000000)
++#define PSB_2D_PAT_PAL_BH (0xD0000000)
++#define PSB_2D_MASK_SURF_BH (0xE0000000)
++#define PSB_2D_FLUSH_BH (0xF0000000)
++
++/*
++ * Clip Definition block (PSB_2D_CLIP_BH)
++ */
++#define PSB_2D_CLIPCOUNT_MAX (1)
++#define PSB_2D_CLIPCOUNT_MASK (0x00000000)
++#define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF)
++#define PSB_2D_CLIPCOUNT_SHIFT (0)
++/* clip rectangle min & max */
++#define PSB_2D_CLIP_XMAX_MASK (0x00FFF000)
++#define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF)
++#define PSB_2D_CLIP_XMAX_SHIFT (12)
++#define PSB_2D_CLIP_XMIN_MASK (0x00000FFF)
++#define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000)
++#define PSB_2D_CLIP_XMIN_SHIFT (0)
++/* clip rectangle offset */
++#define PSB_2D_CLIP_YMAX_MASK (0x00FFF000)
++#define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF)
++#define PSB_2D_CLIP_YMAX_SHIFT (12)
++#define PSB_2D_CLIP_YMIN_MASK (0x00000FFF)
++#define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000)
++#define PSB_2D_CLIP_YMIN_SHIFT (0)
++
++/*
++ * Pattern Control (PSB_2D_PAT_BH)
++ */
++#define PSB_2D_PAT_HEIGHT_MASK (0x0000001F)
++#define PSB_2D_PAT_HEIGHT_SHIFT (0)
++#define PSB_2D_PAT_WIDTH_MASK (0x000003E0)
++#define PSB_2D_PAT_WIDTH_SHIFT (5)
++#define PSB_2D_PAT_YSTART_MASK (0x00007C00)
++#define PSB_2D_PAT_YSTART_SHIFT (10)
++#define PSB_2D_PAT_XSTART_MASK (0x000F8000)
++#define PSB_2D_PAT_XSTART_SHIFT (15)
++
++/*
++ * 2D Control block (PSB_2D_CTRL_BH)
++ */
++/* Present Flags */
++#define PSB_2D_SRCCK_CTRL (0x00000001)
++#define PSB_2D_DSTCK_CTRL (0x00000002)
++#define PSB_2D_ALPHA_CTRL (0x00000004)
++/* Colour Key Colour (SRC/DST)*/
++#define PSB_2D_CK_COL_MASK (0xFFFFFFFF)
++#define PSB_2D_CK_COL_CLRMASK (0x00000000)
++#define PSB_2D_CK_COL_SHIFT (0)
++/* Colour Key Mask (SRC/DST)*/
++#define PSB_2D_CK_MASK_MASK (0xFFFFFFFF)
++#define PSB_2D_CK_MASK_CLRMASK (0x00000000)
++#define PSB_2D_CK_MASK_SHIFT (0)
++/* Alpha Control (Alpha/RGB)*/
++#define PSB_2D_GBLALPHA_MASK (0x000FF000)
++#define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF)
++#define PSB_2D_GBLALPHA_SHIFT (12)
++#define PSB_2D_SRCALPHA_OP_MASK (0x00700000)
++#define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF)
++#define PSB_2D_SRCALPHA_OP_SHIFT (20)
++#define PSB_2D_SRCALPHA_OP_ONE (0x00000000)
++#define PSB_2D_SRCALPHA_OP_SRC (0x00100000)
++#define PSB_2D_SRCALPHA_OP_DST (0x00200000)
++#define PSB_2D_SRCALPHA_OP_SG (0x00300000)
++#define PSB_2D_SRCALPHA_OP_DG (0x00400000)
++#define PSB_2D_SRCALPHA_OP_GBL (0x00500000)
++#define PSB_2D_SRCALPHA_OP_ZERO (0x00600000)
++#define PSB_2D_SRCALPHA_INVERT (0x00800000)
++#define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF)
++#define PSB_2D_DSTALPHA_OP_MASK (0x07000000)
++#define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF)
++#define PSB_2D_DSTALPHA_OP_SHIFT (24)
++#define PSB_2D_DSTALPHA_OP_ONE (0x00000000)
++#define PSB_2D_DSTALPHA_OP_SRC (0x01000000)
++#define PSB_2D_DSTALPHA_OP_DST (0x02000000)
++#define PSB_2D_DSTALPHA_OP_SG (0x03000000)
++#define PSB_2D_DSTALPHA_OP_DG (0x04000000)
++#define PSB_2D_DSTALPHA_OP_GBL (0x05000000)
++#define PSB_2D_DSTALPHA_OP_ZERO (0x06000000)
++#define PSB_2D_DSTALPHA_INVERT (0x08000000)
++#define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF)
++
++#define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000)
++#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF)
++#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000)
++#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF)
++
++/*
++ *Source Offset (PSB_2D_SRC_OFF_BH)
++ */
++#define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12)
++#define PSB_2D_SRCOFF_XSTART_SHIFT (12)
++#define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF)
++#define PSB_2D_SRCOFF_YSTART_SHIFT (0)
++
++/*
++ * Mask Offset (PSB_2D_MASK_OFF_BH)
++ */
++#define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12)
++#define PSB_2D_MASKOFF_XSTART_SHIFT (12)
++#define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF)
++#define PSB_2D_MASKOFF_YSTART_SHIFT (0)
++
++/*
++ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
++ */
++
++/*
++ *Blit Rectangle (PSB_2D_BLIT_BH)
++ */
++
++#define PSB_2D_ROT_MASK (3<<25)
++#define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK)
++#define PSB_2D_ROT_NONE (0<<25)
++#define PSB_2D_ROT_90DEGS (1<<25)
++#define PSB_2D_ROT_180DEGS (2<<25)
++#define PSB_2D_ROT_270DEGS (3<<25)
++
++#define PSB_2D_COPYORDER_MASK (3<<23)
++#define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK)
++#define PSB_2D_COPYORDER_TL2BR (0<<23)
++#define PSB_2D_COPYORDER_BR2TL (1<<23)
++#define PSB_2D_COPYORDER_TR2BL (2<<23)
++#define PSB_2D_COPYORDER_BL2TR (3<<23)
++
++#define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF)
++#define PSB_2D_DSTCK_DISABLE (0x00000000)
++#define PSB_2D_DSTCK_PASS (0x00200000)
++#define PSB_2D_DSTCK_REJECT (0x00400000)
++
++#define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF)
++#define PSB_2D_SRCCK_DISABLE (0x00000000)
++#define PSB_2D_SRCCK_PASS (0x00080000)
++#define PSB_2D_SRCCK_REJECT (0x00100000)
++
++#define PSB_2D_CLIP_ENABLE (0x00040000)
++
++#define PSB_2D_ALPHA_ENABLE (0x00020000)
++
++#define PSB_2D_PAT_CLRMASK (0xFFFEFFFF)
++#define PSB_2D_PAT_MASK (0x00010000)
++#define PSB_2D_USE_PAT (0x00010000)
++#define PSB_2D_USE_FILL (0x00000000)
++/*
++ * Tungsten Graphics note on rop codes: If rop A and rop B are
++ * identical, the mask surface will not be read and need not be
++ * set up.
++ */
++
++#define PSB_2D_ROP3B_MASK (0x0000FF00)
++#define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF)
++#define PSB_2D_ROP3B_SHIFT (8)
++/* rop code A */
++#define PSB_2D_ROP3A_MASK (0x000000FF)
++#define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00)
++#define PSB_2D_ROP3A_SHIFT (0)
++
++#define PSB_2D_ROP4_MASK (0x0000FFFF)
++/*
++ * DWORD0: (Only pass if Pattern control == Use Fill Colour)
++ * Fill Colour RGBA8888
++ */
++#define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF)
++#define PSB_2D_FILLCOLOUR_SHIFT (0)
++/*
++ * DWORD1: (Always Present)
++ * X Start (Dest)
++ * Y Start (Dest)
++ */
++#define PSB_2D_DST_XSTART_MASK (0x00FFF000)
++#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF)
++#define PSB_2D_DST_XSTART_SHIFT (12)
++#define PSB_2D_DST_YSTART_MASK (0x00000FFF)
++#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000)
++#define PSB_2D_DST_YSTART_SHIFT (0)
++/*
++ * DWORD2: (Always Present)
++ * X Size (Dest)
++ * Y Size (Dest)
++ */
++#define PSB_2D_DST_XSIZE_MASK (0x00FFF000)
++#define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF)
++#define PSB_2D_DST_XSIZE_SHIFT (12)
++#define PSB_2D_DST_YSIZE_MASK (0x00000FFF)
++#define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000)
++#define PSB_2D_DST_YSIZE_SHIFT (0)
++
++/*
++ * Source Surface (PSB_2D_SRC_SURF_BH)
++ */
++/*
++ * WORD 0
++ */
++
++#define PSB_2D_SRC_FORMAT_MASK (0x00078000)
++#define PSB_2D_SRC_1_PAL (0x00000000)
++#define PSB_2D_SRC_2_PAL (0x00008000)
++#define PSB_2D_SRC_4_PAL (0x00010000)
++#define PSB_2D_SRC_8_PAL (0x00018000)
++#define PSB_2D_SRC_8_ALPHA (0x00020000)
++#define PSB_2D_SRC_4_ALPHA (0x00028000)
++#define PSB_2D_SRC_332RGB (0x00030000)
++#define PSB_2D_SRC_4444ARGB (0x00038000)
++#define PSB_2D_SRC_555RGB (0x00040000)
++#define PSB_2D_SRC_1555ARGB (0x00048000)
++#define PSB_2D_SRC_565RGB (0x00050000)
++#define PSB_2D_SRC_0888ARGB (0x00058000)
++#define PSB_2D_SRC_8888ARGB (0x00060000)
++#define PSB_2D_SRC_8888UYVY (0x00068000)
++#define PSB_2D_SRC_RESERVED (0x00070000)
++#define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000)
++
++
++#define PSB_2D_SRC_STRIDE_MASK (0x00007FFF)
++#define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000)
++#define PSB_2D_SRC_STRIDE_SHIFT (0)
++/*
++ * WORD 1 - Base Address
++ */
++#define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC)
++#define PSB_2D_SRC_ADDR_CLRMASK (0x00000003)
++#define PSB_2D_SRC_ADDR_SHIFT (2)
++#define PSB_2D_SRC_ADDR_ALIGNSHIFT (2)
++
++/*
++ * Pattern Surface (PSB_2D_PAT_SURF_BH)
++ */
++/*
++ * WORD 0
++ */
++
++#define PSB_2D_PAT_FORMAT_MASK (0x00078000)
++#define PSB_2D_PAT_1_PAL (0x00000000)
++#define PSB_2D_PAT_2_PAL (0x00008000)
++#define PSB_2D_PAT_4_PAL (0x00010000)
++#define PSB_2D_PAT_8_PAL (0x00018000)
++#define PSB_2D_PAT_8_ALPHA (0x00020000)
++#define PSB_2D_PAT_4_ALPHA (0x00028000)
++#define PSB_2D_PAT_332RGB (0x00030000)
++#define PSB_2D_PAT_4444ARGB (0x00038000)
++#define PSB_2D_PAT_555RGB (0x00040000)
++#define PSB_2D_PAT_1555ARGB (0x00048000)
++#define PSB_2D_PAT_565RGB (0x00050000)
++#define PSB_2D_PAT_0888ARGB (0x00058000)
++#define PSB_2D_PAT_8888ARGB (0x00060000)
++
++#define PSB_2D_PAT_STRIDE_MASK (0x00007FFF)
++#define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000)
++#define PSB_2D_PAT_STRIDE_SHIFT (0)
++/*
++ * WORD 1 - Base Address
++ */
++#define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC)
++#define PSB_2D_PAT_ADDR_CLRMASK (0x00000003)
++#define PSB_2D_PAT_ADDR_SHIFT (2)
++#define PSB_2D_PAT_ADDR_ALIGNSHIFT (2)
++
++/*
++ * Destination Surface (PSB_2D_DST_SURF_BH)
++ */
++/*
++ * WORD 0
++ */
++
++#define PSB_2D_DST_FORMAT_MASK (0x00078000)
++#define PSB_2D_DST_332RGB (0x00030000)
++#define PSB_2D_DST_4444ARGB (0x00038000)
++#define PSB_2D_DST_555RGB (0x00040000)
++#define PSB_2D_DST_1555ARGB (0x00048000)
++#define PSB_2D_DST_565RGB (0x00050000)
++#define PSB_2D_DST_0888ARGB (0x00058000)
++#define PSB_2D_DST_8888ARGB (0x00060000)
++#define PSB_2D_DST_8888AYUV (0x00070000)
++
++#define PSB_2D_DST_STRIDE_MASK (0x00007FFF)
++#define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000)
++#define PSB_2D_DST_STRIDE_SHIFT (0)
++/*
++ * WORD 1 - Base Address
++ */
++#define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC)
++#define PSB_2D_DST_ADDR_CLRMASK (0x00000003)
++#define PSB_2D_DST_ADDR_SHIFT (2)
++#define PSB_2D_DST_ADDR_ALIGNSHIFT (2)
++
++/*
++ * Mask Surface (PSB_2D_MASK_SURF_BH)
++ */
++/*
++ * WORD 0
++ */
++#define PSB_2D_MASK_STRIDE_MASK (0x00007FFF)
++#define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000)
++#define PSB_2D_MASK_STRIDE_SHIFT (0)
++/*
++ * WORD 1 - Base Address
++ */
++#define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC)
++#define PSB_2D_MASK_ADDR_CLRMASK (0x00000003)
++#define PSB_2D_MASK_ADDR_SHIFT (2)
++#define PSB_2D_MASK_ADDR_ALIGNSHIFT (2)
++
++/*
++ * Source Palette (PSB_2D_SRC_PAL_BH)
++ */
++
++#define PSB_2D_SRCPAL_ADDR_SHIFT (0)
++#define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007)
++#define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8)
++#define PSB_2D_SRCPAL_BYTEALIGN (1024)
++
++/*
++ * Pattern Palette (PSB_2D_PAT_PAL_BH)
++ */
++
++#define PSB_2D_PATPAL_ADDR_SHIFT (0)
++#define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007)
++#define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8)
++#define PSB_2D_PATPAL_BYTEALIGN (1024)
++
++/*
++ * Rop3 Codes (2 LS bytes)
++ */
++
++#define PSB_2D_ROP3_SRCCOPY (0xCCCC)
++#define PSB_2D_ROP3_PATCOPY (0xF0F0)
++#define PSB_2D_ROP3_WHITENESS (0xFFFF)
++#define PSB_2D_ROP3_BLACKNESS (0x0000)
++#define PSB_2D_ROP3_SRC (0xCC)
++#define PSB_2D_ROP3_PAT (0xF0)
++#define PSB_2D_ROP3_DST (0xAA)
++
++
++/*
++ * Sizes.
++ */
++
++#define PSB_SCENE_HW_COOKIE_SIZE 16
++#define PSB_TA_MEM_HW_COOKIE_SIZE 16
++
++/*
++ * Scene stuff.
++ */
++
++#define PSB_NUM_HW_SCENES 2
++
++/*
++ * Scheduler completion actions.
++ */
++
++#define PSB_RASTER_BLOCK 0
++#define PSB_RASTER 1
++#define PSB_RETURN 2
++#define PSB_TA 3
++
++
++/*Power management*/
++#define PSB_PUNIT_PORT 0x04
++#define PSB_APMBA 0x7a
++#define PSB_APM_CMD 0x0
++#define PSB_APM_STS 0x04
++#define PSB_PWRGT_GFX_MASK 0x3
++#define PSB_PWRGT_VID_ENC_MASK 0x30
++#define PSB_PWRGT_VID_DEC_MASK 0xc
++
++#define PSB_PM_SSC 0x20
++#define PSB_PM_SSS 0x30
++#define PSB_PWRGT_DISPLAY_MASK 0xc /*on a different BA than video/gfx*/
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_reset.c b/drivers/gpu/drm/mrst/drv/psb_reset.c
+new file mode 100644
+index 0000000..eba85ea
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_reset.c
+@@ -0,0 +1,209 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ **************************************************************************/
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_reg.h"
++#include "psb_intel_reg.h"
++#include "psb_msvdx.h"
++#include "lnc_topaz.h"
++#include <linux/spinlock.h>
++
++
++void psb_schedule_watchdog(struct drm_psb_private *dev_priv)
++{
++ struct timer_list *wt = &dev_priv->watchdog_timer;
++ unsigned long irq_flags;
++
++ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
++ if (dev_priv->timer_available && !timer_pending(wt)) {
++ wt->expires = jiffies + PSB_WATCHDOG_DELAY;
++ add_timer(wt);
++ }
++ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
++}
++
++
++static void psb_watchdog_func(unsigned long data)
++{
++ struct drm_psb_private *dev_priv = (struct drm_psb_private *) data;
++ int msvdx_lockup;
++ int msvdx_idle;
++ unsigned long irq_flags;
++
++ psb_msvdx_lockup(dev_priv, &msvdx_lockup, &msvdx_idle);
++
++ if (msvdx_lockup) {
++ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
++ dev_priv->timer_available = 0;
++ spin_unlock_irqrestore(&dev_priv->watchdog_lock,
++ irq_flags);
++ if (msvdx_lockup)
++ schedule_work(&dev_priv->msvdx_watchdog_wq);
++ }
++ if (!msvdx_idle)
++ psb_schedule_watchdog(dev_priv);
++}
++
++void psb_msvdx_flush_cmd_queue(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct psb_msvdx_cmd_queue *msvdx_cmd;
++ struct list_head *list, *next;
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ /*Flush the msvdx cmd queue and signal all fences in the queue */
++ list_for_each_safe(list, next, &msvdx_priv->msvdx_queue) {
++ msvdx_cmd =
++ list_entry(list, struct psb_msvdx_cmd_queue, head);
++ PSB_DEBUG_GENERAL("MSVDXQUE: flushing sequence:%d\n",
++ msvdx_cmd->sequence);
++ msvdx_priv->msvdx_current_sequence = msvdx_cmd->sequence;
++ psb_fence_error(dev, PSB_ENGINE_VIDEO,
++ msvdx_priv->msvdx_current_sequence,
++ _PSB_FENCE_TYPE_EXE, DRM_CMD_HANG);
++ list_del(list);
++ kfree(msvdx_cmd->cmd);
++ kfree(msvdx_cmd
++ );
++ }
++}
++
++static void psb_msvdx_reset_wq(struct work_struct *work)
++{
++ struct drm_psb_private *dev_priv =
++ container_of(work, struct drm_psb_private, msvdx_watchdog_wq);
++ struct msvdx_private *msvdx_priv = dev_priv->msvdx_private;
++
++ struct psb_scheduler *scheduler = &dev_priv->scheduler;
++ unsigned long irq_flags;
++
++ mutex_lock(&msvdx_priv->msvdx_mutex);
++ msvdx_priv->msvdx_needs_reset = 1;
++ msvdx_priv->msvdx_current_sequence++;
++ PSB_DEBUG_GENERAL
++ ("MSVDXFENCE: incremented msvdx_current_sequence to :%d\n",
++ msvdx_priv->msvdx_current_sequence);
++
++ psb_fence_error(scheduler->dev, PSB_ENGINE_VIDEO,
++ msvdx_priv->msvdx_current_sequence,
++ _PSB_FENCE_TYPE_EXE, DRM_CMD_HANG);
++
++ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
++ dev_priv->timer_available = 1;
++ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
++
++ spin_lock_irqsave(&msvdx_priv->msvdx_lock, irq_flags);
++ psb_msvdx_flush_cmd_queue(scheduler->dev);
++ spin_unlock_irqrestore(&msvdx_priv->msvdx_lock, irq_flags);
++
++ psb_schedule_watchdog(dev_priv);
++ mutex_unlock(&msvdx_priv->msvdx_mutex);
++}
++
++void psb_watchdog_init(struct drm_psb_private *dev_priv)
++{
++ struct timer_list *wt = &dev_priv->watchdog_timer;
++ unsigned long irq_flags;
++
++ spin_lock_init(&dev_priv->watchdog_lock);
++ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
++ init_timer(wt);
++ INIT_WORK(&dev_priv->msvdx_watchdog_wq, &psb_msvdx_reset_wq);
++ wt->data = (unsigned long) dev_priv;
++ wt->function = &psb_watchdog_func;
++ dev_priv->timer_available = 1;
++ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
++}
++
++void psb_watchdog_takedown(struct drm_psb_private *dev_priv)
++{
++ unsigned long irq_flags;
++
++ spin_lock_irqsave(&dev_priv->watchdog_lock, irq_flags);
++ dev_priv->timer_available = 0;
++ spin_unlock_irqrestore(&dev_priv->watchdog_lock, irq_flags);
++ (void) del_timer_sync(&dev_priv->watchdog_timer);
++}
++
++static void psb_lid_timer_func(unsigned long data)
++{
++ struct drm_psb_private * dev_priv = (struct drm_psb_private *)data;
++ struct drm_device *dev = (struct drm_device *)dev_priv->dev;
++ struct timer_list *lid_timer = &dev_priv->lid_timer;
++ unsigned long irq_flags;
++ u32 *lid_state = dev_priv->lid_state;
++ u32 pp_status;
++
++ if (*lid_state == dev_priv->lid_last_state)
++ goto lid_timer_schedule;
++
++ if ((*lid_state) & 0x01) {
++ /*lid state is open*/
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while ((pp_status & PP_ON) == 0);
++
++ /*FIXME: should be backlight level before*/
++ psb_intel_lvds_set_brightness(dev, 100);
++ } else {
++ psb_intel_lvds_set_brightness(dev, 0);
++
++ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
++ do {
++ pp_status = REG_READ(PP_STATUS);
++ } while ((pp_status & PP_ON) == 0);
++ }
++ /* printk(KERN_INFO"%s: lid: closed\n", __FUNCTION__); */
++
++ dev_priv->lid_last_state = *lid_state;
++
++lid_timer_schedule:
++ spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
++ if (!timer_pending(lid_timer)) {
++ lid_timer->expires = jiffies + PSB_LID_DELAY;
++ add_timer(lid_timer);
++ }
++ spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
++}
++
++void psb_lid_timer_init(struct drm_psb_private *dev_priv)
++{
++ struct timer_list *lid_timer = &dev_priv->lid_timer;
++ unsigned long irq_flags;
++
++ spin_lock_init(&dev_priv->lid_lock);
++ spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
++
++ init_timer(lid_timer);
++
++ lid_timer->data = (unsigned long)dev_priv;
++ lid_timer->function = psb_lid_timer_func;
++ lid_timer->expires = jiffies + PSB_LID_DELAY;
++
++ add_timer(lid_timer);
++ spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
++}
++
++void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
++{
++ del_timer_sync(&dev_priv->lid_timer);
++}
++
+diff --git a/drivers/gpu/drm/mrst/drv/psb_schedule.c b/drivers/gpu/drm/mrst/drv/psb_schedule.c
+new file mode 100644
+index 0000000..4e2127c
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_schedule.c
+@@ -0,0 +1,70 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
++ **************************************************************************/
++
++#include <drm/drmP.h>
++#include "psb_drm.h"
++#include "psb_drv.h"
++#include "psb_reg.h"
++#include "ttm/ttm_execbuf_util.h"
++
++
++static void psb_powerdown_topaz(struct work_struct *work)
++{
++ struct psb_scheduler *scheduler =
++ container_of(work, struct psb_scheduler, topaz_suspend_wq.work);
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) scheduler->dev->dev_private;
++
++ if (!dev_priv->topaz_disabled) {
++ if (!mutex_trylock(&scheduler->topaz_power_mutex))
++ return;
++
++ psb_try_power_down_topaz(scheduler->dev);
++ mutex_unlock(&scheduler->topaz_power_mutex);
++ }
++}
++
++static void psb_powerdown_msvdx(struct work_struct *work)
++{
++ struct psb_scheduler *scheduler =
++ container_of(work, struct psb_scheduler, msvdx_suspend_wq.work);
++
++ if (!mutex_trylock(&scheduler->msvdx_power_mutex))
++ return;
++
++ psb_try_power_down_msvdx(scheduler->dev);
++ mutex_unlock(&scheduler->msvdx_power_mutex);
++}
++
++int psb_scheduler_init(struct drm_device *dev,
++ struct psb_scheduler *scheduler)
++{
++ memset(scheduler, 0, sizeof(*scheduler));
++ scheduler->dev = dev;
++ mutex_init(&scheduler->topaz_power_mutex);
++ mutex_init(&scheduler->msvdx_power_mutex);
++
++ INIT_DELAYED_WORK(&scheduler->topaz_suspend_wq,
++ &psb_powerdown_topaz);
++ INIT_DELAYED_WORK(&scheduler->msvdx_suspend_wq,
++ &psb_powerdown_msvdx);
++
++ return 0;
++}
++
+diff --git a/drivers/gpu/drm/mrst/drv/psb_schedule.h b/drivers/gpu/drm/mrst/drv/psb_schedule.h
+new file mode 100644
+index 0000000..764eb29
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_schedule.h
+@@ -0,0 +1,81 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
++ **************************************************************************/
++#ifndef _PSB_SCHEDULE_H_
++#define _PSB_SCHEDULE_H_
++
++#include <drm/drmP.h>
++
++struct psb_context;
++
++enum psb_task_type {
++ psb_flip_task
++};
++
++struct drm_psb_private;
++
++/*struct psb_scheduler_seq {
++ uint32_t sequence;
++ int reported;
++};*/
++
++struct psb_scheduler {
++ struct drm_device *dev;
++ /*struct psb_scheduler_seq seq[_PSB_ENGINE_TA_FENCE_TYPES];
++ struct psb_hw_scene hs[PSB_NUM_HW_SCENES];
++ struct mutex task_wq_mutex;*/
++ struct mutex topaz_power_mutex;
++ struct mutex msvdx_power_mutex;
++ /*spinlock_t lock;
++ struct list_head hw_scenes;
++ struct list_head ta_queue;
++ struct list_head raster_queue;
++ struct list_head hp_raster_queue;
++ struct list_head task_done_queue;
++ struct psb_task *current_task[PSB_SCENE_NUM_ENGINES];
++ struct psb_task *feedback_task;
++ int ta_state;
++ struct psb_hw_scene *pending_hw_scene;
++ uint32_t pending_hw_scene_seq;
++ struct delayed_work wq*/;
++ struct delayed_work topaz_suspend_wq;
++ struct delayed_work msvdx_suspend_wq;
++ /*struct psb_scene_pool *pool;
++ uint32_t idle_count;
++ int idle;
++ wait_queue_head_t idle_queue;
++ unsigned long ta_end_jiffies;
++ unsigned long total_ta_jiffies;
++ unsigned long raster_end_jiffies;
++ unsigned long total_raster_jiffies;*/
++};
++
++/*#define PSB_RF_FIRE_TA (1 << 0)
++#define PSB_RF_OOM (1 << 1)
++#define PSB_RF_OOM_REPLY (1 << 2)
++#define PSB_RF_TERMINATE (1 << 3)
++#define PSB_RF_TA_DONE (1 << 4)
++#define PSB_RF_FIRE_RASTER (1 << 5)
++#define PSB_RF_RASTER_DONE (1 << 6)
++#define PSB_RF_DEALLOC (1 << 7)
++*/
++
++extern int psb_scheduler_init(struct drm_device *dev,
++ struct psb_scheduler *scheduler);
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_setup.c b/drivers/gpu/drm/mrst/drv/psb_setup.c
+new file mode 100644
+index 0000000..7bf2dcf
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_setup.c
+@@ -0,0 +1,35 @@
++/*
++ * Copyright (c) 2009, Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_edid.h>
++#include "psb_intel_drv.h"
++#include "psb_drv.h"
++#include "psb_intel_reg.h"
++
++/* Fixed name */
++#define ACPI_EDID_LCD "\\_SB_.PCI0.GFX0.DD04._DDC"
++#define ACPI_DOD "\\_SB_.PCI0.GFX0._DOD"
++
++#include "psb_intel_i2c.c"
++#include "psb_intel_sdvo.c"
++#include "psb_intel_modes.c"
++#include "psb_intel_lvds.c"
++#include "psb_intel_dsi.c"
++#include "psb_intel_display.c"
+diff --git a/drivers/gpu/drm/mrst/drv/psb_sgx.c b/drivers/gpu/drm/mrst/drv/psb_sgx.c
+new file mode 100644
+index 0000000..6bc821a
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_sgx.c
+@@ -0,0 +1,929 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX. USA.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_drm.h"
++#include "psb_reg.h"
++#include "psb_msvdx.h"
++#include "lnc_topaz.h"
++#include "ttm/ttm_bo_api.h"
++#include "ttm/ttm_execbuf_util.h"
++#include "ttm/ttm_userobj_api.h"
++#include "ttm/ttm_placement_common.h"
++#include "psb_sgx.h"
++#include "psb_intel_reg.h"
++#include "ospm_power.h"
++
++
++static inline int psb_same_page(unsigned long offset,
++ unsigned long offset2)
++{
++ return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
++}
++
++static inline unsigned long psb_offset_end(unsigned long offset,
++ unsigned long end)
++{
++ offset = (offset + PAGE_SIZE) & PAGE_MASK;
++ return (end < offset) ? end : offset;
++}
++
++static void psb_idle_engine(struct drm_device *dev, int engine);
++
++struct psb_dstbuf_cache {
++ unsigned int dst;
++ struct ttm_buffer_object *dst_buf;
++ unsigned long dst_offset;
++ uint32_t *dst_page;
++ unsigned int dst_page_offset;
++ struct ttm_bo_kmap_obj dst_kmap;
++ bool dst_is_iomem;
++};
++
++struct psb_validate_buffer {
++ struct ttm_validate_buffer base;
++ struct psb_validate_req req;
++ int ret;
++ struct psb_validate_arg __user *user_val_arg;
++ uint32_t flags;
++ uint32_t offset;
++ int po_correct;
++};
++
++static int psb_check_presumed(struct psb_validate_req *req,
++ struct ttm_buffer_object *bo,
++ struct psb_validate_arg __user *data,
++ int *presumed_ok)
++{
++ struct psb_validate_req __user *user_req = &(data->d.req);
++
++ *presumed_ok = 0;
++
++ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
++ *presumed_ok = 1;
++ return 0;
++ }
++
++ if (unlikely(!(req->presumed_flags & PSB_USE_PRESUMED)))
++ return 0;
++
++ if (bo->offset == req->presumed_gpu_offset) {
++ *presumed_ok = 1;
++ return 0;
++ }
++
++ return __put_user(req->presumed_flags & ~PSB_USE_PRESUMED,
++ &user_req->presumed_flags);
++}
++
++
++static void psb_unreference_buffers(struct psb_context *context)
++{
++ struct ttm_validate_buffer *entry, *next;
++ struct psb_validate_buffer *vbuf;
++ struct list_head *list = &context->validate_list;
++
++ list_for_each_entry_safe(entry, next, list, head) {
++ vbuf =
++ container_of(entry, struct psb_validate_buffer, base);
++ list_del(&entry->head);
++ ttm_bo_unref(&entry->bo);
++ }
++
++ list = &context->kern_validate_list;
++
++ list_for_each_entry_safe(entry, next, list, head) {
++ vbuf =
++ container_of(entry, struct psb_validate_buffer, base);
++ list_del(&entry->head);
++ ttm_bo_unref(&entry->bo);
++ }
++}
++
++
++static int psb_lookup_validate_buffer(struct drm_file *file_priv,
++ uint64_t data,
++ struct psb_validate_buffer *item)
++{
++ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
++
++ item->user_val_arg =
++ (struct psb_validate_arg __user *) (unsigned long) data;
++
++ if (unlikely(copy_from_user(&item->req, &item->user_val_arg->d.req,
++ sizeof(item->req)) != 0)) {
++ DRM_ERROR("Lookup copy fault.\n");
++ return -EFAULT;
++ }
++
++ item->base.bo =
++ ttm_buffer_object_lookup(tfile, item->req.buffer_handle);
++
++ if (unlikely(item->base.bo == NULL)) {
++ DRM_ERROR("Bo lookup fault.\n");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int psb_reference_buffers(struct drm_file *file_priv,
++ uint64_t data,
++ struct psb_context *context)
++{
++ struct psb_validate_buffer *item;
++ int ret;
++
++ while (likely(data != 0)) {
++ if (unlikely(context->used_buffers >=
++ PSB_NUM_VALIDATE_BUFFERS)) {
++ DRM_ERROR("Too many buffers "
++ "on validate list.\n");
++ ret = -EINVAL;
++ goto out_err0;
++ }
++
++ item = &context->buffers[context->used_buffers];
++
++ ret = psb_lookup_validate_buffer(file_priv, data, item);
++ if (unlikely(ret != 0))
++ goto out_err0;
++
++ item->base.reserved = 0;
++ list_add_tail(&item->base.head, &context->validate_list);
++ context->used_buffers++;
++ data = item->req.next;
++ }
++ return 0;
++
++out_err0:
++ psb_unreference_buffers(context);
++ return ret;
++}
++
++static int
++psb_placement_fence_type(struct ttm_buffer_object *bo,
++ uint64_t set_val_flags,
++ uint64_t clr_val_flags,
++ uint32_t new_fence_class,
++ uint32_t *new_fence_type)
++{
++ int ret;
++ uint32_t n_fence_type;
++ uint32_t set_flags = set_val_flags & 0xFFFFFFFF;
++ uint32_t clr_flags = clr_val_flags & 0xFFFFFFFF;
++ struct ttm_fence_object *old_fence;
++ uint32_t old_fence_type;
++
++ if (unlikely
++ (!(set_val_flags &
++ (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)))) {
++ DRM_ERROR
++ ("GPU access type (read / write) is not indicated.\n");
++ return -EINVAL;
++ }
++
++ ret = ttm_bo_check_placement(bo, set_flags, clr_flags);
++ if (unlikely(ret != 0))
++ return ret;
++
++ switch (new_fence_class) {
++ default:
++ n_fence_type = _PSB_FENCE_TYPE_EXE;
++ }
++
++ *new_fence_type = n_fence_type;
++ old_fence = (struct ttm_fence_object *) bo->sync_obj;
++ old_fence_type = (uint32_t) (unsigned long) bo->sync_obj_arg;
++
++ if (old_fence && ((new_fence_class != old_fence->fence_class) ||
++ ((n_fence_type ^ old_fence_type) &
++ old_fence_type))) {
++ ret = ttm_bo_wait(bo, 0, 1, 0);
++ if (unlikely(ret != 0))
++ return ret;
++ }
++
++ bo->proposed_flags = (bo->proposed_flags | set_flags)
++ & ~clr_flags & TTM_PL_MASK_MEMTYPE;
++
++ return 0;
++}
++
++int psb_validate_kernel_buffer(struct psb_context *context,
++ struct ttm_buffer_object *bo,
++ uint32_t fence_class,
++ uint64_t set_flags, uint64_t clr_flags)
++{
++ struct psb_validate_buffer *item;
++ uint32_t cur_fence_type;
++ int ret;
++
++ if (unlikely(context->used_buffers >= PSB_NUM_VALIDATE_BUFFERS)) {
++ DRM_ERROR("Out of free validation buffer entries for "
++ "kernel buffer validation.\n");
++ return -ENOMEM;
++ }
++
++ item = &context->buffers[context->used_buffers];
++ item->user_val_arg = NULL;
++ item->base.reserved = 0;
++
++ ret = ttm_bo_reserve(bo, 1, 0, 1, context->val_seq);
++ if (unlikely(ret != 0))
++ goto out_unlock;
++
++ mutex_lock(&bo->mutex);
++ ret = psb_placement_fence_type(bo, set_flags, clr_flags, fence_class,
++ &cur_fence_type);
++ if (unlikely(ret != 0)) {
++ ttm_bo_unreserve(bo);
++ goto out_unlock;
++ }
++
++ item->base.bo = ttm_bo_reference(bo);
++ item->base.new_sync_obj_arg = (void *) (unsigned long) cur_fence_type;
++ item->base.reserved = 1;
++
++ list_add_tail(&item->base.head, &context->kern_validate_list);
++ context->used_buffers++;
++
++ ret = ttm_buffer_object_validate(bo, 1, 0);
++ if (unlikely(ret != 0))
++ goto out_unlock;
++
++ item->offset = bo->offset;
++ item->flags = bo->mem.flags;
++ context->fence_types |= cur_fence_type;
++
++out_unlock:
++ mutex_unlock(&bo->mutex);
++ return ret;
++}
++
++
++static int psb_validate_buffer_list(struct drm_file *file_priv,
++ uint32_t fence_class,
++ struct psb_context *context,
++ int *po_correct)
++{
++ struct psb_validate_buffer *item;
++ struct ttm_buffer_object *bo;
++ int ret;
++ struct psb_validate_req *req;
++ uint32_t fence_types = 0;
++ uint32_t cur_fence_type;
++ struct ttm_validate_buffer *entry;
++ struct list_head *list = &context->validate_list;
++
++ *po_correct = 1;
++
++ list_for_each_entry(entry, list, head) {
++ item =
++ container_of(entry, struct psb_validate_buffer, base);
++ bo = entry->bo;
++ item->ret = 0;
++ req = &item->req;
++
++ mutex_lock(&bo->mutex);
++ ret = psb_placement_fence_type(bo,
++ req->set_flags,
++ req->clear_flags,
++ fence_class,
++ &cur_fence_type);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ ret = ttm_buffer_object_validate(bo, 1, 0);
++
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ fence_types |= cur_fence_type;
++ entry->new_sync_obj_arg = (void *)
++ (unsigned long) cur_fence_type;
++
++ item->offset = bo->offset;
++ item->flags = bo->mem.flags;
++ mutex_unlock(&bo->mutex);
++
++ ret =
++ psb_check_presumed(&item->req, bo, item->user_val_arg,
++ &item->po_correct);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ if (unlikely(!item->po_correct))
++ *po_correct = 0;
++
++ item++;
++ }
++
++ context->fence_types |= fence_types;
++
++ return 0;
++out_err:
++ mutex_unlock(&bo->mutex);
++ item->ret = ret;
++ return ret;
++}
++
++static void psb_clear_dstbuf_cache(struct psb_dstbuf_cache *dst_cache)
++{
++ if (dst_cache->dst_page) {
++ ttm_bo_kunmap(&dst_cache->dst_kmap);
++ dst_cache->dst_page = NULL;
++ }
++ dst_cache->dst_buf = NULL;
++ dst_cache->dst = ~0;
++}
++
++static int psb_update_dstbuf_cache(struct psb_dstbuf_cache *dst_cache,
++ struct psb_validate_buffer *buffers,
++ unsigned int dst,
++ unsigned long dst_offset)
++{
++ int ret;
++
++ PSB_DEBUG_GENERAL("Destination buffer is %d.\n", dst);
++
++ if (unlikely(dst != dst_cache->dst || NULL == dst_cache->dst_buf)) {
++ psb_clear_dstbuf_cache(dst_cache);
++ dst_cache->dst = dst;
++ dst_cache->dst_buf = buffers[dst].base.bo;
++ }
++
++ if (unlikely
++ (dst_offset > dst_cache->dst_buf->num_pages * PAGE_SIZE)) {
++ DRM_ERROR("Relocation destination out of bounds.\n");
++ return -EINVAL;
++ }
++
++ if (!psb_same_page(dst_cache->dst_offset, dst_offset) ||
++ NULL == dst_cache->dst_page) {
++ if (NULL != dst_cache->dst_page) {
++ ttm_bo_kunmap(&dst_cache->dst_kmap);
++ dst_cache->dst_page = NULL;
++ }
++
++ ret =
++ ttm_bo_kmap(dst_cache->dst_buf,
++ dst_offset >> PAGE_SHIFT, 1,
++ &dst_cache->dst_kmap);
++ if (ret) {
++ DRM_ERROR("Could not map destination buffer for "
++ "relocation.\n");
++ return ret;
++ }
++
++ dst_cache->dst_page =
++ ttm_kmap_obj_virtual(&dst_cache->dst_kmap,
++ &dst_cache->dst_is_iomem);
++ dst_cache->dst_offset = dst_offset & PAGE_MASK;
++ dst_cache->dst_page_offset = dst_cache->dst_offset >> 2;
++ }
++ return 0;
++}
++
++static int psb_apply_reloc(struct drm_psb_private *dev_priv,
++ uint32_t fence_class,
++ const struct drm_psb_reloc *reloc,
++ struct psb_validate_buffer *buffers,
++ int num_buffers,
++ struct psb_dstbuf_cache *dst_cache,
++ int no_wait, int interruptible)
++{
++ uint32_t val;
++ uint32_t background;
++ unsigned int index;
++ int ret;
++ unsigned int shift;
++ unsigned int align_shift;
++ struct ttm_buffer_object *reloc_bo;
++
++
++ PSB_DEBUG_GENERAL("Reloc type %d\n"
++ "\t where 0x%04x\n"
++ "\t buffer 0x%04x\n"
++ "\t mask 0x%08x\n"
++ "\t shift 0x%08x\n"
++ "\t pre_add 0x%08x\n"
++ "\t background 0x%08x\n"
++ "\t dst_buffer 0x%08x\n"
++ "\t arg0 0x%08x\n"
++ "\t arg1 0x%08x\n",
++ reloc->reloc_op,
++ reloc->where,
++ reloc->buffer,
++ reloc->mask,
++ reloc->shift,
++ reloc->pre_add,
++ reloc->background,
++ reloc->dst_buffer, reloc->arg0, reloc->arg1);
++
++ if (unlikely(reloc->buffer >= num_buffers)) {
++ DRM_ERROR("Illegal relocation buffer %d.\n",
++ reloc->buffer);
++ return -EINVAL;
++ }
++
++ if (buffers[reloc->buffer].po_correct)
++ return 0;
++
++ if (unlikely(reloc->dst_buffer >= num_buffers)) {
++ DRM_ERROR
++ ("Illegal destination buffer for relocation %d.\n",
++ reloc->dst_buffer);
++ return -EINVAL;
++ }
++
++ ret =
++ psb_update_dstbuf_cache(dst_cache, buffers, reloc->dst_buffer,
++ reloc->where << 2);
++ if (ret)
++ return ret;
++
++ reloc_bo = buffers[reloc->buffer].base.bo;
++
++ if (unlikely(reloc->pre_add > (reloc_bo->num_pages << PAGE_SHIFT))) {
++ DRM_ERROR("Illegal relocation offset add.\n");
++ return -EINVAL;
++ }
++
++ switch (reloc->reloc_op) {
++ case PSB_RELOC_OP_OFFSET:
++ val = reloc_bo->offset + reloc->pre_add;
++ break;
++ default:
++ DRM_ERROR("Unimplemented relocation.\n");
++ return -EINVAL;
++ }
++
++ shift =
++ (reloc->shift & PSB_RELOC_SHIFT_MASK) >> PSB_RELOC_SHIFT_SHIFT;
++ align_shift =
++ (reloc->
++ shift & PSB_RELOC_ALSHIFT_MASK) >> PSB_RELOC_ALSHIFT_SHIFT;
++
++ val = ((val >> align_shift) << shift);
++ index = reloc->where - dst_cache->dst_page_offset;
++
++ background = reloc->background;
++ val = (background & ~reloc->mask) | (val & reloc->mask);
++ dst_cache->dst_page[index] = val;
++
++ PSB_DEBUG_GENERAL("Reloc buffer %d index 0x%08x, value 0x%08x\n",
++ reloc->dst_buffer, index,
++ dst_cache->dst_page[index]);
++
++ return 0;
++}
++
++static int psb_ok_to_map_reloc(struct drm_psb_private *dev_priv,
++ unsigned int num_pages)
++{
++ int ret = 0;
++
++ spin_lock(&dev_priv->reloc_lock);
++ if (dev_priv->rel_mapped_pages + num_pages <= PSB_MAX_RELOC_PAGES) {
++ dev_priv->rel_mapped_pages += num_pages;
++ ret = 1;
++ }
++ spin_unlock(&dev_priv->reloc_lock);
++ return ret;
++}
++
++static int psb_fixup_relocs(struct drm_file *file_priv,
++ uint32_t fence_class,
++ unsigned int num_relocs,
++ unsigned int reloc_offset,
++ uint32_t reloc_handle,
++ struct psb_context *context,
++ int no_wait, int interruptible)
++{
++ struct drm_device *dev = file_priv->minor->dev;
++ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ struct ttm_buffer_object *reloc_buffer = NULL;
++ unsigned int reloc_num_pages;
++ unsigned int reloc_first_page;
++ unsigned int reloc_last_page;
++ struct psb_dstbuf_cache dst_cache;
++ struct drm_psb_reloc *reloc;
++ struct ttm_bo_kmap_obj reloc_kmap;
++ bool reloc_is_iomem;
++ int count;
++ int ret = 0;
++ int registered = 0;
++ uint32_t num_buffers = context->used_buffers;
++
++ if (num_relocs == 0)
++ return 0;
++
++ memset(&dst_cache, 0, sizeof(dst_cache));
++ memset(&reloc_kmap, 0, sizeof(reloc_kmap));
++
++ reloc_buffer = ttm_buffer_object_lookup(tfile, reloc_handle);
++ if (!reloc_buffer)
++ goto out;
++
++ if (unlikely(atomic_read(&reloc_buffer->reserved) != 1)) {
++ DRM_ERROR("Relocation buffer was not on validate list.\n");
++ ret = -EINVAL;
++ goto out;
++ }
++
++ reloc_first_page = reloc_offset >> PAGE_SHIFT;
++ reloc_last_page =
++ (reloc_offset +
++ num_relocs * sizeof(struct drm_psb_reloc)) >> PAGE_SHIFT;
++ reloc_num_pages = reloc_last_page - reloc_first_page + 1;
++ reloc_offset &= ~PAGE_MASK;
++
++ if (reloc_num_pages > PSB_MAX_RELOC_PAGES) {
++ DRM_ERROR("Relocation buffer is too large\n");
++ ret = -EINVAL;
++ goto out;
++ }
++
++ DRM_WAIT_ON(ret, dev_priv->rel_mapped_queue, 3 * DRM_HZ,
++ (registered =
++ psb_ok_to_map_reloc(dev_priv, reloc_num_pages)));
++
++ if (ret == -EINTR) {
++ ret = -ERESTART;
++ goto out;
++ }
++ if (ret) {
++ DRM_ERROR("Error waiting for space to map "
++ "relocation buffer.\n");
++ goto out;
++ }
++
++ ret = ttm_bo_kmap(reloc_buffer, reloc_first_page,
++ reloc_num_pages, &reloc_kmap);
++
++ if (ret) {
++ DRM_ERROR("Could not map relocation buffer.\n"
++ "\tReloc buffer id 0x%08x.\n"
++ "\tReloc first page %d.\n"
++ "\tReloc num pages %d.\n",
++ reloc_handle, reloc_first_page, reloc_num_pages);
++ goto out;
++ }
++
++ reloc = (struct drm_psb_reloc *)
++ ((unsigned long)
++ ttm_kmap_obj_virtual(&reloc_kmap,
++ &reloc_is_iomem) + reloc_offset);
++
++ for (count = 0; count < num_relocs; ++count) {
++ ret = psb_apply_reloc(dev_priv, fence_class,
++ reloc, context->buffers,
++ num_buffers, &dst_cache,
++ no_wait, interruptible);
++ if (ret)
++ goto out1;
++ reloc++;
++ }
++
++out1:
++ ttm_bo_kunmap(&reloc_kmap);
++out:
++ if (registered) {
++ spin_lock(&dev_priv->reloc_lock);
++ dev_priv->rel_mapped_pages -= reloc_num_pages;
++ spin_unlock(&dev_priv->reloc_lock);
++ DRM_WAKEUP(&dev_priv->rel_mapped_queue);
++ }
++
++ psb_clear_dstbuf_cache(&dst_cache);
++ if (reloc_buffer)
++ ttm_bo_unref(&reloc_buffer);
++ return ret;
++}
++
++void psb_fence_or_sync(struct drm_file *file_priv,
++ uint32_t engine,
++ uint32_t fence_types,
++ uint32_t fence_flags,
++ struct list_head *list,
++ struct psb_ttm_fence_rep *fence_arg,
++ struct ttm_fence_object **fence_p)
++{
++ struct drm_device *dev = file_priv->minor->dev;
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++ struct ttm_fence_device *fdev = &dev_priv->fdev;
++ int ret;
++ struct ttm_fence_object *fence;
++ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
++ uint32_t handle;
++
++ ret = ttm_fence_user_create(fdev, tfile,
++ engine, fence_types,
++ TTM_FENCE_FLAG_EMIT, &fence, &handle);
++ if (ret) {
++
++ /*
++ * Fence creation failed.
++ * Fall back to synchronous operation and idle the engine.
++ */
++
++ psb_idle_engine(dev, engine);
++ if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {
++
++ /*
++ * Communicate to user-space that
++ * fence creation has failed and that
++ * the engine is idle.
++ */
++
++ fence_arg->handle = ~0;
++ fence_arg->error = ret;
++ }
++
++ ttm_eu_backoff_reservation(list);
++ if (fence_p)
++ *fence_p = NULL;
++ return;
++ }
++
++ ttm_eu_fence_buffer_objects(list, fence);
++ if (!(fence_flags & DRM_PSB_FENCE_NO_USER)) {
++ struct ttm_fence_info info = ttm_fence_get_info(fence);
++ fence_arg->handle = handle;
++ fence_arg->fence_class = ttm_fence_class(fence);
++ fence_arg->fence_type = ttm_fence_types(fence);
++ fence_arg->signaled_types = info.signaled_types;
++ fence_arg->error = 0;
++ } else {
++ ret =
++ ttm_ref_object_base_unref(tfile, handle,
++ ttm_fence_type);
++ BUG_ON(ret);
++ }
++
++ if (fence_p)
++ *fence_p = fence;
++ else if (fence)
++ ttm_fence_object_unref(&fence);
++}
++
++
++#if 0
++static int psb_dump_page(struct ttm_buffer_object *bo,
++ unsigned int page_offset, unsigned int num)
++{
++ struct ttm_bo_kmap_obj kmobj;
++ int is_iomem;
++ uint32_t *p;
++ int ret;
++ unsigned int i;
++
++ ret = ttm_bo_kmap(bo, page_offset, 1, &kmobj);
++ if (ret)
++ return ret;
++
++ p = ttm_kmap_obj_virtual(&kmobj, &is_iomem);
++ for (i = 0; i < num; ++i)
++ PSB_DEBUG_GENERAL("0x%04x: 0x%08x\n", i, *p++);
++
++ ttm_bo_kunmap(&kmobj);
++ return 0;
++}
++#endif
++
++static void psb_idle_engine(struct drm_device *dev, int engine)
++{
++ /*Fix me add video engile support*/
++ return;
++}
++
++static int psb_handle_copyback(struct drm_device *dev,
++ struct psb_context *context,
++ int ret)
++{
++ int err = ret;
++ struct ttm_validate_buffer *entry;
++ struct psb_validate_arg arg;
++ struct list_head *list = &context->validate_list;
++
++ if (ret) {
++ ttm_eu_backoff_reservation(list);
++ ttm_eu_backoff_reservation(&context->kern_validate_list);
++ }
++
++
++ if (ret != -EAGAIN && ret != -EINTR && ret != -ERESTART) {
++ list_for_each_entry(entry, list, head) {
++ struct psb_validate_buffer *vbuf =
++ container_of(entry, struct psb_validate_buffer,
++ base);
++ arg.handled = 1;
++ arg.ret = vbuf->ret;
++ if (!arg.ret) {
++ struct ttm_buffer_object *bo = entry->bo;
++ mutex_lock(&bo->mutex);
++ arg.d.rep.gpu_offset = bo->offset;
++ arg.d.rep.placement = bo->mem.flags;
++ arg.d.rep.fence_type_mask =
++ (uint32_t) (unsigned long)
++ entry->new_sync_obj_arg;
++ mutex_unlock(&bo->mutex);
++ }
++
++ if (__copy_to_user(vbuf->user_val_arg,
++ &arg, sizeof(arg)))
++ err = -EFAULT;
++
++ if (arg.ret)
++ break;
++ }
++ }
++
++ return err;
++}
++
++int psb_cmdbuf_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_cmdbuf_arg *arg = data;
++ int ret = 0;
++ struct ttm_object_file *tfile = psb_fpriv(file_priv)->tfile;
++ struct ttm_buffer_object *cmd_buffer = NULL;
++ struct psb_ttm_fence_rep fence_arg;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *)file_priv->minor->dev->dev_private;
++ int engine;
++ int po_correct;
++ struct psb_context *context;
++ unsigned num_buffers;
++
++ num_buffers = PSB_NUM_VALIDATE_BUFFERS;
++
++ ret = ttm_read_lock(&dev_priv->ttm_lock, true);
++ if (unlikely(ret != 0))
++ return ret;
++
++ if (arg->engine == PSB_ENGINE_VIDEO) {
++ if (!ospm_power_using_hw_begin(OSPM_VIDEO_DEC_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return -EBUSY;
++ } else if (arg->engine == LNC_ENGINE_ENCODE) {
++ if (dev_priv->topaz_disabled)
++ return -ENODEV;
++
++ if (!ospm_power_using_hw_begin(OSPM_VIDEO_ENC_ISLAND,
++ OSPM_UHB_FORCE_POWER_ON))
++ return -EBUSY;
++ }
++
++
++ ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
++ if (unlikely(ret != 0))
++ goto out_err0;
++
++
++ context = &dev_priv->context;
++ context->used_buffers = 0;
++ context->fence_types = 0;
++ BUG_ON(!list_empty(&context->validate_list));
++ BUG_ON(!list_empty(&context->kern_validate_list));
++
++ if (unlikely(context->buffers == NULL)) {
++ context->buffers = vmalloc(PSB_NUM_VALIDATE_BUFFERS *
++ sizeof(*context->buffers));
++ if (unlikely(context->buffers == NULL)) {
++ ret = -ENOMEM;
++ goto out_err1;
++ }
++ }
++
++ ret = psb_reference_buffers(file_priv,
++ arg->buffer_list,
++ context);
++
++ if (unlikely(ret != 0))
++ goto out_err1;
++
++ context->val_seq = atomic_add_return(1, &dev_priv->val_seq);
++
++ ret = ttm_eu_reserve_buffers(&context->validate_list,
++ context->val_seq);
++ if (unlikely(ret != 0))
++ goto out_err2;
++
++ engine = arg->engine;
++ ret = psb_validate_buffer_list(file_priv, engine,
++ context, &po_correct);
++ if (unlikely(ret != 0))
++ goto out_err3;
++
++ if (!po_correct) {
++ ret = psb_fixup_relocs(file_priv, engine, arg->num_relocs,
++ arg->reloc_offset,
++ arg->reloc_handle, context, 0, 1);
++ if (unlikely(ret != 0))
++ goto out_err3;
++
++ }
++
++ cmd_buffer = ttm_buffer_object_lookup(tfile, arg->cmdbuf_handle);
++ if (unlikely(cmd_buffer == NULL)) {
++ ret = -EINVAL;
++ goto out_err4;
++ }
++
++ switch (arg->engine) {
++ case PSB_ENGINE_VIDEO:
++ if (arg->cmdbuf_size == (16 + 32)) {
++ /* Identify deblock msg cmdbuf */
++ /* according to cmdbuf_size */
++ struct ttm_bo_kmap_obj cmd_kmap;
++ struct ttm_buffer_object *deblock;
++ uint32_t *cmd;
++ bool is_iomem;
++
++			/* write regIO BO's address after deblock msg */
++ ret = ttm_bo_kmap(cmd_buffer, 0, 1, &cmd_kmap);
++ if (unlikely(ret != 0))
++ goto out_err4;
++ cmd = (uint32_t *)(ttm_kmap_obj_virtual(&cmd_kmap,
++ &is_iomem) + 16);
++ deblock = ttm_buffer_object_lookup(tfile,
++ (uint32_t)(*cmd));
++ *cmd = (uint32_t)deblock;
++ ttm_bo_kunmap(&cmd_kmap);
++ }
++
++ ret = psb_cmdbuf_video(file_priv, &context->validate_list,
++ context->fence_types, arg,
++ cmd_buffer, &fence_arg);
++
++ if (unlikely(ret != 0))
++ goto out_err4;
++ break;
++ case LNC_ENGINE_ENCODE:
++ ret = lnc_cmdbuf_video(file_priv, &context->validate_list,
++ context->fence_types, arg,
++ cmd_buffer, &fence_arg);
++ if (unlikely(ret != 0))
++ goto out_err4;
++ break;
++
++
++ default:
++ DRM_ERROR
++ ("Unimplemented command submission mechanism (%x).\n",
++ arg->engine);
++ ret = -EINVAL;
++ goto out_err4;
++ }
++
++ if (!(arg->fence_flags & DRM_PSB_FENCE_NO_USER)) {
++ ret = copy_to_user((void __user *)
++ ((unsigned long) arg->fence_arg),
++ &fence_arg, sizeof(fence_arg));
++ }
++
++out_err4:
++ if (cmd_buffer)
++ ttm_bo_unref(&cmd_buffer);
++out_err3:
++ ret = psb_handle_copyback(dev, context, ret);
++out_err2:
++ psb_unreference_buffers(context);
++out_err1:
++ mutex_unlock(&dev_priv->cmdbuf_mutex);
++out_err0:
++ ttm_read_unlock(&dev_priv->ttm_lock);
++
++ if (arg->engine == PSB_ENGINE_VIDEO)
++ ospm_power_using_hw_end(OSPM_VIDEO_DEC_ISLAND);
++
++ if (arg->engine == LNC_ENGINE_ENCODE)
++ ospm_power_using_hw_end(OSPM_VIDEO_ENC_ISLAND);
++
++ return ret;
++}
++
+diff --git a/drivers/gpu/drm/mrst/drv/psb_sgx.h b/drivers/gpu/drm/mrst/drv/psb_sgx.h
+new file mode 100644
+index 0000000..2934e5d
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_sgx.h
+@@ -0,0 +1,32 @@
++/*
++ * Copyright (c) 2008, Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric@anholt.net>
++ *
++ **/
++#ifndef _PSB_SGX_H_
++#define _PSB_SGX_H_
++
++extern int psb_submit_video_cmdbuf(struct drm_device *dev,
++ struct ttm_buffer_object *cmd_buffer,
++ unsigned long cmd_offset,
++ unsigned long cmd_size,
++ struct ttm_fence_object *fence);
++
++extern int drm_idle_check_interval;
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_socket.c b/drivers/gpu/drm/mrst/drv/psb_socket.c
+new file mode 100644
+index 0000000..8bb12cf
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_socket.c
+@@ -0,0 +1,376 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
++ * Copyright (C) 2004 Novell, Inc. All rights reserved.
++ * Copyright (C) 2004 IBM, Inc. All rights reserved.
++ * Copyright (C) 2009 Intel Corporation. All rights reserved.
++ *
++ * Licensed under the GNU GPL v2.
++ *
++ * Authors:
++ * Robert Love <rml@novell.com>
++ * Kay Sievers <kay.sievers@vrfy.org>
++ * Arjan van de Ven <arjanv@redhat.com>
++ * Greg Kroah-Hartman <greg@kroah.com>
++ *
++ * Notes:
++ * Adapted from existing kobj event socket code to enable
++ *       multicast usermode communication for gfx driver to multiple
++ * usermode threads via different socket broadcast groups.
++ * Original kobject uevent code does not allow for different
++ * broadcast groups. Due to the frequency of usermode events
++ * generated by some gfx subsystems it is necessary to open
++ * a new dedicated socket with multicast group support. In
++ * the future it is hoped that this code can be removed
++ * and either a new netlink protocol type added for graphics
++ * or conversely to simply enable group routing to be leveraged
++ * on the existing kobject uevent infrastructure.
++ */
++
++#include <linux/spinlock.h>
++#include <linux/string.h>
++#include <linux/kobject.h>
++#include <linux/module.h>
++#include <linux/socket.h>
++#include <linux/skbuff.h>
++#include <linux/netlink.h>
++#include <net/sock.h>
++#include "psb_umevents.h"
++
++#define NETLINK_PSB_KOBJECT_UEVENT 31
++
++u64 psb_uevent_seqnum;
++char psb_uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
++static DEFINE_SPINLOCK(sequence_lock);
++#if defined(CONFIG_NET)
++static struct sock *uevent_sock;
++#endif
++
++/* the strings here must match the enum in include/linux/kobject.h */
++static const char *psb_kobject_actions[] = {
++ [KOBJ_ADD] = "add",
++ [KOBJ_REMOVE] = "remove",
++ [KOBJ_CHANGE] = "change",
++ [KOBJ_MOVE] = "move",
++ [KOBJ_ONLINE] = "online",
++ [KOBJ_OFFLINE] = "offline",
++};
++
++/**
++ * kobject_action_type - translate action string to numeric type
++ *
++ * @buf: buffer containing the action string, newline is ignored
++ * @count: length of buffer
++ * @type: pointer to the location to store the action type
++ *
++ * Returns 0 if the action string was recognized.
++ */
++int psb_kobject_action_type(const char *buf, size_t count,
++ enum kobject_action *type)
++{
++ enum kobject_action action;
++ int ret = -EINVAL;
++
++ if (count && (buf[count-1] == '\n' || buf[count-1] == '\0'))
++ count--;
++
++ if (!count)
++ goto out;
++
++ for (action = 0; action < ARRAY_SIZE(psb_kobject_actions); action++) {
++ if (strncmp(psb_kobject_actions[action], buf, count) != 0)
++ continue;
++ if (psb_kobject_actions[action][count] != '\0')
++ continue;
++ *type = action;
++ ret = 0;
++ break;
++ }
++out:
++ return ret;
++}
++
++/**
++ * psb_kobject_uevent_env - send an uevent with environmental data
++ *
++ * @action: action that is happening
++ * @kobj: struct kobject that the action is happening to
++ * @envp_ext: pointer to environmental data
++ *
++ * Returns 0 if kobject_uevent() is completed with success or the
++ * corresponding error when it fails.
++ */
++int psb_kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
++ char *envp_ext[], int dst_group_id)
++{
++ struct kobj_uevent_env *env;
++ const char *action_string = psb_kobject_actions[action];
++ const char *devpath = NULL;
++ const char *subsystem;
++ struct kobject *top_kobj;
++ struct kset *kset;
++ struct kset_uevent_ops *uevent_ops;
++ u64 seq;
++ int i = 0;
++ int retval = 0;
++
++ pr_debug("kobject: '%s' (%p): %s\n",
++ kobject_name(kobj), kobj, __func__);
++
++ /* search the kset we belong to */
++ top_kobj = kobj;
++ while (!top_kobj->kset && top_kobj->parent)
++ top_kobj = top_kobj->parent;
++
++ if (!top_kobj->kset) {
++ pr_debug("kobject: '%s' (%p): %s: attempted to send uevent "
++ "without kset!\n", kobject_name(kobj), kobj,
++ __func__);
++ return -EINVAL;
++ }
++
++ kset = top_kobj->kset;
++ uevent_ops = kset->uevent_ops;
++
++ /* skip the event, if uevent_suppress is set*/
++ if (kobj->uevent_suppress) {
++ pr_debug("kobject: '%s' (%p): %s: uevent_suppress "
++ "caused the event to drop!\n",
++ kobject_name(kobj), kobj, __func__);
++ return 0;
++ }
++ /* skip the event, if the filter returns zero. */
++ if (uevent_ops && uevent_ops->filter)
++ if (!uevent_ops->filter(kset, kobj)) {
++ pr_debug("kobject: '%s' (%p): %s: filter function "
++ "caused the event to drop!\n",
++ kobject_name(kobj), kobj, __func__);
++ return 0;
++ }
++
++ /* originating subsystem */
++ if (uevent_ops && uevent_ops->name)
++ subsystem = uevent_ops->name(kset, kobj);
++ else
++ subsystem = kobject_name(&kset->kobj);
++ if (!subsystem) {
++ pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the "
++ "event to drop!\n", kobject_name(kobj), kobj,
++ __func__);
++ return 0;
++ }
++
++ /* environment buffer */
++ env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
++ if (!env)
++ return -ENOMEM;
++
++ /* complete object path */
++ devpath = kobject_get_path(kobj, GFP_KERNEL);
++ if (!devpath) {
++ retval = -ENOENT;
++ goto exit;
++ }
++
++ /* default keys */
++ retval = add_uevent_var(env, "ACTION=%s", action_string);
++ if (retval)
++ goto exit;
++ retval = add_uevent_var(env, "DEVPATH=%s", devpath);
++ if (retval)
++ goto exit;
++ retval = add_uevent_var(env, "SUBSYSTEM=%s", subsystem);
++ if (retval)
++ goto exit;
++
++ /* keys passed in from the caller */
++ if (envp_ext) {
++ for (i = 0; envp_ext[i]; i++) {
++ retval = add_uevent_var(env, "%s", envp_ext[i]);
++ if (retval)
++ goto exit;
++ }
++ }
++
++ /* let the kset specific function add its stuff */
++ if (uevent_ops && uevent_ops->uevent) {
++ retval = uevent_ops->uevent(kset, kobj, env);
++ if (retval) {
++ pr_debug("kobject: '%s' (%p): %s: uevent() returned "
++ "%d\n", kobject_name(kobj), kobj,
++ __func__, retval);
++ goto exit;
++ }
++ }
++
++ /*
++ * Mark "add" and "remove" events in the object to ensure proper
++ * events to userspace during automatic cleanup. If the object did
++ * send an "add" event, "remove" will automatically generated by
++ * the core, if not already done by the caller.
++ */
++ if (action == KOBJ_ADD)
++ kobj->state_add_uevent_sent = 1;
++ else if (action == KOBJ_REMOVE)
++ kobj->state_remove_uevent_sent = 1;
++
++ /* we will send an event, so request a new sequence number */
++ spin_lock(&sequence_lock);
++ seq = ++psb_uevent_seqnum;
++ spin_unlock(&sequence_lock);
++ retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)seq);
++ if (retval)
++ goto exit;
++
++#if defined(CONFIG_NET)
++ /* send netlink message */
++ if (uevent_sock) {
++ struct sk_buff *skb;
++ size_t len;
++
++ /* allocate message with the maximum possible size */
++ len = strlen(action_string) + strlen(devpath) + 2;
++ skb = alloc_skb(len + env->buflen, GFP_KERNEL);
++ if (skb) {
++ char *scratch;
++
++ /* add header */
++ scratch = skb_put(skb, len);
++ sprintf(scratch, "%s@%s", action_string, devpath);
++
++ /* copy keys to our continuous event payload buffer */
++ for (i = 0; i < env->envp_idx; i++) {
++ len = strlen(env->envp[i]) + 1;
++ scratch = skb_put(skb, len);
++ strcpy(scratch, env->envp[i]);
++ }
++
++ NETLINK_CB(skb).dst_group = dst_group_id;
++ retval = netlink_broadcast(uevent_sock, skb, 0,
++ dst_group_id,
++ GFP_KERNEL);
++
++ /* ENOBUFS should be handled in userspace */
++ if (retval == -ENOBUFS)
++ retval = 0;
++ } else
++ retval = -ENOMEM;
++ }
++#endif
++
++ /* call psb_uevent_helper, usually only enabled during early boot */
++ if (psb_uevent_helper[0]) {
++ char *argv[3];
++
++ argv[0] = psb_uevent_helper;
++ argv[1] = (char *)subsystem;
++ argv[2] = NULL;
++ retval = add_uevent_var(env, "HOME=/");
++ if (retval)
++ goto exit;
++ retval = add_uevent_var(env,
++ "PATH=/sbin:/bin:/usr/sbin:/usr/bin");
++ if (retval)
++ goto exit;
++
++ retval = call_usermodehelper(argv[0], argv,
++ env->envp, UMH_WAIT_EXEC);
++ }
++
++exit:
++ kfree(devpath);
++ kfree(env);
++ return retval;
++}
++EXPORT_SYMBOL_GPL(psb_kobject_uevent_env);
++
++/**
++ * psb_kobject_uevent - notify userspace by sending an uevent
++ *
++ * @action: action that is happening
++ * @kobj: struct kobject that the action is happening to
++ *
++ * Returns 0 if psb_kobject_uevent() is completed with success or the
++ * corresponding error when it fails.
++ */
++int psb_kobject_uevent(struct kobject *kobj, enum kobject_action action,
++ int dst_group_id)
++{
++ return psb_kobject_uevent_env(kobj, action, NULL, dst_group_id);
++}
++EXPORT_SYMBOL_GPL(psb_kobject_uevent);
++
++/**
++ * psb_add_uevent_var - add key value string to the environment buffer
++ * @env: environment buffer structure
++ * @format: printf format for the key=value pair
++ *
++ * Returns 0 if environment variable was added successfully or -ENOMEM
++ * if no space was available.
++ */
++int psb_add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
++{
++ va_list args;
++ int len;
++
++ if (env->envp_idx >= ARRAY_SIZE(env->envp)) {
++ WARN(1, KERN_ERR "psb_add_uevent_var: too many keys\n");
++ return -ENOMEM;
++ }
++
++ va_start(args, format);
++ len = vsnprintf(&env->buf[env->buflen],
++ sizeof(env->buf) - env->buflen,
++ format, args);
++ va_end(args);
++
++ if (len >= (sizeof(env->buf) - env->buflen)) {
++ WARN(1,
++ KERN_ERR "psb_add_uevent_var: buffer size too small\n");
++ return -ENOMEM;
++ }
++
++ env->envp[env->envp_idx++] = &env->buf[env->buflen];
++ env->buflen += len + 1;
++ return 0;
++}
++EXPORT_SYMBOL_GPL(psb_add_uevent_var);
++
++#if defined(CONFIG_NET)
++static int __init psb_kobject_uevent_init(void)
++{
++ /* This should be the 15, but 3 seems to work better. Why? WHY!? */
++ /* uevent_sock = netlink_kernel_create(&init_net,
++ NETLINK_PSB_KOBJECT_UEVENT,
++ DRM_GFX_SOCKET_GROUPS,
++ NULL, NULL, THIS_MODULE); */
++ uevent_sock = netlink_kernel_create(&init_net,
++ NETLINK_PSB_KOBJECT_UEVENT,
++ 0x1, /* 3 is for hotplug & dpst */
++ NULL, NULL, THIS_MODULE);
++
++ if (!uevent_sock) {
++ printk(KERN_ERR "psb_kobject_uevent: failed create socket!\n");
++ return -ENODEV;
++ }
++ netlink_set_nonroot(NETLINK_PSB_KOBJECT_UEVENT, NL_NONROOT_RECV);
++
++ return 0;
++}
++
++postcore_initcall(psb_kobject_uevent_init);
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/psb_ttm_glue.c b/drivers/gpu/drm/mrst/drv/psb_ttm_glue.c
+new file mode 100644
+index 0000000..ad0e6ee
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_ttm_glue.c
+@@ -0,0 +1,344 @@
++/**************************************************************************
++ * Copyright (c) 2008, Intel Corporation.
++ * All Rights Reserved.
++ * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "ttm/ttm_userobj_api.h"
++#include <linux/io.h>
++
++/*IMG Headers*/
++#include "private_data.h"
++
++extern int PVRMMap(struct file *pFile, struct vm_area_struct *ps_vma);
++
++static struct vm_operations_struct psb_ttm_vm_ops;
++
++/**
++ * NOTE: driver_private of drm_file is now a PVRSRV_FILE_PRIVATE_DATA struct
++ * pPriv in PVRSRV_FILE_PRIVATE_DATA contains the original psb_fpriv;
++ */
++int psb_open(struct inode *inode, struct file *filp)
++{
++ struct drm_file *file_priv;
++ struct drm_psb_private *dev_priv;
++ struct psb_fpriv *psb_fp;
++ PVRSRV_FILE_PRIVATE_DATA *pvr_file_priv;
++ int ret;
++
++ DRM_DEBUG("\n");
++
++ ret = drm_open(inode, filp);
++ if (unlikely(ret))
++ return ret;
++
++ psb_fp = kzalloc(sizeof(*psb_fp), GFP_KERNEL);
++
++ if (unlikely(psb_fp == NULL))
++ goto out_err0;
++
++ file_priv = (struct drm_file *) filp->private_data;
++ dev_priv = psb_priv(file_priv->minor->dev);
++
++ DRM_DEBUG("is_master %d\n", file_priv->is_master ? 1 : 0);
++
++ psb_fp->tfile = ttm_object_file_init(dev_priv->tdev,
++ PSB_FILE_OBJECT_HASH_ORDER);
++ if (unlikely(psb_fp->tfile == NULL))
++ goto out_err1;
++
++ pvr_file_priv = (PVRSRV_FILE_PRIVATE_DATA *)file_priv->driver_priv;
++ if (!pvr_file_priv) {
++ DRM_ERROR("drm file private is NULL\n");
++ goto out_err1;
++ }
++
++ pvr_file_priv->pPriv = psb_fp;
++
++ if (unlikely(dev_priv->bdev.dev_mapping == NULL))
++ dev_priv->bdev.dev_mapping = dev_priv->dev->dev_mapping;
++
++ return 0;
++
++out_err1:
++ kfree(psb_fp);
++out_err0:
++ (void) drm_release(inode, filp);
++ return ret;
++}
++
++int psb_release(struct inode *inode, struct file *filp)
++{
++ struct drm_file *file_priv;
++ struct psb_fpriv *psb_fp;
++ struct drm_psb_private *dev_priv;
++ int ret;
++ uint32_t ui32_reg_value = 0;
++
++ file_priv = (struct drm_file *) filp->private_data;
++ psb_fp = psb_fpriv(file_priv);
++ dev_priv = psb_priv(file_priv->minor->dev);
++
++ ttm_object_file_release(&psb_fp->tfile);
++ kfree(psb_fp);
++
++ if (IS_MRST(dev_priv->dev))
++ {
++ schedule_delayed_work(&dev_priv->scheduler.topaz_suspend_wq, 10);
++ /* FIXME: workaround for HSD3469585
++ * re-enable DRAM Self Refresh Mode
++ * by setting DUNIT.DPMC0
++ */
++ ui32_reg_value = MSG_READ32(0x1, 0x4);
++ MSG_WRITE32(0x1, 0x4, (ui32_reg_value | (0x1 << 7)));
++ }
++
++ if (IS_MRST(dev_priv->dev))
++ schedule_delayed_work(&dev_priv->scheduler.msvdx_suspend_wq, 10);
++
++ ret = drm_release(inode, filp);
++
++ return ret;
++}
++
++int psb_fence_signaled_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++
++ return ttm_fence_signaled_ioctl(psb_fpriv(file_priv)->tfile, data);
++}
++
++int psb_fence_finish_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ return ttm_fence_finish_ioctl(psb_fpriv(file_priv)->tfile, data);
++}
++
++int psb_fence_unref_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ return ttm_fence_unref_ioctl(psb_fpriv(file_priv)->tfile, data);
++}
++
++int psb_pl_waitidle_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ return ttm_pl_waitidle_ioctl(psb_fpriv(file_priv)->tfile, data);
++}
++
++int psb_pl_setstatus_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ return ttm_pl_setstatus_ioctl(psb_fpriv(file_priv)->tfile,
++ &psb_priv(dev)->ttm_lock, data);
++
++}
++
++int psb_pl_synccpu_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ return ttm_pl_synccpu_ioctl(psb_fpriv(file_priv)->tfile, data);
++}
++
++int psb_pl_unref_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ return ttm_pl_unref_ioctl(psb_fpriv(file_priv)->tfile, data);
++
++}
++
++int psb_pl_reference_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ return ttm_pl_reference_ioctl(psb_fpriv(file_priv)->tfile, data);
++
++}
++
++int psb_pl_create_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_private *dev_priv = psb_priv(dev);
++
++ return ttm_pl_create_ioctl(psb_fpriv(file_priv)->tfile,
++ &dev_priv->bdev, &dev_priv->ttm_lock, data);
++
++}
++
++/**
++ * psb_ttm_fault - Wrapper around the ttm fault method.
++ *
++ * @vma: The struct vm_area_struct as in the vm fault() method.
++ * @vmf: The struct vm_fault as in the vm fault() method.
++ *
++ * Since ttm_fault() will reserve buffers while faulting,
++ * we need to take the ttm read lock around it, as this driver
++ * relies on the ttm_lock in write mode to exclude all threads from
++ * reserving and thus validating buffers in aperture- and memory shortage
++ * situations.
++ */
++
++static int psb_ttm_fault(struct vm_area_struct *vma,
++ struct vm_fault *vmf)
++{
++ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
++ vma->vm_private_data;
++ struct drm_psb_private *dev_priv =
++ container_of(bo->bdev, struct drm_psb_private, bdev);
++ int ret;
++
++ ret = ttm_read_lock(&dev_priv->ttm_lock, true);
++ if (unlikely(ret != 0))
++ return VM_FAULT_NOPAGE;
++
++ ret = dev_priv->ttm_vm_ops->fault(vma, vmf);
++
++ ttm_read_unlock(&dev_priv->ttm_lock);
++ return ret;
++}
++
++/**
++ * if vm_pgoff < DRM_PSB_FILE_PAGE_OFFSET call directly to
++ * PVRMMap
++ */
++int psb_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++ struct drm_file *file_priv;
++ struct drm_psb_private *dev_priv;
++ int ret;
++
++ if (vma->vm_pgoff < DRM_PSB_FILE_PAGE_OFFSET ||
++ vma->vm_pgoff > 2 * DRM_PSB_FILE_PAGE_OFFSET)
++ return PVRMMap(filp, vma);
++
++ file_priv = (struct drm_file *) filp->private_data;
++ dev_priv = psb_priv(file_priv->minor->dev);
++
++ ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev);
++ if (unlikely(ret != 0))
++ return ret;
++
++ if (unlikely(dev_priv->ttm_vm_ops == NULL)) {
++ dev_priv->ttm_vm_ops = (struct vm_operations_struct *)vma->vm_ops;
++ psb_ttm_vm_ops = *vma->vm_ops;
++ psb_ttm_vm_ops.fault = &psb_ttm_fault;
++ }
++
++ vma->vm_ops = &psb_ttm_vm_ops;
++
++ return 0;
++}
++
++ssize_t psb_ttm_write(struct file *filp, const char __user *buf,
++ size_t count, loff_t *f_pos)
++{
++ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
++ struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev);
++
++ return ttm_bo_io(&dev_priv->bdev, filp, buf, NULL, count, f_pos, 1);
++}
++
++ssize_t psb_ttm_read(struct file *filp, char __user *buf,
++ size_t count, loff_t *f_pos)
++{
++ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
++ struct drm_psb_private *dev_priv = psb_priv(file_priv->minor->dev);
++
++ return ttm_bo_io(&dev_priv->bdev, filp, NULL, buf, count, f_pos, 1);
++}
++
++int psb_verify_access(struct ttm_buffer_object *bo,
++ struct file *filp)
++{
++ struct drm_file *file_priv = (struct drm_file *)filp->private_data;
++
++ if (capable(CAP_SYS_ADMIN))
++ return 0;
++
++ if (unlikely(!file_priv->authenticated))
++ return -EPERM;
++
++ return ttm_pl_verify_access(bo, psb_fpriv(file_priv)->tfile);
++}
++
++static int psb_ttm_mem_global_init(struct drm_global_reference *ref)
++{
++ return ttm_mem_global_init(ref->object);
++}
++
++static void psb_ttm_mem_global_release(struct drm_global_reference *ref)
++{
++ ttm_mem_global_release(ref->object);
++}
++
++int psb_ttm_global_init(struct drm_psb_private *dev_priv)
++{
++ struct drm_global_reference *global_ref;
++ int ret;
++
++ global_ref = &dev_priv->mem_global_ref;
++ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
++ global_ref->size = sizeof(struct ttm_mem_global);
++ global_ref->init = &psb_ttm_mem_global_init;
++ global_ref->release = &psb_ttm_mem_global_release;
++
++ ret = drm_global_item_ref(global_ref);
++ if (unlikely(ret != 0)) {
++ DRM_ERROR("Failed referencing a global TTM memory object.\n");
++ return ret;
++ }
++
++ return 0;
++}
++
++void psb_ttm_global_release(struct drm_psb_private *dev_priv)
++{
++ drm_global_item_unref(&dev_priv->mem_global_ref);
++}
++
++int psb_getpageaddrs_ioctl(struct drm_device *dev, void *data,
++ struct drm_file *file_priv)
++{
++ struct drm_psb_getpageaddrs_arg *arg = data;
++ struct ttm_buffer_object *bo;
++ struct ttm_tt *ttm;
++ struct page **tt_pages;
++ unsigned long i, num_pages;
++ unsigned long *p = arg->page_addrs;
++ int ret = 0;
++
++ bo = ttm_buffer_object_lookup(psb_fpriv(file_priv)->tfile,
++ arg->handle);
++ if (unlikely(bo == NULL)) {
++ printk(KERN_ERR
++ "Could not find buffer object for getpageaddrs.\n");
++ return -EINVAL;
++ }
++
++ arg->gtt_offset = bo->offset;
++ ttm = bo->ttm;
++ num_pages = ttm->num_pages;
++ tt_pages = ttm->pages;
++
++ for (i = 0; i < num_pages; i++)
++ p[i] = (unsigned long)page_to_phys(tt_pages[i]);
++
++ return ret;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_umevents.c b/drivers/gpu/drm/mrst/drv/psb_umevents.c
+new file mode 100644
+index 0000000..d9bf3c1
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_umevents.c
+@@ -0,0 +1,485 @@
++/*
++ * Copyright © 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * James C. Gualario <james.c.gualario@intel.com>
++ *
++ */
++#include "psb_umevents.h"
++/**
++ * define sysfs operations supported by umevent objects.
++ *
++ */
++static struct sysfs_ops umevent_obj_sysfs_ops = {
++ .show = psb_umevent_attr_show,
++ .store = psb_umevent_attr_store,
++};
++/**
++ * define the data attributes we will expose through sysfs.
++ *
++ */
++static struct umevent_attribute data_0 =
++ __ATTR(data_0_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++static struct umevent_attribute data_1 =
++ __ATTR(data_1_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++static struct umevent_attribute data_2 =
++ __ATTR(data_2_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++static struct umevent_attribute data_3 =
++ __ATTR(data_3_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++static struct umevent_attribute data_4 =
++ __ATTR(data_4_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++static struct umevent_attribute data_5 =
++ __ATTR(data_5_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++static struct umevent_attribute data_6 =
++ __ATTR(data_6_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++static struct umevent_attribute data_7 =
++ __ATTR(data_7_val, 0666, psb_umevent_attr_show_imp,
++ psb_umevent_attr_store_imp);
++/**
++ * define the structure used to seed our ktype.
++ *
++ */
++static struct attribute *umevent_obj_default_attrs[] = {
++ &data_0.attr,
++ &data_1.attr,
++ &data_2.attr,
++ &data_3.attr,
++ &data_4.attr,
++ &data_5.attr,
++ &data_6.attr,
++ &data_7.attr,
++ NULL, /* need to NULL terminate the list of attributes */
++};
++/**
++ * specify the ktype for our kobjects.
++ *
++ */
++static struct kobj_type umevent_obj_ktype = {
++ .sysfs_ops = &umevent_obj_sysfs_ops,
++ .release = psb_umevent_obj_release,
++ .default_attrs = umevent_obj_default_attrs,
++};
++/**
++ * psb_umevent_attr_show - default kobject show function
++ *
++ * @kobj: kobject associated with the show operation
++ * @attr: attribute being requested
++ * @buf: pointer to the return buffer
++ *
++ */
++ssize_t psb_umevent_attr_show(struct kobject *kobj,
++ struct attribute *attr,
++ char *buf)
++{
++ struct umevent_attribute *attribute;
++ struct umevent_obj *any_umevent_obj;
++ attribute = to_umevent_attr(attr);
++ any_umevent_obj = to_umevent_obj(kobj);
++ if (!attribute->show)
++ return -EIO;
++
++ return attribute->show(any_umevent_obj, attribute, buf);
++}
++/**
++ * psb_umevent_attr_store - default kobject store function
++ *
++ * @kobj: kobject associated with the store operation
++ * @attr: attribute being requested
++ * @buf: input data to write to attribute
++ * @len: character count
++ *
++ */
++ssize_t psb_umevent_attr_store(struct kobject *kobj,
++ struct attribute *attr,
++ const char *buf, size_t len)
++{
++ struct umevent_attribute *attribute;
++ struct umevent_obj *any_umevent_obj;
++ attribute = to_umevent_attr(attr);
++ any_umevent_obj = to_umevent_obj(kobj);
++ if (!attribute->store)
++ return -EIO;
++
++ return attribute->store(any_umevent_obj, attribute, buf, len);
++}
++/**
++ * psb_umevent_obj_release - kobject release function
++ *
++ * @kobj: kobject to be released.
++ */
++void psb_umevent_obj_release(struct kobject *kobj)
++{
++ struct umevent_obj *any_umevent_obj;
++ any_umevent_obj = to_umevent_obj(kobj);
++ kfree(any_umevent_obj);
++}
++/**
++ * psb_umevent_attr_show_imp - attribute show implementation
++ *
++ * @any_umevent_obj: kobject managed data to read from
++ * @attr: attribute being requested
++ * @buf: pointer to the return buffer
++ *
++ */
++ssize_t psb_umevent_attr_show_imp(struct umevent_obj
++ *any_umevent_obj,
++ struct umevent_attribute *attr,
++ char *buf)
++{
++ int var;
++
++ if (strcmp(attr->attr.name, "data_0_val") == 0)
++ var = any_umevent_obj->data_0_val;
++ else if (strcmp(attr->attr.name, "data_1_val") == 0)
++ var = any_umevent_obj->data_1_val;
++ else if (strcmp(attr->attr.name, "data_2_val") == 0)
++ var = any_umevent_obj->data_2_val;
++ else if (strcmp(attr->attr.name, "data_3_val") == 0)
++ var = any_umevent_obj->data_3_val;
++ else if (strcmp(attr->attr.name, "data_4_val") == 0)
++ var = any_umevent_obj->data_4_val;
++ else if (strcmp(attr->attr.name, "data_5_val") == 0)
++ var = any_umevent_obj->data_5_val;
++ else if (strcmp(attr->attr.name, "data_6_val") == 0)
++ var = any_umevent_obj->data_6_val;
++ else
++ var = any_umevent_obj->data_7_val;
++
++ return sprintf(buf, "%d\n", var);
++}
++/**
++ * psb_umevent_attr_store_imp - attribute store implementation
++ *
++ * @any_umevent_obj: kobject managed data to write to
++ * @attr: attribute being requested
++ * @buf: input data to write to attribute
++ * @count: character count
++ *
++ */
++ssize_t psb_umevent_attr_store_imp(struct umevent_obj
++ *any_umevent_obj,
++ struct umevent_attribute *attr,
++ const char *buf, size_t count)
++{
++ int var;
++
++ sscanf(buf, "%du", &var);
++ if (strcmp(attr->attr.name, "data_0_val") == 0)
++ any_umevent_obj->data_0_val = var;
++ else if (strcmp(attr->attr.name, "data_1_val") == 0)
++ any_umevent_obj->data_1_val = var;
++ else if (strcmp(attr->attr.name, "data_2_val") == 0)
++ any_umevent_obj->data_2_val = var;
++ else if (strcmp(attr->attr.name, "data_3_val") == 0)
++ any_umevent_obj->data_3_val = var;
++ else if (strcmp(attr->attr.name, "data_4_val") == 0)
++ any_umevent_obj->data_4_val = var;
++ else if (strcmp(attr->attr.name, "data_5_val") == 0)
++ any_umevent_obj->data_5_val = var;
++ else if (strcmp(attr->attr.name, "data_6_val") == 0)
++ any_umevent_obj->data_6_val = var;
++ else
++ any_umevent_obj->data_7_val = var;
++ return count;
++}
++/**
++ * psb_create_umevent_obj - create and track new event objects
++ *
++ * @name: name to give to new sysfs / kobject entry
++ * @list: event object list to track the kobject in
++ */
++struct umevent_obj *psb_create_umevent_obj(const char *name,
++ struct umevent_list
++ *list)
++{
++ struct umevent_obj *new_umevent_obj;
++ int retval;
++ new_umevent_obj = kzalloc(sizeof(*new_umevent_obj),
++ GFP_KERNEL);
++ if (!new_umevent_obj)
++ return NULL;
++
++ new_umevent_obj->kobj.kset = list->umevent_disp_pool;
++ retval = kobject_init_and_add(&new_umevent_obj->kobj,
++ &umevent_obj_ktype, NULL,
++ "%s", name);
++ if (retval) {
++ kobject_put(&new_umevent_obj->kobj);
++ return NULL;
++ }
++ psb_umevent_add_to_list(list, new_umevent_obj);
++ return new_umevent_obj;
++}
++EXPORT_SYMBOL(psb_create_umevent_obj);
++/**
++ * psb_umevent_notify - info user mode of a new device
++ *
++ * @notify_disp_obj: event object to perform notification for
++ *
++ */
++void psb_umevent_notify(struct umevent_obj *notify_disp_obj)
++{
++ kobject_uevent(&notify_disp_obj->kobj, KOBJ_ADD);
++}
++EXPORT_SYMBOL(psb_umevent_notify);
++/**
++ * psb_umevent_notify_change - notify user mode of a change to a device
++ *
++ * @notify_disp_obj: event object to perform notification for
++ *
++ */
++void psb_umevent_notify_change(struct umevent_obj *notify_disp_obj)
++{
++ kobject_uevent(&notify_disp_obj->kobj, KOBJ_CHANGE);
++}
++EXPORT_SYMBOL(psb_umevent_notify_change);
++/**
++ * psb_umevent_notify_change - notify user mode of a change to a device
++ *
++ * @notify_disp_obj: event object to perform notification for
++ *
++ */
++void psb_umevent_notify_change_gfxsock(struct umevent_obj *notify_disp_obj,
++ int dst_group_id)
++{
++ psb_kobject_uevent(&notify_disp_obj->kobj, KOBJ_CHANGE, dst_group_id);
++}
++EXPORT_SYMBOL(psb_umevent_notify_change_gfxsock);
++/**
++ * psb_destroy_umvent_obj - decrement ref count on event so kernel can kill it
++ *
++ * @any_umevent_obj: event object to destroy
++ *
++ */
++void psb_destroy_umevent_obj(struct umevent_obj
++ *any_umevent_obj)
++{
++ kobject_put(&any_umevent_obj->kobj);
++}
++/**
++ *
++ * psb_umevent_init - init the event pool
++ *
++ * @parent_kobj: parent kobject to associate new kset with
++ * @new_umevent_list: event list to associate kset with
++ * @name: name to give to new sysfs entry
++ *
++ */
++int psb_umevent_init(struct kobject *parent_kobj,
++ struct umevent_list *new_umevent_list,
++ const char *name)
++{
++ psb_umevent_init_list(new_umevent_list);
++ new_umevent_list->umevent_disp_pool = kset_create_and_add(name, NULL,
++ parent_kobj);
++ if (!new_umevent_list->umevent_disp_pool)
++ return -ENOMEM;
++
++ return 0;
++}
++EXPORT_SYMBOL(psb_umevent_init);
++/**
++ *
++ * psb_umevent_cleanup - cleanup all event objects
++ *
++ * @kill_list: list of events to destroy
++ *
++ */
++void psb_umevent_cleanup(struct umevent_list *kill_list)
++{
++ psb_umevent_destroy_list(kill_list);
++}
++EXPORT_SYMBOL(psb_umevent_cleanup);
++/**
++ * psb_umevent_add_to_list - add an event to the event list
++ *
++ * @list: list to add the event to
++ * @umevent_obj_to_add: event to add
++ *
++ */
++void psb_umevent_add_to_list(struct umevent_list *list,
++ struct umevent_obj *umevent_obj_to_add)
++{
++ unsigned long flags;
++ spin_lock_irqsave(&list->list_lock, flags);
++ list_add(&umevent_obj_to_add->head, &list->head);
++ spin_unlock_irqrestore(&list->list_lock, flags);
++}
++/**
++ * psb_umevent_init_list - initialize event list
++ *
++ * @list: list to initialize
++ *
++ */
++void psb_umevent_init_list(struct umevent_list *list)
++{
++ spin_lock_init(&list->list_lock);
++ INIT_LIST_HEAD(&list->head);
++}
++/**
++ * psb_umevent_create_list - allocate an event list
++ *
++ */
++struct umevent_list *psb_umevent_create_list()
++{
++ struct umevent_list *new_umevent_list;
++ new_umevent_list = NULL;
++ new_umevent_list = kmalloc(sizeof(struct umevent_list),
++ GFP_ATOMIC);
++ return new_umevent_list;
++}
++EXPORT_SYMBOL(psb_umevent_create_list);
++/**
++ * psb_umevent_destroy_list - destroy a list and clean up all mem
++ *
++ * @list: list to destroy and clean up after
++ *
++ */
++void psb_umevent_destroy_list(struct umevent_list *list)
++{
++ struct umevent_obj *umevent_obj_curr;
++ struct list_head *node;
++ struct list_head *node_kill;
++ int i;
++ i = 0;
++ node = NULL;
++ node_kill = NULL;
++ node = list->head.next;
++ while (node != (&list->head)) {
++ umevent_obj_curr = list_entry(node,
++ struct umevent_obj,
++ head);
++ node_kill = node;
++ node = umevent_obj_curr->head.next;
++ psb_destroy_umevent_obj(umevent_obj_curr);
++ umevent_obj_curr = NULL;
++ list_del(node_kill);
++ i++;
++ }
++ kset_unregister(list->umevent_disp_pool);
++ kfree(list);
++}
++/**
++ * psb_umevent_remove_from_list - remove an event from tracking list
++ *
++ * @list: list to remove the event from
++ * @disp_to_remove: name of event to remove.
++ *
++ */
++void psb_umevent_remove_from_list(struct umevent_list *list,
++ const char *disp_to_remove)
++{
++ struct umevent_obj *umevent_obj_curr = NULL;
++ struct list_head *node = NULL;
++ struct list_head *node_kill = NULL;
++ int i = 0;
++ int found_match = 0;
++ i = 0;
++ node = NULL;
++ node_kill = NULL;
++ node = list->head.next;
++ while (node != (&list->head)) {
++ umevent_obj_curr = list_entry(node,
++ struct umevent_obj, head);
++ if (strcmp(umevent_obj_curr->kobj.name,
++ disp_to_remove) == 0) {
++ found_match = 1;
++ break;
++ }
++ node = NULL;
++ node = umevent_obj_curr->head.next;
++ i++;
++ }
++ if (found_match == 1) {
++ node_kill = node;
++ node = umevent_obj_curr->head.next;
++ psb_destroy_umevent_obj(umevent_obj_curr);
++ umevent_obj_curr = NULL;
++ list_del(node_kill);
++ }
++}
++EXPORT_SYMBOL(psb_umevent_remove_from_list);
++/**
++ * psb_umevent_find_obj - find an event in a tracking list
++ *
++ * @name: name of the event to find
++ * @list: list to find the event in
++ *
++ */
++struct umevent_obj *psb_umevent_find_obj(const char *name,
++ struct umevent_list *list)
++{
++ struct umevent_obj *umevent_obj_curr = NULL;
++ struct list_head *node = NULL;
++ struct list_head *node_find = NULL;
++ int i = 0;
++ int found_match = 0;
++ i = 0;
++ node = NULL;
++ node_find = NULL;
++ node = list->head.next;
++ while (node != (&list->head)) {
++ umevent_obj_curr = list_entry(node,
++ struct umevent_obj, head);
++ if (strcmp(umevent_obj_curr->kobj.name,
++ name) == 0) {
++ found_match = 1;
++ break;
++ }
++ node = NULL;
++ node = umevent_obj_curr->head.next;
++ i++;
++ }
++ if (found_match == 1)
++ return umevent_obj_curr;
++
++ return NULL;
++}
++EXPORT_SYMBOL(psb_umevent_find_obj);
++/**
++ * psb_umevent_debug_dump_list - debug list dump
++ *
++ * @list: list to dump
++ *
++ */
++void psb_umevent_debug_dump_list(struct umevent_list *list)
++{
++ struct umevent_obj *umevent_obj_curr;
++ unsigned long flags;
++ struct list_head *node;
++ int i;
++ spin_lock_irqsave(&list->list_lock, flags);
++ i = 0;
++ node = NULL;
++ node = list->head.next;
++ while (node != (&list->head)) {
++ umevent_obj_curr = list_entry(node,
++ struct umevent_obj,
++ head);
++ /*TBD: DUMP ANY REQUIRED VALUES WITH PRINTK*/
++ node = NULL;
++ node = umevent_obj_curr->head.next;
++ i++;
++ }
++ spin_unlock_irqrestore(&list->list_lock, flags);
++}
+diff --git a/drivers/gpu/drm/mrst/drv/psb_umevents.h b/drivers/gpu/drm/mrst/drv/psb_umevents.h
+new file mode 100644
+index 0000000..868bee4
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/psb_umevents.h
+@@ -0,0 +1,154 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * James C. Gualario <james.c.gualario@intel.com>
++ *
++ */
++#ifndef _PSB_UMEVENT_H_
++#define _PSB_UMEVENT_H_
++/**
++ * required includes
++ *
++ */
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <drm/drmP.h>
++#include <drm/drm_core.h>
++#include <drm/drm_pciids.h>
++#include <linux/spinlock.h>
++/**
++ * event groups for routing to different user mode threads
++ *
++ */
++#define DRM_DPST_SOCKET_GROUP_ID 1
++#define DRM_HOTPLUG_SOCKET_GROUP_ID 2
++#define DRM_HDMI_AUDIO_SOCKET_GROUP 4
++#define DRM_HDMI_HDCP_SOCKET_GROUP 8
++#define DRM_GFX_SOCKET_GROUPS 15
++/**
++ * event structure managed by kobjects
++ *
++ */
++struct umevent_obj {
++ struct kobject kobj;
++ struct list_head head;
++ int data_0_val;
++ int data_1_val;
++ int data_2_val;
++ int data_3_val;
++ int data_4_val;
++ int data_5_val;
++ int data_6_val;
++ int data_7_val;
++};
++/**
++ * event tracking list element
++ *
++ */
++struct umevent_list{
++ struct list_head head;
++ struct kset *umevent_disp_pool;
++ spinlock_t list_lock;
++};
++/**
++ * to go back and forth between kobjects and their main container
++ *
++ */
++#define to_umevent_obj(x) \
++ container_of(x, struct umevent_obj, kobj)
++
++/**
++ * event attributes exposed via sysfs
++ *
++ */
++struct umevent_attribute {
++ struct attribute attr;
++ ssize_t (*show)(struct umevent_obj *any_umevent_obj,
++ struct umevent_attribute *attr, char *buf);
++ ssize_t (*store)(struct umevent_obj *any_umevent_obj,
++ struct umevent_attribute *attr,
++ const char *buf, size_t count);
++};
++/**
++ * to go back and forth between the attribute passed to us by the OS
++ * and the umevent_attribute
++ *
++ */
++#define to_umevent_attr(x) \
++ container_of(x, struct umevent_attribute, \
++ attr)
++
++/**
++ * umevent function prototypes
++ *
++ */
++extern struct umevent_obj *psb_create_umevent_obj(const char *name,
++ struct umevent_list
++ *list);
++extern ssize_t psb_umevent_attr_show(struct kobject *kobj,
++ struct attribute *attr, char *buf);
++extern ssize_t psb_umevent_attr_store(struct kobject *kobj,
++ struct attribute *attr,
++ const char *buf, size_t len);
++extern ssize_t psb_umevent_attr_show_imp(struct umevent_obj
++ *any_umevent_obj,
++ struct umevent_attribute *attr,
++ char *buf);
++extern ssize_t psb_umevent_attr_store_imp(struct umevent_obj
++ *any_umevent_obj,
++ struct umevent_attribute *attr,
++ const char *buf, size_t count);
++extern void psb_umevent_cleanup(struct umevent_list *kill_list);
++extern int psb_umevent_init(struct kobject *parent_kobj,
++ struct umevent_list *new_umevent_list,
++ const char *name);
++extern void psb_umevent_init_list(struct umevent_list *list);
++extern void psb_umevent_debug_dump_list(struct umevent_list *list);
++extern void psb_umevent_add_to_list(struct umevent_list *list,
++ struct umevent_obj
++ *umevent_obj_to_add);
++extern void psb_umevent_destroy_list(struct umevent_list *list);
++extern struct umevent_list *psb_umevent_create_list(void);
++extern void psb_umevent_notify(struct umevent_obj *notify_disp_obj);
++extern void psb_umevent_obj_release(struct kobject *kobj);
++extern void psb_umevent_remove_from_list(struct umevent_list *list,
++ const char *disp_to_remove);
++extern void psb_umevent_workqueue_dispatch(int work_type, const char *name,
++ struct umevent_list *list);
++extern void psb_umevent_notify_change(struct umevent_obj *notify_disp_obj);
++extern void psb_umevent_notify_change_gfxsock(struct umevent_obj
++ *notify_disp_obj,
++ int dst_group_id);
++extern struct umevent_obj *psb_umevent_find_obj(const char *name,
++ struct umevent_list
++ *list);
++/**
++ * socket function prototypes
++ *
++ */
++extern int psb_kobject_uevent(struct kobject *kobj,
++ enum kobject_action action, int dst_group_id);
++extern int psb_kobject_uevent_env(struct kobject *kobj,
++ enum kobject_action action,
++ char *envp[], int dst_group_id);
++int psb_add_uevent_var(struct kobj_uevent_env *env,
++ const char *format, ...)
++ __attribute__((format (printf, 2, 3)));
++int psb_kobject_action_type(const char *buf,
++ size_t count, enum kobject_action *type);
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/topaz_power.c b/drivers/gpu/drm/mrst/drv/topaz_power.c
+new file mode 100644
+index 0000000..7481390
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/topaz_power.c
+@@ -0,0 +1,173 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Author:binglin.chen@intel.com
++ */
++
++#include "topaz_power.h"
++#include "lnc_topaz.h"
++#include "psb_drv.h"
++#include "sysirq.h"
++#include "ospm_power.h"
++
++#include "services_headers.h"
++#include "sysconfig.h"
++
++static PVRSRV_ERROR DevInitTOPAZPart1(IMG_VOID *pvDeviceNode)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
++ PVRSRV_ERROR eError;
++ PVRSRV_DEV_POWER_STATE eDefaultPowerState;
++
++ /* register power operation function */
++ /* FIXME: this should be in part2 init function, but
++ * currently here only OSPM needs IMG device... */
++ eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF;
++ eError = PVRSRVRegisterPowerDevice(psDeviceNode->sDevId.ui32DeviceIndex,
++ TOPAZPrePowerState,
++ TOPAZPostPowerState,
++ TOPAZPreClockSpeedChange,
++ TOPAZPostClockSpeedChange,
++ (IMG_HANDLE)psDeviceNode,
++ PVRSRV_DEV_POWER_STATE_ON,
++ eDefaultPowerState);
++ if (eError != PVRSRV_OK) {
++ PVR_DPF((PVR_DBG_ERROR, "DevInitTOPAZPart1: failed to "
++ "register device with power manager"));
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR DevDeInitTOPAZ(IMG_VOID *pvDeviceNode)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
++ PVRSRV_ERROR eError;
++
++ /* should deinit all resource */
++
++ eError = PVRSRVRemovePowerDevice(psDeviceNode->sDevId.ui32DeviceIndex);
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR TOPAZDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ /* version check */
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR TOPAZRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_TOPAZ;
++ psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_VIDEO;
++
++ psDeviceNode->pfnInitDevice = DevInitTOPAZPart1;
++ psDeviceNode->pfnDeInitDevice = DevDeInitTOPAZ;
++
++ psDeviceNode->pfnInitDeviceCompatCheck = TOPAZDevInitCompatCheck;
++
++ psDeviceNode->pfnDeviceISR = lnc_topaz_interrupt;
++ psDeviceNode->pvISRData = (IMG_VOID *)gpDrmDevice;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR TOPAZPrePowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ /* ask for a change not power on*/
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON)) {
++ struct drm_psb_private *dev_priv = gpDrmDevice->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++ TOPAZ_NEW_PMSTATE(gpDrmDevice, topaz_priv, PSB_PMSTATE_POWERDOWN);
++
++ /* context save */
++ /* context save require irq disable first */
++ sysirq_uninstall_islands(gpDrmDevice, OSPM_VIDEO_ENC_ISLAND);
++ lnc_topaz_save_mtx_state(gpDrmDevice);
++
++ /* internally close the device */
++
++ /* ask for power off */
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF) {
++ /* here will deinitialize the driver if needed */
++ lnc_unmap_topaz_reg(gpDrmDevice);
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s no action for transform from %d to %d",
++ __func__,
++ eCurrentPowerState,
++ eNewPowerState));
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR TOPAZPostPowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ /* if ask for change & current status is not on */
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON)) {
++ /* internally open device */
++ struct drm_psb_private *dev_priv = gpDrmDevice->dev_private;
++ struct topaz_private *topaz_priv = dev_priv->topaz_private;
++ TOPAZ_NEW_PMSTATE(gpDrmDevice, topaz_priv, PSB_PMSTATE_POWERUP);
++
++ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF) {
++ /* here will initialize the driver if needed */
++ lnc_map_topaz_reg(gpDrmDevice);
++ } else {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s no action for transform from %d to %d",
++ __func__,
++ eCurrentPowerState,
++ eNewPowerState));
++ }
++
++ /* context restore */
++ sysirq_uninstall_islands(gpDrmDevice, OSPM_VIDEO_ENC_ISLAND);
++ lnc_topaz_restore_mtx_state(gpDrmDevice);
++ sysirq_preinstall_islands(gpDrmDevice, OSPM_VIDEO_ENC_ISLAND);
++ sysirq_postinstall_islands(gpDrmDevice, OSPM_VIDEO_ENC_ISLAND);
++
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR TOPAZPreClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR TOPAZPostClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ return PVRSRV_OK;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/topaz_power.h b/drivers/gpu/drm/mrst/drv/topaz_power.h
+new file mode 100644
+index 0000000..beb6114
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/topaz_power.h
+@@ -0,0 +1,53 @@
++/*
++** topaz_power.h
++** Login : <binglin.chen@intel.com>
++** Started on Mon Nov 16 13:31:42 2009 brady
++**
++** Copyright (C) 2009 brady
++** This program is free software; you can redistribute it and/or modify
++** it under the terms of the GNU General Public License as published by
++** the Free Software Foundation; either version 2 of the License, or
++** (at your option) any later version.
++**
++** This program is distributed in the hope that it will be useful,
++** but WITHOUT ANY WARRANTY; without even the implied warranty of
++** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++** GNU General Public License for more details.
++**
++** You should have received a copy of the GNU General Public License
++** along with this program; if not, write to the Free Software
++** Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++*/
++
++#ifndef TOPAZ_POWER_H_
++#define TOPAZ_POWER_H_
++
++#include "services_headers.h"
++#include "sysconfig.h"
++
++extern struct drm_device *gpDrmDevice;
++
++/* function define */
++PVRSRV_ERROR TOPAZRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
++PVRSRV_ERROR TOPAZDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++/* power function define */
++PVRSRV_ERROR TOPAZPrePowerState(
++ IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR TOPAZPostPowerState(
++ IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR TOPAZPreClockSpeedChange(
++ IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR TOPAZPostClockSpeedChange(
++ IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR TOPAZInitOSPM(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++#endif /* !TOPAZ_POWER_H_ */
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_agp_backend.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_agp_backend.c
+new file mode 100644
+index 0000000..8eb830a
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_agp_backend.c
+@@ -0,0 +1,144 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ * Keith Packard.
++ */
++
++#include "ttm_bo_driver.h"
++#ifdef TTM_HAS_AGP
++#include "ttm_placement_common.h"
++#include <linux/agp_backend.h>
++#include <asm/agp.h>
++#include <linux/io.h>
++
++struct ttm_agp_backend {
++ struct ttm_backend backend;
++ struct agp_memory *mem;
++ struct agp_bridge_data *bridge;
++};
++
++static int ttm_agp_populate(struct ttm_backend *backend,
++ unsigned long num_pages, struct page **pages,
++ struct page *dummy_read_page)
++{
++ struct ttm_agp_backend *agp_be =
++ container_of(backend, struct ttm_agp_backend, backend);
++ struct page **cur_page, **last_page = pages + num_pages;
++ struct agp_memory *mem;
++
++ mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
++ if (unlikely(mem == NULL))
++ return -ENOMEM;
++
++ mem->page_count = 0;
++ for (cur_page = pages; cur_page < last_page; ++cur_page) {
++ struct page *page = *cur_page;
++ if (!page)
++ page = dummy_read_page;
++
++ #if 0
++ mem->memory[mem->page_count++] =
++ phys_to_gart(page_to_phys(page));
++ #endif
++ }
++ agp_be->mem = mem;
++ return 0;
++}
++
++static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
++{
++ struct ttm_agp_backend *agp_be =
++ container_of(backend, struct ttm_agp_backend, backend);
++ struct agp_memory *mem = agp_be->mem;
++ int cached = (bo_mem->flags & TTM_PL_FLAG_CACHED);
++ int ret;
++
++ mem->is_flushed = 1;
++ mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
++
++ ret = agp_bind_memory(mem, bo_mem->mm_node->start);
++ if (ret)
++ printk(KERN_ERR "AGP Bind memory failed.\n");
++
++ return ret;
++}
++
++static int ttm_agp_unbind(struct ttm_backend *backend)
++{
++ struct ttm_agp_backend *agp_be =
++ container_of(backend, struct ttm_agp_backend, backend);
++
++ if (agp_be->mem->is_bound)
++ return agp_unbind_memory(agp_be->mem);
++ else
++ return 0;
++}
++
++static void ttm_agp_clear(struct ttm_backend *backend)
++{
++ struct ttm_agp_backend *agp_be =
++ container_of(backend, struct ttm_agp_backend, backend);
++ struct agp_memory *mem = agp_be->mem;
++
++ if (mem) {
++ ttm_agp_unbind(backend);
++ agp_free_memory(mem);
++ }
++ agp_be->mem = NULL;
++}
++
++static void ttm_agp_destroy(struct ttm_backend *backend)
++{
++ struct ttm_agp_backend *agp_be =
++ container_of(backend, struct ttm_agp_backend, backend);
++
++ if (agp_be->mem)
++ ttm_agp_clear(backend);
++ kfree(agp_be);
++}
++
++static struct ttm_backend_func ttm_agp_func = {
++ .populate = ttm_agp_populate,
++ .clear = ttm_agp_clear,
++ .bind = ttm_agp_bind,
++ .unbind = ttm_agp_unbind,
++ .destroy = ttm_agp_destroy,
++};
++
++struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
++ struct agp_bridge_data *bridge)
++{
++ struct ttm_agp_backend *agp_be;
++
++ agp_be = kmalloc(sizeof(*agp_be), GFP_KERNEL);
++ if (!agp_be)
++ return NULL;
++
++ agp_be->mem = NULL;
++ agp_be->bridge = bridge;
++ agp_be->backend.func = &ttm_agp_func;
++ agp_be->backend.bdev = bdev;
++ return &agp_be->backend;
++}
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_bo.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_bo.c
+new file mode 100644
+index 0000000..2d738b6
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_bo.c
+@@ -0,0 +1,1729 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm_bo_driver.h"
++#include "ttm_placement_common.h"
++#include <linux/jiffies.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/file.h>
++
++#define TTM_ASSERT_LOCKED(param)
++#define TTM_DEBUG(fmt, arg...)
++#define TTM_BO_HASH_ORDER 13
++
++static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
++static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
++static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
++
++static inline uint32_t ttm_bo_type_flags(unsigned type)
++{
++ uint32_t return_type = 1 << (type);
++ return return_type;
++}
++
++static void ttm_bo_release_list(struct kref *list_kref)
++{
++ struct ttm_buffer_object *bo =
++ container_of(list_kref, struct ttm_buffer_object, list_kref);
++ struct ttm_bo_device *bdev = bo->bdev;
++
++ BUG_ON(atomic_read(&bo->list_kref.refcount));
++ BUG_ON(atomic_read(&bo->kref.refcount));
++ BUG_ON(atomic_read(&bo->cpu_writers));
++ BUG_ON(bo->sync_obj != NULL);
++ BUG_ON(bo->mem.mm_node != NULL);
++ BUG_ON(!list_empty(&bo->lru));
++ BUG_ON(!list_empty(&bo->ddestroy));
++
++ if (bo->ttm)
++ ttm_tt_destroy(bo->ttm);
++ if (bo->destroy)
++ bo->destroy(bo);
++ else {
++ ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
++ kfree(bo);
++ }
++}
++
++int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
++{
++
++ if (interruptible) {
++ int ret = 0;
++
++ ret = wait_event_interruptible(bo->event_queue,
++ atomic_read(&bo->reserved) == 0);
++ if (unlikely(ret != 0))
++ return -ERESTART;
++ } else {
++ wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
++ }
++ return 0;
++}
++
++static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_mem_type_manager *man;
++
++ BUG_ON(!atomic_read(&bo->reserved));
++
++ if (!(bo->mem.flags & TTM_PL_FLAG_NO_EVICT)) {
++
++ BUG_ON(!list_empty(&bo->lru));
++
++ man = &bdev->man[bo->mem.mem_type];
++ list_add_tail(&bo->lru, &man->lru);
++ kref_get(&bo->list_kref);
++
++ if (bo->ttm != NULL) {
++ list_add_tail(&bo->swap, &bdev->swap_lru);
++ kref_get(&bo->list_kref);
++ }
++ }
++}
++
++/*
++ * Call with bdev->lru_lock and bdev->global->swap_lock held..
++ */
++
++static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
++{
++ int put_count = 0;
++
++ if (!list_empty(&bo->swap)) {
++ list_del_init(&bo->swap);
++ ++put_count;
++ }
++ if (!list_empty(&bo->lru)) {
++ list_del_init(&bo->lru);
++ ++put_count;
++ }
++
++ /*
++ * TODO: Add a driver hook to delete from
++ * driver-specific LRU's here.
++ */
++
++ return put_count;
++}
++
++int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
++ bool interruptible,
++ bool no_wait, bool use_sequence, uint32_t sequence)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ int ret;
++
++ while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
++ if (use_sequence && bo->seq_valid &&
++ (sequence - bo->val_seq < (1 << 31))) {
++ return -EAGAIN;
++ }
++
++ if (no_wait)
++ return -EBUSY;
++
++ spin_unlock(&bdev->lru_lock);
++ ret = ttm_bo_wait_unreserved(bo, interruptible);
++ spin_lock(&bdev->lru_lock);
++
++ if (unlikely(ret))
++ return ret;
++ }
++
++ if (use_sequence) {
++ bo->val_seq = sequence;
++ bo->seq_valid = true;
++ } else {
++ bo->seq_valid = false;
++ }
++
++ return 0;
++}
++
++static void ttm_bo_ref_bug(struct kref *list_kref)
++{
++ BUG();
++}
++
++int ttm_bo_reserve(struct ttm_buffer_object *bo,
++ bool interruptible,
++ bool no_wait, bool use_sequence, uint32_t sequence)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ int put_count = 0;
++ int ret;
++
++ spin_lock(&bdev->lru_lock);
++ ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
++ sequence);
++ if (likely(ret == 0))
++ put_count = ttm_bo_del_from_lru(bo);
++ spin_unlock(&bdev->lru_lock);
++
++ while (put_count--)
++ kref_put(&bo->list_kref, ttm_bo_ref_bug);
++
++ return ret;
++}
++
++void ttm_bo_unreserve(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++
++ spin_lock(&bdev->lru_lock);
++ ttm_bo_add_to_lru(bo);
++ atomic_set(&bo->reserved, 0);
++ wake_up_all(&bo->event_queue);
++ spin_unlock(&bdev->lru_lock);
++}
++
++/*
++ * Call bo->mutex locked.
++ */
++
++/*
++ * Allocate and attach a struct ttm_tt backing store for @bo.
++ *
++ * Call with bo->mutex held (asserted below). For user buffer objects
++ * the ttm is additionally populated from the current task's address
++ * space via ttm_tt_set_user(); if that fails, the just-created ttm is
++ * destroyed again.
++ *
++ * Returns 0 on success, -ENOMEM when ttm creation fails, the
++ * ttm_tt_set_user() error code, or -EINVAL for an unknown bo type.
++ */
++static int ttm_bo_add_ttm(struct ttm_buffer_object *bo)
++{
++	struct ttm_bo_device *bdev = bo->bdev;
++	int ret = 0;
++	uint32_t page_flags = 0;
++
++	TTM_ASSERT_LOCKED(&bo->mutex);
++	bo->ttm = NULL;
++
++	switch (bo->type) {
++	case ttm_bo_type_device:
++	case ttm_bo_type_kernel:
++		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
++					page_flags, bdev->dummy_read_page);
++		if (unlikely(bo->ttm == NULL))
++			ret = -ENOMEM;
++		break;
++	case ttm_bo_type_user:
++		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
++					page_flags | TTM_PAGE_FLAG_USER,
++					bdev->dummy_read_page);
++		if (unlikely(bo->ttm == NULL)) {
++			ret = -ENOMEM;
++			break;
++		}
++
++		/*
++		 * Bug fix: an unconditional "break" previously sat between
++		 * the NULL check and this call, making it unreachable, so
++		 * user bo's were never bound to the caller's pages.
++		 */
++		ret = ttm_tt_set_user(bo->ttm, current,
++				      bo->buffer_start, bo->num_pages);
++		if (unlikely(ret != 0))
++			ttm_tt_destroy(bo->ttm);
++		break;
++	default:
++		printk(KERN_ERR "Illegal buffer object type\n");
++		ret = -EINVAL;
++		break;
++	}
++
++	return ret;
++}
++
++/*
++ * Move @bo to the memory region described by @mem, creating and
++ * binding a ttm first when the destination type needs one.
++ *
++ * @evict:         this move is an eviction (recorded in priv_flags).
++ * @interruptible: forwarded to the driver move hook.
++ * @no_wait:       forwarded to the move implementations.
++ *
++ * On any failure after a ttm was created for a FIXED memory type,
++ * the out_err path unbinds and destroys it again.
++ */
++static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
++				  struct ttm_mem_reg *mem,
++				  bool evict, bool interruptible, bool no_wait)
++{
++	struct ttm_bo_device *bdev = bo->bdev;
++	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
++	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
++	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
++	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
++	int ret = 0;
++
++	/* CPU mappings go stale if the aperture or caching changes. */
++	if (old_is_pci || new_is_pci ||
++	    ((mem->flags & bo->mem.flags & TTM_PL_MASK_CACHING) == 0))
++		ttm_bo_unmap_virtual(bo);
++
++	/*
++	 * Create and bind a ttm if required.
++	 */
++
++	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
++		ret = ttm_bo_add_ttm(bo);
++		if (ret)
++			goto out_err;
++
++		ret = ttm_tt_set_placement_caching(bo->ttm, mem->flags);
++		/*
++		 * Bug fix: this path used "return ret", skipping the
++		 * out_err cleanup of the freshly created ttm.
++		 */
++		if (ret)
++			goto out_err;
++
++		if (mem->mem_type != TTM_PL_SYSTEM) {
++			ret = ttm_tt_bind(bo->ttm, mem);
++			if (ret)
++				goto out_err;
++		}
++
++		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
++			/*
++			 * System -> ttm-backed move needs no data copy:
++			 * adopt the new region in place, preserving the
++			 * previously proposed flags.
++			 */
++			struct ttm_mem_reg *old_mem = &bo->mem;
++			uint32_t save_flags = old_mem->flags;
++			uint32_t save_proposed_flags = old_mem->proposed_flags;
++
++			*old_mem = *mem;
++			mem->mm_node = NULL;
++			old_mem->proposed_flags = save_proposed_flags;
++			ttm_flag_masked(&save_flags, mem->flags,
++					TTM_PL_MASK_MEMTYPE);
++			goto moved;
++		}
++
++	}
++
++	/* Pick the cheapest applicable move implementation. */
++	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
++	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
++		ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
++	else if (bdev->driver->move)
++		ret = bdev->driver->move(bo, evict, interruptible,
++					 no_wait, mem);
++	else
++		ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
++
++	if (ret)
++		goto out_err;
++
++moved:
++	if (bo->priv_flags & TTM_BO_PRIV_FLAG_EVICTED) {
++		ret = bdev->driver->invalidate_caches(bdev, bo->mem.flags);
++		if (ret)
++			printk(KERN_ERR "Can not flush read caches\n");
++	}
++
++	/* Record (or clear) the evicted state for this move. */
++	ttm_flag_masked(&bo->priv_flags,
++			(evict) ? TTM_BO_PRIV_FLAG_EVICTED : 0,
++			TTM_BO_PRIV_FLAG_EVICTED);
++
++	if (bo->mem.mm_node)
++		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
++		    bdev->man[bo->mem.mem_type].gpu_offset;
++
++	return 0;
++
++out_err:
++	new_man = &bdev->man[bo->mem.mem_type];
++	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
++		ttm_tt_unbind(bo->ttm);
++		ttm_tt_destroy(bo->ttm);
++		bo->ttm = NULL;
++	}
++
++	return ret;
++}
++
++static int ttm_bo_expire_sync_obj(struct ttm_buffer_object *bo,
++ bool allow_errors)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_bo_driver *driver = bdev->driver;
++
++ if (bo->sync_obj) {
++ if (bdev->nice_mode) {
++ unsigned long _end = jiffies + 3 * HZ;
++ int ret;
++ do {
++ ret = ttm_bo_wait(bo, false, false, false);
++ if (ret && allow_errors)
++ return ret;
++
++ } while (ret && !time_after_eq(jiffies, _end));
++
++ if (bo->sync_obj) {
++ bdev->nice_mode = false;
++ printk(KERN_ERR "Detected probable GPU lockup. "
++ "Evicting buffer.\n");
++ }
++ }
++ if (bo->sync_obj) {
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++ }
++ }
++ return 0;
++}
++
++/**
++ * If bo idle, remove from delayed- and lru lists, and unref.
++ * If not idle, and already on delayed list, do nothing.
++ * If not idle, and not on delayed list, put on delayed list,
++ * up the list_kref and schedule a delayed list check.
++ */
++
++static void ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
++{
++	struct ttm_bo_device *bdev = bo->bdev;
++	struct ttm_bo_driver *driver = bdev->driver;
++
++	mutex_lock(&bo->mutex);
++
++	/* Drop the fence reference if it has already signaled. */
++	if (bo->sync_obj && driver->sync_obj_signaled(bo->sync_obj,
++						      bo->sync_obj_arg)) {
++		driver->sync_obj_unref(&bo->sync_obj);
++		bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++	}
++
++	/* Forced teardown: wait (bounded) for the fence to expire. */
++	if (bo->sync_obj && remove_all)
++		(void)ttm_bo_expire_sync_obj(bo, false);
++
++	if (!bo->sync_obj) {
++		int put_count;
++
++		/* Idle: unbind, release GPU space and list references. */
++		if (bo->ttm)
++			ttm_tt_unbind(bo->ttm);
++		spin_lock(&bdev->lru_lock);
++		if (!list_empty(&bo->ddestroy)) {
++			list_del_init(&bo->ddestroy);
++			/*
++			 * ttm_bo_ref_bug BUG()s if this were the last
++			 * reference; the caller must still hold one.
++			 */
++			kref_put(&bo->list_kref, ttm_bo_ref_bug);
++		}
++		if (bo->mem.mm_node) {
++			drm_mm_put_block(bo->mem.mm_node);
++			bo->mem.mm_node = NULL;
++		}
++		put_count = ttm_bo_del_from_lru(bo);
++		spin_unlock(&bdev->lru_lock);
++		/* Drop the mutex before puts that may free the bo. */
++		mutex_unlock(&bo->mutex);
++		while (put_count--)
++			kref_put(&bo->list_kref, ttm_bo_release_list);
++
++		return;
++	}
++
++	/*
++	 * Still busy: queue on bdev->ddestroy (unless already queued)
++	 * and schedule the delayed-delete worker to retry later.
++	 */
++	spin_lock(&bdev->lru_lock);
++	if (list_empty(&bo->ddestroy)) {
++		spin_unlock(&bdev->lru_lock);
++		/* Flush outside the spinlock; recheck after retaking it. */
++		driver->sync_obj_flush(bo->sync_obj, bo->sync_obj_arg);
++		spin_lock(&bdev->lru_lock);
++		if (list_empty(&bo->ddestroy)) {
++			kref_get(&bo->list_kref);
++			list_add_tail(&bo->ddestroy, &bdev->ddestroy);
++		}
++		spin_unlock(&bdev->lru_lock);
++		schedule_delayed_work(&bdev->wq,
++				      ((HZ / 100) < 1) ? 1 : HZ / 100);
++	} else
++		spin_unlock(&bdev->lru_lock);
++
++	mutex_unlock(&bo->mutex);
++	return;
++}
++
++/**
++ * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
++ * encountered buffers.
++ */
++
++static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
++{
++ struct ttm_buffer_object *entry, *nentry;
++ struct list_head *list, *next;
++ int ret;
++
++ spin_lock(&bdev->lru_lock);
++ list_for_each_safe(list, next, &bdev->ddestroy) {
++ entry = list_entry(list, struct ttm_buffer_object, ddestroy);
++ nentry = NULL;
++
++ /*
++ * Protect the next list entry from destruction while we
++ * unlock the lru_lock.
++ */
++
++ if (next != &bdev->ddestroy) {
++ nentry = list_entry(next, struct ttm_buffer_object,
++ ddestroy);
++ kref_get(&nentry->list_kref);
++ }
++ kref_get(&entry->list_kref);
++
++ spin_unlock(&bdev->lru_lock);
++ ttm_bo_cleanup_refs(entry, remove_all);
++ kref_put(&entry->list_kref, ttm_bo_release_list);
++ spin_lock(&bdev->lru_lock);
++
++ if (nentry) {
++ bool next_onlist = !list_empty(next);
++ kref_put(&nentry->list_kref, ttm_bo_release_list);
++
++ /*
++ * Someone might have raced us and removed the
++ * next entry from the list. We don't bother restarting
++ * list traversal.
++ */
++
++ if (!next_onlist)
++ break;
++ }
++ }
++ ret = !list_empty(&bdev->ddestroy);
++ spin_unlock(&bdev->lru_lock);
++
++ return ret;
++}
++
++static void ttm_bo_delayed_workqueue(struct work_struct *work)
++{
++ struct ttm_bo_device *bdev =
++ container_of(work, struct ttm_bo_device, wq.work);
++
++ if (ttm_bo_delayed_delete(bdev, false)) {
++ schedule_delayed_work(&bdev->wq,
++ ((HZ / 100) < 1) ? 1 : HZ / 100);
++ }
++}
++
++static void ttm_bo_release(struct kref *kref)
++{
++ struct ttm_buffer_object *bo =
++ container_of(kref, struct ttm_buffer_object, kref);
++ struct ttm_bo_device *bdev = bo->bdev;
++
++ if (likely(bo->vm_node != NULL)) {
++ rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
++ drm_mm_put_block(bo->vm_node);
++ }
++ write_unlock(&bdev->vm_lock);
++ ttm_bo_cleanup_refs(bo, false);
++ kref_put(&bo->list_kref, ttm_bo_release_list);
++ write_lock(&bdev->vm_lock);
++}
++
++void ttm_bo_unref(struct ttm_buffer_object **p_bo)
++{
++ struct ttm_buffer_object *bo = *p_bo;
++ struct ttm_bo_device *bdev = bo->bdev;
++
++ *p_bo = NULL;
++ write_lock(&bdev->vm_lock);
++ kref_put(&bo->kref, ttm_bo_release);
++ write_unlock(&bdev->vm_lock);
++}
++
++static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
++ bool interruptible, bool no_wait)
++{
++ int ret = 0;
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_mem_reg evict_mem;
++
++ if (bo->mem.mem_type != mem_type)
++ goto out;
++
++ ret = ttm_bo_wait(bo, false, interruptible, no_wait);
++ if (ret && ret != -ERESTART) {
++ printk(KERN_ERR "Failed to expire sync object before "
++ "buffer eviction.\n");
++ goto out;
++ }
++
++ BUG_ON(!atomic_read(&bo->reserved));
++
++ evict_mem = bo->mem;
++ evict_mem.mm_node = NULL;
++
++ evict_mem.proposed_flags = bdev->driver->evict_flags(bo);
++ BUG_ON(ttm_bo_type_flags(mem_type) & evict_mem.proposed_flags);
++
++ ret = ttm_bo_mem_space(bo, &evict_mem, interruptible, no_wait);
++ if (unlikely(ret != 0 && ret != -ERESTART)) {
++ evict_mem.proposed_flags = TTM_PL_FLAG_SYSTEM;
++ BUG_ON(ttm_bo_type_flags(mem_type) & evict_mem.proposed_flags);
++ ret = ttm_bo_mem_space(bo, &evict_mem, interruptible, no_wait);
++ }
++
++ if (ret) {
++ if (ret != -ERESTART)
++ printk(KERN_ERR "Failed to find memory space for "
++ "buffer 0x%p eviction.\n", bo);
++ goto out;
++ }
++
++ ret = ttm_bo_handle_move_mem(bo,
++ &evict_mem,
++ true,
++ interruptible,
++ no_wait);
++ if (ret) {
++ if (ret != -ERESTART)
++ printk(KERN_ERR "Buffer eviction failed\n");
++ goto out;
++ }
++
++ spin_lock(&bdev->lru_lock);
++ if (evict_mem.mm_node) {
++ drm_mm_put_block(evict_mem.mm_node);
++ evict_mem.mm_node = NULL;
++ }
++ spin_unlock(&bdev->lru_lock);
++
++ ttm_flag_masked(&bo->priv_flags, TTM_BO_PRIV_FLAG_EVICTED,
++ TTM_BO_PRIV_FLAG_EVICTED);
++
++out:
++ return ret;
++}
++
++/**
++ * Repeatedly evict memory from the LRU for @mem_type until we create enough
++ * space, or we've evicted everything and there isn't enough space.
++ */
++static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
++ struct ttm_mem_reg *mem,
++ uint32_t mem_type,
++ bool interruptible, bool no_wait)
++{
++ struct drm_mm_node *node;
++ struct ttm_buffer_object *entry;
++ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
++ struct list_head *lru;
++ unsigned long num_pages = mem->num_pages;
++ int put_count = 0;
++ int ret;
++
++retry_pre_get:
++ ret = drm_mm_pre_get(&man->manager);
++ if (unlikely(ret != 0))
++ return ret;
++
++ spin_lock(&bdev->lru_lock);
++ do {
++ node = drm_mm_search_free(&man->manager, num_pages,
++ mem->page_alignment, 1);
++ if (node)
++ break;
++
++ lru = &man->lru;
++ if (list_empty(lru))
++ break;
++
++ entry = list_first_entry(lru, struct ttm_buffer_object, lru);
++ kref_get(&entry->list_kref);
++
++ ret = ttm_bo_reserve_locked(entry,
++ interruptible,
++ no_wait,
++ false,
++ 0);
++
++ if (likely(ret == 0))
++ put_count = ttm_bo_del_from_lru(entry);
++
++ spin_unlock(&bdev->lru_lock);
++
++ if (unlikely(ret != 0))
++ return ret;
++
++ while (put_count--)
++ kref_put(&entry->list_kref, ttm_bo_ref_bug);
++
++ mutex_lock(&entry->mutex);
++ ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
++ mutex_unlock(&entry->mutex);
++
++ ttm_bo_unreserve(entry);
++
++ kref_put(&entry->list_kref, ttm_bo_release_list);
++ if (ret)
++ return ret;
++
++ spin_lock(&bdev->lru_lock);
++ } while (1);
++
++ if (!node) {
++ spin_unlock(&bdev->lru_lock);
++ return -ENOMEM;
++ }
++
++ node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
++ if (unlikely(!node)) {
++ spin_unlock(&bdev->lru_lock);
++ goto retry_pre_get;
++ }
++
++ spin_unlock(&bdev->lru_lock);
++ mem->mm_node = node;
++ mem->mem_type = mem_type;
++ return 0;
++}
++
++static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
++ bool disallow_fixed,
++ uint32_t mem_type,
++ uint32_t mask, uint32_t *res_mask)
++{
++ uint32_t cur_flags = ttm_bo_type_flags(mem_type);
++
++ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
++ return false;
++
++ if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
++ return false;
++
++ if ((mask & man->available_caching) == 0)
++ return false;
++ if (mask & man->default_caching)
++ cur_flags |= man->default_caching;
++ else if (mask & TTM_PL_FLAG_CACHED)
++ cur_flags |= TTM_PL_FLAG_CACHED;
++ else if (mask & TTM_PL_FLAG_WC)
++ cur_flags |= TTM_PL_FLAG_WC;
++ else
++ cur_flags |= TTM_PL_FLAG_UNCACHED;
++
++ *res_mask = cur_flags;
++ return true;
++}
++
++/**
++ * Creates space for memory region @mem according to its type.
++ *
++ * This function first searches for free space in compatible memory types in
++ * the priority order defined by the driver. If free space isn't found, then
++ * ttm_bo_mem_force_space is attempted in priority order to evict and find
++ * space.
++ */
++int ttm_bo_mem_space(struct ttm_buffer_object *bo,
++ struct ttm_mem_reg *mem, bool interruptible, bool no_wait)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_mem_type_manager *man;
++
++ uint32_t num_prios = bdev->driver->num_mem_type_prio;
++ const uint32_t *prios = bdev->driver->mem_type_prio;
++ uint32_t i;
++ uint32_t mem_type = TTM_PL_SYSTEM;
++ uint32_t cur_flags = 0;
++ bool type_found = false;
++ bool type_ok = false;
++ bool has_eagain = false;
++ struct drm_mm_node *node = NULL;
++ int ret;
++
++ mem->mm_node = NULL;
++ for (i = 0; i < num_prios; ++i) {
++ mem_type = prios[i];
++ man = &bdev->man[mem_type];
++
++ type_ok = ttm_bo_mt_compatible(man,
++ bo->type == ttm_bo_type_user,
++ mem_type, mem->proposed_flags,
++ &cur_flags);
++
++ if (!type_ok)
++ continue;
++
++ if (mem_type == TTM_PL_SYSTEM)
++ break;
++
++ if (man->has_type && man->use_type) {
++ type_found = true;
++ do {
++ ret = drm_mm_pre_get(&man->manager);
++ if (unlikely(ret))
++ return ret;
++
++ spin_lock(&bdev->lru_lock);
++ node = drm_mm_search_free(&man->manager,
++ mem->num_pages,
++ mem->page_alignment,
++ 1);
++ if (unlikely(!node)) {
++ spin_unlock(&bdev->lru_lock);
++ break;
++ }
++ node = drm_mm_get_block_atomic(node,
++ mem->num_pages,
++ mem->
++ page_alignment);
++ spin_unlock(&bdev->lru_lock);
++ } while (!node);
++ }
++ if (node)
++ break;
++ }
++
++ if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
++ mem->mm_node = node;
++ mem->mem_type = mem_type;
++ mem->flags = cur_flags;
++ return 0;
++ }
++
++ if (!type_found)
++ return -EINVAL;
++
++ num_prios = bdev->driver->num_mem_busy_prio;
++ prios = bdev->driver->mem_busy_prio;
++
++ for (i = 0; i < num_prios; ++i) {
++ mem_type = prios[i];
++ man = &bdev->man[mem_type];
++
++ if (!man->has_type)
++ continue;
++
++ if (!ttm_bo_mt_compatible(man,
++ bo->type == ttm_bo_type_user,
++ mem_type,
++ mem->proposed_flags, &cur_flags))
++ continue;
++
++ ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
++ interruptible, no_wait);
++
++ if (ret == 0 && mem->mm_node) {
++ mem->flags = cur_flags;
++ return 0;
++ }
++
++ if (ret == -ERESTART)
++ has_eagain = true;
++ }
++
++ ret = (has_eagain) ? -ERESTART : -ENOMEM;
++ return ret;
++}
++
++/*
++ * Call bo->mutex locked.
++ * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
++ */
++
++static int ttm_bo_busy(struct ttm_buffer_object *bo)
++{
++ void *sync_obj = bo->sync_obj;
++ struct ttm_bo_driver *driver = bo->bdev->driver;
++
++ if (sync_obj) {
++ if (driver->sync_obj_signaled(sync_obj, bo->sync_obj_arg)) {
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++ return 0;
++ }
++ driver->sync_obj_flush(sync_obj, bo->sync_obj_arg);
++ if (driver->sync_obj_signaled(sync_obj, bo->sync_obj_arg)) {
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++ return 0;
++ }
++ return 1;
++ }
++ return 0;
++}
++
++int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
++{
++ int ret = 0;
++
++ if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
++ return -EBUSY;
++
++ ret = wait_event_interruptible(bo->event_queue,
++ atomic_read(&bo->cpu_writers) == 0);
++
++ if (ret == -ERESTARTSYS)
++ ret = -ERESTART;
++
++ return ret;
++}
++
++/*
++ * bo->mutex locked.
++ * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
++ */
++
++int ttm_bo_move_buffer(struct ttm_buffer_object *bo, uint32_t new_mem_flags,
++ bool interruptible, bool no_wait)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ int ret = 0;
++ struct ttm_mem_reg mem;
++
++ BUG_ON(!atomic_read(&bo->reserved));
++
++ /*
++ * FIXME: It's possible to pipeline buffer moves.
++ * Have the driver move function wait for idle when necessary,
++ * instead of doing it here.
++ */
++
++ ttm_bo_busy(bo);
++ ret = ttm_bo_wait(bo, false, interruptible, no_wait);
++ if (ret)
++ return ret;
++
++ mem.num_pages = bo->num_pages;
++ mem.size = mem.num_pages << PAGE_SHIFT;
++ mem.proposed_flags = new_mem_flags;
++ mem.page_alignment = bo->mem.page_alignment;
++
++ /*
++ * Determine where to move the buffer.
++ */
++
++ ret = ttm_bo_mem_space(bo, &mem, interruptible, no_wait);
++ if (ret)
++ goto out_unlock;
++
++ ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
++
++out_unlock:
++ if (ret && mem.mm_node) {
++ spin_lock(&bdev->lru_lock);
++ drm_mm_put_block(mem.mm_node);
++ spin_unlock(&bdev->lru_lock);
++ }
++ return ret;
++}
++
++static int ttm_bo_mem_compat(struct ttm_mem_reg *mem)
++{
++ if ((mem->proposed_flags & mem->flags & TTM_PL_MASK_MEM) == 0)
++ return 0;
++ if ((mem->proposed_flags & mem->flags & TTM_PL_MASK_CACHING) == 0)
++ return 0;
++
++ return 1;
++}
++
++int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
++ bool interruptible, bool no_wait)
++{
++ int ret;
++
++ BUG_ON(!atomic_read(&bo->reserved));
++ bo->mem.proposed_flags = bo->proposed_flags;
++
++ TTM_DEBUG("Proposed flags 0x%08lx, Old flags 0x%08lx\n",
++ (unsigned long)bo->mem.proposed_flags,
++ (unsigned long)bo->mem.flags);
++
++ /*
++ * Check whether we need to move buffer.
++ */
++
++ if (!ttm_bo_mem_compat(&bo->mem)) {
++ ret = ttm_bo_move_buffer(bo, bo->mem.proposed_flags,
++ interruptible, no_wait);
++ if (ret) {
++ if (ret != -ERESTART)
++ printk(KERN_ERR "Failed moving buffer. "
++ "Proposed placement 0x%08x\n",
++ bo->mem.proposed_flags);
++ if (ret == -ENOMEM)
++ printk(KERN_ERR "Out of aperture space or "
++ "DRM memory quota.\n");
++ return ret;
++ }
++ }
++
++ /*
++ * We might need to add a TTM.
++ */
++
++ if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
++ ret = ttm_bo_add_ttm(bo);
++ if (ret)
++ return ret;
++ }
++ /*
++ * Validation has succeeded, move the access and other
++ * non-mapping-related flag bits from the proposed flags to
++ * the active flags
++ */
++
++ ttm_flag_masked(&bo->mem.flags, bo->proposed_flags,
++ ~TTM_PL_MASK_MEMTYPE);
++
++ return 0;
++}
++
++int
++ttm_bo_check_placement(struct ttm_buffer_object *bo,
++ uint32_t set_flags, uint32_t clr_flags)
++{
++ uint32_t new_mask = set_flags | clr_flags;
++
++ if ((bo->type == ttm_bo_type_user) &&
++ (clr_flags & TTM_PL_FLAG_CACHED)) {
++ printk(KERN_ERR
++ "User buffers require cache-coherent memory.\n");
++ return -EINVAL;
++ }
++
++ if (!capable(CAP_SYS_ADMIN)) {
++ if (new_mask & TTM_PL_FLAG_NO_EVICT) {
++ printk(KERN_ERR "Need to be root to modify"
++ " NO_EVICT status.\n");
++ return -EINVAL;
++ }
++
++ if ((clr_flags & bo->mem.flags & TTM_PL_MASK_MEMTYPE) &&
++ (bo->mem.flags & TTM_PL_FLAG_NO_EVICT)) {
++ printk(KERN_ERR "Incompatible memory specification"
++ " for NO_EVICT buffer.\n");
++ return -EINVAL;
++ }
++ }
++ return 0;
++}
++
++int ttm_buffer_object_init(struct ttm_bo_device *bdev,
++ struct ttm_buffer_object *bo,
++ unsigned long size,
++ enum ttm_bo_type type,
++ uint32_t flags,
++ uint32_t page_alignment,
++ unsigned long buffer_start,
++ bool interruptible,
++ struct file *persistant_swap_storage,
++ size_t acc_size,
++ void (*destroy) (struct ttm_buffer_object *))
++{
++ int ret = 0;
++ unsigned long num_pages;
++
++ size += buffer_start & ~PAGE_MASK;
++ num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ if (num_pages == 0) {
++ printk(KERN_ERR "Illegal buffer object size.\n");
++ return -EINVAL;
++ }
++ bo->destroy = destroy;
++
++ mutex_init(&bo->mutex);
++ mutex_lock(&bo->mutex);
++ kref_init(&bo->kref);
++ kref_init(&bo->list_kref);
++ atomic_set(&bo->cpu_writers, 0);
++ atomic_set(&bo->reserved, 1);
++ init_waitqueue_head(&bo->event_queue);
++ INIT_LIST_HEAD(&bo->lru);
++ INIT_LIST_HEAD(&bo->ddestroy);
++ INIT_LIST_HEAD(&bo->swap);
++ bo->bdev = bdev;
++ bo->type = type;
++ bo->num_pages = num_pages;
++ bo->mem.mem_type = TTM_PL_SYSTEM;
++ bo->mem.num_pages = bo->num_pages;
++ bo->mem.mm_node = NULL;
++ bo->mem.page_alignment = page_alignment;
++ bo->buffer_start = buffer_start & PAGE_MASK;
++ bo->priv_flags = 0;
++ bo->mem.flags = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
++ bo->seq_valid = false;
++ bo->persistant_swap_storage = persistant_swap_storage;
++ bo->acc_size = acc_size;
++
++ ret = ttm_bo_check_placement(bo, flags, 0ULL);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ /*
++ * If no caching attributes are set, accept any form of caching.
++ */
++
++ if ((flags & TTM_PL_MASK_CACHING) == 0)
++ flags |= TTM_PL_MASK_CACHING;
++
++ bo->proposed_flags = flags;
++ bo->mem.proposed_flags = flags;
++
++ /*
++ * For ttm_bo_type_device buffers, allocate
++ * address space from the device.
++ */
++
++ if (bo->type == ttm_bo_type_device) {
++ ret = ttm_bo_setup_vm(bo);
++ if (ret)
++ goto out_err;
++ }
++
++ ret = ttm_buffer_object_validate(bo, interruptible, false);
++ if (ret)
++ goto out_err;
++
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++ return 0;
++
++out_err:
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++ ttm_bo_unref(&bo);
++
++ return ret;
++}
++
++static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
++ unsigned long num_pages)
++{
++ size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
++ PAGE_MASK;
++
++ return bdev->ttm_bo_size + 2 * page_array_size;
++}
++
++int ttm_buffer_object_create(struct ttm_bo_device *bdev,
++ unsigned long size,
++ enum ttm_bo_type type,
++ uint32_t flags,
++ uint32_t page_alignment,
++ unsigned long buffer_start,
++ bool interruptible,
++ struct file *persistant_swap_storage,
++ struct ttm_buffer_object **p_bo)
++{
++ struct ttm_buffer_object *bo;
++ int ret;
++ struct ttm_mem_global *mem_glob = bdev->mem_glob;
++
++ size_t acc_size =
++ ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
++ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
++ if (unlikely(ret != 0))
++ return ret;
++
++ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
++
++ if (unlikely(bo == NULL)) {
++ ttm_mem_global_free(mem_glob, acc_size, false);
++ return -ENOMEM;
++ }
++
++ ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
++ page_alignment, buffer_start,
++ interruptible,
++ persistant_swap_storage, acc_size, NULL);
++ if (likely(ret == 0))
++ *p_bo = bo;
++
++ return ret;
++}
++
++static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
++ uint32_t mem_type, bool allow_errors)
++{
++ int ret;
++
++ mutex_lock(&bo->mutex);
++
++ ret = ttm_bo_expire_sync_obj(bo, allow_errors);
++ if (ret)
++ goto out;
++
++ if (bo->mem.mem_type == mem_type)
++ ret = ttm_bo_evict(bo, mem_type, false, false);
++
++ if (ret) {
++ if (allow_errors)
++ goto out;
++ else {
++ ret = 0;
++ printk(KERN_ERR "Cleanup eviction failed\n");
++ }
++ }
++
++out:
++ mutex_unlock(&bo->mutex);
++ return ret;
++}
++
++static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
++ struct list_head *head,
++ unsigned mem_type, bool allow_errors)
++{
++ struct ttm_buffer_object *entry;
++ int ret;
++ int put_count;
++
++ /*
++ * Can't use standard list traversal since we're unlocking.
++ */
++
++ spin_lock(&bdev->lru_lock);
++
++ while (!list_empty(head)) {
++ entry = list_first_entry(head, struct ttm_buffer_object, lru);
++ kref_get(&entry->list_kref);
++ ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
++ put_count = ttm_bo_del_from_lru(entry);
++ spin_unlock(&bdev->lru_lock);
++ while (put_count--)
++ kref_put(&entry->list_kref, ttm_bo_ref_bug);
++ BUG_ON(ret);
++ ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
++ ttm_bo_unreserve(entry);
++ kref_put(&entry->list_kref, ttm_bo_release_list);
++ spin_lock(&bdev->lru_lock);
++ }
++
++ spin_unlock(&bdev->lru_lock);
++
++ return 0;
++}
++
++int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
++{
++ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
++ int ret = -EINVAL;
++
++ if (mem_type >= TTM_NUM_MEM_TYPES) {
++ printk(KERN_ERR "Illegal memory type %d\n", mem_type);
++ return ret;
++ }
++
++ if (!man->has_type) {
++ printk(KERN_ERR "Trying to take down uninitialized "
++ "memory manager type %u\n", mem_type);
++ return ret;
++ }
++
++ man->use_type = false;
++ man->has_type = false;
++
++ ret = 0;
++ if (mem_type > 0) {
++ ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
++
++ spin_lock(&bdev->lru_lock);
++ if (drm_mm_clean(&man->manager))
++ drm_mm_takedown(&man->manager);
++ else
++ ret = -EBUSY;
++ spin_unlock(&bdev->lru_lock);
++ }
++
++ return ret;
++}
++
++int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
++{
++ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
++
++ if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
++ printk(KERN_ERR "Illegal memory manager memory type %u.\n",
++ mem_type);
++ return -EINVAL;
++ }
++
++ if (!man->has_type) {
++ printk(KERN_ERR "Memory type %u has not been initialized.\n",
++ mem_type);
++ return 0;
++ }
++
++ return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
++}
++
++int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
++ unsigned long p_offset, unsigned long p_size)
++{
++ int ret = -EINVAL;
++ struct ttm_mem_type_manager *man;
++
++ if (type >= TTM_NUM_MEM_TYPES) {
++ printk(KERN_ERR "Illegal memory type %d\n", type);
++ return ret;
++ }
++
++ man = &bdev->man[type];
++ if (man->has_type) {
++ printk(KERN_ERR
++ "Memory manager already initialized for type %d\n",
++ type);
++ return ret;
++ }
++
++ ret = bdev->driver->init_mem_type(bdev, type, man);
++ if (ret)
++ return ret;
++
++ ret = 0;
++ if (type != TTM_PL_SYSTEM) {
++ if (!p_size) {
++ printk(KERN_ERR "Zero size memory manager type %d\n",
++ type);
++ return ret;
++ }
++ ret = drm_mm_init(&man->manager, p_offset, p_size);
++ if (ret)
++ return ret;
++ }
++ man->has_type = true;
++ man->use_type = true;
++ man->size = p_size;
++
++ INIT_LIST_HEAD(&man->lru);
++
++ return 0;
++}
++
++int ttm_bo_device_release(struct ttm_bo_device *bdev)
++{
++ int ret = 0;
++ unsigned i = TTM_NUM_MEM_TYPES;
++ struct ttm_mem_type_manager *man;
++
++ while (i--) {
++ man = &bdev->man[i];
++ if (man->has_type) {
++ man->use_type = false;
++ if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
++ ret = -EBUSY;
++ printk(KERN_ERR "DRM memory manager type %d "
++ "is not clean.\n", i);
++ }
++ man->has_type = false;
++ }
++ }
++
++ if (!cancel_delayed_work(&bdev->wq))
++ flush_scheduled_work();
++
++ while (ttm_bo_delayed_delete(bdev, true)) {
++ /* Don't you know you have to do */
++ /* something here otherwise checkpatch will */
++ /* give you error */
++ }
++
++
++ spin_lock(&bdev->lru_lock);
++ if (list_empty(&bdev->ddestroy))
++ TTM_DEBUG("Delayed destroy list was clean\n");
++
++ if (list_empty(&bdev->man[0].lru))
++ TTM_DEBUG("Swap list was clean\n");
++ spin_unlock(&bdev->lru_lock);
++
++ ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
++ BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
++ write_lock(&bdev->vm_lock);
++ drm_mm_takedown(&bdev->addr_space_mm);
++ write_unlock(&bdev->vm_lock);
++
++ __free_page(bdev->dummy_read_page);
++ return ret;
++}
++
++/*
++ * This function is intended to be called on drm driver load.
++ * If you decide to call it from firstopen, you must protect the call
++ * from a potentially racing ttm_bo_driver_finish in lastclose.
++ * (This may happen on X server restart).
++ */
++
++int ttm_bo_device_init(struct ttm_bo_device *bdev,
++ struct ttm_mem_global *mem_glob,
++ struct ttm_bo_driver *driver, uint64_t file_page_offset)
++{
++ int ret = -EINVAL;
++
++ bdev->dummy_read_page = NULL;
++ rwlock_init(&bdev->vm_lock);
++ spin_lock_init(&bdev->lru_lock);
++
++ bdev->driver = driver;
++ bdev->mem_glob = mem_glob;
++
++ memset(bdev->man, 0, sizeof(bdev->man));
++
++ bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
++ if (unlikely(bdev->dummy_read_page == NULL)) {
++ ret = -ENOMEM;
++ goto out_err0;
++ }
++
++ /*
++ * Initialize the system memory buffer type.
++ * Other types need to be driver / IOCTL initialized.
++ */
++ ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
++ if (unlikely(ret != 0))
++ goto out_err1;
++
++ bdev->addr_space_rb = RB_ROOT;
++ ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
++ if (unlikely(ret != 0))
++ goto out_err2;
++
++ INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
++ bdev->nice_mode = true;
++ INIT_LIST_HEAD(&bdev->ddestroy);
++ INIT_LIST_HEAD(&bdev->swap_lru);
++ bdev->dev_mapping = NULL;
++ ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
++ ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
++ if (unlikely(ret != 0)) {
++ printk(KERN_ERR "Could not register buffer object swapout.\n");
++ goto out_err2;
++ }
++ return 0;
++out_err2:
++ ttm_bo_clean_mm(bdev, 0);
++out_err1:
++ __free_page(bdev->dummy_read_page);
++out_err0:
++ return ret;
++}
++
++/*
++ * buffer object vm functions.
++ */
++
++bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
++{
++ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
++
++ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
++ if (mem->mem_type == TTM_PL_SYSTEM)
++ return false;
++
++ if (man->flags & TTM_MEMTYPE_FLAG_CMA)
++ return false;
++
++ if (mem->flags & TTM_PL_FLAG_CACHED)
++ return false;
++ }
++ return true;
++}
++
++int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
++ struct ttm_mem_reg *mem,
++ unsigned long *bus_base,
++ unsigned long *bus_offset, unsigned long *bus_size)
++{
++ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
++
++ *bus_size = 0;
++ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
++ return -EINVAL;
++
++ if (ttm_mem_reg_is_pci(bdev, mem)) {
++ *bus_offset = mem->mm_node->start << PAGE_SHIFT;
++ *bus_size = mem->num_pages << PAGE_SHIFT;
++ *bus_base = man->io_offset;
++ }
++
++ return 0;
++}
++
++/**
++ * \c Kill all user-space virtual mappings of this buffer object.
++ *
++ * \param bo The buffer object.
++ *
++ * Call bo->mutex locked.
++ */
++
++void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ loff_t offset = (loff_t) bo->addr_space_offset;
++ loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
++
++ if (!bdev->dev_mapping)
++ return;
++
++ unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
++}
++
++static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct rb_node **cur = &bdev->addr_space_rb.rb_node;
++ struct rb_node *parent = NULL;
++ struct ttm_buffer_object *cur_bo;
++ unsigned long offset = bo->vm_node->start;
++ unsigned long cur_offset;
++
++ while (*cur) {
++ parent = *cur;
++ cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
++ cur_offset = cur_bo->vm_node->start;
++ if (offset < cur_offset)
++ cur = &parent->rb_left;
++ else if (offset > cur_offset)
++ cur = &parent->rb_right;
++ else
++ BUG();
++ }
++
++ rb_link_node(&bo->vm_rb, parent, cur);
++ rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
++}
++
++/**
++ * ttm_bo_setup_vm:
++ *
++ * @bo: the buffer to allocate address space for
++ *
++ * Allocate address space in the drm device so that applications
++ * can mmap the buffer and access the contents. This only
++ * applies to ttm_bo_type_device objects as others are not
++ * placed in the drm device address space.
++ */
++
++static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ int ret;
++
++retry_pre_get:
++ ret = drm_mm_pre_get(&bdev->addr_space_mm);
++ if (unlikely(ret != 0))
++ return ret;
++
++ write_lock(&bdev->vm_lock);
++ bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
++ bo->mem.num_pages, 0, 0);
++
++ if (unlikely(bo->vm_node == NULL)) {
++ ret = -ENOMEM;
++ goto out_unlock;
++ }
++
++ bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
++ bo->mem.num_pages, 0);
++
++ if (unlikely(bo->vm_node == NULL)) {
++ write_unlock(&bdev->vm_lock);
++ goto retry_pre_get;
++ }
++
++ ttm_bo_vm_insert_rb(bo);
++ write_unlock(&bdev->vm_lock);
++ bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
++
++ return 0;
++out_unlock:
++ write_unlock(&bdev->vm_lock);
++ return ret;
++}
++
++int ttm_bo_wait(struct ttm_buffer_object *bo,
++ bool lazy, bool interruptible, bool no_wait)
++{
++ struct ttm_bo_driver *driver = bo->bdev->driver;
++ void *sync_obj;
++ void *sync_obj_arg;
++ int ret = 0;
++
++ while (bo->sync_obj) {
++ if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++ goto out;
++ }
++ if (no_wait) {
++ ret = -EBUSY;
++ goto out;
++ }
++ sync_obj = driver->sync_obj_ref(bo->sync_obj);
++ sync_obj_arg = bo->sync_obj_arg;
++ mutex_unlock(&bo->mutex);
++ ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
++ lazy, interruptible);
++
++ mutex_lock(&bo->mutex);
++ if (unlikely(ret != 0)) {
++ driver->sync_obj_unref(&sync_obj);
++ return ret;
++ }
++
++ if (bo->sync_obj == sync_obj) {
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->priv_flags &= ~TTM_BO_PRIV_FLAG_MOVING;
++ }
++ driver->sync_obj_unref(&sync_obj);
++ }
++out:
++ return 0;
++}
++
++void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
++{
++ atomic_set(&bo->reserved, 0);
++ wake_up_all(&bo->event_queue);
++}
++
++int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
++ bool no_wait)
++{
++ int ret;
++
++ while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
++ if (no_wait)
++ return -EBUSY;
++ else if (interruptible) {
++ ret = wait_event_interruptible
++ (bo->event_queue, atomic_read(&bo->reserved) == 0);
++ if (unlikely(ret != 0))
++ return -ERESTART;
++ } else {
++ wait_event(bo->event_queue,
++ atomic_read(&bo->reserved) == 0);
++ }
++ }
++ return 0;
++}
++
++int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
++{
++ int ret = 0;
++
++ /*
++ * Using ttm_bo_reserve instead of ttm_bo_block_reservation
++ * makes sure the lru lists are updated.
++ */
++
++ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
++ if (unlikely(ret != 0))
++ return ret;
++ mutex_lock(&bo->mutex);
++ ret = ttm_bo_wait(bo, false, true, no_wait);
++ if (unlikely(ret != 0))
++ goto out_err0;
++ atomic_inc(&bo->cpu_writers);
++out_err0:
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++ return ret;
++}
++
++void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
++{
++ if (atomic_dec_and_test(&bo->cpu_writers))
++ wake_up_all(&bo->event_queue);
++}
++
++/**
++ * A buffer object shrink method that tries to swap out the first
++ * buffer object on the bo_global::swap_lru list.
++ */
++
++static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
++{
++ struct ttm_bo_device *bdev =
++ container_of(shrink, struct ttm_bo_device, shrink);
++ struct ttm_buffer_object *bo;
++ int ret = -EBUSY;
++ int put_count;
++ uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
++
++ spin_lock(&bdev->lru_lock);
++ while (ret == -EBUSY) {
++ if (unlikely(list_empty(&bdev->swap_lru))) {
++ spin_unlock(&bdev->lru_lock);
++ return -EBUSY;
++ }
++
++ bo = list_first_entry(&bdev->swap_lru,
++ struct ttm_buffer_object, swap);
++ kref_get(&bo->list_kref);
++
++ /**
++ * Reserve buffer. Since we unlock while sleeping, we need
++ * to re-check that nobody removed us from the swap-list while
++ * we slept.
++ */
++
++ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
++ if (unlikely(ret == -EBUSY)) {
++ spin_unlock(&bdev->lru_lock);
++ ttm_bo_wait_unreserved(bo, false);
++ kref_put(&bo->list_kref, ttm_bo_release_list);
++ spin_lock(&bdev->lru_lock);
++ }
++ }
++
++ BUG_ON(ret != 0);
++ put_count = ttm_bo_del_from_lru(bo);
++ spin_unlock(&bdev->lru_lock);
++
++ while (put_count--)
++ kref_put(&bo->list_kref, ttm_bo_ref_bug);
++
++ /**
++ * Wait for GPU, then move to system cached.
++ */
++
++ mutex_lock(&bo->mutex);
++ ret = ttm_bo_wait(bo, false, false, false);
++ if (unlikely(ret != 0))
++ goto out;
++
++ if ((bo->mem.flags & swap_placement) != swap_placement) {
++ struct ttm_mem_reg evict_mem;
++
++ evict_mem = bo->mem;
++ evict_mem.mm_node = NULL;
++ evict_mem.proposed_flags =
++ TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
++ evict_mem.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
++ evict_mem.mem_type = TTM_PL_SYSTEM;
++
++ ret = ttm_bo_handle_move_mem(bo,
++ &evict_mem,
++ true,
++ false,
++ false);
++ if (unlikely(ret != 0))
++ goto out;
++ }
++
++ ttm_bo_unmap_virtual(bo);
++
++ /**
++ * Swap out. Buffer will be swapped in again as soon as
++ * anyone tries to access a ttm page.
++ */
++
++ ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
++out:
++ mutex_unlock(&bo->mutex);
++
++ /**
++ *
++ * Unreserve without putting on LRU to avoid swapping out an
++ * already swapped buffer.
++ */
++
++ atomic_set(&bo->reserved, 0);
++ wake_up_all(&bo->event_queue);
++ kref_put(&bo->list_kref, ttm_bo_release_list);
++ return ret;
++}
++
++void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
++{
++ while (ttm_bo_swapout(&bdev->shrink) == 0) {
++ /* Checkpatch doesn't like it */
++ /* adding something here */
++ }
++}
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_api.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_api.h
+new file mode 100644
+index 0000000..e336893
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_api.h
+@@ -0,0 +1,573 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_BO_API_H_
++#define _TTM_BO_API_H_
++
++#include <drm/drm_hashtab.h>
++#include <linux/kref.h>
++#include <linux/list.h>
++#include <linux/wait.h>
++#include <linux/mutex.h>
++#include <linux/mm.h>
++#include <linux/rbtree.h>
++
++struct ttm_bo_device;
++
++struct drm_mm_node;
++
++/**
++ * struct ttm_mem_reg
++ *
++ * @mm_node: Memory manager node.
++ * @size: Requested size of memory region.
++ * @num_pages: Actual size of memory region in pages.
++ * @page_alignment: Page alignment.
++ * @flags: Placement flags.
++ * @proposed_flags: Proposed placement flags.
++ *
++ * Structure indicating the placement and space resources used by a
++ * buffer object.
++ */
++
++struct ttm_mem_reg {
++ struct drm_mm_node *mm_node;
++ unsigned long size;
++ unsigned long num_pages;
++ uint32_t page_alignment;
++ uint32_t mem_type;
++ uint32_t flags;
++ uint32_t proposed_flags;
++};
++
++/**
++ * enum ttm_bo_type
++ *
++ * @ttm_bo_type_device: These are 'normal' buffers that can
++ * be mmapped by user space. Each of these bos occupy a slot in the
++ * device address space, that can be used for normal vm operations.
++ *
++ * @ttm_bo_type_user: These are user-space memory areas that are made
++ * available to the GPU by mapping the buffer pages into the GPU aperture
++ * space. These buffers cannot be mmaped from the device address space.
++ *
++ * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
++ * but they cannot be accessed from user-space. For kernel-only use.
++ */
++
++enum ttm_bo_type {
++ ttm_bo_type_device,
++ ttm_bo_type_user,
++ ttm_bo_type_kernel
++};
++
++struct ttm_tt;
++
++/**
++ * struct ttm_buffer_object
++ *
++ * @bdev: Pointer to the buffer object device structure.
++ * @kref: Reference count of this buffer object. When this refcount reaches
++ * zero, the object is put on the delayed delete list.
++ * @list_kref: List reference count of this buffer object. This member is
++ * used to avoid destruction while the buffer object is still on a list.
++ * Lru lists may keep one refcount, the delayed delete list, and kref != 0
++ * keeps one refcount. When this refcount reaches zero,
++ * the object is destroyed.
++ * @proposed_flags: Proposed placement for the buffer. Changed only by the
++ * creator prior to validation as opposed to bo->mem.proposed_flags which is
++ * changed by the implementation prior to a buffer move if it wants to outsmart
++ * the buffer creator / user. This latter happens, for example, at eviction.
++ * @buffer_start: The virtual user-space start address of ttm_bo_type_user
++ * buffers.
++ * @type: The bo type.
++ * @offset: The current GPU offset, which can have different meanings
++ * depending on the memory type. For SYSTEM type memory, it should be 0.
++ * @mem: structure describing current placement.
++ * @val_seq: Sequence of the validation holding the @reserved lock.
++ * Used to avoid starvation when many processes compete to validate the
++ * buffer. This member is protected by the bo_device::lru_lock.
++ * @seq_valid: The value of @val_seq is valid. This value is protected by
++ * the bo_device::lru_lock.
++ * @lru: List head for the lru list.
++ * @ddestroy: List head for the delayed destroy list.
++ * @swap: List head for swap LRU list.
++ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
++ * pinned in physical memory. If this behaviour is not desired, this member
++ * holds a pointer to a persistant shmem object.
++ * @destroy: Destruction function. If NULL, kfree is used.
++ * @sync_obj_arg: Opaque argument to synchronization object function.
++ * @sync_obj: Pointer to a synchronization object.
++ * @priv_flags: Flags describing buffer object internal state.
++ * @event_queue: Queue for processes waiting on buffer object status change.
++ * @mutex: Lock protecting all members with the exception of constant members
++ * and list heads. We should really use a spinlock here.
++ * @num_pages: Actual number of pages.
++ * @ttm: TTM structure holding system pages.
++ * @vm_hash: Hash item for fast address space lookup. Need to change to a
++ * rb-tree node.
++ * @vm_node: Address space manager node.
++ * @addr_space_offset: Address space offset.
++ * @cpu_writes: For synchronization. Number of cpu writers.
++ * @reserved: Deadlock-free lock used for synchronization state transitions.
++ * @acc_size: Accounted size for this object.
++ *
++ * Base class for TTM buffer object, that deals with data placement and CPU
++ * mappings. GPU mappings are really up to the driver, but for simpler GPUs
++ * the driver can usually use the placement offset @offset directly as the
++ * GPU virtual address. For drivers implementing multiple
++ * GPU memory manager contexts, the driver should manage the address space
++ * in these contexts separately and use these objects to get the correct
++ * placement and caching for these GPU maps. This makes it possible to use
++ * these objects for even quite elaborate memory management schemes.
++ * The destroy member, the API visibility of this object makes it possible
++ * to derive driver specific types.
++ */
++
++struct ttm_buffer_object {
++ struct ttm_bo_device *bdev;
++ struct kref kref;
++ struct kref list_kref;
++
++ /*
++ * If there is a possibility that the usage variable is zero,
++ * then dev->struct_mutex should be locked before incrementing it.
++ */
++
++ uint32_t proposed_flags;
++ unsigned long buffer_start;
++ enum ttm_bo_type type;
++ unsigned long offset;
++ struct ttm_mem_reg mem;
++ uint32_t val_seq;
++ bool seq_valid;
++
++ struct list_head lru;
++ struct list_head ddestroy;
++ struct list_head swap;
++
++ struct file *persistant_swap_storage;
++
++ void (*destroy) (struct ttm_buffer_object *);
++
++ void *sync_obj_arg;
++ void *sync_obj;
++
++ uint32_t priv_flags;
++ wait_queue_head_t event_queue;
++ struct mutex mutex;
++ unsigned long num_pages;
++
++ struct ttm_tt *ttm;
++ struct rb_node vm_rb;
++ struct drm_mm_node *vm_node;
++ uint64_t addr_space_offset;
++
++ atomic_t cpu_writers;
++ atomic_t reserved;
++
++ size_t acc_size;
++};
++
++/**
++ * struct ttm_bo_kmap_obj
++ *
++ * @virtual: The current kernel virtual address.
++ * @page: The page when kmap'ing a single page.
++ * @bo_kmap_type: Type of bo_kmap.
++ *
++ * Object describing a kernel mapping. Since a TTM bo may be located
++ * in various memory types with various caching policies, the
++ * mapping can either be an ioremap, a vmap, a kmap or part of a
++ * premapped region.
++ */
++
++struct ttm_bo_kmap_obj {
++ void *virtual;
++ struct page *page;
++ enum {
++ ttm_bo_map_iomap,
++ ttm_bo_map_vmap,
++ ttm_bo_map_kmap,
++ ttm_bo_map_premapped,
++ } bo_kmap_type;
++};
++
++/**
++ * ttm_bo_reference - reference a struct ttm_buffer_object
++ *
++ * @bo: The buffer object.
++ *
++ * Returns a refcounted pointer to a buffer object.
++ */
++
++static inline struct ttm_buffer_object *ttm_bo_reference(
++ struct ttm_buffer_object *bo)
++{
++ kref_get(&bo->kref);
++ return bo;
++}
++
++/**
++ * ttm_bo_wait - wait for buffer idle.
++ *
++ * @bo: The buffer object.
++ * @interruptible: Use interruptible wait.
++ * @no_wait: Return immediately if buffer is busy.
++ *
++ * This function must be called with the bo::mutex held, and makes
++ * sure any previous rendering to the buffer is completed.
++ * Note: It might be necessary to block validations before the
++ * wait by reserving the buffer.
++ * Returns -EBUSY if no_wait is true and the buffer is busy.
++ * Returns -ERESTART if interrupted by a signal.
++ */
++extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
++ bool interruptible, bool no_wait);
++/**
++ * ttm_buffer_object_validate
++ *
++ * @bo: The buffer object.
++ * @interruptible: Sleep interruptible if sleeping.
++ * @no_wait: Return immediately if the buffer is busy.
++ *
++ * Changes placement and caching policy of the buffer object
++ * according to bo::proposed_flags.
++ * Returns
++ * -EINVAL on invalid proposed_flags.
++ * -ENOMEM on out-of-memory condition.
++ * -EBUSY if no_wait is true and buffer busy.
++ * -ERESTART if interrupted by a signal.
++ */
++extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
++ bool interruptible, bool no_wait);
++/**
++ * ttm_bo_unref
++ *
++ * @bo: The buffer object.
++ *
++ * Unreference and clear a pointer to a buffer object.
++ */
++extern void ttm_bo_unref(struct ttm_buffer_object **bo);
++
++/**
++ * ttm_bo_synccpu_write_grab
++ *
++ * @bo: The buffer object:
++ * @no_wait: Return immediately if buffer is busy.
++ *
++ * Synchronizes a buffer object for CPU RW access. This means
++ * blocking command submission that affects the buffer and
++ * waiting for buffer idle. This lock is recursive.
++ * Returns
++ * -EBUSY if the buffer is busy and no_wait is true.
++ * -ERESTART if interrupted by a signal.
++ */
++
++extern int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo,
++ bool no_wait);
++/**
++ * ttm_bo_synccpu_write_release:
++ *
++ * @bo : The buffer object.
++ *
++ * Releases a synccpu lock.
++ */
++extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
++
++/**
++ * ttm_buffer_object_init
++ *
++ * @bdev: Pointer to a ttm_bo_device struct.
++ * @bo: Pointer to a ttm_buffer_object to be initialized.
++ * @size: Requested size of buffer object.
++ * @type: Requested type of buffer object.
++ * @flags: Initial placement flags.
++ * @page_alignment: Data alignment in pages.
++ * @buffer_start: Virtual address of user space data backing a
++ * user buffer object.
++ * @interruptible: If needing to sleep to wait for GPU resources,
++ * sleep interruptible.
++ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
++ * pinned in physical memory. If this behaviour is not desired, this member
++ * holds a pointer to a persistant shmem object. Typically, this would
++ * point to the shmem object backing a GEM object if TTM is used to back a
++ * GEM user interface.
++ * @acc_size: Accounted size for this object.
++ * @destroy: Destroy function. Use NULL for kfree().
++ *
++ * This function initializes a pre-allocated struct ttm_buffer_object.
++ * As this object may be part of a larger structure, this function,
++ * together with the @destroy function,
++ * enables driver-specific objects derived from a ttm_buffer_object.
++ * On successful return, the object kref and list_kref are set to 1.
++ * Returns
++ * -ENOMEM: Out of memory.
++ * -EINVAL: Invalid placement flags.
++ * -ERESTART: Interrupted by signal while sleeping waiting for resources.
++ */
++
++extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
++ struct ttm_buffer_object *bo,
++ unsigned long size,
++ enum ttm_bo_type type,
++ uint32_t flags,
++ uint32_t page_alignment,
++ unsigned long buffer_start,
++ bool interrubtible,
++ struct file *persistant_swap_storage,
++ size_t acc_size,
++ void (*destroy) (struct ttm_buffer_object *));
++/**
++ * ttm_bo_synccpu_object_init
++ *
++ * @bdev: Pointer to a ttm_bo_device struct.
++ * @bo: Pointer to a ttm_buffer_object to be initialized.
++ * @size: Requested size of buffer object.
++ * @type: Requested type of buffer object.
++ * @flags: Initial placement flags.
++ * @page_alignment: Data alignment in pages.
++ * @buffer_start: Virtual address of user space data backing a
++ * user buffer object.
++ * @interruptible: If needing to sleep while waiting for GPU resources,
++ * sleep interruptible.
++ * @persistant_swap_storage: Usually the swap storage is deleted for buffers
++ * pinned in physical memory. If this behaviour is not desired, this member
++ * holds a pointer to a persistant shmem object. Typically, this would
++ * point to the shmem object backing a GEM object if TTM is used to back a
++ * GEM user interface.
++ * @p_bo: On successful completion *p_bo points to the created object.
++ *
++ * This function allocates a ttm_buffer_object, and then calls
++ * ttm_buffer_object_init on that object.
++ * The destroy function is set to kfree().
++ * Returns
++ * -ENOMEM: Out of memory.
++ * -EINVAL: Invalid placement flags.
++ * -ERESTART: Interrupted by signal while waiting for resources.
++ */
++
++extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
++ unsigned long size,
++ enum ttm_bo_type type,
++ uint32_t flags,
++ uint32_t page_alignment,
++ unsigned long buffer_start,
++ bool interruptible,
++ struct file *persistant_swap_storage,
++ struct ttm_buffer_object **p_bo);
++
++/**
++ * ttm_bo_check_placement
++ *
++ * @bo: the buffer object.
++ * @set_flags: placement flags to set.
++ * @clr_flags: placement flags to clear.
++ *
++ * Performs minimal validity checking on an intended change of
++ * placement flags.
++ * Returns
++ * -EINVAL: Intended change is invalid or not allowed.
++ */
++
++extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
++ uint32_t set_flags, uint32_t clr_flags);
++
++/**
++ * ttm_bo_init_mm
++ *
++ * @bdev: Pointer to a ttm_bo_device struct.
++ * @mem_type: The memory type.
++ * @p_offset: offset for managed area in pages.
++ * @p_size: size managed area in pages.
++ *
++ * Initialize a manager for a given memory type.
++ * Note: if part of driver firstopen, it must be protected from a
++ * potentially racing lastclose.
++ * Returns:
++ * -EINVAL: invalid size or memory type.
++ * -ENOMEM: Not enough memory.
++ * May also return driver-specified errors.
++ */
++
++extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
++ unsigned long p_offset, unsigned long p_size);
++/**
++ * ttm_bo_clean_mm
++ *
++ * @bdev: Pointer to a ttm_bo_device struct.
++ * @mem_type: The memory type.
++ *
++ * Take down a manager for a given memory type after first walking
++ * the LRU list to evict any buffers left alive.
++ *
++ * Normally, this function is part of lastclose() or unload(), and at that
++ * point there shouldn't be any buffers left created by user-space, since
++ * there should've been removed by the file descriptor release() method.
++ * However, before this function is run, make sure to signal all sync objects,
++ * and verify that the delayed delete queue is empty. The driver must also
++ * make sure that there are no NO_EVICT buffers present in this memory type
++ * when the call is made.
++ *
++ * If this function is part of a VT switch, the caller must make sure that
++ * there are no appications currently validating buffers before this
++ * function is called. The caller can do that by first taking the
++ * struct ttm_bo_device::ttm_lock in write mode.
++ *
++ * Returns:
++ * -EINVAL: invalid or uninitialized memory type.
++ * -EBUSY: There are still buffers left in this memory type.
++ */
++
++extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
++
++/**
++ * ttm_bo_evict_mm
++ *
++ * @bdev: Pointer to a ttm_bo_device struct.
++ * @mem_type: The memory type.
++ *
++ * Evicts all buffers on the lru list of the memory type.
++ * This is normally part of a VT switch or an
++ * out-of-memory-space-due-to-fragmentation handler.
++ * The caller must make sure that there are no other processes
++ * currently validating buffers, and can do that by taking the
++ * struct ttm_bo_device::ttm_lock in write mode.
++ *
++ * Returns:
++ * -EINVAL: Invalid or uninitialized memory type.
++ * -ERESTART: The call was interrupted by a signal while waiting to
++ * evict a buffer.
++ */
++
++extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
++
++/**
++ * ttm_kmap_obj_virtual
++ *
++ * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
++ * @is_iomem: Pointer to an integer that on return indicates 1 if the
++ * virtual map is io memory, 0 if normal memory.
++ *
++ * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
++ * If *is_iomem is 1 on return, the virtual address points to an io memory area,
++ * that should strictly be accessed by the iowriteXX() and similar functions.
++ */
++
++static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
++ bool *is_iomem)
++{
++ *is_iomem = (map->bo_kmap_type == ttm_bo_map_iomap ||
++ map->bo_kmap_type == ttm_bo_map_premapped);
++ return map->virtual;
++}
++
++/**
++ * ttm_bo_kmap
++ *
++ * @bo: The buffer object.
++ * @start_page: The first page to map.
++ * @num_pages: Number of pages to map.
++ * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
++ *
++ * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
++ * data in the buffer object. The ttm_kmap_obj_virtual function can then be
++ * used to obtain a virtual address to the data.
++ *
++ * Returns
++ * -ENOMEM: Out of memory.
++ * -EINVAL: Invalid range.
++ */
++
++extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
++ unsigned long num_pages, struct ttm_bo_kmap_obj *map);
++
++/**
++ * ttm_bo_kunmap
++ *
++ * @map: Object describing the map to unmap.
++ *
++ * Unmaps a kernel map set up by ttm_bo_kmap.
++ */
++
++extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
++
++#if 0
++#endif
++
++/**
++ * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
++ *
++ * @vma: vma as input from the fbdev mmap method.
++ * @bo: The bo backing the address space. The address space will
++ * have the same size as the bo, and start at offset 0.
++ *
++ * This function is intended to be called by the fbdev mmap method
++ * if the fbdev address space is to be backed by a bo.
++ */
++
++extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
++ struct ttm_buffer_object *bo);
++
++/**
++ * ttm_bo_mmap - mmap out of the ttm device address space.
++ *
++ * @filp: filp as input from the mmap method.
++ * @vma: vma as input from the mmap method.
++ * @bdev: Pointer to the ttm_bo_device with the address space manager.
++ *
++ * This function is intended to be called by the device mmap method.
++ * if the device address space is to be backed by the bo manager.
++ */
++
++extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
++ struct ttm_bo_device *bdev);
++
++/**
++ * ttm_bo_io
++ *
++ * @bdev: Pointer to the struct ttm_bo_device.
++ * @filp: Pointer to the struct file attempting to read / write.
++ * @wbuf: User-space pointer to address of buffer to write. NULL on read.
++ * @rbuf: User-space pointer to address of buffer to read into.
++ * Null on write.
++ * @count: Number of bytes to read / write.
++ * @f_pos: Pointer to current file position.
++ * @write: 1 for read, 0 for write.
++ *
++ * This function implements read / write into ttm buffer objects, and is
++ * intended to be called from the fops::read and fops::write method.
++ * Returns:
++ * See man (2) write, man(2) read. In particular, the function may
++ * return -EINTR if interrupted by a signal.
++ */
++
++extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
++ const char __user *wbuf, char __user *rbuf,
++ size_t count, loff_t *f_pos, bool write);
++
++extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_driver.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_driver.h
+new file mode 100644
+index 0000000..4991256
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_driver.h
+@@ -0,0 +1,862 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 Vmware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++#ifndef _TTM_BO_DRIVER_H_
++#define _TTM_BO_DRIVER_H_
++
++#include "ttm_bo_api.h"
++#include "ttm_memory.h"
++#include <drm/drm_mm.h>
++#include "linux/workqueue.h"
++#include "linux/fs.h"
++#include "linux/spinlock.h"
++
++struct ttm_backend;
++
++struct ttm_backend_func {
++ /**
++ * struct ttm_backend_func member populate
++ *
++ * @backend: Pointer to a struct ttm_backend.
++ * @num_pages: Number of pages to populate.
++ * @pages: Array of pointers to ttm pages.
++ * @dummy_read_page: Page to be used instead of NULL pages in the
++ * array @pages.
++ *
++ * Populate the backend with ttm pages. Depending on the backend,
++ * it may or may not copy the @pages array.
++ */
++ int (*populate) (struct ttm_backend *backend,
++ unsigned long num_pages, struct page **pages,
++ struct page *dummy_read_page);
++ /**
++ * struct ttm_backend_func member clear
++ *
++ * @backend: Pointer to a struct ttm_backend.
++ *
++ * This is an "unpopulate" function. Release all resources
++ * allocated with populate.
++ */
++ void (*clear) (struct ttm_backend *backend);
++
++ /**
++ * struct ttm_backend_func member bind
++ *
++ * @backend: Pointer to a struct ttm_backend.
++ * @bo_mem: Pointer to a struct ttm_mem_reg describing the
++ * memory type and location for binding.
++ *
++ * Bind the backend pages into the aperture in the location
++ * indicated by @bo_mem. This function should be able to handle
++ * differences between aperture- and system page sizes.
++ */
++ int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);
++
++ /**
++ * struct ttm_backend_func member unbind
++ *
++ * @backend: Pointer to a struct ttm_backend.
++ *
++ * Unbind previously bound backend pages. This function should be
++ * able to handle differences between aperture- and system page sizes.
++ */
++ int (*unbind) (struct ttm_backend *backend);
++
++ /**
++ * struct ttm_backend_func member destroy
++ *
++ * @backend: Pointer to a struct ttm_backend.
++ *
++ * Destroy the backend.
++ */
++ void (*destroy) (struct ttm_backend *backend);
++};
++
++/**
++ * struct ttm_backend
++ *
++ * @bdev: Pointer to a struct ttm_bo_device.
++ * @flags: For driver use.
++ * @func: Pointer to a struct ttm_backend_func that describes
++ * the backend methods.
++ *
++ */
++
++struct ttm_backend {
++ struct ttm_bo_device *bdev;
++ uint32_t flags;
++ struct ttm_backend_func *func;
++};
++
++#define TTM_PAGE_FLAG_VMALLOC (1 << 0)
++#define TTM_PAGE_FLAG_USER (1 << 1)
++#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2)
++#define TTM_PAGE_FLAG_WRITE (1 << 3)
++#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
++#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
++
++enum ttm_caching_state {
++ tt_uncached,
++ tt_wc,
++ tt_cached
++};
++
++/**
++ * struct ttm_tt
++ *
++ * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
++ * pointer.
++ * @pages: Array of pages backing the data.
++ * @first_himem_page: Himem pages are put last in the page array, which
++ * enables us to run caching attribute changes on only the first part
++ * of the page array containing lomem pages. This is the index of the
++ * first himem page.
++ * @last_lomem_page: Index of the last lomem page in the page array.
++ * @num_pages: Number of pages in the page array.
++ * @bdev: Pointer to the current struct ttm_bo_device.
++ * @be: Pointer to the ttm backend.
++ * @tsk: The task for user ttm.
++ * @start: virtual address for user ttm.
++ * @swap_storage: Pointer to shmem struct file for swap storage.
++ * @caching_state: The current caching state of the pages.
++ * @state: The current binding state of the pages.
++ *
++ * This is a structure holding the pages, caching- and aperture binding
++ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
++ * memory.
++ */
++
++struct ttm_tt {
++ struct page *dummy_read_page;
++ struct page **pages;
++ long first_himem_page;
++ long last_lomem_page;
++ uint32_t page_flags;
++ unsigned long num_pages;
++ struct ttm_bo_device *bdev;
++ struct ttm_backend *be;
++ struct task_struct *tsk;
++ unsigned long start;
++ struct file *swap_storage;
++ enum ttm_caching_state caching_state;
++ enum {
++ tt_bound,
++ tt_unbound,
++ tt_unpopulated,
++ } state;
++};
++
++#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
++#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
++#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2) /* Fixed memory needs ioremap
++ before kernel access. */
++#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
++
++/**
++ * struct ttm_mem_type_manager
++ *
++ * @has_type: The memory type has been initialized.
++ * @use_type: The memory type is enabled.
++ * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
++ * managed by this memory type.
++ * @gpu_offset: If used, the GPU offset of the first managed page of
++ * fixed memory or the first managed location in an aperture.
++ * @io_offset: The io_offset of the first managed page of IO memory or
++ * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
++ * memory, this should be set to NULL.
++ * @io_size: The size of a managed IO region (fixed memory or aperture).
++ * @io_addr: Virtual kernel address if the io region is pre-mapped. For
++ * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
++ * @io_addr should be set to NULL.
++ * @size: Size of the managed region.
++ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
++ * as defined in ttm_placement_common.h
++ * @default_caching: The default caching policy used for a buffer object
++ * placed in this memory type if the user doesn't provide one.
++ * @manager: The range manager used for this memory type. FIXME: If the aperture
++ * has a page size different from the underlying system, the granularity
++ * of this manager should take care of this. But the range allocating code
++ * in ttm_bo.c needs to be modified for this.
++ * @lru: The lru list for this memory type.
++ *
++ * This structure is used to identify and manage memory types for a device.
++ * It's set up by the ttm_bo_driver::init_mem_type method.
++ */
++
++struct ttm_mem_type_manager {
++
++ /*
++ * No protection. Constant from start.
++ */
++
++ bool has_type;
++ bool use_type;
++ uint32_t flags;
++ unsigned long gpu_offset;
++ unsigned long io_offset;
++ unsigned long io_size;
++ void *io_addr;
++ uint64_t size;
++ uint32_t available_caching;
++ uint32_t default_caching;
++
++ /*
++ * Protected by the bdev->lru_lock.
++ * TODO: Consider one lru_lock per ttm_mem_type_manager.
++ * Plays ill with list removal, though.
++ */
++
++ struct drm_mm manager;
++ struct list_head lru;
++};
++
++/**
++ * struct ttm_bo_driver
++ *
++ * @mem_type_prio: Priority array of memory types to place a buffer object in
++ * if it fits without evicting buffers from any of these memory types.
++ * @mem_busy_prio: Priority array of memory types to place a buffer object in
++ * if it needs to evict buffers to make room.
++ * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
++ * @num_mem_busy_prio: Number of elements in the @num_mem_busy_prio array.
++ * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
++ * @invalidate_caches: Callback to invalidate read caches when a buffer object
++ * has been evicted.
++ * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
++ * structure.
++ * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
++ * @move: Callback for a driver to hook in accelerated functions to move
++ * a buffer.
++ * If set to NULL, a potentially slow memcpy() move is used.
++ * @sync_obj_signaled: See ttm_fence_api.h
++ * @sync_obj_wait: See ttm_fence_api.h
++ * @sync_obj_flush: See ttm_fence_api.h
++ * @sync_obj_unref: See ttm_fence_api.h
++ * @sync_obj_ref: See ttm_fence_api.h
++ */
++
++struct ttm_bo_driver {
++ const uint32_t *mem_type_prio;
++ const uint32_t *mem_busy_prio;
++ uint32_t num_mem_type_prio;
++ uint32_t num_mem_busy_prio;
++
++ /**
++ * struct ttm_bo_driver member create_ttm_backend_entry
++ *
++ * @bdev: The buffer object device.
++ *
++ * Create a driver specific struct ttm_backend.
++ */
++
++ struct ttm_backend *(*create_ttm_backend_entry)
++ (struct ttm_bo_device *bdev);
++
++ /**
++ * struct ttm_bo_driver member invalidate_caches
++ *
++ * @bdev: the buffer object device.
++ * @flags: new placement of the rebound buffer object.
++ *
++ * A previously evicted buffer has been rebound in a
++ * potentially new location. Tell the driver that it might
++ * consider invalidating read (texture) caches on the next command
++ * submission as a consequence.
++ */
++
++ int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
++ int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
++ struct ttm_mem_type_manager *man);
++ /**
++ * struct ttm_bo_driver member evict_flags:
++ *
++ * @bo: the buffer object to be evicted
++ *
++ * Return the bo flags for a buffer which is not mapped to the hardware.
++ * These will be placed in proposed_flags so that when the move is
++ * finished, they'll end up in bo->mem.flags
++ */
++
++ uint32_t(*evict_flags) (struct ttm_buffer_object *bo);
++ /**
++ * struct ttm_bo_driver member move:
++ *
++ * @bo: the buffer to move
++ * @evict: whether this motion is evicting the buffer from
++ * the graphics address space
++ * @interruptible: Use interruptible sleeps if possible when sleeping.
++ * @no_wait: whether this should give up and return -EBUSY
++ * if this move would require sleeping
++ * @new_mem: the new memory region receiving the buffer
++ *
++ * Move a buffer between two memory regions.
++ */
++ int (*move) (struct ttm_buffer_object *bo,
++ bool evict, bool interruptible,
++ bool no_wait, struct ttm_mem_reg *new_mem);
++
++ /**
++ * struct ttm_bo_driver_member verify_access
++ *
++ * @bo: Pointer to a buffer object.
++ * @filp: Pointer to a struct file trying to access the object.
++ *
++ * Called from the map / write / read methods to verify that the
++ * caller is permitted to access the buffer object.
++ * This member may be set to NULL, which will refuse this kind of
++ * access for all buffer objects.
++ * This function should return 0 if access is granted, -EPERM otherwise.
++ */
++ int (*verify_access) (struct ttm_buffer_object *bo,
++ struct file *filp);
++
++ /**
++ * In case a driver writer dislikes the TTM fence objects,
++ * the driver writer can replace those with sync objects of
++ * his / her own. If it turns out that no driver writer is
++ * using these. I suggest we remove these hooks and plug in
++ * fences directly. The bo driver needs the following functionality:
++ * See the corresponding functions in the fence object API
++ * documentation.
++ */
++
++ bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
++ int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
++ bool lazy, bool interruptible);
++ int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
++ void (*sync_obj_unref) (void **sync_obj);
++ void *(*sync_obj_ref) (void *sync_obj);
++};
++
++#define TTM_NUM_MEM_TYPES 11
++
++#define TTM_BO_PRIV_FLAG_EVICTED (1 << 0) /* Buffer object is evicted. */
++#define TTM_BO_PRIV_FLAG_MOVING (1 << 1) /* Buffer object is moving
++ and needs idling before
++ CPU mapping */
++/**
++ * struct ttm_bo_device - Buffer object driver device-specific data.
++ *
++ * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
++ * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
++ * @count: Current number of buffer object.
++ * @pages: Current number of pinned pages.
++ * @dummy_read_page: Pointer to a dummy page used for mapping requests
++ * of unpopulated pages.
++ * @shrink: A shrink callback object used for buffer object swap.
++ * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
++ * used by a buffer object. This is excluding page arrays and backing pages.
++ * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
++ * @man: An array of mem_type_managers.
++ * @addr_space_mm: Range manager for the device address space.
++ * @lru_lock: Spinlock that protects the buffer+device lru lists and
++ * ddestroy lists.
++ * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
++ * If a GPU lockup has been detected, this is forced to 0.
++ * @dev_mapping: A pointer to the struct address_space representing the
++ * device address space.
++ * @wq: Work queue structure for the delayed delete workqueue.
++ *
++ */
++
++struct ttm_bo_device {
++
++ /*
++ * Constant after bo device init / atomic.
++ */
++
++ struct ttm_mem_global *mem_glob;
++ struct ttm_bo_driver *driver;
++ struct page *dummy_read_page;
++ struct ttm_mem_shrink shrink;
++
++ size_t ttm_bo_extra_size;
++ size_t ttm_bo_size;
++
++ rwlock_t vm_lock;
++ /*
++ * Protected by the vm lock.
++ */
++ struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
++ struct rb_root addr_space_rb;
++ struct drm_mm addr_space_mm;
++
++ /*
++ * Might want to change this to one lock per manager.
++ */
++ spinlock_t lru_lock;
++ /*
++ * Protected by the lru lock.
++ */
++ struct list_head ddestroy;
++ struct list_head swap_lru;
++
++ /*
++ * Protected by load / firstopen / lastclose /unload sync.
++ */
++
++ bool nice_mode;
++ struct address_space *dev_mapping;
++
++ /*
++ * Internal protection.
++ */
++
++ struct delayed_work wq;
++};
++
++/**
++ * ttm_flag_masked
++ *
++ * @old: Pointer to the result and original value.
++ * @new: New value of bits.
++ * @mask: Mask of bits to change.
++ *
++ * Convenience function to change a number of bits identified by a mask.
++ */
++
++static inline uint32_t
++ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
++{
++ *old ^= (*old ^ new) & mask;
++ return *old;
++}
++
++/**
++ * ttm_tt_create
++ *
++ * @bdev: pointer to a struct ttm_bo_device:
++ * @size: Size of the data needed backing.
++ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
++ * @dummy_read_page: See struct ttm_bo_device.
++ *
++ * Create a struct ttm_tt to back data with system memory pages.
++ * No pages are actually allocated.
++ * Returns:
++ * NULL: Out of memory.
++ */
++extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
++ unsigned long size,
++ uint32_t page_flags,
++ struct page *dummy_read_page);
++
++/**
++ * ttm_tt_set_user:
++ *
++ * @ttm: The struct ttm_tt to populate.
++ * @tsk: A struct task_struct for which @start is a valid user-space address.
++ * @start: A valid user-space address.
++ * @num_pages: Size in pages of the user memory area.
++ *
++ * Populate a struct ttm_tt with a user-space memory area after first pinning
++ * the pages backing it.
++ * Returns:
++ * !0: Error.
++ */
++
++extern int ttm_tt_set_user(struct ttm_tt *ttm,
++ struct task_struct *tsk,
++ unsigned long start, unsigned long num_pages);
++
++/**
++ * ttm_ttm_bind:
++ *
++ * @ttm: The struct ttm_tt containing backing pages.
++ * @bo_mem: The struct ttm_mem_reg identifying the binding location.
++ *
++ * Bind the pages of @ttm to an aperture location identified by @bo_mem
++ */
++extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
++
++/**
++ * ttm_ttm_destroy:
++ *
++ * @ttm: The struct ttm_tt.
++ *
++ * Unbind, unpopulate and destroy a struct ttm_tt.
++ */
++extern void ttm_tt_destroy(struct ttm_tt *ttm);
++
++/**
++ * ttm_ttm_unbind:
++ *
++ * @ttm: The struct ttm_tt.
++ *
++ * Unbind a struct ttm_tt.
++ */
++extern void ttm_tt_unbind(struct ttm_tt *ttm);
++
++/**
++ * ttm_ttm_destroy:
++ *
++ * @ttm: The struct ttm_tt.
++ * @index: Index of the desired page.
++ *
++ * Return a pointer to the struct page backing @ttm at page
++ * index @index. If the page is unpopulated, one will be allocated to
++ * populate that index.
++ *
++ * Returns:
++ * NULL on OOM.
++ */
++extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
++
++/**
++ * ttm_tt_cache_flush:
++ *
++ * @pages: An array of pointers to struct page:s to flush.
++ * @num_pages: Number of pages to flush.
++ *
++ * Flush the data of the indicated pages from the cpu caches.
++ * This is used when changing caching attributes of the pages from
++ * cache-coherent.
++ */
++extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);
++
++/**
++ * ttm_tt_set_placement_caching:
++ *
++ * @ttm A struct ttm_tt the backing pages of which will change caching policy.
++ * @placement: Flag indicating the desired caching policy.
++ *
++ * This function will change caching policy of any default kernel mappings of
++ * the pages backing @ttm. If changing from cached to uncached or
++ * write-combined, all CPU caches will first be flushed to make sure the
++ * data of the pages hit RAM. This function may be very costly as it involves
++ * global TLB and cache flushes and potential page splitting / combining.
++ */
++extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm,
++ uint32_t placement);
++extern int ttm_tt_swapout(struct ttm_tt *ttm,
++ struct file *persistant_swap_storage);
++
++/*
++ * ttm_bo.c
++ */
++
++/**
++ * ttm_mem_reg_is_pci
++ *
++ * @bdev: Pointer to a struct ttm_bo_device.
++ * @mem: A valid struct ttm_mem_reg.
++ *
++ * Returns true if the memory described by @mem is PCI memory,
++ * false otherwise.
++ */
++extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
++ struct ttm_mem_reg *mem);
++
++/**
++ * ttm_bo_mem_space
++ *
++ * @bo: Pointer to a struct ttm_buffer_object. the data of which
++ * we want to allocate space for.
++ * @mem: A struct ttm_mem_reg with the struct ttm_mem_reg::proposed_flags set
++ * up.
++ * @interruptible: Sleep interruptible when sleeping.
++ * @no_wait: Don't sleep waiting for space to become available.
++ *
++ * Allocate memory space for the buffer object pointed to by @bo, using
++ * the placement flags in @mem, potentially evicting other idle buffer objects.
++ * This function may sleep while waiting for space to become available.
++ * Returns:
++ * -EBUSY: No space available (only if no_wait == 1).
++ * -ENOMEM: Could not allocate memory for the buffer object, either due to
++ * fragmentation or concurrent allocators.
++ * -ERESTART: An interruptible sleep was interrupted by a signal.
++ */
++extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
++ struct ttm_mem_reg *mem,
++ bool interruptible, bool no_wait);
++/**
++ * ttm_bo_wait_for_cpu
++ *
++ * @bo: Pointer to a struct ttm_buffer_object.
++ * @no_wait: Don't sleep while waiting.
++ *
++ * Wait until a buffer object is no longer sync'ed for CPU access.
++ * Returns:
++ * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
++ * -ERESTART: An interruptible sleep was interrupted by a signal.
++ */
++
++extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
++
++/**
++ * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
++ *
++ * @bo Pointer to a struct ttm_buffer_object.
++ * @bus_base On return the base of the PCI region
++ * @bus_offset On return the byte offset into the PCI region
++ * @bus_size On return the byte size of the buffer object or zero if
++ * the buffer object memory is not accessible through a PCI region.
++ *
++ * Returns:
++ * -EINVAL if the buffer object is currently not mappable.
++ * 0 otherwise.
++ */
++
++extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
++ struct ttm_mem_reg *mem,
++ unsigned long *bus_base,
++ unsigned long *bus_offset,
++ unsigned long *bus_size);
++
++extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
++
++/**
++ * ttm_bo_device_init
++ *
++ * @bdev: A pointer to a struct ttm_bo_device to initialize.
++ * @mem_global: A pointer to an initialized struct ttm_mem_global.
++ * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
++ * @file_page_offset: Offset into the device address space that is available
++ * for buffer data. This ensures compatibility with other users of the
++ * address space.
++ *
++ * Initializes a struct ttm_bo_device:
++ * Returns:
++ * !0: Failure.
++ */
++extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
++ struct ttm_mem_global *mem_glob,
++ struct ttm_bo_driver *driver,
++ uint64_t file_page_offset);
++
++/**
++ * ttm_bo_reserve:
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @interruptible: Sleep interruptible if waiting.
++ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
++ * @use_sequence: If @bo is already reserved, Only sleep waiting for
++ * it to become unreserved if @sequence < (@bo)->sequence.
++ *
++ * Locks a buffer object for validation. (Or prevents other processes from
++ * locking it for validation) and removes it from lru lists, while taking
++ * a number of measures to prevent deadlocks.
++ *
++ * Deadlocks may occur when two processes try to reserve multiple buffers in
++ * different order, either by will or as a result of a buffer being evicted
++ * to make room for a buffer already reserved. (Buffers are reserved before
++ * they are evicted). The following algorithm prevents such deadlocks from
++ * occurring:
++ * 1) Buffers are reserved with the lru spinlock held. Upon successful
++ * reservation they are removed from the lru list. This stops a reserved buffer
++ * from being evicted. However the lru spinlock is released between the time
++ * a buffer is selected for eviction and the time it is reserved.
++ * Therefore a check is made when a buffer is reserved for eviction, that it
++ * is still the first buffer in the lru list, before it is removed from the
++ * list. @check_lru == 1 forces this check. If it fails, the function returns
++ * -EINVAL, and the caller should then choose a new buffer to evict and repeat
++ * the procedure.
++ * 2) Processes attempting to reserve multiple buffers other than for eviction,
++ * (typically execbuf), should first obtain a unique 32-bit
++ * validation sequence number,
++ * and call this function with @use_sequence == 1 and @sequence == the unique
++ * sequence number. If upon call of this function, the buffer object is already
++ * reserved, the validation sequence is checked against the validation
++ * sequence of the process currently reserving the buffer,
++ * and if the current validation sequence is greater than that of the process
++ * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
++ * waiting for the buffer to become unreserved, after which it retries
++ * reserving. The caller should, when receiving an -EAGAIN error
++ * release all its buffer reservations, wait for @bo to become unreserved, and
++ * then rerun the validation with the same validation sequence. This procedure
++ * will always guarantee that the process with the lowest validation sequence
++ * will eventually succeed, preventing both deadlocks and starvation.
++ *
++ * Returns:
++ * -EAGAIN: The reservation may cause a deadlock. Release all buffer
++ * reservations, wait for @bo to become unreserved and try again.
++ * (only if use_sequence == 1).
++ * -ERESTART: A wait for the buffer to become unreserved was interrupted by
++ * a signal. Release all buffer reservations and return to user-space.
++ */
++extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
++ bool interruptible,
++ bool no_wait, bool use_sequence, uint32_t sequence);
++
++/**
++ * ttm_bo_unreserve
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ *
++ * Unreserve a previous reservation of @bo.
++ */
++extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
++
++/**
++ * ttm_bo_wait_unreserved
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ *
++ * Wait for a struct ttm_buffer_object to become unreserved.
++ * This is typically used in the execbuf code to relax cpu-usage when
++ * backing off from a potential deadlock condition.
++ */
++extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
++ bool interruptible);
++
++/**
++ * ttm_bo_block_reservation
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @interruptible: Use interruptible sleep when waiting.
++ * @no_wait: Don't sleep, but rather return -EBUSY.
++ *
++ * Block reservation for validation by simply reserving the buffer.
++ * This is intended for single buffer use only without eviction,
++ * and thus needs no deadlock protection.
++ *
++ * Returns:
++ * -EBUSY: If no_wait == 1 and the buffer is already reserved.
++ * -ERESTART: If interruptible == 1 and the process received a
++ * signal while sleeping.
++ */
++extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
++ bool interruptible, bool no_wait);
++
++/**
++ * ttm_bo_unblock_reservation
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ *
++ * Unblocks reservation leaving lru lists untouched.
++ */
++extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
++
++/*
++ * ttm_bo_util.c
++ */
++
++/**
++ * ttm_bo_move_ttm
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @evict: 1: This is an eviction. Don't try to pipeline.
++ * @no_wait: Never sleep, but rather return with -EBUSY.
++ * @new_mem: struct ttm_mem_reg indicating where to move.
++ *
++ * Optimized move function for a buffer object with both old and
++ * new placement backed by a TTM. The function will, if successful,
++ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
++ * and update the (@bo)->mem placement flags. If unsuccessful, the old
++ * data remains untouched, and it's up to the caller to free the
++ * memory space indicated by @new_mem.
++ * Returns:
++ * !0: Failure.
++ */
++
++extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
++ bool evict,
++ bool no_wait,
++ struct ttm_mem_reg *new_mem);
++
++/**
++ * ttm_bo_move_memcpy
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @evict: 1: This is an eviction. Don't try to pipeline.
++ * @no_wait: Never sleep, but rather return with -EBUSY.
++ * @new_mem: struct ttm_mem_reg indicating where to move.
++ *
++ * Fallback move function for a mappable buffer object in mappable memory.
++ * The function will, if successful,
++ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
++ * and update the (@bo)->mem placement flags. If unsuccessful, the old
++ * data remains untouched, and it's up to the caller to free the
++ * memory space indicated by @new_mem.
++ * Returns:
++ * !0: Failure.
++ */
++
++extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
++ bool evict,
++ bool no_wait,
++ struct ttm_mem_reg *new_mem);
++
++/**
++ * ttm_bo_free_old_node
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ *
++ * Utility function to free an old placement after a successful move.
++ */
++extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
++
++/**
++ * ttm_bo_move_accel_cleanup.
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @sync_obj: A sync object that signals when moving is complete.
++ * @sync_obj_arg: An argument to pass to the sync object idle / wait
++ * functions.
++ * @evict: This is an evict move. Don't return until the buffer is idle.
++ * @no_wait: Never sleep, but rather return with -EBUSY.
++ * @new_mem: struct ttm_mem_reg indicating where to move.
++ *
++ * Accelerated move function to be called when an accelerated move
++ * has been scheduled. The function will create a new temporary buffer object
++ * representing the old placement, and put the sync object on both buffer
++ * objects. After that the newly created buffer object is unref'd to be
++ * destroyed when the move is complete. This will help pipeline
++ * buffer moves.
++ */
++
++extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
++ void *sync_obj,
++ void *sync_obj_arg,
++ bool evict, bool no_wait,
++ struct ttm_mem_reg *new_mem);
++/**
++ * ttm_io_prot
++ *
++ * @c_state: Caching state.
++ * @tmp: Page protection flag for a normal, cached mapping.
++ *
++ * Utility function that returns the pgprot_t that should be used for
++ * setting up a PTE with the caching model indicated by @c_state.
++ */
++extern pgprot_t ttm_io_prot(enum ttm_caching_state c_state, pgprot_t tmp);
++
++#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
++#define TTM_HAS_AGP
++#include <linux/agp_backend.h>
++
++/**
++ * ttm_agp_backend_init
++ *
++ * @bdev: Pointer to a struct ttm_bo_device.
++ * @bridge: The agp bridge this device is sitting on.
++ *
++ * Create a TTM backend that uses the indicated AGP bridge as an aperture
++ * for TT memory. This function uses the linux agpgart interface to
++ * bind and unbind memory backing a ttm_tt.
++ */
++extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
++ struct agp_bridge_data *bridge);
++#endif
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_util.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_util.c
+new file mode 100644
+index 0000000..ce8eaed
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_util.c
+@@ -0,0 +1,546 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm_bo_driver.h"
++#include "ttm_placement_common.h"
++#include "ttm_pat_compat.h"
++#include <linux/io.h>
++#include <linux/highmem.h>
++#include <linux/wait.h>
++#include <linux/version.h>
++
++void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
++{
++ struct ttm_mem_reg *old_mem = &bo->mem;
++
++ if (old_mem->mm_node) {
++ spin_lock(&bo->bdev->lru_lock);
++ drm_mm_put_block(old_mem->mm_node);
++ spin_unlock(&bo->bdev->lru_lock);
++ }
++ old_mem->mm_node = NULL;
++}
++
++int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
++ bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
++{
++ struct ttm_tt *ttm = bo->ttm;
++ struct ttm_mem_reg *old_mem = &bo->mem;
++ uint32_t save_flags = old_mem->flags;
++ uint32_t save_proposed_flags = old_mem->proposed_flags;
++ int ret;
++
++ if (old_mem->mem_type != TTM_PL_SYSTEM) {
++ ttm_tt_unbind(ttm);
++ ttm_bo_free_old_node(bo);
++ ttm_flag_masked(&old_mem->flags, TTM_PL_FLAG_SYSTEM,
++ TTM_PL_MASK_MEM);
++ old_mem->mem_type = TTM_PL_SYSTEM;
++ save_flags = old_mem->flags;
++ }
++
++ ret = ttm_tt_set_placement_caching(ttm, new_mem->flags);
++ if (unlikely(ret != 0))
++ return ret;
++
++ if (new_mem->mem_type != TTM_PL_SYSTEM) {
++ ret = ttm_tt_bind(ttm, new_mem);
++ if (unlikely(ret != 0))
++ return ret;
++ }
++
++ *old_mem = *new_mem;
++ new_mem->mm_node = NULL;
++ old_mem->proposed_flags = save_proposed_flags;
++ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
++ return 0;
++}
++
++int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
++ void **virtual)
++{
++ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
++ unsigned long bus_offset;
++ unsigned long bus_size;
++ unsigned long bus_base;
++ int ret;
++ void *addr;
++
++ *virtual = NULL;
++ ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
++ if (ret || bus_size == 0)
++ return ret;
++
++ if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
++ addr = (void *)(((u8 *) man->io_addr) + bus_offset);
++ else {
++ if (mem->flags & TTM_PL_FLAG_WC)
++ addr = ioremap_wc(bus_base + bus_offset, bus_size);
++ else
++ addr = ioremap_nocache(bus_base + bus_offset, bus_size);
++ if (!addr)
++ return -ENOMEM;
++ }
++ *virtual = addr;
++ return 0;
++}
++
++void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
++ void *virtual)
++{
++ struct ttm_mem_type_manager *man;
++
++ man = &bdev->man[mem->mem_type];
++
++ if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
++ iounmap(virtual);
++}
++
++static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
++{
++ uint32_t *dstP =
++ (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
++ uint32_t *srcP =
++ (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
++
++ int i;
++ for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
++ iowrite32(ioread32(srcP++), dstP++);
++ return 0;
++}
++
++static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
++ unsigned long page)
++{
++ struct page *d = ttm_tt_get_page(ttm, page);
++ void *dst;
++
++ if (!d)
++ return -ENOMEM;
++
++ src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
++ dst = kmap(d);
++ if (!dst)
++ return -ENOMEM;
++
++ memcpy_fromio(dst, src, PAGE_SIZE);
++ kunmap(d);
++ return 0;
++}
++
++static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
++ unsigned long page)
++{
++ struct page *s = ttm_tt_get_page(ttm, page);
++ void *src;
++
++ if (!s)
++ return -ENOMEM;
++
++ dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
++ src = kmap(s);
++ if (!src)
++ return -ENOMEM;
++
++ memcpy_toio(dst, src, PAGE_SIZE);
++ kunmap(s);
++ return 0;
++}
++
++int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
++ bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
++ struct ttm_tt *ttm = bo->ttm;
++ struct ttm_mem_reg *old_mem = &bo->mem;
++ struct ttm_mem_reg old_copy = *old_mem;
++ void *old_iomap;
++ void *new_iomap;
++ int ret;
++ uint32_t save_flags = old_mem->flags;
++ uint32_t save_proposed_flags = old_mem->proposed_flags;
++ unsigned long i;
++ unsigned long page;
++ unsigned long add = 0;
++ int dir;
++
++ ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
++ if (ret)
++ return ret;
++ ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
++ if (ret)
++ goto out;
++
++ if (old_iomap == NULL && new_iomap == NULL)
++ goto out2;
++ if (old_iomap == NULL && ttm == NULL)
++ goto out2;
++
++ add = 0;
++ dir = 1;
++
++ if ((old_mem->mem_type == new_mem->mem_type) &&
++ (new_mem->mm_node->start <
++ old_mem->mm_node->start + old_mem->mm_node->size)) {
++ dir = -1;
++ add = new_mem->num_pages - 1;
++ }
++
++ for (i = 0; i < new_mem->num_pages; ++i) {
++ page = i * dir + add;
++ if (old_iomap == NULL)
++ ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
++ else if (new_iomap == NULL)
++ ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
++ else
++ ret = ttm_copy_io_page(new_iomap, old_iomap, page);
++ if (ret)
++ goto out1;
++ }
++ mb();
++out2:
++ ttm_bo_free_old_node(bo);
++
++ *old_mem = *new_mem;
++ new_mem->mm_node = NULL;
++ old_mem->proposed_flags = save_proposed_flags;
++ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
++
++ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
++ ttm_tt_unbind(ttm);
++ ttm_tt_destroy(ttm);
++ bo->ttm = NULL;
++ }
++
++out1:
++ ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
++out:
++ ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
++ return ret;
++}
++
++/**
++ * ttm_buffer_object_transfer
++ *
++ * @bo: A pointer to a struct ttm_buffer_object.
++ * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
++ * holding the data of @bo with the old placement.
++ *
++ * This is a utility function that may be called after an accelerated move
++ * has been scheduled. A new buffer object is created as a placeholder for
++ * the old data while it's being copied. When that buffer object is idle,
++ * it can be destroyed, releasing the space of the old placement.
++ * Returns:
++ * !0: Failure.
++ */
++
++static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
++ struct ttm_buffer_object **new_obj)
++{
++ struct ttm_buffer_object *fbo;
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_bo_driver *driver = bdev->driver;
++
++ fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
++ if (!fbo)
++ return -ENOMEM;
++
++ *fbo = *bo;
++ mutex_init(&fbo->mutex);
++ mutex_lock(&fbo->mutex);
++
++ init_waitqueue_head(&fbo->event_queue);
++ INIT_LIST_HEAD(&fbo->ddestroy);
++ INIT_LIST_HEAD(&fbo->lru);
++
++ fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
++ if (fbo->mem.mm_node)
++ fbo->mem.mm_node->private = (void *)fbo;
++ kref_init(&fbo->list_kref);
++ kref_init(&fbo->kref);
++
++ mutex_unlock(&fbo->mutex);
++
++ *new_obj = fbo;
++ return 0;
++}
++
++pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
++{
++#if defined(__i386__) || defined(__x86_64__)
++ if (caching_flags & TTM_PL_FLAG_WC) {
++ tmp = pgprot_ttm_x86_wc(tmp);
++ } else if (boot_cpu_data.x86 > 3 &&
++ (caching_flags & TTM_PL_FLAG_UNCACHED)) {
++ tmp = pgprot_noncached(tmp);
++ }
++#elif defined(__powerpc__)
++ if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
++ pgprot_val(tmp) |= _PAGE_NO_CACHE;
++ if (caching_flags & TTM_PL_FLAG_UNCACHED)
++ pgprot_val(tmp) |= _PAGE_GUARDED;
++ }
++#endif
++#if defined(__ia64__)
++ if (caching_flags & TTM_PL_FLAG_WC)
++ tmp = pgprot_writecombine(tmp);
++ else
++ tmp = pgprot_noncached(tmp);
++#endif
++#if defined(__sparc__)
++ if (!(caching_flags & TTM_PL_FLAG_CACHED))
++ tmp = pgprot_noncached(tmp);
++#endif
++ return tmp;
++}
++
++static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
++ unsigned long bus_base,
++ unsigned long bus_offset,
++ unsigned long bus_size,
++ struct ttm_bo_kmap_obj *map)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_mem_reg *mem = &bo->mem;
++ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
++
++ if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
++ map->bo_kmap_type = ttm_bo_map_premapped;
++ map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
++ } else {
++ map->bo_kmap_type = ttm_bo_map_iomap;
++ if (mem->flags & TTM_PL_FLAG_WC)
++ map->virtual =
++ ioremap_wc(bus_base + bus_offset,
++ bus_size);
++ else
++ map->virtual =
++ ioremap_nocache(bus_base + bus_offset,
++ bus_size);
++ }
++ return (!map->virtual) ? -ENOMEM : 0;
++}
++
++static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
++ unsigned long start_page,
++ unsigned long num_pages,
++ struct ttm_bo_kmap_obj *map)
++{
++ struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
++ struct ttm_tt *ttm = bo->ttm;
++ struct page *d;
++ bool do_kmap = false;
++ int i;
++ BUG_ON(!ttm);
++ if (num_pages == 1) {
++ map->page = ttm_tt_get_page(ttm, start_page);
++ do_kmap = (!PageHighMem(map->page) ||
++ (mem->flags & TTM_PL_FLAG_CACHED));
++ }
++
++ if (do_kmap) {
++ /*
++ * We're mapping a single page, and the desired
++ * page protection is consistent with the bo.
++ */
++ map->bo_kmap_type = ttm_bo_map_kmap;
++ map->virtual = kmap(map->page);
++ } else {
++ /* Populate the part we're mapping; */
++ for (i = start_page; i < start_page + num_pages; ++i) {
++ d = ttm_tt_get_page(ttm, i);
++
++ if (!d)
++ return -ENOMEM;
++ }
++
++ /*
++ * We need to use vmap to get the desired page protection
++ * or to make the buffer object look contigous.
++ */
++ prot = (mem->flags & TTM_PL_FLAG_CACHED) ?
++ PAGE_KERNEL :
++ ttm_io_prot(mem->flags, PAGE_KERNEL);
++ map->bo_kmap_type = ttm_bo_map_vmap;
++ map->virtual = vmap(ttm->pages + start_page,
++ num_pages,
++ 0,
++ prot);
++ }
++ return (!map->virtual) ? -ENOMEM : 0;
++}
++
++int ttm_bo_kmap(struct ttm_buffer_object *bo,
++ unsigned long start_page, unsigned long num_pages,
++ struct ttm_bo_kmap_obj *map)
++{
++ int ret;
++ unsigned long bus_base;
++ unsigned long bus_offset;
++ unsigned long bus_size;
++ BUG_ON(!list_empty(&bo->swap));
++ map->virtual = NULL;
++
++ if (num_pages > bo->num_pages)
++ return -EINVAL;
++
++ if (start_page > bo->num_pages)
++ return -EINVAL;
++#if 0
++ if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
++ return -EPERM;
++#endif
++ ret = ttm_bo_pci_offset(bo->bdev,
++ &bo->mem,
++ &bus_base,
++ &bus_offset,
++ &bus_size);
++ if (ret)
++ return ret;
++
++ if (bus_size == 0) {
++ return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
++ } else {
++ bus_offset += start_page << PAGE_SHIFT;
++ bus_size = num_pages << PAGE_SHIFT;
++
++ return ttm_bo_ioremap(bo,
++ bus_base,
++ bus_offset,
++ bus_size, map);
++ }
++}
++
++void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
++{
++ if (!map->virtual)
++ return;
++ switch (map->bo_kmap_type) {
++ case ttm_bo_map_iomap:
++ iounmap(map->virtual);
++ break;
++ case ttm_bo_map_vmap:
++ vunmap(map->virtual);
++ break;
++ case ttm_bo_map_kmap:
++ kunmap(map->page);
++ break;
++ case ttm_bo_map_premapped:
++ break;
++ default:
++ BUG();
++ }
++ map->virtual = NULL;
++ map->page = NULL;
++}
++
++int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
++ unsigned long dst_offset,
++ unsigned long *pfn, pgprot_t *prot)
++{
++ struct ttm_mem_reg *mem = &bo->mem;
++ struct ttm_bo_device *bdev = bo->bdev;
++ unsigned long bus_offset;
++ unsigned long bus_size;
++ unsigned long bus_base;
++ int ret;
++ ret = ttm_bo_pci_offset(bdev,
++ mem,
++ &bus_base,
++ &bus_offset,
++ &bus_size);
++ if (ret)
++ return -EINVAL;
++ if (bus_size != 0)
++ *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
++ else
++ if (!bo->ttm)
++ return -EINVAL;
++ else
++ *pfn = page_to_pfn(ttm_tt_get_page(
++ bo->ttm,
++ dst_offset >> PAGE_SHIFT));
++
++ *prot = (mem->flags & TTM_PL_FLAG_CACHED) ?
++ PAGE_KERNEL :
++ ttm_io_prot(mem->flags, PAGE_KERNEL);
++ return 0;
++}
++
++int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
++ void *sync_obj,
++ void *sync_obj_arg,
++ bool evict, bool no_wait,
++ struct ttm_mem_reg *new_mem)
++{
++ struct ttm_bo_device *bdev = bo->bdev;
++ struct ttm_bo_driver *driver = bdev->driver;
++ struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
++ struct ttm_mem_reg *old_mem = &bo->mem;
++ int ret;
++ uint32_t save_flags = old_mem->flags;
++ uint32_t save_proposed_flags = old_mem->proposed_flags;
++ struct ttm_buffer_object *old_obj;
++ if (bo->sync_obj)
++ driver->sync_obj_unref(&bo->sync_obj);
++ bo->sync_obj = driver->sync_obj_ref(sync_obj);
++ bo->sync_obj_arg = sync_obj_arg;
++ if (evict) {
++ ret = ttm_bo_wait(bo, false, false, false);
++ if (ret)
++ return ret;
++ ttm_bo_free_old_node(bo);
++ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
++ (bo->ttm != NULL)) {
++ ttm_tt_unbind(bo->ttm);
++ ttm_tt_destroy(bo->ttm);
++ bo->ttm = NULL;
++ }
++ } else {
++
++ /* This should help pipeline ordinary buffer moves.
++ *
++ * Hang old buffer memory on a new buffer object,
++ * and leave it to be released when the GPU
++ * operation has completed.
++ */
++ ret = ttm_buffer_object_transfer(bo, &old_obj);
++ if (ret)
++ return ret;
++ if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
++ old_obj->ttm = NULL;
++ else
++ bo->ttm = NULL;
++ bo->priv_flags |= TTM_BO_PRIV_FLAG_MOVING;
++ ttm_bo_unreserve(old_obj);
++ }
++
++ *old_mem = *new_mem;
++ new_mem->mm_node = NULL;
++ old_mem->proposed_flags = save_proposed_flags;
++ ttm_flag_masked(&save_flags, new_mem->flags, TTM_PL_MASK_MEMTYPE);
++ return 0;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_vm.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_vm.c
+new file mode 100644
+index 0000000..a8aae7e
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_bo_vm.c
+@@ -0,0 +1,429 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 Vmware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++
++#include "ttm_bo_driver.h"
++#include "ttm_placement_common.h"
++#include <linux/mm.h>
++#include <linux/version.h>
++#include <linux/rbtree.h>
++#include <linux/uaccess.h>
++
++#define TTM_BO_VM_NUM_PREFAULT 16
++
++static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
++ unsigned long page_start,
++ unsigned long num_pages)
++{
++ struct rb_node *cur = bdev->addr_space_rb.rb_node;
++ unsigned long cur_offset;
++ struct ttm_buffer_object *bo;
++ struct ttm_buffer_object *best_bo = NULL;
++
++ while (likely(cur != NULL)) {
++ bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
++ cur_offset = bo->vm_node->start;
++ if (page_start >= cur_offset) {
++ cur = cur->rb_right;
++ best_bo = bo;
++ if (page_start == cur_offset)
++ break;
++ } else
++ cur = cur->rb_left;
++ }
++
++ if (unlikely(best_bo == NULL))
++ return NULL;
++
++ if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
++ (page_start + num_pages)))
++ return NULL;
++
++ return best_bo;
++}
++
++static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
++ vma->vm_private_data;
++ struct ttm_bo_device *bdev = bo->bdev;
++ unsigned long bus_base;
++ unsigned long bus_offset;
++ unsigned long bus_size;
++ unsigned long page_offset;
++ unsigned long page_last;
++ unsigned long pfn;
++ struct ttm_tt *ttm = NULL;
++ struct page *page;
++ int ret;
++ int i;
++ bool is_iomem;
++ unsigned long address = (unsigned long)vmf->virtual_address;
++ int retval = VM_FAULT_NOPAGE;
++
++ ret = ttm_bo_reserve(bo, true, false, false, 0);
++ if (unlikely(ret != 0))
++ return VM_FAULT_NOPAGE;
++
++ mutex_lock(&bo->mutex);
++
++ /*
++ * Wait for buffer data in transit, due to a pipelined
++ * move.
++ */
++
++ if (bo->priv_flags & TTM_BO_PRIV_FLAG_MOVING) {
++ ret = ttm_bo_wait(bo, false, true, false);
++ if (unlikely(ret != 0)) {
++ retval = (ret != -ERESTART) ?
++ VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
++ goto out_unlock;
++ }
++ }
++
++ ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
++ &bus_size);
++ if (unlikely(ret != 0)) {
++ retval = VM_FAULT_SIGBUS;
++ goto out_unlock;
++ }
++
++ is_iomem = (bus_size != 0);
++
++ page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
++ bo->vm_node->start - vma->vm_pgoff;
++ page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
++ bo->vm_node->start - vma->vm_pgoff;
++
++ if (unlikely(page_offset >= bo->num_pages)) {
++ retval = VM_FAULT_SIGBUS;
++ goto out_unlock;
++ }
++
++ /*
++ * Strictly, we're not allowed to modify vma->vm_page_prot here,
++ * since the mmap_sem is only held in read mode. However, we
++ * modify only the caching bits of vma->vm_page_prot and
++ * consider those bits protected by
++ * the bo->mutex, as we should be the only writers.
++ * There shouldn't really be any readers of these bits except
++ * within vm_insert_mixed()? fork?
++ *
++ * TODO: Add a list of vmas to the bo, and change the
++ * vma->vm_page_prot when the object changes caching policy, with
++ * the correct locks held.
++ */
++
++ if (is_iomem) {
++ vma->vm_page_prot = ttm_io_prot(bo->mem.flags,
++ vma->vm_page_prot);
++ } else {
++ ttm = bo->ttm;
++ vma->vm_page_prot = (bo->mem.flags & TTM_PL_FLAG_CACHED) ?
++ vm_get_page_prot(vma->vm_flags) :
++ ttm_io_prot(bo->mem.flags, vma->vm_page_prot);
++ }
++
++ /*
++ * Speculatively prefault a number of pages. Only error on
++ * first page.
++ */
++
++ for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
++
++ if (is_iomem)
++ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
++ page_offset;
++ else {
++ page = ttm_tt_get_page(ttm, page_offset);
++ if (unlikely(!page && i == 0)) {
++ retval = VM_FAULT_OOM;
++ goto out_unlock;
++ } else if (unlikely(!page)) {
++ break;
++ }
++ pfn = page_to_pfn(page);
++ }
++
++ ret = vm_insert_mixed(vma, address, pfn);
++ /*
++ * Somebody beat us to this PTE or prefaulting to
++ * an already populated PTE, or prefaulting error.
++ */
++
++ if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
++ break;
++ else if (unlikely(ret != 0)) {
++ retval =
++ (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
++ goto out_unlock;
++
++ }
++
++ address += PAGE_SIZE;
++ if (unlikely(++page_offset >= page_last))
++ break;
++ }
++
++out_unlock:
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++ return retval;
++}
++
++static void ttm_bo_vm_open(struct vm_area_struct *vma)
++{
++ struct ttm_buffer_object *bo =
++ (struct ttm_buffer_object *)vma->vm_private_data;
++
++ (void)ttm_bo_reference(bo);
++}
++
++static void ttm_bo_vm_close(struct vm_area_struct *vma)
++{
++ struct ttm_buffer_object *bo =
++ (struct ttm_buffer_object *)vma->vm_private_data;
++
++ ttm_bo_unref(&bo);
++ vma->vm_private_data = NULL;
++}
++
++static struct vm_operations_struct ttm_bo_vm_ops = {
++ .fault = ttm_bo_vm_fault,
++ .open = ttm_bo_vm_open,
++ .close = ttm_bo_vm_close
++};
++
++int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
++ struct ttm_bo_device *bdev)
++{
++ struct ttm_bo_driver *driver;
++ struct ttm_buffer_object *bo;
++ int ret;
++
++ read_lock(&bdev->vm_lock);
++ bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
++ (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
++ if (likely(bo != NULL))
++ ttm_bo_reference(bo);
++ read_unlock(&bdev->vm_lock);
++
++ if (unlikely(bo == NULL)) {
++ printk(KERN_ERR "Could not find buffer object to map.\n");
++ ret = -EINVAL;
++ goto out_unref;
++ }
++
++ driver = bo->bdev->driver;
++ if (unlikely(!driver->verify_access)) {
++ ret = -EPERM;
++ goto out_unref;
++ }
++ ret = driver->verify_access(bo, filp);
++ if (unlikely(ret != 0))
++ goto out_unref;
++
++ vma->vm_ops = &ttm_bo_vm_ops;
++
++ /*
++ * Note: We're transferring the bo reference to
++ * vma->vm_private_data here.
++ */
++
++ vma->vm_private_data = bo;
++ vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
++ return 0;
++out_unref:
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
++{
++ if (vma->vm_pgoff != 0)
++ return -EACCES;
++
++ vma->vm_ops = &ttm_bo_vm_ops;
++ vma->vm_private_data = ttm_bo_reference(bo);
++ vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
++ return 0;
++}
++
++ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
++ const char __user *wbuf, char __user *rbuf, size_t count,
++ loff_t *f_pos, bool write)
++{
++ struct ttm_buffer_object *bo;
++ struct ttm_bo_driver *driver;
++ struct ttm_bo_kmap_obj map;
++ unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
++ unsigned long kmap_offset;
++ unsigned long kmap_end;
++ unsigned long kmap_num;
++ size_t io_size;
++ unsigned int page_offset;
++ char *virtual;
++ int ret;
++ bool no_wait = false;
++ bool dummy;
++
++ read_lock(&bdev->vm_lock);
++ bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
++ if (likely(bo != NULL))
++ ttm_bo_reference(bo);
++ read_unlock(&bdev->vm_lock);
++
++ if (unlikely(bo == NULL))
++ return -EFAULT;
++
++ driver = bo->bdev->driver;
++ if (unlikely(driver->verify_access))
++ return -EPERM;
++
++ ret = driver->verify_access(bo, filp);
++ if (unlikely(ret != 0))
++ goto out_unref;
++
++ kmap_offset = dev_offset - bo->vm_node->start;
++ if (unlikely(kmap_offset) >= bo->num_pages) {
++ ret = -EFBIG;
++ goto out_unref;
++ }
++
++ page_offset = *f_pos & ~PAGE_MASK;
++ io_size = bo->num_pages - kmap_offset;
++ io_size = (io_size << PAGE_SHIFT) - page_offset;
++ if (count < io_size)
++ io_size = count;
++
++ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
++ kmap_num = kmap_end - kmap_offset + 1;
++
++ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
++
++ switch (ret) {
++ case 0:
++ break;
++ case -ERESTART:
++ ret = -EINTR;
++ goto out_unref;
++ case -EBUSY:
++ ret = -EAGAIN;
++ goto out_unref;
++ default:
++ goto out_unref;
++ }
++
++ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
++ if (unlikely(ret != 0))
++ goto out_unref;
++
++ virtual = ttm_kmap_obj_virtual(&map, &dummy);
++ virtual += page_offset;
++
++ if (write)
++ ret = copy_from_user(virtual, wbuf, io_size);
++ else
++ ret = copy_to_user(rbuf, virtual, io_size);
++
++ ttm_bo_kunmap(&map);
++ ttm_bo_unreserve(bo);
++ ttm_bo_unref(&bo);
++
++ if (unlikely(ret != 0))
++ return -EFBIG;
++
++ *f_pos += io_size;
++
++ return io_size;
++out_unref:
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
++ char __user *rbuf, size_t count, loff_t *f_pos,
++ bool write)
++{
++ struct ttm_bo_kmap_obj map;
++ unsigned long kmap_offset;
++ unsigned long kmap_end;
++ unsigned long kmap_num;
++ size_t io_size;
++ unsigned int page_offset;
++ char *virtual;
++ int ret;
++ bool no_wait = false;
++ bool dummy;
++
++ kmap_offset = (*f_pos >> PAGE_SHIFT);
++ if (unlikely(kmap_offset) >= bo->num_pages)
++ return -EFBIG;
++
++ page_offset = *f_pos & ~PAGE_MASK;
++ io_size = bo->num_pages - kmap_offset;
++ io_size = (io_size << PAGE_SHIFT) - page_offset;
++ if (count < io_size)
++ io_size = count;
++
++ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
++ kmap_num = kmap_end - kmap_offset + 1;
++
++ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
++
++ switch (ret) {
++ case 0:
++ break;
++ case -ERESTART:
++ return -EINTR;
++ case -EBUSY:
++ return -EAGAIN;
++ default:
++ return ret;
++ }
++
++ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
++ if (unlikely(ret != 0))
++ return ret;
++
++ virtual = ttm_kmap_obj_virtual(&map, &dummy);
++ virtual += page_offset;
++
++ if (write)
++ ret = copy_from_user(virtual, wbuf, io_size);
++ else
++ ret = copy_to_user(rbuf, virtual, io_size);
++
++ ttm_bo_kunmap(&map);
++ ttm_bo_unreserve(bo);
++ ttm_bo_unref(&bo);
++
++ if (unlikely(ret != 0))
++ return ret;
++
++ *f_pos += io_size;
++
++ return io_size;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_execbuf_util.c
+new file mode 100644
+index 0000000..610e0e0
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_execbuf_util.c
+@@ -0,0 +1,108 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include "ttm_execbuf_util.h"
++#include "ttm_bo_driver.h"
++#include "ttm_placement_common.h"
++#include <linux/wait.h>
++#include <linux/sched.h>
++
++void ttm_eu_backoff_reservation(struct list_head *list)
++{
++ struct ttm_validate_buffer *entry;
++
++ list_for_each_entry(entry, list, head) {
++ struct ttm_buffer_object *bo = entry->bo;
++ if (!entry->reserved)
++ continue;
++
++ entry->reserved = false;
++ ttm_bo_unreserve(bo);
++ }
++}
++
++/*
++ * Reserve buffers for validation.
++ *
++ * If a buffer in the list is marked for CPU access, we back off and
++ * wait for that buffer to become free for GPU access.
++ *
++ * If a buffer is reserved for another validation, the validator with
++ * the highest validation sequence backs off and waits for that buffer
++ * to become unreserved. This prevents deadlocks when validating multiple
++ * buffers in different orders.
++ */
++
++int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
++{
++ struct ttm_validate_buffer *entry;
++ int ret;
++
++retry:
++ list_for_each_entry(entry, list, head) {
++ struct ttm_buffer_object *bo = entry->bo;
++
++ entry->reserved = false;
++ ret = ttm_bo_reserve(bo, true, false, true, val_seq);
++ if (ret != 0) {
++ ttm_eu_backoff_reservation(list);
++ if (ret == -EAGAIN) {
++ ret = ttm_bo_wait_unreserved(bo, true);
++ if (unlikely(ret != 0))
++ return ret;
++ goto retry;
++ } else
++ return ret;
++ }
++
++ entry->reserved = true;
++ if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
++ ttm_eu_backoff_reservation(list);
++ ret = ttm_bo_wait_cpu(bo, false);
++ if (ret)
++ return ret;
++ goto retry;
++ }
++ }
++ return 0;
++}
++
++void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
++{
++ struct ttm_validate_buffer *entry;
++
++ list_for_each_entry(entry, list, head) {
++ struct ttm_buffer_object *bo = entry->bo;
++ struct ttm_bo_driver *driver = bo->bdev->driver;
++ void *old_sync_obj;
++
++ mutex_lock(&bo->mutex);
++ old_sync_obj = bo->sync_obj;
++ bo->sync_obj = driver->sync_obj_ref(sync_obj);
++ bo->sync_obj_arg = entry->new_sync_obj_arg;
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++ entry->reserved = false;
++ if (old_sync_obj)
++ driver->sync_obj_unref(&old_sync_obj);
++ }
++}
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_execbuf_util.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_execbuf_util.h
+new file mode 100644
+index 0000000..0b88d08
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_execbuf_util.h
+@@ -0,0 +1,103 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_EXECBUF_UTIL_H_
++#define _TTM_EXECBUF_UTIL_H_
++
++#include "ttm_bo_api.h"
++#include "ttm_fence_api.h"
++#include <linux/list.h>
++
++/**
++ * struct ttm_validate_buffer
++ *
++ * @head: list head for thread-private list.
++ * @bo: refcounted buffer object pointer.
++ * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
++ * adding a new sync object.
++ * @reservied: Indicates whether @bo has been reserved for validation.
++ */
++
++struct ttm_validate_buffer {
++ struct list_head head;
++ struct ttm_buffer_object *bo;
++ void *new_sync_obj_arg;
++ bool reserved;
++};
++
++/**
++ * function ttm_eu_backoff_reservation
++ *
++ * @list: thread private list of ttm_validate_buffer structs.
++ *
++ * Undoes all buffer validation reservations for bos pointed to by
++ * the list entries.
++ */
++
++extern void ttm_eu_backoff_reservation(struct list_head *list);
++
++/**
++ * function ttm_eu_reserve_buffers
++ *
++ * @list: thread private list of ttm_validate_buffer structs.
++ * @val_seq: A unique sequence number.
++ *
++ * Tries to reserve bos pointed to by the list entries for validation.
++ * If the function returns 0, all buffers are marked as "unfenced",
++ * taken off the lru lists and are not synced for write CPU usage.
++ *
++ * If the function detects a deadlock due to multiple threads trying to
++ * reserve the same buffers in reverse order, all threads except one will
++ * back off and retry. This function may sleep while waiting for
++ * CPU write reservations to be cleared, and for other threads to
++ * unreserve their buffers.
++ *
++ * This function may return -ERESTART or -EAGAIN if the calling process
++ * receives a signal while waiting. In that case, no buffers on the list
++ * will be reserved upon return.
++ *
++ * Buffers reserved by this function should be unreserved by
++ * a call to either ttm_eu_backoff_reservation() or
++ * ttm_eu_fence_buffer_objects() when command submission is complete or
++ * has failed.
++ */
++
++extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);
++
++/**
++ * function ttm_eu_fence_buffer_objects.
++ *
++ * @list: thread private list of ttm_validate_buffer structs.
++ * @sync_obj: The new sync object for the buffers.
++ *
++ * This function should be called when command submission is complete, and
++ * it will add a new sync object to bos pointed to by entries on @list.
++ * It also unreserves all buffers, putting them on lru lists.
++ *
++ */
++
++extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_fence.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_fence.c
+new file mode 100644
+index 0000000..3f36ecc
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_fence.c
+@@ -0,0 +1,607 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm_fence_api.h"
++#include "ttm_fence_driver.h"
++#include <linux/wait.h>
++#include <linux/sched.h>
++
++#include <drm/drmP.h>
++
++/*
++ * Simple implementation for now.
++ */
++
++static void ttm_fence_lockup(struct ttm_fence_object *fence, uint32_t mask)
++{
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++
++ printk(KERN_ERR "GPU lockup dectected on engine %u "
++ "fence type 0x%08x\n",
++ (unsigned int)fence->fence_class, (unsigned int)mask);
++ /*
++ * Give engines some time to idle?
++ */
++
++ write_lock(&fc->lock);
++ ttm_fence_handler(fence->fdev, fence->fence_class,
++ fence->sequence, mask, -EBUSY);
++ write_unlock(&fc->lock);
++}
++
++/*
++ * Convenience function to be called by fence::wait methods that
++ * need polling.
++ */
++
++int ttm_fence_wait_polling(struct ttm_fence_object *fence, bool lazy,
++ bool interruptible, uint32_t mask)
++{
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
++ uint32_t count = 0;
++ int ret;
++ unsigned long end_jiffies = fence->timeout_jiffies;
++
++ DECLARE_WAITQUEUE(entry, current);
++ add_wait_queue(&fc->fence_queue, &entry);
++
++ ret = 0;
++
++ for (;;) {
++ __set_current_state((interruptible) ?
++ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
++ if (ttm_fence_object_signaled(fence, mask))
++ break;
++ if (time_after_eq(jiffies, end_jiffies)) {
++ if (driver->lockup)
++ driver->lockup(fence, mask);
++ else
++ ttm_fence_lockup(fence, mask);
++ continue;
++ }
++ if (lazy)
++ schedule_timeout(1);
++ else if ((++count & 0x0F) == 0) {
++ __set_current_state(TASK_RUNNING);
++ schedule();
++ __set_current_state((interruptible) ?
++ TASK_INTERRUPTIBLE :
++ TASK_UNINTERRUPTIBLE);
++ }
++ if (interruptible && signal_pending(current)) {
++ ret = -ERESTART;
++ break;
++ }
++ }
++ __set_current_state(TASK_RUNNING);
++ remove_wait_queue(&fc->fence_queue, &entry);
++ return ret;
++}
++
++/*
++ * Typically called by the IRQ handler.
++ */
++
++void ttm_fence_handler(struct ttm_fence_device *fdev, uint32_t fence_class,
++ uint32_t sequence, uint32_t type, uint32_t error)
++{
++ int wake = 0;
++ uint32_t diff;
++ uint32_t relevant_type;
++ uint32_t new_type;
++ struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
++ const struct ttm_fence_driver *driver = ttm_fence_driver_from_dev(fdev);
++ struct list_head *head;
++ struct ttm_fence_object *fence, *next;
++ bool found = false;
++
++ if (list_empty(&fc->ring))
++ return;
++
++ list_for_each_entry(fence, &fc->ring, ring) {
++ diff = (sequence - fence->sequence) & fc->sequence_mask;
++ if (diff > fc->wrap_diff) {
++ found = true;
++ break;
++ }
++ }
++
++ fc->waiting_types &= ~type;
++ head = (found) ? &fence->ring : &fc->ring;
++
++ list_for_each_entry_safe_reverse(fence, next, head, ring) {
++ if (&fence->ring == &fc->ring)
++ break;
++
++ DRM_DEBUG("Fence 0x%08lx, sequence 0x%08x, type 0x%08x\n",
++ (unsigned long)fence, fence->sequence,
++ fence->fence_type);
++
++ if (error) {
++ fence->info.error = error;
++ fence->info.signaled_types = fence->fence_type;
++ list_del_init(&fence->ring);
++ wake = 1;
++ break;
++ }
++
++ relevant_type = type & fence->fence_type;
++ new_type = (fence->info.signaled_types | relevant_type) ^
++ fence->info.signaled_types;
++
++ if (new_type) {
++ fence->info.signaled_types |= new_type;
++ DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
++ (unsigned long)fence,
++ fence->info.signaled_types);
++
++ if (unlikely(driver->signaled))
++ driver->signaled(fence);
++
++ if (driver->needed_flush)
++ fc->pending_flush |=
++ driver->needed_flush(fence);
++
++ if (new_type & fence->waiting_types)
++ wake = 1;
++ }
++
++ fc->waiting_types |=
++ fence->waiting_types & ~fence->info.signaled_types;
++
++ if (!(fence->fence_type & ~fence->info.signaled_types)) {
++ DRM_DEBUG("Fence completely signaled 0x%08lx\n",
++ (unsigned long)fence);
++ list_del_init(&fence->ring);
++ }
++ }
++
++ /*
++ * Reinstate lost waiting types.
++ */
++
++ if ((fc->waiting_types & type) != type) {
++ head = head->prev;
++ list_for_each_entry(fence, head, ring) {
++ if (&fence->ring == &fc->ring)
++ break;
++ diff =
++ (fc->highest_waiting_sequence -
++ fence->sequence) & fc->sequence_mask;
++ if (diff > fc->wrap_diff)
++ break;
++
++ fc->waiting_types |=
++ fence->waiting_types & ~fence->info.signaled_types;
++ }
++ }
++
++ if (wake)
++ wake_up_all(&fc->fence_queue);
++}
++
++static void ttm_fence_unring(struct ttm_fence_object *fence)
++{
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ unsigned long irq_flags;
++
++ write_lock_irqsave(&fc->lock, irq_flags);
++ list_del_init(&fence->ring);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++}
++
++bool ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask)
++{
++ unsigned long flags;
++ bool signaled;
++ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++
++ mask &= fence->fence_type;
++ read_lock_irqsave(&fc->lock, flags);
++ signaled = (mask & fence->info.signaled_types) == mask;
++ read_unlock_irqrestore(&fc->lock, flags);
++ if (!signaled && driver->poll) {
++ write_lock_irqsave(&fc->lock, flags);
++ driver->poll(fence->fdev, fence->fence_class, mask);
++ signaled = (mask & fence->info.signaled_types) == mask;
++ write_unlock_irqrestore(&fc->lock, flags);
++ }
++ return signaled;
++}
++
++int ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t type)
++{
++ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ unsigned long irq_flags;
++ uint32_t saved_pending_flush;
++ uint32_t diff;
++ bool call_flush;
++
++ if (type & ~fence->fence_type) {
++ DRM_ERROR("Flush trying to extend fence type, "
++ "0x%x, 0x%x\n", type, fence->fence_type);
++ return -EINVAL;
++ }
++
++ write_lock_irqsave(&fc->lock, irq_flags);
++ fence->waiting_types |= type;
++ fc->waiting_types |= fence->waiting_types;
++ diff = (fence->sequence - fc->highest_waiting_sequence) &
++ fc->sequence_mask;
++
++ if (diff < fc->wrap_diff)
++ fc->highest_waiting_sequence = fence->sequence;
++
++ /*
++ * fence->waiting_types has changed. Determine whether
++ * we need to initiate some kind of flush as a result of this.
++ */
++
++ saved_pending_flush = fc->pending_flush;
++ if (driver->needed_flush)
++ fc->pending_flush |= driver->needed_flush(fence);
++
++ if (driver->poll)
++ driver->poll(fence->fdev, fence->fence_class,
++ fence->waiting_types);
++
++ call_flush = (fc->pending_flush != 0);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++
++ if (call_flush && driver->flush)
++ driver->flush(fence->fdev, fence->fence_class);
++
++ return 0;
++}
++
++/*
++ * Make sure old fence objects are signaled before their fence sequences are
++ * wrapped around and reused.
++ */
++
++void ttm_fence_flush_old(struct ttm_fence_device *fdev,
++ uint32_t fence_class, uint32_t sequence)
++{
++ struct ttm_fence_class_manager *fc = &fdev->fence_class[fence_class];
++ struct ttm_fence_object *fence;
++ unsigned long irq_flags;
++ const struct ttm_fence_driver *driver = fdev->driver;
++ bool call_flush;
++
++ uint32_t diff;
++
++ write_lock_irqsave(&fc->lock, irq_flags);
++
++ list_for_each_entry_reverse(fence, &fc->ring, ring) {
++ diff = (sequence - fence->sequence) & fc->sequence_mask;
++ if (diff <= fc->flush_diff)
++ break;
++
++ fence->waiting_types = fence->fence_type;
++ fc->waiting_types |= fence->fence_type;
++
++ if (driver->needed_flush)
++ fc->pending_flush |= driver->needed_flush(fence);
++ }
++
++ if (driver->poll)
++ driver->poll(fdev, fence_class, fc->waiting_types);
++
++ call_flush = (fc->pending_flush != 0);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++
++ if (call_flush && driver->flush)
++ driver->flush(fdev, fence_class);
++
++ /*
++ * FIXME: Should we implement a wait here for really old fences?
++ */
++
++}
++
++int ttm_fence_object_wait(struct ttm_fence_object *fence,
++ bool lazy, bool interruptible, uint32_t mask)
++{
++ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ int ret = 0;
++ unsigned long timeout;
++ unsigned long cur_jiffies;
++ unsigned long to_jiffies;
++
++ if (mask & ~fence->fence_type) {
++ DRM_ERROR("Wait trying to extend fence type"
++ " 0x%08x 0x%08x\n", mask, fence->fence_type);
++ BUG();
++ return -EINVAL;
++ }
++
++ if (driver->wait)
++ return driver->wait(fence, lazy, interruptible, mask);
++
++ ttm_fence_object_flush(fence, mask);
++retry:
++ if (!driver->has_irq ||
++ driver->has_irq(fence->fdev, fence->fence_class, mask)) {
++
++ cur_jiffies = jiffies;
++ to_jiffies = fence->timeout_jiffies;
++
++ timeout = (time_after(to_jiffies, cur_jiffies)) ?
++ to_jiffies - cur_jiffies : 1;
++
++ if (interruptible)
++ ret = wait_event_interruptible_timeout
++ (fc->fence_queue,
++ ttm_fence_object_signaled(fence, mask), timeout);
++ else
++ ret = wait_event_timeout
++ (fc->fence_queue,
++ ttm_fence_object_signaled(fence, mask), timeout);
++
++ if (unlikely(ret == -ERESTARTSYS))
++ return -ERESTART;
++
++ if (unlikely(ret == 0)) {
++ if (driver->lockup)
++ driver->lockup(fence, mask);
++ else
++ ttm_fence_lockup(fence, mask);
++ goto retry;
++ }
++
++ return 0;
++ }
++
++ return ttm_fence_wait_polling(fence, lazy, interruptible, mask);
++}
++
++int ttm_fence_object_emit(struct ttm_fence_object *fence, uint32_t fence_flags,
++ uint32_t fence_class, uint32_t type)
++{
++ const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ unsigned long flags;
++ uint32_t sequence;
++ unsigned long timeout;
++ int ret;
++
++ ttm_fence_unring(fence);
++ ret = driver->emit(fence->fdev,
++ fence_class, fence_flags, &sequence, &timeout);
++ if (ret)
++ return ret;
++
++ write_lock_irqsave(&fc->lock, flags);
++ fence->fence_class = fence_class;
++ fence->fence_type = type;
++ fence->waiting_types = 0;
++ fence->info.signaled_types = 0;
++ fence->info.error = 0;
++ fence->sequence = sequence;
++ fence->timeout_jiffies = timeout;
++ if (list_empty(&fc->ring))
++ fc->highest_waiting_sequence = sequence - 1;
++ list_add_tail(&fence->ring, &fc->ring);
++ fc->latest_queued_sequence = sequence;
++ write_unlock_irqrestore(&fc->lock, flags);
++ return 0;
++}
++
++int ttm_fence_object_init(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t type,
++ uint32_t create_flags,
++ void (*destroy) (struct ttm_fence_object *),
++ struct ttm_fence_object *fence)
++{
++ int ret = 0;
++
++ kref_init(&fence->kref);
++ fence->fence_class = fence_class;
++ fence->fence_type = type;
++ fence->info.signaled_types = 0;
++ fence->waiting_types = 0;
++ fence->sequence = 0;
++ fence->info.error = 0;
++ fence->fdev = fdev;
++ fence->destroy = destroy;
++ INIT_LIST_HEAD(&fence->ring);
++ atomic_inc(&fdev->count);
++
++ if (create_flags & TTM_FENCE_FLAG_EMIT) {
++ ret = ttm_fence_object_emit(fence, create_flags,
++ fence->fence_class, type);
++ }
++
++ return ret;
++}
++
++int ttm_fence_object_create(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t type,
++ uint32_t create_flags,
++ struct ttm_fence_object **c_fence)
++{
++ struct ttm_fence_object *fence;
++ int ret;
++
++ ret = ttm_mem_global_alloc(fdev->mem_glob,
++ sizeof(*fence),
++ false,
++ false,
++ false);
++ if (unlikely(ret != 0)) {
++ printk(KERN_ERR "Out of memory creating fence object\n");
++ return ret;
++ }
++
++ fence = kmalloc(sizeof(*fence), GFP_KERNEL);
++ if (!fence) {
++ printk(KERN_ERR "Out of memory creating fence object\n");
++ ttm_mem_global_free(fdev->mem_glob, sizeof(*fence), false);
++ return -ENOMEM;
++ }
++
++ ret = ttm_fence_object_init(fdev, fence_class, type,
++ create_flags, NULL, fence);
++ if (ret) {
++ ttm_fence_object_unref(&fence);
++ return ret;
++ }
++ *c_fence = fence;
++
++ return 0;
++}
++
++static void ttm_fence_object_destroy(struct kref *kref)
++{
++ struct ttm_fence_object *fence =
++ container_of(kref, struct ttm_fence_object, kref);
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ unsigned long irq_flags;
++
++ write_lock_irqsave(&fc->lock, irq_flags);
++ list_del_init(&fence->ring);
++ write_unlock_irqrestore(&fc->lock, irq_flags);
++
++ atomic_dec(&fence->fdev->count);
++ if (fence->destroy)
++ fence->destroy(fence);
++ else {
++ ttm_mem_global_free(fence->fdev->mem_glob,
++ sizeof(*fence),
++ false);
++ kfree(fence);
++ }
++}
++
++void ttm_fence_device_release(struct ttm_fence_device *fdev)
++{
++ kfree(fdev->fence_class);
++}
++
++int
++ttm_fence_device_init(int num_classes,
++ struct ttm_mem_global *mem_glob,
++ struct ttm_fence_device *fdev,
++ const struct ttm_fence_class_init *init,
++ bool replicate_init,
++ const struct ttm_fence_driver *driver)
++{
++ struct ttm_fence_class_manager *fc;
++ const struct ttm_fence_class_init *fci;
++ int i;
++
++ fdev->mem_glob = mem_glob;
++ fdev->fence_class = kzalloc(num_classes *
++ sizeof(*fdev->fence_class), GFP_KERNEL);
++
++ if (unlikely(!fdev->fence_class))
++ return -ENOMEM;
++
++ fdev->num_classes = num_classes;
++ atomic_set(&fdev->count, 0);
++ fdev->driver = driver;
++
++ for (i = 0; i < fdev->num_classes; ++i) {
++ fc = &fdev->fence_class[i];
++ fci = &init[(replicate_init) ? 0 : i];
++
++ fc->wrap_diff = fci->wrap_diff;
++ fc->flush_diff = fci->flush_diff;
++ fc->sequence_mask = fci->sequence_mask;
++
++ rwlock_init(&fc->lock);
++ INIT_LIST_HEAD(&fc->ring);
++ init_waitqueue_head(&fc->fence_queue);
++ }
++
++ return 0;
++}
++
++struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence)
++{
++ struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);
++ struct ttm_fence_info tmp;
++ unsigned long irq_flags;
++
++ read_lock_irqsave(&fc->lock, irq_flags);
++ tmp = fence->info;
++ read_unlock_irqrestore(&fc->lock, irq_flags);
++
++ return tmp;
++}
++
++void ttm_fence_object_unref(struct ttm_fence_object **p_fence)
++{
++ struct ttm_fence_object *fence = *p_fence;
++
++ *p_fence = NULL;
++ (void)kref_put(&fence->kref, &ttm_fence_object_destroy);
++}
++
++/*
++ * Placement / BO sync object glue.
++ */
++
++bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg)
++{
++ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
++ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
++
++ return ttm_fence_object_signaled(fence, fence_types);
++}
++
++int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg,
++ bool lazy, bool interruptible)
++{
++ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
++ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
++
++ return ttm_fence_object_wait(fence, lazy, interruptible, fence_types);
++}
++
++int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg)
++{
++ struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
++ uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
++
++ return ttm_fence_object_flush(fence, fence_types);
++}
++
++void ttm_fence_sync_obj_unref(void **sync_obj)
++{
++ ttm_fence_object_unref((struct ttm_fence_object **)sync_obj);
++}
++
++void *ttm_fence_sync_obj_ref(void *sync_obj)
++{
++ return (void *)
++ ttm_fence_object_ref((struct ttm_fence_object *)sync_obj);
++}
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_api.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_api.h
+new file mode 100644
+index 0000000..d42904c
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_api.h
+@@ -0,0 +1,272 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++#ifndef _TTM_FENCE_API_H_
++#define _TTM_FENCE_API_H_
++
++#include <linux/list.h>
++#include <linux/kref.h>
++
++#define TTM_FENCE_FLAG_EMIT (1 << 0)
++#define TTM_FENCE_TYPE_EXE (1 << 0)
++
++struct ttm_fence_device;
++
++/**
++ * struct ttm_fence_info
++ *
++ * @fence_class: The fence class.
++ * @fence_type: Bitfield indicating types for this fence.
++ * @signaled_types: Bitfield indicating which types are signaled.
++ * @error: Last error reported from the device.
++ *
++ * Used as output from the ttm_fence_get_info
++ */
++
++struct ttm_fence_info {
++ uint32_t signaled_types;
++ uint32_t error;
++};
++
++/**
++ * struct ttm_fence_object
++ *
++ * @fdev: Pointer to the fence device struct.
++ * @kref: Holds the reference count of this fence object.
++ * @ring: List head used for the circular list of not-completely
++ * signaled fences.
++ * @info: Data for fast retrieval using the ttm_fence_get_info()
++ * function.
++ * @timeout_jiffies: Absolute jiffies value indicating when this fence
++ * object times out and, if waited on, calls ttm_fence_lockup
++ * to check for and resolve a GPU lockup.
++ * @sequence: Fence sequence number.
++ * @waiting_types: Types currently waited on.
++ * @destroy: Called to free the fence object, when its refcount has
++ * reached zero. If NULL, kfree is used.
++ *
++ * This struct is provided in the driver interface so that drivers can
++ * derive from it and create their own fence implementation. All members
++ * are private to the fence implementation and the fence driver callbacks.
++ * Otherwise a driver may access the derived object using container_of().
++ */
++
++struct ttm_fence_object {
++ struct ttm_fence_device *fdev;
++ struct kref kref;
++ uint32_t fence_class;
++ uint32_t fence_type;
++
++ /*
++ * The below fields are protected by the fence class
++ * manager spinlock.
++ */
++
++ struct list_head ring;
++ struct ttm_fence_info info;
++ unsigned long timeout_jiffies;
++ uint32_t sequence;
++ uint32_t waiting_types;
++ void (*destroy) (struct ttm_fence_object *);
++};
++
++/**
++ * ttm_fence_object_init
++ *
++ * @fdev: Pointer to a struct ttm_fence_device.
++ * @fence_class: Fence class for this fence.
++ * @type: Fence type for this fence.
++ * @create_flags: Flags indicating various actions at init time. At this point
++ * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to
++ * the command stream.
++ * @destroy: Destroy function. If NULL, kfree() is used.
++ * @fence: The struct ttm_fence_object to initialize.
++ *
++ * Initialize a pre-allocated fence object. This function, together with the
++ * destroy function makes it possible to derive driver-specific fence objects.
++ */
++
++extern int
++ttm_fence_object_init(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t type,
++ uint32_t create_flags,
++ void (*destroy) (struct ttm_fence_object *fence),
++ struct ttm_fence_object *fence);
++
++/**
++ * ttm_fence_object_create
++ *
++ * @fdev: Pointer to a struct ttm_fence_device.
++ * @fence_class: Fence class for this fence.
++ * @type: Fence type for this fence.
++ * @create_flags: Flags indicating various actions at init time. At this point
++ * there's only TTM_FENCE_FLAG_EMIT, which triggers a sequence emission to
++ * the command stream.
++ * @c_fence: On successful termination, *(@c_fence) will point to the created
++ * fence object.
++ *
++ * Create and initialize a struct ttm_fence_object. The destroy function will
++ * be set to kfree().
++ */
++
++extern int
++ttm_fence_object_create(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t type,
++ uint32_t create_flags,
++ struct ttm_fence_object **c_fence);
++
++/**
++ * ttm_fence_object_wait
++ *
++ * @fence: The fence object to wait on.
++ * @lazy: Allow sleeps to reduce the cpu-usage if polling.
++ * @interruptible: Sleep interruptible when waiting.
++ * @type_mask: Wait for the given type_mask to signal.
++ *
++ * Wait for a fence to signal the given type_mask. The function will
++ * perform a fence_flush using type_mask. (See ttm_fence_object_flush).
++ *
++ * Returns
++ * -ERESTART if interrupted by a signal.
++ * May return driver-specific error codes if timed-out.
++ */
++
++extern int
++ttm_fence_object_wait(struct ttm_fence_object *fence,
++ bool lazy, bool interruptible, uint32_t type_mask);
++
++/**
++ * ttm_fence_object_flush
++ *
++ * @fence: The fence object to flush.
++ * @flush_mask: Fence types to flush.
++ *
++ * Make sure that the given fence eventually signals the
++ * types indicated by @flush_mask. Note that this may or may not
++ * map to a CPU or GPU flush.
++ */
++
++extern int
++ttm_fence_object_flush(struct ttm_fence_object *fence, uint32_t flush_mask);
++
++/**
++ * ttm_fence_get_info
++ *
++ * @fence: The fence object.
++ *
++ * Copy the info block from the fence while holding relevant locks.
++ */
++
++struct ttm_fence_info ttm_fence_get_info(struct ttm_fence_object *fence);
++
++/**
++ * ttm_fence_object_ref
++ *
++ * @fence: The fence object.
++ *
++ * Return a ref-counted pointer to the fence object indicated by @fence.
++ */
++
++static inline struct ttm_fence_object *ttm_fence_object_ref(struct
++ ttm_fence_object
++ *fence)
++{
++ kref_get(&fence->kref);
++ return fence;
++}
++
++/**
++ * ttm_fence_object_unref
++ *
++ * @p_fence: Pointer to a ref-counted pointer to a struct ttm_fence_object.
++ *
++ * Unreference the fence object pointed to by *(@p_fence), clearing
++ * *(p_fence).
++ */
++
++extern void ttm_fence_object_unref(struct ttm_fence_object **p_fence);
++
++/**
++ * ttm_fence_object_signaled
++ *
++ * @fence: Pointer to the struct ttm_fence_object.
++ * @mask: Type mask to check whether signaled.
++ *
++ * This function checks (without waiting) whether the fence object
++ * pointed to by @fence has signaled the types indicated by @mask,
++ * and returns 1 if true, 0 if false. This function does NOT perform
++ * an implicit fence flush.
++ */
++
++extern bool
++ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask);
++
++/**
++ * ttm_fence_class
++ *
++ * @fence: Pointer to the struct ttm_fence_object.
++ *
++ * Convenience function that returns the fence class of a
++ * struct ttm_fence_object.
++ */
++
++static inline uint32_t ttm_fence_class(const struct ttm_fence_object *fence)
++{
++ return fence->fence_class;
++}
++
++/**
++ * ttm_fence_types
++ *
++ * @fence: Pointer to the struct ttm_fence_object.
++ *
++ * Convenience function that returns the fence types of a
++ * struct ttm_fence_object.
++ */
++
++static inline uint32_t ttm_fence_types(const struct ttm_fence_object *fence)
++{
++ return fence->fence_type;
++}
++
++/*
++ * The functions below are wrappers to the above functions, with
++ * similar names but with sync_obj omitted. These wrappers are intended
++ * to be plugged directly into the buffer object driver's sync object
++ * API, if the driver chooses to use ttm_fence_objects as buffer object
++ * sync objects. In the prototypes below, a sync_obj is cast to a
++ * struct ttm_fence_object, whereas a sync_arg is cast to an
++ * uint32_t representing a fence_type argument.
++ */
++
++extern bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg);
++extern int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg,
++ bool lazy, bool interruptible);
++extern int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg);
++extern void ttm_fence_sync_obj_unref(void **sync_obj);
++extern void *ttm_fence_sync_obj_ref(void *sync_obj);
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_driver.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_driver.h
+new file mode 100644
+index 0000000..1dbd817
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_driver.h
+@@ -0,0 +1,302 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++#ifndef _TTM_FENCE_DRIVER_H_
++#define _TTM_FENCE_DRIVER_H_
++
++#include <linux/kref.h>
++#include <linux/spinlock.h>
++#include <linux/wait.h>
++#include "ttm_fence_api.h"
++#include "ttm_memory.h"
++
++/** @file ttm_fence_driver.h
++ *
++ * Definitions needed for a driver implementing the
++ * ttm_fence subsystem.
++ */
++
++/**
++ * struct ttm_fence_class_manager:
++ *
++ * @wrap_diff: Sequence difference to catch 32-bit wrapping.
++ * if (seqa - seqb) > @wrap_diff, then seqa < seqb.
++ * @flush_diff: Sequence difference to trigger fence flush.
++ * if (cur_seq - seqa) > @flush_diff, then consider fence object with
++ * seqa as old and needing a flush.
++ * @sequence_mask: Mask of valid bits in a fence sequence.
++ * @lock: Lock protecting this struct as well as fence objects
++ * associated with this struct.
++ * @ring: Circular sequence-ordered list of fence objects.
++ * @pending_flush: Fence types currently needing a flush.
++ * @waiting_types: Fence types that are currently waited for.
++ * @fence_queue: Queue of waiters on fences belonging to this fence class.
++ * @highest_waiting_sequence: Sequence number of the fence with highest
++ * sequence number and that is waited for.
++ * @latest_queued_sequence: Sequence number of the fence latest queued
++ * on the ring.
++ */
++
++struct ttm_fence_class_manager {
++
++ /*
++ * Unprotected constant members.
++ */
++
++ uint32_t wrap_diff;
++ uint32_t flush_diff;
++ uint32_t sequence_mask;
++
++ /*
++ * The rwlock protects this structure as well as
++ * the data in all fence objects belonging to this
++ * class. This should be OK as most fence objects are
++ * only read from once they're created.
++ */
++
++ rwlock_t lock;
++ struct list_head ring;
++ uint32_t pending_flush;
++ uint32_t waiting_types;
++ wait_queue_head_t fence_queue;
++ uint32_t highest_waiting_sequence;
++ uint32_t latest_queued_sequence;
++};
++
++/**
++ * struct ttm_fence_device
++ *
++ * @fence_class: Array of fence class managers.
++ * @num_classes: Array dimension of @fence_class.
++ * @count: Current number of fence objects for statistics.
++ * @driver: Driver struct.
++ *
++ * Provided in the driver interface so that the driver can derive
++ * from this struct for its driver_private, and accordingly
++ * access the driver_private from the fence driver callbacks.
++ *
++ * All members except "count" are initialized at creation and
++ * never touched after that. No protection needed.
++ *
++ * This struct is private to the fence implementation and to the fence
++ * driver callbacks, and may otherwise be used by drivers only to
++ * obtain the derived device_private object using container_of().
++ */
++
++struct ttm_fence_device {
++ struct ttm_mem_global *mem_glob;
++ struct ttm_fence_class_manager *fence_class;
++ uint32_t num_classes;
++ atomic_t count;
++ const struct ttm_fence_driver *driver;
++};
++
++/**
++ * struct ttm_fence_class_init
++ *
++ * @wrap_diff: Fence sequence number wrap indicator. If
++ * (sequence1 - sequence2) > @wrap_diff, then sequence1 is
++ * considered to be older than sequence2.
++ * @flush_diff: Fence sequence number flush indicator.
++ * If a non-completely-signaled fence has a fence sequence number
++ * sequence1 and (sequence1 - current_emit_sequence) > @flush_diff,
++ * the fence is considered too old and it will be flushed upon the
++ * next call of ttm_fence_flush_old(), to make sure no fences with
++ * stale sequence numbers remains unsignaled. @flush_diff should
++ * be sufficiently less than @wrap_diff.
++ * @sequence_mask: Mask with valid bits of the fence sequence
++ * number set to 1.
++ *
++ * This struct is used as input to ttm_fence_device_init.
++ */
++
++struct ttm_fence_class_init {
++ uint32_t wrap_diff;
++ uint32_t flush_diff;
++ uint32_t sequence_mask;
++};
++
++/**
++ * struct ttm_fence_driver
++ *
++ * @has_irq: Called by a potential waiter. Should return 1 if a
++ * fence object with indicated parameters is expected to signal
++ * automatically, and 0 if the fence implementation needs to
++ * repeatedly call @poll to make it signal.
++ * @emit: Make sure a fence with the given parameters is
++ * present in the indicated command stream. Return its sequence number
++ * in "breadcrumb".
++ * @poll: Check and report sequences of the given "fence_class"
++ * that have signaled "types"
++ * @flush: Make sure that the types indicated by the bitfield
++ * ttm_fence_class_manager::pending_flush will eventually
++ * signal. These bits have been put together using the
++ * result from the needed_flush function described below.
++ * @needed_flush: Given the fence_class and fence_types indicated by
++ * "fence", and the last received fence sequence of this
++ * fence class, indicate what types need a fence flush to
++ * signal. Return as a bitfield.
++ * @wait: Set to non-NULL if the driver wants to override the fence
++ * wait implementation. Return 0 on success, -EBUSY on failure,
++ * and -ERESTART if interruptible and a signal is pending.
++ * @signaled: Driver callback that is called whenever a
++ * ttm_fence_object::signaled_types has changed status.
++ * This function is called from atomic context,
++ * with the ttm_fence_class_manager::lock held in write mode.
++ * @lockup: Driver callback that is called whenever a wait has exceeded
++ * the lifetime of a fence object.
++ * If there is a GPU lockup,
++ * this function should, if possible, reset the GPU,
++ * call the ttm_fence_handler with an error status, and
++ * return. If no lockup was detected, simply extend the
++ * fence timeout_jiffies and return. The driver might
++ * want to protect the lockup check with a mutex and cache a
++ * non-locked-up status for a while to avoid an excessive
++ * amount of lockup checks from every waiting thread.
++ */
++
++struct ttm_fence_driver {
++ bool (*has_irq) (struct ttm_fence_device *fdev,
++ uint32_t fence_class, uint32_t flags);
++ int (*emit) (struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t flags,
++ uint32_t *breadcrumb, unsigned long *timeout_jiffies);
++ void (*flush) (struct ttm_fence_device *fdev, uint32_t fence_class);
++ void (*poll) (struct ttm_fence_device *fdev,
++ uint32_t fence_class, uint32_t types);
++ uint32_t(*needed_flush)
++ (struct ttm_fence_object *fence);
++ int (*wait) (struct ttm_fence_object *fence, bool lazy,
++ bool interruptible, uint32_t mask);
++ void (*signaled) (struct ttm_fence_object *fence);
++ void (*lockup) (struct ttm_fence_object *fence, uint32_t fence_types);
++};
++
++/**
++ * function ttm_fence_device_init
++ *
++ * @num_classes: Number of fence classes for this fence implementation.
++ * @mem_global: Pointer to the global memory accounting info.
++ * @fdev: Pointer to an uninitialised struct ttm_fence_device.
++ * @init: Array of initialization info for each fence class.
++ * @replicate_init: Use the first @init initialization info for all classes.
++ * @driver: Driver callbacks.
++ *
++ * Initialize a struct ttm_fence_driver structure. Returns -ENOMEM if
++ * out-of-memory. Otherwise returns 0.
++ */
++extern int
++ttm_fence_device_init(int num_classes,
++ struct ttm_mem_global *mem_glob,
++ struct ttm_fence_device *fdev,
++ const struct ttm_fence_class_init *init,
++ bool replicate_init,
++ const struct ttm_fence_driver *driver);
++
++/**
++ * function ttm_fence_device_release
++ *
++ * @fdev: Pointer to the fence device.
++ *
++ * Release all resources held by a fence device. Note that before
++ * this function is called, the caller must have made sure all fence
++ * objects belonging to this fence device are completely signaled.
++ */
++
++extern void ttm_fence_device_release(struct ttm_fence_device *fdev);
++
++/**
++ * ttm_fence_handler - the fence handler.
++ *
++ * @fdev: Pointer to the fence device.
++ * @fence_class: Fence class that signals.
++ * @sequence: Signaled sequence.
++ * @type: Types that signal.
++ * @error: Error from the engine.
++ *
++ * This function signals all fences with a sequence previous to the
++ * @sequence argument, and belonging to @fence_class. The signaled fence
++ * types are provided in @type. If error is non-zero, the error member
++ * of the fence with sequence = @sequence is set to @error. This value
++ * may be reported back to user-space, indicating, for example an illegal
++ * 3D command or illegal mpeg data.
++ *
++ * This function is typically called from the driver::poll method when the
++ * command sequence preceding the fence marker has executed. It should be
++ * called with the ttm_fence_class_manager::lock held in write mode and
++ * may be called from interrupt context.
++ */
++
++extern void
++ttm_fence_handler(struct ttm_fence_device *fdev,
++ uint32_t fence_class,
++ uint32_t sequence, uint32_t type, uint32_t error);
++
++/**
++ * ttm_fence_driver_from_dev
++ *
++ * @fdev: The ttm fence device.
++ *
++ * Returns a pointer to the fence driver struct.
++ */
++
++static inline const struct ttm_fence_driver *ttm_fence_driver_from_dev(
++ struct ttm_fence_device *fdev)
++{
++ return fdev->driver;
++}
++
++/**
++ * ttm_fence_driver
++ *
++ * @fence: Pointer to a ttm fence object.
++ *
++ * Returns a pointer to the fence driver struct.
++ */
++
++static inline const struct ttm_fence_driver *ttm_fence_driver(struct
++ ttm_fence_object
++ *fence)
++{
++ return ttm_fence_driver_from_dev(fence->fdev);
++}
++
++/**
++ * ttm_fence_fc
++ *
++ * @fence: Pointer to a ttm fence object.
++ *
++ * Returns a pointer to the struct ttm_fence_class_manager for the
++ * fence class of @fence.
++ */
++
++static inline struct ttm_fence_class_manager *ttm_fence_fc(struct
++ ttm_fence_object
++ *fence)
++{
++ return &fence->fdev->fence_class[fence->fence_class];
++}
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_user.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_user.c
+new file mode 100644
+index 0000000..878c9bd
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_user.c
+@@ -0,0 +1,238 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include <drm/drmP.h>
++#include "ttm_fence_user.h"
++#include "ttm_object.h"
++#include "ttm_fence_driver.h"
++#include "ttm_userobj_api.h"
++
++/**
++ * struct ttm_fence_user_object
++ *
++ * @base: The base object used for user-space visibility and refcounting.
++ *
++ * @fence: The fence object itself.
++ *
++ */
++
++struct ttm_fence_user_object {
++ struct ttm_base_object base;
++ struct ttm_fence_object fence;
++};
++
++static struct ttm_fence_user_object *ttm_fence_user_object_lookup(
++ struct ttm_object_file *tfile,
++ uint32_t handle)
++{
++ struct ttm_base_object *base;
++
++ base = ttm_base_object_lookup(tfile, handle);
++ if (unlikely(base == NULL)) {
++ printk(KERN_ERR "Invalid fence handle 0x%08lx\n",
++ (unsigned long)handle);
++ return NULL;
++ }
++
++ if (unlikely(base->object_type != ttm_fence_type)) {
++ ttm_base_object_unref(&base);
++ printk(KERN_ERR "Invalid fence handle 0x%08lx\n",
++ (unsigned long)handle);
++ return NULL;
++ }
++
++ return container_of(base, struct ttm_fence_user_object, base);
++}
++
++/*
++ * The fence object destructor.
++ */
++
++static void ttm_fence_user_destroy(struct ttm_fence_object *fence)
++{
++ struct ttm_fence_user_object *ufence =
++ container_of(fence, struct ttm_fence_user_object, fence);
++
++ ttm_mem_global_free(fence->fdev->mem_glob, sizeof(*ufence), false);
++ kfree(ufence);
++}
++
++/*
++ * The base object destructor. We basically only unreference the
++ * attached fence object.
++ */
++
++static void ttm_fence_user_release(struct ttm_base_object **p_base)
++{
++ struct ttm_fence_user_object *ufence;
++ struct ttm_base_object *base = *p_base;
++ struct ttm_fence_object *fence;
++
++ *p_base = NULL;
++
++ if (unlikely(base == NULL))
++ return;
++
++ ufence = container_of(base, struct ttm_fence_user_object, base);
++ fence = &ufence->fence;
++ ttm_fence_object_unref(&fence);
++}
++
++int
++ttm_fence_user_create(struct ttm_fence_device *fdev,
++ struct ttm_object_file *tfile,
++ uint32_t fence_class,
++ uint32_t fence_types,
++ uint32_t create_flags,
++ struct ttm_fence_object **fence,
++ uint32_t *user_handle)
++{
++ int ret;
++ struct ttm_fence_object *tmp;
++ struct ttm_fence_user_object *ufence;
++
++ ret = ttm_mem_global_alloc(fdev->mem_glob,
++ sizeof(*ufence),
++ false,
++ false,
++ false);
++ if (unlikely(ret != 0))
++ return -ENOMEM;
++
++ ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
++ if (unlikely(ufence == NULL)) {
++ ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), false);
++ return -ENOMEM;
++ }
++
++ ret = ttm_fence_object_init(fdev,
++ fence_class,
++ fence_types, create_flags,
++ &ttm_fence_user_destroy, &ufence->fence);
++
++ if (unlikely(ret != 0))
++ goto out_err0;
++
++ /*
++ * One fence ref is held by the fence ptr we return.
++ * The other one by the base object. Need to up the
++ * fence refcount before we publish this object to
++ * user-space.
++ */
++
++ tmp = ttm_fence_object_ref(&ufence->fence);
++ ret = ttm_base_object_init(tfile, &ufence->base,
++ false, ttm_fence_type,
++ &ttm_fence_user_release, NULL);
++
++ if (unlikely(ret != 0))
++ goto out_err1;
++
++ *fence = &ufence->fence;
++ *user_handle = ufence->base.hash.key;
++
++ return 0;
++out_err1:
++ ttm_fence_object_unref(&tmp);
++ tmp = &ufence->fence;
++ ttm_fence_object_unref(&tmp);
++ return ret;
++out_err0:
++ ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), false);
++ kfree(ufence);
++ return ret;
++}
++
++int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ int ret;
++ union ttm_fence_signaled_arg *arg = data;
++ struct ttm_fence_object *fence;
++ struct ttm_fence_info info;
++ struct ttm_fence_user_object *ufence;
++ struct ttm_base_object *base;
++ ret = 0;
++
++ ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle);
++ if (unlikely(ufence == NULL))
++ return -EINVAL;
++
++ fence = &ufence->fence;
++
++ if (arg->req.flush) {
++ ret = ttm_fence_object_flush(fence, arg->req.fence_type);
++ if (unlikely(ret != 0))
++ goto out;
++ }
++
++ info = ttm_fence_get_info(fence);
++ arg->rep.signaled_types = info.signaled_types;
++ arg->rep.fence_error = info.error;
++
++out:
++ base = &ufence->base;
++ ttm_base_object_unref(&base);
++ return ret;
++}
++
++int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ int ret;
++ union ttm_fence_finish_arg *arg = data;
++ struct ttm_fence_user_object *ufence;
++ struct ttm_base_object *base;
++ struct ttm_fence_object *fence;
++ ret = 0;
++
++ ufence = ttm_fence_user_object_lookup(tfile, arg->req.handle);
++ if (unlikely(ufence == NULL))
++ return -EINVAL;
++
++ fence = &ufence->fence;
++
++ ret = ttm_fence_object_wait(fence,
++ arg->req.mode & TTM_FENCE_FINISH_MODE_LAZY,
++ true, arg->req.fence_type);
++ if (likely(ret == 0)) {
++ struct ttm_fence_info info = ttm_fence_get_info(fence);
++
++ arg->rep.signaled_types = info.signaled_types;
++ arg->rep.fence_error = info.error;
++ }
++
++ base = &ufence->base;
++ ttm_base_object_unref(&base);
++
++ return ret;
++}
++
++int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ struct ttm_fence_unref_arg *arg = data;
++ int ret = 0;
++
++ ret = ttm_ref_object_base_unref(tfile, arg->handle, ttm_fence_type);
++ return ret;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_user.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_user.h
+new file mode 100644
+index 0000000..ee95e6a
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_fence_user.h
+@@ -0,0 +1,140 @@
++/**************************************************************************
++ *
++ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors
++ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef TTM_FENCE_USER_H
++#define TTM_FENCE_USER_H
++
++#if !defined(__KERNEL__) && !defined(_KERNEL)
++#include <stdint.h>
++#endif
++
++#define TTM_FENCE_MAJOR 0
++#define TTM_FENCE_MINOR 1
++#define TTM_FENCE_PL 0
++#define TTM_FENCE_DATE "080819"
++
++/**
++ * struct ttm_fence_signaled_req
++ *
++ * @handle: Handle to the fence object. Input.
++ *
++ * @fence_type: Fence types we want to flush. Input.
++ *
++ * @flush: Boolean. Flush the indicated fence_types. Input.
++ *
++ * Argument to the TTM_FENCE_SIGNALED ioctl.
++ */
++
++struct ttm_fence_signaled_req {
++ uint32_t handle;
++ uint32_t fence_type;
++ int32_t flush;
++ uint32_t pad64;
++};
++
++/**
++ * struct ttm_fence_rep
++ *
++ * @signaled_types: Fence type that has signaled.
++ *
++ * @fence_error: Command execution error.
++ * Hardware errors that are consequences of the execution
++ * of the command stream preceding the fence are reported
++ * here.
++ *
++ * Output argument to the TTM_FENCE_SIGNALED and
++ * TTM_FENCE_FINISH ioctls.
++ */
++
++struct ttm_fence_rep {
++ uint32_t signaled_types;
++ uint32_t fence_error;
++};
++
++union ttm_fence_signaled_arg {
++ struct ttm_fence_signaled_req req;
++ struct ttm_fence_rep rep;
++};
++
++/*
++ * Waiting mode flags for the TTM_FENCE_FINISH ioctl.
++ *
++ * TTM_FENCE_FINISH_MODE_LAZY: Allow for sleeps during polling
++ * wait.
++ *
++ * TTM_FENCE_FINISH_MODE_NO_BLOCK: Don't block waiting for GPU,
++ * but return -EBUSY if the buffer is busy.
++ */
++
++#define TTM_FENCE_FINISH_MODE_LAZY (1 << 0)
++#define TTM_FENCE_FINISH_MODE_NO_BLOCK (1 << 1)
++
++/**
++ * struct ttm_fence_finish_req
++ *
++ * @handle: Handle to the fence object. Input.
++ *
++ * @fence_type: Fence types we want to finish.
++ *
++ * @mode: Wait mode.
++ *
++ * Input to the TTM_FENCE_FINISH ioctl.
++ */
++
++struct ttm_fence_finish_req {
++ uint32_t handle;
++ uint32_t fence_type;
++ uint32_t mode;
++ uint32_t pad64;
++};
++
++union ttm_fence_finish_arg {
++ struct ttm_fence_finish_req req;
++ struct ttm_fence_rep rep;
++};
++
++/**
++ * struct ttm_fence_unref_arg
++ *
++ * @handle: Handle to the fence object.
++ *
++ * Argument to the TTM_FENCE_UNREF ioctl.
++ */
++
++struct ttm_fence_unref_arg {
++ uint32_t handle;
++ uint32_t pad64;
++};
++
++/*
++ * Ioctl offsets from extension start.
++ */
++
++#define TTM_FENCE_SIGNALED 0x01
++#define TTM_FENCE_FINISH 0x02
++#define TTM_FENCE_UNREF 0x03
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_lock.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_lock.c
+new file mode 100644
+index 0000000..be7464c
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_lock.c
+@@ -0,0 +1,155 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm_lock.h"
++#include <asm/atomic.h>
++#include <linux/errno.h>
++#include <linux/wait.h>
++#include <linux/sched.h>
++
++void ttm_lock_init(struct ttm_lock *lock)
++{
++ init_waitqueue_head(&lock->queue);
++ atomic_set(&lock->write_lock_pending, 0);
++ atomic_set(&lock->readers, 0);
++ lock->kill_takers = false;
++ lock->signal = SIGKILL;
++}
++
++void ttm_read_unlock(struct ttm_lock *lock)
++{
++ if (atomic_dec_and_test(&lock->readers))
++ wake_up_all(&lock->queue);
++}
++
++int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
++{
++ while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
++ int ret;
++
++ if (!interruptible) {
++ wait_event(lock->queue,
++ atomic_read(&lock->write_lock_pending) == 0);
++ continue;
++ }
++ ret = wait_event_interruptible
++ (lock->queue, atomic_read(&lock->write_lock_pending) == 0);
++ if (ret)
++ return -ERESTART;
++ }
++
++ while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
++ int ret;
++ if (!interruptible) {
++ wait_event(lock->queue,
++ atomic_read(&lock->readers) != -1);
++ continue;
++ }
++ ret = wait_event_interruptible
++ (lock->queue, atomic_read(&lock->readers) != -1);
++ if (ret)
++ return -ERESTART;
++ }
++
++ if (unlikely(lock->kill_takers)) {
++ send_sig(lock->signal, current, 0);
++ ttm_read_unlock(lock);
++ return -ERESTART;
++ }
++
++ return 0;
++}
++
++static int __ttm_write_unlock(struct ttm_lock *lock)
++{
++ if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
++ return -EINVAL;
++ wake_up_all(&lock->queue);
++ return 0;
++}
++
++static void ttm_write_lock_remove(struct ttm_base_object **p_base)
++{
++ struct ttm_base_object *base = *p_base;
++ struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
++ int ret;
++
++ *p_base = NULL;
++ ret = __ttm_write_unlock(lock);
++ BUG_ON(ret != 0);
++}
++
++int ttm_write_lock(struct ttm_lock *lock,
++ bool interruptible,
++ struct ttm_object_file *tfile)
++{
++ int ret = 0;
++
++ atomic_inc(&lock->write_lock_pending);
++
++ while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
++ if (!interruptible) {
++ wait_event(lock->queue,
++ atomic_read(&lock->readers) == 0);
++ continue;
++ }
++ ret = wait_event_interruptible
++ (lock->queue, atomic_read(&lock->readers) == 0);
++
++ if (ret) {
++ if (atomic_dec_and_test(&lock->write_lock_pending))
++ wake_up_all(&lock->queue);
++ return -ERESTART;
++ }
++ }
++
++ if (atomic_dec_and_test(&lock->write_lock_pending))
++ wake_up_all(&lock->queue);
++
++ if (unlikely(lock->kill_takers)) {
++ send_sig(lock->signal, current, 0);
++ __ttm_write_unlock(lock);
++ return -ERESTART;
++ }
++
++ /*
++ * Add a base-object, the destructor of which will
++ * make sure the lock is released if the client dies
++ * while holding it.
++ */
++
++ ret = ttm_base_object_init(tfile, &lock->base, false,
++ ttm_lock_type, &ttm_write_lock_remove, NULL);
++ if (ret)
++ (void)__ttm_write_unlock(lock);
++
++ return ret;
++}
++
++int ttm_write_unlock(struct ttm_lock *lock, struct ttm_object_file *tfile)
++{
++ return ttm_ref_object_base_unref(tfile,
++ lock->base.hash.key, TTM_REF_USAGE);
++}
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_lock.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_lock.h
+new file mode 100644
+index 0000000..500b2c1
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_lock.h
+@@ -0,0 +1,176 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++/** @file ttm_lock.h
++ * This file implements a simple replacement for the buffer manager use
++ * of the DRM heavyweight hardware lock.
++ * The lock is a read-write lock. Taking it in read mode is fast, and
++ * intended for in-kernel use only.
++ * Taking it in write mode is slow.
++ *
++ * The write mode is used only when there is a need to block all
++ * user-space processes from validating buffers.
++ * It's allowed to leave kernel space with the write lock held.
++ * If a user-space process dies while having the write-lock,
++ * it will be released during the file descriptor release.
++ *
++ * The read lock is typically placed at the start of an IOCTL- or
++ * user-space callable function that may end up allocating a memory area.
++ * This includes setstatus, super-ioctls and faults; the latter may move
++ * unmappable regions to mappable. It's a bug to leave kernel space with the
++ * read lock held.
++ *
++ * Both read- and write lock taking is interruptible for low signal-delivery
++ * latency. The locking functions will return -ERESTART if interrupted by a
++ * signal.
++ *
++ * Locking order: The lock should be taken BEFORE any TTM mutexes
++ * or spinlocks.
++ *
++ * Typical usages:
++ * a) VT-switching, when we want to clean VRAM and perhaps AGP. The lock
++ * stops it from being repopulated.
++ * b) out-of-VRAM or out-of-aperture space, in which case the process
++ * receiving the out-of-space notification may take the lock in write mode
++ * and evict all buffers prior to start validating its own buffers.
++ */
++
++#ifndef _TTM_LOCK_H_
++#define _TTM_LOCK_H_
++
++#include "ttm_object.h"
++#include <linux/wait.h>
++#include <asm/atomic.h>
++
++/**
++ * struct ttm_lock
++ *
++ * @base: ttm base object used solely to release the lock if the client
++ * holding the lock dies.
++ * @queue: Queue for processes waiting for lock change-of-status.
++ * @write_lock_pending: Flag indicating that a write-lock is pending. Avoids
++ * write lock starvation.
++ * @readers: The lock status: A negative number indicates that a write lock is
++ * held. Positive values indicate number of concurrent readers.
++ */
++
++struct ttm_lock {
++ struct ttm_base_object base;
++ wait_queue_head_t queue;
++ atomic_t write_lock_pending;
++ atomic_t readers;
++ bool kill_takers;
++ int signal;
++};
++
++/**
++ * ttm_lock_init
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * Initializes the lock.
++ */
++extern void ttm_lock_init(struct ttm_lock *lock);
++
++/**
++ * ttm_read_unlock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ *
++ * Releases a read lock.
++ */
++
++extern void ttm_read_unlock(struct ttm_lock *lock);
++
++/**
++ * ttm_read_unlock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * @interruptible: Interruptible sleeping while waiting for a lock.
++ *
++ * Takes the lock in read mode.
++ * Returns:
++ * -ERESTART If interrupted by a signal and interruptible is true.
++ */
++
++extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
++
++/**
++ * ttm_write_lock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * @interruptible: Interruptible sleeping while waiting for a lock.
++ * @tfile: Pointer to a struct ttm_object_file used to identify the user-space
++ * application taking the lock.
++ *
++ * Takes the lock in write mode.
++ * Returns:
++ * -ERESTART If interrupted by a signal and interruptible is true.
++ * -ENOMEM: Out of memory when locking.
++ */
++extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible,
++ struct ttm_object_file *tfile);
++
++/**
++ * ttm_write_unlock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * @tfile: Pointer to a struct ttm_object_file used to identify the user-space
++ * application taking the lock.
++ *
++ * Releases a write lock.
++ * Returns:
++ * -EINVAL If the lock was not held.
++ */
++extern int ttm_write_unlock(struct ttm_lock *lock,
++ struct ttm_object_file *tfile);
++
++/**
++ * ttm_lock_set_kill
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * @val: Boolean whether to kill processes taking the lock.
++ * @signal: Signal to send to the process taking the lock.
++ *
++ * The kill-when-taking-lock functionality is used to kill processes that keep
++ * on using the TTM functionality when its resources has been taken down, for
++ * example when the X server exits. A typical sequence would look like this:
++ * - X server takes lock in write mode.
++ * - ttm_lock_set_kill() is called with @val set to true.
++ * - As part of X server exit, TTM resources are taken down.
++ * - X server releases the lock on file release.
++ * - Another dri client wants to render, takes the lock and is killed.
++ *
++ */
++
++static inline void ttm_lock_set_kill(struct ttm_lock *lock,
++ bool val,
++ int signal)
++{
++ lock->kill_takers = val;
++ if (val)
++ lock->signal = signal;
++}
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_memory.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_memory.c
+new file mode 100644
+index 0000000..363c1c3
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_memory.c
+@@ -0,0 +1,228 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include "ttm_memory.h"
++#include <linux/spinlock.h>
++#include <linux/sched.h>
++#include <linux/wait.h>
++#include <linux/mm.h>
++
++#define TTM_MEMORY_ALLOC_RETRIES 4
++
++/**
++ * At this point we only support a single shrink callback.
++ * Extend this if needed, perhaps using a linked list of callbacks.
++ * Note that this function is reentrant:
++ * many threads may try to swap out at any given time.
++ */
++
++static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue,
++ uint64_t extra)
++{
++ int ret;
++ struct ttm_mem_shrink *shrink;
++ uint64_t target;
++ uint64_t total_target;
++
++ spin_lock(&glob->lock);
++ if (glob->shrink == NULL)
++ goto out;
++
++ if (from_workqueue) {
++ target = glob->swap_limit;
++ total_target = glob->total_memory_swap_limit;
++ } else if (capable(CAP_SYS_ADMIN)) {
++ total_target = glob->emer_total_memory;
++ target = glob->emer_memory;
++ } else {
++ total_target = glob->max_total_memory;
++ target = glob->max_memory;
++ }
++
++ total_target = (extra >= total_target) ? 0 : total_target - extra;
++ target = (extra >= target) ? 0 : target - extra;
++
++ while (glob->used_memory > target ||
++ glob->used_total_memory > total_target) {
++ shrink = glob->shrink;
++ spin_unlock(&glob->lock);
++ ret = shrink->do_shrink(shrink);
++ spin_lock(&glob->lock);
++ if (unlikely(ret != 0))
++ goto out;
++ }
++out:
++ spin_unlock(&glob->lock);
++}
++
++static void ttm_shrink_work(struct work_struct *work)
++{
++ struct ttm_mem_global *glob =
++ container_of(work, struct ttm_mem_global, work);
++
++ ttm_shrink(glob, true, 0ULL);
++}
++
++int ttm_mem_global_init(struct ttm_mem_global *glob)
++{
++ struct sysinfo si;
++ uint64_t mem;
++
++ spin_lock_init(&glob->lock);
++ glob->swap_queue = create_singlethread_workqueue("ttm_swap");
++ INIT_WORK(&glob->work, ttm_shrink_work);
++ init_waitqueue_head(&glob->queue);
++
++ si_meminfo(&si);
++
++ mem = si.totalram - si.totalhigh;
++ mem *= si.mem_unit;
++
++ glob->max_memory = mem >> 1;
++ glob->emer_memory = glob->max_memory + (mem >> 2);
++ glob->swap_limit = glob->max_memory - (mem >> 5);
++ glob->used_memory = 0;
++ glob->used_total_memory = 0;
++ glob->shrink = NULL;
++
++ mem = si.totalram;
++ mem *= si.mem_unit;
++
++ glob->max_total_memory = mem >> 1;
++ glob->emer_total_memory = glob->max_total_memory + (mem >> 2);
++ glob->total_memory_swap_limit = glob->max_total_memory - (mem >> 5);
++
++ printk(KERN_INFO "TTM available graphics memory: %llu MiB\n",
++ glob->max_total_memory >> 20);
++ printk(KERN_INFO "TTM available object memory: %llu MiB\n",
++ glob->max_memory >> 20);
++ printk(KERN_INFO "TTM available swap breakpoint: %llu MiB\n",
++ glob->swap_limit >> 20);
++
++ return 0;
++}
++
++void ttm_mem_global_release(struct ttm_mem_global *glob)
++{
++ printk(KERN_INFO "Used total memory is %llu bytes.\n",
++ (unsigned long long)glob->used_total_memory);
++ flush_workqueue(glob->swap_queue);
++ destroy_workqueue(glob->swap_queue);
++ glob->swap_queue = NULL;
++}
++
++static inline void ttm_check_swapping(struct ttm_mem_global *glob)
++{
++ bool needs_swapping;
++
++ spin_lock(&glob->lock);
++ needs_swapping = (glob->used_memory > glob->swap_limit ||
++ glob->used_total_memory >
++ glob->total_memory_swap_limit);
++ spin_unlock(&glob->lock);
++
++ if (unlikely(needs_swapping))
++ (void)queue_work(glob->swap_queue, &glob->work);
++
++}
++
++void ttm_mem_global_free(struct ttm_mem_global *glob,
++ uint64_t amount, bool himem)
++{
++ spin_lock(&glob->lock);
++ glob->used_total_memory -= amount;
++ if (!himem)
++ glob->used_memory -= amount;
++ wake_up_all(&glob->queue);
++ spin_unlock(&glob->lock);
++}
++
++static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
++ uint64_t amount, bool himem, bool reserve)
++{
++ uint64_t limit;
++ uint64_t lomem_limit;
++ int ret = -ENOMEM;
++
++ spin_lock(&glob->lock);
++
++ if (capable(CAP_SYS_ADMIN)) {
++ limit = glob->emer_total_memory;
++ lomem_limit = glob->emer_memory;
++ } else {
++ limit = glob->max_total_memory;
++ lomem_limit = glob->max_memory;
++ }
++
++ if (unlikely(glob->used_total_memory + amount > limit))
++ goto out_unlock;
++ if (unlikely(!himem && glob->used_memory + amount > lomem_limit))
++ goto out_unlock;
++
++ if (reserve) {
++ glob->used_total_memory += amount;
++ if (!himem)
++ glob->used_memory += amount;
++ }
++ ret = 0;
++out_unlock:
++ spin_unlock(&glob->lock);
++ ttm_check_swapping(glob);
++
++ return ret;
++}
++
++int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
++ bool no_wait, bool interruptible, bool himem)
++{
++ int count = TTM_MEMORY_ALLOC_RETRIES;
++
++ while (unlikely(ttm_mem_global_reserve(glob,
++ memory,
++ himem,
++ true) != 0)) {
++ if (no_wait)
++ return -ENOMEM;
++ if (unlikely(count-- == 0))
++ return -ENOMEM;
++ ttm_shrink(glob, false, memory + (memory >> 2) + 16);
++ }
++
++ return 0;
++}
++
++size_t ttm_round_pot(size_t size)
++{
++ if ((size & (size - 1)) == 0)
++ return size;
++ else if (size > PAGE_SIZE)
++ return PAGE_ALIGN(size);
++ else {
++ size_t tmp_size = 4;
++
++ while (tmp_size < size)
++ tmp_size <<= 1;
++
++ return tmp_size;
++ }
++ return 0;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_memory.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_memory.h
+new file mode 100644
+index 0000000..2ceeb32
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_memory.h
+@@ -0,0 +1,147 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef TTM_MEMORY_H
++#define TTM_MEMORY_H
++
++#include <linux/workqueue.h>
++#include <linux/spinlock.h>
++#include <linux/wait.h>
++
++/**
++ * struct ttm_mem_shrink - callback to shrink TTM memory usage.
++ *
++ * @do_shrink: The callback function.
++ *
++ * Arguments to the do_shrink functions are intended to be passed using
++ * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
++ * and can be accessed using container_of().
++ */
++
++struct ttm_mem_shrink {
++ int (*do_shrink) (struct ttm_mem_shrink *);
++};
++
++/**
++ * struct ttm_mem_global - Global memory accounting structure.
++ *
++ * @shrink: A single callback to shrink TTM memory usage. Extend this
++ * to a linked list to be able to handle multiple callbacks when needed.
++ * @swap_queue: A workqueue to handle shrinking in low memory situations. We
++ * need a separate workqueue since it will spend a lot of time waiting
++ * for the GPU, and this will otherwise block other workqueue tasks(?)
++ * At this point we use only a single-threaded workqueue.
++ * @work: The workqueue callback for the shrink queue.
++ * @queue: Wait queue for processes suspended waiting for memory.
++ * @lock: Lock to protect the @shrink - and the memory accounting members,
++ * that is, essentially the whole structure with some exceptions.
++ * @emer_memory: Lowmem memory limit available for root.
++ * @max_memory: Lowmem memory limit available for non-root.
++ * @swap_limit: Lowmem memory limit where the shrink workqueue kicks in.
++ * @used_memory: Currently used lowmem memory.
++ * @used_total_memory: Currently used total (lowmem + highmem) memory.
++ * @total_memory_swap_limit: Total memory limit where the shrink workqueue
++ * kicks in.
++ * @max_total_memory: Total memory available to non-root processes.
++ * @emer_total_memory: Total memory available to root processes.
++ *
++ * Note that this structure is not per device. It should be global for all
++ * graphics devices.
++ */
++
++struct ttm_mem_global {
++ struct ttm_mem_shrink *shrink;
++ struct workqueue_struct *swap_queue;
++ struct work_struct work;
++ wait_queue_head_t queue;
++ spinlock_t lock;
++ uint64_t emer_memory;
++ uint64_t max_memory;
++ uint64_t swap_limit;
++ uint64_t used_memory;
++ uint64_t used_total_memory;
++ uint64_t total_memory_swap_limit;
++ uint64_t max_total_memory;
++ uint64_t emer_total_memory;
++};
++
++/**
++ * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
++ *
++ * @shrink: The object to initialize.
++ * @func: The callback function.
++ */
++
++static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
++ int (*func) (struct ttm_mem_shrink *))
++{
++ shrink->do_shrink = func;
++}
++
++/**
++ * ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
++ *
++ * @glob: The struct ttm_mem_global object to register with.
++ * @shrink: An initialized struct ttm_mem_shrink object to register.
++ *
++ * Returns:
++ * -EBUSY: There's already a callback registered. (May change).
++ */
++
++static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
++ struct ttm_mem_shrink *shrink)
++{
++ spin_lock(&glob->lock);
++ if (glob->shrink != NULL) {
++ spin_unlock(&glob->lock);
++ return -EBUSY;
++ }
++ glob->shrink = shrink;
++ spin_unlock(&glob->lock);
++ return 0;
++}
++
++/**
++ * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
++ *
++ * @glob: The struct ttm_mem_global object to unregister from.
++ * @shrink: A previously registered struct ttm_mem_shrink object.
++ *
++ */
++
++static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
++ struct ttm_mem_shrink *shrink)
++{
++ spin_lock(&glob->lock);
++ BUG_ON(glob->shrink != shrink);
++ glob->shrink = NULL;
++ spin_unlock(&glob->lock);
++}
++
++extern int ttm_mem_global_init(struct ttm_mem_global *glob);
++extern void ttm_mem_global_release(struct ttm_mem_global *glob);
++extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
++ bool no_wait, bool interruptible, bool himem);
++extern void ttm_mem_global_free(struct ttm_mem_global *glob,
++ uint64_t amount, bool himem);
++extern size_t ttm_round_pot(size_t size);
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_object.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_object.c
+new file mode 100644
+index 0000000..53ee1c9
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_object.c
+@@ -0,0 +1,440 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++/** @file ttm_ref_object.c
++ *
++ * Base- and reference object implementation for the various
++ * ttm objects. Implements reference counting, minimal security checks
++ * and release on file close.
++ */
++
++/**
++ * struct ttm_object_file
++ *
++ * @tdev: Pointer to the ttm_object_device.
++ *
++ * @lock: Lock that protects the ref_list list and the
++ * ref_hash hash tables.
++ *
++ * @ref_list: List of ttm_ref_objects to be destroyed at
++ * file release.
++ *
++ * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
++ * for fast lookup of ref objects given a base object.
++ */
++
++#include "ttm_object.h"
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/slab.h>
++#include <asm/atomic.h>
++
++struct ttm_object_file {
++ struct ttm_object_device *tdev;
++ rwlock_t lock;
++ struct list_head ref_list;
++ struct drm_open_hash ref_hash[TTM_REF_NUM];
++ struct kref refcount;
++};
++
++/**
++ * struct ttm_object_device
++ *
++ * @object_lock: lock that protects the object_hash hash table.
++ *
++ * @object_hash: hash table for fast lookup of object global names.
++ *
++ * @object_count: Per device object count.
++ *
++ * This is the per-device data structure needed for ttm object management.
++ */
++
++struct ttm_object_device {
++ rwlock_t object_lock;
++ struct drm_open_hash object_hash;
++ atomic_t object_count;
++ struct ttm_mem_global *mem_glob;
++};
++
++/**
++ * struct ttm_ref_object
++ *
++ * @hash: Hash entry for the per-file object reference hash.
++ *
++ * @head: List entry for the per-file list of ref-objects.
++ *
++ * @kref: Ref count.
++ *
++ * @obj: Base object this ref object is referencing.
++ *
++ * @ref_type: Type of ref object.
++ *
++ * This is similar to an idr object, but it also has a hash table entry
++ * that allows lookup with a pointer to the referenced object as a key. In
++ * that way, one can easily detect whether a base object is referenced by
++ * a particular ttm_object_file. It also carries a ref count to avoid creating
++ * multiple ref objects if a ttm_object_file references the same base object
++ * more than once.
++ */
++
++struct ttm_ref_object {
++ struct drm_hash_item hash;
++ struct list_head head;
++ struct kref kref;
++ struct ttm_base_object *obj;
++ enum ttm_ref_type ref_type;
++ struct ttm_object_file *tfile;
++};
++
++static inline struct ttm_object_file *
++ttm_object_file_ref(struct ttm_object_file *tfile)
++{
++ kref_get(&tfile->refcount);
++ return tfile;
++}
++
++static void ttm_object_file_destroy(struct kref *kref)
++{
++ struct ttm_object_file *tfile =
++ container_of(kref, struct ttm_object_file, refcount);
++
++ /* printk(KERN_INFO "Freeing 0x%08lx\n", (unsigned long) tfile); */
++ kfree(tfile);
++}
++
++
++static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
++{
++ struct ttm_object_file *tfile = *p_tfile;
++
++ *p_tfile = NULL;
++ kref_put(&tfile->refcount, ttm_object_file_destroy);
++}
++
++
++int ttm_base_object_init(struct ttm_object_file *tfile,
++ struct ttm_base_object *base,
++ bool shareable,
++ enum ttm_object_type object_type,
++ void (*refcount_release) (struct ttm_base_object **),
++ void (*ref_obj_release) (struct ttm_base_object *,
++ enum ttm_ref_type ref_type))
++{
++ struct ttm_object_device *tdev = tfile->tdev;
++ int ret;
++
++ base->shareable = shareable;
++ base->tfile = ttm_object_file_ref(tfile);
++ base->refcount_release = refcount_release;
++ base->ref_obj_release = ref_obj_release;
++ base->object_type = object_type;
++ write_lock(&tdev->object_lock);
++ kref_init(&base->refcount);
++ ret = drm_ht_just_insert_please(&tdev->object_hash,
++ &base->hash,
++ (unsigned long)base, 31, 0, 0);
++ write_unlock(&tdev->object_lock);
++ if (unlikely(ret != 0))
++ goto out_err0;
++
++ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
++ if (unlikely(ret != 0))
++ goto out_err1;
++
++ ttm_base_object_unref(&base);
++
++ return 0;
++out_err1:
++ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
++out_err0:
++ return ret;
++}
++
++static void ttm_release_base(struct kref *kref)
++{
++ struct ttm_base_object *base =
++ container_of(kref, struct ttm_base_object, refcount);
++ struct ttm_object_device *tdev = base->tfile->tdev;
++
++ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
++ write_unlock(&tdev->object_lock);
++ if (base->refcount_release) {
++ ttm_object_file_unref(&base->tfile);
++ base->refcount_release(&base);
++ }
++ write_lock(&tdev->object_lock);
++}
++
++void ttm_base_object_unref(struct ttm_base_object **p_base)
++{
++ struct ttm_base_object *base = *p_base;
++ struct ttm_object_device *tdev = base->tfile->tdev;
++
++ /* printk(KERN_INFO "TTM base object unref.\n"); */
++ *p_base = NULL;
++
++ /*
++ * Need to take the lock here to avoid racing with
++ * users trying to look up the object.
++ */
++
++ write_lock(&tdev->object_lock);
++ (void)kref_put(&base->refcount, &ttm_release_base);
++ write_unlock(&tdev->object_lock);
++}
++
++struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
++ uint32_t key)
++{
++ struct ttm_object_device *tdev = tfile->tdev;
++ struct ttm_base_object *base;
++ struct drm_hash_item *hash;
++ int ret;
++
++ read_lock(&tdev->object_lock);
++ ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
++
++ if (likely(ret == 0)) {
++ base = drm_hash_entry(hash, struct ttm_base_object, hash);
++ kref_get(&base->refcount);
++ }
++ read_unlock(&tdev->object_lock);
++
++ if (unlikely(ret != 0))
++ return NULL;
++
++ if (tfile != base->tfile && !base->shareable) {
++ printk(KERN_ERR "Attempted access of non-shareable object.\n");
++ ttm_base_object_unref(&base);
++ return NULL;
++ }
++
++ return base;
++}
++
++int ttm_ref_object_add(struct ttm_object_file *tfile,
++ struct ttm_base_object *base,
++ enum ttm_ref_type ref_type, bool *existed)
++{
++ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
++ struct ttm_ref_object *ref;
++ struct drm_hash_item *hash;
++ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
++ int ret = -EINVAL;
++
++ if (existed != NULL)
++ *existed = true;
++
++ while (ret == -EINVAL) {
++ read_lock(&tfile->lock);
++ ret = drm_ht_find_item(ht, base->hash.key, &hash);
++
++ if (ret == 0) {
++ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
++ kref_get(&ref->kref);
++ read_unlock(&tfile->lock);
++ break;
++ }
++
++ read_unlock(&tfile->lock);
++ ret = ttm_mem_global_alloc(mem_glob,
++ sizeof(*ref),
++ false,
++ false,
++ false);
++ if (unlikely(ret != 0))
++ return ret;
++ ref = kmalloc(sizeof(*ref), GFP_KERNEL);
++ if (unlikely(ref == NULL)) {
++ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
++ return -ENOMEM;
++ }
++
++ ref->hash.key = base->hash.key;
++ ref->obj = base;
++ ref->tfile = tfile;
++ ref->ref_type = ref_type;
++ kref_init(&ref->kref);
++
++ write_lock(&tfile->lock);
++ ret = drm_ht_insert_item(ht, &ref->hash);
++
++ if (likely(ret == 0)) {
++ list_add_tail(&ref->head, &tfile->ref_list);
++ kref_get(&base->refcount);
++ write_unlock(&tfile->lock);
++ if (existed != NULL)
++ *existed = false;
++ break;
++ }
++
++ write_unlock(&tfile->lock);
++ BUG_ON(ret != -EINVAL);
++
++ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
++ kfree(ref);
++ }
++
++ return ret;
++}
++
++static void ttm_ref_object_release(struct kref *kref)
++{
++ struct ttm_ref_object *ref =
++ container_of(kref, struct ttm_ref_object, kref);
++ struct ttm_base_object *base = ref->obj;
++ struct ttm_object_file *tfile = ref->tfile;
++ struct drm_open_hash *ht;
++ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
++
++ ht = &tfile->ref_hash[ref->ref_type];
++ (void)drm_ht_remove_item(ht, &ref->hash);
++ list_del(&ref->head);
++ write_unlock(&tfile->lock);
++
++ if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
++ base->ref_obj_release(base, ref->ref_type);
++
++ ttm_base_object_unref(&ref->obj);
++ ttm_mem_global_free(mem_glob, sizeof(*ref), false);
++ kfree(ref);
++ write_lock(&tfile->lock);
++}
++
++int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
++ unsigned long key, enum ttm_ref_type ref_type)
++{
++ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
++ struct ttm_ref_object *ref;
++ struct drm_hash_item *hash;
++ int ret;
++
++ write_lock(&tfile->lock);
++ ret = drm_ht_find_item(ht, key, &hash);
++ if (unlikely(ret != 0)) {
++ write_unlock(&tfile->lock);
++ return -EINVAL;
++ }
++ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
++ kref_put(&ref->kref, ttm_ref_object_release);
++ write_unlock(&tfile->lock);
++ return 0;
++}
++
++void ttm_object_file_release(struct ttm_object_file **p_tfile)
++{
++ struct ttm_ref_object *ref;
++ struct list_head *list;
++ unsigned int i;
++ struct ttm_object_file *tfile = *p_tfile;
++
++ *p_tfile = NULL;
++ write_lock(&tfile->lock);
++
++ /*
++ * Since we release the lock within the loop, we have to
++ * restart it from the beginning each time.
++ */
++
++ while (!list_empty(&tfile->ref_list)) {
++ list = tfile->ref_list.next;
++ ref = list_entry(list, struct ttm_ref_object, head);
++ ttm_ref_object_release(&ref->kref);
++ }
++
++ for (i = 0; i < TTM_REF_NUM; ++i)
++ drm_ht_remove(&tfile->ref_hash[i]);
++
++ write_unlock(&tfile->lock);
++ ttm_object_file_unref(&tfile);
++}
++
++struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
++ unsigned int hash_order)
++{
++ struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
++ unsigned int i;
++ unsigned int j = 0;
++ int ret;
++
++ if (unlikely(tfile == NULL))
++ return NULL;
++
++ rwlock_init(&tfile->lock);
++ tfile->tdev = tdev;
++ kref_init(&tfile->refcount);
++ INIT_LIST_HEAD(&tfile->ref_list);
++
++ for (i = 0; i < TTM_REF_NUM; ++i) {
++ ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
++ if (ret) {
++ j = i;
++ goto out_err;
++ }
++ }
++
++ return tfile;
++out_err:
++ for (i = 0; i < j; ++i)
++ drm_ht_remove(&tfile->ref_hash[i]);
++
++ kfree(tfile);
++
++ return NULL;
++}
++
++struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
++ *mem_glob,
++ unsigned int hash_order)
++{
++ struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
++ int ret;
++
++ if (unlikely(tdev == NULL))
++ return NULL;
++
++ tdev->mem_glob = mem_glob;
++ rwlock_init(&tdev->object_lock);
++ atomic_set(&tdev->object_count, 0);
++ ret = drm_ht_create(&tdev->object_hash, hash_order);
++
++ if (likely(ret == 0))
++ return tdev;
++
++ kfree(tdev);
++ return NULL;
++}
++
++void ttm_object_device_release(struct ttm_object_device **p_tdev)
++{
++ struct ttm_object_device *tdev = *p_tdev;
++
++ *p_tdev = NULL;
++
++ write_lock(&tdev->object_lock);
++ drm_ht_remove(&tdev->object_hash);
++ write_unlock(&tdev->object_lock);
++
++ kfree(tdev);
++}
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_object.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_object.h
+new file mode 100644
+index 0000000..b04c714
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_object.h
+@@ -0,0 +1,262 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++/** @file ttm_ref_object.h
++ *
++ * Base- and reference object implementation for the various
++ * ttm objects. Implements reference counting, minimal security checks
++ * and release on file close.
++ */
++
++#ifndef _TTM_OBJECT_H_
++#define _TTM_OBJECT_H_
++
++#include <linux/list.h>
++#include <drm/drm_hashtab.h>
++#include <linux/kref.h>
++#include "ttm_memory.h"
++
++/**
++ * enum ttm_ref_type
++ *
++ * Describes what type of reference a ref object holds.
++ *
++ * TTM_REF_USAGE is a simple refcount on a base object.
++ *
++ * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
++ * buffer object.
++ *
++ * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
++ * buffer object.
++ *
++ */
++
++enum ttm_ref_type {
++ TTM_REF_USAGE,
++ TTM_REF_SYNCCPU_READ,
++ TTM_REF_SYNCCPU_WRITE,
++ TTM_REF_NUM
++};
++
++/**
++ * enum ttm_object_type
++ *
++ * One entry per ttm object type.
++ * Device-specific types should use the
++ * ttm_driver_typex types.
++ */
++
++enum ttm_object_type {
++ ttm_fence_type,
++ ttm_buffer_type,
++ ttm_lock_type,
++ ttm_driver_type0 = 256,
++ ttm_driver_type1
++};
++
++struct ttm_object_file;
++struct ttm_object_device;
++
++/**
++ * struct ttm_base_object
++ *
++ * @hash: hash entry for the per-device object hash.
++ * @type: derived type this object is base class for.
++ * @shareable: Other ttm_object_files can access this object.
++ *
++ * @tfile: Pointer to ttm_object_file of the creator.
++ * NULL if the object was not created by a user request.
++ * (kernel object).
++ *
++ * @refcount: Number of references to this object, not
++ * including the hash entry. A reference to a base object can
++ * only be held by a ref object.
++ *
++ * @refcount_release: A function to be called when there are
++ * no more references to this object. This function should
++ * destroy the object (or make sure destruction eventually happens),
++ * and when it is called, the object has
++ * already been taken out of the per-device hash. The parameter
++ * "base" should be set to NULL by the function.
++ *
++ * @ref_obj_release: A function to be called when a reference object
++ * with another ttm_ref_type than TTM_REF_USAGE is deleted.
++ * this function may, for example, release a lock held by a user-space
++ * process.
++ *
++ * This struct is intended to be used as a base struct for objects that
++ * are visible to user-space. It provides a global name, race-safe
++ * access and refcounting, minimal access control and hooks for unref actions.
++ */
++
++struct ttm_base_object {
++ struct drm_hash_item hash;
++ enum ttm_object_type object_type;
++ bool shareable;
++ struct ttm_object_file *tfile;
++ struct kref refcount;
++ void (*refcount_release) (struct ttm_base_object **base);
++ void (*ref_obj_release) (struct ttm_base_object *base,
++ enum ttm_ref_type ref_type);
++};
++
++/**
++ * ttm_base_object_init
++ *
++ * @tfile: Pointer to a struct ttm_object_file.
++ * @base: The struct ttm_base_object to initialize.
++ * @shareable: This object is shareable with other applications.
++ * (different @tfile pointers.)
++ * @type: The object type.
++ * @refcount_release: See the struct ttm_base_object description.
++ * @ref_obj_release: See the struct ttm_base_object description.
++ *
++ * Initializes a struct ttm_base_object.
++ */
++
++extern int ttm_base_object_init(struct ttm_object_file *tfile,
++ struct ttm_base_object *base,
++ bool shareable,
++ enum ttm_object_type type,
++ void (*refcount_release) (struct ttm_base_object
++ **),
++ void (*ref_obj_release) (struct ttm_base_object
++ *,
++ enum ttm_ref_type
++ ref_type));
++
++/**
++ * ttm_base_object_lookup
++ *
++ * @tfile: Pointer to a struct ttm_object_file.
++ * @key: Hash key
++ *
++ * Looks up a struct ttm_base_object with the key @key.
++ * Also verifies that the object is visible to the application, by
++ * comparing the @tfile argument and checking the object shareable flag.
++ */
++
++extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
++ *tfile, uint32_t key);
++
++/**
++ * ttm_base_object_unref
++ *
++ * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
++ *
++ * Decrements the base object refcount and clears the pointer pointed to by
++ * p_base.
++ */
++
++extern void ttm_base_object_unref(struct ttm_base_object **p_base);
++
++/**
++ * ttm_ref_object_add.
++ *
++ * @tfile: A struct ttm_object_file representing the application owning the
++ * ref_object.
++ * @base: The base object to reference.
++ * @ref_type: The type of reference.
++ * @existed: Upon completion, indicates that an identical reference object
++ * already existed, and the refcount was upped on that object instead.
++ *
++ * Adding a ref object to a base object is basically like referencing the
++ * base object, but a user-space application holds the reference. When the
++ * file corresponding to @tfile is closed, all its reference objects are
++ * deleted. A reference object can have different types depending on what
++ * it's intended for. It can be refcounting to prevent object destruction,
++ * When user-space takes a lock, it can add a ref object to that lock to
++ * make sure the lock is released if the application dies. A ref object
++ * will hold a single reference on a base object.
++ */
++extern int ttm_ref_object_add(struct ttm_object_file *tfile,
++ struct ttm_base_object *base,
++ enum ttm_ref_type ref_type, bool *existed);
++/**
++ * ttm_ref_object_base_unref
++ *
++ * @key: Key representing the base object.
++ * @ref_type: Ref type of the ref object to be dereferenced.
++ *
++ * Unreference a ref object with type @ref_type
++ * on the base object identified by @key. If there are no duplicate
++ * references, the ref object will be destroyed and the base object
++ * will be unreferenced.
++ */
++extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
++ unsigned long key,
++ enum ttm_ref_type ref_type);
++
++/**
++ * ttm_object_file_init - initialize a struct ttm_object file
++ *
++ * @tdev: A struct ttm_object device this file is initialized on.
++ * @hash_order: Order of the hash table used to hold the reference objects.
++ *
++ * This is typically called by the file_ops::open function.
++ */
++
++extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
++ *tdev,
++ unsigned int hash_order);
++
++/**
++ * ttm_object_file_release - release data held by a ttm_object_file
++ *
++ * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
++ * *p_tfile will be set to NULL by this function.
++ *
++ * Releases all data associated by a ttm_object_file.
++ * Typically called from file_ops::release. The caller must
++ * ensure that there are no concurrent users of tfile.
++ */
++
++extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
++
++/**
++ * ttm_object_device_init - initialize a struct ttm_object_device
++ *
++ * @hash_order: Order of hash table used to hash the base objects.
++ *
++ * This function is typically called on device initialization to prepare
++ * data structures needed for ttm base and ref objects.
++ */
++
++extern struct ttm_object_device *ttm_object_device_init
++ (struct ttm_mem_global *mem_glob, unsigned int hash_order);
++
++/**
++ * ttm_object_device_release - release data held by a ttm_object_device
++ *
++ * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
++ * *p_tdev will be set to NULL by this function.
++ *
++ * Releases all data associated by a ttm_object_device.
++ * Typically called from driver::unload before the destruction of the
++ * device private data structure.
++ */
++
++extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_pat_compat.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_pat_compat.c
+new file mode 100644
+index 0000000..83f34c6
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_pat_compat.c
+@@ -0,0 +1,164 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm_pat_compat.h"
++#include <linux/version.h>
++#include <asm/page.h>
++#include <linux/spinlock.h>
++#include <asm/pgtable.h>
++
++#if (defined(CONFIG_X86) && !defined(CONFIG_X86_PAT))
++#include <asm/tlbflush.h>
++#include <asm/msr.h>
++#include <asm/system.h>
++#include <linux/notifier.h>
++#include <linux/cpu.h>
++
++#ifndef MSR_IA32_CR_PAT
++#define MSR_IA32_CR_PAT 0x0277
++#endif
++
++#ifndef _PAGE_PAT
++#define _PAGE_PAT 0x080
++#endif
++
++static int ttm_has_pat;
++
++/*
++ * Used at resume-time when CPU-s are fired up.
++ */
++
++static void ttm_pat_ipi_handler(void *notused)
++{
++ u32 v1, v2;
++
++ rdmsr(MSR_IA32_CR_PAT, v1, v2);
++ v2 &= 0xFFFFFFF8;
++ v2 |= 0x00000001;
++ wbinvd();
++ wrmsr(MSR_IA32_CR_PAT, v1, v2);
++ wbinvd();
++ __flush_tlb_all();
++}
++
++static void ttm_pat_enable(void)
++{
++ if (on_each_cpu(ttm_pat_ipi_handler, NULL, 1) != 0)
++ printk(KERN_ERR "Timed out setting up CPU PAT.\n");
++}
++
++void ttm_pat_resume(void)
++{
++ if (unlikely(!ttm_has_pat))
++ return;
++
++ ttm_pat_enable();
++}
++
++static int psb_cpu_callback(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ if (action == CPU_ONLINE)
++ ttm_pat_resume();
++
++ return 0;
++}
++
++static struct notifier_block psb_nb = {
++ .notifier_call = psb_cpu_callback,
++ .priority = 1
++};
++
++/*
++ * Set i386 PAT entry PAT4 to Write-combining memory type on all processors.
++ */
++
++void ttm_pat_init(void)
++{
++ if (likely(ttm_has_pat))
++ return;
++
++ if (!boot_cpu_has(X86_FEATURE_PAT))
++ return;
++
++ ttm_pat_enable();
++
++ if (num_present_cpus() > 1)
++ register_cpu_notifier(&psb_nb);
++
++ ttm_has_pat = 1;
++}
++
++void ttm_pat_takedown(void)
++{
++ if (unlikely(!ttm_has_pat))
++ return;
++
++ if (num_present_cpus() > 1)
++ unregister_cpu_notifier(&psb_nb);
++
++ ttm_has_pat = 0;
++}
++
++pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
++{
++ if (likely(ttm_has_pat)) {
++ pgprot_val(prot) |= _PAGE_PAT;
++ return prot;
++ } else {
++ return pgprot_noncached(prot);
++ }
++}
++
++#else
++
++void ttm_pat_init(void)
++{
++}
++
++void ttm_pat_takedown(void)
++{
++}
++
++void ttm_pat_resume(void)
++{
++}
++
++#ifdef CONFIG_X86
++#include <asm/pat.h>
++
++pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
++{
++ uint32_t cache_bits = ((1) ? _PAGE_CACHE_WC : _PAGE_CACHE_UC_MINUS);
++
++ return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_MASK) | cache_bits);
++}
++#else
++pgprot_t pgprot_ttm_x86_wc(pgprot_t prot)
++{
++ BUG();
++}
++#endif
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_pat_compat.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_pat_compat.h
+new file mode 100644
+index 0000000..4702f1c
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_pat_compat.h
+@@ -0,0 +1,34 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_PAT_COMPAT_
++#define _TTM_PAT_COMPAT_
++#include <asm/page.h>
++#include <asm/pgtable_types.h>
++extern void ttm_pat_init(void);
++extern void ttm_pat_takedown(void);
++extern void ttm_pat_resume(void);
++extern pgprot_t pgprot_ttm_x86_wc(pgprot_t prot);
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_placement_common.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_placement_common.h
+new file mode 100644
+index 0000000..067ce27
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_placement_common.h
+@@ -0,0 +1,91 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_PL_COMMON_H_
++#define _TTM_PL_COMMON_H_
++/*
++ * Memory regions for data placement.
++ */
++
++#define TTM_PL_SYSTEM 0
++#define TTM_PL_TT 1
++#define TTM_PL_VRAM 2
++#define TTM_PL_PRIV0 3
++#define TTM_PL_PRIV1 4
++#define TTM_PL_PRIV2 5
++#define TTM_PL_PRIV3 6
++#define TTM_PL_PRIV4 7
++#define TTM_PL_PRIV5 8
++#define TTM_PL_CI 9
++#define TTM_PL_RAR 10
++#define TTM_PL_SWAPPED 15
++
++#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
++#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
++#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
++#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
++#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
++#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
++#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
++#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
++#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
++#define TTM_PL_FLAG_CI (1 << TTM_PL_CI)
++#define TTM_PL_FLAG_RAR (1 << TTM_PL_RAR)
++#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
++#define TTM_PL_MASK_MEM 0x0000FFFF
++
++/*
++ * Other flags that affects data placement.
++ * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
++ * if available.
++ * TTM_PL_FLAG_SHARED means that another application may
++ * reference the buffer.
++ * TTM_PL_FLAG_NO_EVICT means that the buffer may never
++ * be evicted to make room for other buffers.
++ */
++
++#define TTM_PL_FLAG_CACHED (1 << 16)
++#define TTM_PL_FLAG_UNCACHED (1 << 17)
++#define TTM_PL_FLAG_WC (1 << 18)
++#define TTM_PL_FLAG_SHARED (1 << 20)
++#define TTM_PL_FLAG_NO_EVICT (1 << 21)
++
++#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
++ TTM_PL_FLAG_UNCACHED | \
++ TTM_PL_FLAG_WC)
++
++#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
++
++/*
++ * Access flags to be used for CPU- and GPU- mappings.
++ * The idea is that the TTM synchronization mechanism will
++ * allow concurrent READ access and exclusive write access.
++ * Currently GPU- and CPU accesses are exclusive.
++ */
++
++#define TTM_ACCESS_READ (1 << 0)
++#define TTM_ACCESS_WRITE (1 << 1)
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_placement_user.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_placement_user.c
+new file mode 100644
+index 0000000..e4d6964
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_placement_user.c
+@@ -0,0 +1,468 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "ttm_placement_user.h"
++#include "ttm_bo_driver.h"
++#include "ttm_object.h"
++#include "ttm_userobj_api.h"
++#include "ttm_lock.h"
++
++struct ttm_bo_user_object {
++ struct ttm_base_object base;
++ struct ttm_buffer_object bo;
++};
++
++static size_t pl_bo_size;
++
++static size_t ttm_pl_size(struct ttm_bo_device *bdev, unsigned long num_pages)
++{
++ size_t page_array_size =
++ (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
++
++ if (unlikely(pl_bo_size == 0)) {
++ pl_bo_size = bdev->ttm_bo_extra_size +
++ ttm_round_pot(sizeof(struct ttm_bo_user_object));
++ }
++
++ return bdev->ttm_bo_size + 2 * page_array_size;
++}
++
++static struct ttm_bo_user_object *ttm_bo_user_lookup(struct ttm_object_file
++ *tfile, uint32_t handle)
++{
++ struct ttm_base_object *base;
++
++ base = ttm_base_object_lookup(tfile, handle);
++ if (unlikely(base == NULL)) {
++ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
++ (unsigned long)handle);
++ return NULL;
++ }
++
++ if (unlikely(base->object_type != ttm_buffer_type)) {
++ ttm_base_object_unref(&base);
++ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
++ (unsigned long)handle);
++ return NULL;
++ }
++
++ return container_of(base, struct ttm_bo_user_object, base);
++}
++
++struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file
++ *tfile, uint32_t handle)
++{
++ struct ttm_bo_user_object *user_bo;
++ struct ttm_base_object *base;
++
++ user_bo = ttm_bo_user_lookup(tfile, handle);
++ if (unlikely(user_bo == NULL))
++ return NULL;
++
++ (void)ttm_bo_reference(&user_bo->bo);
++ base = &user_bo->base;
++ ttm_base_object_unref(&base);
++ return &user_bo->bo;
++}
++
++static void ttm_bo_user_destroy(struct ttm_buffer_object *bo)
++{
++ struct ttm_bo_user_object *user_bo =
++ container_of(bo, struct ttm_bo_user_object, bo);
++
++ ttm_mem_global_free(bo->bdev->mem_glob, bo->acc_size, false);
++ kfree(user_bo);
++}
++
++static void ttm_bo_user_release(struct ttm_base_object **p_base)
++{
++ struct ttm_bo_user_object *user_bo;
++ struct ttm_base_object *base = *p_base;
++ struct ttm_buffer_object *bo;
++
++ *p_base = NULL;
++
++ if (unlikely(base == NULL))
++ return;
++
++ user_bo = container_of(base, struct ttm_bo_user_object, base);
++ bo = &user_bo->bo;
++ ttm_bo_unref(&bo);
++}
++
++static void ttm_bo_user_ref_release(struct ttm_base_object *base,
++ enum ttm_ref_type ref_type)
++{
++ struct ttm_bo_user_object *user_bo =
++ container_of(base, struct ttm_bo_user_object, base);
++ struct ttm_buffer_object *bo = &user_bo->bo;
++
++ switch (ref_type) {
++ case TTM_REF_SYNCCPU_WRITE:
++ ttm_bo_synccpu_write_release(bo);
++ break;
++ default:
++ BUG();
++ }
++}
++
++static void ttm_pl_fill_rep(struct ttm_buffer_object *bo,
++ struct ttm_pl_rep *rep)
++{
++ struct ttm_bo_user_object *user_bo =
++ container_of(bo, struct ttm_bo_user_object, bo);
++
++ rep->gpu_offset = bo->offset;
++ rep->bo_size = bo->num_pages << PAGE_SHIFT;
++ rep->map_handle = bo->addr_space_offset;
++ rep->placement = bo->mem.flags;
++ rep->handle = user_bo->base.hash.key;
++ rep->sync_object_arg = (uint32_t) (unsigned long)bo->sync_obj_arg;
++}
++
++int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
++ struct ttm_bo_device *bdev,
++ struct ttm_lock *lock, void *data)
++{
++ union ttm_pl_create_arg *arg = data;
++ struct ttm_pl_create_req *req = &arg->req;
++ struct ttm_pl_rep *rep = &arg->rep;
++ struct ttm_buffer_object *bo;
++ struct ttm_buffer_object *tmp;
++ struct ttm_bo_user_object *user_bo;
++ uint32_t flags;
++ int ret = 0;
++ struct ttm_mem_global *mem_glob = bdev->mem_glob;
++ size_t acc_size =
++ ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
++ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
++ if (unlikely(ret != 0))
++ return ret;
++
++ flags = req->placement;
++ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
++ if (unlikely(user_bo == NULL)) {
++ ttm_mem_global_free(mem_glob, acc_size, false);
++ return -ENOMEM;
++ }
++
++ bo = &user_bo->bo;
++ ret = ttm_read_lock(lock, true);
++ if (unlikely(ret != 0)) {
++ ttm_mem_global_free(mem_glob, acc_size, false);
++ kfree(user_bo);
++ return ret;
++ }
++
++ ret = ttm_buffer_object_init(bdev, bo, req->size,
++ ttm_bo_type_device, flags,
++ req->page_alignment, 0, true,
++ NULL, acc_size, &ttm_bo_user_destroy);
++ ttm_read_unlock(lock);
++
++ /*
++ * Note that the ttm_buffer_object_init function
++ * would've called the destroy function on failure!!
++ */
++
++ if (unlikely(ret != 0))
++ goto out;
++
++ tmp = ttm_bo_reference(bo);
++ ret = ttm_base_object_init(tfile, &user_bo->base,
++ flags & TTM_PL_FLAG_SHARED,
++ ttm_buffer_type,
++ &ttm_bo_user_release,
++ &ttm_bo_user_ref_release);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ mutex_lock(&bo->mutex);
++ ttm_pl_fill_rep(bo, rep);
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unref(&bo);
++out:
++ return 0;
++out_err:
++ ttm_bo_unref(&tmp);
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
++ struct ttm_bo_device *bdev,
++ struct ttm_lock *lock, void *data)
++{
++ union ttm_pl_create_ub_arg *arg = data;
++ struct ttm_pl_create_ub_req *req = &arg->req;
++ struct ttm_pl_rep *rep = &arg->rep;
++ struct ttm_buffer_object *bo;
++ struct ttm_buffer_object *tmp;
++ struct ttm_bo_user_object *user_bo;
++ uint32_t flags;
++ int ret = 0;
++ struct ttm_mem_global *mem_glob = bdev->mem_glob;
++ size_t acc_size =
++ ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
++ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
++ if (unlikely(ret != 0))
++ return ret;
++
++ flags = req->placement;
++ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
++ if (unlikely(user_bo == NULL)) {
++ ttm_mem_global_free(mem_glob, acc_size, false);
++ return -ENOMEM;
++ }
++ ret = ttm_read_lock(lock, true);
++ if (unlikely(ret != 0)) {
++ ttm_mem_global_free(mem_glob, acc_size, false);
++ kfree(user_bo);
++ return ret;
++ }
++ bo = &user_bo->bo;
++ ret = ttm_buffer_object_init(bdev,
++ bo,
++ req->size,
++ ttm_bo_type_user,
++ flags,
++ req->page_alignment,
++ req->user_address,
++ true,
++ NULL,
++ acc_size,
++ &ttm_bo_user_destroy);
++
++ /*
++ * Note that the ttm_buffer_object_init function
++ * would've called the destroy function on failure!!
++ */
++ ttm_read_unlock(lock);
++ if (unlikely(ret != 0))
++ goto out;
++
++ tmp = ttm_bo_reference(bo);
++ ret = ttm_base_object_init(tfile, &user_bo->base,
++ flags & TTM_PL_FLAG_SHARED,
++ ttm_buffer_type,
++ &ttm_bo_user_release,
++ &ttm_bo_user_ref_release);
++ if (unlikely(ret != 0))
++ goto out_err;
++
++ mutex_lock(&bo->mutex);
++ ttm_pl_fill_rep(bo, rep);
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unref(&bo);
++out:
++ return 0;
++out_err:
++ ttm_bo_unref(&tmp);
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ union ttm_pl_reference_arg *arg = data;
++ struct ttm_pl_rep *rep = &arg->rep;
++ struct ttm_bo_user_object *user_bo;
++ struct ttm_buffer_object *bo;
++ struct ttm_base_object *base;
++ int ret;
++
++ user_bo = ttm_bo_user_lookup(tfile, arg->req.handle);
++ if (unlikely(user_bo == NULL)) {
++ printk(KERN_ERR "Could not reference buffer object.\n");
++ return -EINVAL;
++ }
++
++ bo = &user_bo->bo;
++ ret = ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
++ if (unlikely(ret != 0)) {
++ printk(KERN_ERR
++ "Could not add a reference to buffer object.\n");
++ goto out;
++ }
++
++ mutex_lock(&bo->mutex);
++ ttm_pl_fill_rep(bo, rep);
++ mutex_unlock(&bo->mutex);
++
++out:
++ base = &user_bo->base;
++ ttm_base_object_unref(&base);
++ return ret;
++}
++
++int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ struct ttm_pl_reference_req *arg = data;
++
++ return ttm_ref_object_base_unref(tfile, arg->handle, TTM_REF_USAGE);
++}
++
++int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ struct ttm_pl_synccpu_arg *arg = data;
++ struct ttm_bo_user_object *user_bo;
++ struct ttm_buffer_object *bo;
++ struct ttm_base_object *base;
++ bool existed;
++ int ret;
++
++ switch (arg->op) {
++ case TTM_PL_SYNCCPU_OP_GRAB:
++ user_bo = ttm_bo_user_lookup(tfile, arg->handle);
++ if (unlikely(user_bo == NULL)) {
++ printk(KERN_ERR
++ "Could not find buffer object for synccpu.\n");
++ return -EINVAL;
++ }
++ bo = &user_bo->bo;
++ base = &user_bo->base;
++ ret = ttm_bo_synccpu_write_grab(bo,
++ arg->access_mode &
++ TTM_PL_SYNCCPU_MODE_NO_BLOCK);
++ if (unlikely(ret != 0)) {
++ ttm_base_object_unref(&base);
++ goto out;
++ }
++ ret = ttm_ref_object_add(tfile, &user_bo->base,
++ TTM_REF_SYNCCPU_WRITE, &existed);
++ if (existed || ret != 0)
++ ttm_bo_synccpu_write_release(bo);
++ ttm_base_object_unref(&base);
++ break;
++ case TTM_PL_SYNCCPU_OP_RELEASE:
++ ret = ttm_ref_object_base_unref(tfile, arg->handle,
++ TTM_REF_SYNCCPU_WRITE);
++ break;
++ default:
++ ret = -EINVAL;
++ break;
++ }
++out:
++ return ret;
++}
++
++int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
++ struct ttm_lock *lock, void *data)
++{
++ union ttm_pl_setstatus_arg *arg = data;
++ struct ttm_pl_setstatus_req *req = &arg->req;
++ struct ttm_pl_rep *rep = &arg->rep;
++ struct ttm_buffer_object *bo;
++ struct ttm_bo_device *bdev;
++ int ret;
++
++ bo = ttm_buffer_object_lookup(tfile, req->handle);
++ if (unlikely(bo == NULL)) {
++ printk(KERN_ERR
++ "Could not find buffer object for setstatus.\n");
++ return -EINVAL;
++ }
++
++ bdev = bo->bdev;
++
++ ret = ttm_read_lock(lock, true);
++ if (unlikely(ret != 0))
++ goto out_err0;
++
++ ret = ttm_bo_reserve(bo, true, false, false, 0);
++ if (unlikely(ret != 0))
++ goto out_err1;
++
++ ret = ttm_bo_wait_cpu(bo, false);
++ if (unlikely(ret != 0))
++ goto out_err2;
++
++ mutex_lock(&bo->mutex);
++ ret = ttm_bo_check_placement(bo, req->set_placement,
++ req->clr_placement);
++ if (unlikely(ret != 0))
++ goto out_err2;
++
++ bo->proposed_flags = (bo->proposed_flags | req->set_placement)
++ & ~req->clr_placement;
++ ret = ttm_buffer_object_validate(bo, true, false);
++ if (unlikely(ret != 0))
++ goto out_err2;
++
++ ttm_pl_fill_rep(bo, rep);
++out_err2:
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unreserve(bo);
++out_err1:
++ ttm_read_unlock(lock);
++out_err0:
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data)
++{
++ struct ttm_pl_waitidle_arg *arg = data;
++ struct ttm_buffer_object *bo;
++ int ret;
++
++ bo = ttm_buffer_object_lookup(tfile, arg->handle);
++ if (unlikely(bo == NULL)) {
++ printk(KERN_ERR "Could not find buffer object for waitidle.\n");
++ return -EINVAL;
++ }
++
++ ret =
++ ttm_bo_block_reservation(bo, true,
++ arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
++ if (unlikely(ret != 0))
++ goto out;
++ mutex_lock(&bo->mutex);
++ ret = ttm_bo_wait(bo,
++ arg->mode & TTM_PL_WAITIDLE_MODE_LAZY,
++ true, arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
++ mutex_unlock(&bo->mutex);
++ ttm_bo_unblock_reservation(bo);
++out:
++ ttm_bo_unref(&bo);
++ return ret;
++}
++
++int ttm_pl_verify_access(struct ttm_buffer_object *bo,
++ struct ttm_object_file *tfile)
++{
++ struct ttm_bo_user_object *ubo;
++
++ /*
++ * Check bo subclass.
++ */
++
++ if (unlikely(bo->destroy != &ttm_bo_user_destroy))
++ return -EPERM;
++
++ ubo = container_of(bo, struct ttm_bo_user_object, bo);
++ if (likely(ubo->base.shareable || ubo->base.tfile == tfile))
++ return 0;
++
++ return -EPERM;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_placement_user.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_placement_user.h
+new file mode 100644
+index 0000000..5d8100f
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_placement_user.h
+@@ -0,0 +1,252 @@
++/**************************************************************************
++ *
++ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors
++ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_PLACEMENT_USER_H_
++#define _TTM_PLACEMENT_USER_H_
++
++#if !defined(__KERNEL__) && !defined(_KERNEL)
++#include <stdint.h>
++#else
++#include <linux/kernel.h>
++#endif
++
++#include "ttm_placement_common.h"
++
++#define TTM_PLACEMENT_MAJOR 0
++#define TTM_PLACEMENT_MINOR 1
++#define TTM_PLACEMENT_PL 0
++#define TTM_PLACEMENT_DATE "080819"
++
++/**
++ * struct ttm_pl_create_req
++ *
++ * @size: The buffer object size.
++ * @placement: Flags that indicate initial acceptable
++ * placement.
++ * @page_alignment: Required alignment in pages.
++ *
++ * Input to the TTM_BO_CREATE ioctl.
++ */
++
++struct ttm_pl_create_req {
++ uint64_t size;
++ uint32_t placement;
++ uint32_t page_alignment;
++};
++
++/**
++ * struct ttm_pl_create_ub_req
++ *
++ * @size: The buffer object size.
++ * @user_address: User-space address of the memory area that
++ * should be used to back the buffer object cast to 64-bit.
++ * @placement: Flags that indicate initial acceptable
++ * placement.
++ * @page_alignment: Required alignment in pages.
++ *
++ * Input to the TTM_BO_CREATE_UB ioctl.
++ */
++
++struct ttm_pl_create_ub_req {
++ uint64_t size;
++ uint64_t user_address;
++ uint32_t placement;
++ uint32_t page_alignment;
++};
++
++/**
++ * struct ttm_pl_rep
++ *
++ * @gpu_offset: The current offset into the memory region used.
++ * This can be used directly by the GPU if there are no
++ * additional GPU mapping procedures used by the driver.
++ *
++ * @bo_size: Actual buffer object size.
++ *
++ * @map_handle: Offset into the device address space.
++ * Used for map, seek, read, write. This will never change
++ * during the lifetime of an object.
++ *
++ * @placement: Flag indicating the placement status of
++ * the buffer object using the TTM_PL flags above.
++ *
++ * @sync_object_arg: Used for user-space synchronization and
++ * depends on the synchronization model used. If fences are
++ * used, this is the buffer_object::fence_type_mask
++ *
++ * Output from the TTM_PL_CREATE and TTM_PL_REFERENCE, and
++ * TTM_PL_SETSTATUS ioctls.
++ */
++
++struct ttm_pl_rep {
++ uint64_t gpu_offset;
++ uint64_t bo_size;
++ uint64_t map_handle;
++ uint32_t placement;
++ uint32_t handle;
++ uint32_t sync_object_arg;
++ uint32_t pad64;
++};
++
++/**
++ * struct ttm_pl_setstatus_req
++ *
++ * @set_placement: Placement flags to set.
++ *
++ * @clr_placement: Placement flags to clear.
++ *
++ * @handle: The object handle
++ *
++ * Input to the TTM_PL_SETSTATUS ioctl.
++ */
++
++struct ttm_pl_setstatus_req {
++ uint32_t set_placement;
++ uint32_t clr_placement;
++ uint32_t handle;
++ uint32_t pad64;
++};
++
++/**
++ * struct ttm_pl_reference_req
++ *
++ * @handle: The object to put a reference on.
++ *
++ * Input to the TTM_PL_REFERENCE and the TTM_PL_UNREFERENCE ioctls.
++ */
++
++struct ttm_pl_reference_req {
++ uint32_t handle;
++ uint32_t pad64;
++};
++
++/*
++ * ACCESS mode flags for SYNCCPU.
++ *
++ * TTM_SYNCCPU_MODE_READ will guarantee that the GPU is not
++ * writing to the buffer.
++ *
++ * TTM_SYNCCPU_MODE_WRITE will guarantee that the GPU is not
++ * accessing the buffer.
++ *
++ * TTM_SYNCCPU_MODE_NO_BLOCK makes sure the call does not wait
++ * for GPU accesses to finish but return -EBUSY.
++ *
++ * TTM_SYNCCPU_MODE_TRYCACHED Try to place the buffer in cacheable
++ * memory while synchronized for CPU.
++ */
++
++#define TTM_PL_SYNCCPU_MODE_READ TTM_ACCESS_READ
++#define TTM_PL_SYNCCPU_MODE_WRITE TTM_ACCESS_WRITE
++#define TTM_PL_SYNCCPU_MODE_NO_BLOCK (1 << 2)
++#define TTM_PL_SYNCCPU_MODE_TRYCACHED (1 << 3)
++
++/**
++ * struct ttm_pl_synccpu_arg
++ *
++ * @handle: The object to synchronize.
++ *
++ * @access_mode: access mode indicated by the
++ * TTM_SYNCCPU_MODE flags.
++ *
++ * @op: indicates whether to grab or release the
++ * buffer for cpu usage.
++ *
++ * Input to the TTM_PL_SYNCCPU ioctl.
++ */
++
++struct ttm_pl_synccpu_arg {
++ uint32_t handle;
++ uint32_t access_mode;
++ enum {
++ TTM_PL_SYNCCPU_OP_GRAB,
++ TTM_PL_SYNCCPU_OP_RELEASE
++ } op;
++ uint32_t pad64;
++};
++
++/*
++ * Waiting mode flags for the TTM_BO_WAITIDLE ioctl.
++ *
++ * TTM_WAITIDLE_MODE_LAZY: Allow for sleeps during polling
++ * wait.
++ *
++ * TTM_WAITIDLE_MODE_NO_BLOCK: Don't block waiting for GPU,
++ * but return -EBUSY if the buffer is busy.
++ */
++
++#define TTM_PL_WAITIDLE_MODE_LAZY (1 << 0)
++#define TTM_PL_WAITIDLE_MODE_NO_BLOCK (1 << 1)
++
++/**
++ * struct ttm_waitidle_arg
++ *
++ * @handle: The object to synchronize.
++ *
++ * @mode: wait mode indicated by the
++ * TTM_SYNCCPU_MODE flags.
++ *
++ * Argument to the TTM_BO_WAITIDLE ioctl.
++ */
++
++struct ttm_pl_waitidle_arg {
++ uint32_t handle;
++ uint32_t mode;
++};
++
++union ttm_pl_create_arg {
++ struct ttm_pl_create_req req;
++ struct ttm_pl_rep rep;
++};
++
++union ttm_pl_reference_arg {
++ struct ttm_pl_reference_req req;
++ struct ttm_pl_rep rep;
++};
++
++union ttm_pl_setstatus_arg {
++ struct ttm_pl_setstatus_req req;
++ struct ttm_pl_rep rep;
++};
++
++union ttm_pl_create_ub_arg {
++ struct ttm_pl_create_ub_req req;
++ struct ttm_pl_rep rep;
++};
++
++/*
++ * Ioctl offsets.
++ */
++
++#define TTM_PL_CREATE 0x00
++#define TTM_PL_REFERENCE 0x01
++#define TTM_PL_UNREF 0x02
++#define TTM_PL_SYNCCPU 0x03
++#define TTM_PL_WAITIDLE 0x04
++#define TTM_PL_SETSTATUS 0x05
++#define TTM_PL_CREATE_UB 0x06
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_regman.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_regman.h
+new file mode 100644
+index 0000000..ed73652
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_regman.h
+@@ -0,0 +1,67 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_REGMAN_H_
++#define _TTM_REGMAN_H_
++
++#include <linux/list.h>
++
++struct ttm_fence_object;
++
++struct ttm_reg {
++ struct list_head head;
++ struct ttm_fence_object *fence;
++ uint32_t fence_type;
++ uint32_t new_fence_type;
++};
++
++struct ttm_reg_manager {
++ struct list_head free;
++ struct list_head lru;
++ struct list_head unfenced;
++
++ int (*reg_reusable)(const struct ttm_reg *reg, const void *data);
++ void (*reg_destroy)(struct ttm_reg *reg);
++};
++
++extern int ttm_regs_alloc(struct ttm_reg_manager *manager,
++ const void *data,
++ uint32_t fence_class,
++ uint32_t fence_type,
++ int interruptible,
++ int no_wait,
++ struct ttm_reg **reg);
++
++extern void ttm_regs_fence(struct ttm_reg_manager *regs,
++ struct ttm_fence_object *fence);
++
++extern void ttm_regs_free(struct ttm_reg_manager *manager);
++extern void ttm_regs_add(struct ttm_reg_manager *manager, struct ttm_reg *reg);
++extern void ttm_regs_init(struct ttm_reg_manager *manager,
++ int (*reg_reusable)(const struct ttm_reg *,
++ const void *),
++ void (*reg_destroy)(struct ttm_reg *));
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_tt.c b/drivers/gpu/drm/mrst/drv/ttm/ttm_tt.c
+new file mode 100644
+index 0000000..4c0e318
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_tt.c
+@@ -0,0 +1,653 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include <linux/version.h>
++#include <linux/vmalloc.h>
++#include <linux/sched.h>
++#include <linux/highmem.h>
++#include <linux/pagemap.h>
++#include <linux/file.h>
++#include <linux/swap.h>
++#include "ttm_bo_driver.h"
++#include "ttm_placement_common.h"
++
++static int ttm_tt_swapin(struct ttm_tt *ttm);
++
++#if defined(CONFIG_X86)
++static void ttm_tt_clflush_page(struct page *page)
++{
++ uint8_t *page_virtual;
++ unsigned int i;
++
++ if (unlikely(page == NULL))
++ return;
++
++ page_virtual = kmap_atomic(page, KM_USER0);
++
++ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
++ clflush(page_virtual + i);
++
++ kunmap_atomic(page_virtual, KM_USER0);
++}
++
++static void ttm_tt_cache_flush_clflush(struct page *pages[],
++ unsigned long num_pages)
++{
++ unsigned long i;
++
++ mb();
++ for (i = 0; i < num_pages; ++i)
++ ttm_tt_clflush_page(*pages++);
++ mb();
++}
++#else
++static void ttm_tt_ipi_handler(void *null)
++{
++ ;
++}
++#endif
++
++void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
++{
++
++#if defined(CONFIG_X86)
++ if (cpu_has_clflush) {
++ ttm_tt_cache_flush_clflush(pages, num_pages);
++ return;
++ }
++#else
++ if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1, 1) != 0)
++ printk(KERN_ERR "Timed out waiting for drm cache flush.\n");
++#endif
++}
++
++/**
++ * Allocates storage for pointers to the pages that back the ttm.
++ *
++ * Uses kmalloc if possible. Otherwise falls back to vmalloc.
++ */
++static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
++{
++ unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
++ ttm->pages = NULL;
++
++ if (size <= PAGE_SIZE)
++ ttm->pages = kzalloc(size, GFP_KERNEL);
++
++ if (!ttm->pages) {
++ ttm->pages = vmalloc_user(size);
++ if (ttm->pages)
++ ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
++ }
++}
++
++static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
++{
++ if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
++ vfree(ttm->pages);
++ ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
++ } else {
++ kfree(ttm->pages);
++ }
++ ttm->pages = NULL;
++}
++
++static struct page *ttm_tt_alloc_page(void)
++{
++ return alloc_page(GFP_KERNEL | __GFP_ZERO);
++}
++
++static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
++{
++ int write;
++ int dirty;
++ struct page *page;
++ int i;
++ struct ttm_backend *be = ttm->be;
++
++ BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
++ write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
++ dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);
++
++ if (be)
++ be->func->clear(be);
++
++ for (i = 0; i < ttm->num_pages; ++i) {
++ page = ttm->pages[i];
++ if (page == NULL)
++ continue;
++
++ if (page == ttm->dummy_read_page) {
++ BUG_ON(write);
++ continue;
++ }
++
++ if (write && dirty && !PageReserved(page))
++ set_page_dirty_lock(page);
++
++ ttm->pages[i] = NULL;
++ ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
++ put_page(page);
++ }
++ ttm->state = tt_unpopulated;
++ ttm->first_himem_page = ttm->num_pages;
++ ttm->last_lomem_page = -1;
++}
++
++static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
++{
++ struct page *p;
++ struct ttm_bo_device *bdev = ttm->bdev;
++ struct ttm_mem_global *mem_glob = bdev->mem_glob;
++ int ret;
++
++ while (NULL == (p = ttm->pages[index])) {
++ p = ttm_tt_alloc_page();
++
++ if (!p)
++ return NULL;
++
++ if (PageHighMem(p)) {
++ ret = ttm_mem_global_alloc(mem_glob,
++ PAGE_SIZE,
++ false,
++ false,
++ true);
++ if (unlikely(ret != 0))
++ goto out_err;
++ ttm->pages[--ttm->first_himem_page] = p;
++ } else {
++ ret =
++ ttm_mem_global_alloc(mem_glob,
++ PAGE_SIZE,
++ false,
++ false,
++ false);
++ if (unlikely(ret != 0))
++ goto out_err;
++ ttm->pages[++ttm->last_lomem_page] = p;
++ }
++ }
++ return p;
++out_err:
++ put_page(p);
++ return NULL;
++}
++
++struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
++{
++ int ret;
++
++ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
++ ret = ttm_tt_swapin(ttm);
++ if (unlikely(ret != 0))
++ return NULL;
++ }
++ return __ttm_tt_get_page(ttm, index);
++}
++
++int ttm_tt_populate(struct ttm_tt *ttm)
++{
++ struct page *page;
++ unsigned long i;
++ struct ttm_backend *be;
++ int ret;
++
++ if (ttm->state != tt_unpopulated)
++ return 0;
++
++ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
++ ret = ttm_tt_swapin(ttm);
++ if (unlikely(ret != 0))
++ return ret;
++ }
++
++ be = ttm->be;
++
++ for (i = 0; i < ttm->num_pages; ++i) {
++ page = __ttm_tt_get_page(ttm, i);
++ if (!page)
++ return -ENOMEM;
++ }
++
++ be->func->populate(be, ttm->num_pages, ttm->pages,
++ ttm->dummy_read_page);
++ ttm->state = tt_unbound;
++ return 0;
++}
++
++#ifdef CONFIG_X86
++static inline int ttm_tt_set_page_caching(struct page *p,
++ enum ttm_caching_state c_state)
++{
++ if (PageHighMem(p))
++ return 0;
++
++ switch (c_state) {
++ case tt_cached:
++ return set_pages_wb(p, 1);
++ case tt_wc:
++ return set_memory_wc((unsigned long) page_address(p), 1);
++ default:
++ return set_pages_uc(p, 1);
++ }
++}
++#else /* CONFIG_X86 */
++static inline int ttm_tt_set_page_caching(struct page *p,
++ enum ttm_caching_state c_state)
++{
++ return 0;
++}
++#endif /* CONFIG_X86 */
++
++/*
++ * Change caching policy for the linear kernel map
++ * for range of pages in a ttm.
++ */
++
++static int ttm_tt_set_caching(struct ttm_tt *ttm,
++ enum ttm_caching_state c_state)
++{
++ int i, j;
++ struct page *cur_page;
++ int ret;
++
++ if (ttm->caching_state == c_state)
++ return 0;
++
++ if (c_state != tt_cached) {
++ ret = ttm_tt_populate(ttm);
++ if (unlikely(ret != 0))
++ return ret;
++ }
++
++ if (ttm->caching_state == tt_cached)
++ ttm_tt_cache_flush(ttm->pages, ttm->num_pages);
++
++ for (i = 0; i < ttm->num_pages; ++i) {
++ cur_page = ttm->pages[i];
++ if (likely(cur_page != NULL)) {
++ ret = ttm_tt_set_page_caching(cur_page, c_state);
++ if (unlikely(ret != 0))
++ goto out_err;
++ }
++ }
++
++ ttm->caching_state = c_state;
++
++ return 0;
++
++out_err:
++ for (j = 0; j < i; ++j) {
++ cur_page = ttm->pages[j];
++ if (likely(cur_page != NULL)) {
++ (void)ttm_tt_set_page_caching(cur_page,
++ ttm->caching_state);
++ }
++ }
++
++ return ret;
++}
++
++int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
++{
++ enum ttm_caching_state state;
++
++ if (placement & TTM_PL_FLAG_WC)
++ state = tt_wc;
++ else if (placement & TTM_PL_FLAG_UNCACHED)
++ state = tt_uncached;
++ else
++ state = tt_cached;
++
++ return ttm_tt_set_caching(ttm, state);
++}
++
++static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
++{
++ int i;
++ struct page *cur_page;
++ struct ttm_backend *be = ttm->be;
++
++ if (be)
++ be->func->clear(be);
++ (void)ttm_tt_set_caching(ttm, tt_cached);
++ for (i = 0; i < ttm->num_pages; ++i) {
++ cur_page = ttm->pages[i];
++ ttm->pages[i] = NULL;
++ if (cur_page) {
++ if (page_count(cur_page) != 1)
++ printk(KERN_ERR
++ "Erroneous page count. Leaking pages.\n");
++ ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
++ PageHighMem(cur_page));
++ __free_page(cur_page);
++ }
++ }
++ ttm->state = tt_unpopulated;
++ ttm->first_himem_page = ttm->num_pages;
++ ttm->last_lomem_page = -1;
++}
++
++void ttm_tt_destroy(struct ttm_tt *ttm)
++{
++ struct ttm_backend *be;
++
++ if (unlikely(ttm == NULL))
++ return;
++
++ be = ttm->be;
++ if (likely(be != NULL)) {
++ be->func->destroy(be);
++ ttm->be = NULL;
++ }
++
++ if (likely(ttm->pages != NULL)) {
++ if (ttm->page_flags & TTM_PAGE_FLAG_USER)
++ ttm_tt_free_user_pages(ttm);
++ else
++ ttm_tt_free_alloced_pages(ttm);
++
++ ttm_tt_free_page_directory(ttm);
++ }
++
++ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
++ ttm->swap_storage)
++ fput(ttm->swap_storage);
++
++ kfree(ttm);
++}
++
++int ttm_tt_set_user(struct ttm_tt *ttm,
++ struct task_struct *tsk,
++ unsigned long start, unsigned long num_pages)
++{
++ struct mm_struct *mm = tsk->mm;
++ int ret;
++ int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
++ struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;
++
++ BUG_ON(num_pages != ttm->num_pages);
++ BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
++
++ /**
++ * Account user pages as lowmem pages for now.
++ */
++
++ ret = ttm_mem_global_alloc(mem_glob,
++ num_pages * PAGE_SIZE,
++ false,
++ false,
++ false);
++ if (unlikely(ret != 0))
++ return ret;
++
++ down_read(&mm->mmap_sem);
++ ret = get_user_pages(tsk, mm, start, num_pages,
++ write, 0, ttm->pages, NULL);
++ up_read(&mm->mmap_sem);
++
++ if (ret != num_pages && write) {
++ ttm_tt_free_user_pages(ttm);
++ ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
++ return -ENOMEM;
++ }
++
++ ttm->tsk = tsk;
++ ttm->start = start;
++ ttm->state = tt_unbound;
++
++ return 0;
++}
++
++struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
++ uint32_t page_flags, struct page *dummy_read_page)
++{
++ struct ttm_bo_driver *bo_driver = bdev->driver;
++ struct ttm_tt *ttm;
++
++ if (!bo_driver)
++ return NULL;
++
++ ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
++ if (!ttm)
++ return NULL;
++
++ ttm->bdev = bdev;
++
++ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ ttm->first_himem_page = ttm->num_pages;
++ ttm->last_lomem_page = -1;
++ ttm->caching_state = tt_cached;
++ ttm->page_flags = page_flags;
++
++ ttm->dummy_read_page = dummy_read_page;
++
++ ttm_tt_alloc_page_directory(ttm);
++ if (!ttm->pages) {
++ ttm_tt_destroy(ttm);
++ printk(KERN_ERR "Failed allocating page table\n");
++ return NULL;
++ }
++ ttm->be = bo_driver->create_ttm_backend_entry(bdev);
++ if (!ttm->be) {
++ ttm_tt_destroy(ttm);
++ printk(KERN_ERR "Failed creating ttm backend entry\n");
++ return NULL;
++ }
++ ttm->state = tt_unpopulated;
++ return ttm;
++}
++
++/**
++ * ttm_tt_unbind:
++ *
++ * @ttm: the object to unbind from the graphics device
++ *
++ * Unbind an object from the aperture. This removes the mappings
++ * from the graphics device and flushes caches if necessary.
++ */
++void ttm_tt_unbind(struct ttm_tt *ttm)
++{
++ int ret;
++ struct ttm_backend *be = ttm->be;
++
++ if (ttm->state == tt_bound) {
++ ret = be->func->unbind(be);
++ BUG_ON(ret);
++ }
++ ttm->state = tt_unbound;
++}
++
++/**
++ * ttm_tt_bind:
++ *
++ * @ttm: the ttm object to bind to the graphics device
++ *
++ * @bo_mem: the aperture memory region which will hold the object
++ *
++ * Bind a ttm object to the aperture. This ensures that the necessary
++ * pages are allocated, flushes CPU caches as needed and marks the
++ * ttm as DRM_TTM_PAGE_USER_DIRTY to indicate that it may have been
++ * modified by the GPU
++ */
++
++int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
++{
++ int ret = 0;
++ struct ttm_backend *be;
++
++ if (!ttm)
++ return -EINVAL;
++
++ if (ttm->state == tt_bound)
++ return 0;
++
++ be = ttm->be;
++
++ ret = ttm_tt_populate(ttm);
++ if (ret)
++ return ret;
++
++ ret = be->func->bind(be, bo_mem);
++ if (ret) {
++ printk(KERN_ERR "Couldn't bind backend.\n");
++ return ret;
++ }
++
++ ttm->state = tt_bound;
++
++ if (ttm->page_flags & TTM_PAGE_FLAG_USER)
++ ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
++ return 0;
++}
++
++static int ttm_tt_swapin(struct ttm_tt *ttm)
++{
++ struct address_space *swap_space;
++ struct file *swap_storage;
++ struct page *from_page;
++ struct page *to_page;
++ void *from_virtual;
++ void *to_virtual;
++ int i;
++ int ret;
++
++ if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
++ ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
++ ttm->num_pages);
++ if (unlikely(ret != 0))
++ return ret;
++
++ ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
++ return 0;
++ }
++
++ swap_storage = ttm->swap_storage;
++ BUG_ON(swap_storage == NULL);
++
++ swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
++
++ for (i = 0; i < ttm->num_pages; ++i) {
++ from_page = read_mapping_page(swap_space, i, NULL);
++ if (IS_ERR(from_page))
++ goto out_err;
++ to_page = __ttm_tt_get_page(ttm, i);
++ if (unlikely(to_page == NULL))
++ goto out_err;
++
++ preempt_disable();
++ from_virtual = kmap_atomic(from_page, KM_USER0);
++ to_virtual = kmap_atomic(to_page, KM_USER1);
++ memcpy(to_virtual, from_virtual, PAGE_SIZE);
++ kunmap_atomic(to_virtual, KM_USER1);
++ kunmap_atomic(from_virtual, KM_USER0);
++ preempt_enable();
++ page_cache_release(from_page);
++ }
++
++ if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
++ fput(swap_storage);
++ ttm->swap_storage = NULL;
++ ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
++
++ return 0;
++out_err:
++ ttm_tt_free_alloced_pages(ttm);
++ return -ENOMEM;
++}
++
++int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
++{
++ struct address_space *swap_space;
++ struct file *swap_storage;
++ struct page *from_page;
++ struct page *to_page;
++ void *from_virtual;
++ void *to_virtual;
++ int i;
++
++ BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
++ BUG_ON(ttm->caching_state != tt_cached);
++
++ /*
++ * For user buffers, just unpin the pages, as there should be
++ * vma references.
++ */
++
++ if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
++ ttm_tt_free_user_pages(ttm);
++ ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
++ ttm->swap_storage = NULL;
++ return 0;
++ }
++
++ if (!persistant_swap_storage) {
++ swap_storage = shmem_file_setup("ttm swap",
++ ttm->num_pages << PAGE_SHIFT,
++ 0);
++ if (unlikely(IS_ERR(swap_storage))) {
++ printk(KERN_ERR "Failed allocating swap storage.\n");
++ return -ENOMEM;
++ }
++ } else
++ swap_storage = persistant_swap_storage;
++
++ swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
++
++ for (i = 0; i < ttm->num_pages; ++i) {
++ from_page = ttm->pages[i];
++ if (unlikely(from_page == NULL))
++ continue;
++ to_page = read_mapping_page(swap_space, i, NULL);
++ if (unlikely(to_page == NULL))
++ goto out_err;
++
++ preempt_disable();
++ from_virtual = kmap_atomic(from_page, KM_USER0);
++ to_virtual = kmap_atomic(to_page, KM_USER1);
++ memcpy(to_virtual, from_virtual, PAGE_SIZE);
++ kunmap_atomic(to_virtual, KM_USER1);
++ kunmap_atomic(from_virtual, KM_USER0);
++ preempt_enable();
++ set_page_dirty(to_page);
++ mark_page_accessed(to_page);
++ /* unlock_page(to_page); */
++ page_cache_release(to_page);
++ }
++
++ ttm_tt_free_alloced_pages(ttm);
++ ttm->swap_storage = swap_storage;
++ ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
++ if (persistant_swap_storage)
++ ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;
++
++ return 0;
++out_err:
++ if (!persistant_swap_storage)
++ fput(swap_storage);
++
++ return -ENOMEM;
++}
+diff --git a/drivers/gpu/drm/mrst/drv/ttm/ttm_userobj_api.h b/drivers/gpu/drm/mrst/drv/ttm/ttm_userobj_api.h
+new file mode 100644
+index 0000000..36df724
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/drv/ttm/ttm_userobj_api.h
+@@ -0,0 +1,72 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _TTM_USEROBJ_API_H_
++#define _TTM_USEROBJ_API_H_
++
++#include "ttm_placement_user.h"
++#include "ttm_fence_user.h"
++#include "ttm_object.h"
++#include "ttm_fence_api.h"
++#include "ttm_bo_api.h"
++
++struct ttm_lock;
++
++/*
++ * User ioctls.
++ */
++
++extern int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
++ struct ttm_bo_device *bdev,
++ struct ttm_lock *lock, void *data);
++extern int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
++ struct ttm_bo_device *bdev,
++ struct ttm_lock *lock, void *data);
++extern int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_pl_unref_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_pl_synccpu_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
++ struct ttm_lock *lock, void *data);
++extern int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_fence_signaled_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_fence_finish_ioctl(struct ttm_object_file *tfile, void *data);
++extern int ttm_fence_unref_ioctl(struct ttm_object_file *tfile, void *data);
++
++extern int
++ttm_fence_user_create(struct ttm_fence_device *fdev,
++ struct ttm_object_file *tfile,
++ uint32_t fence_class,
++ uint32_t fence_types,
++ uint32_t create_flags,
++ struct ttm_fence_object **fence, uint32_t * user_handle);
++
++extern struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file
++ *tfile,
++ uint32_t handle);
++
++extern int
++ttm_pl_verify_access(struct ttm_buffer_object *bo,
++ struct ttm_object_file *tfile);
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/COPYING b/drivers/gpu/drm/mrst/pvr/COPYING
+new file mode 100644
+index 0000000..80dd76b
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/COPYING
+@@ -0,0 +1,351 @@
++
++This software is Copyright (C) 2008 Imagination Technologies Ltd.
++ All rights reserved.
++
++You may use, distribute and copy this software under the terms of
++GNU General Public License version 2, which is displayed below.
++
++-------------------------------------------------------------------------
++
++ GNU GENERAL PUBLIC LICENSE
++ Version 2, June 1991
++
++ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
++ 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ Everyone is permitted to copy and distribute verbatim copies
++ of this license document, but changing it is not allowed.
++
++ Preamble
++
++ The licenses for most software are designed to take away your
++freedom to share and change it. By contrast, the GNU General Public
++License is intended to guarantee your freedom to share and change free
++software--to make sure the software is free for all its users. This
++General Public License applies to most of the Free Software
++Foundation's software and to any other program whose authors commit to
++using it. (Some other Free Software Foundation software is covered by
++the GNU Library General Public License instead.) You can apply it to
++your programs, too.
++
++ When we speak of free software, we are referring to freedom, not
++price. Our General Public Licenses are designed to make sure that you
++have the freedom to distribute copies of free software (and charge for
++this service if you wish), that you receive source code or can get it
++if you want it, that you can change the software or use pieces of it
++in new free programs; and that you know you can do these things.
++
++ To protect your rights, we need to make restrictions that forbid
++anyone to deny you these rights or to ask you to surrender the rights.
++These restrictions translate to certain responsibilities for you if you
++distribute copies of the software, or if you modify it.
++
++ For example, if you distribute copies of such a program, whether
++gratis or for a fee, you must give the recipients all the rights that
++you have. You must make sure that they, too, receive or can get the
++source code. And you must show them these terms so they know their
++rights.
++
++ We protect your rights with two steps: (1) copyright the software, and
++(2) offer you this license which gives you legal permission to copy,
++distribute and/or modify the software.
++
++ Also, for each author's protection and ours, we want to make certain
++that everyone understands that there is no warranty for this free
++software. If the software is modified by someone else and passed on, we
++want its recipients to know that what they have is not the original, so
++that any problems introduced by others will not reflect on the original
++authors' reputations.
++
++ Finally, any free program is threatened constantly by software
++patents. We wish to avoid the danger that redistributors of a free
++program will individually obtain patent licenses, in effect making the
++program proprietary. To prevent this, we have made it clear that any
++patent must be licensed for everyone's free use or not licensed at all.
++
++ The precise terms and conditions for copying, distribution and
++modification follow.
++
++ GNU GENERAL PUBLIC LICENSE
++ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
++
++ 0. This License applies to any program or other work which contains
++a notice placed by the copyright holder saying it may be distributed
++under the terms of this General Public License. The "Program", below,
++refers to any such program or work, and a "work based on the Program"
++means either the Program or any derivative work under copyright law:
++that is to say, a work containing the Program or a portion of it,
++either verbatim or with modifications and/or translated into another
++language. (Hereinafter, translation is included without limitation in
++the term "modification".) Each licensee is addressed as "you".
++
++Activities other than copying, distribution and modification are not
++covered by this License; they are outside its scope. The act of
++running the Program is not restricted, and the output from the Program
++is covered only if its contents constitute a work based on the
++Program (independent of having been made by running the Program).
++Whether that is true depends on what the Program does.
++
++ 1. You may copy and distribute verbatim copies of the Program's
++source code as you receive it, in any medium, provided that you
++conspicuously and appropriately publish on each copy an appropriate
++copyright notice and disclaimer of warranty; keep intact all the
++notices that refer to this License and to the absence of any warranty;
++and give any other recipients of the Program a copy of this License
++along with the Program.
++
++You may charge a fee for the physical act of transferring a copy, and
++you may at your option offer warranty protection in exchange for a fee.
++
++ 2. You may modify your copy or copies of the Program or any portion
++of it, thus forming a work based on the Program, and copy and
++distribute such modifications or work under the terms of Section 1
++above, provided that you also meet all of these conditions:
++
++ a) You must cause the modified files to carry prominent notices
++ stating that you changed the files and the date of any change.
++
++ b) You must cause any work that you distribute or publish, that in
++ whole or in part contains or is derived from the Program or any
++ part thereof, to be licensed as a whole at no charge to all third
++ parties under the terms of this License.
++
++ c) If the modified program normally reads commands interactively
++ when run, you must cause it, when started running for such
++ interactive use in the most ordinary way, to print or display an
++ announcement including an appropriate copyright notice and a
++ notice that there is no warranty (or else, saying that you provide
++ a warranty) and that users may redistribute the program under
++ these conditions, and telling the user how to view a copy of this
++ License. (Exception: if the Program itself is interactive but
++ does not normally print such an announcement, your work based on
++ the Program is not required to print an announcement.)
++
++These requirements apply to the modified work as a whole. If
++identifiable sections of that work are not derived from the Program,
++and can be reasonably considered independent and separate works in
++themselves, then this License, and its terms, do not apply to those
++sections when you distribute them as separate works. But when you
++distribute the same sections as part of a whole which is a work based
++on the Program, the distribution of the whole must be on the terms of
++this License, whose permissions for other licensees extend to the
++entire whole, and thus to each and every part regardless of who wrote it.
++
++Thus, it is not the intent of this section to claim rights or contest
++your rights to work written entirely by you; rather, the intent is to
++exercise the right to control the distribution of derivative or
++collective works based on the Program.
++
++In addition, mere aggregation of another work not based on the Program
++with the Program (or with a work based on the Program) on a volume of
++a storage or distribution medium does not bring the other work under
++the scope of this License.
++
++ 3. You may copy and distribute the Program (or a work based on it,
++under Section 2) in object code or executable form under the terms of
++Sections 1 and 2 above provided that you also do one of the following:
++
++ a) Accompany it with the complete corresponding machine-readable
++ source code, which must be distributed under the terms of Sections
++ 1 and 2 above on a medium customarily used for software interchange; or,
++
++ b) Accompany it with a written offer, valid for at least three
++ years, to give any third party, for a charge no more than your
++ cost of physically performing source distribution, a complete
++ machine-readable copy of the corresponding source code, to be
++ distributed under the terms of Sections 1 and 2 above on a medium
++ customarily used for software interchange; or,
++
++ c) Accompany it with the information you received as to the offer
++ to distribute corresponding source code. (This alternative is
++ allowed only for noncommercial distribution and only if you
++ received the program in object code or executable form with such
++ an offer, in accord with Subsection b above.)
++
++The source code for a work means the preferred form of the work for
++making modifications to it. For an executable work, complete source
++code means all the source code for all modules it contains, plus any
++associated interface definition files, plus the scripts used to
++control compilation and installation of the executable. However, as a
++special exception, the source code distributed need not include
++anything that is normally distributed (in either source or binary
++form) with the major components (compiler, kernel, and so on) of the
++operating system on which the executable runs, unless that component
++itself accompanies the executable.
++
++If distribution of executable or object code is made by offering
++access to copy from a designated place, then offering equivalent
++access to copy the source code from the same place counts as
++distribution of the source code, even though third parties are not
++compelled to copy the source along with the object code.
++
++ 4. You may not copy, modify, sublicense, or distribute the Program
++except as expressly provided under this License. Any attempt
++otherwise to copy, modify, sublicense or distribute the Program is
++void, and will automatically terminate your rights under this License.
++However, parties who have received copies, or rights, from you under
++this License will not have their licenses terminated so long as such
++parties remain in full compliance.
++
++ 5. You are not required to accept this License, since you have not
++signed it. However, nothing else grants you permission to modify or
++distribute the Program or its derivative works. These actions are
++prohibited by law if you do not accept this License. Therefore, by
++modifying or distributing the Program (or any work based on the
++Program), you indicate your acceptance of this License to do so, and
++all its terms and conditions for copying, distributing or modifying
++the Program or works based on it.
++
++ 6. Each time you redistribute the Program (or any work based on the
++Program), the recipient automatically receives a license from the
++original licensor to copy, distribute or modify the Program subject to
++these terms and conditions. You may not impose any further
++restrictions on the recipients' exercise of the rights granted herein.
++You are not responsible for enforcing compliance by third parties to
++this License.
++
++ 7. If, as a consequence of a court judgment or allegation of patent
++infringement or for any other reason (not limited to patent issues),
++conditions are imposed on you (whether by court order, agreement or
++otherwise) that contradict the conditions of this License, they do not
++excuse you from the conditions of this License. If you cannot
++distribute so as to satisfy simultaneously your obligations under this
++License and any other pertinent obligations, then as a consequence you
++may not distribute the Program at all. For example, if a patent
++license would not permit royalty-free redistribution of the Program by
++all those who receive copies directly or indirectly through you, then
++the only way you could satisfy both it and this License would be to
++refrain entirely from distribution of the Program.
++
++If any portion of this section is held invalid or unenforceable under
++any particular circumstance, the balance of the section is intended to
++apply and the section as a whole is intended to apply in other
++circumstances.
++
++It is not the purpose of this section to induce you to infringe any
++patents or other property right claims or to contest validity of any
++such claims; this section has the sole purpose of protecting the
++integrity of the free software distribution system, which is
++implemented by public license practices. Many people have made
++generous contributions to the wide range of software distributed
++through that system in reliance on consistent application of that
++system; it is up to the author/donor to decide if he or she is willing
++to distribute software through any other system and a licensee cannot
++impose that choice.
++
++This section is intended to make thoroughly clear what is believed to
++be a consequence of the rest of this License.
++
++ 8. If the distribution and/or use of the Program is restricted in
++certain countries either by patents or by copyrighted interfaces, the
++original copyright holder who places the Program under this License
++may add an explicit geographical distribution limitation excluding
++those countries, so that distribution is permitted only in or among
++countries not thus excluded. In such case, this License incorporates
++the limitation as if written in the body of this License.
++
++ 9. The Free Software Foundation may publish revised and/or new versions
++of the General Public License from time to time. Such new versions will
++be similar in spirit to the present version, but may differ in detail to
++address new problems or concerns.
++
++Each version is given a distinguishing version number. If the Program
++specifies a version number of this License which applies to it and "any
++later version", you have the option of following the terms and conditions
++either of that version or of any later version published by the Free
++Software Foundation. If the Program does not specify a version number of
++this License, you may choose any version ever published by the Free Software
++Foundation.
++
++ 10. If you wish to incorporate parts of the Program into other free
++programs whose distribution conditions are different, write to the author
++to ask for permission. For software which is copyrighted by the Free
++Software Foundation, write to the Free Software Foundation; we sometimes
++make exceptions for this. Our decision will be guided by the two goals
++of preserving the free status of all derivatives of our free software and
++of promoting the sharing and reuse of software generally.
++
++ NO WARRANTY
++
++ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
++FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
++OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
++PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
++OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
++MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
++TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
++PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
++REPAIR OR CORRECTION.
++
++ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
++WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
++REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
++INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
++OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
++TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
++YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
++PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
++POSSIBILITY OF SUCH DAMAGES.
++
++ END OF TERMS AND CONDITIONS
++
++ Appendix: How to Apply These Terms to Your New Programs
++
++ If you develop a new program, and you want it to be of the greatest
++possible use to the public, the best way to achieve this is to make it
++free software which everyone can redistribute and change under these terms.
++
++ To do so, attach the following notices to the program. It is safest
++to attach them to the start of each source file to most effectively
++convey the exclusion of warranty; and each file should have at least
++the "copyright" line and a pointer to where the full notice is found.
++
++ <one line to give the program's name and a brief idea of what it does.>
++ Copyright (C) 19yy <name of author>
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++
++Also add information on how to contact you by electronic and paper mail.
++
++If the program is interactive, make it output a short notice like this
++when it starts in an interactive mode:
++
++ Gnomovision version 69, Copyright (C) 19yy name of author
++ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
++ This is free software, and you are welcome to redistribute it
++ under certain conditions; type `show c' for details.
++
++The hypothetical commands `show w' and `show c' should show the appropriate
++parts of the General Public License. Of course, the commands you use may
++be called something other than `show w' and `show c'; they could even be
++mouse-clicks or menu items--whatever suits your program.
++
++You should also get your employer (if you work as a programmer) or your
++school, if any, to sign a "copyright disclaimer" for the program, if
++necessary. Here is a sample; alter the names:
++
++ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
++ `Gnomovision' (which makes passes at compilers) written by James Hacker.
++
++ <signature of Ty Coon>, 1 April 1989
++ Ty Coon, President of Vice
++
++This General Public License does not permit incorporating your program into
++proprietary programs. If your program is a subroutine library, you may
++consider it more useful to permit linking proprietary applications with the
++library. If this is what you want to do, use the GNU Library General
++Public License instead of this License.
++
++-------------------------------------------------------------------------
++
+diff --git a/drivers/gpu/drm/mrst/pvr/INSTALL b/drivers/gpu/drm/mrst/pvr/INSTALL
+new file mode 100644
+index 0000000..e4c1069
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/INSTALL
+@@ -0,0 +1,76 @@
++
++SGX Embedded Systems DDK for the Linux kernel.
++Copyright (C) 2008 Imagination Technologies Ltd. All rights reserved.
++======================================================================
++
++This file covers how to build and install the Imagination Technologies
++SGX DDK for the Linux kernel.
++
++
++Build System Environment Variables
++-------------------------------------------
++
++The SGX DDK Build scripts depend on a number of environment variables
++being setup before compilation or installation of DDK software can
++commence:
++
++$DISCIMAGE
++The DDK Build scripts install files to the location specified by the
++DISCIMAGE environment variable, when the make install target is used.
++This should point to the target filesystem.
++$ export DISCIMAGE=/path/to/filesystem
++
++$KERNELDIR
++When building the SGX DDK kernel module, the build needs access
++to the headers of the Linux kernel
++$ export KERNELDIR=/path/to/kernel
++
++$PATH
++If a cross compiler is being used make sure the PATH environment variable
++includes the path to the toolchain
++$ export PATH=$PATH:/path/to/toolchain
++
++$CROSS_COMPILE
++Since the SGX DDK Build scripts are geared toward a cross-compilation
++workflow, the CROSS_COMPILE environment variable needs to be set
++$ export CROSS_COMPILE=toolchain-prefix-
++
++
++Build and Install Instructions
++-------------------------------------------
++
++The SGX DDK configures different target builds within directories under
++eurasiacon/build/linux/.
++
++The supported build targets are:
++
++ all Makes everything
++ clean Removes all intermediate files created by a build.
++ clobber Removes all binaries for all builds as well.
++ install Runs the install script generated by the build.
++
++The following variables may be set on the command line to influence a build.
++
++ BUILD The type of build being performed.
++ Alternatives are release, timing or debug.
++ CFLAGS Build dependent optimisations and debug information flags.
++ SILENT Determines whether text of commands is produced during build.
++
++To build for, change to the appropriate target directory, e.g.:
++$ cd eurasiacon/build/linux/platform/kbuild
++
++Issue the make command:
++$ make BUILD=debug all
++
++The DDK software must be installed by the root user. Become the root user:
++$ su
++
++Install the DDK software:
++$ make install
++
++Become an ordinary user again:
++$ exit
++
++
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/README b/drivers/gpu/drm/mrst/pvr/README
+new file mode 100644
+index 0000000..8039c39
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/README
+@@ -0,0 +1,48 @@
++
++SGX Embedded Systems DDK for Linux kernel.
++Copyright (C) 2008 Imagination Technologies Ltd. All rights reserved.
++======================================================================
++
++
++About
++-------------------------------------------
++
++This is the Imagination Technologies SGX DDK for the Linux kernel.
++
++
++License
++-------------------------------------------
++
++You may use, distribute and copy this software under the terms of
++GNU General Public License version 2.
++
++The full GNU General Public License version 2 is included in this
++distribution in the file called "COPYING".
++
++
++Build and Install Instructions
++-------------------------------------------
++
++For details see the "INSTALL" file.
++
++To build for, change to the appropriate target directory, e.g.:
++$ cd eurasiacon/build/linux/platform/kbuild
++
++Issue the make command:
++$ make BUILD=debug all
++
++The DDK software must be installed by the root user. Become the root user:
++$ su
++
++Install the DDK software:
++$ make install
++
++Become an ordinary user again:
++$ exit
++
++
++Contact information:
++-------------------------------------------
++
++Imagination Technologies Ltd. <gpl-support@imgtec.com>
++Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+diff --git a/drivers/gpu/drm/mrst/pvr/eurasiacon/.gitignore b/drivers/gpu/drm/mrst/pvr/eurasiacon/.gitignore
+new file mode 100644
+index 0000000..f558f8b
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/eurasiacon/.gitignore
+@@ -0,0 +1,6 @@
++bin_pc_i686*
++tmp_pc_i686*
++host_pc_i686*
++binary_pc_i686*
++*.o
++*.o.cmd
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/dbgdrvif.h b/drivers/gpu/drm/mrst/pvr/include4/dbgdrvif.h
+new file mode 100644
+index 0000000..e65e551
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/dbgdrvif.h
+@@ -0,0 +1,298 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _DBGDRVIF_
++#define _DBGDRVIF_
++
++
++#include "ioctldef.h"
++
++#define DEBUG_CAPMODE_FRAMED 0x00000001UL
++#define DEBUG_CAPMODE_CONTINUOUS 0x00000002UL
++#define DEBUG_CAPMODE_HOTKEY 0x00000004UL
++
++#define DEBUG_OUTMODE_STANDARDDBG 0x00000001UL
++#define DEBUG_OUTMODE_MONO 0x00000002UL
++#define DEBUG_OUTMODE_STREAMENABLE 0x00000004UL
++#define DEBUG_OUTMODE_ASYNC 0x00000008UL
++#define DEBUG_OUTMODE_SGXVGA 0x00000010UL
++
++#define DEBUG_FLAGS_USE_NONPAGED_MEM 0x00000001UL
++#define DEBUG_FLAGS_NO_BUF_EXPANDSION 0x00000002UL
++#define DEBUG_FLAGS_ENABLESAMPLE 0x00000004UL
++
++#define DEBUG_FLAGS_TEXTSTREAM 0x80000000UL
++
++#define DEBUG_LEVEL_0 0x00000001UL
++#define DEBUG_LEVEL_1 0x00000003UL
++#define DEBUG_LEVEL_2 0x00000007UL
++#define DEBUG_LEVEL_3 0x0000000FUL
++#define DEBUG_LEVEL_4 0x0000001FUL
++#define DEBUG_LEVEL_5 0x0000003FUL
++#define DEBUG_LEVEL_6 0x0000007FUL
++#define DEBUG_LEVEL_7 0x000000FFUL
++#define DEBUG_LEVEL_8 0x000001FFUL
++#define DEBUG_LEVEL_9 0x000003FFUL
++#define DEBUG_LEVEL_10 0x000007FFUL
++#define DEBUG_LEVEL_11 0x00000FFFUL
++
++#define DEBUG_LEVEL_SEL0 0x00000001UL
++#define DEBUG_LEVEL_SEL1 0x00000002UL
++#define DEBUG_LEVEL_SEL2 0x00000004UL
++#define DEBUG_LEVEL_SEL3 0x00000008UL
++#define DEBUG_LEVEL_SEL4 0x00000010UL
++#define DEBUG_LEVEL_SEL5 0x00000020UL
++#define DEBUG_LEVEL_SEL6 0x00000040UL
++#define DEBUG_LEVEL_SEL7 0x00000080UL
++#define DEBUG_LEVEL_SEL8 0x00000100UL
++#define DEBUG_LEVEL_SEL9 0x00000200UL
++#define DEBUG_LEVEL_SEL10 0x00000400UL
++#define DEBUG_LEVEL_SEL11 0x00000800UL
++
++#define DEBUG_SERVICE_IOCTL_BASE 0x800UL
++#define DEBUG_SERVICE_CREATESTREAM \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x01, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_DESTROYSTREAM \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x02, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETSTREAM \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x03, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITESTRING \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x04, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_READSTRING \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x05, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x06, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_READ \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x07, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETDEBUGMODE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x08, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETDEBUGOUTMODE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x09, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETDEBUGLEVEL \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0A, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETFRAME \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0B, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETFRAME \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0C, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_OVERRIDEMODE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0D, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_DEFAULTMODE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0E, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETSERVICETABLE \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0F, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITE2 \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x10, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITESTRINGCM \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x11, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITECM \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x12, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETMARKER \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x13, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETMARKER \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x14, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_ISCAPTUREFRAME \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x15, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITELF \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x16, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_READLF \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x17, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WAITFOREVENT \
++ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x18, \
++ METHOD_BUFFERED, FILE_ANY_ACCESS)
++
++
++typedef enum _DBG_EVENT_ {
++ DBG_EVENT_STREAM_DATA = 1
++} DBG_EVENT;
++
++typedef struct _DBG_IN_CREATESTREAM_ {
++ IMG_UINT32 ui32Pages;
++ IMG_UINT32 ui32CapMode;
++ IMG_UINT32 ui32OutMode;
++ IMG_CHAR *pszName;
++}DBG_IN_CREATESTREAM, *PDBG_IN_CREATESTREAM;
++
++typedef struct _DBG_IN_FINDSTREAM_ {
++ IMG_BOOL bResetStream;
++ IMG_CHAR *pszName;
++}DBG_IN_FINDSTREAM, *PDBG_IN_FINDSTREAM;
++
++typedef struct _DBG_IN_WRITESTRING_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Level;
++ IMG_CHAR *pszString;
++}DBG_IN_WRITESTRING, *PDBG_IN_WRITESTRING;
++
++typedef struct _DBG_IN_READSTRING_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32StringLen;
++ IMG_CHAR *pszString;
++} DBG_IN_READSTRING, *PDBG_IN_READSTRING;
++
++typedef struct _DBG_IN_SETDEBUGMODE_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Mode;
++ IMG_UINT32 ui32Start;
++ IMG_UINT32 ui32End;
++ IMG_UINT32 ui32SampleRate;
++} DBG_IN_SETDEBUGMODE, *PDBG_IN_SETDEBUGMODE;
++
++typedef struct _DBG_IN_SETDEBUGOUTMODE_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Mode;
++} DBG_IN_SETDEBUGOUTMODE, *PDBG_IN_SETDEBUGOUTMODE;
++
++typedef struct _DBG_IN_SETDEBUGLEVEL_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Level;
++} DBG_IN_SETDEBUGLEVEL, *PDBG_IN_SETDEBUGLEVEL;
++
++typedef struct _DBG_IN_SETFRAME_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Frame;
++} DBG_IN_SETFRAME, *PDBG_IN_SETFRAME;
++
++typedef struct _DBG_IN_WRITE_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Level;
++ IMG_UINT32 ui32TransferSize;
++ IMG_UINT8 *pui8InBuffer;
++} DBG_IN_WRITE, *PDBG_IN_WRITE;
++
++typedef struct _DBG_IN_READ_ {
++ IMG_VOID *pvStream;
++ IMG_BOOL bReadInitBuffer;
++ IMG_UINT32 ui32OutBufferSize;
++ IMG_UINT8 *pui8OutBuffer;
++} DBG_IN_READ, *PDBG_IN_READ;
++
++typedef struct _DBG_IN_OVERRIDEMODE_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Mode;
++} DBG_IN_OVERRIDEMODE, *PDBG_IN_OVERRIDEMODE;
++
++typedef struct _DBG_IN_ISCAPTUREFRAME_ {
++ IMG_VOID *pvStream;
++ IMG_BOOL bCheckPreviousFrame;
++} DBG_IN_ISCAPTUREFRAME, *PDBG_IN_ISCAPTUREFRAME;
++
++typedef struct _DBG_IN_SETMARKER_ {
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Marker;
++} DBG_IN_SETMARKER, *PDBG_IN_SETMARKER;
++
++typedef struct _DBG_IN_WRITE_LF_ {
++ IMG_UINT32 ui32Flags;
++ IMG_VOID *pvStream;
++ IMG_UINT32 ui32Level;
++ IMG_UINT32 ui32BufferSize;
++ IMG_UINT8 *pui8InBuffer;
++} DBG_IN_WRITE_LF, *PDBG_IN_WRITE_LF;
++
++#define WRITELF_FLAGS_RESETBUF 0x00000001UL
++
++typedef struct _DBG_STREAM_ {
++ struct _DBG_STREAM_ *psNext;
++ struct _DBG_STREAM_ *psInitStream;
++ IMG_BOOL bInitPhaseComplete;
++ IMG_UINT32 ui32Flags;
++ IMG_UINT32 ui32Base;
++ IMG_UINT32 ui32Size;
++ IMG_UINT32 ui32RPtr;
++ IMG_UINT32 ui32WPtr;
++ IMG_UINT32 ui32DataWritten;
++ IMG_UINT32 ui32CapMode;
++ IMG_UINT32 ui32OutMode;
++ IMG_UINT32 ui32DebugLevel;
++ IMG_UINT32 ui32DefaultMode;
++ IMG_UINT32 ui32Start;
++ IMG_UINT32 ui32End;
++ IMG_UINT32 ui32Current;
++ IMG_UINT32 ui32Access;
++ IMG_UINT32 ui32SampleRate;
++ IMG_UINT32 ui32Reserved;
++ IMG_UINT32 ui32Timeout;
++ IMG_UINT32 ui32Marker;
++ IMG_CHAR szName[30];
++} DBG_STREAM,*PDBG_STREAM;
++
++typedef struct _DBGKM_SERVICE_TABLE_ {
++ IMG_UINT32 ui32Size;
++ IMG_VOID * (IMG_CALLCONV *pfnCreateStream) (IMG_CHAR * pszName,IMG_UINT32 ui32CapMode,IMG_UINT32 ui32OutMode,IMG_UINT32 ui32Flags,IMG_UINT32 ui32Pages);
++ IMG_VOID (IMG_CALLCONV *pfnDestroyStream) (PDBG_STREAM psStream);
++ IMG_VOID * (IMG_CALLCONV *pfnFindStream) (IMG_CHAR * pszName, IMG_BOOL bResetInitBuffer);
++ IMG_UINT32 (IMG_CALLCONV *pfnWriteString) (PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level);
++ IMG_UINT32 (IMG_CALLCONV *pfnReadString) (PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit);
++ IMG_UINT32 (IMG_CALLCONV *pfnWriteBIN) (PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
++ IMG_UINT32 (IMG_CALLCONV *pfnReadBIN) (PDBG_STREAM psStream,IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBufferSize,IMG_UINT8 *pui8OutBuf);
++ IMG_VOID (IMG_CALLCONV *pfnSetCaptureMode) (PDBG_STREAM psStream,IMG_UINT32 ui32CapMode,IMG_UINT32 ui32Start,IMG_UINT32 ui32Stop,IMG_UINT32 ui32SampleRate);
++ IMG_VOID (IMG_CALLCONV *pfnSetOutputMode) (PDBG_STREAM psStream,IMG_UINT32 ui32OutMode);
++ IMG_VOID (IMG_CALLCONV *pfnSetDebugLevel) (PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel);
++ IMG_VOID (IMG_CALLCONV *pfnSetFrame) (PDBG_STREAM psStream,IMG_UINT32 ui32Frame);
++ IMG_UINT32 (IMG_CALLCONV *pfnGetFrame) (PDBG_STREAM psStream);
++ IMG_VOID (IMG_CALLCONV *pfnOverrideMode) (PDBG_STREAM psStream,IMG_UINT32 ui32Mode);
++ IMG_VOID (IMG_CALLCONV *pfnDefaultMode) (PDBG_STREAM psStream);
++ IMG_UINT32 (IMG_CALLCONV *pfnDBGDrivWrite2) (PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
++ IMG_UINT32 (IMG_CALLCONV *pfnWriteStringCM) (PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level);
++ IMG_UINT32 (IMG_CALLCONV *pfnWriteBINCM) (PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
++ IMG_VOID (IMG_CALLCONV *pfnSetMarker) (PDBG_STREAM psStream,IMG_UINT32 ui32Marker);
++ IMG_UINT32 (IMG_CALLCONV *pfnGetMarker) (PDBG_STREAM psStream);
++ IMG_VOID (IMG_CALLCONV *pfnStartInitPhase) (PDBG_STREAM psStream);
++ IMG_VOID (IMG_CALLCONV *pfnStopInitPhase) (PDBG_STREAM psStream);
++ IMG_BOOL (IMG_CALLCONV *pfnIsCaptureFrame) (PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame);
++ IMG_UINT32 (IMG_CALLCONV *pfnWriteLF) (PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags);
++ IMG_UINT32 (IMG_CALLCONV *pfnReadLF) (PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 *pui8OutBuf);
++ IMG_UINT32 (IMG_CALLCONV *pfnGetStreamOffset) (PDBG_STREAM psStream);
++ IMG_VOID (IMG_CALLCONV *pfnSetStreamOffset) (PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset);
++ IMG_BOOL (IMG_CALLCONV *pfnIsLastCaptureFrame) (PDBG_STREAM psStream);
++ IMG_VOID (IMG_CALLCONV *pfnWaitForEvent) (DBG_EVENT eEvent);
++} DBGKM_SERVICE_TABLE, *PDBGKM_SERVICE_TABLE;
++
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/img_defs.h b/drivers/gpu/drm/mrst/pvr/include4/img_defs.h
+new file mode 100644
+index 0000000..370300a
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/img_defs.h
+@@ -0,0 +1,108 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__IMG_DEFS_H__)
++#define __IMG_DEFS_H__
++
++#include "img_types.h"
++
++typedef enum img_tag_TriStateSwitch
++{
++ IMG_ON = 0x00,
++ IMG_OFF,
++ IMG_IGNORE
++
++} img_TriStateSwitch, * img_pTriStateSwitch;
++
++#define IMG_SUCCESS 0
++
++#define IMG_NO_REG 1
++
++#if defined (NO_INLINE_FUNCS)
++ #define INLINE
++ #define FORCE_INLINE
++#else
++#if defined (__cplusplus)
++ #define INLINE inline
++ #define FORCE_INLINE inline
++#else
++#if !defined(INLINE)
++ #define INLINE __inline
++#endif
++ #define FORCE_INLINE static __inline
++#endif
++#endif
++
++
++#ifndef PVR_UNREFERENCED_PARAMETER
++#define PVR_UNREFERENCED_PARAMETER(param) (param) = (param)
++#endif
++
++#ifdef __GNUC__
++#define unref__ __attribute__ ((unused))
++#else
++#define unref__
++#endif
++
++#ifndef _TCHAR_DEFINED
++#if defined(UNICODE)
++typedef unsigned short TCHAR, *PTCHAR, *PTSTR;
++#else
++typedef char TCHAR, *PTCHAR, *PTSTR;
++#endif
++#define _TCHAR_DEFINED
++#endif
++
++
++ #if defined(__linux__) || defined(__METAG)
++
++ #define IMG_CALLCONV
++ #define IMG_INTERNAL __attribute__ ((visibility ("hidden")))
++ #define IMG_EXPORT
++ #define IMG_IMPORT
++ #define IMG_RESTRICT __restrict__
++
++ #else
++ #error("define an OS")
++ #endif
++
++#ifndef IMG_ABORT
++ #define IMG_ABORT() abort()
++#endif
++
++#ifndef IMG_MALLOC
++ #define IMG_MALLOC(A) malloc (A)
++#endif
++
++#ifndef IMG_FREE
++ #define IMG_FREE(A) free (A)
++#endif
++
++#define IMG_CONST const
++
++#define IMG_FORMAT_PRINTF(x,y)
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/img_types.h b/drivers/gpu/drm/mrst/pvr/include4/img_types.h
+new file mode 100644
+index 0000000..1b55521
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/img_types.h
+@@ -0,0 +1,128 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __IMG_TYPES_H__
++#define __IMG_TYPES_H__
++
++#if !defined(IMG_ADDRSPACE_CPUVADDR_BITS)
++#define IMG_ADDRSPACE_CPUVADDR_BITS 32
++#endif
++
++#if !defined(IMG_ADDRSPACE_PHYSADDR_BITS)
++#define IMG_ADDRSPACE_PHYSADDR_BITS 32
++#endif
++
++typedef unsigned int IMG_UINT, *IMG_PUINT;
++typedef signed int IMG_INT, *IMG_PINT;
++
++typedef unsigned char IMG_UINT8, *IMG_PUINT8;
++typedef unsigned char IMG_BYTE, *IMG_PBYTE;
++typedef signed char IMG_INT8, *IMG_PINT8;
++typedef char IMG_CHAR, *IMG_PCHAR;
++
++typedef unsigned short IMG_UINT16, *IMG_PUINT16;
++typedef signed short IMG_INT16, *IMG_PINT16;
++typedef unsigned long IMG_UINT32, *IMG_PUINT32;
++typedef signed long IMG_INT32, *IMG_PINT32;
++
++#if !defined(IMG_UINT32_MAX)
++ #define IMG_UINT32_MAX 0xFFFFFFFFUL
++#endif
++
++ #if (defined(LINUX) || defined(__METAG))
++#if !defined(USE_CODE)
++ typedef unsigned long long IMG_UINT64, *IMG_PUINT64;
++ typedef long long IMG_INT64, *IMG_PINT64;
++#endif
++ #else
++
++ #error("define an OS")
++
++ #endif
++
++#if !(defined(LINUX) && defined (__KERNEL__))
++typedef float IMG_FLOAT, *IMG_PFLOAT;
++typedef double IMG_DOUBLE, *IMG_PDOUBLE;
++#endif
++
++typedef enum tag_img_bool
++{
++ IMG_FALSE = 0,
++ IMG_TRUE = 1,
++ IMG_FORCE_ALIGN = 0x7FFFFFFF
++} IMG_BOOL, *IMG_PBOOL;
++
++typedef void IMG_VOID, *IMG_PVOID;
++
++typedef IMG_INT32 IMG_RESULT;
++
++typedef IMG_UINT32 IMG_UINTPTR_T;
++
++typedef IMG_PVOID IMG_HANDLE;
++
++typedef void** IMG_HVOID, * IMG_PHVOID;
++
++typedef IMG_UINT32 IMG_SIZE_T;
++
++#define IMG_NULL 0
++
++
++typedef IMG_PVOID IMG_CPU_VIRTADDR;
++
++typedef struct
++{
++
++ IMG_UINT32 uiAddr;
++#define IMG_CAST_TO_DEVVADDR_UINT(var) (IMG_UINT32)(var)
++
++} IMG_DEV_VIRTADDR;
++
++typedef struct _IMG_CPU_PHYADDR
++{
++
++ IMG_UINTPTR_T uiAddr;
++} IMG_CPU_PHYADDR;
++
++typedef struct _IMG_DEV_PHYADDR
++{
++#if IMG_ADDRSPACE_PHYSADDR_BITS == 32
++
++ IMG_UINTPTR_T uiAddr;
++#else
++ IMG_UINT32 uiAddr;
++ IMG_UINT32 uiHighAddr;
++#endif
++} IMG_DEV_PHYADDR;
++
++typedef struct _IMG_SYS_PHYADDR
++{
++
++ IMG_UINTPTR_T uiAddr;
++} IMG_SYS_PHYADDR;
++
++#include "img_defs.h"
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/ioctldef.h b/drivers/gpu/drm/mrst/pvr/include4/ioctldef.h
+new file mode 100644
+index 0000000..cc69629
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/ioctldef.h
+@@ -0,0 +1,98 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __IOCTLDEF_H__
++#define __IOCTLDEF_H__
++
++#define MAKEIOCTLINDEX(i) (((i) >> 2) & 0xFFF)
++
++#ifndef CTL_CODE
++
++#define DEVICE_TYPE ULONG
++
++#define FILE_DEVICE_BEEP 0x00000001
++#define FILE_DEVICE_CD_ROM 0x00000002
++#define FILE_DEVICE_CD_ROM_FILE_SYSTEM 0x00000003
++#define FILE_DEVICE_CONTROLLER 0x00000004
++#define FILE_DEVICE_DATALINK 0x00000005
++#define FILE_DEVICE_DFS 0x00000006
++#define FILE_DEVICE_DISK 0x00000007
++#define FILE_DEVICE_DISK_FILE_SYSTEM 0x00000008
++#define FILE_DEVICE_FILE_SYSTEM 0x00000009
++#define FILE_DEVICE_INPORT_PORT 0x0000000a
++#define FILE_DEVICE_KEYBOARD 0x0000000b
++#define FILE_DEVICE_MAILSLOT 0x0000000c
++#define FILE_DEVICE_MIDI_IN 0x0000000d
++#define FILE_DEVICE_MIDI_OUT 0x0000000e
++#define FILE_DEVICE_MOUSE 0x0000000f
++#define FILE_DEVICE_MULTI_UNC_PROVIDER 0x00000010
++#define FILE_DEVICE_NAMED_PIPE 0x00000011
++#define FILE_DEVICE_NETWORK 0x00000012
++#define FILE_DEVICE_NETWORK_BROWSER 0x00000013
++#define FILE_DEVICE_NETWORK_FILE_SYSTEM 0x00000014
++#define FILE_DEVICE_NULL 0x00000015
++#define FILE_DEVICE_PARALLEL_PORT 0x00000016
++#define FILE_DEVICE_PHYSICAL_NETCARD 0x00000017
++#define FILE_DEVICE_PRINTER 0x00000018
++#define FILE_DEVICE_SCANNER 0x00000019
++#define FILE_DEVICE_SERIAL_MOUSE_PORT 0x0000001a
++#define FILE_DEVICE_SERIAL_PORT 0x0000001b
++#define FILE_DEVICE_SCREEN 0x0000001c
++#define FILE_DEVICE_SOUND 0x0000001d
++#define FILE_DEVICE_STREAMS 0x0000001e
++#define FILE_DEVICE_TAPE 0x0000001f
++#define FILE_DEVICE_TAPE_FILE_SYSTEM 0x00000020
++#define FILE_DEVICE_TRANSPORT 0x00000021
++#define FILE_DEVICE_UNKNOWN 0x00000022
++#define FILE_DEVICE_VIDEO 0x00000023
++#define FILE_DEVICE_VIRTUAL_DISK 0x00000024
++#define FILE_DEVICE_WAVE_IN 0x00000025
++#define FILE_DEVICE_WAVE_OUT 0x00000026
++#define FILE_DEVICE_8042_PORT 0x00000027
++#define FILE_DEVICE_NETWORK_REDIRECTOR 0x00000028
++#define FILE_DEVICE_BATTERY 0x00000029
++#define FILE_DEVICE_BUS_EXTENDER 0x0000002a
++#define FILE_DEVICE_MODEM 0x0000002b
++#define FILE_DEVICE_VDM 0x0000002c
++#define FILE_DEVICE_MASS_STORAGE 0x0000002d
++
++#define CTL_CODE( DeviceType, Function, Method, Access ) ( \
++ ((DeviceType) << 16) | ((Access) << 14) | ((Function) << 2) | (Method) \
++)
++
++#define METHOD_BUFFERED 0
++#define METHOD_IN_DIRECT 1
++#define METHOD_OUT_DIRECT 2
++#define METHOD_NEITHER 3
++
++#define FILE_ANY_ACCESS 0
++#define FILE_READ_ACCESS ( 0x0001 )
++#define FILE_WRITE_ACCESS ( 0x0002 )
++
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/pdumpdefs.h b/drivers/gpu/drm/mrst/pvr/include4/pdumpdefs.h
+new file mode 100644
+index 0000000..3a2e4c1
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/pdumpdefs.h
+@@ -0,0 +1,99 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__PDUMPDEFS_H__)
++#define __PDUMPDEFS_H__
++
++typedef enum _PDUMP_PIXEL_FORMAT_
++{
++ PVRSRV_PDUMP_PIXEL_FORMAT_UNSUPPORTED = 0,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB8 = 1,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB332 = 2,
++ PVRSRV_PDUMP_PIXEL_FORMAT_KRGB555 = 3,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB565 = 4,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB4444 = 5,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB1555 = 6,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB888 = 7,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8888 = 8,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV8 = 9,
++ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV4444 = 10,
++ PVRSRV_PDUMP_PIXEL_FORMAT_VY0UY1_8888 = 11,
++ PVRSRV_PDUMP_PIXEL_FORMAT_UY0VY1_8888 = 12,
++ PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888 = 13,
++ PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888 = 14,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV888 = 15,
++ PVRSRV_PDUMP_PIXEL_FORMAT_UYVY10101010 = 16,
++ PVRSRV_PDUMP_PIXEL_FORMAT_VYAUYA8888 = 17,
++ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV8888 = 18,
++ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV2101010 = 19,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV101010 = 20,
++ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y8 = 21,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_IMC2 = 22,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12 = 23,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL8 = 24,
++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL12 = 25,
++ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV8 = 26,
++ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8 = 27,
++ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y10 = 28,
++ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV10 = 29,
++ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV10 = 30,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR8888 = 31,
++ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA8888 = 32,
++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8332 = 33,
++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB555 = 34,
++ PVRSRV_PDUMP_PIXEL_FORMAT_F16 = 35,
++ PVRSRV_PDUMP_PIXEL_FORMAT_F32 = 36,
++ PVRSRV_PDUMP_PIXEL_FORMAT_L16 = 37,
++ PVRSRV_PDUMP_PIXEL_FORMAT_L32 = 38,
++
++ PVRSRV_PDUMP_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff
++
++} PDUMP_PIXEL_FORMAT;
++
++typedef enum _PDUMP_MEM_FORMAT_
++{
++ PVRSRV_PDUMP_MEM_FORMAT_STRIDE = 0,
++ PVRSRV_PDUMP_MEM_FORMAT_RESERVED = 1,
++ PVRSRV_PDUMP_MEM_FORMAT_TILED = 8,
++ PVRSRV_PDUMP_MEM_FORMAT_TWIDDLED = 9,
++ PVRSRV_PDUMP_MEM_FORMAT_HYBRID = 10,
++
++ PVRSRV_PDUMP_MEM_FORMAT_FORCE_I32 = 0x7fffffff
++} PDUMP_MEM_FORMAT;
++
++typedef enum _PDUMP_POLL_OPERATOR
++{
++ PDUMP_POLL_OPERATOR_EQUAL = 0,
++ PDUMP_POLL_OPERATOR_LESS = 1,
++ PDUMP_POLL_OPERATOR_LESSEQUAL = 2,
++ PDUMP_POLL_OPERATOR_GREATER = 3,
++ PDUMP_POLL_OPERATOR_GREATEREQUAL = 4,
++ PDUMP_POLL_OPERATOR_NOTEQUAL = 5,
++} PDUMP_POLL_OPERATOR;
++
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/pvr_debug.h b/drivers/gpu/drm/mrst/pvr/include4/pvr_debug.h
+new file mode 100644
+index 0000000..fe99f45
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/pvr_debug.h
+@@ -0,0 +1,127 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PVR_DEBUG_H__
++#define __PVR_DEBUG_H__
++
++
++#include "img_types.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#define PVR_MAX_DEBUG_MESSAGE_LEN (512)
++
++#define DBGPRIV_FATAL 0x01UL
++#define DBGPRIV_ERROR 0x02UL
++#define DBGPRIV_WARNING 0x04UL
++#define DBGPRIV_MESSAGE 0x08UL
++#define DBGPRIV_VERBOSE 0x10UL
++#define DBGPRIV_CALLTRACE 0x20UL
++#define DBGPRIV_ALLOC 0x40UL
++#define DBGPRIV_ALLLEVELS (DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING | DBGPRIV_MESSAGE | DBGPRIV_VERBOSE)
++
++
++
++#define PVR_DBG_FATAL DBGPRIV_FATAL,__FILE__, __LINE__
++#define PVR_DBG_ERROR DBGPRIV_ERROR,__FILE__, __LINE__
++#define PVR_DBG_WARNING DBGPRIV_WARNING,__FILE__, __LINE__
++#define PVR_DBG_MESSAGE DBGPRIV_MESSAGE,__FILE__, __LINE__
++#define PVR_DBG_VERBOSE DBGPRIV_VERBOSE,__FILE__, __LINE__
++#define PVR_DBG_CALLTRACE DBGPRIV_CALLTRACE,__FILE__, __LINE__
++#define PVR_DBG_ALLOC DBGPRIV_ALLOC,__FILE__, __LINE__
++
++#if !defined(PVRSRV_NEED_PVR_ASSERT) && defined(DEBUG)
++#define PVRSRV_NEED_PVR_ASSERT
++#endif
++
++#if defined(PVRSRV_NEED_PVR_ASSERT) && !defined(PVRSRV_NEED_PVR_DPF)
++#define PVRSRV_NEED_PVR_DPF
++#endif
++
++#if !defined(PVRSRV_NEED_PVR_TRACE) && (defined(DEBUG) || defined(TIMING))
++#define PVRSRV_NEED_PVR_TRACE
++#endif
++
++
++#if defined(PVRSRV_NEED_PVR_ASSERT)
++
++ #define PVR_ASSERT(EXPR) if (!(EXPR)) PVRSRVDebugAssertFail(__FILE__, __LINE__);
++
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVDebugAssertFail(const IMG_CHAR *pszFile,
++ IMG_UINT32 ui32Line);
++
++ #if defined(PVR_DBG_BREAK_ASSERT_FAIL)
++ #define PVR_DBG_BREAK PVRSRVDebugAssertFail("PVR_DBG_BREAK", 0)
++ #else
++ #define PVR_DBG_BREAK
++ #endif
++
++#else
++
++ #define PVR_ASSERT(EXPR)
++ #define PVR_DBG_BREAK
++
++#endif
++
++
++#if defined(PVRSRV_NEED_PVR_DPF)
++
++ #define PVR_DPF(X) PVRSRVDebugPrintf X
++
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel,
++ const IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Line,
++ const IMG_CHAR *pszFormat,
++ ...);
++
++#else
++
++ #define PVR_DPF(X)
++
++#endif
++
++
++#if defined(PVRSRV_NEED_PVR_TRACE)
++
++ #define PVR_TRACE(X) PVRSRVTrace X
++
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVTrace(const IMG_CHAR* pszFormat, ... );
++
++#else
++
++ #define PVR_TRACE(X)
++
++#endif
++
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/pvrmodule.h b/drivers/gpu/drm/mrst/pvr/include4/pvrmodule.h
+new file mode 100644
+index 0000000..5f77d1c
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/pvrmodule.h
+@@ -0,0 +1,31 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _PVRMODULE_H_
++#define _PVRMODULE_H_
++MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
++MODULE_LICENSE("GPL");
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/pvrversion.h b/drivers/gpu/drm/mrst/pvr/include4/pvrversion.h
+new file mode 100644
+index 0000000..585e49b
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/pvrversion.h
+@@ -0,0 +1,38 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _PVRVERSION_H_
++#define _PVRVERSION_H_
++
++#define PVRVERSION_MAJ 1
++#define PVRVERSION_MIN 5
++#define PVRVERSION_BRANCH 15
++#define PVRVERSION_BUILD 3014
++#define PVRVERSION_STRING "1.5.15.3014"
++#define PVRVERSION_FILE "eurasiacon.pj"
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/regpaths.h b/drivers/gpu/drm/mrst/pvr/include4/regpaths.h
+new file mode 100644
+index 0000000..8dac213
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/regpaths.h
+@@ -0,0 +1,43 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __REGPATHS_H__
++#define __REGPATHS_H__
++
++#define POWERVR_REG_ROOT "Drivers\\Display\\PowerVR"
++#define POWERVR_CHIP_KEY "\\SGX1\\"
++
++#define POWERVR_EURASIA_KEY "PowerVREurasia\\"
++
++#define POWERVR_SERVICES_KEY "\\Registry\\Machine\\System\\CurrentControlSet\\Services\\PowerVR\\"
++
++#define PVRSRV_REGISTRY_ROOT POWERVR_EURASIA_KEY "HWSettings\\PVRSRVKM"
++
++
++#define MAX_REG_STRING_SIZE 128
++
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/services.h b/drivers/gpu/drm/mrst/pvr/include4/services.h
+new file mode 100644
+index 0000000..7b8159d
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/services.h
+@@ -0,0 +1,872 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SERVICES_H__
++#define __SERVICES_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "img_defs.h"
++#include "servicesext.h"
++#include "pdumpdefs.h"
++
++
++#define PVRSRV_4K_PAGE_SIZE 4096UL
++
++#define PVRSRV_MAX_CMD_SIZE 1024
++
++#define PVRSRV_MAX_DEVICES 16
++
++#define EVENTOBJNAME_MAXLENGTH (50)
++
++#define PVRSRV_MEM_READ (1UL<<0)
++#define PVRSRV_MEM_WRITE (1UL<<1)
++#define PVRSRV_MEM_CACHE_CONSISTENT (1UL<<2)
++#define PVRSRV_MEM_NO_SYNCOBJ (1UL<<3)
++#define PVRSRV_MEM_INTERLEAVED (1UL<<4)
++#define PVRSRV_MEM_DUMMY (1UL<<5)
++#define PVRSRV_MEM_EDM_PROTECT (1UL<<6)
++#define PVRSRV_MEM_ZERO (1UL<<7)
++#define PVRSRV_MEM_USER_SUPPLIED_DEVVADDR (1UL<<8)
++#define PVRSRV_MEM_RAM_BACKED_ALLOCATION (1UL<<9)
++#define PVRSRV_MEM_NO_RESMAN (1UL<<10)
++#define PVRSRV_MEM_EXPORTED (1UL<<11)
++
++
++#define PVRSRV_HAP_CACHED (1UL<<12)
++#define PVRSRV_HAP_UNCACHED (1UL<<13)
++#define PVRSRV_HAP_WRITECOMBINE (1UL<<14)
++#define PVRSRV_HAP_CACHETYPE_MASK (PVRSRV_HAP_CACHED|PVRSRV_HAP_UNCACHED|PVRSRV_HAP_WRITECOMBINE)
++#define PVRSRV_HAP_KERNEL_ONLY (1UL<<15)
++#define PVRSRV_HAP_SINGLE_PROCESS (1UL<<16)
++#define PVRSRV_HAP_MULTI_PROCESS (1UL<<17)
++#define PVRSRV_HAP_FROM_EXISTING_PROCESS (1UL<<18)
++#define PVRSRV_HAP_NO_CPU_VIRTUAL (1UL<<19)
++#define PVRSRV_HAP_MAPTYPE_MASK (PVRSRV_HAP_KERNEL_ONLY \
++ |PVRSRV_HAP_SINGLE_PROCESS \
++ |PVRSRV_HAP_MULTI_PROCESS \
++ |PVRSRV_HAP_FROM_EXISTING_PROCESS \
++ |PVRSRV_HAP_NO_CPU_VIRTUAL)
++
++#define PVRSRV_MEM_CACHED PVRSRV_HAP_CACHED
++#define PVRSRV_MEM_UNCACHED PVRSRV_HAP_UNCACHED
++#define PVRSRV_MEM_WRITECOMBINE PVRSRV_HAP_WRITECOMBINE
++
++#define PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT (24)
++
++#define PVRSRV_MAP_NOUSERVIRTUAL (1UL<<27)
++
++#define PVRSRV_NO_CONTEXT_LOSS 0
++#define PVRSRV_SEVERE_LOSS_OF_CONTEXT 1
++#define PVRSRV_PRE_STATE_CHANGE_MASK 0x80
++
++
++#define PVRSRV_DEFAULT_DEV_COOKIE (1)
++
++
++#define PVRSRV_MISC_INFO_TIMER_PRESENT (1UL<<0)
++#define PVRSRV_MISC_INFO_CLOCKGATE_PRESENT (1UL<<1)
++#define PVRSRV_MISC_INFO_MEMSTATS_PRESENT (1UL<<2)
++#define PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT (1UL<<3)
++#define PVRSRV_MISC_INFO_DDKVERSION_PRESENT (1UL<<4)
++#define PVRSRV_MISC_INFO_CPUCACHEFLUSH_PRESENT (1UL<<5)
++
++#define PVRSRV_MISC_INFO_RESET_PRESENT (1UL<<31)
++
++#define PVRSRV_PDUMP_MAX_FILENAME_SIZE 20
++#define PVRSRV_PDUMP_MAX_COMMENT_SIZE 200
++
++
++#define PVRSRV_CHANGEDEVMEM_ATTRIBS_CACHECOHERENT 0x00000001
++
++#define PVRSRV_MAPEXTMEMORY_FLAGS_ALTERNATEVA 0x00000001
++#define PVRSRV_MAPEXTMEMORY_FLAGS_PHYSCONTIG 0x00000002
++
++#define PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC 0x00000001
++#define PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC 0x00000002
++
++typedef enum _PVRSRV_DEVICE_TYPE_
++{
++ PVRSRV_DEVICE_TYPE_UNKNOWN = 0 ,
++ PVRSRV_DEVICE_TYPE_MBX1 = 1 ,
++ PVRSRV_DEVICE_TYPE_MBX1_LITE = 2 ,
++
++ PVRSRV_DEVICE_TYPE_M24VA = 3,
++ PVRSRV_DEVICE_TYPE_MVDA2 = 4,
++ PVRSRV_DEVICE_TYPE_MVED1 = 5,
++ PVRSRV_DEVICE_TYPE_MSVDX = 6,
++
++ PVRSRV_DEVICE_TYPE_SGX = 7,
++
++ PVRSRV_DEVICE_TYPE_VGX = 8,
++
++ PVRSRV_DEVICE_TYPE_TOPAZ = 9,
++
++ PVRSRV_DEVICE_TYPE_EXT = 10,
++
++ PVRSRV_DEVICE_TYPE_LAST = 10,
++
++ PVRSRV_DEVICE_TYPE_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_DEVICE_TYPE;
++
++#define HEAP_ID( _dev_ , _dev_heap_idx_ ) ( ((_dev_)<<24) | ((_dev_heap_idx_)&((1<<24)-1)) )
++#define HEAP_IDX( _heap_id_ ) ( (_heap_id_)&((1<<24) - 1 ) )
++#define HEAP_DEV( _heap_id_ ) ( (_heap_id_)>>24 )
++
++#define PVRSRV_UNDEFINED_HEAP_ID (~0LU)
++
++typedef enum
++{
++ IMG_EGL = 0x00000001,
++ IMG_OPENGLES1 = 0x00000002,
++ IMG_OPENGLES2 = 0x00000003,
++ IMG_D3DM = 0x00000004,
++ IMG_SRV_UM = 0x00000005,
++ IMG_OPENVG = 0x00000006,
++ IMG_SRVCLIENT = 0x00000007,
++ IMG_VISTAKMD = 0x00000008,
++ IMG_VISTA3DNODE = 0x00000009,
++ IMG_VISTAMVIDEONODE = 0x0000000A,
++ IMG_VISTAVPBNODE = 0x0000000B,
++ IMG_OPENGL = 0x0000000C,
++ IMG_D3D = 0x0000000D,
++#if defined(SUPPORT_GRAPHICS_HAL)
++ IMG_GRAPHICS_HAL = 0x0000000E
++#endif
++
++} IMG_MODULE_ID;
++
++
++#define APPHINT_MAX_STRING_SIZE 256
++
++typedef enum
++{
++ IMG_STRING_TYPE = 1,
++ IMG_FLOAT_TYPE ,
++ IMG_UINT_TYPE ,
++ IMG_INT_TYPE ,
++ IMG_FLAG_TYPE
++}IMG_DATA_TYPE;
++
++
++typedef struct _PVRSRV_DEV_DATA_ *PPVRSRV_DEV_DATA;
++
++typedef struct _PVRSRV_DEVICE_IDENTIFIER_
++{
++ PVRSRV_DEVICE_TYPE eDeviceType;
++ PVRSRV_DEVICE_CLASS eDeviceClass;
++ IMG_UINT32 ui32DeviceIndex;
++
++} PVRSRV_DEVICE_IDENTIFIER;
++
++
++typedef struct _PVRSRV_CLIENT_DEV_DATA_
++{
++ IMG_UINT32 ui32NumDevices;
++ PVRSRV_DEVICE_IDENTIFIER asDevID[PVRSRV_MAX_DEVICES];
++ PVRSRV_ERROR (*apfnDevConnect[PVRSRV_MAX_DEVICES])(PPVRSRV_DEV_DATA);
++ PVRSRV_ERROR (*apfnDumpTrace[PVRSRV_MAX_DEVICES])(PPVRSRV_DEV_DATA);
++
++} PVRSRV_CLIENT_DEV_DATA;
++
++
++typedef struct _PVRSRV_CONNECTION_
++{
++ IMG_HANDLE hServices;
++ IMG_UINT32 ui32ProcessID;
++ PVRSRV_CLIENT_DEV_DATA sClientDevData;
++}PVRSRV_CONNECTION;
++
++
++typedef struct _PVRSRV_DEV_DATA_
++{
++ PVRSRV_CONNECTION sConnection;
++ IMG_HANDLE hDevCookie;
++
++} PVRSRV_DEV_DATA;
++
++typedef struct _PVRSRV_MEMUPDATE_
++{
++ IMG_UINT32 ui32UpdateAddr;
++ IMG_UINT32 ui32UpdateVal;
++} PVRSRV_MEMUPDATE;
++
++typedef struct _PVRSRV_HWREG_
++{
++ IMG_UINT32 ui32RegAddr;
++ IMG_UINT32 ui32RegVal;
++} PVRSRV_HWREG;
++
++typedef struct _PVRSRV_MEMBLK_
++{
++ IMG_DEV_VIRTADDR sDevVirtAddr;
++ IMG_HANDLE hOSMemHandle;
++ IMG_HANDLE hOSWrapMem;
++ IMG_HANDLE hBuffer;
++ IMG_HANDLE hResItem;
++ IMG_SYS_PHYADDR *psIntSysPAddr;
++
++} PVRSRV_MEMBLK;
++
++typedef struct _PVRSRV_KERNEL_MEM_INFO_ *PPVRSRV_KERNEL_MEM_INFO;
++
++typedef struct _PVRSRV_CLIENT_MEM_INFO_
++{
++
++ IMG_PVOID pvLinAddr;
++
++
++ IMG_PVOID pvLinAddrKM;
++
++
++ IMG_DEV_VIRTADDR sDevVAddr;
++
++
++
++
++
++
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++
++ IMG_UINT32 ui32Flags;
++
++
++
++
++ IMG_UINT32 ui32ClientFlags;
++
++
++ IMG_SIZE_T ui32AllocSize;
++
++
++
++ struct _PVRSRV_CLIENT_SYNC_INFO_ *psClientSyncInfo;
++
++
++ IMG_HANDLE hMappingInfo;
++
++
++ IMG_HANDLE hKernelMemInfo;
++
++
++ IMG_HANDLE hResItem;
++
++#if defined(SUPPORT_MEMINFO_IDS)
++ #if !defined(USE_CODE)
++
++ IMG_UINT64 ui64Stamp;
++ #else
++ IMG_UINT32 dummy1;
++ IMG_UINT32 dummy2;
++ #endif
++#endif
++
++
++
++
++ struct _PVRSRV_CLIENT_MEM_INFO_ *psNext;
++
++} PVRSRV_CLIENT_MEM_INFO, *PPVRSRV_CLIENT_MEM_INFO;
++
++
++#define PVRSRV_MAX_CLIENT_HEAPS (32)
++typedef struct _PVRSRV_HEAP_INFO_
++{
++ IMG_UINT32 ui32HeapID;
++ IMG_HANDLE hDevMemHeap;
++ IMG_DEV_VIRTADDR sDevVAddrBase;
++ IMG_UINT32 ui32HeapByteSize;
++ IMG_UINT32 ui32Attribs;
++}PVRSRV_HEAP_INFO;
++
++
++
++
++typedef struct _PVRSRV_EVENTOBJECT_
++{
++
++ IMG_CHAR szName[EVENTOBJNAME_MAXLENGTH];
++
++ IMG_HANDLE hOSEventKM;
++
++} PVRSRV_EVENTOBJECT;
++
++typedef struct _PVRSRV_MISC_INFO_
++{
++ IMG_UINT32 ui32StateRequest;
++ IMG_UINT32 ui32StatePresent;
++
++
++ IMG_VOID *pvSOCTimerRegisterKM;
++ IMG_VOID *pvSOCTimerRegisterUM;
++ IMG_HANDLE hSOCTimerRegisterOSMemHandle;
++ IMG_HANDLE hSOCTimerRegisterMappingInfo;
++
++
++ IMG_VOID *pvSOCClockGateRegs;
++ IMG_UINT32 ui32SOCClockGateRegsSize;
++
++
++ IMG_CHAR *pszMemoryStr;
++ IMG_UINT32 ui32MemoryStrLen;
++
++
++ PVRSRV_EVENTOBJECT sGlobalEventObject;
++ IMG_HANDLE hOSGlobalEvent;
++
++
++ IMG_UINT32 aui32DDKVersion[4];
++
++
++
++ IMG_BOOL bCPUCacheFlushAll;
++
++ IMG_BOOL bDeferCPUCacheFlush;
++
++ IMG_PVOID pvRangeAddrStart;
++
++ IMG_PVOID pvRangeAddrEnd;
++
++} PVRSRV_MISC_INFO;
++
++
++typedef enum _PVRSRV_CLIENT_EVENT_
++{
++ PVRSRV_CLIENT_EVENT_HWTIMEOUT = 0,
++} PVRSRV_CLIENT_EVENT;
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVClientEvent(IMG_CONST PVRSRV_CLIENT_EVENT eEvent,
++ PVRSRV_DEV_DATA *psDevData,
++ IMG_PVOID pvData);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVConnect(PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDisconnect(PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevices(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 *puiNumDevices,
++ PVRSRV_DEVICE_IDENTIFIER *puiDevIDs);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceData(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 uiDevIndex,
++ PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_DEVICE_TYPE eDeviceType);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfo (IMG_CONST PVRSRV_CONNECTION *psConnection, PVRSRV_MISC_INFO *psMiscInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVReleaseMiscInfo (IMG_CONST PVRSRV_CONNECTION *psConnection, PVRSRV_MISC_INFO *psMiscInfo);
++
++#if 1
++IMG_IMPORT
++IMG_UINT32 ReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
++
++IMG_IMPORT
++IMG_VOID WriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
++
++IMG_IMPORT IMG_VOID WriteHWRegs(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Count, PVRSRV_HWREG *psHWRegs);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVPollForValue ( const PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hOSEvent,
++ volatile IMG_UINT32 *pui32LinMemAddr,
++ IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Waitus,
++ IMG_UINT32 ui32Tries);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContext(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE *phDevMemContext,
++ IMG_UINT32 *pui32SharedHeapCount,
++ PVRSRV_HEAP_INFO *psHeapInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContext(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hDevMemContext);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapInfo(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hDevMemContext,
++ IMG_UINT32 *pui32SharedHeapCount,
++ PVRSRV_HEAP_INFO *psHeapInfo);
++
++#if defined(PVRSRV_LOG_MEMORY_ALLOCS)
++ #define PVRSRVAllocDeviceMem_log(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo, logStr) \
++ (PVR_TRACE(("PVRSRVAllocDeviceMem(" #psDevData "," #hDevMemHeap "," #ui32Attribs "," #ui32Size "," #ui32Alignment "," #ppsMemInfo ")" \
++ ": " logStr " (size = 0x%lx)", ui32Size)), \
++ PVRSRVAllocDeviceMem(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo))
++#else
++ #define PVRSRVAllocDeviceMem_log(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo, logStr) \
++ PVRSRVAllocDeviceMem(psDevData, hDevMemHeap, ui32Attribs, ui32Size, ui32Alignment, ppsMemInfo)
++#endif
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocDeviceMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hDevMemHeap,
++ IMG_UINT32 ui32Attribs,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Alignment,
++ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVExportDeviceMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ IMG_HANDLE *phMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVReserveDeviceVirtualMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hDevMemHeap,
++ IMG_DEV_VIRTADDR *psDevVAddr,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Alignment,
++ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceVirtualMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hKernelMemInfo,
++ IMG_HANDLE hDstDevMemHeap,
++ PVRSRV_CLIENT_MEM_INFO **ppsDstMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapExtMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ IMG_SYS_PHYADDR *psSysPAddr,
++ IMG_UINT32 ui32Flags);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapExtMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Flags);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemory2(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hDevMemContext,
++ IMG_SIZE_T ui32ByteSize,
++ IMG_SIZE_T ui32PageOffset,
++ IMG_BOOL bPhysContig,
++ IMG_SYS_PHYADDR *psSysPAddr,
++ IMG_VOID *pvLinAddr,
++ IMG_UINT32 ui32Flags,
++ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemory(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hDevMemContext,
++ IMG_SIZE_T ui32ByteSize,
++ IMG_SIZE_T ui32PageOffset,
++ IMG_BOOL bPhysContig,
++ IMG_SYS_PHYADDR *psSysPAddr,
++ IMG_VOID *pvLinAddr,
++ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++PVRSRV_ERROR PVRSRVChangeDeviceMemoryAttributes(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psClientMemInfo,
++ IMG_UINT32 ui32Attribs);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_HANDLE hDevMemContext,
++ IMG_HANDLE hDeviceClassBuffer,
++ PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceClassMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapPhysToUserSpace(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_SYS_PHYADDR sSysPhysAddr,
++ IMG_UINT32 uiSizeInBytes,
++ IMG_PVOID *ppvUserAddr,
++ IMG_UINT32 *puiActualSize,
++ IMG_PVOID *ppvProcess);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapPhysToUserSpace(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_PVOID pvUserAddr,
++ IMG_PVOID pvProcess);
++
++typedef enum _PVRSRV_SYNCVAL_MODE_
++{
++ PVRSRV_SYNCVAL_READ = IMG_TRUE,
++ PVRSRV_SYNCVAL_WRITE = IMG_FALSE,
++
++} PVRSRV_SYNCVAL_MODE, *PPVRSRV_SYNCVAL_MODE;
++
++typedef IMG_UINT32 PVRSRV_SYNCVAL;
++
++IMG_IMPORT PVRSRV_ERROR PVRSRVWaitForOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
++
++IMG_IMPORT PVRSRV_ERROR PVRSRVWaitForAllOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode);
++
++IMG_IMPORT IMG_BOOL PVRSRVTestOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
++
++IMG_IMPORT IMG_BOOL PVRSRVTestAllOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode);
++
++IMG_IMPORT IMG_BOOL PVRSRVTestOpsNotComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
++
++IMG_IMPORT IMG_BOOL PVRSRVTestAllOpsNotComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode);
++
++IMG_IMPORT PVRSRV_SYNCVAL PVRSRVGetPendingOpSyncVal(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++ PVRSRV_SYNCVAL_MODE eMode);
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDeviceClass(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ PVRSRV_DEVICE_CLASS DeviceClass,
++ IMG_UINT32 *pui32DevCount,
++ IMG_UINT32 *pui32DevID);
++
++IMG_IMPORT
++IMG_HANDLE IMG_CALLCONV PVRSRVOpenDCDevice(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_UINT32 ui32DeviceID);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCloseDCDevice(IMG_CONST PVRSRV_CONNECTION *psConnection, IMG_HANDLE hDevice);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumDCFormats (IMG_HANDLE hDevice,
++ IMG_UINT32 *pui32Count,
++ DISPLAY_FORMAT *psFormat);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumDCDims (IMG_HANDLE hDevice,
++ IMG_UINT32 *pui32Count,
++ DISPLAY_FORMAT *psFormat,
++ DISPLAY_DIMS *psDims);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCSystemBuffer(IMG_HANDLE hDevice,
++ IMG_HANDLE *phBuffer);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCInfo(IMG_HANDLE hDevice,
++ DISPLAY_INFO* psDisplayInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDCSwapChain (IMG_HANDLE hDevice,
++ IMG_UINT32 ui32Flags,
++ DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
++ DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
++ IMG_UINT32 ui32BufferCount,
++ IMG_UINT32 ui32OEMFlags,
++ IMG_UINT32 *pui32SwapChainID,
++ IMG_HANDLE *phSwapChain);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDCSwapChain (IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCDstRect (IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_RECT *psDstRect);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCSrcRect (IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_RECT *psSrcRect);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCDstColourKey (IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 ui32CKColour);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCSrcColourKey (IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 ui32CKColour);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCBuffers(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_HANDLE *phBuffer);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSwapToDCBuffer (IMG_HANDLE hDevice,
++ IMG_HANDLE hBuffer,
++ IMG_UINT32 ui32ClipRectCount,
++ IMG_RECT *psClipRect,
++ IMG_UINT32 ui32SwapInterval,
++ IMG_HANDLE hPrivateTag);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSwapToDCSystem (IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain);
++
++
++IMG_IMPORT
++IMG_HANDLE IMG_CALLCONV PVRSRVOpenBCDevice(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++ IMG_UINT32 ui32DeviceID);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCloseBCDevice(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hDevice);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetBCBufferInfo(IMG_HANDLE hDevice,
++ BUFFER_INFO *psBuffer);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetBCBuffer(IMG_HANDLE hDevice,
++ IMG_UINT32 ui32BufferIndex,
++ IMG_HANDLE *phBuffer);
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpInit(IMG_CONST PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpStartInitPhase(IMG_CONST PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpStopInitPhase(IMG_CONST PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMemPol(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Flags);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSyncPol(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo,
++ IMG_BOOL bIsRead,
++ IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMem(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_PVOID pvAltLinAddr,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSync(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_PVOID pvAltLinAddr,
++ PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Bytes);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpReg(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32RegAddr,
++ IMG_UINT32 ui32RegValue,
++ IMG_UINT32 ui32Flags);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegPolWithFlags(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32RegAddr,
++ IMG_UINT32 ui32RegValue,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Flags);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegPol(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32RegAddr,
++ IMG_UINT32 ui32RegValue,
++ IMG_UINT32 ui32Mask);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpPDReg(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32RegAddr,
++ IMG_UINT32 ui32RegValue);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpPDDevPAddr(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_DEV_PHYADDR sPDDevPAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMemPages(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hKernelMemInfo,
++ IMG_DEV_PHYADDR *pPages,
++ IMG_UINT32 ui32NumPages,
++ IMG_DEV_VIRTADDR sDevAddr,
++ IMG_UINT32 ui32Start,
++ IMG_UINT32 ui32Length,
++ IMG_BOOL bContinuous);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSetFrame(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32Frame);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpComment(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_CONST IMG_CHAR *pszComment,
++ IMG_BOOL bContinuous);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentf(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_BOOL bContinuous,
++ IMG_CONST IMG_CHAR *pszFormat, ...);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentWithFlagsf(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32Flags,
++ IMG_CONST IMG_CHAR *pszFormat, ...);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpDriverInfo(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_CHAR *pszString,
++ IMG_BOOL bContinuous);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpIsCapturing(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_BOOL *pbIsCapturing);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpBitmap(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_UINT32 ui32Width,
++ IMG_UINT32 ui32Height,
++ IMG_UINT32 ui32StrideInBytes,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ IMG_UINT32 ui32Size,
++ PDUMP_PIXEL_FORMAT ePixelFormat,
++ PDUMP_MEM_FORMAT eMemFormat,
++ IMG_UINT32 ui32PDumpFlags);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegRead(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_CONST IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_UINT32 ui32Address,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PDumpFlags);
++
++
++IMG_IMPORT
++IMG_BOOL IMG_CALLCONV PVRSRVPDumpIsCapturingTest(IMG_CONST PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCycleCountRegRead(IMG_CONST PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32RegOffset,
++ IMG_BOOL bLastFrame);
++
++IMG_IMPORT IMG_HANDLE PVRSRVLoadLibrary(const IMG_CHAR *pszLibraryName);
++IMG_IMPORT PVRSRV_ERROR PVRSRVUnloadLibrary(IMG_HANDLE hExtDrv);
++IMG_IMPORT PVRSRV_ERROR PVRSRVGetLibFuncAddr(IMG_HANDLE hExtDrv, const IMG_CHAR *pszFunctionName, IMG_VOID **ppvFuncAddr);
++
++IMG_IMPORT IMG_UINT32 PVRSRVClockus (void);
++IMG_IMPORT IMG_VOID PVRSRVWaitus (IMG_UINT32 ui32Timeus);
++IMG_IMPORT IMG_VOID PVRSRVReleaseThreadQuanta (void);
++IMG_IMPORT IMG_UINT32 IMG_CALLCONV PVRSRVGetCurrentProcessID(void);
++IMG_IMPORT IMG_CHAR * IMG_CALLCONV PVRSRVSetLocale(const IMG_CHAR *pszLocale);
++
++
++
++
++
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVCreateAppHintState(IMG_MODULE_ID eModuleID,
++ const IMG_CHAR *pszAppName,
++ IMG_VOID **ppvState);
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVFreeAppHintState(IMG_MODULE_ID eModuleID,
++ IMG_VOID *pvHintState);
++
++IMG_IMPORT IMG_BOOL IMG_CALLCONV PVRSRVGetAppHint(IMG_VOID *pvHintState,
++ const IMG_CHAR *pszHintName,
++ IMG_DATA_TYPE eDataType,
++ const IMG_VOID *pvDefault,
++ IMG_VOID *pvReturn);
++
++IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVAllocUserModeMem (IMG_SIZE_T ui32Size);
++IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVCallocUserModeMem (IMG_SIZE_T ui32Size);
++IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVReallocUserModeMem (IMG_PVOID pvBase, IMG_SIZE_T uNewSize);
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVFreeUserModeMem (IMG_PVOID pvMem);
++IMG_IMPORT IMG_VOID PVRSRVMemCopy(IMG_VOID *pvDst, const IMG_VOID *pvSrc, IMG_SIZE_T ui32Size);
++IMG_IMPORT IMG_VOID PVRSRVMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_SIZE_T ui32Size);
++
++struct _PVRSRV_MUTEX_OPAQUE_STRUCT_;
++typedef struct _PVRSRV_MUTEX_OPAQUE_STRUCT_ *PVRSRV_MUTEX_HANDLE;
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateMutex(PVRSRV_MUTEX_HANDLE *phMutex);
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyMutex(PVRSRV_MUTEX_HANDLE hMutex);
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVLockMutex(PVRSRV_MUTEX_HANDLE hMutex);
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVUnlockMutex(PVRSRV_MUTEX_HANDLE hMutex);
++
++#if (defined(DEBUG) && defined(__linux__))
++IMG_PVOID PVRSRVAllocUserModeMemTracking(IMG_SIZE_T ui32Size, IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber);
++IMG_PVOID PVRSRVCallocUserModeMemTracking(IMG_SIZE_T ui32Size, IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber);
++IMG_VOID PVRSRVFreeUserModeMemTracking(IMG_VOID *pvMem);
++IMG_PVOID PVRSRVReallocUserModeMemTracking(IMG_VOID *pvMem, IMG_SIZE_T ui32NewSize, IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber);
++#endif
++
++IMG_IMPORT PVRSRV_ERROR PVRSRVEventObjectWait(const PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hOSEvent);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVModifyPendingSyncOps(PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hKernelSyncInfo,
++ IMG_UINT32 ui32ModifyFlags,
++ IMG_UINT32 *pui32ReadOpsPending,
++ IMG_UINT32 *pui32WriteOpsPending);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVModifyCompleteSyncOps(PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hKernelSyncInfo,
++ IMG_UINT32 ui32ModifyFlags);
++
++
++#define TIME_NOT_PASSED_UINT32(a,b,c) ((a - b) < c)
++
++#if defined (__cplusplus)
++}
++#endif
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/servicesext.h b/drivers/gpu/drm/mrst/pvr/include4/servicesext.h
+new file mode 100644
+index 0000000..4bfb75c
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/servicesext.h
+@@ -0,0 +1,648 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__SERVICESEXT_H__)
++#define __SERVICESEXT_H__
++
++#define PVRSRV_LOCKFLG_READONLY (1)
++
++typedef enum _PVRSRV_ERROR_
++{
++ PVRSRV_OK = 0,
++ PVRSRV_ERROR_GENERIC = 1,
++ PVRSRV_ERROR_OUT_OF_MEMORY = 2,
++ PVRSRV_ERROR_TOO_FEW_BUFFERS = 3,
++ PVRSRV_ERROR_SYMBOL_NOT_FOUND = 4,
++ PVRSRV_ERROR_OUT_OF_HSPACE = 5,
++ PVRSRV_ERROR_INVALID_PARAMS = 6,
++ PVRSRV_ERROR_TILE_MAP_FAILED = 7,
++ PVRSRV_ERROR_INIT_FAILURE = 8,
++ PVRSRV_ERROR_CANT_REGISTER_CALLBACK = 9,
++ PVRSRV_ERROR_INVALID_DEVICE = 10,
++ PVRSRV_ERROR_NOT_OWNER = 11,
++ PVRSRV_ERROR_BAD_MAPPING = 12,
++ PVRSRV_ERROR_TIMEOUT = 13,
++ PVRSRV_ERROR_NO_PRIMARY = 14,
++ PVRSRV_ERROR_FLIP_CHAIN_EXISTS = 15,
++ PVRSRV_ERROR_CANNOT_ACQUIRE_SYSDATA = 16,
++ PVRSRV_ERROR_SCENE_INVALID = 17,
++ PVRSRV_ERROR_STREAM_ERROR = 18,
++ PVRSRV_ERROR_INVALID_INTERRUPT = 19,
++ PVRSRV_ERROR_FAILED_DEPENDENCIES = 20,
++ PVRSRV_ERROR_CMD_NOT_PROCESSED = 21,
++ PVRSRV_ERROR_CMD_TOO_BIG = 22,
++ PVRSRV_ERROR_DEVICE_REGISTER_FAILED = 23,
++ PVRSRV_ERROR_FIFO_SPACE = 24,
++ PVRSRV_ERROR_TA_RECOVERY = 25,
++ PVRSRV_ERROR_INDOSORLOWPOWER = 26,
++ PVRSRV_ERROR_TOOMANYBUFFERS = 27,
++ PVRSRV_ERROR_NOT_SUPPORTED = 28,
++ PVRSRV_ERROR_PROCESSING_BLOCKED = 29,
++
++
++ PVRSRV_ERROR_CANNOT_FLUSH_QUEUE = 31,
++ PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE = 32,
++ PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS = 33,
++ PVRSRV_ERROR_RETRY = 34,
++
++ PVRSRV_ERROR_DDK_VERSION_MISMATCH = 35,
++ PVRSRV_ERROR_BUILD_MISMATCH = 36,
++ PVRSRV_ERROR_PDUMP_BUF_OVERFLOW,
++
++ PVRSRV_ERROR_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_ERROR;
++
++
++typedef enum _PVRSRV_DEVICE_CLASS_
++{
++ PVRSRV_DEVICE_CLASS_3D = 0 ,
++ PVRSRV_DEVICE_CLASS_DISPLAY = 1 ,
++ PVRSRV_DEVICE_CLASS_BUFFER = 2 ,
++ PVRSRV_DEVICE_CLASS_VIDEO = 3 ,
++
++ PVRSRV_DEVICE_CLASS_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_DEVICE_CLASS;
++
++
++
++typedef enum _PVRSRV_SYS_POWER_STATE_
++{
++ PVRSRV_SYS_POWER_STATE_Unspecified = -1,
++ PVRSRV_SYS_POWER_STATE_D0 = 0,
++ PVRSRV_SYS_POWER_STATE_D1 = 1,
++ PVRSRV_SYS_POWER_STATE_D2 = 2,
++ PVRSRV_SYS_POWER_STATE_D3 = 3,
++ PVRSRV_SYS_POWER_STATE_D4 = 4,
++
++ PVRSRV_SYS_POWER_STATE_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_SYS_POWER_STATE, *PPVRSRV_SYS_POWER_STATE;
++
++
++typedef enum _PVRSRV_DEV_POWER_STATE_
++{
++ PVRSRV_DEV_POWER_STATE_DEFAULT = -1,
++ PVRSRV_DEV_POWER_STATE_ON = 0,
++ PVRSRV_DEV_POWER_STATE_IDLE = 1,
++ PVRSRV_DEV_POWER_STATE_OFF = 2,
++
++ PVRSRV_DEV_POWER_STATE_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_DEV_POWER_STATE, *PPVRSRV_DEV_POWER_STATE;
++
++
++typedef PVRSRV_ERROR (*PFN_PRE_POWER) (IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++typedef PVRSRV_ERROR (*PFN_POST_POWER) (IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++typedef PVRSRV_ERROR (*PFN_PRE_CLOCKSPEED_CHANGE) (IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++typedef PVRSRV_ERROR (*PFN_POST_CLOCKSPEED_CHANGE) (IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++
++typedef enum _PVRSRV_PIXEL_FORMAT_ {
++
++ PVRSRV_PIXEL_FORMAT_UNKNOWN = 0,
++ PVRSRV_PIXEL_FORMAT_RGB565 = 1,
++ PVRSRV_PIXEL_FORMAT_RGB555 = 2,
++ PVRSRV_PIXEL_FORMAT_RGB888 = 3,
++ PVRSRV_PIXEL_FORMAT_BGR888 = 4,
++ PVRSRV_PIXEL_FORMAT_GREY_SCALE = 8,
++ PVRSRV_PIXEL_FORMAT_PAL12 = 13,
++ PVRSRV_PIXEL_FORMAT_PAL8 = 14,
++ PVRSRV_PIXEL_FORMAT_PAL4 = 15,
++ PVRSRV_PIXEL_FORMAT_PAL2 = 16,
++ PVRSRV_PIXEL_FORMAT_PAL1 = 17,
++ PVRSRV_PIXEL_FORMAT_ARGB1555 = 18,
++ PVRSRV_PIXEL_FORMAT_ARGB4444 = 19,
++ PVRSRV_PIXEL_FORMAT_ARGB8888 = 20,
++ PVRSRV_PIXEL_FORMAT_ABGR8888 = 21,
++ PVRSRV_PIXEL_FORMAT_YV12 = 22,
++ PVRSRV_PIXEL_FORMAT_I420 = 23,
++ PVRSRV_PIXEL_FORMAT_IMC2 = 25,
++ PVRSRV_PIXEL_FORMAT_XRGB8888,
++ PVRSRV_PIXEL_FORMAT_XBGR8888,
++ PVRSRV_PIXEL_FORMAT_BGRA8888,
++ PVRSRV_PIXEL_FORMAT_XRGB4444,
++ PVRSRV_PIXEL_FORMAT_ARGB8332,
++ PVRSRV_PIXEL_FORMAT_A2RGB10,
++ PVRSRV_PIXEL_FORMAT_A2BGR10,
++ PVRSRV_PIXEL_FORMAT_P8,
++ PVRSRV_PIXEL_FORMAT_L8,
++ PVRSRV_PIXEL_FORMAT_A8L8,
++ PVRSRV_PIXEL_FORMAT_A4L4,
++ PVRSRV_PIXEL_FORMAT_L16,
++ PVRSRV_PIXEL_FORMAT_L6V5U5,
++ PVRSRV_PIXEL_FORMAT_V8U8,
++ PVRSRV_PIXEL_FORMAT_V16U16,
++ PVRSRV_PIXEL_FORMAT_QWVU8888,
++ PVRSRV_PIXEL_FORMAT_XLVU8888,
++ PVRSRV_PIXEL_FORMAT_QWVU16,
++ PVRSRV_PIXEL_FORMAT_D16,
++ PVRSRV_PIXEL_FORMAT_D24S8,
++ PVRSRV_PIXEL_FORMAT_D24X8,
++
++
++ PVRSRV_PIXEL_FORMAT_ABGR16,
++ PVRSRV_PIXEL_FORMAT_ABGR16F,
++ PVRSRV_PIXEL_FORMAT_ABGR32,
++ PVRSRV_PIXEL_FORMAT_ABGR32F,
++ PVRSRV_PIXEL_FORMAT_B10GR11,
++ PVRSRV_PIXEL_FORMAT_GR88,
++ PVRSRV_PIXEL_FORMAT_BGR32,
++ PVRSRV_PIXEL_FORMAT_GR32,
++ PVRSRV_PIXEL_FORMAT_E5BGR9,
++
++
++ PVRSRV_PIXEL_FORMAT_DXT1,
++ PVRSRV_PIXEL_FORMAT_DXT2,
++ PVRSRV_PIXEL_FORMAT_DXT3,
++ PVRSRV_PIXEL_FORMAT_DXT4,
++ PVRSRV_PIXEL_FORMAT_DXT5,
++
++
++ PVRSRV_PIXEL_FORMAT_R8G8_B8G8,
++ PVRSRV_PIXEL_FORMAT_G8R8_G8B8,
++
++
++ PVRSRV_PIXEL_FORMAT_NV11,
++ PVRSRV_PIXEL_FORMAT_NV12,
++
++
++ PVRSRV_PIXEL_FORMAT_YUY2,
++ PVRSRV_PIXEL_FORMAT_YUV420,
++ PVRSRV_PIXEL_FORMAT_YUV444,
++ PVRSRV_PIXEL_FORMAT_VUY444,
++ PVRSRV_PIXEL_FORMAT_YUYV,
++ PVRSRV_PIXEL_FORMAT_YVYU,
++ PVRSRV_PIXEL_FORMAT_UYVY,
++ PVRSRV_PIXEL_FORMAT_VYUY,
++
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_UYVY,
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YUYV,
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_YVYU,
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_VYUY,
++ PVRSRV_PIXEL_FORMAT_FOURCC_ORG_AYUV,
++
++
++ PVRSRV_PIXEL_FORMAT_A32B32G32R32,
++ PVRSRV_PIXEL_FORMAT_A32B32G32R32F,
++ PVRSRV_PIXEL_FORMAT_A32B32G32R32_UINT,
++ PVRSRV_PIXEL_FORMAT_A32B32G32R32_SINT,
++
++
++ PVRSRV_PIXEL_FORMAT_B32G32R32,
++ PVRSRV_PIXEL_FORMAT_B32G32R32F,
++ PVRSRV_PIXEL_FORMAT_B32G32R32_UINT,
++ PVRSRV_PIXEL_FORMAT_B32G32R32_SINT,
++
++
++ PVRSRV_PIXEL_FORMAT_G32R32,
++ PVRSRV_PIXEL_FORMAT_G32R32F,
++ PVRSRV_PIXEL_FORMAT_G32R32_UINT,
++ PVRSRV_PIXEL_FORMAT_G32R32_SINT,
++
++
++ PVRSRV_PIXEL_FORMAT_D32F,
++ PVRSRV_PIXEL_FORMAT_R32,
++ PVRSRV_PIXEL_FORMAT_R32F,
++ PVRSRV_PIXEL_FORMAT_R32_UINT,
++ PVRSRV_PIXEL_FORMAT_R32_SINT,
++
++
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16F,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16_SINT,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16_SNORM,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16_UINT,
++ PVRSRV_PIXEL_FORMAT_A16B16G16R16_UNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_G16R16,
++ PVRSRV_PIXEL_FORMAT_G16R16F,
++ PVRSRV_PIXEL_FORMAT_G16R16_UINT,
++ PVRSRV_PIXEL_FORMAT_G16R16_UNORM,
++ PVRSRV_PIXEL_FORMAT_G16R16_SINT,
++ PVRSRV_PIXEL_FORMAT_G16R16_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_R16,
++ PVRSRV_PIXEL_FORMAT_R16F,
++ PVRSRV_PIXEL_FORMAT_R16_UINT,
++ PVRSRV_PIXEL_FORMAT_R16_UNORM,
++ PVRSRV_PIXEL_FORMAT_R16_SINT,
++ PVRSRV_PIXEL_FORMAT_R16_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_X8R8G8B8,
++ PVRSRV_PIXEL_FORMAT_X8R8G8B8_UNORM,
++ PVRSRV_PIXEL_FORMAT_X8R8G8B8_UNORM_SRGB,
++
++ PVRSRV_PIXEL_FORMAT_A8R8G8B8,
++ PVRSRV_PIXEL_FORMAT_A8R8G8B8_UNORM,
++ PVRSRV_PIXEL_FORMAT_A8R8G8B8_UNORM_SRGB,
++
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_UINT,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_UNORM,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_UNORM_SRGB,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_SINT,
++ PVRSRV_PIXEL_FORMAT_A8B8G8R8_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_G8R8,
++ PVRSRV_PIXEL_FORMAT_G8R8_UINT,
++ PVRSRV_PIXEL_FORMAT_G8R8_UNORM,
++ PVRSRV_PIXEL_FORMAT_G8R8_SINT,
++ PVRSRV_PIXEL_FORMAT_G8R8_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_A8,
++ PVRSRV_PIXEL_FORMAT_R8,
++ PVRSRV_PIXEL_FORMAT_R8_UINT,
++ PVRSRV_PIXEL_FORMAT_R8_UNORM,
++ PVRSRV_PIXEL_FORMAT_R8_SINT,
++ PVRSRV_PIXEL_FORMAT_R8_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_A2B10G10R10,
++ PVRSRV_PIXEL_FORMAT_A2B10G10R10_UNORM,
++ PVRSRV_PIXEL_FORMAT_A2B10G10R10_UINT,
++
++
++ PVRSRV_PIXEL_FORMAT_B10G11R11,
++ PVRSRV_PIXEL_FORMAT_B10G11R11F,
++
++
++ PVRSRV_PIXEL_FORMAT_X24G8R32,
++ PVRSRV_PIXEL_FORMAT_G8R24,
++ PVRSRV_PIXEL_FORMAT_X8R24,
++ PVRSRV_PIXEL_FORMAT_E5B9G9R9,
++ PVRSRV_PIXEL_FORMAT_R1,
++
++ PVRSRV_PIXEL_FORMAT_BC1,
++ PVRSRV_PIXEL_FORMAT_BC1_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC1_SRGB,
++ PVRSRV_PIXEL_FORMAT_BC2,
++ PVRSRV_PIXEL_FORMAT_BC2_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC2_SRGB,
++ PVRSRV_PIXEL_FORMAT_BC3,
++ PVRSRV_PIXEL_FORMAT_BC3_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC3_SRGB,
++ PVRSRV_PIXEL_FORMAT_BC4,
++ PVRSRV_PIXEL_FORMAT_BC4_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC4_SNORM,
++ PVRSRV_PIXEL_FORMAT_BC5,
++ PVRSRV_PIXEL_FORMAT_BC5_UNORM,
++ PVRSRV_PIXEL_FORMAT_BC5_SNORM,
++
++
++ PVRSRV_PIXEL_FORMAT_L_F16,
++ PVRSRV_PIXEL_FORMAT_L_F16_REP,
++ PVRSRV_PIXEL_FORMAT_L_F16_A_F16,
++ PVRSRV_PIXEL_FORMAT_A_F16,
++ PVRSRV_PIXEL_FORMAT_B16G16R16F,
++
++ PVRSRV_PIXEL_FORMAT_L_F32,
++ PVRSRV_PIXEL_FORMAT_A_F32,
++ PVRSRV_PIXEL_FORMAT_L_F32_A_F32,
++
++
++ PVRSRV_PIXEL_FORMAT_PVRTC2,
++ PVRSRV_PIXEL_FORMAT_PVRTC4,
++ PVRSRV_PIXEL_FORMAT_PVRTCII2,
++ PVRSRV_PIXEL_FORMAT_PVRTCII4,
++ PVRSRV_PIXEL_FORMAT_PVRTCIII,
++ PVRSRV_PIXEL_FORMAT_PVRO8,
++ PVRSRV_PIXEL_FORMAT_PVRO88,
++ PVRSRV_PIXEL_FORMAT_PT1,
++ PVRSRV_PIXEL_FORMAT_PT2,
++ PVRSRV_PIXEL_FORMAT_PT4,
++ PVRSRV_PIXEL_FORMAT_PT8,
++ PVRSRV_PIXEL_FORMAT_PTW,
++ PVRSRV_PIXEL_FORMAT_PTB,
++ PVRSRV_PIXEL_FORMAT_MONO8,
++ PVRSRV_PIXEL_FORMAT_MONO16,
++
++
++ PVRSRV_PIXEL_FORMAT_C0_YUYV,
++ PVRSRV_PIXEL_FORMAT_C0_UYVY,
++ PVRSRV_PIXEL_FORMAT_C0_YVYU,
++ PVRSRV_PIXEL_FORMAT_C0_VYUY,
++ PVRSRV_PIXEL_FORMAT_C1_YUYV,
++ PVRSRV_PIXEL_FORMAT_C1_UYVY,
++ PVRSRV_PIXEL_FORMAT_C1_YVYU,
++ PVRSRV_PIXEL_FORMAT_C1_VYUY,
++
++
++ PVRSRV_PIXEL_FORMAT_C0_YUV420_2P_UV,
++ PVRSRV_PIXEL_FORMAT_C0_YUV420_2P_VU,
++ PVRSRV_PIXEL_FORMAT_C0_YUV420_3P,
++ PVRSRV_PIXEL_FORMAT_C1_YUV420_2P_UV,
++ PVRSRV_PIXEL_FORMAT_C1_YUV420_2P_VU,
++ PVRSRV_PIXEL_FORMAT_C1_YUV420_3P,
++
++ PVRSRV_PIXEL_FORMAT_A2B10G10R10F,
++ PVRSRV_PIXEL_FORMAT_B8G8R8_SINT,
++ PVRSRV_PIXEL_FORMAT_PVRF32SIGNMASK,
++
++ PVRSRV_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff,
++} PVRSRV_PIXEL_FORMAT;
++
++typedef enum _PVRSRV_ALPHA_FORMAT_ {
++ PVRSRV_ALPHA_FORMAT_UNKNOWN = 0x00000000,
++ PVRSRV_ALPHA_FORMAT_PRE = 0x00000001,
++ PVRSRV_ALPHA_FORMAT_NONPRE = 0x00000002,
++ PVRSRV_ALPHA_FORMAT_MASK = 0x0000000F,
++} PVRSRV_ALPHA_FORMAT;
++
++typedef enum _PVRSRV_COLOURSPACE_FORMAT_ {
++ PVRSRV_COLOURSPACE_FORMAT_UNKNOWN = 0x00000000,
++ PVRSRV_COLOURSPACE_FORMAT_LINEAR = 0x00010000,
++ PVRSRV_COLOURSPACE_FORMAT_NONLINEAR = 0x00020000,
++ PVRSRV_COLOURSPACE_FORMAT_MASK = 0x000F0000,
++} PVRSRV_COLOURSPACE_FORMAT;
++
++
++typedef enum _PVRSRV_ROTATION_ {
++ PVRSRV_ROTATE_0 = 0,
++ PVRSRV_ROTATE_90 = 1,
++ PVRSRV_ROTATE_180 = 2,
++ PVRSRV_ROTATE_270 = 3,
++ PVRSRV_FLIP_Y
++
++} PVRSRV_ROTATION;
++
++#define PVRSRV_CREATE_SWAPCHAIN_SHARED (1<<0)
++#define PVRSRV_CREATE_SWAPCHAIN_QUERY (1<<1)
++#define PVRSRV_CREATE_SWAPCHAIN_OEMOVERLAY (1<<2)
++
++typedef struct _PVRSRV_SYNC_DATA_
++{
++
++ IMG_UINT32 ui32WriteOpsPending;
++ volatile IMG_UINT32 ui32WriteOpsComplete;
++
++
++ IMG_UINT32 ui32ReadOpsPending;
++ volatile IMG_UINT32 ui32ReadOpsComplete;
++
++
++ IMG_UINT32 ui32LastOpDumpVal;
++ IMG_UINT32 ui32LastReadOpDumpVal;
++
++} PVRSRV_SYNC_DATA;
++
++typedef struct _PVRSRV_CLIENT_SYNC_INFO_
++{
++
++ PVRSRV_SYNC_DATA *psSyncData;
++
++
++
++
++
++ IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr;
++
++
++ IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr;
++
++
++ IMG_HANDLE hMappingInfo;
++
++
++ IMG_HANDLE hKernelSyncInfo;
++
++} PVRSRV_CLIENT_SYNC_INFO, *PPVRSRV_CLIENT_SYNC_INFO;
++
++
++typedef struct PVRSRV_RESOURCE_TAG
++{
++ volatile IMG_UINT32 ui32Lock;
++ IMG_UINT32 ui32ID;
++}PVRSRV_RESOURCE;
++typedef PVRSRV_RESOURCE PVRSRV_RES_HANDLE;
++
++
++typedef IMG_VOID (*PFN_CMD_COMPLETE) (IMG_HANDLE);
++typedef IMG_VOID (**PPFN_CMD_COMPLETE) (IMG_HANDLE);
++
++typedef IMG_BOOL (*PFN_CMD_PROC) (IMG_HANDLE, IMG_UINT32, IMG_VOID*);
++typedef IMG_BOOL (**PPFN_CMD_PROC) (IMG_HANDLE, IMG_UINT32, IMG_VOID*);
++
++
++typedef struct _IMG_RECT_
++{
++ IMG_INT32 x0;
++ IMG_INT32 y0;
++ IMG_INT32 x1;
++ IMG_INT32 y1;
++}IMG_RECT;
++
++typedef struct _IMG_RECT_16_
++{
++ IMG_INT16 x0;
++ IMG_INT16 y0;
++ IMG_INT16 x1;
++ IMG_INT16 y1;
++}IMG_RECT_16;
++
++
++typedef PVRSRV_ERROR (*PFN_GET_BUFFER_ADDR)(IMG_HANDLE,
++ IMG_HANDLE,
++ IMG_SYS_PHYADDR**,
++ IMG_SIZE_T*,
++ IMG_VOID**,
++ IMG_HANDLE*,
++ IMG_BOOL*);
++
++
++typedef struct DISPLAY_DIMS_TAG
++{
++ IMG_UINT32 ui32ByteStride;
++ IMG_UINT32 ui32Width;
++ IMG_UINT32 ui32Height;
++} DISPLAY_DIMS;
++
++
++typedef struct DISPLAY_FORMAT_TAG
++{
++
++ PVRSRV_PIXEL_FORMAT pixelformat;
++} DISPLAY_FORMAT;
++
++typedef struct DISPLAY_SURF_ATTRIBUTES_TAG
++{
++
++ PVRSRV_PIXEL_FORMAT pixelformat;
++
++ DISPLAY_DIMS sDims;
++} DISPLAY_SURF_ATTRIBUTES;
++
++
++typedef struct DISPLAY_MODE_INFO_TAG
++{
++
++ PVRSRV_PIXEL_FORMAT pixelformat;
++
++ DISPLAY_DIMS sDims;
++
++ IMG_UINT32 ui32RefreshHZ;
++
++ IMG_UINT32 ui32OEMFlags;
++} DISPLAY_MODE_INFO;
++
++
++
++#define MAX_DISPLAY_NAME_SIZE (50)
++
++typedef struct DISPLAY_INFO_TAG
++{
++ IMG_UINT32 ui32MaxSwapChains;
++
++ IMG_UINT32 ui32MaxSwapChainBuffers;
++
++ IMG_UINT32 ui32MinSwapInterval;
++
++ IMG_UINT32 ui32MaxSwapInterval;
++
++ IMG_UINT32 ui32PhysicalWidthmm;
++ IMG_UINT32 ui32PhysicalHeightmm;
++
++ IMG_CHAR szDisplayName[MAX_DISPLAY_NAME_SIZE];
++
++#if defined(SUPPORT_HW_CURSOR)
++ IMG_UINT16 ui32CursorWidth;
++ IMG_UINT16 ui32CursorHeight;
++#endif
++
++} DISPLAY_INFO;
++
++typedef struct ACCESS_INFO_TAG
++{
++ IMG_UINT32 ui32Size;
++ IMG_UINT32 ui32FBPhysBaseAddress;
++ IMG_UINT32 ui32FBMemAvailable;
++ IMG_UINT32 ui32SysPhysBaseAddress;
++ IMG_UINT32 ui32SysSize;
++ IMG_UINT32 ui32DevIRQ;
++}ACCESS_INFO;
++
++
++typedef struct PVRSRV_CURSOR_SHAPE_TAG
++{
++ IMG_UINT16 ui16Width;
++ IMG_UINT16 ui16Height;
++ IMG_INT16 i16XHot;
++ IMG_INT16 i16YHot;
++
++
++ IMG_VOID* pvMask;
++ IMG_INT16 i16MaskByteStride;
++
++
++ IMG_VOID* pvColour;
++ IMG_INT16 i16ColourByteStride;
++ PVRSRV_PIXEL_FORMAT eColourPixelFormat;
++} PVRSRV_CURSOR_SHAPE;
++
++#define PVRSRV_SET_CURSOR_VISIBILITY (1<<0)
++#define PVRSRV_SET_CURSOR_POSITION (1<<1)
++#define PVRSRV_SET_CURSOR_SHAPE (1<<2)
++#define PVRSRV_SET_CURSOR_ROTATION (1<<3)
++
++typedef struct PVRSRV_CURSOR_INFO_TAG
++{
++
++ IMG_UINT32 ui32Flags;
++
++
++ IMG_BOOL bVisible;
++
++
++ IMG_INT16 i16XPos;
++ IMG_INT16 i16YPos;
++
++
++ PVRSRV_CURSOR_SHAPE sCursorShape;
++
++
++ IMG_UINT32 ui32Rotation;
++
++} PVRSRV_CURSOR_INFO;
++
++
++typedef struct _PVRSRV_REGISTRY_INFO_
++{
++ IMG_UINT32 ui32DevCookie;
++ IMG_PCHAR pszKey;
++ IMG_PCHAR pszValue;
++ IMG_PCHAR pszBuf;
++ IMG_UINT32 ui32BufSize;
++} PVRSRV_REGISTRY_INFO, *PPVRSRV_REGISTRY_INFO;
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVReadRegistryString (PPVRSRV_REGISTRY_INFO psRegInfo);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVWriteRegistryString (PPVRSRV_REGISTRY_INFO psRegInfo);
++
++
++#define PVRSRV_BC_FLAGS_YUVCSC_CONFORMANT_RANGE (0 << 0)
++#define PVRSRV_BC_FLAGS_YUVCSC_FULL_RANGE (1 << 0)
++
++#define PVRSRV_BC_FLAGS_YUVCSC_BT601 (0 << 1)
++#define PVRSRV_BC_FLAGS_YUVCSC_BT709 (1 << 1)
++
++#define MAX_BUFFER_DEVICE_NAME_SIZE (50)
++
++typedef struct BUFFER_INFO_TAG
++{
++ IMG_UINT32 ui32BufferCount;
++ IMG_UINT32 ui32BufferDeviceID;
++ PVRSRV_PIXEL_FORMAT pixelformat;
++ IMG_UINT32 ui32ByteStride;
++ IMG_UINT32 ui32Width;
++ IMG_UINT32 ui32Height;
++ IMG_UINT32 ui32Flags;
++ IMG_CHAR szDeviceName[MAX_BUFFER_DEVICE_NAME_SIZE];
++} BUFFER_INFO;
++
++typedef enum _OVERLAY_DEINTERLACE_MODE_
++{
++ WEAVE=0x0,
++ BOB_ODD,
++ BOB_EVEN,
++ BOB_EVEN_NONINTERLEAVED
++} OVERLAY_DEINTERLACE_MODE;
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/sgx_options.h b/drivers/gpu/drm/mrst/pvr/include4/sgx_options.h
+new file mode 100644
+index 0000000..69dd25a
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/sgx_options.h
+@@ -0,0 +1,224 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(DEBUG) || defined (INTERNAL_TEST)
++#define DEBUG_SET_OFFSET OPTIONS_BIT0
++#define OPTIONS_BIT0 0x1
++#else
++#define OPTIONS_BIT0 0x0
++#endif
++
++#if defined(PDUMP) || defined (INTERNAL_TEST)
++#define PDUMP_SET_OFFSET OPTIONS_BIT1
++#define OPTIONS_BIT1 (0x1 << 1)
++#else
++#define OPTIONS_BIT1 0x0
++#endif
++
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG) || defined (INTERNAL_TEST)
++#define PVRSRV_USSE_EDM_STATUS_DEBUG_SET_OFFSET OPTIONS_BIT2
++#define OPTIONS_BIT2 (0x1 << 2)
++#else
++#define OPTIONS_BIT2 0x0
++#endif
++
++#if defined(SUPPORT_HW_RECOVERY) || defined (INTERNAL_TEST)
++#define SUPPORT_HW_RECOVERY_SET_OFFSET OPTIONS_BIT3
++#define OPTIONS_BIT3 (0x1 << 3)
++#else
++#define OPTIONS_BIT3 0x0
++#endif
++
++
++
++#if defined(PVR_SECURE_HANDLES) || defined (INTERNAL_TEST)
++#define PVR_SECURE_HANDLES_SET_OFFSET OPTIONS_BIT4
++#define OPTIONS_BIT4 (0x1 << 4)
++#else
++#define OPTIONS_BIT4 0x0
++#endif
++
++#if defined(SGX_BYPASS_SYSTEM_CACHE) || defined (INTERNAL_TEST)
++#define SGX_BYPASS_SYSTEM_CACHE_SET_OFFSET OPTIONS_BIT5
++#define OPTIONS_BIT5 (0x1 << 5)
++#else
++#define OPTIONS_BIT5 0x0
++#endif
++
++#if defined(SGX_DMS_AGE_ENABLE) || defined (INTERNAL_TEST)
++#define SGX_DMS_AGE_ENABLE_SET_OFFSET OPTIONS_BIT6
++#define OPTIONS_BIT6 (0x1 << 6)
++#else
++#define OPTIONS_BIT6 0x0
++#endif
++
++#if defined(SGX_FAST_DPM_INIT) || defined (INTERNAL_TEST)
++#define SGX_FAST_DPM_INIT_SET_OFFSET OPTIONS_BIT8
++#define OPTIONS_BIT8 (0x1 << 8)
++#else
++#define OPTIONS_BIT8 0x0
++#endif
++
++#if defined(SGX_FEATURE_DCU) || defined (INTERNAL_TEST)
++#define SGX_FEATURE_DCU_SET_OFFSET OPTIONS_BIT9
++#define OPTIONS_BIT9 (0x1 << 9)
++#else
++#define OPTIONS_BIT9 0x0
++#endif
++
++#if defined(SGX_FEATURE_MP) || defined (INTERNAL_TEST)
++#define SGX_FEATURE_MP_SET_OFFSET OPTIONS_BIT10
++#define OPTIONS_BIT10 (0x1 << 10)
++#else
++#define OPTIONS_BIT10 0x0
++#endif
++
++#if defined(SGX_FEATURE_MULTITHREADED_UKERNEL) || defined (INTERNAL_TEST)
++#define SGX_FEATURE_MULTITHREADED_UKERNEL_SET_OFFSET OPTIONS_BIT11
++#define OPTIONS_BIT11 (0x1 << 11)
++#else
++#define OPTIONS_BIT11 0x0
++#endif
++
++
++
++#if defined(SGX_FEATURE_OVERLAPPED_SPM) || defined (INTERNAL_TEST)
++#define SGX_FEATURE_OVERLAPPED_SPM_SET_OFFSET OPTIONS_BIT12
++#define OPTIONS_BIT12 (0x1 << 12)
++#else
++#define OPTIONS_BIT12 0x0
++#endif
++
++
++#if defined(SGX_FEATURE_SYSTEM_CACHE) || defined (INTERNAL_TEST)
++#define SGX_FEATURE_SYSTEM_CACHE_SET_OFFSET OPTIONS_BIT13
++#define OPTIONS_BIT13 (0x1 << 13)
++#else
++#define OPTIONS_BIT13 0x0
++#endif
++
++#if defined(SGX_SUPPORT_HWPROFILING) || defined (INTERNAL_TEST)
++#define SGX_SUPPORT_HWPROFILING_SET_OFFSET OPTIONS_BIT14
++#define OPTIONS_BIT14 (0x1 << 14)
++#else
++#define OPTIONS_BIT14 0x0
++#endif
++
++
++
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT) || defined (INTERNAL_TEST)
++#define SUPPORT_ACTIVE_POWER_MANAGEMENT_SET_OFFSET OPTIONS_BIT15
++#define OPTIONS_BIT15 (0x1 << 15)
++#else
++#define OPTIONS_BIT15 0x0
++#endif
++
++#if defined(SUPPORT_DISPLAYCONTROLLER_TILING) || defined (INTERNAL_TEST)
++#define SUPPORT_DISPLAYCONTROLLER_TILING_SET_OFFSET OPTIONS_BIT16
++#define OPTIONS_BIT16 (0x1 << 16)
++#else
++#define OPTIONS_BIT16 0x0
++#endif
++
++#if defined(SUPPORT_PERCONTEXT_PB) || defined (INTERNAL_TEST)
++#define SUPPORT_PERCONTEXT_PB_SET_OFFSET OPTIONS_BIT17
++#define OPTIONS_BIT17 (0x1 << 17)
++#else
++#define OPTIONS_BIT17 0x0
++#endif
++
++#if defined(SUPPORT_SGX_HWPERF) || defined (INTERNAL_TEST)
++#define SUPPORT_SGX_HWPERF_SET_OFFSET OPTIONS_BIT18
++#define OPTIONS_BIT18 (0x1 << 18)
++#else
++#define OPTIONS_BIT18 0x0
++#endif
++
++
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE) || defined (INTERNAL_TEST)
++#define SUPPORT_SGX_MMU_DUMMY_PAGE_SET_OFFSET OPTIONS_BIT19
++#define OPTIONS_BIT19 (0x1 << 19)
++#else
++#define OPTIONS_BIT19 0x0
++#endif
++
++#if defined(SUPPORT_SGX_PRIORITY_SCHEDULING) || defined (INTERNAL_TEST)
++#define SUPPORT_SGX_PRIORITY_SCHEDULING_SET_OFFSET OPTIONS_BIT20
++#define OPTIONS_BIT20 (0x1 << 20)
++#else
++#define OPTIONS_BIT20 0x0
++#endif
++
++#if defined(SGX_LOW_LATENCY_SCHEDULING) || defined (INTERNAL_TEST)
++#define SUPPORT_SGX_LOW_LATENCY_SCHEDULING_SET_OFFSET OPTIONS_BIT21
++#define OPTIONS_BIT21 (0x1 << 21)
++#else
++#define OPTIONS_BIT21 0x0
++#endif
++
++#if defined(USE_SUPPORT_NO_TA3D_OVERLAP) || defined (INTERNAL_TEST)
++#define USE_SUPPORT_NO_TA3D_OVERLAP_SET_OFFSET OPTIONS_BIT22
++#define OPTIONS_BIT22 (0x1 << 22)
++#else
++#define OPTIONS_BIT22 0x0
++#endif
++
++
++#if defined(SGX_FEATURE_MP) || defined (INTERNAL_TEST)
++#define OPTIONS_HIGHBYTE ((SGX_FEATURE_MP_CORE_COUNT-1) << SGX_FEATURE_MP_CORE_COUNT_SET_OFFSET)
++#define SGX_FEATURE_MP_CORE_COUNT_SET_OFFSET 28UL
++#define SGX_FEATURE_MP_CORE_COUNT_SET_MASK 0xFF
++#else
++#define OPTIONS_HIGHBYTE 0x0
++#endif
++
++
++
++#define SGX_BUILD_OPTIONS \
++ OPTIONS_BIT0 |\
++ OPTIONS_BIT1 |\
++ OPTIONS_BIT2 |\
++ OPTIONS_BIT3 |\
++ OPTIONS_BIT4 |\
++ OPTIONS_BIT5 |\
++ OPTIONS_BIT6 |\
++ OPTIONS_BIT8 |\
++ OPTIONS_BIT9 |\
++ OPTIONS_BIT10 |\
++ OPTIONS_BIT11 |\
++ OPTIONS_BIT12 |\
++ OPTIONS_BIT13 |\
++ OPTIONS_BIT14 |\
++ OPTIONS_BIT15 |\
++ OPTIONS_BIT16 |\
++ OPTIONS_BIT17 |\
++ OPTIONS_BIT18 |\
++ OPTIONS_BIT19 |\
++ OPTIONS_BIT20 |\
++ OPTIONS_BIT21 |\
++ OPTIONS_HIGHBYTE
++
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/sgxapi_km.h b/drivers/gpu/drm/mrst/pvr/include4/sgxapi_km.h
+new file mode 100644
+index 0000000..6cdbc1a
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/sgxapi_km.h
+@@ -0,0 +1,323 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SGXAPI_KM_H__
++#define __SGXAPI_KM_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "sgxdefs.h"
++
++#if defined(__linux__) && !defined(USE_CODE)
++ #if defined(__KERNEL__)
++ #include <asm/unistd.h>
++ #else
++ #include <unistd.h>
++ #endif
++#endif
++
++#define SGX_UNDEFINED_HEAP_ID (~0LU)
++#define SGX_GENERAL_HEAP_ID 0
++#define SGX_TADATA_HEAP_ID 1
++#define SGX_KERNEL_CODE_HEAP_ID 2
++#define SGX_KERNEL_DATA_HEAP_ID 3
++#define SGX_PIXELSHADER_HEAP_ID 4
++#define SGX_VERTEXSHADER_HEAP_ID 5
++#define SGX_PDSPIXEL_CODEDATA_HEAP_ID 6
++#define SGX_PDSVERTEX_CODEDATA_HEAP_ID 7
++#define SGX_SYNCINFO_HEAP_ID 8
++#define SGX_3DPARAMETERS_HEAP_ID 9
++#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
++#define SGX_GENERAL_MAPPING_HEAP_ID 10
++#endif
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define SGX_2D_HEAP_ID 11
++#else
++#if defined(FIX_HW_BRN_26915)
++#define SGX_CGBUFFER_HEAP_ID 12
++#endif
++#endif
++#define SGX_MAX_HEAP_ID 13
++
++
++#define SGX_MAX_TA_STATUS_VALS 32
++#define SGX_MAX_3D_STATUS_VALS 3
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++#define SGX_MAX_TA_DST_SYNCS 1
++#define SGX_MAX_TA_SRC_SYNCS 1
++#define SGX_MAX_3D_SRC_SYNCS 4
++#else
++#define SGX_MAX_SRC_SYNCS 4
++#endif
++
++#ifdef SUPPORT_SGX_HWPERF
++
++#define PVRSRV_SGX_HWPERF_NUM_COUNTERS 9
++
++#define PVRSRV_SGX_HWPERF_INVALID 0x1
++
++#define PVRSRV_SGX_HWPERF_TRANSFER 0x2
++#define PVRSRV_SGX_HWPERF_TA 0x3
++#define PVRSRV_SGX_HWPERF_3D 0x4
++#define PVRSRV_SGX_HWPERF_2D 0x5
++
++#define PVRSRV_SGX_HWPERF_MK_EVENT 0x101
++#define PVRSRV_SGX_HWPERF_MK_TA 0x102
++#define PVRSRV_SGX_HWPERF_MK_3D 0x103
++#define PVRSRV_SGX_HWPERF_MK_2D 0x104
++
++#define PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT 28
++#define PVRSRV_SGX_HWPERF_TYPE_OP_MASK ((1UL << PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT) - 1)
++#define PVRSRV_SGX_HWPERF_TYPE_OP_START (0UL << PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT)
++#define PVRSRV_SGX_HWPERF_TYPE_OP_END (1Ul << PVRSRV_SGX_HWPERF_TYPE_STARTEND_BIT)
++
++#define PVRSRV_SGX_HWPERF_TYPE_TRANSFER_START (PVRSRV_SGX_HWPERF_TRANSFER | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_TRANSFER_END (PVRSRV_SGX_HWPERF_TRANSFER | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_TA_START (PVRSRV_SGX_HWPERF_TA | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_TA_END (PVRSRV_SGX_HWPERF_TA | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_3D_START (PVRSRV_SGX_HWPERF_3D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_3D_END (PVRSRV_SGX_HWPERF_3D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_2D_START (PVRSRV_SGX_HWPERF_2D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_2D_END (PVRSRV_SGX_HWPERF_2D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++
++#define PVRSRV_SGX_HWPERF_TYPE_MK_EVENT_START (PVRSRV_SGX_HWPERF_MK_EVENT | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_EVENT_END (PVRSRV_SGX_HWPERF_MK_EVENT | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_TA_START (PVRSRV_SGX_HWPERF_MK_TA | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_TA_END (PVRSRV_SGX_HWPERF_MK_TA | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_3D_START (PVRSRV_SGX_HWPERF_MK_3D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_3D_END (PVRSRV_SGX_HWPERF_MK_3D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_2D_START (PVRSRV_SGX_HWPERF_MK_2D | PVRSRV_SGX_HWPERF_TYPE_OP_START)
++#define PVRSRV_SGX_HWPERF_TYPE_MK_2D_END (PVRSRV_SGX_HWPERF_MK_2D | PVRSRV_SGX_HWPERF_TYPE_OP_END)
++
++#define PVRSRV_SGX_HWPERF_OFF (0x0)
++#define PVRSRV_SGX_HWPERF_GRAPHICS_ON (1UL << 0)
++#define PVRSRV_SGX_HWPERF_MK_EXECUTION_ON (1UL << 1)
++
++
++typedef struct _PVRSRV_SGX_HWPERF_CB_ENTRY_
++{
++ IMG_UINT32 ui32FrameNo;
++ IMG_UINT32 ui32Type;
++ IMG_UINT32 ui32Ordinal;
++ IMG_UINT32 ui32Clocksx16;
++ IMG_UINT32 ui32Counters[PVRSRV_SGX_HWPERF_NUM_COUNTERS];
++} PVRSRV_SGX_HWPERF_CB_ENTRY;
++
++
++typedef struct _PVRSRV_SGX_HWPERF_CBDATA_
++{
++ IMG_UINT32 ui32FrameNo;
++ IMG_UINT32 ui32Type;
++ IMG_UINT32 ui32StartTimeWraps;
++ IMG_UINT32 ui32StartTime;
++ IMG_UINT32 ui32EndTimeWraps;
++ IMG_UINT32 ui32EndTime;
++ IMG_UINT32 ui32ClockSpeed;
++ IMG_UINT32 ui32TimeMax;
++} PVRSRV_SGX_HWPERF_CBDATA;
++
++
++typedef struct _SGX_MISC_INFO_HWPERF_RETRIEVE_CB
++{
++ PVRSRV_SGX_HWPERF_CBDATA* psHWPerfData;
++ IMG_UINT32 ui32ArraySize;
++ IMG_UINT32 ui32DataCount;
++ IMG_UINT32 ui32Time;
++} SGX_MISC_INFO_HWPERF_RETRIEVE_CB;
++#endif
++
++
++typedef struct _CTL_STATUS_
++{
++ IMG_DEV_VIRTADDR sStatusDevAddr;
++ IMG_UINT32 ui32StatusValue;
++} CTL_STATUS;
++
++
++typedef enum _SGX_MISC_INFO_REQUEST_
++{
++ SGX_MISC_INFO_REQUEST_CLOCKSPEED = 0,
++ SGX_MISC_INFO_REQUEST_SGXREV,
++ SGX_MISC_INFO_REQUEST_DRIVER_SGXREV,
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++ SGX_MISC_INFO_REQUEST_MEMREAD,
++#endif
++#if defined(SUPPORT_SGX_HWPERF)
++ SGX_MISC_INFO_REQUEST_SET_HWPERF_STATUS,
++ SGX_MISC_INFO_REQUEST_HWPERF_CB_ON,
++ SGX_MISC_INFO_REQUEST_HWPERF_CB_OFF,
++ SGX_MISC_INFO_REQUEST_HWPERF_RETRIEVE_CB,
++#endif
++#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
++ SGX_MISC_INFO_REQUEST_SET_BREAKPOINT,
++#endif
++ SGX_MISC_INFO_DUMP_DEBUG_INFO,
++ SGX_MISC_INFO_PANIC,
++ SGX_MISC_INFO_REQUEST_FORCE_I16 = 0x7fff
++} SGX_MISC_INFO_REQUEST;
++
++
++typedef struct _PVRSRV_SGX_MISCINFO_FEATURES
++{
++ IMG_UINT32 ui32CoreRev;
++ IMG_UINT32 ui32CoreID;
++ IMG_UINT32 ui32DDKVersion;
++ IMG_UINT32 ui32DDKBuild;
++ IMG_UINT32 ui32CoreIdSW;
++ IMG_UINT32 ui32CoreRevSW;
++ IMG_UINT32 ui32BuildOptions;
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++ IMG_UINT32 ui32DeviceMemValue;
++#endif
++} PVRSRV_SGX_MISCINFO_FEATURES;
++
++
++#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
++typedef struct _SGX_BREAKPOINT_INFO
++{
++
++ IMG_BOOL bBPEnable;
++
++
++
++ IMG_UINT32 ui32BPIndex;
++
++ IMG_DEV_VIRTADDR sBPDevVAddr;
++} SGX_BREAKPOINT_INFO;
++#endif
++
++typedef struct _SGX_MISC_INFO_
++{
++ SGX_MISC_INFO_REQUEST eRequest;
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++ IMG_DEV_VIRTADDR sDevVAddr;
++ IMG_HANDLE hDevMemContext;
++#endif
++ union
++ {
++ IMG_UINT32 reserved;
++ PVRSRV_SGX_MISCINFO_FEATURES sSGXFeatures;
++ IMG_UINT32 ui32SGXClockSpeed;
++#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
++ SGX_BREAKPOINT_INFO sSGXBreakpointInfo;
++#endif
++#ifdef SUPPORT_SGX_HWPERF
++ IMG_UINT32 ui32NewHWPerfStatus;
++ SGX_MISC_INFO_HWPERF_RETRIEVE_CB sRetrieveCB;
++#endif
++ } uData;
++} SGX_MISC_INFO;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define PVRSRV_MAX_BLT_SRC_SYNCS 3
++#endif
++
++
++#define SGX_KICKTA_DUMPBITMAP_MAX_NAME_LENGTH 256
++
++typedef struct _SGX_KICKTA_DUMPBITMAP_
++{
++ IMG_DEV_VIRTADDR sDevBaseAddr;
++ IMG_UINT32 ui32Flags;
++ IMG_UINT32 ui32Width;
++ IMG_UINT32 ui32Height;
++ IMG_UINT32 ui32Stride;
++ IMG_UINT32 ui32PDUMPFormat;
++ IMG_UINT32 ui32BytesPP;
++ IMG_CHAR pszName[SGX_KICKTA_DUMPBITMAP_MAX_NAME_LENGTH];
++} SGX_KICKTA_DUMPBITMAP, *PSGX_KICKTA_DUMPBITMAP;
++
++#define PVRSRV_SGX_PDUMP_CONTEXT_MAX_BITMAP_ARRAY_SIZE (16)
++
++typedef struct _PVRSRV_SGX_PDUMP_CONTEXT_
++{
++
++ IMG_UINT32 ui32CacheControl;
++
++} PVRSRV_SGX_PDUMP_CONTEXT;
++
++
++typedef struct _SGX_KICKTA_DUMP_ROFF_
++{
++ IMG_HANDLE hKernelMemInfo;
++ IMG_UINT32 uiAllocIndex;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32Value;
++ IMG_PCHAR pszName;
++} SGX_KICKTA_DUMP_ROFF, *PSGX_KICKTA_DUMP_ROFF;
++
++typedef struct _SGX_KICKTA_DUMP_BUFFER_
++{
++ IMG_UINT32 ui32SpaceUsed;
++ IMG_UINT32 ui32Start;
++ IMG_UINT32 ui32End;
++ IMG_UINT32 ui32BufferSize;
++ IMG_UINT32 ui32BackEndLength;
++ IMG_UINT32 uiAllocIndex;
++ IMG_HANDLE hKernelMemInfo;
++ IMG_PVOID pvLinAddr;
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ IMG_HANDLE hCtrlKernelMemInfo;
++ IMG_DEV_VIRTADDR sCtrlDevVAddr;
++#endif
++ IMG_PCHAR pszName;
++} SGX_KICKTA_DUMP_BUFFER, *PSGX_KICKTA_DUMP_BUFFER;
++
++#ifdef PDUMP
++typedef struct _SGX_KICKTA_PDUMP_
++{
++
++ PSGX_KICKTA_DUMPBITMAP psPDumpBitmapArray;
++ IMG_UINT32 ui32PDumpBitmapSize;
++
++
++ PSGX_KICKTA_DUMP_BUFFER psBufferArray;
++ IMG_UINT32 ui32BufferArraySize;
++
++
++ PSGX_KICKTA_DUMP_ROFF psROffArray;
++ IMG_UINT32 ui32ROffArraySize;
++} SGX_KICKTA_PDUMP, *PSGX_KICKTA_PDUMP;
++#endif
++
++#if defined(TRANSFER_QUEUE)
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define SGX_MAX_2D_BLIT_CMD_SIZE 26
++#define SGX_MAX_2D_SRC_SYNC_OPS 3
++#endif
++#define SGX_MAX_TRANSFER_STATUS_VALS 2
++#define SGX_MAX_TRANSFER_SYNC_OPS 5
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/include4/sgxscript.h b/drivers/gpu/drm/mrst/pvr/include4/sgxscript.h
+new file mode 100644
+index 0000000..fb5efbb
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/include4/sgxscript.h
+@@ -0,0 +1,81 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SGXSCRIPT_H__
++#define __SGXSCRIPT_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#define SGX_MAX_INIT_COMMANDS 64
++#define SGX_MAX_DEINIT_COMMANDS 16
++
++typedef enum _SGX_INIT_OPERATION
++{
++ SGX_INIT_OP_ILLEGAL = 0,
++ SGX_INIT_OP_WRITE_HW_REG,
++#if defined(PDUMP)
++ SGX_INIT_OP_PDUMP_HW_REG,
++#endif
++ SGX_INIT_OP_HALT
++} SGX_INIT_OPERATION;
++
++typedef union _SGX_INIT_COMMAND
++{
++ SGX_INIT_OPERATION eOp;
++ struct {
++ SGX_INIT_OPERATION eOp;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32Value;
++ } sWriteHWReg;
++#if defined(PDUMP)
++ struct {
++ SGX_INIT_OPERATION eOp;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32Value;
++ } sPDumpHWReg;
++#endif
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++ struct {
++ SGX_INIT_OPERATION eOp;
++ } sWorkaroundBRN22997;
++#endif
++} SGX_INIT_COMMAND;
++
++typedef struct _SGX_INIT_SCRIPTS_
++{
++ SGX_INIT_COMMAND asInitCommandsPart1[SGX_MAX_INIT_COMMANDS];
++ SGX_INIT_COMMAND asInitCommandsPart2[SGX_MAX_INIT_COMMANDS];
++ SGX_INIT_COMMAND asDeinitCommands[SGX_MAX_DEINIT_COMMANDS];
++} SGX_INIT_SCRIPTS;
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/.gitignore b/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/.gitignore
+new file mode 100644
+index 0000000..f558f8b
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/.gitignore
+@@ -0,0 +1,6 @@
++bin_pc_i686*
++tmp_pc_i686*
++host_pc_i686*
++binary_pc_i686*
++*.o
++*.o.cmd
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/makefile.linux.common b/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/makefile.linux.common
+new file mode 100644
+index 0000000..c3ab6f4
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/makefile.linux.common
+@@ -0,0 +1,41 @@
++#
++# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++#
++# This program is free software; you can redistribute it and/or modify it
++# under the terms and conditions of the GNU General Public License,
++# version 2, as published by the Free Software Foundation.
++#
++# This program is distributed in the hope it will be useful but, except
++# as otherwise stated in writing, without any warranty; without even the
++# implied warranty of merchantability or fitness for a particular purpose.
++# See the GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License along with
++# this program; if not, write to the Free Software Foundation, Inc.,
++# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++#
++# The full GNU General Public License is included in this distribution in
++# the file called "COPYING".
++#
++# Contact Information:
++# Imagination Technologies Ltd. <gpl-support@imgtec.com>
++# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++#
++#
++#
++
++ifeq ($(SUPPORT_DRI_DRM),1)
++DISPLAY_CONTROLLER_SOURCES_ROOT = $(KBUILDROOT)/$(DISPLAY_CONTROLLER_DIR)
++else
++DISPLAY_CONTROLLER_SOURCES_ROOT = ..
++endif
++
++INCLUDES += -I$(EURASIAROOT)/include4 \
++ -I$(EURASIAROOT)/services4/include \
++ -I$(EURASIAROOT)/services4/system/$(PVR_SYSTEM) \
++ -I$(EURASIAROOT)/services4/system/include \
++ -I$(EURASIAROOT)/services4/srvkm/env/linux/mrst
++
++SOURCES += $(DISPLAY_CONTROLLER_SOURCES_ROOT)/mrstlfb_displayclass.c \
++ $(DISPLAY_CONTROLLER_SOURCES_ROOT)/mrstlfb_linux.c
++MODULE_CFLAGS += -DPVR_MRST_FB_SET_PAR_ON_INIT
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb.h b/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb.h
+new file mode 100644
+index 0000000..9f4a116
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb.h
+@@ -0,0 +1,295 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __MRSTLFB_H__
++#define __MRSTLFB_H__
++
++#include <drm/drmP.h>
++#include "psb_intel_reg.h"
++
++#define MRST_USING_INTERRUPTS
++
++#define PSB_HWSTAM 0x2098
++#define PSB_INSTPM 0x20C0
++#define PSB_INT_IDENTITY_R 0x20A4
++#define _PSB_VSYNC_PIPEB_FLAG (1<<5)
++#define _PSB_VSYNC_PIPEA_FLAG (1<<7)
++#define _PSB_IRQ_SGX_FLAG (1<<18)
++#define _PSB_IRQ_MSVDX_FLAG (1<<19)
++#define _LNC_IRQ_TOPAZ_FLAG (1<<20)
++#define PSB_INT_MASK_R 0x20A8
++#define PSB_INT_ENABLE_R 0x20A0
++
++/* IPC message and command defines used to enable/disable mipi panel voltages */
++#define IPC_MSG_PANEL_ON_OFF 0xE9
++#define IPC_CMD_PANEL_ON 1
++#define IPC_CMD_PANEL_OFF 0
++
++typedef void * MRST_HANDLE;
++
++typedef enum tag_mrst_bool
++{
++ MRST_FALSE = 0,
++ MRST_TRUE = 1,
++} MRST_BOOL, *MRST_PBOOL;
++
++typedef IMG_INT (* MRSTLFB_VSYNC_ISR_PFN)(struct drm_device* psDrmDevice, int iPipe);
++
++extern IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable);
++
++
++typedef struct MRSTLFB_BUFFER_TAG
++{
++
++ IMG_UINT32 ui32BufferSize;
++ union {
++
++ IMG_SYS_PHYADDR *psNonCont;
++
++ IMG_SYS_PHYADDR sCont;
++ } uSysAddr;
++
++ IMG_DEV_VIRTADDR sDevVAddr;
++
++ IMG_CPU_VIRTADDR sCPUVAddr;
++
++ PVRSRV_SYNC_DATA *psSyncData;
++
++ IMG_BOOL bIsContiguous;
++
++ IMG_BOOL bIsAllocated;
++
++ IMG_UINT32 ui32OwnerTaskID;
++
++ struct MRSTLFB_BUFFER_TAG *psNext;
++} MRSTLFB_BUFFER;
++
++typedef struct MRSTLFB_VSYNC_FLIP_ITEM_TAG
++{
++
++
++
++ MRST_HANDLE hCmdComplete;
++
++ unsigned long ulSwapInterval;
++
++ MRST_BOOL bValid;
++
++ MRST_BOOL bFlipped;
++
++ MRST_BOOL bCmdCompleted;
++
++
++
++
++
++ IMG_DEV_VIRTADDR sDevVAddr;
++} MRSTLFB_VSYNC_FLIP_ITEM;
++
++typedef struct MRSTLFB_SWAPCHAIN_TAG
++{
++
++ unsigned long ulBufferCount;
++
++ MRSTLFB_BUFFER **ppsBuffer;
++
++ MRSTLFB_VSYNC_FLIP_ITEM *psVSyncFlips;
++
++
++ unsigned long ulInsertIndex;
++
++
++ unsigned long ulRemoveIndex;
++
++
++ PVRSRV_DC_DISP2SRV_KMJTABLE *psPVRJTable;
++
++
++ MRST_BOOL bFlushCommands;
++
++
++ unsigned long ulSetFlushStateRefCount;
++
++
++ MRST_BOOL bBlanked;
++
++
++ spinlock_t *psSwapChainLock;
++
++
++ struct drm_driver *psDrmDriver;
++
++
++ struct drm_device *psDrmDev;
++
++ struct MRSTLFB_SWAPCHAIN_TAG *psNext;
++
++ struct MRSTLFB_DEVINFO_TAG *psDevInfo;
++
++} MRSTLFB_SWAPCHAIN;
++
++typedef struct MRSTLFB_FBINFO_TAG
++{
++ unsigned long ulFBSize;
++ unsigned long ulBufferSize;
++ unsigned long ulRoundedBufferSize;
++ unsigned long ulWidth;
++ unsigned long ulHeight;
++ unsigned long ulByteStride;
++
++
++
++ IMG_SYS_PHYADDR sSysAddr;
++ IMG_CPU_VIRTADDR sCPUVAddr;
++ IMG_DEV_VIRTADDR sDevVAddr;
++
++
++ PVRSRV_PIXEL_FORMAT ePixelFormat;
++}MRSTLFB_FBINFO;
++
++/**
++ * If DRI is enable then extemding drm_device
++ */
++typedef struct MRSTLFB_DEVINFO_TAG
++{
++ unsigned long ulDeviceID;
++
++ struct drm_device *psDrmDevice;
++
++ MRSTLFB_BUFFER sSystemBuffer;
++
++
++ PVRSRV_DC_DISP2SRV_KMJTABLE sPVRJTable;
++
++
++ PVRSRV_DC_SRV2DISP_KMJTABLE sDCJTable;
++
++
++ unsigned long ulRefCount;
++
++
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++
++ IMG_UINT32 ui32SwapChainNum;
++
++ IMG_UINT32 ui32SwapChainIdCounter;
++
++
++ void *pvRegs;
++
++
++ MRST_BOOL bFlushCommands;
++
++
++ struct fb_info *psLINFBInfo;
++
++
++ struct notifier_block sLINNotifBlock;
++
++
++ MRST_BOOL bDeviceSuspended;
++
++
++ spinlock_t sSwapChainLock;
++
++
++
++
++
++ IMG_DEV_VIRTADDR sDisplayDevVAddr;
++
++ DISPLAY_INFO sDisplayInfo;
++
++
++ DISPLAY_FORMAT sDisplayFormat;
++
++
++ DISPLAY_DIMS sDisplayDim;
++
++ IMG_UINT32 ui32MainPipe;
++
++} MRSTLFB_DEVINFO;
++
++#if 0
++#define MRSTLFB_PAGE_SIZE 4096
++#define MRSTLFB_PAGE_MASK (MRSTLFB_PAGE_SIZE - 1)
++#define MRSTLFB_PAGE_TRUNC (~MRSTLFB_PAGE_MASK)
++
++#define MRSTLFB_PAGE_ROUNDUP(x) (((x) + MRSTLFB_PAGE_MASK) & MRSTLFB_PAGE_TRUNC)
++#endif
++
++#ifdef DEBUG
++#define DEBUG_PRINTK(x) printk x
++#else
++#define DEBUG_PRINTK(x)
++#endif
++
++#define DISPLAY_DEVICE_NAME "PowerVR Moorestown Linux Display Driver"
++#define DRVNAME "mrstlfb"
++#define DEVNAME DRVNAME
++#define DRIVER_PREFIX DRVNAME
++
++typedef enum _MRST_ERROR_
++{
++ MRST_OK = 0,
++ MRST_ERROR_GENERIC = 1,
++ MRST_ERROR_OUT_OF_MEMORY = 2,
++ MRST_ERROR_TOO_FEW_BUFFERS = 3,
++ MRST_ERROR_INVALID_PARAMS = 4,
++ MRST_ERROR_INIT_FAILURE = 5,
++ MRST_ERROR_CANT_REGISTER_CALLBACK = 6,
++ MRST_ERROR_INVALID_DEVICE = 7,
++ MRST_ERROR_DEVICE_REGISTER_FAILED = 8
++} MRST_ERROR;
++
++
++#ifndef UNREFERENCED_PARAMETER
++#define UNREFERENCED_PARAMETER(param) (param) = (param)
++#endif
++
++MRST_ERROR MRSTLFBInit(struct drm_device * dev);
++MRST_ERROR MRSTLFBDeinit(void);
++
++MRST_ERROR MRSTLFBAllocBuffer(struct MRSTLFB_DEVINFO_TAG *psDevInfo, IMG_UINT32 ui32Size, MRSTLFB_BUFFER **ppBuffer);
++MRST_ERROR MRSTLFBFreeBuffer(struct MRSTLFB_DEVINFO_TAG *psDevInfo, MRSTLFB_BUFFER **ppBuffer);
++
++void *MRSTLFBAllocKernelMem(unsigned long ulSize);
++void MRSTLFBFreeKernelMem(void *pvMem);
++MRST_ERROR MRSTLFBGetLibFuncAddr(char *szFunctionName, PFN_DC_GET_PVRJTABLE *ppfnFuncTable);
++MRST_ERROR MRSTLFBInstallVSyncISR (MRSTLFB_DEVINFO *psDevInfo, MRSTLFB_VSYNC_ISR_PFN pVsyncHandler);
++MRST_ERROR MRSTLFBUninstallVSyncISR(MRSTLFB_DEVINFO *psDevInfo);
++MRST_BOOL MRSTLFBVSyncIHandler(MRSTLFB_SWAPCHAIN *psSwapChain);
++
++void MRSTLFBEnableVSyncInterrupt(MRSTLFB_DEVINFO *psDevInfo);
++void MRSTLFBDisableVSyncInterrupt(MRSTLFB_DEVINFO *psDevInfo);
++
++void MRSTLFBEnableDisplayRegisterAccess(void);
++void MRSTLFBDisableDisplayRegisterAccess(void);
++
++void MRSTLFBFlip(MRSTLFB_DEVINFO *psDevInfo, unsigned long uiAddr);
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_displayclass.c b/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_displayclass.c
+new file mode 100644
+index 0000000..adca7e2
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_displayclass.c
+@@ -0,0 +1,2056 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/console.h>
++#include <linux/fb.h>
++#include <linux/module.h>
++#include <linux/string.h>
++#include <linux/notifier.h>
++#include <linux/spinlock.h>
++#include <asm/ipc_defs.h>
++
++#include "img_defs.h"
++#include "servicesext.h"
++#include "kerneldisplay.h"
++#include "mrstlfb.h"
++
++#include "psb_fb.h"
++#include "psb_drv.h"
++#include "ospm_power.h"
++
++#if !defined(SUPPORT_DRI_DRM)
++#error "SUPPORT_DRI_DRM must be set"
++#endif
++
++IMG_UINT32 gui32MRSTDisplayDeviceID;
++
++extern void MRSTLFBVSyncWriteReg(MRSTLFB_DEVINFO * psDevinfo, unsigned long ulOffset, unsigned long ulValue);
++extern unsigned long MRSTLFBVSyncReadReg(MRSTLFB_DEVINFO * psDevinfo, unsigned long ulOffset);
++
++PVRSRV_ERROR MRSTLFBPrePowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++PVRSRV_ERROR MRSTLFBPostPowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++#ifdef MODESET_640x480
++extern int psb_to_640 (struct fb_info* info);
++#endif
++
++extern void mrst_init_LGE_MIPI(struct drm_device *dev);
++extern void mrst_init_NSC_MIPI_bridge(struct drm_device *dev);
++
++struct psbfb_par {
++ struct drm_device *dev;
++ void *psbfb;
++
++ int dpms_state;
++
++ int crtc_count;
++
++ uint32_t crtc_ids[2];
++};
++
++extern void* psbfb_vdc_reg(struct drm_device* dev);
++
++static void *gpvAnchor;
++
++
++#define MRSTLFB_COMMAND_COUNT 1
++
++static PFN_DC_GET_PVRJTABLE pfnGetPVRJTable = 0;
++
++static MRSTLFB_DEVINFO * GetAnchorPtr(void)
++{
++ return (MRSTLFB_DEVINFO *)gpvAnchor;
++}
++
++static void SetAnchorPtr(MRSTLFB_DEVINFO *psDevInfo)
++{
++ gpvAnchor = (void*)psDevInfo;
++}
++
++
++static void FlushInternalVSyncQueue(MRSTLFB_SWAPCHAIN *psSwapChain)
++{
++ MRSTLFB_VSYNC_FLIP_ITEM *psFlipItem;
++ unsigned long ulMaxIndex;
++ unsigned long i;
++
++
++ psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulRemoveIndex];
++ ulMaxIndex = psSwapChain->ulBufferCount - 1;
++
++ for(i = 0; i < psSwapChain->ulBufferCount; i++)
++ {
++ if (psFlipItem->bValid == MRST_FALSE)
++ {
++ continue;
++ }
++
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": FlushInternalVSyncQueue: Flushing swap buffer (index %lu)\n", psSwapChain->ulRemoveIndex));
++
++ if(psFlipItem->bFlipped == MRST_FALSE)
++ {
++
++ MRSTLFBFlip(psSwapChain->psDevInfo, (unsigned long)psFlipItem->sDevVAddr.uiAddr);
++ }
++
++ if(psFlipItem->bCmdCompleted == MRST_FALSE)
++ {
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX ": FlushInternalVSyncQueue: Calling command complete for swap buffer (index %lu)\n", psSwapChain->ulRemoveIndex));
++
++ psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete((IMG_HANDLE)psFlipItem->hCmdComplete, IMG_TRUE);
++ }
++
++
++ psSwapChain->ulRemoveIndex++;
++
++ if(psSwapChain->ulRemoveIndex > ulMaxIndex)
++ {
++ psSwapChain->ulRemoveIndex = 0;
++ }
++
++
++ psFlipItem->bFlipped = MRST_FALSE;
++ psFlipItem->bCmdCompleted = MRST_FALSE;
++ psFlipItem->bValid = MRST_FALSE;
++
++
++ psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulRemoveIndex];
++ }
++
++ psSwapChain->ulInsertIndex = 0;
++ psSwapChain->ulRemoveIndex = 0;
++}
++
++static void SetFlushStateInternalNoLock(MRSTLFB_DEVINFO* psDevInfo,
++ MRST_BOOL bFlushState)
++{
++ MRSTLFB_SWAPCHAIN *psSwapChain = psDevInfo->psSwapChain;
++
++ if (psSwapChain == NULL)
++ {
++ return;
++ }
++
++ if (bFlushState)
++ {
++ if (psSwapChain->ulSetFlushStateRefCount == 0)
++ {
++ MRSTLFBDisableVSyncInterrupt(psDevInfo);
++ psSwapChain->bFlushCommands = MRST_TRUE;
++ FlushInternalVSyncQueue(psSwapChain);
++ }
++ psSwapChain->ulSetFlushStateRefCount++;
++ }
++ else
++ {
++ if (psSwapChain->ulSetFlushStateRefCount != 0)
++ {
++ psSwapChain->ulSetFlushStateRefCount--;
++ if (psSwapChain->ulSetFlushStateRefCount == 0)
++ {
++ psSwapChain->bFlushCommands = MRST_FALSE;
++ MRSTLFBEnableVSyncInterrupt(psDevInfo);
++ }
++ }
++ }
++}
++
++static IMG_VOID SetFlushStateInternal(MRSTLFB_DEVINFO* psDevInfo,
++ MRST_BOOL bFlushState)
++{
++ unsigned long ulLockFlags;
++
++ spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++ SetFlushStateInternalNoLock(psDevInfo, bFlushState);
++
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++}
++
++static void SetFlushStateExternal(MRSTLFB_DEVINFO* psDevInfo,
++ MRST_BOOL bFlushState)
++{
++ unsigned long ulLockFlags;
++
++ spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++
++ if (psDevInfo->bFlushCommands != bFlushState)
++ {
++ psDevInfo->bFlushCommands = bFlushState;
++ SetFlushStateInternalNoLock(psDevInfo, bFlushState);
++ }
++
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++}
++
++static IMG_VOID SetDCState(IMG_HANDLE hDevice, IMG_UINT32 ui32State)
++{
++ MRSTLFB_DEVINFO *psDevInfo = (MRSTLFB_DEVINFO *)hDevice;
++
++ switch (ui32State)
++ {
++ case DC_STATE_FLUSH_COMMANDS:
++ SetFlushStateExternal(psDevInfo, MRST_TRUE);
++ break;
++ case DC_STATE_NO_FLUSH_COMMANDS:
++ SetFlushStateExternal(psDevInfo, MRST_FALSE);
++ break;
++ default:
++ break;
++ }
++
++ return;
++}
++
++static int FrameBufferEvents(struct notifier_block *psNotif,
++ unsigned long event, void *data)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++ struct fb_event *psFBEvent = (struct fb_event *)data;
++ MRST_BOOL bBlanked;
++
++
++ if (event != FB_EVENT_BLANK)
++ {
++ return 0;
++ }
++
++ psDevInfo = GetAnchorPtr();
++ psSwapChain = psDevInfo->psSwapChain;
++
++ bBlanked = (*(IMG_INT *)psFBEvent->data != 0) ? MRST_TRUE: MRST_FALSE;
++
++ if (bBlanked != psSwapChain->bBlanked)
++ {
++ psSwapChain->bBlanked = bBlanked;
++
++ if (bBlanked)
++ {
++
++ SetFlushStateInternal(psDevInfo, MRST_TRUE);
++ }
++ else
++ {
++
++ SetFlushStateInternal(psDevInfo, MRST_FALSE);
++ }
++ }
++
++ return 0;
++}
++
++
++static MRST_ERROR UnblankDisplay(MRSTLFB_DEVINFO *psDevInfo)
++{
++ int res;
++
++ acquire_console_sem();
++ res = fb_blank(psDevInfo->psLINFBInfo, 0);
++ release_console_sem();
++ if (res != 0)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": fb_blank failed (%d)", res);
++ return (MRST_ERROR_GENERIC);
++ }
++
++ return (MRST_OK);
++}
++
++static MRST_ERROR EnableLFBEventNotification(MRSTLFB_DEVINFO *psDevInfo)
++{
++ int res;
++ MRST_ERROR eError;
++
++
++ memset(&psDevInfo->sLINNotifBlock, 0, sizeof(psDevInfo->sLINNotifBlock));
++
++ psDevInfo->sLINNotifBlock.notifier_call = FrameBufferEvents;
++
++ res = fb_register_client(&psDevInfo->sLINNotifBlock);
++ if (res != 0)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": fb_register_client failed (%d)", res);
++
++ return (MRST_ERROR_GENERIC);
++ }
++
++ eError = UnblankDisplay(psDevInfo);
++ if (eError != MRST_OK)
++ {
++ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX
++ ": UnblankDisplay failed (%d)", eError));
++ return eError;
++ }
++
++ return (MRST_OK);
++}
++
++static MRST_ERROR DisableLFBEventNotification(MRSTLFB_DEVINFO *psDevInfo)
++{
++ int res;
++
++
++ res = fb_unregister_client(&psDevInfo->sLINNotifBlock);
++ if (res != 0)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": fb_unregister_client failed (%d)", res);
++ return (MRST_ERROR_GENERIC);
++ }
++
++ return (MRST_OK);
++}
++
++static PVRSRV_ERROR OpenDCDevice(IMG_UINT32 ui32DeviceID,
++ IMG_HANDLE *phDevice,
++ PVRSRV_SYNC_DATA* psSystemBufferSyncData)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRST_ERROR eError;
++
++ UNREFERENCED_PARAMETER(ui32DeviceID);
++
++ psDevInfo = GetAnchorPtr();
++
++
++ psDevInfo->sSystemBuffer.psSyncData = psSystemBufferSyncData;
++
++ eError = UnblankDisplay(psDevInfo);
++ if (eError != MRST_OK)
++ {
++ DEBUG_PRINTK((KERN_WARNING DRIVER_PREFIX
++ ": UnblankDisplay failed (%d)", eError));
++ return (PVRSRV_ERROR_GENERIC);
++ }
++
++
++ *phDevice = (IMG_HANDLE)psDevInfo;
++
++ return (PVRSRV_OK);
++}
++
++static PVRSRV_ERROR CloseDCDevice(IMG_HANDLE hDevice)
++{
++ UNREFERENCED_PARAMETER(hDevice);
++
++ return (PVRSRV_OK);
++}
++
++static PVRSRV_ERROR EnumDCFormats(IMG_HANDLE hDevice,
++ IMG_UINT32 *pui32NumFormats,
++ DISPLAY_FORMAT *psFormat)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++
++ if(!hDevice || !pui32NumFormats)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++
++ *pui32NumFormats = 1;
++
++ if(psFormat)
++ {
++ psFormat[0] = psDevInfo->sDisplayFormat;
++ }
++
++ return (PVRSRV_OK);
++}
++
++static PVRSRV_ERROR EnumDCDims(IMG_HANDLE hDevice,
++ DISPLAY_FORMAT *psFormat,
++ IMG_UINT32 *pui32NumDims,
++ DISPLAY_DIMS *psDim)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++
++ if(!hDevice || !psFormat || !pui32NumDims)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++
++ *pui32NumDims = 1;
++
++
++ if(psDim)
++ {
++ psDim[0] = psDevInfo->sDisplayDim;
++ }
++
++ return (PVRSRV_OK);
++}
++
++
++static PVRSRV_ERROR GetDCSystemBuffer(IMG_HANDLE hDevice, IMG_HANDLE *phBuffer)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++
++ if(!hDevice || !phBuffer)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++
++
++
++ *phBuffer = (IMG_HANDLE)&psDevInfo->sSystemBuffer;
++
++ return (PVRSRV_OK);
++}
++
++
++static PVRSRV_ERROR GetDCInfo(IMG_HANDLE hDevice, DISPLAY_INFO *psDCInfo)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++
++ if(!hDevice || !psDCInfo)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++
++ *psDCInfo = psDevInfo->sDisplayInfo;
++
++ return (PVRSRV_OK);
++}
++
++static PVRSRV_ERROR GetDCBufferAddr(IMG_HANDLE hDevice,
++ IMG_HANDLE hBuffer,
++ IMG_SYS_PHYADDR **ppsSysAddr,
++ IMG_UINT32 *pui32ByteSize,
++ IMG_VOID **ppvCpuVAddr,
++ IMG_HANDLE *phOSMapInfo,
++ IMG_BOOL *pbIsContiguous)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRSTLFB_BUFFER *psSystemBuffer;
++
++ if(!hDevice)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++
++ if(!hBuffer)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++ psSystemBuffer = (MRSTLFB_BUFFER *)hBuffer;
++
++ if (!ppsSysAddr)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ if( psSystemBuffer->bIsContiguous )
++ *ppsSysAddr = &psSystemBuffer->uSysAddr.sCont;
++ else
++ *ppsSysAddr = psSystemBuffer->uSysAddr.psNonCont;
++
++ if (!pui32ByteSize)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++ *pui32ByteSize = psSystemBuffer->ui32BufferSize;
++
++ if (ppvCpuVAddr)
++ {
++ *ppvCpuVAddr = psSystemBuffer->sCPUVAddr;
++ }
++
++ if (phOSMapInfo)
++ {
++ *phOSMapInfo = (IMG_HANDLE)0;
++ }
++
++ if (pbIsContiguous)
++ {
++ *pbIsContiguous = psSystemBuffer->bIsContiguous;
++ }
++
++ return (PVRSRV_OK);
++}
++
++
++static MRST_ERROR MRSTLFBEnableSwapChains(MRSTLFB_DEVINFO *psDevInfo)
++{
++ unsigned long ulLockFlags;
++
++ spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++ if(!psDevInfo->bFlushCommands)
++ MRSTLFBEnableVSyncInterrupt(psDevInfo);
++
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++ if (EnableLFBEventNotification(psDevInfo)!= MRST_OK)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX ": Couldn't enable framebuffer event notification\n");
++ }
++
++ return MRST_OK;
++}
++
++static MRST_ERROR MRSTLFBDisableSwapChains(MRSTLFB_DEVINFO *psDevInfo)
++{
++ MRST_ERROR eError;
++ unsigned long ulLockFlags;
++
++ eError = DisableLFBEventNotification(psDevInfo);
++ if (eError != MRST_OK)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX ": Couldn't disable framebuffer event notification\n");
++ }
++
++ spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++ MRSTLFBDisableVSyncInterrupt(psDevInfo);
++
++
++ MRSTLFBFlip(psDevInfo, (unsigned long)psDevInfo->sSystemBuffer.sDevVAddr.uiAddr);
++
++ psDevInfo->psSwapChain = NULL;
++
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++ return MRST_OK;
++}
++
++
++static PVRSRV_ERROR CreateDCSwapChain(IMG_HANDLE hDevice,
++ IMG_UINT32 ui32Flags,
++ DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
++ DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
++ IMG_UINT32 ui32BufferCount,
++ PVRSRV_SYNC_DATA **ppsSyncData,
++ IMG_UINT32 ui32OEMFlags,
++ IMG_HANDLE *phSwapChain,
++ IMG_UINT32 *pui32SwapChainID)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++ MRSTLFB_BUFFER **ppsBuffer;
++ MRSTLFB_VSYNC_FLIP_ITEM *psVSyncFlips;
++ IMG_UINT32 i;
++ PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_SUPPORTED;
++ unsigned long ulLockFlags;
++ struct drm_device* psDrmDev;
++
++ UNREFERENCED_PARAMETER(ui32OEMFlags);
++
++
++ if(!hDevice
++ || !psDstSurfAttrib
++ || !psSrcSurfAttrib
++ || !ppsSyncData
++ || !phSwapChain)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++
++
++ if(ui32BufferCount > psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers)
++ {
++ return (PVRSRV_ERROR_TOOMANYBUFFERS);
++ }
++
++
++
++
++
++ if(psDstSurfAttrib->pixelformat != psDevInfo->sDisplayFormat.pixelformat
++ || psDstSurfAttrib->sDims.ui32ByteStride != psDevInfo->sDisplayDim.ui32ByteStride
++ || psDstSurfAttrib->sDims.ui32Width != psDevInfo->sDisplayDim.ui32Width
++ || psDstSurfAttrib->sDims.ui32Height != psDevInfo->sDisplayDim.ui32Height)
++ {
++
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ if(psDstSurfAttrib->pixelformat != psSrcSurfAttrib->pixelformat
++ || psDstSurfAttrib->sDims.ui32ByteStride != psSrcSurfAttrib->sDims.ui32ByteStride
++ || psDstSurfAttrib->sDims.ui32Width != psSrcSurfAttrib->sDims.ui32Width
++ || psDstSurfAttrib->sDims.ui32Height != psSrcSurfAttrib->sDims.ui32Height)
++ {
++
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++
++ UNREFERENCED_PARAMETER(ui32Flags);
++
++
++ psSwapChain = (MRSTLFB_SWAPCHAIN*)MRSTLFBAllocKernelMem(sizeof(MRSTLFB_SWAPCHAIN));
++ if(!psSwapChain)
++ {
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++
++ ppsBuffer = (MRSTLFB_BUFFER**)MRSTLFBAllocKernelMem(sizeof(MRSTLFB_BUFFER*) * ui32BufferCount);
++ if(!ppsBuffer)
++ {
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorFreeSwapChain;
++ }
++
++ psVSyncFlips = (MRSTLFB_VSYNC_FLIP_ITEM *)MRSTLFBAllocKernelMem(sizeof(MRSTLFB_VSYNC_FLIP_ITEM) * ui32BufferCount);
++ if (!psVSyncFlips)
++ {
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorFreeBuffers;
++ }
++
++ psSwapChain->ulBufferCount = (unsigned long)ui32BufferCount;
++ psSwapChain->ppsBuffer = ppsBuffer;
++ psSwapChain->psVSyncFlips = psVSyncFlips;
++ psSwapChain->ulInsertIndex = 0;
++ psSwapChain->ulRemoveIndex = 0;
++ psSwapChain->psPVRJTable = &psDevInfo->sPVRJTable;
++ psSwapChain->psSwapChainLock = &psDevInfo->sSwapChainLock;
++
++
++
++ for(i=0; i<ui32BufferCount; i++)
++ {
++ MRSTLFBAllocBuffer(psDevInfo, psDevInfo->sSystemBuffer.ui32BufferSize, &ppsBuffer[i] );
++ ppsBuffer[i]->psSyncData = ppsSyncData[i];
++ }
++
++
++ for(i=0; i<ui32BufferCount-1; i++)
++ {
++ ppsBuffer[i]->psNext = ppsBuffer[i+1];
++ }
++
++ ppsBuffer[i]->psNext = ppsBuffer[0];
++
++
++ for(i=0; i<ui32BufferCount; i++)
++ {
++ psVSyncFlips[i].bValid = MRST_FALSE;
++ psVSyncFlips[i].bFlipped = MRST_FALSE;
++ psVSyncFlips[i].bCmdCompleted = MRST_FALSE;
++ }
++
++
++ psDrmDev = psDevInfo->psDrmDevice;
++
++ psSwapChain->psDevInfo = psDevInfo;
++ psSwapChain->psDrmDev = psDrmDev;
++ psSwapChain->psDrmDriver = psDrmDev->driver;
++ psSwapChain->bBlanked = MRST_FALSE;
++
++ spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++
++ psSwapChain->bFlushCommands = psDevInfo->bFlushCommands;
++
++ if (psSwapChain->bFlushCommands)
++ {
++ psSwapChain->ulSetFlushStateRefCount = 1;
++ }
++ else
++ {
++ psSwapChain->ulSetFlushStateRefCount = 0;
++ }
++
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++
++
++
++
++
++
++ *phSwapChain = (IMG_HANDLE)psSwapChain;
++ *pui32SwapChainID = ++psDevInfo->ui32SwapChainIdCounter;
++ psDevInfo->psSwapChain = psSwapChain;
++
++ if( psDevInfo->ui32SwapChainNum++ == 0)
++ {
++ MRSTLFBEnableSwapChains( psDevInfo );
++ }
++
++ return (PVRSRV_OK);
++
++
++ MRSTLFBFreeKernelMem(psVSyncFlips);
++ErrorFreeBuffers:
++ MRSTLFBFreeKernelMem(ppsBuffer);
++ErrorFreeSwapChain:
++ MRSTLFBFreeKernelMem(psSwapChain);
++
++ return eError;
++}
++
++static PVRSRV_ERROR DestroyDCSwapChain(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++ int i;
++
++
++ if(!hDevice || !hSwapChain)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++ psSwapChain = (MRSTLFB_SWAPCHAIN*)hSwapChain;
++
++
++ FlushInternalVSyncQueue(psSwapChain);
++
++
++ if(--psDevInfo->ui32SwapChainNum == 0)
++ {
++ MRSTLFBDisableSwapChains(psDevInfo);
++ }
++
++ if( psDevInfo->psSwapChain == psSwapChain )
++ psDevInfo->psSwapChain = IMG_NULL;
++
++
++
++ for(i=0; i< psSwapChain->ulBufferCount; i++)
++ {
++ MRSTLFBFreeBuffer(psDevInfo, &psSwapChain->ppsBuffer[i] );
++ }
++ MRSTLFBFreeKernelMem(psSwapChain->psVSyncFlips);
++ MRSTLFBFreeKernelMem(psSwapChain->ppsBuffer);
++ MRSTLFBFreeKernelMem(psSwapChain);
++
++ return (PVRSRV_OK);
++}
++
++static PVRSRV_ERROR SetDCDstRect(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_RECT *psRect)
++{
++ UNREFERENCED_PARAMETER(hDevice);
++ UNREFERENCED_PARAMETER(hSwapChain);
++ UNREFERENCED_PARAMETER(psRect);
++
++
++
++ return (PVRSRV_ERROR_NOT_SUPPORTED);
++}
++
++static PVRSRV_ERROR SetDCSrcRect(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_RECT *psRect)
++{
++ UNREFERENCED_PARAMETER(hDevice);
++ UNREFERENCED_PARAMETER(hSwapChain);
++ UNREFERENCED_PARAMETER(psRect);
++
++
++
++ return (PVRSRV_ERROR_NOT_SUPPORTED);
++}
++
++static PVRSRV_ERROR SetDCDstColourKey(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 ui32CKColour)
++{
++ UNREFERENCED_PARAMETER(hDevice);
++ UNREFERENCED_PARAMETER(hSwapChain);
++ UNREFERENCED_PARAMETER(ui32CKColour);
++
++
++
++ return (PVRSRV_ERROR_NOT_SUPPORTED);
++}
++
++static PVRSRV_ERROR SetDCSrcColourKey(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 ui32CKColour)
++{
++ UNREFERENCED_PARAMETER(hDevice);
++ UNREFERENCED_PARAMETER(hSwapChain);
++ UNREFERENCED_PARAMETER(ui32CKColour);
++
++
++
++ return (PVRSRV_ERROR_NOT_SUPPORTED);
++}
++
++static PVRSRV_ERROR GetDCBuffers(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 *pui32BufferCount,
++ IMG_HANDLE *phBuffer)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++ unsigned long i;
++
++
++ if(!hDevice
++ || !hSwapChain
++ || !pui32BufferCount
++ || !phBuffer)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++ psSwapChain = (MRSTLFB_SWAPCHAIN*)hSwapChain;
++ if (psSwapChain != psDevInfo->psSwapChain)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++
++ *pui32BufferCount = (IMG_UINT32)psSwapChain->ulBufferCount;
++
++
++ for(i=0; i<psSwapChain->ulBufferCount; i++)
++ {
++ phBuffer[i] = (IMG_HANDLE)psSwapChain->ppsBuffer[i];
++ }
++
++ return (PVRSRV_OK);
++}
++
++static PVRSRV_ERROR SwapToDCBuffer(IMG_HANDLE hDevice,
++ IMG_HANDLE hBuffer,
++ IMG_UINT32 ui32SwapInterval,
++ IMG_HANDLE hPrivateTag,
++ IMG_UINT32 ui32ClipRectCount,
++ IMG_RECT *psClipRect)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++
++ UNREFERENCED_PARAMETER(ui32SwapInterval);
++ UNREFERENCED_PARAMETER(hPrivateTag);
++ UNREFERENCED_PARAMETER(psClipRect);
++
++ if(!hDevice
++ || !hBuffer
++ || (ui32ClipRectCount != 0))
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++
++
++ return (PVRSRV_OK);
++}
++
++static PVRSRV_ERROR SwapToDCSystem(IMG_HANDLE hDevice,
++ IMG_HANDLE hSwapChain)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++ unsigned long ulLockFlags;
++
++ if(!hDevice || !hSwapChain)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psDevInfo = (MRSTLFB_DEVINFO*)hDevice;
++ psSwapChain = (MRSTLFB_SWAPCHAIN*)hSwapChain;
++ if (psSwapChain != psDevInfo->psSwapChain)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++
++ FlushInternalVSyncQueue(psSwapChain);
++
++
++ MRSTLFBFlip(psDevInfo, (unsigned long)(psDevInfo->sSystemBuffer.sDevVAddr.uiAddr));
++
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++ return (PVRSRV_OK);
++}
++
++MRST_BOOL MRSTLFBVSyncIHandler(MRSTLFB_SWAPCHAIN *psSwapChain)
++{
++ IMG_BOOL bStatus = IMG_TRUE;
++ MRSTLFB_VSYNC_FLIP_ITEM *psFlipItem;
++ unsigned long ulMaxIndex;
++ unsigned long ulLockFlags;
++
++ psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulRemoveIndex];
++ ulMaxIndex = psSwapChain->ulBufferCount - 1;
++
++ spin_lock_irqsave(psSwapChain->psSwapChainLock, ulLockFlags);
++
++
++ if (psSwapChain->bFlushCommands)
++ {
++ goto ExitUnlock;
++ }
++
++ while(psFlipItem->bValid)
++ {
++
++ if(psFlipItem->bFlipped)
++ {
++
++ if(!psFlipItem->bCmdCompleted)
++ {
++
++ IMG_BOOL bScheduleMISR;
++
++ bScheduleMISR = IMG_TRUE;
++
++
++ psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete((IMG_HANDLE)psFlipItem->hCmdComplete, bScheduleMISR);
++
++
++ psFlipItem->bCmdCompleted = MRST_TRUE;
++ }
++
++
++ psFlipItem->ulSwapInterval--;
++
++
++ if(psFlipItem->ulSwapInterval == 0)
++ {
++
++ psSwapChain->ulRemoveIndex++;
++
++ if(psSwapChain->ulRemoveIndex > ulMaxIndex)
++ {
++ psSwapChain->ulRemoveIndex = 0;
++ }
++
++
++ psFlipItem->bCmdCompleted = MRST_FALSE;
++ psFlipItem->bFlipped = MRST_FALSE;
++
++
++ psFlipItem->bValid = MRST_FALSE;
++ }
++ else
++ {
++
++ break;
++ }
++ }
++ else
++ {
++
++ MRSTLFBFlip(psSwapChain->psDevInfo, (unsigned long)psFlipItem->sDevVAddr.uiAddr);
++
++
++ psFlipItem->bFlipped = MRST_TRUE;
++
++
++ break;
++ }
++
++
++ psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulRemoveIndex];
++ }
++
++ExitUnlock:
++ spin_unlock_irqrestore(psSwapChain->psSwapChainLock, ulLockFlags);
++
++ return bStatus;
++}
++
++#if defined(MRST_USING_INTERRUPTS)
++static int
++MRSTLFBVSyncISR(struct drm_device *psDrmDevice, int iPipe)
++{
++ MRSTLFB_DEVINFO *psDevInfo = GetAnchorPtr();
++
++
++ if(!psDevInfo->psSwapChain)
++ {
++ return (IMG_TRUE);
++ }
++
++ (void) MRSTLFBVSyncIHandler(psDevInfo->psSwapChain);
++ return 0;
++}
++#endif
++
++#if defined(MRST_USING_INTERRUPTS)
++static IMG_BOOL
++MRSTLFBISRHandler(IMG_VOID* pvDevInfo)
++{
++ MRSTLFB_DEVINFO *psDevInfo = (MRSTLFB_DEVINFO *)pvDevInfo;
++#if 0
++#ifdef MRST_USING_INTERRUPTS
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++#endif
++#endif
++ unsigned long vdc_stat;
++ struct drm_psb_private *dev_priv;
++#if defined(SUPPORT_DRI_DRM)
++ uint32_t pipea_stat = 0;
++#endif
++
++ if (!ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ DRM_ERROR("ERROR: interrupt arrived but Display HW is power off\n");
++ return IMG_FALSE;
++ }
++
++#if defined(SUPPORT_DRI_DRM)
++ dev_priv = (struct drm_psb_private *) psDevInfo->psDrmDevice->dev_private;
++
++ pipea_stat = PSB_RVDC32(PIPEASTAT);
++ //write back to clear all interrupt status bits and reset interrupts.
++ PSB_WVDC32(pipea_stat, PIPEASTAT);
++
++ vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
++ vdc_stat &= dev_priv->vdc_irq_mask;
++ if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
++ {
++ drm_handle_vblank(psDevInfo->psDrmDevice, 0);
++ }
++#endif
++
++/* Use drm_handle_vblank() as the VSync handler, otherwise kernel would panic if handle
++ * the VSync event again. */
++#if 0
++#ifdef MRST_USING_INTERRUPTS
++
++ psSwapChain = psDevInfo->psSwapChain;
++ vdc_stat = MRSTLFBVSyncReadReg(psDevInfo, PSB_INT_IDENTITY_R);
++
++ if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
++ {
++ if(!psDevInfo->psSwapChain)
++ {
++ psSwapChain = psDevInfo->psSwapChain;
++ (void) MRSTLFBVSyncIHandler(psSwapChain);
++ }
++ }
++#endif
++#endif
++
++#if defined(SUPPORT_DRI_DRM)
++ vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
++ vdc_stat &= dev_priv->vdc_irq_mask;
++ if (vdc_stat & _PSB_DPST_PIPEA_FLAG) {
++
++ /* Check for DPST related interrupts */
++ if((pipea_stat & PIPE_DPST_EVENT_STATUS) &&
++ (dev_priv->psb_dpst_state != NULL)) {
++ uint32_t pwm_reg = 0;
++ uint32_t hist_reg = 0;
++ u32 irqCtrl = 0;
++ struct dpst_guardband guardband_reg;
++ struct dpst_ie_histogram_control ie_hist_cont_reg;
++
++ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++
++ /* Determine if this is histogram or pwm interrupt */
++ if(hist_reg & HISTOGRAM_INT_CTRL_CLEAR) {
++ /* Notify UM of histogram interrupt */
++ psb_dpst_notify_change_um(DPST_EVENT_HIST_INTERRUPT,
++ dev_priv->psb_dpst_state);
++
++ /* disable dpst interrupts */
++ guardband_reg.data = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++ guardband_reg.interrupt_enable = 0;
++ guardband_reg.interrupt_status = 1;
++ PSB_WVDC32(guardband_reg.data, HISTOGRAM_INT_CONTROL);
++
++ ie_hist_cont_reg.data = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
++ ie_hist_cont_reg.ie_histogram_enable = 0;
++ PSB_WVDC32(ie_hist_cont_reg.data, HISTOGRAM_LOGIC_CONTROL);
++
++ irqCtrl = PSB_RVDC32(PIPEASTAT);
++ irqCtrl &= ~PIPE_DPST_EVENT_ENABLE;
++ PSB_WVDC32(irqCtrl, PIPEASTAT);
++ }
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++ if((pwm_reg & PWM_PHASEIN_INT_ENABLE) &&
++ !(pwm_reg & PWM_PHASEIN_ENABLE)) {
++ /* Notify UM of the phase complete */
++ psb_dpst_notify_change_um(DPST_EVENT_PHASE_COMPLETE,
++ dev_priv->psb_dpst_state);
++
++ /* Temporarily get phase mngr ready to generate
++ * another interrupt until this can be moved to
++ * user mode */
++ /* PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE,
++ PWM_CONTROL_LOGIC); */
++ }
++ }
++ }
++#endif
++ return IMG_TRUE;
++}
++#endif
++
++static IMG_BOOL ProcessFlip(IMG_HANDLE hCmdCookie,
++ IMG_UINT32 ui32DataSize,
++ IMG_VOID *pvData)
++{
++ DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
++ MRSTLFB_DEVINFO *psDevInfo;
++ MRSTLFB_BUFFER *psBuffer;
++ MRSTLFB_SWAPCHAIN *psSwapChain;
++#if 0//defined(MRST_USING_INTERRUPTS)
++ MRSTLFB_VSYNC_FLIP_ITEM* psFlipItem;
++#endif
++ unsigned long ulLockFlags;
++
++
++ if(!hCmdCookie || !pvData)
++ {
++ return IMG_FALSE;
++ }
++
++
++ psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)pvData;
++
++ if (psFlipCmd == IMG_NULL || sizeof(DISPLAYCLASS_FLIP_COMMAND) != ui32DataSize)
++ {
++ return IMG_FALSE;
++ }
++
++
++ psDevInfo = (MRSTLFB_DEVINFO*)psFlipCmd->hExtDevice;
++
++ psBuffer = (MRSTLFB_BUFFER*)psFlipCmd->hExtBuffer;
++ psSwapChain = (MRSTLFB_SWAPCHAIN*) psFlipCmd->hExtSwapChain;
++
++ spin_lock_irqsave(&psDevInfo->sSwapChainLock, ulLockFlags);
++
++
++
++ if (psDevInfo->bDeviceSuspended)
++ {
++ psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(hCmdCookie, IMG_TRUE);
++ goto ExitTrueUnlock;
++ }
++
++#if 0 //defined(MRST_USING_INTERRUPTS)
++
++ if(psFlipCmd->ui32SwapInterval == 0 || psSwapChain->bFlushCommands == MRST_TRUE || psBuffer == &psDevInfo->sSystemBuffer)
++ {
++#endif
++
++ MRSTLFBFlip(psDevInfo, (unsigned long)psBuffer->sDevVAddr.uiAddr);
++
++
++
++ psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(hCmdCookie, IMG_TRUE);
++
++#if 0 //defined(MRST_USING_INTERRUPTS)
++ goto ExitTrueUnlock;
++ }
++
++ psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ulInsertIndex];
++
++
++ if(psFlipItem->bValid == MRST_FALSE)
++ {
++ unsigned long ulMaxIndex = psSwapChain->ulBufferCount - 1;
++
++ if(psSwapChain->ulInsertIndex == psSwapChain->ulRemoveIndex)
++ {
++
++ MRSTLFBFlip(psDevInfo, (unsigned long)psBuffer->sDevVAddr.uiAddr);
++
++ psFlipItem->bFlipped = MRST_TRUE;
++ }
++ else
++ {
++ psFlipItem->bFlipped = MRST_FALSE;
++ }
++
++ psFlipItem->hCmdComplete = (MRST_HANDLE)hCmdCookie;
++ psFlipItem->ulSwapInterval = (unsigned long)psFlipCmd->ui32SwapInterval;
++ psFlipItem->sDevVAddr = psBuffer->sDevVAddr;
++ psFlipItem->bValid = MRST_TRUE;
++
++ psSwapChain->ulInsertIndex++;
++ if(psSwapChain->ulInsertIndex > ulMaxIndex)
++ {
++ psSwapChain->ulInsertIndex = 0;
++ }
++
++ goto ExitTrueUnlock;
++ }
++
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++ return IMG_FALSE;
++#endif
++
++ExitTrueUnlock:
++ spin_unlock_irqrestore(&psDevInfo->sSwapChainLock, ulLockFlags);
++ return IMG_TRUE;
++}
++
++
++#if defined(PVR_MRST_FB_SET_PAR_ON_INIT)
++static void MRSTFBSetPar(struct fb_info *psLINFBInfo)
++{
++ acquire_console_sem();
++
++ if (psLINFBInfo->fbops->fb_set_par != NULL)
++ {
++ int res;
++
++ res = psLINFBInfo->fbops->fb_set_par(psLINFBInfo);
++ if (res != 0)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": fb_set_par failed: %d\n", res);
++
++ }
++ }
++ else
++ {
++ printk(KERN_WARNING DRIVER_PREFIX
++ ": fb_set_par not set - HW cursor may not work\n");
++ }
++
++ release_console_sem();
++}
++#endif
++
++
++static int MRSTLFBHandleChangeFB(struct drm_device* dev, struct psb_framebuffer *psbfb)
++{
++ MRSTLFB_DEVINFO *psDevInfo = GetAnchorPtr();
++ int i;
++ struct drm_psb_private * dev_priv;
++ struct psb_gtt * pg;
++
++ if( !psDevInfo->sSystemBuffer.bIsContiguous )
++ MRSTLFBFreeKernelMem( psDevInfo->sSystemBuffer.uSysAddr.psNonCont );
++
++ dev_priv = (struct drm_psb_private *)dev->dev_private;
++ pg = dev_priv->pg;
++
++
++ psDevInfo->sDisplayDim.ui32ByteStride = psbfb->base.pitch;
++ psDevInfo->sDisplayDim.ui32Width = psbfb->base.width;
++ psDevInfo->sDisplayDim.ui32Height = psbfb->base.height;
++
++ psDevInfo->sSystemBuffer.ui32BufferSize = psbfb->size;
++ //psDevInfo->sSystemBuffer.sCPUVAddr = psbfb->pvKMAddr;
++ psDevInfo->sSystemBuffer.sCPUVAddr = pg->vram_addr;
++ //psDevInfo->sSystemBuffer.sDevVAddr.uiAddr = psbfb->offsetGTT;
++ psDevInfo->sSystemBuffer.sDevVAddr.uiAddr = 0;
++ psDevInfo->sSystemBuffer.bIsAllocated = IMG_FALSE;
++
++ if(psbfb->bo )
++ {
++
++ psDevInfo->sSystemBuffer.bIsContiguous = IMG_FALSE;
++ psDevInfo->sSystemBuffer.uSysAddr.psNonCont = MRSTLFBAllocKernelMem( sizeof( IMG_SYS_PHYADDR ) * psbfb->bo->ttm->num_pages);
++ for(i = 0;i < psbfb->bo->ttm->num_pages;++i)
++ {
++ struct page *p = ttm_tt_get_page( psbfb->bo->ttm, i);
++ psDevInfo->sSystemBuffer.uSysAddr.psNonCont[i].uiAddr = page_to_pfn(p) << PAGE_SHIFT;
++
++ }
++ }
++ else
++ {
++
++ //struct drm_device * psDrmDevice = psDevInfo->psDrmDevice;
++ //struct drm_psb_private * dev_priv = (struct drm_psb_private *)psDrmDevice->dev_private;
++ //struct psb_gtt * pg = dev_priv->pg;
++
++ psDevInfo->sSystemBuffer.bIsContiguous = IMG_TRUE;
++ psDevInfo->sSystemBuffer.uSysAddr.sCont.uiAddr = pg->stolen_base;
++ }
++
++ return 0;
++}
++
++static int MRSTLFBFindMainPipe(struct drm_device *dev) {
++ struct drm_crtc *crtc;
++
++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
++ {
++ if ( drm_helper_crtc_in_use(crtc) )
++ {
++ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++ return psb_intel_crtc->pipe;
++ }
++ }
++
++ return 0;
++}
++
++static MRST_ERROR InitDev(MRSTLFB_DEVINFO *psDevInfo)
++{
++ MRST_ERROR eError = MRST_ERROR_GENERIC;
++ struct fb_info *psLINFBInfo;
++ struct drm_device * psDrmDevice = psDevInfo->psDrmDevice;
++ struct drm_framebuffer * psDrmFB;
++ struct psb_framebuffer *psbfb;
++
++
++ int hdisplay;
++ int vdisplay;
++
++ unsigned long FBSize;
++
++ psDrmFB = list_first_entry(&psDrmDevice->mode_config.fb_kernel_list,
++ struct drm_framebuffer,
++ filp_head);
++ if(!psDrmFB) {
++ printk(KERN_INFO"%s:Cannot find drm FB", __FUNCTION__);
++ return eError;
++ }
++ psbfb = to_psb_fb(psDrmFB);
++
++ hdisplay = psDrmFB->width;
++ vdisplay = psDrmFB->height;
++ FBSize = psDrmFB->pitch * psDrmFB->height;
++
++ psLINFBInfo = (struct fb_info*)psDrmFB->fbdev;
++
++#if defined(PVR_MRST_FB_SET_PAR_ON_INIT)
++ MRSTFBSetPar(psLINFBInfo);
++#endif
++
++
++ psDevInfo->sSystemBuffer.bIsContiguous = IMG_TRUE;
++ psDevInfo->sSystemBuffer.bIsAllocated = IMG_FALSE;
++
++ MRSTLFBHandleChangeFB(psDrmDevice, psbfb);
++
++
++ psDevInfo->sDisplayFormat.pixelformat = PVRSRV_PIXEL_FORMAT_ARGB8888;
++ psDevInfo->psLINFBInfo = psLINFBInfo;
++
++
++ psDevInfo->ui32MainPipe = MRSTLFBFindMainPipe(psDevInfo->psDrmDevice);
++
++
++
++
++ psDevInfo->pvRegs = psbfb_vdc_reg(psDevInfo->psDrmDevice);
++
++ if (psDevInfo->pvRegs == NULL)
++ {
++ eError = PVRSRV_ERROR_BAD_MAPPING;
++ printk(KERN_WARNING DRIVER_PREFIX ": Couldn't map registers needed for flipping\n");
++ return eError;
++ }
++
++ return MRST_OK;
++}
++
++static void DeInitDev(MRSTLFB_DEVINFO *psDevInfo)
++{
++
++}
++
++MRST_ERROR MRSTLFBInit(struct drm_device * dev)
++{
++ MRSTLFB_DEVINFO *psDevInfo;
++ //struct drm_psb_private *psDrmPriv = (struct drm_psb_private *)dev->dev_private;
++
++ psDevInfo = GetAnchorPtr();
++
++ if (psDevInfo == NULL)
++ {
++ PFN_CMD_PROC pfnCmdProcList[MRSTLFB_COMMAND_COUNT];
++ IMG_UINT32 aui32SyncCountList[MRSTLFB_COMMAND_COUNT][2];
++
++ psDevInfo = (MRSTLFB_DEVINFO *)MRSTLFBAllocKernelMem(sizeof(MRSTLFB_DEVINFO));
++
++ if(!psDevInfo)
++ {
++ return (MRST_ERROR_OUT_OF_MEMORY);
++ }
++
++
++ memset(psDevInfo, 0, sizeof(MRSTLFB_DEVINFO));
++
++
++ SetAnchorPtr((void*)psDevInfo);
++
++ psDevInfo->psDrmDevice = dev;
++ psDevInfo->ulRefCount = 0;
++
++
++ if(InitDev(psDevInfo) != MRST_OK)
++ {
++ return (MRST_ERROR_INIT_FAILURE);
++ }
++
++ if(MRSTLFBGetLibFuncAddr ("PVRGetDisplayClassJTable", &pfnGetPVRJTable) != MRST_OK)
++ {
++ return (MRST_ERROR_INIT_FAILURE);
++ }
++
++
++ if(!(*pfnGetPVRJTable)(&psDevInfo->sPVRJTable))
++ {
++ return (MRST_ERROR_INIT_FAILURE);
++ }
++
++
++ spin_lock_init(&psDevInfo->sSwapChainLock);
++
++ psDevInfo->psSwapChain = 0;
++ psDevInfo->bFlushCommands = MRST_FALSE;
++ psDevInfo->bDeviceSuspended = MRST_FALSE;
++
++ psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers = 3;
++ psDevInfo->sDisplayInfo.ui32MaxSwapChains = 2;
++ psDevInfo->sDisplayInfo.ui32MaxSwapInterval = 3;
++ psDevInfo->sDisplayInfo.ui32MinSwapInterval = 0;
++
++ strncpy(psDevInfo->sDisplayInfo.szDisplayName, DISPLAY_DEVICE_NAME, MAX_DISPLAY_NAME_SIZE);
++
++
++
++
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX
++ ": Maximum number of swap chain buffers: %lu\n",
++ psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers));
++
++
++
++
++ psDevInfo->sDCJTable.ui32TableSize = sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE);
++ psDevInfo->sDCJTable.pfnOpenDCDevice = OpenDCDevice;
++ psDevInfo->sDCJTable.pfnCloseDCDevice = CloseDCDevice;
++ psDevInfo->sDCJTable.pfnEnumDCFormats = EnumDCFormats;
++ psDevInfo->sDCJTable.pfnEnumDCDims = EnumDCDims;
++ psDevInfo->sDCJTable.pfnGetDCSystemBuffer = GetDCSystemBuffer;
++ psDevInfo->sDCJTable.pfnGetDCInfo = GetDCInfo;
++ psDevInfo->sDCJTable.pfnGetBufferAddr = GetDCBufferAddr;
++ psDevInfo->sDCJTable.pfnCreateDCSwapChain = CreateDCSwapChain;
++ psDevInfo->sDCJTable.pfnDestroyDCSwapChain = DestroyDCSwapChain;
++ psDevInfo->sDCJTable.pfnSetDCDstRect = SetDCDstRect;
++ psDevInfo->sDCJTable.pfnSetDCSrcRect = SetDCSrcRect;
++ psDevInfo->sDCJTable.pfnSetDCDstColourKey = SetDCDstColourKey;
++ psDevInfo->sDCJTable.pfnSetDCSrcColourKey = SetDCSrcColourKey;
++ psDevInfo->sDCJTable.pfnGetDCBuffers = GetDCBuffers;
++ psDevInfo->sDCJTable.pfnSwapToDCBuffer = SwapToDCBuffer;
++ psDevInfo->sDCJTable.pfnSwapToDCSystem = SwapToDCSystem;
++ psDevInfo->sDCJTable.pfnSetDCState = SetDCState;
++
++
++ if(psDevInfo->sPVRJTable.pfnPVRSRVRegisterDCDevice (
++ &psDevInfo->sDCJTable,
++ &psDevInfo->ulDeviceID ) != PVRSRV_OK)
++ {
++ return (MRST_ERROR_DEVICE_REGISTER_FAILED);
++ }
++
++ printk("Device ID: %lu\n", psDevInfo->ulDeviceID);
++
++#if defined (SYS_USING_INTERRUPTS)
++ if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterSystemISRHandler(MRSTLFBISRHandler,
++ psDevInfo,
++ 0,
++ (IMG_UINT32)psDevInfo->ulDeviceID) != PVRSRV_OK)
++ {
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX "ISR Installation failed\n"));
++ return (MRST_ERROR_INIT_FAILURE);
++ }
++#endif
++ if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterPowerDevice((IMG_UINT32)psDevInfo->ulDeviceID,
++ MRSTLFBPrePowerState, MRSTLFBPostPowerState,
++ IMG_NULL, IMG_NULL,
++ psDevInfo,
++ PVRSRV_DEV_POWER_STATE_ON,
++ PVRSRV_DEV_POWER_STATE_ON) != PVRSRV_OK)
++ {
++ return (MRST_ERROR_INIT_FAILURE);
++ }
++
++
++
++
++
++
++
++
++
++
++
++#if defined (MRST_USING_INTERRUPTS)
++
++ if(MRSTLFBInstallVSyncISR(psDevInfo,MRSTLFBVSyncISR) != MRST_OK)
++ {
++ DEBUG_PRINTK((KERN_INFO DRIVER_PREFIX "ISR Installation failed\n"));
++ return (MRST_ERROR_INIT_FAILURE);
++ }
++#endif
++
++
++ pfnCmdProcList[DC_FLIP_COMMAND] = ProcessFlip;
++
++
++ aui32SyncCountList[DC_FLIP_COMMAND][0] = 0;
++ aui32SyncCountList[DC_FLIP_COMMAND][1] = 2;
++
++
++
++
++
++ if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterCmdProcList (psDevInfo->ulDeviceID,
++ &pfnCmdProcList[0],
++ aui32SyncCountList,
++ MRSTLFB_COMMAND_COUNT) != PVRSRV_OK)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX ": Can't register callback\n");
++ return (MRST_ERROR_CANT_REGISTER_CALLBACK);
++ }
++
++
++ }
++
++
++ //psDrmPriv->psb_change_fb_handler = MRSTLFBHandleChangeFB;
++
++
++ psDevInfo->ulRefCount++;
++
++
++ return (MRST_OK);
++}
++
++MRST_ERROR MRSTLFBDeinit(void)
++{
++ MRSTLFB_DEVINFO *psDevInfo, *psDevFirst;
++
++ psDevFirst = GetAnchorPtr();
++ psDevInfo = psDevFirst;
++
++
++ if (psDevInfo == NULL)
++ {
++ return (MRST_ERROR_GENERIC);
++ }
++
++
++ psDevInfo->ulRefCount--;
++
++ psDevInfo->psDrmDevice = NULL;
++ if (psDevInfo->ulRefCount == 0)
++ {
++
++ PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable = &psDevInfo->sPVRJTable;
++
++ if (psDevInfo->sPVRJTable.pfnPVRSRVRemoveCmdProcList (psDevInfo->ulDeviceID, MRSTLFB_COMMAND_COUNT) != PVRSRV_OK)
++ {
++ return (MRST_ERROR_GENERIC);
++ }
++
++ if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterPowerDevice((IMG_UINT32)psDevInfo->ulDeviceID,
++ IMG_NULL, IMG_NULL,
++ IMG_NULL, IMG_NULL, IMG_NULL,
++ PVRSRV_DEV_POWER_STATE_ON,
++ PVRSRV_DEV_POWER_STATE_ON) != PVRSRV_OK)
++ {
++ return (MRST_ERROR_GENERIC);
++ }
++
++#if defined (SYS_USING_INTERRUPTS)
++ if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterSystemISRHandler(IMG_NULL, IMG_NULL, 0,
++ (IMG_UINT32)psDevInfo->ulDeviceID) != PVRSRV_OK)
++ {
++ return (MRST_ERROR_GENERIC);
++ }
++#endif
++
++#if defined (MRST_USING_INTERRUPTS)
++
++ if(MRSTLFBUninstallVSyncISR(psDevInfo) != MRST_OK)
++ {
++ return (MRST_ERROR_GENERIC);
++ }
++#endif
++
++ if (psJTable->pfnPVRSRVRemoveDCDevice(psDevInfo->ulDeviceID) != PVRSRV_OK)
++ {
++ return (MRST_ERROR_GENERIC);
++ }
++
++ DeInitDev(psDevInfo);
++
++
++ MRSTLFBFreeKernelMem(psDevInfo);
++ }
++
++
++ SetAnchorPtr(NULL);
++
++
++ return (MRST_OK);
++}
++
++
++/*
++ * save_display_registers
++ *
++ * Description: We are going to suspend so save current display
++ * register state.
++ */
++static void save_display_registers(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int i;
++
++ /* Display arbitration control + watermarks */
++ dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
++ dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
++ dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
++ dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
++ dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
++ dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
++ dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
++ dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
++
++ /* Pipe & plane A info */
++ dev_priv->savePIPEACONF = PSB_RVDC32(PIPEACONF);
++ dev_priv->savePIPEASRC = PSB_RVDC32(PIPEASRC);
++ dev_priv->saveFPA0 = PSB_RVDC32(MRST_FPA0);
++ dev_priv->saveFPA1 = PSB_RVDC32(MRST_FPA1);
++ dev_priv->saveDPLL_A = PSB_RVDC32(MRST_DPLL_A);
++ dev_priv->saveHTOTAL_A = PSB_RVDC32(HTOTAL_A);
++ dev_priv->saveHBLANK_A = PSB_RVDC32(HBLANK_A);
++ dev_priv->saveHSYNC_A = PSB_RVDC32(HSYNC_A);
++ dev_priv->saveVTOTAL_A = PSB_RVDC32(VTOTAL_A);
++ dev_priv->saveVBLANK_A = PSB_RVDC32(VBLANK_A);
++ dev_priv->saveVSYNC_A = PSB_RVDC32(VSYNC_A);
++ dev_priv->saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
++ dev_priv->saveDSPACNTR = PSB_RVDC32(DSPACNTR);
++ dev_priv->saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE);
++ dev_priv->saveDSPAADDR = PSB_RVDC32(DSPABASE);
++ dev_priv->saveDSPASURF = PSB_RVDC32(DSPASURF);
++ dev_priv->saveDSPALINOFF = PSB_RVDC32(DSPALINOFF);
++ dev_priv->saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF);
++
++ /*save cursor regs*/
++ dev_priv->saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
++ dev_priv->saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
++ dev_priv->saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
++
++ /*save palette (gamma) */
++ for (i = 0; i < 256; i++)
++ dev_priv->save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i<<2));
++
++ /*save performance state*/
++ dev_priv->savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE);
++
++ /* LVDS state */
++ dev_priv->savePP_CONTROL = PSB_RVDC32(PP_CONTROL);
++ dev_priv->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
++ dev_priv->savePFIT_AUTO_RATIOS = PSB_RVDC32(PFIT_AUTO_RATIOS);
++ dev_priv->saveBLC_PWM_CTL = PSB_RVDC32(BLC_PWM_CTL);
++ dev_priv->saveBLC_PWM_CTL2 = PSB_RVDC32(BLC_PWM_CTL2);
++ dev_priv->saveLVDS = PSB_RVDC32(LVDS);
++ dev_priv->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
++ dev_priv->savePP_ON_DELAYS = PSB_RVDC32(LVDSPP_ON);
++ dev_priv->savePP_OFF_DELAYS = PSB_RVDC32(LVDSPP_OFF);
++ dev_priv->savePP_DIVISOR = PSB_RVDC32(PP_CYCLE);
++
++ /* HW overlay */
++ dev_priv->saveOV_OVADD = PSB_RVDC32(OV_OVADD);
++ dev_priv->saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
++ dev_priv->saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
++ dev_priv->saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
++ dev_priv->saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
++ dev_priv->saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
++ dev_priv->saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
++
++ /* MIPI DSI */
++ dev_priv->saveMIPI = PSB_RVDC32(MIPI);
++ dev_priv->saveDEVICE_READY_REG = PSB_RVDC32(DEVICE_READY_REG);
++ dev_priv->saveINTR_EN_REG = PSB_RVDC32(INTR_EN_REG);
++ dev_priv->saveDSI_FUNC_PRG_REG = PSB_RVDC32(DSI_FUNC_PRG_REG);
++ dev_priv->saveHS_TX_TIMEOUT_REG = PSB_RVDC32(HS_TX_TIMEOUT_REG);
++ dev_priv->saveLP_RX_TIMEOUT_REG = PSB_RVDC32(LP_RX_TIMEOUT_REG);
++ dev_priv->saveTURN_AROUND_TIMEOUT_REG =
++ PSB_RVDC32(TURN_AROUND_TIMEOUT_REG);
++ dev_priv->saveDEVICE_RESET_REG = PSB_RVDC32(DEVICE_RESET_REG);
++ dev_priv->saveDPI_RESOLUTION_REG =
++ PSB_RVDC32(DPI_RESOLUTION_REG);
++ dev_priv->saveHORIZ_SYNC_PAD_COUNT_REG =
++ PSB_RVDC32(HORIZ_SYNC_PAD_COUNT_REG);
++ dev_priv->saveHORIZ_BACK_PORCH_COUNT_REG =
++ PSB_RVDC32(HORIZ_BACK_PORCH_COUNT_REG);
++ dev_priv->saveHORIZ_FRONT_PORCH_COUNT_REG =
++ PSB_RVDC32(HORIZ_FRONT_PORCH_COUNT_REG);
++ dev_priv->saveHORIZ_ACTIVE_AREA_COUNT_REG =
++ PSB_RVDC32(HORIZ_ACTIVE_AREA_COUNT_REG);
++ dev_priv->saveVERT_SYNC_PAD_COUNT_REG =
++ PSB_RVDC32(VERT_SYNC_PAD_COUNT_REG);
++ dev_priv->saveVERT_BACK_PORCH_COUNT_REG =
++ PSB_RVDC32(VERT_BACK_PORCH_COUNT_REG);
++ dev_priv->saveVERT_FRONT_PORCH_COUNT_REG =
++ PSB_RVDC32(VERT_FRONT_PORCH_COUNT_REG);
++ dev_priv->saveHIGH_LOW_SWITCH_COUNT_REG =
++ PSB_RVDC32(HIGH_LOW_SWITCH_COUNT_REG);
++ dev_priv->saveINIT_COUNT_REG = PSB_RVDC32(INIT_COUNT_REG);
++ dev_priv->saveMAX_RET_PAK_REG = PSB_RVDC32(MAX_RET_PAK_REG);
++ dev_priv->saveVIDEO_FMT_REG = PSB_RVDC32(VIDEO_FMT_REG);
++ dev_priv->saveEOT_DISABLE_REG = PSB_RVDC32(EOT_DISABLE_REG);
++ dev_priv->saveLP_BYTECLK_REG = PSB_RVDC32(LP_BYTECLK_REG);
++ dev_priv->saveHS_LS_DBI_ENABLE_REG =
++ PSB_RVDC32(HS_LS_DBI_ENABLE_REG);
++ dev_priv->saveTXCLKESC_REG = PSB_RVDC32(TXCLKESC_REG);
++ dev_priv->saveDPHY_PARAM_REG = PSB_RVDC32(DPHY_PARAM_REG);
++ dev_priv->saveMIPI_CONTROL_REG = PSB_RVDC32(MIPI_CONTROL_REG);
++
++ /* DPST registers */
++ dev_priv->saveHISTOGRAM_INT_CONTROL_REG = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++ dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
++}
++
++
++/*
++ * restore_display_registers
++ *
++ * Description: We are going to resume so restore display register state.
++ */
++static void restore_display_registers(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ unsigned long i, pp_stat;
++
++ /* Display arbitration + watermarks */
++ PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
++ PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
++ PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
++ PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
++ PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
++ PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
++ PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
++ PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
++
++ /*make sure VGA plane is off. it initializes to on after reset!*/
++ PSB_WVDC32(0x80000000, VGACNTRL);
++
++ /* set the plls */
++ PSB_WVDC32(dev_priv->saveFPA0, MRST_FPA0);
++ PSB_WVDC32(dev_priv->saveFPA1, MRST_FPA1);
++ /* Actually enable it */
++ PSB_WVDC32(dev_priv->saveDPLL_A, MRST_DPLL_A);
++ DRM_UDELAY(150);
++
++ /* Restore mode */
++ PSB_WVDC32(dev_priv->saveHTOTAL_A, HTOTAL_A);
++ PSB_WVDC32(dev_priv->saveHBLANK_A, HBLANK_A);
++ PSB_WVDC32(dev_priv->saveHSYNC_A, HSYNC_A);
++ PSB_WVDC32(dev_priv->saveVTOTAL_A, VTOTAL_A);
++ PSB_WVDC32(dev_priv->saveVBLANK_A, VBLANK_A);
++ PSB_WVDC32(dev_priv->saveVSYNC_A, VSYNC_A);
++ PSB_WVDC32(dev_priv->savePIPEASRC, PIPEASRC);
++ PSB_WVDC32(dev_priv->saveBCLRPAT_A, BCLRPAT_A);
++
++ /*restore performance mode*/
++ PSB_WVDC32(dev_priv->savePERF_MODE, MRST_PERF_MODE);
++
++ /*enable the pipe*/
++ if (dev_priv->iLVDS_enable)
++ PSB_WVDC32(dev_priv->savePIPEACONF, PIPEACONF);
++
++ /* set up MIPI */
++ PSB_WVDC32(dev_priv->saveINTR_EN_REG, INTR_EN_REG);
++ PSB_WVDC32(dev_priv->saveDSI_FUNC_PRG_REG, DSI_FUNC_PRG_REG);
++ PSB_WVDC32(dev_priv->saveHS_TX_TIMEOUT_REG, HS_TX_TIMEOUT_REG);
++ PSB_WVDC32(dev_priv->saveLP_RX_TIMEOUT_REG, LP_RX_TIMEOUT_REG);
++ PSB_WVDC32(dev_priv->saveTURN_AROUND_TIMEOUT_REG,
++ TURN_AROUND_TIMEOUT_REG);
++ PSB_WVDC32(dev_priv->saveDEVICE_RESET_REG, DEVICE_RESET_REG);
++ PSB_WVDC32(dev_priv->saveDPI_RESOLUTION_REG,
++ DPI_RESOLUTION_REG);
++ PSB_WVDC32(dev_priv->saveHORIZ_SYNC_PAD_COUNT_REG,
++ HORIZ_SYNC_PAD_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveHORIZ_BACK_PORCH_COUNT_REG,
++ HORIZ_BACK_PORCH_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveHORIZ_FRONT_PORCH_COUNT_REG,
++ HORIZ_FRONT_PORCH_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveHORIZ_ACTIVE_AREA_COUNT_REG,
++ HORIZ_ACTIVE_AREA_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveVERT_SYNC_PAD_COUNT_REG,
++ VERT_SYNC_PAD_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveVERT_BACK_PORCH_COUNT_REG,
++ VERT_BACK_PORCH_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveVERT_FRONT_PORCH_COUNT_REG,
++ VERT_FRONT_PORCH_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveHIGH_LOW_SWITCH_COUNT_REG,
++ HIGH_LOW_SWITCH_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveINIT_COUNT_REG, INIT_COUNT_REG);
++ PSB_WVDC32(dev_priv->saveMAX_RET_PAK_REG, MAX_RET_PAK_REG);
++ PSB_WVDC32(dev_priv->saveVIDEO_FMT_REG, VIDEO_FMT_REG);
++ PSB_WVDC32(dev_priv->saveEOT_DISABLE_REG, EOT_DISABLE_REG);
++ PSB_WVDC32(dev_priv->saveLP_BYTECLK_REG, LP_BYTECLK_REG);
++ PSB_WVDC32(dev_priv->saveHS_LS_DBI_ENABLE_REG,
++ HS_LS_DBI_ENABLE_REG);
++ PSB_WVDC32(dev_priv->saveTXCLKESC_REG, TXCLKESC_REG);
++ PSB_WVDC32(dev_priv->saveDPHY_PARAM_REG, DPHY_PARAM_REG);
++ PSB_WVDC32(dev_priv->saveMIPI_CONTROL_REG, MIPI_CONTROL_REG);
++
++ /*set up the plane*/
++ PSB_WVDC32(dev_priv->saveDSPALINOFF, DSPALINOFF);
++ PSB_WVDC32(dev_priv->saveDSPASTRIDE, DSPASTRIDE);
++ PSB_WVDC32(dev_priv->saveDSPATILEOFF, DSPATILEOFF);
++
++ /* Enable the plane */
++ PSB_WVDC32(dev_priv->saveDSPACNTR, DSPACNTR);
++ PSB_WVDC32(dev_priv->saveDSPASURF, DSPASURF);
++
++ /*Enable Cursor A*/
++ PSB_WVDC32(dev_priv->saveDSPACURSOR_CTRL, CURACNTR);
++ PSB_WVDC32(dev_priv->saveDSPACURSOR_POS, CURAPOS);
++ PSB_WVDC32(dev_priv->saveDSPACURSOR_BASE, CURABASE);
++
++ /* restore palette (gamma) */
++ /*DRM_UDELAY(50000); */
++ for (i = 0; i < 256; i++)
++ PSB_WVDC32(dev_priv->save_palette_a[i], PALETTE_A + (i<<2));
++
++ if (dev_priv->iLVDS_enable) {
++ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
++ PSB_WVDC32(dev_priv->saveLVDS, LVDS); /*port 61180h*/
++ PSB_WVDC32(dev_priv->savePFIT_CONTROL, PFIT_CONTROL);
++ PSB_WVDC32(dev_priv->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
++ PSB_WVDC32(dev_priv->savePFIT_AUTO_RATIOS, PFIT_AUTO_RATIOS);
++ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL, BLC_PWM_CTL);
++ PSB_WVDC32(dev_priv->savePP_ON_DELAYS, LVDSPP_ON);
++ PSB_WVDC32(dev_priv->savePP_OFF_DELAYS, LVDSPP_OFF);
++ PSB_WVDC32(dev_priv->savePP_DIVISOR, PP_CYCLE);
++ PSB_WVDC32(dev_priv->savePP_CONTROL, PP_CONTROL);
++ } else { /* enable MIPI */
++ PSB_WVDC32(MIPI_PORT_EN | MIPI_BORDER_EN, MIPI); /*force on port*/
++ PSB_WVDC32(1, DEVICE_READY_REG);/* force on to re-program */
++ dev_priv->init_drvIC(dev);
++ PSB_WVDC32(dev_priv->saveMIPI, MIPI); /*port 61190h*/
++ PSB_WVDC32(dev_priv->saveDEVICE_READY_REG, DEVICE_READY_REG);
++ if (dev_priv->saveDEVICE_READY_REG)
++ PSB_WVDC32(DPI_TURN_ON, DPI_CONTROL_REG);
++ PSB_WVDC32(dev_priv->savePIPEACONF, PIPEACONF);
++ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
++ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL, BLC_PWM_CTL);
++ }
++
++ /*wait for cycle delay*/
++ do {
++ pp_stat = PSB_RVDC32(PP_STATUS);
++ } while (pp_stat & 0x08000000);
++
++ DRM_UDELAY(999);
++ /*wait for panel power up*/
++ do {
++ pp_stat = PSB_RVDC32(PP_STATUS);
++ } while (pp_stat & 0x10000000);
++
++ /* restore HW overlay */
++ PSB_WVDC32(dev_priv->saveOV_OVADD, OV_OVADD);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC0, OV_OGAMC0);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC1, OV_OGAMC1);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC2, OV_OGAMC2);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC3, OV_OGAMC3);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC4, OV_OGAMC4);
++ PSB_WVDC32(dev_priv->saveOV_OGAMC5, OV_OGAMC5);
++
++ /* DPST registers */
++ PSB_WVDC32(dev_priv->saveHISTOGRAM_INT_CONTROL_REG, HISTOGRAM_INT_CONTROL);
++ PSB_WVDC32(dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG, HISTOGRAM_LOGIC_CONTROL);
++}
++
++MRST_ERROR MRSTLFBAllocBuffer(struct MRSTLFB_DEVINFO_TAG *psDevInfo, IMG_UINT32 ui32Size, MRSTLFB_BUFFER **ppBuffer)
++{
++ IMG_VOID *pvBuf;
++ IMG_UINT32 ulPagesNumber;
++ IMG_UINT32 ulCounter;
++ int i;
++
++ pvBuf = __vmalloc( ui32Size, GFP_KERNEL | __GFP_HIGHMEM, __pgprot((pgprot_val(PAGE_KERNEL ) & ~_PAGE_CACHE_MASK) | _PAGE_CACHE_WC) );
++ if( pvBuf == NULL )
++ {
++ return MRST_ERROR_OUT_OF_MEMORY;
++ }
++
++ ulPagesNumber = (ui32Size + PAGE_SIZE -1) / PAGE_SIZE;
++
++ *ppBuffer = MRSTLFBAllocKernelMem( sizeof( MRSTLFB_BUFFER ) );
++ (*ppBuffer)->sCPUVAddr = pvBuf;
++ (*ppBuffer)->ui32BufferSize = ui32Size;
++ (*ppBuffer)->uSysAddr.psNonCont = MRSTLFBAllocKernelMem( sizeof( IMG_SYS_PHYADDR ) * ulPagesNumber);
++ (*ppBuffer)->bIsAllocated = IMG_TRUE;
++ (*ppBuffer)->bIsContiguous = IMG_FALSE;
++ (*ppBuffer)->ui32OwnerTaskID = task_tgid_nr(current);
++
++ i = 0;
++ for(ulCounter = 0; ulCounter < ui32Size; ulCounter += PAGE_SIZE)
++ {
++ (*ppBuffer)->uSysAddr.psNonCont[i++].uiAddr = vmalloc_to_pfn( pvBuf + ulCounter ) << PAGE_SHIFT;
++ }
++
++ psb_gtt_map_pvr_memory( psDevInfo->psDrmDevice,
++ (unsigned int)*ppBuffer,
++ (*ppBuffer)->ui32OwnerTaskID,
++ (IMG_CPU_PHYADDR*) (*ppBuffer)->uSysAddr.psNonCont,
++ ulPagesNumber,
++ (unsigned int *)&(*ppBuffer)->sDevVAddr.uiAddr );
++
++ (*ppBuffer)->sDevVAddr.uiAddr <<= PAGE_SHIFT;
++
++ return MRST_OK;
++}
++
++MRST_ERROR MRSTLFBFreeBuffer(struct MRSTLFB_DEVINFO_TAG *psDevInfo, MRSTLFB_BUFFER **ppBuffer)
++{
++ if( !(*ppBuffer)->bIsAllocated )
++ return MRST_ERROR_INVALID_PARAMS;
++
++ psb_gtt_unmap_pvr_memory( psDevInfo->psDrmDevice,
++ (unsigned int)*ppBuffer,
++ (*ppBuffer)->ui32OwnerTaskID);
++
++ vfree( (*ppBuffer)->sCPUVAddr );
++
++ MRSTLFBFreeKernelMem( (*ppBuffer)->uSysAddr.psNonCont );
++
++ MRSTLFBFreeKernelMem( *ppBuffer);
++
++ *ppBuffer = NULL;
++
++ return MRST_OK;
++}
++
++
++
++PVRSRV_ERROR MRSTLFBPrePowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ MRSTLFB_DEVINFO* psDevInfo = (MRSTLFB_DEVINFO *)hDevHandle;
++ struct drm_device* dev = psDevInfo->psDrmDevice;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ int pp_stat, ret;
++
++ if ((eNewPowerState == eCurrentPowerState) ||
++ (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON))
++ return PVRSRV_OK;
++
++ save_display_registers(dev);
++
++ if (dev_priv->iLVDS_enable) {
++ /*shutdown the panel*/
++ PSB_WVDC32(0, PP_CONTROL);
++
++ do {
++ pp_stat = PSB_RVDC32(PP_STATUS);
++ } while (pp_stat & 0x80000000);
++
++ /*turn off the plane*/
++ PSB_WVDC32(0x58000000, DSPACNTR);
++ PSB_WVDC32(0, DSPASURF);/*trigger the plane disable*/
++ msleep(4);
++
++ /*turn off pipe*/
++ PSB_WVDC32(0x0, PIPEACONF);
++ msleep(8);
++
++ /*turn off PLLs*/
++ PSB_WVDC32(0, MRST_DPLL_A);
++ } else {
++ PSB_WVDC32(DPI_SHUT_DOWN, DPI_CONTROL_REG);
++ PSB_WVDC32(0x0, PIPEACONF);
++ PSB_WVDC32(0x2faf0000, BLC_PWM_CTL);
++ while (REG_READ(0x70008) & 0x40000000);
++ while ((PSB_RVDC32(GEN_FIFO_STAT_REG) & DPI_FIFO_EMPTY)
++ != DPI_FIFO_EMPTY);
++ PSB_WVDC32(0, DEVICE_READY_REG);
++
++ /* turn off mipi panel power */
++ ret = lnw_ipc_single_cmd(IPC_MSG_PANEL_ON_OFF, IPC_CMD_PANEL_OFF, 0, 0);
++ if (ret)
++ printk(KERN_WARNING "IPC 0xE9 failed to turn off pnl pwr. Error is: %x\n", ret);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR MRSTLFBPostPowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ MRSTLFB_DEVINFO* psDevInfo = (MRSTLFB_DEVINFO *)hDevHandle;
++ struct drm_device* dev = psDevInfo->psDrmDevice;
++ struct drm_psb_private *dev_priv = dev->dev_private;
++ struct psb_gtt *pg = dev_priv->pg;
++ int ret;
++
++ if ((eNewPowerState == eCurrentPowerState) ||
++ (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF))
++ return PVRSRV_OK;
++
++ PSB_WVDC32(pg->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
++ pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
++ pg->gmch_ctrl | _PSB_GMCH_ENABLED);
++
++ /* Don't reinitialize the GTT as it is unnecessary. The gtt is
++ * stored in memory so it will automatically be restored. All
++ * we need to do is restore the PGETBL_CTL which we already do
++ * above.
++ */
++ /*psb_gtt_init(dev_priv->pg, 1);*/
++
++ if (!dev_priv->iLVDS_enable) {
++ /* turn on mipi panel power */
++ ret = lnw_ipc_single_cmd(IPC_MSG_PANEL_ON_OFF, IPC_CMD_PANEL_ON, 0, 0);
++ if (ret)
++ printk(KERN_WARNING "IPC 0xE9 failed to turn on pnl pwr. Error is: %x\n", ret);
++ msleep(2000); /* wait 2 seconds */
++ }
++
++ restore_display_registers(dev);
++
++ return PVRSRV_OK;
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_linux.c b/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_linux.c
+new file mode 100644
+index 0000000..6001a9c
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/3rdparty/linux_framebuffer_mrst/mrstlfb_linux.c
+@@ -0,0 +1,206 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++
++#include <linux/pci.h>
++#include <linux/slab.h>
++#include <linux/errno.h>
++#include <linux/interrupt.h>
++
++#include <drm/drmP.h>
++
++#include <asm/io.h>
++
++#include "img_defs.h"
++#include "servicesext.h"
++#include "kerneldisplay.h"
++#include "pvrmodule.h"
++#include "pvr_drm.h"
++#include "mrstlfb.h"
++#include "kerneldisplay.h"
++#include "sysirq.h"
++
++#include "psb_drv.h"
++
++#if !defined(SUPPORT_DRI_DRM)
++#error "SUPPORT_DRI_DRM must be set"
++#endif
++
++#define MAKESTRING(x) # x
++
++#if !defined(DISPLAY_CONTROLLER)
++#define DISPLAY_CONTROLLER pvrlfb
++#endif
++
++//#define MAKENAME_HELPER(x, y) x ## y
++//#define MAKENAME2(x, y) MAKENAME_HELPER(x, y)
++//#define MAKENAME(x) MAKENAME2(DISPLAY_CONTROLLER, x)
++
++#define unref__ __attribute__ ((unused))
++
++
++extern int fb_idx;
++
++void *MRSTLFBAllocKernelMem(unsigned long ulSize)
++{
++ return kmalloc(ulSize, GFP_KERNEL);
++}
++
++void MRSTLFBFreeKernelMem(void *pvMem)
++{
++ kfree(pvMem);
++}
++
++
++MRST_ERROR MRSTLFBGetLibFuncAddr (char *szFunctionName, PFN_DC_GET_PVRJTABLE *ppfnFuncTable)
++{
++ if(strcmp("PVRGetDisplayClassJTable", szFunctionName) != 0)
++ {
++ return (MRST_ERROR_INVALID_PARAMS);
++ }
++
++
++ *ppfnFuncTable = PVRGetDisplayClassJTable;
++
++ return (MRST_OK);
++}
++
++static void MRSTLFBVSyncWriteReg(MRSTLFB_DEVINFO *psDevInfo, unsigned long ulOffset, unsigned long ulValue)
++{
++
++ void *pvRegAddr = (void *)(psDevInfo->pvRegs + ulOffset);
++ mb();
++ iowrite32(ulValue, pvRegAddr);
++}
++
++unsigned long MRSTLFBVSyncReadReg(MRSTLFB_DEVINFO * psDevinfo, unsigned long ulOffset)
++{
++ mb();
++ return ioread32((char *)psDevinfo->pvRegs + ulOffset);
++}
++
++void MRSTLFBEnableVSyncInterrupt(MRSTLFB_DEVINFO * psDevinfo)
++{
++#if defined(MRST_USING_INTERRUPTS)
++
++#if defined(SUPPORT_DRI_DRM)
++
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) psDevinfo->psDrmDevice->dev_private;
++ dev_priv->vblanksEnabledForFlips = true;
++ sysirq_enable_vblank(psDevinfo->psDrmDevice, 0);
++
++#else
++
++ unsigned long vdc_irq_mask;
++
++ vdc_irq_mask = ~MRSTLFBVSyncReadReg( psDevinfo, PSB_INT_MASK_R);
++ vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
++
++ MRSTLFBVSyncWriteReg(psDevinfo, PSB_INT_MASK_R, ~vdc_irq_mask);
++ MRSTLFBVSyncWriteReg(psDevinfo, PSB_INT_ENABLE_R, vdc_irq_mask);
++
++ {
++ unsigned int writeVal = MRSTLFBVSyncReadReg(psDevinfo, PIPEASTAT);
++ unsigned int mask = PIPE_START_VBLANK_INTERRUPT_ENABLE | PIPE_VBLANK_INTERRUPT_ENABLE;
++
++ writeVal |= (mask | (mask >> 16));
++ MRSTLFBVSyncWriteReg(psDevinfo, PIPEASTAT, writeVal);
++ MRSTLFBVSyncReadReg(psDevinfo, PIPEASTAT);
++ }
++#endif
++#endif
++}
++
++void MRSTLFBDisableVSyncInterrupt(MRSTLFB_DEVINFO * psDevinfo)
++{
++#if defined(MRST_USING_INTERRUPTS)
++ struct drm_device * dev = psDevinfo->psDrmDevice;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) psDevinfo->psDrmDevice->dev_private;
++ dev_priv->vblanksEnabledForFlips = false;
++ //Only turn off if DRM isn't currently using vblanks, otherwise, leave on.
++ if (!dev->vblank_enabled[0])
++ sysirq_disable_vblank(psDevinfo->psDrmDevice, 0);
++#endif
++}
++
++#if defined(MRST_USING_INTERRUPTS)
++MRST_ERROR MRSTLFBInstallVSyncISR(MRSTLFB_DEVINFO *psDevInfo, MRSTLFB_VSYNC_ISR_PFN pVsyncHandler)
++{
++ //struct drm_psb_private *dev_priv =
++ // (struct drm_psb_private *) psDevInfo->psDrmDevice->dev_private;
++ //dev_priv->psb_vsync_handler = pVsyncHandler;
++ return (MRST_OK);
++}
++
++
++MRST_ERROR MRSTLFBUninstallVSyncISR(MRSTLFB_DEVINFO *psDevInfo)
++{
++ //struct drm_psb_private *dev_priv =
++ // (struct drm_psb_private *) psDevInfo->psDrmDevice->dev_private;
++ //dev_priv->psb_vsync_handler = NULL;
++ return (MRST_OK);
++}
++#endif
++
++
++void MRSTLFBFlip(MRSTLFB_DEVINFO *psDevInfo, unsigned long uiAddr)
++{
++ int dspbase = (psDevInfo->ui32MainPipe == 0 ? DSPABASE : DSPBBASE);
++ int dspsurf = (psDevInfo->ui32MainPipe == 0 ? DSPASURF : DSPBSURF);
++
++ if (IS_MRST(psDevInfo->psDrmDevice)) {
++ MRSTLFBVSyncWriteReg(psDevInfo, dspsurf, uiAddr);
++ } else {
++ MRSTLFBVSyncWriteReg(psDevInfo, dspbase, uiAddr);
++ }
++}
++
++
++int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Init)(struct drm_device unref__ *dev)
++{
++ if(MRSTLFBInit(dev) != MRST_OK)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX ": MRSTLFB_Init: MRSTLFBInit failed\n");
++ return -ENODEV;
++ }
++
++ return 0;
++}
++
++void PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Cleanup)(struct drm_device unref__ *dev)
++{
++ if(MRSTLFBDeinit() != MRST_OK)
++ {
++ printk(KERN_WARNING DRIVER_PREFIX "%s: can't deinit device\n", __FUNCTION__);
++ }
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/include/env/linux/pvr_drm_shared.h b/drivers/gpu/drm/mrst/pvr/services4/include/env/linux/pvr_drm_shared.h
+new file mode 100644
+index 0000000..573d9b9
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/include/env/linux/pvr_drm_shared.h
+@@ -0,0 +1,54 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__PVR_DRM_SHARED_H__)
++#define __PVR_DRM_SHARED_H__
++
++#if defined(SUPPORT_DRI_DRM)
++
++#define PVR_DRM_SRVKM_CMD 0x12
++#define PVR_DRM_DISP_CMD 0x13
++#define PVR_DRM_BC_CMD 0x14
++#define PVR_DRM_IS_MASTER_CMD 0x15
++#define PVR_DRM_UNPRIV_CMD 0x16
++#define PVR_DRM_DBGDRV_CMD 0x1E
++
++#define PVR_DRM_UNPRIV_INIT_SUCCESFUL 0
++#define PVR_DRM_UNPRIV_BUSID_TYPE 1
++#define PVR_DRM_UNPRIV_BUSID_FIELD 2
++
++#define PVR_DRM_BUS_TYPE_PCI 0
++
++#define PVR_DRM_PCI_DOMAIN 0
++#define PVR_DRM_PCI_BUS 1
++#define PVR_DRM_PCI_DEV 2
++#define PVR_DRM_PCI_FUNC 3
++
++#endif
++
++#endif
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/include/kernelbuffer.h b/drivers/gpu/drm/mrst/pvr/services4/include/kernelbuffer.h
+new file mode 100644
+index 0000000..33aa49c
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/include/kernelbuffer.h
+@@ -0,0 +1,60 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__KERNELBUFFER_H__)
++#define __KERNELBUFFER_H__
++
++typedef PVRSRV_ERROR (*PFN_OPEN_BC_DEVICE)(IMG_HANDLE*);
++typedef PVRSRV_ERROR (*PFN_CLOSE_BC_DEVICE)(IMG_HANDLE);
++typedef PVRSRV_ERROR (*PFN_GET_BC_INFO)(IMG_HANDLE, BUFFER_INFO*);
++typedef PVRSRV_ERROR (*PFN_GET_BC_BUFFER)(IMG_HANDLE, IMG_UINT32, PVRSRV_SYNC_DATA*, IMG_HANDLE*);
++
++typedef struct PVRSRV_BC_SRV2BUFFER_KMJTABLE_TAG
++{
++ IMG_UINT32 ui32TableSize;
++ PFN_OPEN_BC_DEVICE pfnOpenBCDevice;
++ PFN_CLOSE_BC_DEVICE pfnCloseBCDevice;
++ PFN_GET_BC_INFO pfnGetBCInfo;
++ PFN_GET_BC_BUFFER pfnGetBCBuffer;
++ PFN_GET_BUFFER_ADDR pfnGetBufferAddr;
++
++} PVRSRV_BC_SRV2BUFFER_KMJTABLE;
++
++
++typedef PVRSRV_ERROR (*PFN_BC_REGISTER_BUFFER_DEV)(PVRSRV_BC_SRV2BUFFER_KMJTABLE*, IMG_UINT32*);
++typedef PVRSRV_ERROR (*PFN_BC_REMOVE_BUFFER_DEV)(IMG_UINT32);
++
++typedef struct PVRSRV_BC_BUFFER2SRV_KMJTABLE_TAG
++{
++ IMG_UINT32 ui32TableSize;
++ PFN_BC_REGISTER_BUFFER_DEV pfnPVRSRVRegisterBCDevice;
++ PFN_BC_REMOVE_BUFFER_DEV pfnPVRSRVRemoveBCDevice;
++
++} PVRSRV_BC_BUFFER2SRV_KMJTABLE, *PPVRSRV_BC_BUFFER2SRV_KMJTABLE;
++
++typedef IMG_BOOL (*PFN_BC_GET_PVRJTABLE) (PPVRSRV_BC_BUFFER2SRV_KMJTABLE);
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/include/kerneldisplay.h b/drivers/gpu/drm/mrst/pvr/services4/include/kerneldisplay.h
+new file mode 100644
+index 0000000..f735503
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/include/kerneldisplay.h
+@@ -0,0 +1,153 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__KERNELDISPLAY_H__)
++#define __KERNELDISPLAY_H__
++
++typedef PVRSRV_ERROR (*PFN_OPEN_DC_DEVICE)(IMG_UINT32, IMG_HANDLE*, PVRSRV_SYNC_DATA*);
++typedef PVRSRV_ERROR (*PFN_CLOSE_DC_DEVICE)(IMG_HANDLE);
++typedef PVRSRV_ERROR (*PFN_ENUM_DC_FORMATS)(IMG_HANDLE, IMG_UINT32*, DISPLAY_FORMAT*);
++typedef PVRSRV_ERROR (*PFN_ENUM_DC_DIMS)(IMG_HANDLE,
++ DISPLAY_FORMAT*,
++ IMG_UINT32*,
++ DISPLAY_DIMS*);
++typedef PVRSRV_ERROR (*PFN_GET_DC_SYSTEMBUFFER)(IMG_HANDLE, IMG_HANDLE*);
++typedef PVRSRV_ERROR (*PFN_GET_DC_INFO)(IMG_HANDLE, DISPLAY_INFO*);
++typedef PVRSRV_ERROR (*PFN_CREATE_DC_SWAPCHAIN)(IMG_HANDLE,
++ IMG_UINT32,
++ DISPLAY_SURF_ATTRIBUTES*,
++ DISPLAY_SURF_ATTRIBUTES*,
++ IMG_UINT32,
++ PVRSRV_SYNC_DATA**,
++ IMG_UINT32,
++ IMG_HANDLE*,
++ IMG_UINT32*);
++typedef PVRSRV_ERROR (*PFN_DESTROY_DC_SWAPCHAIN)(IMG_HANDLE,
++ IMG_HANDLE);
++typedef PVRSRV_ERROR (*PFN_SET_DC_DSTRECT)(IMG_HANDLE, IMG_HANDLE, IMG_RECT*);
++typedef PVRSRV_ERROR (*PFN_SET_DC_SRCRECT)(IMG_HANDLE, IMG_HANDLE, IMG_RECT*);
++typedef PVRSRV_ERROR (*PFN_SET_DC_DSTCK)(IMG_HANDLE, IMG_HANDLE, IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_SET_DC_SRCCK)(IMG_HANDLE, IMG_HANDLE, IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_GET_DC_BUFFERS)(IMG_HANDLE,
++ IMG_HANDLE,
++ IMG_UINT32*,
++ IMG_HANDLE*);
++typedef PVRSRV_ERROR (*PFN_SWAP_TO_DC_BUFFER)(IMG_HANDLE,
++ IMG_HANDLE,
++ IMG_UINT32,
++ IMG_HANDLE,
++ IMG_UINT32,
++ IMG_RECT*);
++typedef PVRSRV_ERROR (*PFN_SWAP_TO_DC_SYSTEM)(IMG_HANDLE, IMG_HANDLE);
++typedef IMG_VOID (*PFN_SET_DC_STATE)(IMG_HANDLE, IMG_UINT32);
++
++typedef struct PVRSRV_DC_SRV2DISP_KMJTABLE_TAG
++{
++ IMG_UINT32 ui32TableSize;
++ PFN_OPEN_DC_DEVICE pfnOpenDCDevice;
++ PFN_CLOSE_DC_DEVICE pfnCloseDCDevice;
++ PFN_ENUM_DC_FORMATS pfnEnumDCFormats;
++ PFN_ENUM_DC_DIMS pfnEnumDCDims;
++ PFN_GET_DC_SYSTEMBUFFER pfnGetDCSystemBuffer;
++ PFN_GET_DC_INFO pfnGetDCInfo;
++ PFN_GET_BUFFER_ADDR pfnGetBufferAddr;
++ PFN_CREATE_DC_SWAPCHAIN pfnCreateDCSwapChain;
++ PFN_DESTROY_DC_SWAPCHAIN pfnDestroyDCSwapChain;
++ PFN_SET_DC_DSTRECT pfnSetDCDstRect;
++ PFN_SET_DC_SRCRECT pfnSetDCSrcRect;
++ PFN_SET_DC_DSTCK pfnSetDCDstColourKey;
++ PFN_SET_DC_SRCCK pfnSetDCSrcColourKey;
++ PFN_GET_DC_BUFFERS pfnGetDCBuffers;
++ PFN_SWAP_TO_DC_BUFFER pfnSwapToDCBuffer;
++ PFN_SWAP_TO_DC_SYSTEM pfnSwapToDCSystem;
++ PFN_SET_DC_STATE pfnSetDCState;
++
++} PVRSRV_DC_SRV2DISP_KMJTABLE;
++
++typedef IMG_BOOL (*PFN_ISR_HANDLER)(IMG_VOID*);
++
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_DISPLAY_DEV)(PVRSRV_DC_SRV2DISP_KMJTABLE*, IMG_UINT32*);
++typedef PVRSRV_ERROR (*PFN_DC_REMOVE_DISPLAY_DEV)(IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_DC_OEM_FUNCTION)(IMG_UINT32, IMG_VOID*, IMG_UINT32, IMG_VOID*, IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_COMMANDPROCLIST)(IMG_UINT32, PPFN_CMD_PROC,IMG_UINT32[][2], IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_DC_REMOVE_COMMANDPROCLIST)(IMG_UINT32, IMG_UINT32);
++typedef IMG_VOID (*PFN_DC_CMD_COMPLETE)(IMG_HANDLE, IMG_BOOL);
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_SYS_ISR)(PFN_ISR_HANDLER, IMG_VOID*, IMG_UINT32, IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_POWER)(IMG_UINT32, PFN_PRE_POWER, PFN_POST_POWER,
++ PFN_PRE_CLOCKSPEED_CHANGE, PFN_POST_CLOCKSPEED_CHANGE,
++ IMG_HANDLE, PVRSRV_DEV_POWER_STATE, PVRSRV_DEV_POWER_STATE);
++
++typedef struct PVRSRV_DC_DISP2SRV_KMJTABLE_TAG
++{
++ IMG_UINT32 ui32TableSize;
++ PFN_DC_REGISTER_DISPLAY_DEV pfnPVRSRVRegisterDCDevice;
++ PFN_DC_REMOVE_DISPLAY_DEV pfnPVRSRVRemoveDCDevice;
++ PFN_DC_OEM_FUNCTION pfnPVRSRVOEMFunction;
++ PFN_DC_REGISTER_COMMANDPROCLIST pfnPVRSRVRegisterCmdProcList;
++ PFN_DC_REMOVE_COMMANDPROCLIST pfnPVRSRVRemoveCmdProcList;
++ PFN_DC_CMD_COMPLETE pfnPVRSRVCmdComplete;
++ PFN_DC_REGISTER_SYS_ISR pfnPVRSRVRegisterSystemISRHandler;
++ PFN_DC_REGISTER_POWER pfnPVRSRVRegisterPowerDevice;
++} PVRSRV_DC_DISP2SRV_KMJTABLE, *PPVRSRV_DC_DISP2SRV_KMJTABLE;
++
++
++typedef struct DISPLAYCLASS_FLIP_COMMAND_TAG
++{
++
++ IMG_HANDLE hExtDevice;
++
++
++ IMG_HANDLE hExtSwapChain;
++
++
++ IMG_HANDLE hExtBuffer;
++
++
++ IMG_HANDLE hPrivateTag;
++
++
++ IMG_UINT32 ui32ClipRectCount;
++
++
++ IMG_RECT *psClipRect;
++
++
++ IMG_UINT32 ui32SwapInterval;
++
++} DISPLAYCLASS_FLIP_COMMAND;
++
++#define DC_FLIP_COMMAND 0
++
++#define DC_STATE_NO_FLUSH_COMMANDS 0
++#define DC_STATE_FLUSH_COMMANDS 1
++
++
++typedef IMG_BOOL (*PFN_DC_GET_PVRJTABLE)(PPVRSRV_DC_DISP2SRV_KMJTABLE);
++
++
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/include/pvr_bridge.h b/drivers/gpu/drm/mrst/pvr/services4/include/pvr_bridge.h
+new file mode 100644
+index 0000000..3893db7
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/include/pvr_bridge.h
+@@ -0,0 +1,1383 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PVR_BRIDGE_H__
++#define __PVR_BRIDGE_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "servicesint.h"
++
++#ifdef __linux__
++
++ #include <linux/ioctl.h>
++
++ #define PVRSRV_IOC_GID 'g'
++ #define PVRSRV_IO(INDEX) _IO(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++ #define PVRSRV_IOW(INDEX) _IOW(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++ #define PVRSRV_IOR(INDEX) _IOR(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++ #define PVRSRV_IOWR(INDEX) _IOWR(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++
++#else
++
++ #error Unknown platform: Cannot define ioctls
++
++ #define PVRSRV_IO(INDEX) (PVRSRV_IOC_GID + INDEX)
++ #define PVRSRV_IOW(INDEX) (PVRSRV_IOC_GID + INDEX)
++ #define PVRSRV_IOR(INDEX) (PVRSRV_IOC_GID + INDEX)
++ #define PVRSRV_IOWR(INDEX) (PVRSRV_IOC_GID + INDEX)
++
++ #define PVRSRV_BRIDGE_BASE PVRSRV_IOC_GID
++#endif
++
++
++#define PVRSRV_BRIDGE_CORE_CMD_FIRST 0UL
++#define PVRSRV_BRIDGE_ENUM_DEVICES PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_RELEASE_DEVICEINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+4)
++#define PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+5)
++#define PVRSRV_BRIDGE_ALLOC_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+6)
++#define PVRSRV_BRIDGE_FREE_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+7)
++#define PVRSRV_BRIDGE_GETFREE_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+8)
++#define PVRSRV_BRIDGE_CREATE_COMMANDQUEUE PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+9)
++#define PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+10)
++#define PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+11)
++#define PVRSRV_BRIDGE_CONNECT_SERVICES PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+12)
++#define PVRSRV_BRIDGE_DISCONNECT_SERVICES PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+13)
++#define PVRSRV_BRIDGE_WRAP_DEVICE_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+14)
++#define PVRSRV_BRIDGE_GET_DEVICEMEMINFO PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+15)
++#define PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+16)
++#define PVRSRV_BRIDGE_FREE_DEV_VIRTMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+17)
++#define PVRSRV_BRIDGE_MAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+18)
++#define PVRSRV_BRIDGE_UNMAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+19)
++#define PVRSRV_BRIDGE_MAP_DEV_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+20)
++#define PVRSRV_BRIDGE_UNMAP_DEV_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+21)
++#define PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+22)
++#define PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+23)
++#define PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+24)
++#define PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+25)
++#define PVRSRV_BRIDGE_EXPORT_DEVICEMEM PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+26)
++#define PVRSRV_BRIDGE_RELEASE_MMAP_DATA PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+27)
++#define PVRSRV_BRIDGE_CORE_CMD_LAST (PVRSRV_BRIDGE_CORE_CMD_FIRST+27)
++
++#define PVRSRV_BRIDGE_SIM_CMD_FIRST (PVRSRV_BRIDGE_CORE_CMD_LAST+1)
++#define PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_REGISTER_SIM_PROCESS PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_SIM_CMD_LAST (PVRSRV_BRIDGE_SIM_CMD_FIRST+2)
++
++#define PVRSRV_BRIDGE_MAPPING_CMD_FIRST (PVRSRV_BRIDGE_SIM_CMD_LAST+1)
++#define PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_MAPPING_CMD_LAST (PVRSRV_BRIDGE_MAPPING_CMD_FIRST+2)
++
++#define PVRSRV_BRIDGE_STATS_CMD_FIRST (PVRSRV_BRIDGE_MAPPING_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GET_FB_STATS PVRSRV_IOWR(PVRSRV_BRIDGE_STATS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_STATS_CMD_LAST (PVRSRV_BRIDGE_STATS_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_MISC_CMD_FIRST (PVRSRV_BRIDGE_STATS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GET_MISC_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_MISC_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_RELEASE_MISC_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_MISC_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_MISC_CMD_LAST (PVRSRV_BRIDGE_MISC_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_OVERLAY_CMD_FIRST (PVRSRV_BRIDGE_MISC_CMD_LAST+1)
++#if defined (SUPPORT_OVERLAY_ROTATE_BLIT)
++#define PVRSRV_BRIDGE_INIT_3D_OVL_BLT_RES PVRSRV_IOWR(PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_DEINIT_3D_OVL_BLT_RES PVRSRV_IOWR(PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)
++#endif
++#define PVRSRV_BRIDGE_OVERLAY_CMD_LAST (PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)
++
++#if defined(PDUMP)
++#define PVRSRV_BRIDGE_PDUMP_CMD_FIRST (PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_PDUMP_INIT PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_PDUMP_MEMPOL PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_PDUMP_DUMPMEM PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_PDUMP_REG PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_PDUMP_REGPOL PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+4)
++#define PVRSRV_BRIDGE_PDUMP_COMMENT PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+5)
++#define PVRSRV_BRIDGE_PDUMP_SETFRAME PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+6)
++#define PVRSRV_BRIDGE_PDUMP_ISCAPTURING PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+7)
++#define PVRSRV_BRIDGE_PDUMP_DUMPBITMAP PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+8)
++#define PVRSRV_BRIDGE_PDUMP_DUMPREADREG PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+9)
++#define PVRSRV_BRIDGE_PDUMP_SYNCPOL PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+10)
++#define PVRSRV_BRIDGE_PDUMP_DUMPSYNC PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+11)
++#define PVRSRV_BRIDGE_PDUMP_MEMPAGES PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+12)
++#define PVRSRV_BRIDGE_PDUMP_DRIVERINFO PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+13)
++#define PVRSRV_BRIDGE_PDUMP_PDREG PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+14)
++#define PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+15)
++#define PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+16)
++#define PVRSRV_BRIDGE_PDUMP_STARTINITPHASE PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+17)
++#define PVRSRV_BRIDGE_PDUMP_STOPINITPHASE PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+18)
++#define PVRSRV_BRIDGE_PDUMP_CMD_LAST (PVRSRV_BRIDGE_PDUMP_CMD_FIRST+18)
++#else
++#define PVRSRV_BRIDGE_PDUMP_CMD_LAST PVRSRV_BRIDGE_OVERLAY_CMD_LAST
++#endif
++
++#define PVRSRV_BRIDGE_OEM_CMD_FIRST (PVRSRV_BRIDGE_PDUMP_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GET_OEMJTABLE PVRSRV_IOWR(PVRSRV_BRIDGE_OEM_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_OEM_CMD_LAST (PVRSRV_BRIDGE_OEM_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST (PVRSRV_BRIDGE_OEM_CMD_LAST+1)
++#define PVRSRV_BRIDGE_ENUM_CLASS PVRSRV_IOWR(PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_DEVCLASS_CMD_LAST (PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST (PVRSRV_BRIDGE_DEVCLASS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+4)
++#define PVRSRV_BRIDGE_GET_DISPCLASS_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+5)
++#define PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+6)
++#define PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+7)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+8)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+9)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+10)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+11)
++#define PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+12)
++#define PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+13)
++#define PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+14)
++#define PVRSRV_BRIDGE_DISPCLASS_CMD_LAST (PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+14)
++
++
++#define PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST (PVRSRV_BRIDGE_DISPCLASS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_BUFCLASS_CMD_LAST (PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+3)
++
++#define PVRSRV_BRIDGE_WRAP_CMD_FIRST (PVRSRV_BRIDGE_BUFCLASS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_WRAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_WRAP_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY PVRSRV_IOWR(PVRSRV_BRIDGE_WRAP_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_WRAP_CMD_LAST (PVRSRV_BRIDGE_WRAP_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST (PVRSRV_BRIDGE_WRAP_CMD_LAST+1)
++#define PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_MAP_MEMINFO_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_UNMAP_MEMINFO_MEM PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST (PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3)
++
++#define PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST (PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR PVRSRV_IOWR(PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_SERVICES4_TMP_CMD_LAST (PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_INITSRV_CMD_FIRST (PVRSRV_BRIDGE_SERVICES4_TMP_CMD_LAST+1)
++#define PVRSRV_BRIDGE_INITSRV_CONNECT PVRSRV_IOWR(PVRSRV_BRIDGE_INITSRV_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_INITSRV_DISCONNECT PVRSRV_IOWR(PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_INITSRV_CMD_LAST (PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST (PVRSRV_BRIDGE_INITSRV_CMD_LAST+1)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_WAIT PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_OPEN PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
++
++#define PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST+1)
++#define PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS PVRSRV_IOWR(PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_SYNC_OPS_CMD_LAST (PVRSRV_BRIDGE_SYNC_OPS_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD (PVRSRV_BRIDGE_SYNC_OPS_CMD_LAST+1)
++
++
++#define PVRSRV_KERNEL_MODE_CLIENT 1
++
++typedef struct PVRSRV_BRIDGE_RETURN_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_VOID *pvData;
++
++}PVRSRV_BRIDGE_RETURN;
++
++
++typedef struct PVRSRV_BRIDGE_PACKAGE_TAG
++{
++ IMG_UINT32 ui32BridgeID;
++ IMG_UINT32 ui32Size;
++ IMG_VOID *pvParamIn;
++ IMG_UINT32 ui32InBufferSize;
++ IMG_VOID *pvParamOut;
++ IMG_UINT32 ui32OutBufferSize;
++
++ IMG_HANDLE hKernelServices;
++}PVRSRV_BRIDGE_PACKAGE;
++
++
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 uiDevIndex;
++ PVRSRV_DEVICE_TYPE eDeviceType;
++
++} PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO;
++
++
++typedef struct PVRSRV_BRIDGE_IN_ENUMCLASS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_DEVICE_CLASS sDeviceClass;
++} PVRSRV_BRIDGE_IN_ENUMCLASS;
++
++
++typedef struct PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++} PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE;
++
++
++typedef struct PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++} PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++} PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++} PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO;
++
++
++typedef struct PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++} PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++} PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_RELEASE_DEVICEINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++
++} PVRSRV_BRIDGE_IN_RELEASE_DEVICEINFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_FREE_CLASSDEVICEINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_DEVICE_CLASS DeviceClass;
++ IMG_VOID* pvDevInfo;
++
++}PVRSRV_BRIDGE_IN_FREE_CLASSDEVICEINFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hDevMemContext;
++
++}PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++
++}PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hDevMemContext;
++
++}PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hDevMemHeap;
++ IMG_UINT32 ui32Attribs;
++ IMG_SIZE_T ui32Size;
++ IMG_SIZE_T ui32Alignment;
++
++}PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM;
++
++
++typedef struct PVRSRV_BRIDGE_IN_MAPMEMINFOTOUSER_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++}PVRSRV_BRIDGE_IN_MAPMEMINFOTOUSER;
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAPMEMINFOFROMUSER_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ IMG_PVOID pvLinAddr;
++ IMG_HANDLE hMappingInfo;
++
++}PVRSRV_BRIDGE_IN_UNMAPMEMINFOFROMUSER;
++
++
++typedef struct PVRSRV_BRIDGE_IN_FREEDEVICEMEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++
++}PVRSRV_BRIDGE_IN_FREEDEVICEMEM;
++
++
++typedef struct PVRSRV_BRIDGE_IN_EXPORTDEVICEMEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++}PVRSRV_BRIDGE_IN_EXPORTDEVICEMEM;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 ui32Flags;
++
++} PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM;
++
++
++typedef struct PVRSRV_BRIDGE_IN_CREATECOMMANDQUEUE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_SIZE_T ui32QueueSize;
++
++}PVRSRV_BRIDGE_IN_CREATECOMMANDQUEUE;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_DESTROYCOMMANDQUEUE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ PVRSRV_QUEUE_INFO *psQueueInfo;
++
++}PVRSRV_BRIDGE_IN_DESTROYCOMMANDQUEUE;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hMHandle;
++} PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_RELEASE_MMAP_DATA_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hMHandle;
++} PVRSRV_BRIDGE_IN_RELEASE_MMAP_DATA;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_RESERVE_DEV_VIRTMEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevMemHeap;
++ IMG_DEV_VIRTADDR *psDevVAddr;
++ IMG_SIZE_T ui32Size;
++ IMG_SIZE_T ui32Alignment;
++
++}PVRSRV_BRIDGE_IN_RESERVE_DEV_VIRTMEM;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_CONNECT_SERVICES_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hKernelServices;
++}PVRSRV_BRIDGE_OUT_CONNECT_SERVICES;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_RESERVE_DEV_VIRTMEM_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_OUT_RESERVE_DEV_VIRTMEM;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_FREE_DEV_VIRTMEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_IN_FREE_DEV_VIRTMEM;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hKernelMemInfo;
++ IMG_HANDLE hDstDevMemHeap;
++
++}PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psDstKernelMemInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psDstKernelSyncInfo;
++ PVRSRV_CLIENT_MEM_INFO sDstClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sDstClientSyncInfo;
++
++}PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_MAP_EXT_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ IMG_SYS_PHYADDR *psSysPAddr;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_MAP_EXT_MEMORY;
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_EXT_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_UNMAP_EXT_MEMORY;
++
++
++typedef struct PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceClassBuffer;
++ IMG_HANDLE hDevMemContext;
++
++}PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ IMG_HANDLE hMappingInfo;
++
++}PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_MEMPOL_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32Value;
++ IMG_UINT32 ui32Mask;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_MEMPOL;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ IMG_BOOL bIsRead;
++ IMG_UINT32 ui32Value;
++ IMG_UINT32 ui32Mask;
++
++}PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_PVOID pvLinAddr;
++ IMG_PVOID pvAltLinAddr;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32Bytes;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_PVOID pvAltLinAddr;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32Bytes;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPREG_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_HWREG sHWReg;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPREG;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_REGPOL_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_HWREG sHWReg;
++ IMG_UINT32 ui32Mask;
++ IMG_UINT32 ui32Flags;
++}PVRSRV_BRIDGE_IN_PDUMP_REGPOL;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_HWREG sHWReg;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hKernelMemInfo;
++ IMG_DEV_PHYADDR *pPages;
++ IMG_UINT32 ui32NumPages;
++ IMG_DEV_VIRTADDR sDevAddr;
++ IMG_UINT32 ui32Start;
++ IMG_UINT32 ui32Length;
++ IMG_BOOL bContinuous;
++
++}PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_COMMENT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_CHAR szComment[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_COMMENT;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_SETFRAME_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 ui32Frame;
++
++}PVRSRV_BRIDGE_IN_PDUMP_SETFRAME;
++
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_BITMAP_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_CHAR szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
++ IMG_UINT32 ui32FileOffset;
++ IMG_UINT32 ui32Width;
++ IMG_UINT32 ui32Height;
++ IMG_UINT32 ui32StrideInBytes;
++ IMG_DEV_VIRTADDR sDevBaseAddr;
++ IMG_UINT32 ui32Size;
++ PDUMP_PIXEL_FORMAT ePixelFormat;
++ PDUMP_MEM_FORMAT eMemFormat;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_BITMAP;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_READREG_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_CHAR szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
++ IMG_UINT32 ui32FileOffset;
++ IMG_UINT32 ui32Address;
++ IMG_UINT32 ui32Size;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_READREG;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_CHAR szString[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
++ IMG_BOOL bContinuous;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO;
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hKernelMemInfo;
++ IMG_UINT32 ui32Offset;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR;
++
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 ui32RegOffset;
++ IMG_BOOL bLastFrame;
++}PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ENUMDEVICE_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32NumDevices;
++ PVRSRV_DEVICE_IDENTIFIER asDeviceIdentifier[PVRSRV_MAX_DEVICES];
++
++}PVRSRV_BRIDGE_OUT_ENUMDEVICE;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO_TAG
++{
++
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hDevCookie;
++
++} PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ENUMCLASS_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32NumDevices;
++ IMG_UINT32 ui32DevID[PVRSRV_MAX_DEVICES];
++
++}PVRSRV_BRIDGE_OUT_ENUMCLASS;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 ui32DeviceID;
++ IMG_HANDLE hDevCookie;
++
++}PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hDeviceKM;
++
++}PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hDevMemContext;
++ IMG_VOID *pvLinAddr;
++ IMG_SIZE_T ui32ByteSize;
++ IMG_SIZE_T ui32PageOffset;
++ IMG_BOOL bPhysContig;
++ IMG_UINT32 ui32NumPageTableEntries;
++ IMG_SYS_PHYADDR *psSysPAddr;
++ IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY;
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY;
++
++
++#define PVRSRV_MAX_DC_DISPLAY_FORMATS 10
++#define PVRSRV_MAX_DC_DISPLAY_DIMENSIONS 10
++#define PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS 4
++#define PVRSRV_MAX_DC_CLIP_RECTS 32
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32Count;
++ DISPLAY_FORMAT asFormat[PVRSRV_MAX_DC_DISPLAY_FORMATS];
++
++}PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ DISPLAY_FORMAT sFormat;
++
++}PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32Count;
++ DISPLAY_DIMS asDim[PVRSRV_MAX_DC_DISPLAY_DIMENSIONS];
++
++}PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO_TAG
++{
++ PVRSRV_ERROR eError;
++ DISPLAY_INFO sDisplayInfo;
++
++}PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hBuffer;
++
++}PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_UINT32 ui32Flags;
++ DISPLAY_SURF_ATTRIBUTES sDstSurfAttrib;
++ DISPLAY_SURF_ATTRIBUTES sSrcSurfAttrib;
++ IMG_UINT32 ui32BufferCount;
++ IMG_UINT32 ui32OEMFlags;
++ IMG_UINT32 ui32SwapChainID;
++
++} PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hSwapChain;
++ IMG_UINT32 ui32SwapChainID;
++
++} PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_HANDLE hSwapChain;
++
++} PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_HANDLE hSwapChain;
++ IMG_RECT sRect;
++
++} PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_HANDLE hSwapChain;
++ IMG_UINT32 ui32CKColour;
++
++} PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_HANDLE hSwapChain;
++
++} PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32BufferCount;
++ IMG_HANDLE ahBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++
++} PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_HANDLE hBuffer;
++ IMG_UINT32 ui32SwapInterval;
++ IMG_HANDLE hPrivateTag;
++ IMG_UINT32 ui32ClipRectCount;
++ IMG_RECT sClipRect[PVRSRV_MAX_DC_CLIP_RECTS];
++
++} PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_HANDLE hSwapChain;
++
++} PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 ui32DeviceID;
++ IMG_HANDLE hDevCookie;
++
++} PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hDeviceKM;
++
++} PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO_TAG
++{
++ PVRSRV_ERROR eError;
++ BUFFER_INFO sBufferInfo;
++
++} PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDeviceKM;
++ IMG_UINT32 ui32BufferIndex;
++
++} PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hBuffer;
++
++} PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32ClientHeapCount;
++ PVRSRV_HEAP_INFO sHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
++
++} PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hDevMemContext;
++ IMG_UINT32 ui32ClientHeapCount;
++ PVRSRV_HEAP_INFO sHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
++
++} PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_CREATE_DEVMEMHEAP_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hDevMemHeap;
++
++} PVRSRV_BRIDGE_OUT_CREATE_DEVMEMHEAP;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++} PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hMemInfo;
++#if defined(SUPPORT_MEMINFO_IDS)
++ IMG_UINT64 ui64Stamp;
++#endif
++
++} PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_MAPMEMINFOTOUSER_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_PVOID pvLinAddr;
++ IMG_HANDLE hMappingInfo;
++
++}PVRSRV_BRIDGE_OUT_MAPMEMINFOTOUSER;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_SIZE_T ui32Total;
++ IMG_SIZE_T ui32Free;
++ IMG_SIZE_T ui32LargestBlock;
++
++} PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM;
++
++
++#include "pvrmmap.h"
++typedef struct PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA_TAG
++{
++ PVRSRV_ERROR eError;
++
++
++ IMG_UINT32 ui32MMapOffset;
++
++
++ IMG_UINT32 ui32ByteOffset;
++
++
++ IMG_UINT32 ui32RealByteSize;
++
++
++ IMG_UINT32 ui32UserVAddr;
++
++} PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA;
++
++typedef struct PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA_TAG
++{
++ PVRSRV_ERROR eError;
++
++
++ IMG_BOOL bMUnmap;
++
++
++ IMG_UINT32 ui32UserVAddr;
++
++
++ IMG_UINT32 ui32RealByteSize;
++} PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA;
++
++typedef struct PVRSRV_BRIDGE_IN_GET_MISC_INFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_MISC_INFO sMiscInfo;
++
++}PVRSRV_BRIDGE_IN_GET_MISC_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GET_MISC_INFO_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_MISC_INFO sMiscInfo;
++
++}PVRSRV_BRIDGE_OUT_GET_MISC_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_RELEASE_MISC_INFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_MISC_INFO sMiscInfo;
++
++}PVRSRV_BRIDGE_IN_RELEASE_MISC_INFO;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_RELEASE_MISC_INFO_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_MISC_INFO sMiscInfo;
++
++}PVRSRV_BRIDGE_OUT_RELEASE_MISC_INFO;
++
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_BOOL bIsCapturing;
++
++} PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_FB_STATS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_SIZE_T ui32Total;
++ IMG_SIZE_T ui32Available;
++
++} PVRSRV_BRIDGE_IN_GET_FB_STATS;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_MAPPHYSTOUSERSPACE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_SYS_PHYADDR sSysPhysAddr;
++ IMG_UINT32 uiSizeInBytes;
++
++} PVRSRV_BRIDGE_IN_MAPPHYSTOUSERSPACE;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_MAPPHYSTOUSERSPACE_TAG
++{
++ IMG_PVOID pvUserAddr;
++ IMG_UINT32 uiActualSize;
++ IMG_PVOID pvProcess;
++
++} PVRSRV_BRIDGE_OUT_MAPPHYSTOUSERSPACE;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAPPHYSTOUSERSPACE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_PVOID pvUserAddr;
++ IMG_PVOID pvProcess;
++
++} PVRSRV_BRIDGE_IN_UNMAPPHYSTOUSERSPACE;
++
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GETPHYSTOUSERSPACEMAP_TAG
++{
++ IMG_PVOID *ppvTbl;
++ IMG_UINT32 uiTblSize;
++
++} PVRSRV_BRIDGE_OUT_GETPHYSTOUSERSPACEMAP;
++
++
++
++typedef struct PVRSRV_BRIDGE_IN_REGISTER_SIM_PROCESS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_PVOID pvProcess;
++
++} PVRSRV_BRIDGE_IN_REGISTER_SIM_PROCESS;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_REGISTER_SIM_PROCESS_TAG
++{
++ IMG_SYS_PHYADDR sRegsPhysBase;
++ IMG_VOID *pvRegsBase;
++ IMG_PVOID pvProcess;
++ IMG_UINT32 ulNoOfEntries;
++ IMG_PVOID pvTblLinAddr;
++
++} PVRSRV_BRIDGE_OUT_REGISTER_SIM_PROCESS;
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNREGISTER_SIM_PROCESS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_PVOID pvProcess;
++ IMG_VOID *pvRegsBase;
++
++} PVRSRV_BRIDGE_IN_UNREGISTER_SIM_PROCESS;
++
++typedef struct PVRSRV_BRIDGE_IN_PROCESS_SIMISR_EVENT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_UINT32 ui32StatusAndMask;
++ PVRSRV_ERROR eError;
++
++} PVRSRV_BRIDGE_IN_PROCESS_SIMISR_EVENT;
++
++typedef struct PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_BOOL bInitSuccesful;
++} PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT;
++
++
++typedef struct PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 ui32Flags;
++ IMG_SIZE_T ui32Size;
++}PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++}PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++}PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM_TAG
++{
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hKernelMemInfo;
++}PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM_TAG
++{
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++ PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_MEMINFO_MEM_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++}PVRSRV_BRIDGE_IN_UNMAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_UNMAP_MEMINFO_MEM_TAG
++{
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_UNMAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevMemContext;
++}PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR;
++
++typedef struct PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR_TAG
++{
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR;
++
++typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hOSEventKM;
++} PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT;
++
++typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN_TAG
++{
++ PVRSRV_EVENTOBJECT sEventObject;
++} PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN;
++
++typedef struct PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN_TAG
++{
++ IMG_HANDLE hOSEvent;
++ PVRSRV_ERROR eError;
++} PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN;
++
++typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE_TAG
++{
++ PVRSRV_EVENTOBJECT sEventObject;
++ IMG_HANDLE hOSEventKM;
++} PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE;
++
++typedef struct PVRSRV_BRIDGE_IN_MODIFY_PENDING_SYNC_OPS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hKernelSyncInfo;
++ IMG_UINT32 ui32ModifyFlags;
++
++} PVRSRV_BRIDGE_IN_MODIFY_PENDING_SYNC_OPS;
++
++typedef struct PVRSRV_BRIDGE_IN_MODIFY_COMPLETE_SYNC_OPS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hKernelSyncInfo;
++ IMG_UINT32 ui32ModifyFlags;
++
++} PVRSRV_BRIDGE_IN_MODIFY_COMPLETE_SYNC_OPS;
++
++typedef struct PVRSRV_BRIDGE_OUT_MODIFY_PENDING_SYNC_OPS_TAG
++{
++ PVRSRV_ERROR eError;
++
++
++ IMG_UINT32 ui32ReadOpsPending;
++ IMG_UINT32 ui32WriteOpsPending;
++
++} PVRSRV_BRIDGE_OUT_MODIFY_PENDING_SYNC_OPS;
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/include/pvr_bridge_km.h b/drivers/gpu/drm/mrst/pvr/services4/include/pvr_bridge_km.h
+new file mode 100644
+index 0000000..9c4b054
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/include/pvr_bridge_km.h
+@@ -0,0 +1,288 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PVR_BRIDGE_KM_H_
++#define __PVR_BRIDGE_KM_H_
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "pvr_bridge.h"
++#include "perproc.h"
++
++#if defined(__linux__)
++PVRSRV_ERROR LinuxBridgeInit(IMG_VOID);
++IMG_VOID LinuxBridgeDeInit(IMG_VOID);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevicesKM(IMG_UINT32 *pui32NumDevices,
++ PVRSRV_DEVICE_IDENTIFIER *psDevIdList);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceDataKM(IMG_UINT32 uiDevIndex,
++ PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_HANDLE *phDevCookie);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_SIZE_T ui32QueueSize,
++ PVRSRV_QUEUE_INFO **ppsQueueInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapsKM(IMG_HANDLE hDevCookie,
++ PVRSRV_HEAP_INFO *psHeapInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContextKM(IMG_HANDLE hDevCookie,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE *phDevMemContext,
++ IMG_UINT32 *pui32ClientHeapCount,
++ PVRSRV_HEAP_INFO *psHeapInfo,
++ IMG_BOOL *pbCreated,
++ IMG_BOOL *pbShared);
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContextKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ IMG_BOOL *pbDestroyed);
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapInfoKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ IMG_UINT32 *pui32ClientHeapCount,
++ PVRSRV_HEAP_INFO *psHeapInfo,
++ IMG_BOOL *pbShared
++ );
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV _PVRSRVAllocDeviceMemKM(IMG_HANDLE hDevCookie,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevMemHeap,
++ IMG_UINT32 ui32Flags,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Alignment,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
++
++
++#if defined(PVRSRV_LOG_MEMORY_ALLOCS)
++ #define PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, memInfo, logStr) \
++ (PVR_TRACE(("PVRSRVAllocDeviceMemKM(" #devCookie ", " #perProc ", " #devMemHeap ", " #flags ", " #size \
++ ", " #alignment "," #memInfo "): " logStr " (size = 0x%;x)", size)),\
++ _PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, memInfo))
++#else
++ #define PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, memInfo, logStr) \
++ _PVRSRVAllocDeviceMemKM(devCookie, perProc, devMemHeap, flags, size, alignment, memInfo)
++#endif
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMemKM(IMG_HANDLE hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDissociateDeviceMemKM(IMG_HANDLE hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVReserveDeviceVirtualMemKM(IMG_HANDLE hDevMemHeap,
++ IMG_DEV_VIRTADDR *psDevVAddr,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Alignment,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceVirtualMemKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo,
++ IMG_HANDLE hDstDevMemHeap,
++ PVRSRV_KERNEL_MEM_INFO **ppsDstMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemoryKM(IMG_HANDLE hDevCookie,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevMemContext,
++ IMG_SIZE_T ui32ByteSize,
++ IMG_SIZE_T ui32PageOffset,
++ IMG_BOOL bPhysContig,
++ IMG_SYS_PHYADDR *psSysAddr,
++ IMG_VOID *pvLinAddr,
++ IMG_UINT32 ui32Flags,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVEnumerateDCKM(PVRSRV_DEVICE_CLASS DeviceClass,
++ IMG_UINT32 *pui32DevCount,
++ IMG_UINT32 *pui32DevID );
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVOpenDCDeviceKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_UINT32 ui32DeviceID,
++ IMG_HANDLE hDevCookie,
++ IMG_HANDLE *phDeviceKM);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVCloseDCDeviceKM(IMG_HANDLE hDeviceKM, IMG_BOOL bResManCallback);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVEnumDCFormatsKM(IMG_HANDLE hDeviceKM,
++ IMG_UINT32 *pui32Count,
++ DISPLAY_FORMAT *psFormat);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVEnumDCDimsKM(IMG_HANDLE hDeviceKM,
++ DISPLAY_FORMAT *psFormat,
++ IMG_UINT32 *pui32Count,
++ DISPLAY_DIMS *psDim);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetDCSystemBufferKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE *phBuffer);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetDCInfoKM(IMG_HANDLE hDeviceKM,
++ DISPLAY_INFO *psDisplayInfo);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVCreateDCSwapChainKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDeviceKM,
++ IMG_UINT32 ui32Flags,
++ DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
++ DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
++ IMG_UINT32 ui32BufferCount,
++ IMG_UINT32 ui32OEMFlags,
++ IMG_HANDLE *phSwapChain,
++ IMG_UINT32 *pui32SwapChainID);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVDestroyDCSwapChainKM(IMG_HANDLE hSwapChain);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDCDstRectKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChain,
++ IMG_RECT *psRect);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDCSrcRectKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChain,
++ IMG_RECT *psRect);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDCDstColourKeyKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 ui32CKColour);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDCSrcColourKeyKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 ui32CKColour);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetDCBuffersKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChain,
++ IMG_UINT32 *pui32BufferCount,
++ IMG_HANDLE *phBuffer);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSwapToDCBufferKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hBuffer,
++ IMG_UINT32 ui32SwapInterval,
++ IMG_HANDLE hPrivateTag,
++ IMG_UINT32 ui32ClipRectCount,
++ IMG_RECT *psClipRect);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSwapToDCSystemKM(IMG_HANDLE hDeviceKM,
++ IMG_HANDLE hSwapChain);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVOpenBCDeviceKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_UINT32 ui32DeviceID,
++ IMG_HANDLE hDevCookie,
++ IMG_HANDLE *phDeviceKM);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVCloseBCDeviceKM(IMG_HANDLE hDeviceKM, IMG_BOOL bResManCallback);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetBCInfoKM(IMG_HANDLE hDeviceKM,
++ BUFFER_INFO *psBufferInfo);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetBCBufferKM(IMG_HANDLE hDeviceKM,
++ IMG_UINT32 ui32BufferIndex,
++ IMG_HANDLE *phBuffer);
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevMemContext,
++ IMG_HANDLE hDeviceClassBuffer,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo,
++ IMG_HANDLE *phOSMapInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceClassMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetFreeDeviceMemKM(IMG_UINT32 ui32Flags,
++ IMG_SIZE_T *pui32Total,
++ IMG_SIZE_T *pui32Free,
++ IMG_SIZE_T *pui32LargestBlock);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocSyncInfoKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ PVRSRV_KERNEL_SYNC_INFO **ppsKernelSyncInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO *psMiscInfo);
++
++PVRSRV_ERROR PVRSRVGetFBStatsKM(IMG_SIZE_T *pui32Total,
++ IMG_SIZE_T *pui32Available);
++
++IMG_IMPORT PVRSRV_ERROR
++PVRSRVAllocSharedSysMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_UINT32 ui32Flags,
++ IMG_SIZE_T ui32Size,
++ PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR
++PVRSRVFreeSharedSysMemoryKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR
++PVRSRVDissociateMemFromResmanKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/include/pvrmmap.h b/drivers/gpu/drm/mrst/pvr/services4/include/pvrmmap.h
+new file mode 100644
+index 0000000..7270f54
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/include/pvrmmap.h
+@@ -0,0 +1,36 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PVRMMAP_H__
++#define __PVRMMAP_H__
++
++PVRSRV_ERROR PVRPMapKMem(IMG_HANDLE hModule, IMG_VOID **ppvLinAddr, IMG_VOID *pvLinAddrKM, IMG_HANDLE *phMappingInfo, IMG_HANDLE hMHandle);
++
++
++IMG_BOOL PVRUnMapKMem(IMG_HANDLE hModule, IMG_HANDLE hMappingInfo, IMG_HANDLE hMHandle);
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/include/servicesint.h b/drivers/gpu/drm/mrst/pvr/services4/include/servicesint.h
+new file mode 100644
+index 0000000..a024fd5
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/include/servicesint.h
+@@ -0,0 +1,266 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__SERVICESINT_H__)
++#define __SERVICESINT_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "services.h"
++#include "sysinfo.h"
++
++#define HWREC_DEFAULT_TIMEOUT (500)
++
++#define DRIVERNAME_MAXLENGTH (100)
++
++
++
++typedef struct _PVRSRV_KERNEL_MEM_INFO_
++{
++
++ IMG_PVOID pvLinAddrKM;
++
++
++ IMG_DEV_VIRTADDR sDevVAddr;
++
++
++ IMG_UINT32 ui32Flags;
++
++
++ IMG_SIZE_T ui32AllocSize;
++
++
++ PVRSRV_MEMBLK sMemBlk;
++
++
++ IMG_PVOID pvSysBackupBuffer;
++
++
++ IMG_UINT32 ui32RefCount;
++
++
++ IMG_BOOL bPendingFree;
++
++
++ #if defined(SUPPORT_MEMINFO_IDS)
++ #if !defined(USE_CODE)
++
++ IMG_UINT64 ui64Stamp;
++ #else
++ IMG_UINT32 dummy1;
++ IMG_UINT32 dummy2;
++ #endif
++ #endif
++
++
++ struct _PVRSRV_KERNEL_SYNC_INFO_ *psKernelSyncInfo;
++
++} PVRSRV_KERNEL_MEM_INFO;
++
++
++typedef struct _PVRSRV_KERNEL_SYNC_INFO_
++{
++
++ PVRSRV_SYNC_DATA *psSyncData;
++
++
++ IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr;
++
++
++ IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr;
++
++
++ PVRSRV_KERNEL_MEM_INFO *psSyncDataMemInfoKM;
++
++
++ IMG_HANDLE hResItem;
++
++
++
++ IMG_UINT32 ui32RefCount;
++
++} PVRSRV_KERNEL_SYNC_INFO;
++
++typedef struct _PVRSRV_DEVICE_SYNC_OBJECT_
++{
++
++ IMG_UINT32 ui32ReadOpsPendingVal;
++ IMG_DEV_VIRTADDR sReadOpsCompleteDevVAddr;
++ IMG_UINT32 ui32WriteOpsPendingVal;
++ IMG_DEV_VIRTADDR sWriteOpsCompleteDevVAddr;
++} PVRSRV_DEVICE_SYNC_OBJECT;
++
++typedef struct _PVRSRV_SYNC_OBJECT
++{
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfoKM;
++ IMG_UINT32 ui32WriteOpsPending;
++ IMG_UINT32 ui32ReadOpsPending;
++
++}PVRSRV_SYNC_OBJECT, *PPVRSRV_SYNC_OBJECT;
++
++typedef struct _PVRSRV_COMMAND
++{
++ IMG_SIZE_T ui32CmdSize;
++ IMG_UINT32 ui32DevIndex;
++ IMG_UINT32 CommandType;
++ IMG_UINT32 ui32DstSyncCount;
++ IMG_UINT32 ui32SrcSyncCount;
++ PVRSRV_SYNC_OBJECT *psDstSync;
++ PVRSRV_SYNC_OBJECT *psSrcSync;
++ IMG_SIZE_T ui32DataSize;
++ IMG_UINT32 ui32ProcessID;
++ IMG_VOID *pvData;
++}PVRSRV_COMMAND, *PPVRSRV_COMMAND;
++
++
++typedef struct _PVRSRV_QUEUE_INFO_
++{
++ IMG_VOID *pvLinQueueKM;
++ IMG_VOID *pvLinQueueUM;
++ volatile IMG_SIZE_T ui32ReadOffset;
++ volatile IMG_SIZE_T ui32WriteOffset;
++ IMG_UINT32 *pui32KickerAddrKM;
++ IMG_UINT32 *pui32KickerAddrUM;
++ IMG_SIZE_T ui32QueueSize;
++
++ IMG_UINT32 ui32ProcessID;
++
++ IMG_HANDLE hMemBlock[2];
++
++ struct _PVRSRV_QUEUE_INFO_ *psNextKM;
++}PVRSRV_QUEUE_INFO;
++
++typedef PVRSRV_ERROR (*PFN_INSERT_CMD) (PVRSRV_QUEUE_INFO*,
++ PVRSRV_COMMAND**,
++ IMG_UINT32,
++ IMG_UINT16,
++ IMG_UINT32,
++ PVRSRV_KERNEL_SYNC_INFO*[],
++ IMG_UINT32,
++ PVRSRV_KERNEL_SYNC_INFO*[],
++ IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_SUBMIT_CMD) (PVRSRV_QUEUE_INFO*, PVRSRV_COMMAND*, IMG_BOOL);
++
++
++typedef struct PVRSRV_DEVICECLASS_BUFFER_TAG
++{
++ PFN_GET_BUFFER_ADDR pfnGetBufferAddr;
++ IMG_HANDLE hDevMemContext;
++ IMG_HANDLE hExtDevice;
++ IMG_HANDLE hExtBuffer;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++
++} PVRSRV_DEVICECLASS_BUFFER;
++
++
++typedef struct PVRSRV_CLIENT_DEVICECLASS_INFO_TAG
++{
++ IMG_HANDLE hDeviceKM;
++ IMG_HANDLE hServices;
++} PVRSRV_CLIENT_DEVICECLASS_INFO;
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVGetWriteOpsPending)
++#endif
++static INLINE
++IMG_UINT32 PVRSRVGetWriteOpsPending(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, IMG_BOOL bIsReadOp)
++{
++ IMG_UINT32 ui32WriteOpsPending;
++
++ if(bIsReadOp)
++ {
++ ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++ else
++ {
++
++
++
++ ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++
++ return ui32WriteOpsPending;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVGetReadOpsPending)
++#endif
++static INLINE
++IMG_UINT32 PVRSRVGetReadOpsPending(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, IMG_BOOL bIsReadOp)
++{
++ IMG_UINT32 ui32ReadOpsPending;
++
++ if(bIsReadOp)
++ {
++ ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++ }
++ else
++ {
++ ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++
++ return ui32ReadOpsPending;
++}
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVQueueCommand(IMG_HANDLE hQueueInfo,
++ PVRSRV_COMMAND *psCommand);
++
++
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
++PVRSRVGetMMUContextPDDevPAddr(const PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hDevMemContext,
++ IMG_DEV_PHYADDR *sPDDevPAddr);
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
++PVRSRVAllocSharedSysMem(const PVRSRV_CONNECTION *psConnection,
++ IMG_UINT32 ui32Flags,
++ IMG_SIZE_T ui32Size,
++ PVRSRV_CLIENT_MEM_INFO **ppsClientMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
++PVRSRVFreeSharedSysMem(const PVRSRV_CONNECTION *psConnection,
++ PVRSRV_CLIENT_MEM_INFO *psClientMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR
++PVRSRVUnrefSharedSysMem(const PVRSRV_CONNECTION *psConnection,
++ PVRSRV_CLIENT_MEM_INFO *psClientMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
++PVRSRVMapMemInfoMem(const PVRSRV_CONNECTION *psConnection,
++ IMG_HANDLE hKernelMemInfo,
++ PVRSRV_CLIENT_MEM_INFO **ppsClientMemInfo);
++
++
++#if defined (__cplusplus)
++}
++#endif
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/include/sgx_bridge.h b/drivers/gpu/drm/mrst/pvr/services4/include/sgx_bridge.h
+new file mode 100644
+index 0000000..b2bfc0f
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/include/sgx_bridge.h
+@@ -0,0 +1,477 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SGX_BRIDGE_H__)
++#define __SGX_BRIDGE_H__
++
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "pvr_bridge.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++
++#define PVRSRV_BRIDGE_SGX_CMD_BASE (PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD+1)
++#define PVRSRV_BRIDGE_SGX_GETCLIENTINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+0)
++#define PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+1)
++#define PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+2)
++#define PVRSRV_BRIDGE_SGX_DOKICK PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+3)
++#define PVRSRV_BRIDGE_SGX_GETPHYSPAGEADDR PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+4)
++#define PVRSRV_BRIDGE_SGX_READREGISTRYDWORD PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+5)
++
++#define PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+9)
++
++#define PVRSRV_BRIDGE_SGX_GETMMUPDADDR PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+10)
++
++#if defined(TRANSFER_QUEUE)
++#define PVRSRV_BRIDGE_SGX_SUBMITTRANSFER PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+13)
++#endif
++#define PVRSRV_BRIDGE_SGX_GETMISCINFO PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+14)
++#define PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+15)
++#define PVRSRV_BRIDGE_SGX_DEVINITPART2 PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+16)
++
++#define PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+17)
++#define PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+18)
++#define PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+19)
++#define PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+20)
++#define PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+21)
++#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+22)
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define PVRSRV_BRIDGE_SGX_SUBMIT2D PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+23)
++#define PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+24)
++#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+25)
++#endif
++#define PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+26)
++#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+27)
++
++#define PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+28)
++
++#if defined(SUPPORT_SGX_HWPERF)
++#define PVRSRV_BRIDGE_SGX_READ_DIFF_COUNTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+29)
++#define PVRSRV_BRIDGE_SGX_READ_HWPERF_CB PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+30)
++#endif
++
++#if defined(PDUMP)
++#define PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+31)
++#define PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+32)
++#define PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+33)
++#define PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+34)
++#define PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+35)
++#endif
++
++
++
++#define PVRSRV_BRIDGE_LAST_SGX_CMD (PVRSRV_BRIDGE_SGX_CMD_BASE+35)
++
++
++typedef struct PVRSRV_BRIDGE_IN_GETPHYSPAGEADDR
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevMemHeap;
++ IMG_DEV_VIRTADDR sDevVAddr;
++}PVRSRV_BRIDGE_IN_GETPHYSPAGEADDR;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GETPHYSPAGEADDR
++{
++ PVRSRV_ERROR eError;
++ IMG_DEV_PHYADDR DevPAddr;
++ IMG_CPU_PHYADDR CpuPAddr;
++}PVRSRV_BRIDGE_OUT_GETPHYSPAGEADDR;
++
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_GETMMU_PDADDR_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hDevMemContext;
++}PVRSRV_BRIDGE_IN_SGX_GETMMU_PDADDR;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_GETMMU_PDADDR_TAG
++{
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_SGX_GETMMU_PDADDR;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GETCLIENTINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++}PVRSRV_BRIDGE_IN_GETCLIENTINFO;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO_TAG
++{
++ SGX_INTERNAL_DEVINFO sSGXInternalDevInfo;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++}PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_GETCLIENTINFO_TAG
++{
++ SGX_CLIENT_INFO sClientInfo;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_GETCLIENTINFO;
++
++
++typedef struct PVRSRV_BRIDGE_IN_RELEASECLIENTINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ SGX_CLIENT_INFO sClientInfo;
++}PVRSRV_BRIDGE_IN_RELEASECLIENTINFO;
++
++
++typedef struct PVRSRV_BRIDGE_IN_ISPBREAKPOLL_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++}PVRSRV_BRIDGE_IN_ISPBREAKPOLL;
++
++
++typedef struct PVRSRV_BRIDGE_IN_DOKICK_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ SGX_CCB_KICK sCCBKick;
++}PVRSRV_BRIDGE_IN_DOKICK;
++
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_SCHEDULE_PROCESS_QUEUES_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++}PVRSRV_BRIDGE_IN_SGX_SCHEDULE_PROCESS_QUEUES;
++
++
++#if defined(TRANSFER_QUEUE)
++
++typedef struct PVRSRV_BRIDGE_IN_SUBMITTRANSFER_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ PVRSRV_TRANSFER_SGX_KICK sKick;
++}PVRSRV_BRIDGE_IN_SUBMITTRANSFER;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++
++typedef struct PVRSRV_BRIDGE_IN_SUBMIT2D_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ PVRSRV_2D_SGX_KICK sKick;
++} PVRSRV_BRIDGE_IN_SUBMIT2D;
++#endif
++#endif
++
++
++typedef struct PVRSRV_BRIDGE_IN_READREGDWORD_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_PCHAR pszKey;
++ IMG_PCHAR pszValue;
++}PVRSRV_BRIDGE_IN_READREGDWORD;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_READREGDWORD_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32Data;
++}PVRSRV_BRIDGE_OUT_READREGDWORD;
++
++
++typedef struct PVRSRV_BRIDGE_IN_SGXGETMISCINFO_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ SGX_MISC_INFO *psMiscInfo;
++}PVRSRV_BRIDGE_IN_SGXGETMISCINFO;
++
++typedef struct PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++}PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT_TAG
++{
++ PVRSRV_ERROR eError;
++ SGX_BRIDGE_INFO_FOR_SRVINIT sInitInfo;
++}PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGXDEVINITPART2_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ SGX_BRIDGE_INIT_INFO sInitInfo;
++}PVRSRV_BRIDGE_IN_SGXDEVINITPART2;
++
++
++typedef struct PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hKernSyncInfo;
++ IMG_BOOL bWaitForComplete;
++}PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE;
++
++
++#define PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS 10
++
++typedef struct PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_BOOL bLockOnFailure;
++ IMG_UINT32 ui32TotalPBSize;
++}PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC_TAG
++{
++ IMG_HANDLE hKernelMemInfo;
++ IMG_HANDLE hSharedPBDesc;
++ IMG_HANDLE hSharedPBDescKernelMemInfoHandle;
++ IMG_HANDLE hHWPBDescKernelMemInfoHandle;
++ IMG_HANDLE hBlockKernelMemInfoHandle;
++ IMG_HANDLE hHWBlockKernelMemInfoHandle;
++ IMG_HANDLE ahSharedPBDescSubKernelMemInfoHandles[PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS];
++ IMG_UINT32 ui32SharedPBDescSubKernelMemInfoHandlesCount;
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hSharedPBDesc;
++}PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC_TAG
++{
++ PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC;
++
++
++typedef struct PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hSharedPBDescKernelMemInfo;
++ IMG_HANDLE hHWPBDescKernelMemInfo;
++ IMG_HANDLE hBlockKernelMemInfo;
++ IMG_HANDLE hHWBlockKernelMemInfo;
++ IMG_UINT32 ui32TotalPBSize;
++ IMG_HANDLE *phKernelMemInfoHandles;
++ IMG_UINT32 ui32KernelMemInfoHandlesCount;
++}PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hSharedPBDesc;
++}PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC;
++
++
++#ifdef PDUMP
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ SGX_KICKTA_DUMP_BUFFER *psBufferArray;
++ IMG_UINT32 ui32BufferArrayLength;
++ IMG_BOOL bDumpPolls;
++} PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY;
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_UINT32 ui32DumpFrameNum;
++ IMG_BOOL bLastFrame;
++ IMG_UINT32 *pui32Registers;
++ IMG_UINT32 ui32NumRegisters;
++}PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS;
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMPCOUNTER_REGISTERS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_UINT32 ui32DumpFrameNum;
++ IMG_BOOL bLastFrame;
++ IMG_UINT32 *pui32Registers;
++ IMG_UINT32 ui32NumRegisters;
++}PVRSRV_BRIDGE_IN_PDUMP_COUNTER_REGISTERS;
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_UINT32 ui32DumpFrameNum;
++ IMG_UINT32 ui32TAKickCount;
++ IMG_BOOL bLastFrame;
++ IMG_UINT32 *pui32Registers;
++ IMG_UINT32 ui32NumRegisters;
++}PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS;
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_HWPERFCB_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_CHAR szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
++ IMG_UINT32 ui32FileOffset;
++ IMG_UINT32 ui32PDumpFlags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_HWPERFCB;
++
++#endif
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_DEV_VIRTADDR sHWRenderContextDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hHWRenderContext;
++}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hHWRenderContext;
++}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hHWTransferContext;
++}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hHWTransferContext;
++}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET;
++
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++typedef struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_DEV_VIRTADDR sHW2DContextDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hHW2DContext;
++}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_HANDLE hHW2DContext;
++}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT;
++
++#define SGX2D_MAX_BLT_CMD_SIZ 256
++#endif
++
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_READ_DIFF_COUNTERS_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_UINT32 ui32Reg;
++ IMG_BOOL bNew;
++ IMG_UINT32 ui32New;
++ IMG_UINT32 ui32NewReset;
++ IMG_UINT32 ui32CountersReg;
++ IMG_UINT32 ui32Reg2;
++} PVRSRV_BRIDGE_IN_SGX_READ_DIFF_COUNTERS;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_READ_DIFF_COUNTERS_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32Old;
++ IMG_BOOL bActive;
++ PVRSRV_SGXDEV_DIFF_INFO sDiffs;
++} PVRSRV_BRIDGE_OUT_SGX_READ_DIFF_COUNTERS;
++
++
++#if defined(SUPPORT_SGX_HWPERF)
++typedef struct PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_CB_TAG
++{
++ IMG_UINT32 ui32BridgeFlags;
++ IMG_HANDLE hDevCookie;
++ IMG_UINT32 ui32ArraySize;
++ PVRSRV_SGX_HWPERF_CB_ENTRY *psHWPerfCBData;
++} PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_CB;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_CB_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32DataCount;
++ IMG_UINT32 ui32ClockSpeed;
++ IMG_UINT32 ui32HostTimeStamp;
++} PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_CB;
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/include/sgx_mkif_km.h b/drivers/gpu/drm/mrst/pvr/services4/include/sgx_mkif_km.h
+new file mode 100644
+index 0000000..99f29db
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/include/sgx_mkif_km.h
+@@ -0,0 +1,334 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__SGX_MKIF_KM_H__)
++#define __SGX_MKIF_KM_H__
++
++#include "img_types.h"
++#include "servicesint.h"
++#include "sgxapi_km.h"
++
++
++#if defined(SGX_FEATURE_MP)
++ #define SGX_REG_BANK_SHIFT (12)
++ #define SGX_REG_BANK_SIZE (0x4000)
++ #if defined(SGX541)
++ #define SGX_REG_BANK_BASE_INDEX (1)
++ #define SGX_REG_BANK_MASTER_INDEX (SGX_REG_BANK_BASE_INDEX + SGX_FEATURE_MP_CORE_COUNT)
++ #else
++ #define SGX_REG_BANK_BASE_INDEX (2)
++ #define SGX_REG_BANK_MASTER_INDEX (1)
++ #endif
++ #define SGX_MP_CORE_SELECT(x,i) (x + ((i + SGX_REG_BANK_BASE_INDEX) * SGX_REG_BANK_SIZE))
++ #define SGX_MP_MASTER_SELECT(x) (x + (SGX_REG_BANK_MASTER_INDEX * SGX_REG_BANK_SIZE))
++#else
++ #define SGX_MP_CORE_SELECT(x,i) (x)
++#endif
++
++
++typedef struct _SGXMKIF_COMMAND_
++{
++ IMG_UINT32 ui32ServiceAddress;
++ IMG_UINT32 ui32CacheControl;
++ IMG_UINT32 ui32Data[2];
++} SGXMKIF_COMMAND;
++
++
++typedef struct _PVRSRV_SGX_KERNEL_CCB_
++{
++ SGXMKIF_COMMAND asCommands[256];
++} PVRSRV_SGX_KERNEL_CCB;
++
++
++typedef struct _PVRSRV_SGX_CCB_CTL_
++{
++ IMG_UINT32 ui32WriteOffset;
++ IMG_UINT32 ui32ReadOffset;
++} PVRSRV_SGX_CCB_CTL;
++
++
++typedef struct _SGXMKIF_HOST_CTL_
++{
++#if defined(PVRSRV_USSE_EDM_BREAKPOINTS)
++ IMG_UINT32 ui32BreakpointDisable;
++ IMG_UINT32 ui32Continue;
++#endif
++
++ volatile IMG_UINT32 ui32InitStatus;
++ volatile IMG_UINT32 ui32PowerStatus;
++ volatile IMG_UINT32 ui32CleanupStatus;
++#if defined(SUPPORT_HW_RECOVERY)
++ IMG_UINT32 ui32uKernelDetectedLockups;
++ IMG_UINT32 ui32HostDetectedLockups;
++ IMG_UINT32 ui32HWRecoverySampleRate;
++#endif
++ IMG_UINT32 ui32uKernelTimerClock;
++ IMG_UINT32 ui32ActivePowManSampleRate;
++ IMG_UINT32 ui32InterruptFlags;
++ IMG_UINT32 ui32InterruptClearFlags;
++
++
++ IMG_UINT32 ui32NumActivePowerEvents;
++
++#if defined(SUPPORT_SGX_HWPERF)
++ IMG_UINT32 ui32HWPerfFlags;
++#endif
++
++
++ IMG_UINT32 ui32TimeWraps;
++} SGXMKIF_HOST_CTL;
++
++#define SGXMKIF_CMDTA_CTRLFLAGS_READY 0x00000001
++typedef struct _SGXMKIF_CMDTA_SHARED_
++{
++ IMG_UINT32 ui32CtrlFlags;
++
++ IMG_UINT32 ui32NumTAStatusVals;
++ IMG_UINT32 ui32Num3DStatusVals;
++
++
++ IMG_UINT32 ui32TATQSyncWriteOpsPendingVal;
++ IMG_DEV_VIRTADDR sTATQSyncWriteOpsCompleteDevVAddr;
++ IMG_UINT32 ui32TATQSyncReadOpsPendingVal;
++ IMG_DEV_VIRTADDR sTATQSyncReadOpsCompleteDevVAddr;
++
++
++ IMG_UINT32 ui323DTQSyncWriteOpsPendingVal;
++ IMG_DEV_VIRTADDR s3DTQSyncWriteOpsCompleteDevVAddr;
++ IMG_UINT32 ui323DTQSyncReadOpsPendingVal;
++ IMG_DEV_VIRTADDR s3DTQSyncReadOpsCompleteDevVAddr;
++
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++
++ IMG_UINT32 ui32NumTASrcSyncs;
++ PVRSRV_DEVICE_SYNC_OBJECT asTASrcSyncs[SGX_MAX_TA_SRC_SYNCS];
++ IMG_UINT32 ui32NumTADstSyncs;
++ PVRSRV_DEVICE_SYNC_OBJECT asTADstSyncs[SGX_MAX_TA_DST_SYNCS];
++ IMG_UINT32 ui32Num3DSrcSyncs;
++ PVRSRV_DEVICE_SYNC_OBJECT as3DSrcSyncs[SGX_MAX_3D_SRC_SYNCS];
++#else
++
++ IMG_UINT32 ui32NumSrcSyncs;
++ PVRSRV_DEVICE_SYNC_OBJECT asSrcSyncs[SGX_MAX_SRC_SYNCS];
++#endif
++
++
++ PVRSRV_DEVICE_SYNC_OBJECT sTA3DDependency;
++
++ CTL_STATUS sCtlTAStatusInfo[SGX_MAX_TA_STATUS_VALS];
++ CTL_STATUS sCtl3DStatusInfo[SGX_MAX_3D_STATUS_VALS];
++
++} SGXMKIF_CMDTA_SHARED;
++
++#define SGXTQ_MAX_STATUS SGX_MAX_TRANSFER_STATUS_VALS + 2
++
++#define SGXMKIF_TQFLAGS_NOSYNCUPDATE 0x00000001
++#define SGXMKIF_TQFLAGS_KEEPPENDING 0x00000002
++#define SGXMKIF_TQFLAGS_TATQ_SYNC 0x00000004
++#define SGXMKIF_TQFLAGS_3DTQ_SYNC 0x00000008
++#if defined(SGX_FEATURE_FAST_RENDER_CONTEXT_SWITCH)
++#define SGXMKIF_TQFLAGS_CTXSWITCH 0x00000010
++#endif
++#define SGXMKIF_TQFLAGS_DUMMYTRANSFER 0x00000020
++
++typedef struct _SGXMKIF_TRANSFERCMD_SHARED_
++{
++
++
++ IMG_UINT32 ui32SrcReadOpPendingVal;
++ IMG_DEV_VIRTADDR sSrcReadOpsCompleteDevAddr;
++
++ IMG_UINT32 ui32SrcWriteOpPendingVal;
++ IMG_DEV_VIRTADDR sSrcWriteOpsCompleteDevAddr;
++
++
++
++ IMG_UINT32 ui32DstReadOpPendingVal;
++ IMG_DEV_VIRTADDR sDstReadOpsCompleteDevAddr;
++
++ IMG_UINT32 ui32DstWriteOpPendingVal;
++ IMG_DEV_VIRTADDR sDstWriteOpsCompleteDevAddr;
++
++
++ IMG_UINT32 ui32TASyncWriteOpsPendingVal;
++ IMG_DEV_VIRTADDR sTASyncWriteOpsCompleteDevVAddr;
++ IMG_UINT32 ui32TASyncReadOpsPendingVal;
++ IMG_DEV_VIRTADDR sTASyncReadOpsCompleteDevVAddr;
++
++
++ IMG_UINT32 ui323DSyncWriteOpsPendingVal;
++ IMG_DEV_VIRTADDR s3DSyncWriteOpsCompleteDevVAddr;
++ IMG_UINT32 ui323DSyncReadOpsPendingVal;
++ IMG_DEV_VIRTADDR s3DSyncReadOpsCompleteDevVAddr;
++
++ IMG_UINT32 ui32NumStatusVals;
++ CTL_STATUS sCtlStatusInfo[SGXTQ_MAX_STATUS];
++} SGXMKIF_TRANSFERCMD_SHARED, *PSGXMKIF_TRANSFERCMD_SHARED;
++
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++typedef struct _SGXMKIF_2DCMD_SHARED_ {
++
++ IMG_UINT32 ui32NumSrcSync;
++ PVRSRV_DEVICE_SYNC_OBJECT sSrcSyncData[SGX_MAX_2D_SRC_SYNC_OPS];
++
++
++ PVRSRV_DEVICE_SYNC_OBJECT sDstSyncData;
++
++
++ PVRSRV_DEVICE_SYNC_OBJECT sTASyncData;
++
++
++ PVRSRV_DEVICE_SYNC_OBJECT s3DSyncData;
++} SGXMKIF_2DCMD_SHARED, *PSGXMKIF_2DCMD_SHARED;
++#endif
++
++
++typedef struct _SGXMKIF_HWDEVICE_SYNC_LIST_
++{
++ IMG_DEV_VIRTADDR sAccessDevAddr;
++ IMG_UINT32 ui32NumSyncObjects;
++
++ PVRSRV_DEVICE_SYNC_OBJECT asSyncData[1];
++} SGXMKIF_HWDEVICE_SYNC_LIST, *PSGXMKIF_HWDEVICE_SYNC_LIST;
++
++
++#define PVRSRV_USSE_EDM_INIT_COMPLETE (1UL << 0)
++
++#define PVRSRV_USSE_EDM_POWMAN_IDLE_COMPLETE (1UL << 2)
++#define PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE (1UL << 3)
++#define PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE (1UL << 4)
++#define PVRSRV_USSE_EDM_POWMAN_NO_WORK (1UL << 5)
++
++#define PVRSRV_USSE_EDM_INTERRUPT_HWR (1UL << 0)
++#define PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER (1UL << 1)
++
++#define PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE (1UL << 0)
++
++#define PVRSRV_USSE_MISCINFO_READY 0x1UL
++#define PVRSRV_USSE_MISCINFO_GET_STRUCT_SIZES 0x2UL
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++#define PVRSRV_USSE_MISCINFO_MEMREAD 0x4UL
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++#define PVRSRV_USSE_MISCINFO_MEMREAD_FAIL 0x1UL << 31;
++#endif
++#endif
++
++
++#define PVRSRV_CLEANUPCMD_RT 0x1
++#define PVRSRV_CLEANUPCMD_RC 0x2
++#define PVRSRV_CLEANUPCMD_TC 0x3
++#define PVRSRV_CLEANUPCMD_2DC 0x4
++#define PVRSRV_CLEANUPCMD_PB 0x5
++
++#define PVRSRV_POWERCMD_POWEROFF 0x1
++#define PVRSRV_POWERCMD_IDLE 0x2
++#define PVRSRV_POWERCMD_RESUME 0x3
++
++
++#if defined(SGX_FEATURE_BIF_NUM_DIRLISTS)
++#define SGX_BIF_DIR_LIST_INDEX_EDM (SGX_FEATURE_BIF_NUM_DIRLISTS - 1)
++#else
++#define SGX_BIF_DIR_LIST_INDEX_EDM (0)
++#endif
++
++#define SGX_BIF_INVALIDATE_PTCACHE 0x1
++#define SGX_BIF_INVALIDATE_PDCACHE 0x2
++#define SGX_BIF_INVALIDATE_SLCACHE 0x4
++
++
++typedef struct _SGX_MISCINFO_STRUCT_SIZES_
++{
++#if defined (SGX_FEATURE_2D_HARDWARE)
++ IMG_UINT32 ui32Sizeof_2DCMD;
++ IMG_UINT32 ui32Sizeof_2DCMD_SHARED;
++#endif
++ IMG_UINT32 ui32Sizeof_CMDTA;
++ IMG_UINT32 ui32Sizeof_CMDTA_SHARED;
++ IMG_UINT32 ui32Sizeof_TRANSFERCMD;
++ IMG_UINT32 ui32Sizeof_TRANSFERCMD_SHARED;
++ IMG_UINT32 ui32Sizeof_3DREGISTERS;
++ IMG_UINT32 ui32Sizeof_HWPBDESC;
++ IMG_UINT32 ui32Sizeof_HWRENDERCONTEXT;
++ IMG_UINT32 ui32Sizeof_HWRENDERDETAILS;
++ IMG_UINT32 ui32Sizeof_HWRTDATA;
++ IMG_UINT32 ui32Sizeof_HWRTDATASET;
++ IMG_UINT32 ui32Sizeof_HWTRANSFERCONTEXT;
++ IMG_UINT32 ui32Sizeof_HOST_CTL;
++ IMG_UINT32 ui32Sizeof_COMMAND;
++} SGX_MISCINFO_STRUCT_SIZES;
++
++
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++typedef struct _PVRSRV_SGX_MISCINFO_MEMREAD
++{
++ IMG_DEV_VIRTADDR sDevVAddr;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++} PVRSRV_SGX_MISCINFO_MEMREAD;
++#endif
++
++typedef struct _PVRSRV_SGX_MISCINFO_INFO
++{
++ IMG_UINT32 ui32MiscInfoFlags;
++ PVRSRV_SGX_MISCINFO_FEATURES sSGXFeatures;
++ SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes;
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++ PVRSRV_SGX_MISCINFO_MEMREAD sSGXMemReadData;
++#endif
++} PVRSRV_SGX_MISCINFO_INFO;
++
++#ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
++#define SGXMK_TRACE_BUFFER_SIZE 512
++#endif
++
++#define SGXMKIF_HWPERF_CB_SIZE 0x100
++
++#if defined(SUPPORT_SGX_HWPERF)
++typedef struct _SGXMKIF_HWPERF_CB_ENTRY_
++{
++ IMG_UINT32 ui32FrameNo;
++ IMG_UINT32 ui32Type;
++ IMG_UINT32 ui32Ordinal;
++ IMG_UINT32 ui32TimeWraps;
++ IMG_UINT32 ui32Time;
++ IMG_UINT32 ui32Counters[PVRSRV_SGX_HWPERF_NUM_COUNTERS];
++} SGXMKIF_HWPERF_CB_ENTRY;
++
++typedef struct _SGXMKIF_HWPERF_CB_
++{
++ IMG_UINT32 ui32Woff;
++ IMG_UINT32 ui32Roff;
++ IMG_UINT32 ui32OrdinalGRAPHICS;
++ IMG_UINT32 ui32OrdinalMK_EXECUTION;
++ SGXMKIF_HWPERF_CB_ENTRY psHWPerfCBData[SGXMKIF_HWPERF_CB_SIZE];
++} SGXMKIF_HWPERF_CB;
++#endif
++
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/include/sgxinfo.h b/drivers/gpu/drm/mrst/pvr/services4/include/sgxinfo.h
+new file mode 100644
+index 0000000..8caa7af
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/include/sgxinfo.h
+@@ -0,0 +1,288 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined (__SGXINFO_H__)
++#define __SGXINFO_H__
++
++#include "sgxscript.h"
++#include "servicesint.h"
++#include "services.h"
++#include "sgxapi_km.h"
++#include "sgx_mkif_km.h"
++
++
++#define SGX_MAX_DEV_DATA 24
++#define SGX_MAX_INIT_MEM_HANDLES 16
++
++
++typedef struct _SGX_BRIDGE_INFO_FOR_SRVINIT
++{
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ PVRSRV_HEAP_INFO asHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
++} SGX_BRIDGE_INFO_FOR_SRVINIT;
++
++
++typedef enum _SGXMKIF_CMD_TYPE_
++{
++ SGXMKIF_CMD_TA = 0,
++ SGXMKIF_CMD_TRANSFER = 1,
++ SGXMKIF_CMD_2D = 2,
++ SGXMKIF_CMD_POWER = 3,
++ SGXMKIF_CMD_CLEANUP = 4,
++ SGXMKIF_CMD_GETMISCINFO = 5,
++ SGXMKIF_CMD_PROCESS_QUEUES = 6,
++ SGXMKIF_CMD_MAX = 7,
++
++ SGXMKIF_CMD_FORCE_I32 = -1,
++
++} SGXMKIF_CMD_TYPE;
++
++
++typedef struct _SGX_BRIDGE_INIT_INFO_
++{
++ IMG_HANDLE hKernelCCBMemInfo;
++ IMG_HANDLE hKernelCCBCtlMemInfo;
++ IMG_HANDLE hKernelCCBEventKickerMemInfo;
++ IMG_HANDLE hKernelSGXHostCtlMemInfo;
++ IMG_HANDLE hKernelSGXTA3DCtlMemInfo;
++ IMG_HANDLE hKernelSGXMiscMemInfo;
++
++ IMG_UINT32 aui32HostKickAddr[SGXMKIF_CMD_MAX];
++
++ SGX_INIT_SCRIPTS sScripts;
++
++ IMG_UINT32 ui32ClientBuildOptions;
++ SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes;
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++ IMG_HANDLE hKernelHWProfilingMemInfo;
++#endif
++#if defined(SUPPORT_SGX_HWPERF)
++ IMG_HANDLE hKernelHWPerfCBMemInfo;
++#endif
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ IMG_HANDLE hKernelEDMStatusBufferMemInfo;
++#endif
++#if defined(SGX_FEATURE_OVERLAPPED_SPM)
++ IMG_HANDLE hKernelTmpRgnHeaderMemInfo;
++#endif
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ IMG_HANDLE hKernelTmpDPMStateMemInfo;
++#endif
++
++ IMG_UINT32 ui32EDMTaskReg0;
++ IMG_UINT32 ui32EDMTaskReg1;
++
++ IMG_UINT32 ui32ClkGateStatusReg;
++ IMG_UINT32 ui32ClkGateStatusMask;
++#if defined(SGX_FEATURE_MP)
++ IMG_UINT32 ui32MasterClkGateStatusReg;
++ IMG_UINT32 ui32MasterClkGateStatusMask;
++#endif
++
++ IMG_UINT32 ui32CacheControl;
++
++ IMG_UINT32 asInitDevData[SGX_MAX_DEV_DATA];
++ IMG_HANDLE asInitMemHandles[SGX_MAX_INIT_MEM_HANDLES];
++
++} SGX_BRIDGE_INIT_INFO;
++
++
++typedef struct _SGX_DEVICE_SYNC_LIST_
++{
++ PSGXMKIF_HWDEVICE_SYNC_LIST psHWDeviceSyncList;
++
++ IMG_HANDLE hKernelHWSyncListMemInfo;
++ PVRSRV_CLIENT_MEM_INFO *psHWDeviceSyncListClientMemInfo;
++ PVRSRV_CLIENT_MEM_INFO *psAccessResourceClientMemInfo;
++
++ volatile IMG_UINT32 *pui32Lock;
++
++ struct _SGX_DEVICE_SYNC_LIST_ *psNext;
++
++
++ IMG_UINT32 ui32NumSyncObjects;
++ IMG_HANDLE ahSyncHandles[1];
++} SGX_DEVICE_SYNC_LIST, *PSGX_DEVICE_SYNC_LIST;
++
++
++typedef struct _SGX_INTERNEL_STATUS_UPDATE_
++{
++ CTL_STATUS sCtlStatus;
++ IMG_HANDLE hKernelMemInfo;
++
++ IMG_UINT32 ui32LastStatusUpdateDumpVal;
++} SGX_INTERNEL_STATUS_UPDATE;
++
++
++typedef struct _SGX_CCB_KICK_
++{
++ SGXMKIF_COMMAND sCommand;
++ IMG_HANDLE hCCBKernelMemInfo;
++
++ IMG_UINT32 ui32NumDstSyncObjects;
++ IMG_HANDLE hKernelHWSyncListMemInfo;
++
++
++ IMG_HANDLE *pahDstSyncHandles;
++
++ IMG_UINT32 ui32NumTAStatusVals;
++ IMG_UINT32 ui32Num3DStatusVals;
++
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ SGX_INTERNEL_STATUS_UPDATE asTAStatusUpdate[SGX_MAX_TA_STATUS_VALS];
++ SGX_INTERNEL_STATUS_UPDATE as3DStatusUpdate[SGX_MAX_3D_STATUS_VALS];
++#else
++ IMG_HANDLE ahTAStatusSyncInfo[SGX_MAX_TA_STATUS_VALS];
++ IMG_HANDLE ah3DStatusSyncInfo[SGX_MAX_3D_STATUS_VALS];
++#endif
++
++ IMG_BOOL bFirstKickOrResume;
++#if (defined(NO_HARDWARE) || defined(PDUMP))
++ IMG_BOOL bTerminateOrAbort;
++#endif
++#if defined(SUPPORT_SGX_HWPERF)
++ IMG_BOOL bKickRender;
++#endif
++
++
++ IMG_UINT32 ui32CCBOffset;
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++
++ IMG_UINT32 ui32NumTASrcSyncs;
++ IMG_HANDLE ahTASrcKernelSyncInfo[SGX_MAX_TA_SRC_SYNCS];
++ IMG_UINT32 ui32NumTADstSyncs;
++ IMG_HANDLE ahTADstKernelSyncInfo[SGX_MAX_TA_DST_SYNCS];
++ IMG_UINT32 ui32Num3DSrcSyncs;
++ IMG_HANDLE ah3DSrcKernelSyncInfo[SGX_MAX_3D_SRC_SYNCS];
++#else
++
++ IMG_UINT32 ui32NumSrcSyncs;
++ IMG_HANDLE ahSrcKernelSyncInfo[SGX_MAX_SRC_SYNCS];
++#endif
++
++
++ IMG_BOOL bTADependency;
++ IMG_HANDLE hTA3DSyncInfo;
++
++ IMG_HANDLE hTASyncInfo;
++ IMG_HANDLE h3DSyncInfo;
++#if defined(PDUMP)
++ IMG_UINT32 ui32CCBDumpWOff;
++#endif
++#if defined(NO_HARDWARE)
++ IMG_UINT32 ui32WriteOpsPendingVal;
++#endif
++} SGX_CCB_KICK;
++
++
++#define SGX_KERNEL_USE_CODE_BASE_INDEX 15
++
++
++typedef struct _SGX_CLIENT_INFO_
++{
++ IMG_UINT32 ui32ProcessID;
++ IMG_VOID *pvProcess;
++ PVRSRV_MISC_INFO sMiscInfo;
++
++ IMG_UINT32 asDevData[SGX_MAX_DEV_DATA];
++
++} SGX_CLIENT_INFO;
++
++typedef struct _SGX_INTERNAL_DEVINFO_
++{
++ IMG_UINT32 ui32Flags;
++ IMG_HANDLE hHostCtlKernelMemInfoHandle;
++ IMG_BOOL bForcePTOff;
++} SGX_INTERNAL_DEVINFO;
++
++
++#if defined(TRANSFER_QUEUE)
++typedef struct _PVRSRV_TRANSFER_SGX_KICK_
++{
++ IMG_HANDLE hCCBMemInfo;
++ IMG_UINT32 ui32SharedCmdCCBOffset;
++
++ IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
++
++ IMG_HANDLE hTASyncInfo;
++ IMG_HANDLE h3DSyncInfo;
++
++ IMG_UINT32 ui32NumSrcSync;
++ IMG_HANDLE ahSrcSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS];
++
++ IMG_UINT32 ui32NumDstSync;
++ IMG_HANDLE ahDstSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS];
++
++ IMG_UINT32 ui32Flags;
++
++ IMG_UINT32 ui32PDumpFlags;
++#if defined(PDUMP)
++ IMG_UINT32 ui32CCBDumpWOff;
++#endif
++} PVRSRV_TRANSFER_SGX_KICK, *PPVRSRV_TRANSFER_SGX_KICK;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++typedef struct _PVRSRV_2D_SGX_KICK_
++{
++ IMG_HANDLE hCCBMemInfo;
++ IMG_UINT32 ui32SharedCmdCCBOffset;
++
++ IMG_DEV_VIRTADDR sHW2DContextDevVAddr;
++
++ IMG_UINT32 ui32NumSrcSync;
++ IMG_HANDLE ahSrcSyncInfo[SGX_MAX_2D_SRC_SYNC_OPS];
++
++
++ IMG_HANDLE hDstSyncInfo;
++
++
++ IMG_HANDLE hTASyncInfo;
++
++
++ IMG_HANDLE h3DSyncInfo;
++
++ IMG_UINT32 ui32PDumpFlags;
++#if defined(PDUMP)
++ IMG_UINT32 ui32CCBDumpWOff;
++#endif
++} PVRSRV_2D_SGX_KICK, *PPVRSRV_2D_SGX_KICK;
++#endif
++#endif
++
++#define PVRSRV_SGX_DIFF_NUM_COUNTERS 9
++
++typedef struct _PVRSRV_SGXDEV_DIFF_INFO_
++{
++ IMG_UINT32 aui32Counters[PVRSRV_SGX_DIFF_NUM_COUNTERS];
++ IMG_UINT32 ui32Time[3];
++ IMG_UINT32 ui32Marker[2];
++} PVRSRV_SGXDEV_DIFF_INFO, *PPVRSRV_SGXDEV_DIFF_INFO;
++
++
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/.gitignore b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/.gitignore
+new file mode 100644
+index 0000000..2f89523
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/.gitignore
+@@ -0,0 +1,5 @@
++bin_pc_i686*
++tmp_pc_i686*
++host_pc_i686*
++*.o
++*.o.cmd
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c
+new file mode 100644
+index 0000000..118c1d2
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c
+@@ -0,0 +1,3426 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++
++#include <stddef.h>
++
++#include "img_defs.h"
++#include "services.h"
++#include "pvr_bridge_km.h"
++#include "pvr_debug.h"
++#include "ra.h"
++#include "pvr_bridge.h"
++#if defined(SUPPORT_SGX)
++#include "sgx_bridge.h"
++#endif
++#if defined(SUPPORT_VGX)
++#include "vgx_bridge.h"
++#endif
++#if defined(SUPPORT_MSVDX)
++#include "msvdx_bridge.h"
++#endif
++#include "perproc.h"
++#include "device.h"
++#include "buffer_manager.h"
++
++#include "pdump_km.h"
++#include "syscommon.h"
++
++#include "bridged_pvr_bridge.h"
++#if defined(SUPPORT_SGX)
++#include "bridged_sgx_bridge.h"
++#endif
++#if defined(SUPPORT_VGX)
++#include "bridged_vgx_bridge.h"
++#endif
++#if defined(SUPPORT_MSVDX)
++#include "bridged_msvdx_bridge.h"
++#endif
++
++#include "env_data.h"
++
++#if defined (__linux__)
++#include "mmap.h"
++#endif
++
++#include "srvkm.h"
++
++PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
++
++#if defined(DEBUG_BRIDGE_KM)
++PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
++#endif
++
++#if defined(PVR_SECURE_HANDLES)
++static IMG_BOOL abSharedDeviceMemHeap[PVRSRV_MAX_CLIENT_HEAPS];
++static IMG_BOOL *pbSharedDeviceMemHeap = abSharedDeviceMemHeap;
++#else
++static IMG_BOOL *pbSharedDeviceMemHeap = (IMG_BOOL*)IMG_NULL;
++#endif
++
++
++#if defined(DEBUG_BRIDGE_KM)
++PVRSRV_ERROR
++CopyFromUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData,
++ IMG_UINT32 ui32BridgeID,
++ IMG_VOID *pvDest,
++ IMG_VOID *pvSrc,
++ IMG_UINT32 ui32Size)
++{
++ g_BridgeDispatchTable[ui32BridgeID].ui32CopyFromUserTotalBytes+=ui32Size;
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+=ui32Size;
++ return OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size);
++}
++PVRSRV_ERROR
++CopyToUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData,
++ IMG_UINT32 ui32BridgeID,
++ IMG_VOID *pvDest,
++ IMG_VOID *pvSrc,
++ IMG_UINT32 ui32Size)
++{
++ g_BridgeDispatchTable[ui32BridgeID].ui32CopyToUserTotalBytes+=ui32Size;
++ g_BridgeGlobalStats.ui32TotalCopyToUserBytes+=ui32Size;
++ return OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size);
++}
++#endif
++
++
++static IMG_INT
++PVRSRVEnumerateDevicesBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ PVRSRV_BRIDGE_OUT_ENUMDEVICE *psEnumDeviceOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DEVICES);
++
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++ psEnumDeviceOUT->eError =
++ PVRSRVEnumerateDevicesKM(&psEnumDeviceOUT->ui32NumDevices,
++ psEnumDeviceOUT->asDeviceIdentifier);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVAcquireDeviceDataBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO *psAcquireDevInfoIN,
++ PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO *psAcquireDevInfoOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO);
++
++ psAcquireDevInfoOUT->eError =
++ PVRSRVAcquireDeviceDataKM(psAcquireDevInfoIN->uiDevIndex,
++ psAcquireDevInfoIN->eDeviceType,
++ &hDevCookieInt);
++ if(psAcquireDevInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ psAcquireDevInfoOUT->eError =
++ PVRSRVAllocHandle(psPerProc->psHandleBase,
++ &psAcquireDevInfoOUT->hDevCookie,
++ hDevCookieInt,
++ PVRSRV_HANDLE_TYPE_DEV_NODE,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVCreateDeviceMemContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT *psCreateDevMemContextIN,
++ PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT *psCreateDevMemContextOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hDevMemContextInt;
++ IMG_UINT32 i;
++ IMG_BOOL bCreated;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT);
++
++
++ NEW_HANDLE_BATCH_OR_ERROR(psCreateDevMemContextOUT->eError, psPerProc, PVRSRV_MAX_CLIENT_HEAPS + 1);
++
++ psCreateDevMemContextOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psCreateDevMemContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psCreateDevMemContextOUT->eError =
++ PVRSRVCreateDeviceMemContextKM(hDevCookieInt,
++ psPerProc,
++ &hDevMemContextInt,
++ &psCreateDevMemContextOUT->ui32ClientHeapCount,
++ &psCreateDevMemContextOUT->sHeapInfo[0],
++ &bCreated,
++ pbSharedDeviceMemHeap);
++
++ if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ if(bCreated)
++ {
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psCreateDevMemContextOUT->hDevMemContext,
++ hDevMemContextInt,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ }
++ else
++ {
++ psCreateDevMemContextOUT->eError =
++ PVRSRVFindHandle(psPerProc->psHandleBase,
++ &psCreateDevMemContextOUT->hDevMemContext,
++ hDevMemContextInt,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++ if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ for(i = 0; i < psCreateDevMemContextOUT->ui32ClientHeapCount; i++)
++ {
++ IMG_HANDLE hDevMemHeapExt;
++
++#if defined(PVR_SECURE_HANDLES)
++ if(abSharedDeviceMemHeap[i])
++#endif
++ {
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt,
++ psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++ }
++#if defined(PVR_SECURE_HANDLES)
++ else
++ {
++
++ if(bCreated)
++ {
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt,
++ psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psCreateDevMemContextOUT->hDevMemContext);
++ }
++ else
++ {
++ psCreateDevMemContextOUT->eError =
++ PVRSRVFindHandle(psPerProc->psHandleBase, &hDevMemHeapExt,
++ psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++ if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++ }
++#endif
++ psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap = hDevMemHeapExt;
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psCreateDevMemContextOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVDestroyDeviceMemContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT *psDestroyDevMemContextIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hDevMemContextInt;
++ IMG_BOOL bDestroyed;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psDestroyDevMemContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psDestroyDevMemContextIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVDestroyDeviceMemContextKM(hDevCookieInt, hDevMemContextInt, &bDestroyed);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if(bDestroyed)
++ {
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psDestroyDevMemContextIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++ }
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVGetDeviceMemHeapInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_DEVMEM_HEAPINFO *psGetDevMemHeapInfoIN,
++ PVRSRV_BRIDGE_OUT_GET_DEVMEM_HEAPINFO *psGetDevMemHeapInfoOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hDevMemContextInt;
++ IMG_UINT32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psGetDevMemHeapInfoOUT->eError, psPerProc, PVRSRV_MAX_CLIENT_HEAPS);
++
++ psGetDevMemHeapInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psGetDevMemHeapInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetDevMemHeapInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psGetDevMemHeapInfoIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetDevMemHeapInfoOUT->eError =
++ PVRSRVGetDeviceMemHeapInfoKM(hDevCookieInt,
++ hDevMemContextInt,
++ &psGetDevMemHeapInfoOUT->ui32ClientHeapCount,
++ &psGetDevMemHeapInfoOUT->sHeapInfo[0],
++ pbSharedDeviceMemHeap);
++
++ if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ for(i = 0; i < psGetDevMemHeapInfoOUT->ui32ClientHeapCount; i++)
++ {
++ IMG_HANDLE hDevMemHeapExt;
++
++#if defined(PVR_SECURE_HANDLES)
++ if(abSharedDeviceMemHeap[i])
++#endif
++ {
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase, &hDevMemHeapExt,
++ psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++ }
++#if defined(PVR_SECURE_HANDLES)
++ else
++ {
++
++ psGetDevMemHeapInfoOUT->eError =
++ PVRSRVFindHandle(psPerProc->psHandleBase, &hDevMemHeapExt,
++ psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++ if(psGetDevMemHeapInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++#endif
++ psGetDevMemHeapInfoOUT->sHeapInfo[i].hDevMemHeap = hDevMemHeapExt;
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDevMemHeapInfoOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++#if defined(OS_PVRSRV_ALLOC_DEVICE_MEM_BW)
++IMG_INT
++PVRSRVAllocDeviceMemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM *psAllocDeviceMemIN,
++ PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM *psAllocDeviceMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++#else
++static IMG_INT
++PVRSRVAllocDeviceMemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM *psAllocDeviceMemIN,
++ PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM *psAllocDeviceMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hDevMemHeapInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_DEVICEMEM);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psAllocDeviceMemOUT->eError, psPerProc, 2);
++
++ psAllocDeviceMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psAllocDeviceMemIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psAllocDeviceMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemHeapInt,
++ psAllocDeviceMemIN->hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++
++ if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psAllocDeviceMemOUT->eError =
++ PVRSRVAllocDeviceMemKM(hDevCookieInt,
++ psPerProc,
++ hDevMemHeapInt,
++ psAllocDeviceMemIN->ui32Attribs,
++ psAllocDeviceMemIN->ui32Size,
++ psAllocDeviceMemIN->ui32Alignment,
++ &psMemInfo,
++ "" );
++
++ if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ OSMemSet(&psAllocDeviceMemOUT->sClientMemInfo,
++ 0,
++ sizeof(psAllocDeviceMemOUT->sClientMemInfo));
++
++ psAllocDeviceMemOUT->sClientMemInfo.pvLinAddrKM =
++ psMemInfo->pvLinAddrKM;
++
++#if defined (__linux__)
++ psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = 0;
++#else
++ psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = psMemInfo->pvLinAddrKM;
++#endif
++ psAllocDeviceMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
++ psAllocDeviceMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
++ psAllocDeviceMemOUT->sClientMemInfo.ui32AllocSize = psMemInfo->ui32AllocSize;
++ psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo,
++ psMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ if(psAllocDeviceMemIN->ui32Attribs & PVRSRV_MEM_NO_SYNCOBJ)
++ {
++
++ OSMemSet(&psAllocDeviceMemOUT->sClientSyncInfo,
++ 0,
++ sizeof (PVRSRV_CLIENT_SYNC_INFO));
++ psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo = IMG_NULL;
++ psAllocDeviceMemOUT->psKernelSyncInfo = IMG_NULL;
++ }
++ else
++ {
++
++ psAllocDeviceMemOUT->psKernelSyncInfo = psMemInfo->psKernelSyncInfo;
++
++ psAllocDeviceMemOUT->sClientSyncInfo.psSyncData =
++ psMemInfo->psKernelSyncInfo->psSyncData;
++ psAllocDeviceMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++ psAllocDeviceMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psAllocDeviceMemOUT->sClientSyncInfo.hMappingInfo =
++ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psAllocDeviceMemOUT->sClientSyncInfo.hKernelSyncInfo,
++ psMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo);
++
++ psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo =
++ &psAllocDeviceMemOUT->sClientSyncInfo;
++
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psAllocDeviceMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++#endif
++
++static IMG_INT
++PVRSRVFreeDeviceMemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_FREEDEVICEMEM *psFreeDeviceMemIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_VOID *pvKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_DEVICEMEM);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psFreeDeviceMemIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvKernelMemInfo,
++ psFreeDeviceMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ psKernelMemInfo = (PVRSRV_KERNEL_MEM_INFO*)pvKernelMemInfo;
++
++ if (psKernelMemInfo->ui32RefCount == 1)
++ {
++ psRetOUT->eError =
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, pvKernelMemInfo);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_WARNING, "PVRSRVFreeDeviceMemBW: mappings are open "
++ "in other processes, deferring free!"));
++
++ psKernelMemInfo->bPendingFree = IMG_TRUE;
++ psRetOUT->eError = PVRSRV_OK;
++ }
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psFreeDeviceMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVExportDeviceMemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_EXPORTDEVICEMEM *psExportDeviceMemIN,
++ PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *psExportDeviceMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EXPORT_DEVICEMEM);
++
++
++ psExportDeviceMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psExportDeviceMemIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psExportDeviceMemOUT->eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: can't find devcookie"));
++ return 0;
++ }
++
++
++ psExportDeviceMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, (IMG_PVOID *)&psKernelMemInfo,
++ psExportDeviceMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if(psExportDeviceMemOUT->eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: can't find kernel meminfo"));
++ return 0;
++ }
++
++
++ psExportDeviceMemOUT->eError =
++ PVRSRVFindHandle(KERNEL_HANDLE_BASE,
++ &psExportDeviceMemOUT->hMemInfo,
++ psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psExportDeviceMemOUT->eError == PVRSRV_OK)
++ {
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVExportDeviceMemBW: allocation is already exported"));
++ return 0;
++ }
++
++
++ psExportDeviceMemOUT->eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE,
++ &psExportDeviceMemOUT->hMemInfo,
++ psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ if (psExportDeviceMemOUT->eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVExportDeviceMemBW: failed to allocate handle from global handle list"));
++ return 0;
++ }
++
++
++ psKernelMemInfo->ui32Flags |= PVRSRV_MEM_EXPORTED;
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVMapDeviceMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *psMapDevMemIN,
++ PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *psMapDevMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psSrcKernelMemInfo = IMG_NULL;
++ PVRSRV_KERNEL_MEM_INFO *psDstKernelMemInfo = IMG_NULL;
++ IMG_HANDLE hDstDevMemHeap = IMG_NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_DEV_MEMORY);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psMapDevMemOUT->eError, psPerProc, 2);
++
++
++ psMapDevMemOUT->eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
++ (IMG_VOID**)&psSrcKernelMemInfo,
++ psMapDevMemIN->hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psMapDevMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ psMapDevMemOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDstDevMemHeap,
++ psMapDevMemIN->hDstDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++ if(psMapDevMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ psMapDevMemOUT->eError = PVRSRVMapDeviceMemoryKM(psPerProc,
++ psSrcKernelMemInfo,
++ hDstDevMemHeap,
++ &psDstKernelMemInfo);
++ if(psMapDevMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ OSMemSet(&psMapDevMemOUT->sDstClientMemInfo,
++ 0,
++ sizeof(psMapDevMemOUT->sDstClientMemInfo));
++ OSMemSet(&psMapDevMemOUT->sDstClientSyncInfo,
++ 0,
++ sizeof(psMapDevMemOUT->sDstClientSyncInfo));
++
++ psMapDevMemOUT->sDstClientMemInfo.pvLinAddrKM =
++ psDstKernelMemInfo->pvLinAddrKM;
++
++ psMapDevMemOUT->sDstClientMemInfo.pvLinAddr = 0;
++ psMapDevMemOUT->sDstClientMemInfo.sDevVAddr = psDstKernelMemInfo->sDevVAddr;
++ psMapDevMemOUT->sDstClientMemInfo.ui32Flags = psDstKernelMemInfo->ui32Flags;
++ psMapDevMemOUT->sDstClientMemInfo.ui32AllocSize = psDstKernelMemInfo->ui32AllocSize;
++ psMapDevMemOUT->sDstClientMemInfo.hMappingInfo = psDstKernelMemInfo->sMemBlk.hOSMemHandle;
++
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo,
++ psDstKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ psMapDevMemOUT->sDstClientSyncInfo.hKernelSyncInfo = IMG_NULL;
++ psMapDevMemOUT->psDstKernelSyncInfo = IMG_NULL;
++
++
++ if(psDstKernelMemInfo->psKernelSyncInfo)
++ {
++ psMapDevMemOUT->psDstKernelSyncInfo = psDstKernelMemInfo->psKernelSyncInfo;
++
++ psMapDevMemOUT->sDstClientSyncInfo.psSyncData =
++ psDstKernelMemInfo->psKernelSyncInfo->psSyncData;
++ psMapDevMemOUT->sDstClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psDstKernelMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++ psMapDevMemOUT->sDstClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psDstKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psMapDevMemOUT->sDstClientSyncInfo.hMappingInfo =
++ psDstKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
++
++ psMapDevMemOUT->sDstClientMemInfo.psClientSyncInfo = &psMapDevMemOUT->sDstClientSyncInfo;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapDevMemOUT->sDstClientSyncInfo.hKernelSyncInfo,
++ psDstKernelMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psMapDevMemOUT->sDstClientMemInfo.hKernelMemInfo);
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psMapDevMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVUnmapDeviceMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY *psUnmapDevMemIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = IMG_NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_DEV_MEMORY);
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID**)&psKernelMemInfo,
++ psUnmapDevMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVUnmapDeviceMemoryKM(psKernelMemInfo);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psUnmapDevMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ return 0;
++}
++
++
++
++static IMG_INT
++PVRSRVMapDeviceClassMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY *psMapDevClassMemIN,
++ PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *psMapDevClassMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ IMG_HANDLE hOSMapInfo;
++ IMG_HANDLE hDeviceClassBufferInt;
++ IMG_HANDLE hDevMemContextInt;
++ PVRSRV_HANDLE_TYPE eHandleType;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psMapDevClassMemOUT->eError, psPerProc, 2);
++
++
++ psMapDevClassMemOUT->eError =
++ PVRSRVLookupHandleAnyType(psPerProc->psHandleBase, &hDeviceClassBufferInt,
++ &eHandleType,
++ psMapDevClassMemIN->hDeviceClassBuffer);
++
++ if(psMapDevClassMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ psMapDevClassMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psMapDevClassMemIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if(psMapDevClassMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ switch(eHandleType)
++ {
++#if defined(PVR_SECURE_HANDLES)
++ case PVRSRV_HANDLE_TYPE_DISP_BUFFER:
++ case PVRSRV_HANDLE_TYPE_BUF_BUFFER:
++#else
++ case PVRSRV_HANDLE_TYPE_NONE:
++#endif
++ break;
++ default:
++ psMapDevClassMemOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psMapDevClassMemOUT->eError =
++ PVRSRVMapDeviceClassMemoryKM(psPerProc,
++ hDevMemContextInt,
++ hDeviceClassBufferInt,
++ &psMemInfo,
++ &hOSMapInfo);
++ if(psMapDevClassMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ OSMemSet(&psMapDevClassMemOUT->sClientMemInfo,
++ 0,
++ sizeof(psMapDevClassMemOUT->sClientMemInfo));
++ OSMemSet(&psMapDevClassMemOUT->sClientSyncInfo,
++ 0,
++ sizeof(psMapDevClassMemOUT->sClientSyncInfo));
++
++ psMapDevClassMemOUT->sClientMemInfo.pvLinAddrKM =
++ psMemInfo->pvLinAddrKM;
++
++ psMapDevClassMemOUT->sClientMemInfo.pvLinAddr = 0;
++ psMapDevClassMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
++ psMapDevClassMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
++ psMapDevClassMemOUT->sClientMemInfo.ui32AllocSize = psMemInfo->ui32AllocSize;
++ psMapDevClassMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo,
++ psMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psMapDevClassMemIN->hDeviceClassBuffer);
++
++ psMapDevClassMemOUT->sClientSyncInfo.hKernelSyncInfo = IMG_NULL;
++ psMapDevClassMemOUT->psKernelSyncInfo = IMG_NULL;
++
++
++ if(psMemInfo->psKernelSyncInfo)
++ {
++ psMapDevClassMemOUT->psKernelSyncInfo = psMemInfo->psKernelSyncInfo;
++
++ psMapDevClassMemOUT->sClientSyncInfo.psSyncData =
++ psMemInfo->psKernelSyncInfo->psSyncData;
++ psMapDevClassMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++ psMapDevClassMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psMapDevClassMemOUT->sClientSyncInfo.hMappingInfo =
++ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
++
++ psMapDevClassMemOUT->sClientMemInfo.psClientSyncInfo = &psMapDevClassMemOUT->sClientSyncInfo;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapDevClassMemOUT->sClientSyncInfo.hKernelSyncInfo,
++ psMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo);
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psMapDevClassMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVUnmapDeviceClassMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY *psUnmapDevClassMemIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvKernelMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvKernelMemInfo,
++ psUnmapDevClassMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVUnmapDeviceClassMemoryKM(pvKernelMemInfo);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psUnmapDevClassMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ return 0;
++}
++
++
++#if defined(OS_PVRSRV_WRAP_EXT_MEM_BW)
++IMG_INT
++PVRSRVWrapExtMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY *psWrapExtMemIN,
++ PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY *psWrapExtMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++#else
++static IMG_INT
++PVRSRVWrapExtMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY *psWrapExtMemIN,
++ PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY *psWrapExtMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hDevMemContextInt;
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ IMG_UINT32 ui32PageTableSize = 0;
++ IMG_SYS_PHYADDR *psSysPAddr = IMG_NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_WRAP_EXT_MEMORY);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psWrapExtMemOUT->eError, psPerProc, 2);
++
++
++ psWrapExtMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psWrapExtMemIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psWrapExtMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ psWrapExtMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psWrapExtMemIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if(psWrapExtMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if(psWrapExtMemIN->ui32NumPageTableEntries)
++ {
++ ui32PageTableSize = psWrapExtMemIN->ui32NumPageTableEntries
++ * sizeof(IMG_SYS_PHYADDR);
++
++ ASSIGN_AND_EXIT_ON_ERROR(psWrapExtMemOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32PageTableSize,
++ (IMG_VOID **)&psSysPAddr, 0,
++ "Page Table"));
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ psSysPAddr,
++ psWrapExtMemIN->psSysPAddr,
++ ui32PageTableSize) != PVRSRV_OK)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32PageTableSize, (IMG_VOID *)psSysPAddr, 0);
++
++ return -EFAULT;
++ }
++ }
++
++ psWrapExtMemOUT->eError =
++ PVRSRVWrapExtMemoryKM(hDevCookieInt,
++ psPerProc,
++ hDevMemContextInt,
++ psWrapExtMemIN->ui32ByteSize,
++ psWrapExtMemIN->ui32PageOffset,
++ psWrapExtMemIN->bPhysContig,
++ psSysPAddr,
++ psWrapExtMemIN->pvLinAddr,
++ psWrapExtMemIN->ui32Flags,
++ &psMemInfo);
++ if(psWrapExtMemIN->ui32NumPageTableEntries)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32PageTableSize,
++ (IMG_VOID *)psSysPAddr, 0);
++
++ }
++ if(psWrapExtMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psWrapExtMemOUT->sClientMemInfo.pvLinAddrKM =
++ psMemInfo->pvLinAddrKM;
++
++
++ psWrapExtMemOUT->sClientMemInfo.pvLinAddr = 0;
++ psWrapExtMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
++ psWrapExtMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
++ psWrapExtMemOUT->sClientMemInfo.ui32AllocSize = psMemInfo->ui32AllocSize;
++ psWrapExtMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo,
++ psMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++
++ psWrapExtMemOUT->sClientSyncInfo.psSyncData =
++ psMemInfo->psKernelSyncInfo->psSyncData;
++ psWrapExtMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++ psWrapExtMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psWrapExtMemOUT->sClientSyncInfo.hMappingInfo =
++ psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
++
++ psWrapExtMemOUT->sClientMemInfo.psClientSyncInfo = &psWrapExtMemOUT->sClientSyncInfo;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psWrapExtMemOUT->sClientSyncInfo.hKernelSyncInfo,
++ (IMG_HANDLE)psMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psWrapExtMemOUT->eError, psPerProc);
++
++ return 0;
++}
++#endif
++
++static IMG_INT
++PVRSRVUnwrapExtMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY *psUnwrapExtMemIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo,
++ psUnwrapExtMemIN->hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVUnwrapExtMemoryKM((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psUnwrapExtMemIN->hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVGetFreeDeviceMemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM *psGetFreeDeviceMemIN,
++ PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM *psGetFreeDeviceMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GETFREE_DEVICEMEM);
++
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psGetFreeDeviceMemOUT->eError =
++ PVRSRVGetFreeDeviceMemKM(psGetFreeDeviceMemIN->ui32Flags,
++ &psGetFreeDeviceMemOUT->ui32Total,
++ &psGetFreeDeviceMemOUT->ui32Free,
++ &psGetFreeDeviceMemOUT->ui32LargestBlock);
++
++ return 0;
++}
++
++static IMG_INT
++PVRMMapOSMemHandleToMMapDataBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MHANDLE_TO_MMAP_DATA *psMMapDataIN,
++ PVRSRV_BRIDGE_OUT_MHANDLE_TO_MMAP_DATA *psMMapDataOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA);
++
++#if defined (__linux__)
++ psMMapDataOUT->eError =
++ PVRMMapOSMemHandleToMMapData(psPerProc,
++ psMMapDataIN->hMHandle,
++ &psMMapDataOUT->ui32MMapOffset,
++ &psMMapDataOUT->ui32ByteOffset,
++ &psMMapDataOUT->ui32RealByteSize,
++ &psMMapDataOUT->ui32UserVAddr);
++#else
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ PVR_UNREFERENCED_PARAMETER(psMMapDataIN);
++
++ psMMapDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
++#endif
++ return 0;
++}
++
++
++static IMG_INT
++PVRMMapReleaseMMapDataBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_RELEASE_MMAP_DATA *psMMapDataIN,
++ PVRSRV_BRIDGE_OUT_RELEASE_MMAP_DATA *psMMapDataOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_RELEASE_MMAP_DATA);
++
++#if defined (__linux__)
++ psMMapDataOUT->eError =
++ PVRMMapReleaseMMapData(psPerProc,
++ psMMapDataIN->hMHandle,
++ &psMMapDataOUT->bMUnmap,
++ &psMMapDataOUT->ui32RealByteSize,
++ &psMMapDataOUT->ui32UserVAddr);
++#else
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ PVR_UNREFERENCED_PARAMETER(psMMapDataIN);
++
++ psMMapDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
++#endif
++ return 0;
++}
++
++
++#ifdef PDUMP
++static IMG_INT
++PDumpIsCaptureFrameBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING *psPDumpIsCapturingOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_ISCAPTURING);
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psPDumpIsCapturingOUT->bIsCapturing = PDumpIsCaptureFrameKM();
++ psPDumpIsCapturingOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++static IMG_INT
++PDumpCommentBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_COMMENT *psPDumpCommentIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_COMMENT);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError = PDumpCommentKM(&psPDumpCommentIN->szComment[0],
++ psPDumpCommentIN->ui32Flags);
++ return 0;
++}
++
++static IMG_INT
++PDumpSetFrameBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_SETFRAME *psPDumpSetFrameIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SETFRAME);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError = PDumpSetFrameKM(psPDumpSetFrameIN->ui32Frame);
++
++ return 0;
++}
++
++static IMG_INT
++PDumpRegWithFlagsBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DUMPREG *psPDumpRegDumpIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REG);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError =
++ PDumpRegWithFlagsKM(psPDumpRegDumpIN->sHWReg.ui32RegAddr,
++ psPDumpRegDumpIN->sHWReg.ui32RegVal,
++ psPDumpRegDumpIN->ui32Flags);
++
++ return 0;
++}
++
++static IMG_INT
++PDumpRegPolBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_REGPOL *psPDumpRegPolIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REGPOL);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError =
++ PDumpRegPolWithFlagsKM(psPDumpRegPolIN->sHWReg.ui32RegAddr,
++ psPDumpRegPolIN->sHWReg.ui32RegVal,
++ psPDumpRegPolIN->ui32Mask,
++ psPDumpRegPolIN->ui32Flags);
++
++ return 0;
++}
++
++static IMG_INT
++PDumpMemPolBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_MEMPOL *psPDumpMemPolIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_MEMPOL);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo,
++ psPDumpMemPolIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PDumpMemPolKM(((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo),
++ psPDumpMemPolIN->ui32Offset,
++ psPDumpMemPolIN->ui32Value,
++ psPDumpMemPolIN->ui32Mask,
++ PDUMP_POLL_OPERATOR_EQUAL,
++ psPDumpMemPolIN->ui32Flags,
++ MAKEUNIQUETAG(pvMemInfo));
++
++ return 0;
++}
++
++static IMG_INT
++PDumpMemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM *psPDumpMemDumpIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPMEM);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo,
++ psPDumpMemDumpIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PDumpMemUM(psPerProc,
++ psPDumpMemDumpIN->pvAltLinAddr,
++ psPDumpMemDumpIN->pvLinAddr,
++ pvMemInfo,
++ psPDumpMemDumpIN->ui32Offset,
++ psPDumpMemDumpIN->ui32Bytes,
++ psPDumpMemDumpIN->ui32Flags,
++ MAKEUNIQUETAG(pvMemInfo));
++
++ return 0;
++}
++
++static IMG_INT
++PDumpBitmapBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_BITMAP *psPDumpBitmapIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++
++ psRetOUT->eError =
++ PDumpBitmapKM(&psPDumpBitmapIN->szFileName[0],
++ psPDumpBitmapIN->ui32FileOffset,
++ psPDumpBitmapIN->ui32Width,
++ psPDumpBitmapIN->ui32Height,
++ psPDumpBitmapIN->ui32StrideInBytes,
++ psPDumpBitmapIN->sDevBaseAddr,
++ psPDumpBitmapIN->ui32Size,
++ psPDumpBitmapIN->ePixelFormat,
++ psPDumpBitmapIN->eMemFormat,
++ psPDumpBitmapIN->ui32Flags);
++
++ return 0;
++}
++
++static IMG_INT
++PDumpReadRegBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_READREG *psPDumpReadRegIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPREADREG);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError =
++ PDumpReadRegKM(&psPDumpReadRegIN->szFileName[0],
++ psPDumpReadRegIN->ui32FileOffset,
++ psPDumpReadRegIN->ui32Address,
++ psPDumpReadRegIN->ui32Size,
++ psPDumpReadRegIN->ui32Flags);
++
++ return 0;
++}
++
++static IMG_INT
++PDumpDriverInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO *psPDumpDriverInfoIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_UINT32 ui32PDumpFlags;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DRIVERINFO);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ ui32PDumpFlags = 0;
++ if(psPDumpDriverInfoIN->bContinuous)
++ {
++ ui32PDumpFlags |= PDUMP_FLAGS_CONTINUOUS;
++ }
++ psRetOUT->eError =
++ PDumpDriverInfoKM(&psPDumpDriverInfoIN->szString[0],
++ ui32PDumpFlags);
++
++ return 0;
++}
++
++static IMG_INT
++PDumpSyncDumpBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC *psPDumpSyncDumpIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_UINT32 ui32Bytes = psPDumpSyncDumpIN->ui32Bytes;
++ IMG_VOID *pvSyncInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPSYNC);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo,
++ psPDumpSyncDumpIN->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PDumpMemUM(psPerProc,
++ psPDumpSyncDumpIN->pvAltLinAddr,
++ IMG_NULL,
++ ((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM,
++ psPDumpSyncDumpIN->ui32Offset,
++ ui32Bytes,
++ 0,
++ MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM));
++
++ return 0;
++}
++
++static IMG_INT
++PDumpSyncPolBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL *psPDumpSyncPolIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_UINT32 ui32Offset;
++ IMG_VOID *pvSyncInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SYNCPOL);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo,
++ psPDumpSyncPolIN->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if(psPDumpSyncPolIN->bIsRead)
++ {
++ ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete);
++ }
++ else
++ {
++ ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete);
++ }
++
++ psRetOUT->eError =
++ PDumpMemPolKM(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM,
++ ui32Offset,
++ psPDumpSyncPolIN->ui32Value,
++ psPDumpSyncPolIN->ui32Mask,
++ PDUMP_POLL_OPERATOR_EQUAL,
++ 0,
++ MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM));
++
++ return 0;
++}
++
++static IMG_INT
++PDumpPDRegBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG *psPDumpPDRegDumpIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_PDREG);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ PDumpPDReg(psPDumpPDRegDumpIN->sHWReg.ui32RegAddr,
++ psPDumpPDRegDumpIN->sHWReg.ui32RegVal,
++ PDUMP_PD_UNIQUETAG);
++
++ psRetOUT->eError = PVRSRV_OK;
++ return 0;
++}
++
++static IMG_INT
++PDumpCycleCountRegReadBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ *psPDumpCycleCountRegReadIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ PDumpCycleCountRegRead(psPDumpCycleCountRegReadIN->ui32RegOffset,
++ psPDumpCycleCountRegReadIN->bLastFrame);
++
++ psRetOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++static IMG_INT
++PDumpPDDevPAddrBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR *psPDumpPDDevPAddrIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvMemInfo,
++ psPDumpPDDevPAddrIN->hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PDumpPDDevPAddrKM((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo,
++ psPDumpPDDevPAddrIN->ui32Offset,
++ psPDumpPDDevPAddrIN->sPDDevPAddr,
++ MAKEUNIQUETAG(pvMemInfo),
++ PDUMP_PD_UNIQUETAG);
++ return 0;
++}
++
++static IMG_INT
++PDumpStartInitPhaseBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_STARTINITPHASE);
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError = PDumpStartInitPhaseKM();
++
++ return 0;
++}
++
++static IMG_INT
++PDumpStopInitPhaseBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_STOPINITPHASE);
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ psRetOUT->eError = PDumpStopInitPhaseKM();
++
++ return 0;
++}
++
++#endif
++
++
++static IMG_INT
++PVRSRVGetMiscInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_MISC_INFO *psGetMiscInfoIN,
++ PVRSRV_BRIDGE_OUT_GET_MISC_INFO *psGetMiscInfoOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_ERROR eError;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_MISC_INFO);
++
++ OSMemCopy(&psGetMiscInfoOUT->sMiscInfo,
++ &psGetMiscInfoIN->sMiscInfo,
++ sizeof(PVRSRV_MISC_INFO));
++
++ if (((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0) &&
++ ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0))
++ {
++
++ psGetMiscInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++
++ if (((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0) ||
++ ((psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0))
++ {
++
++ ASSIGN_AND_EXIT_ON_ERROR(psGetMiscInfoOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen,
++ (IMG_VOID **)&psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, 0,
++ "Output string buffer"));
++
++ psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo);
++
++
++ eError = CopyToUserWrapper(psPerProc, ui32BridgeID,
++ psGetMiscInfoIN->sMiscInfo.pszMemoryStr,
++ psGetMiscInfoOUT->sMiscInfo.pszMemoryStr,
++ psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen);
++
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen,
++ (IMG_VOID *)psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, 0);
++ psGetMiscInfoOUT->sMiscInfo.pszMemoryStr = IMG_NULL;
++
++
++ psGetMiscInfoOUT->sMiscInfo.pszMemoryStr = psGetMiscInfoIN->sMiscInfo.pszMemoryStr;
++
++ if(eError != PVRSRV_OK)
++ {
++
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoBW Error copy to user"));
++ return -EFAULT;
++ }
++ }
++ else
++ {
++ psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo);
++ }
++
++
++ if (psGetMiscInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ if (psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT)
++ {
++ psGetMiscInfoOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
++ &psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.hOSEventKM,
++ psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.hOSEventKM,
++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++ if (psGetMiscInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle)
++ {
++
++ psGetMiscInfoOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
++ &psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle,
++ psGetMiscInfoOUT->sMiscInfo.hSOCTimerRegisterOSMemHandle,
++ PVRSRV_HANDLE_TYPE_SOC_TIMER,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++ if (psGetMiscInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVConnectBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ PVRSRV_BRIDGE_OUT_CONNECT_SERVICES *psConnectServicesOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CONNECT_SERVICES);
++
++ psConnectServicesOUT->hKernelServices = psPerProc->hPerProcData;
++ psConnectServicesOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVDisconnectBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DISCONNECT_SERVICES);
++
++
++ psRetOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVEnumerateDCBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ENUMCLASS *psEnumDispClassIN,
++ PVRSRV_BRIDGE_OUT_ENUMCLASS *psEnumDispClassOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_CLASS);
++
++ psEnumDispClassOUT->eError =
++ PVRSRVEnumerateDCKM(psEnumDispClassIN->sDeviceClass,
++ &psEnumDispClassOUT->ui32NumDevices,
++ &psEnumDispClassOUT->ui32DevID[0]);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVOpenDCDeviceBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE *psOpenDispClassDeviceIN,
++ PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE *psOpenDispClassDeviceOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psOpenDispClassDeviceOUT->eError, psPerProc, 1);
++
++ psOpenDispClassDeviceOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psOpenDispClassDeviceIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psOpenDispClassDeviceOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psOpenDispClassDeviceOUT->eError =
++ PVRSRVOpenDCDeviceKM(psPerProc,
++ psOpenDispClassDeviceIN->ui32DeviceID,
++ hDevCookieInt,
++ &hDispClassInfoInt);
++
++ if(psOpenDispClassDeviceOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psOpenDispClassDeviceOUT->hDeviceKM,
++ hDispClassInfoInt,
++ PVRSRV_HANDLE_TYPE_DISP_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ COMMIT_HANDLE_BATCH_OR_ERROR(psOpenDispClassDeviceOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVCloseDCDeviceBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE *psCloseDispClassDeviceIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfoInt,
++ psCloseDispClassDeviceIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVCloseDCDeviceKM(pvDispClassInfoInt, IMG_FALSE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psCloseDispClassDeviceIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ return 0;
++}
++
++static IMG_INT
++PVRSRVEnumDCFormatsBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS *psEnumDispClassFormatsIN,
++ PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS *psEnumDispClassFormatsOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS);
++
++ psEnumDispClassFormatsOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfoInt,
++ psEnumDispClassFormatsIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psEnumDispClassFormatsOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psEnumDispClassFormatsOUT->eError =
++ PVRSRVEnumDCFormatsKM(pvDispClassInfoInt,
++ &psEnumDispClassFormatsOUT->ui32Count,
++ psEnumDispClassFormatsOUT->asFormat);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVEnumDCDimsBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS *psEnumDispClassDimsIN,
++ PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS *psEnumDispClassDimsOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS);
++
++ psEnumDispClassDimsOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfoInt,
++ psEnumDispClassDimsIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++
++ if(psEnumDispClassDimsOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psEnumDispClassDimsOUT->eError =
++ PVRSRVEnumDCDimsKM(pvDispClassInfoInt,
++ &psEnumDispClassDimsIN->sFormat,
++ &psEnumDispClassDimsOUT->ui32Count,
++ psEnumDispClassDimsOUT->asDim);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVGetDCSystemBufferBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER *psGetDispClassSysBufferIN,
++ PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER *psGetDispClassSysBufferOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hBufferInt;
++ IMG_VOID *pvDispClassInfoInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psGetDispClassSysBufferOUT->eError, psPerProc, 1);
++
++ psGetDispClassSysBufferOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfoInt,
++ psGetDispClassSysBufferIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psGetDispClassSysBufferOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetDispClassSysBufferOUT->eError =
++ PVRSRVGetDCSystemBufferKM(pvDispClassInfoInt,
++ &hBufferInt);
++
++ if(psGetDispClassSysBufferOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psGetDispClassSysBufferOUT->hBuffer,
++ hBufferInt,
++ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++ (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
++ psGetDispClassSysBufferIN->hDeviceKM);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDispClassSysBufferOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVGetDCInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO *psGetDispClassInfoIN,
++ PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO *psGetDispClassInfoOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_INFO);
++
++ psGetDispClassInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psGetDispClassInfoIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psGetDispClassInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetDispClassInfoOUT->eError =
++ PVRSRVGetDCInfoKM(pvDispClassInfo,
++ &psGetDispClassInfoOUT->sDisplayInfo);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVCreateDCSwapChainBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN *psCreateDispClassSwapChainIN,
++ PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN *psCreateDispClassSwapChainOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_HANDLE hSwapChainInt;
++ IMG_UINT32 ui32SwapChainID;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psCreateDispClassSwapChainOUT->eError, psPerProc, 1);
++
++ psCreateDispClassSwapChainOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psCreateDispClassSwapChainIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++
++ if(psCreateDispClassSwapChainOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ ui32SwapChainID = psCreateDispClassSwapChainIN->ui32SwapChainID;
++
++ psCreateDispClassSwapChainOUT->eError =
++ PVRSRVCreateDCSwapChainKM(psPerProc, pvDispClassInfo,
++ psCreateDispClassSwapChainIN->ui32Flags,
++ &psCreateDispClassSwapChainIN->sDstSurfAttrib,
++ &psCreateDispClassSwapChainIN->sSrcSurfAttrib,
++ psCreateDispClassSwapChainIN->ui32BufferCount,
++ psCreateDispClassSwapChainIN->ui32OEMFlags,
++ &hSwapChainInt,
++ &ui32SwapChainID);
++
++ if(psCreateDispClassSwapChainOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ psCreateDispClassSwapChainOUT->ui32SwapChainID = ui32SwapChainID;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psCreateDispClassSwapChainOUT->hSwapChain,
++ hSwapChainInt,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psCreateDispClassSwapChainIN->hDeviceKM);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psCreateDispClassSwapChainOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVDestroyDCSwapChainBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN *psDestroyDispClassSwapChainIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSwapChain,
++ psDestroyDispClassSwapChainIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVDestroyDCSwapChainKM(pvSwapChain);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psDestroyDispClassSwapChainIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVSetDCDstRectBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *psSetDispClassDstRectIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_VOID *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSetDispClassDstRectIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSetDispClassDstRectIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVSetDCDstRectKM(pvDispClassInfo,
++ pvSwapChain,
++ &psSetDispClassDstRectIN->sRect);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVSetDCSrcRectBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *psSetDispClassSrcRectIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_VOID *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSetDispClassSrcRectIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSetDispClassSrcRectIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVSetDCSrcRectKM(pvDispClassInfo,
++ pvSwapChain,
++ &psSetDispClassSrcRectIN->sRect);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVSetDCDstColourKeyBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *psSetDispClassColKeyIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_VOID *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSetDispClassColKeyIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSetDispClassColKeyIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVSetDCDstColourKeyKM(pvDispClassInfo,
++ pvSwapChain,
++ psSetDispClassColKeyIN->ui32CKColour);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVSetDCSrcColourKeyBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *psSetDispClassColKeyIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_VOID *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSetDispClassColKeyIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSetDispClassColKeyIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVSetDCSrcColourKeyKM(pvDispClassInfo,
++ pvSwapChain,
++ psSetDispClassColKeyIN->ui32CKColour);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVGetDCBuffersBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS *psGetDispClassBuffersIN,
++ PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS *psGetDispClassBuffersOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_VOID *pvSwapChain;
++ IMG_UINT32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psGetDispClassBuffersOUT->eError, psPerProc, PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS);
++
++ psGetDispClassBuffersOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psGetDispClassBuffersIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psGetDispClassBuffersOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetDispClassBuffersOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psGetDispClassBuffersIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++ if(psGetDispClassBuffersOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetDispClassBuffersOUT->eError =
++ PVRSRVGetDCBuffersKM(pvDispClassInfo,
++ pvSwapChain,
++ &psGetDispClassBuffersOUT->ui32BufferCount,
++ psGetDispClassBuffersOUT->ahBuffer);
++ if (psGetDispClassBuffersOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ PVR_ASSERT(psGetDispClassBuffersOUT->ui32BufferCount <= PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS);
++
++ for(i = 0; i < psGetDispClassBuffersOUT->ui32BufferCount; i++)
++ {
++ IMG_HANDLE hBufferExt;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &hBufferExt,
++ psGetDispClassBuffersOUT->ahBuffer[i],
++ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++ (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
++ psGetDispClassBuffersIN->hSwapChain);
++
++ psGetDispClassBuffersOUT->ahBuffer[i] = hBufferExt;
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psGetDispClassBuffersOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVSwapToDCBufferBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER *psSwapDispClassBufferIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_VOID *pvSwapChainBuf;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSwapDispClassBufferIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupSubHandle(psPerProc->psHandleBase,
++ &pvSwapChainBuf,
++ psSwapDispClassBufferIN->hBuffer,
++ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++ psSwapDispClassBufferIN->hDeviceKM);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVSwapToDCBufferKM(pvDispClassInfo,
++ pvSwapChainBuf,
++ psSwapDispClassBufferIN->ui32SwapInterval,
++ psSwapDispClassBufferIN->hPrivateTag,
++ psSwapDispClassBufferIN->ui32ClipRectCount,
++ psSwapDispClassBufferIN->sClipRect);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVSwapToDCSystemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM *psSwapDispClassSystemIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvDispClassInfo;
++ IMG_VOID *pvSwapChain;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvDispClassInfo,
++ psSwapDispClassSystemIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_DISP_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupSubHandle(psPerProc->psHandleBase,
++ &pvSwapChain,
++ psSwapDispClassSystemIN->hSwapChain,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
++ psSwapDispClassSystemIN->hDeviceKM);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ psRetOUT->eError =
++ PVRSRVSwapToDCSystemKM(pvDispClassInfo,
++ pvSwapChain);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVOpenBCDeviceBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE *psOpenBufferClassDeviceIN,
++ PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE *psOpenBufferClassDeviceOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hBufClassInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psOpenBufferClassDeviceOUT->eError, psPerProc, 1);
++
++ psOpenBufferClassDeviceOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psOpenBufferClassDeviceIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psOpenBufferClassDeviceOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psOpenBufferClassDeviceOUT->eError =
++ PVRSRVOpenBCDeviceKM(psPerProc,
++ psOpenBufferClassDeviceIN->ui32DeviceID,
++ hDevCookieInt,
++ &hBufClassInfo);
++ if(psOpenBufferClassDeviceOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psOpenBufferClassDeviceOUT->hDeviceKM,
++ hBufClassInfo,
++ PVRSRV_HANDLE_TYPE_BUF_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psOpenBufferClassDeviceOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVCloseBCDeviceBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE *psCloseBufferClassDeviceIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvBufClassInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvBufClassInfo,
++ psCloseBufferClassDeviceIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_BUF_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVCloseBCDeviceKM(pvBufClassInfo, IMG_FALSE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psCloseBufferClassDeviceIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_BUF_INFO);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVGetBCInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO *psGetBufferClassInfoIN,
++ PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO *psGetBufferClassInfoOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvBufClassInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO);
++
++ psGetBufferClassInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvBufClassInfo,
++ psGetBufferClassInfoIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_BUF_INFO);
++ if(psGetBufferClassInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetBufferClassInfoOUT->eError =
++ PVRSRVGetBCInfoKM(pvBufClassInfo,
++ &psGetBufferClassInfoOUT->sBufferInfo);
++ return 0;
++}
++
++static IMG_INT
++PVRSRVGetBCBufferBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER *psGetBufferClassBufferIN,
++ PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER *psGetBufferClassBufferOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_VOID *pvBufClassInfo;
++ IMG_HANDLE hBufferInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psGetBufferClassBufferOUT->eError, psPerProc, 1);
++
++ psGetBufferClassBufferOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvBufClassInfo,
++ psGetBufferClassBufferIN->hDeviceKM,
++ PVRSRV_HANDLE_TYPE_BUF_INFO);
++ if(psGetBufferClassBufferOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetBufferClassBufferOUT->eError =
++ PVRSRVGetBCBufferKM(pvBufClassInfo,
++ psGetBufferClassBufferIN->ui32BufferIndex,
++ &hBufferInt);
++
++ if(psGetBufferClassBufferOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psGetBufferClassBufferOUT->hBuffer,
++ hBufferInt,
++ PVRSRV_HANDLE_TYPE_BUF_BUFFER,
++ (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
++ psGetBufferClassBufferIN->hDeviceKM);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psGetBufferClassBufferOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVAllocSharedSysMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM *psAllocSharedSysMemIN,
++ PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM *psAllocSharedSysMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psAllocSharedSysMemOUT->eError, psPerProc, 1);
++
++ psAllocSharedSysMemOUT->eError =
++ PVRSRVAllocSharedSysMemoryKM(psPerProc,
++ psAllocSharedSysMemIN->ui32Flags,
++ psAllocSharedSysMemIN->ui32Size,
++ &psKernelMemInfo);
++ if(psAllocSharedSysMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ OSMemSet(&psAllocSharedSysMemOUT->sClientMemInfo,
++ 0,
++ sizeof(psAllocSharedSysMemOUT->sClientMemInfo));
++
++ psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddrKM =
++ psKernelMemInfo->pvLinAddrKM;
++
++ psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddr = 0;
++ psAllocSharedSysMemOUT->sClientMemInfo.ui32Flags =
++ psKernelMemInfo->ui32Flags;
++ psAllocSharedSysMemOUT->sClientMemInfo.ui32AllocSize =
++ psKernelMemInfo->ui32AllocSize;
++ psAllocSharedSysMemOUT->sClientMemInfo.hMappingInfo = psKernelMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psAllocSharedSysMemOUT->sClientMemInfo.hKernelMemInfo,
++ psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psAllocSharedSysMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++static IMG_INT
++PVRSRVFreeSharedSysMemoryBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM *psFreeSharedSysMemIN,
++ PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM *psFreeSharedSysMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM);
++
++ psFreeSharedSysMemOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID **)&psKernelMemInfo,
++ psFreeSharedSysMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++
++ if(psFreeSharedSysMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psFreeSharedSysMemOUT->eError =
++ PVRSRVFreeSharedSysMemoryKM(psKernelMemInfo);
++ if(psFreeSharedSysMemOUT->eError != PVRSRV_OK)
++ return 0;
++
++ psFreeSharedSysMemOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psFreeSharedSysMemIN->psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ return 0;
++}
++
++static IMG_INT
++PVRSRVMapMemInfoMemBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM *psMapMemInfoMemIN,
++ PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM *psMapMemInfoMemOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++ PVRSRV_HANDLE_TYPE eHandleType;
++ IMG_HANDLE hParent;
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_MEMINFO_MEM);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psMapMemInfoMemOUT->eError, psPerProc, 2);
++
++ psMapMemInfoMemOUT->eError =
++ PVRSRVLookupHandleAnyType(psPerProc->psHandleBase,
++ (IMG_VOID **)&psKernelMemInfo,
++ &eHandleType,
++ psMapMemInfoMemIN->hKernelMemInfo);
++ if(psMapMemInfoMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ switch (eHandleType)
++ {
++#if defined(PVR_SECURE_HANDLES)
++ case PVRSRV_HANDLE_TYPE_MEM_INFO:
++ case PVRSRV_HANDLE_TYPE_MEM_INFO_REF:
++ case PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO:
++#else
++ case PVRSRV_HANDLE_TYPE_NONE:
++#endif
++ break;
++ default:
++ psMapMemInfoMemOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++
++ psMapMemInfoMemOUT->eError =
++ PVRSRVGetParentHandle(psPerProc->psHandleBase,
++ &hParent,
++ psMapMemInfoMemIN->hKernelMemInfo,
++ eHandleType);
++ if (psMapMemInfoMemOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ if (hParent == IMG_NULL)
++ {
++ hParent = psMapMemInfoMemIN->hKernelMemInfo;
++ }
++
++ OSMemSet(&psMapMemInfoMemOUT->sClientMemInfo,
++ 0,
++ sizeof(psMapMemInfoMemOUT->sClientMemInfo));
++
++ psMapMemInfoMemOUT->sClientMemInfo.pvLinAddrKM =
++ psKernelMemInfo->pvLinAddrKM;
++
++ psMapMemInfoMemOUT->sClientMemInfo.pvLinAddr = 0;
++ psMapMemInfoMemOUT->sClientMemInfo.sDevVAddr =
++ psKernelMemInfo->sDevVAddr;
++ psMapMemInfoMemOUT->sClientMemInfo.ui32Flags =
++ psKernelMemInfo->ui32Flags;
++ psMapMemInfoMemOUT->sClientMemInfo.ui32AllocSize =
++ psKernelMemInfo->ui32AllocSize;
++ psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo = psKernelMemInfo->sMemBlk.hOSMemHandle;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo,
++ psKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ hParent);
++
++ if(psKernelMemInfo->ui32Flags & PVRSRV_MEM_NO_SYNCOBJ)
++ {
++
++ OSMemSet(&psMapMemInfoMemOUT->sClientSyncInfo,
++ 0,
++ sizeof (PVRSRV_CLIENT_SYNC_INFO));
++ psMapMemInfoMemOUT->psKernelSyncInfo = IMG_NULL;
++ }
++ else
++ {
++
++ psMapMemInfoMemOUT->sClientSyncInfo.psSyncData =
++ psKernelMemInfo->psKernelSyncInfo->psSyncData;
++ psMapMemInfoMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++ psKernelMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++ psMapMemInfoMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++ psKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psMapMemInfoMemOUT->sClientSyncInfo.hMappingInfo =
++ psKernelMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->sMemBlk.hOSMemHandle;
++
++ psMapMemInfoMemOUT->sClientMemInfo.psClientSyncInfo = &psMapMemInfoMemOUT->sClientSyncInfo;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psMapMemInfoMemOUT->sClientSyncInfo.hKernelSyncInfo,
++ psKernelMemInfo->psKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo);
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psMapMemInfoMemOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++
++static IMG_INT
++MMU_GetPDDevPAddrBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR *psGetMmuPDDevPAddrIN,
++ PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR *psGetMmuPDDevPAddrOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevMemContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR);
++
++ psGetMmuPDDevPAddrOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++ psGetMmuPDDevPAddrIN->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++ if(psGetMmuPDDevPAddrOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetMmuPDDevPAddrOUT->sPDDevPAddr =
++ BM_GetDeviceNode(hDevMemContextInt)->pfnMMUGetPDDevPAddr(BM_GetMMUContextFromMemContext(hDevMemContextInt));
++ if(psGetMmuPDDevPAddrOUT->sPDDevPAddr.uiAddr)
++ {
++ psGetMmuPDDevPAddrOUT->eError = PVRSRV_OK;
++ }
++ else
++ {
++ psGetMmuPDDevPAddrOUT->eError = PVRSRV_ERROR_GENERIC;
++ }
++ return 0;
++}
++
++
++
++IMG_INT
++DummyBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ IMG_VOID *psBridgeOut,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++#if !defined(DEBUG)
++ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++#endif
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++ PVR_UNREFERENCED_PARAMETER(psBridgeOut);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++#if defined(DEBUG_BRIDGE_KM)
++ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %lu (%s) mapped to "
++ "Dummy Wrapper (probably not what you want!)",
++ __FUNCTION__, ui32BridgeID, g_BridgeDispatchTable[ui32BridgeID].pszIOCName));
++#else
++ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %lu mapped to "
++ "Dummy Wrapper (probably not what you want!)",
++ __FUNCTION__, ui32BridgeID));
++#endif
++ return -ENOTTY;
++}
++
++
++IMG_VOID
++_SetDispatchTableEntry(IMG_UINT32 ui32Index,
++ const IMG_CHAR *pszIOCName,
++ BridgeWrapperFunction pfFunction,
++ const IMG_CHAR *pszFunctionName)
++{
++ static IMG_UINT32 ui32PrevIndex = ~0UL;
++#if !defined(DEBUG)
++ PVR_UNREFERENCED_PARAMETER(pszIOCName);
++#endif
++#if !defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) && !defined(DEBUG_BRIDGE_KM)
++ PVR_UNREFERENCED_PARAMETER(pszFunctionName);
++#endif
++
++#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
++
++ PVR_DPF((PVR_DBG_WARNING, "%s: %d %s %s", __FUNCTION__, ui32Index, pszIOCName, pszFunctionName));
++#endif
++
++
++ if(g_BridgeDispatchTable[ui32Index].pfFunction)
++ {
++#if defined(DEBUG_BRIDGE_KM)
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry for %s",
++ __FUNCTION__, pszIOCName, g_BridgeDispatchTable[ui32Index].pszIOCName));
++#else
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry (index=%lu)",
++ __FUNCTION__, pszIOCName, ui32Index));
++#endif
++ PVR_DPF((PVR_DBG_ERROR, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue.",
++ __FUNCTION__));
++ }
++
++
++ if((ui32PrevIndex != ~0UL) &&
++ ((ui32Index >= ui32PrevIndex + DISPATCH_TABLE_GAP_THRESHOLD) ||
++ (ui32Index <= ui32PrevIndex)))
++ {
++#if defined(DEBUG_BRIDGE_KM)
++ PVR_DPF((PVR_DBG_WARNING,
++ "%s: There is a gap in the dispatch table between indices %lu (%s) and %lu (%s)",
++ __FUNCTION__, ui32PrevIndex, g_BridgeDispatchTable[ui32PrevIndex].pszIOCName,
++ ui32Index, pszIOCName));
++#else
++ PVR_DPF((PVR_DBG_WARNING,
++ "%s: There is a gap in the dispatch table between indices %u and %u (%s)",
++ __FUNCTION__, (IMG_UINT)ui32PrevIndex, (IMG_UINT)ui32Index, pszIOCName));
++#endif
++ PVR_DPF((PVR_DBG_ERROR, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue.",
++ __FUNCTION__));
++ }
++
++ g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction;
++#if defined(DEBUG_BRIDGE_KM)
++ g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName;
++ g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName;
++ g_BridgeDispatchTable[ui32Index].ui32CallCount = 0;
++ g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0;
++#endif
++
++ ui32PrevIndex = ui32Index;
++}
++
++static IMG_INT
++PVRSRVInitSrvConnectBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_INITSRV_CONNECT);
++ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++ if(!OSProcHasPrivSrvInit() || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING) || PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN))
++ {
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++#if defined (__linux__)
++ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_TRUE);
++#endif
++ psPerProc->bInitProcess = IMG_TRUE;
++
++ psRetOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVInitSrvDisconnectBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT *psInitSrvDisconnectIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_INITSRV_DISCONNECT);
++
++ if(!psPerProc->bInitProcess)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psPerProc->bInitProcess = IMG_FALSE;
++
++ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RUNNING, IMG_FALSE);
++ PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_RAN, IMG_TRUE);
++
++ psRetOUT->eError = PVRSRVFinaliseSystem(psInitSrvDisconnectIN->bInitSuccesful);
++
++ PVRSRVSetInitServerState( PVRSRV_INIT_SERVER_SUCCESSFUL,
++ (((psRetOUT->eError == PVRSRV_OK) && (psInitSrvDisconnectIN->bInitSuccesful)))
++ ? IMG_TRUE : IMG_FALSE);
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVEventObjectWaitBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT *psEventObjectWaitIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hOSEventKM;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_WAIT);
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hOSEventKM,
++ psEventObjectWaitIN->hOSEventKM,
++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = OSEventObjectWait(hOSEventKM);
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVEventObjectOpenBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN *psEventObjectOpenIN,
++ PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN *psEventObjectOpenOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_OPEN);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psEventObjectOpenOUT->eError, psPerProc, 1);
++
++ psEventObjectOpenOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psEventObjectOpenIN->sEventObject.hOSEventKM,
++ psEventObjectOpenIN->sEventObject.hOSEventKM,
++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
++
++ if(psEventObjectOpenOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psEventObjectOpenOUT->eError = OSEventObjectOpen(&psEventObjectOpenIN->sEventObject, &psEventObjectOpenOUT->hOSEvent);
++
++ if(psEventObjectOpenOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psEventObjectOpenOUT->hOSEvent,
++ psEventObjectOpenOUT->hOSEvent,
++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psEventObjectOpenOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVEventObjectCloseBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE *psEventObjectCloseIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hOSEventKM;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psEventObjectCloseIN->sEventObject.hOSEventKM,
++ psEventObjectCloseIN->sEventObject.hOSEventKM,
++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &hOSEventKM,
++ psEventObjectCloseIN->hOSEventKM,
++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = OSEventObjectClose(&psEventObjectCloseIN->sEventObject, hOSEventKM);
++
++ return 0;
++}
++
++
++typedef struct _MODIFY_SYNC_OP_INFO
++{
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ IMG_UINT32 ui32ModifyFlags;
++ IMG_UINT32 ui32ReadOpsPendingSnapShot;
++ IMG_UINT32 ui32WriteOpsPendingSnapShot;
++} MODIFY_SYNC_OP_INFO;
++
++
++static PVRSRV_ERROR ModifyCompleteSyncOpsCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ if (!pvParam)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ModifyCompleteSyncOpsCallBack: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psModSyncOpInfo = (MODIFY_SYNC_OP_INFO*)pvParam;
++ psKernelSyncInfo = psModSyncOpInfo->psKernelSyncInfo;
++
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++ {
++ if((psModSyncOpInfo->ui32WriteOpsPendingSnapShot == psKernelSyncInfo->psSyncData->ui32WriteOpsComplete)
++ && (psModSyncOpInfo->ui32ReadOpsPendingSnapShot == psKernelSyncInfo->psSyncData->ui32ReadOpsComplete))
++ {
++ goto OpFlushedComplete;
++ }
++ PVR_DPF((PVR_DBG_ERROR, "ModifyCompleteSyncOpsCallBack: waiting for old Ops to flush"));
++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++ } END_LOOP_UNTIL_TIMEOUT();
++
++ PVR_DPF((PVR_DBG_ERROR, "ModifyCompleteSyncOpsCallBack: waiting for old Ops to flush timed out"));
++
++ return PVRSRV_ERROR_TIMEOUT;
++
++OpFlushedComplete:
++
++
++ if(psModSyncOpInfo->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC)
++ {
++ psKernelSyncInfo->psSyncData->ui32WriteOpsComplete++;
++ }
++
++
++ if(psModSyncOpInfo->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC)
++ {
++ psKernelSyncInfo->psSyncData->ui32ReadOpsComplete++;
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MODIFY_SYNC_OP_INFO), (IMG_VOID *)psModSyncOpInfo, 0);
++
++
++
++ PVRSRVCommandCompleteCallbacks();
++
++ return PVRSRV_OK;
++}
++
++
++static IMG_INT
++PVRSRVModifyPendingSyncOpsBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MODIFY_PENDING_SYNC_OPS *psModifySyncOpsIN,
++ PVRSRV_BRIDGE_OUT_MODIFY_PENDING_SYNC_OPS *psModifySyncOpsOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hKernelSyncInfo;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ MODIFY_SYNC_OP_INFO *psModSyncOpInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS);
++
++ psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hKernelSyncInfo,
++ psModifySyncOpsIN->hKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if (psModifySyncOpsOUT->eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyPendingSyncOpsBW: PVRSRVLookupHandle failed"));
++ return 0;
++ }
++
++ psKernelSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)hKernelSyncInfo;
++
++ if(psKernelSyncInfo->hResItem != IMG_NULL)
++ {
++
++ psModifySyncOpsOUT->eError = PVRSRV_ERROR_RETRY;
++ return 0;
++ }
++
++ ASSIGN_AND_EXIT_ON_ERROR(psModifySyncOpsOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(MODIFY_SYNC_OP_INFO),
++ (IMG_VOID **)&psModSyncOpInfo, 0,
++ "ModSyncOpInfo (MODIFY_SYNC_OP_INFO)"));
++
++
++ psModSyncOpInfo->psKernelSyncInfo = psKernelSyncInfo;
++ psModSyncOpInfo->ui32ModifyFlags = psModifySyncOpsIN->ui32ModifyFlags;
++ psModSyncOpInfo->ui32ReadOpsPendingSnapShot = psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
++ psModSyncOpInfo->ui32WriteOpsPendingSnapShot = psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
++
++
++
++ psModifySyncOpsOUT->ui32ReadOpsPending = psKernelSyncInfo->psSyncData->ui32ReadOpsPending;
++ psModifySyncOpsOUT->ui32WriteOpsPending = psKernelSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ if(psModifySyncOpsIN->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_WO_INC)
++ {
++ psKernelSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++
++ if(psModifySyncOpsIN->ui32ModifyFlags & PVRSRV_MODIFYSYNCOPS_FLAGS_RO_INC)
++ {
++ psKernelSyncInfo->psSyncData->ui32ReadOpsPending++;
++ }
++
++ psKernelSyncInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_MODIFY_SYNC_OPS,
++ psModSyncOpInfo,
++ 0,
++ ModifyCompleteSyncOpsCallBack);
++ return 0;
++}
++
++
++static IMG_INT
++PVRSRVModifyCompleteSyncOpsBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_MODIFY_COMPLETE_SYNC_OPS *psModifySyncOpsIN,
++ PVRSRV_BRIDGE_RETURN *psModifySyncOpsOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS);
++
++ psModifySyncOpsOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID**)&psKernelSyncInfo,
++ psModifySyncOpsIN->hKernelSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if (psModifySyncOpsOUT->eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyCompleteSyncOpsBW: PVRSRVLookupHandle failed"));
++ return 0;
++ }
++
++ if(psKernelSyncInfo->hResItem == IMG_NULL)
++ {
++
++ psModifySyncOpsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++
++
++
++
++
++
++
++
++
++
++ eError = ResManFreeResByPtr(psKernelSyncInfo->hResItem);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVModifyCompleteSyncOpsBW: ResManFreeResByPtr failed"));
++ return 0;
++ }
++
++ psKernelSyncInfo->hResItem = IMG_NULL;
++
++ return 0;
++}
++
++
++PVRSRV_ERROR
++CommonBridgeInit(IMG_VOID)
++{
++ IMG_UINT32 i;
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DEVICES, PVRSRVEnumerateDevicesBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO, PVRSRVAcquireDeviceDataBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_DEVICEINFO, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT, PVRSRVCreateDeviceMemContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT, PVRSRVDestroyDeviceMemContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVMEM_HEAPINFO, PVRSRVGetDeviceMemHeapInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_DEVICEMEM, PVRSRVAllocDeviceMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEVICEMEM, PVRSRVFreeDeviceMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GETFREE_DEVICEMEM, PVRSRVGetFreeDeviceMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_COMMANDQUEUE, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MHANDLE_TO_MMAP_DATA, PVRMMapOSMemHandleToMMapDataBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CONNECT_SERVICES, PVRSRVConnectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DISCONNECT_SERVICES, PVRSRVDisconnectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_DEVICE_MEM, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVICEMEMINFO, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM , DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEV_VIRTMEM, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_EXT_MEMORY, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_EXT_MEMORY, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEV_MEMORY, PVRSRVMapDeviceMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEV_MEMORY, PVRSRVUnmapDeviceMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY, PVRSRVMapDeviceClassMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY, PVRSRVUnmapDeviceClassMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_EXPORT_DEVICEMEM, PVRSRVExportDeviceMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_MMAP_DATA, PVRMMapReleaseMMapDataBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_REGISTER_SIM_PROCESS, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS, DummyBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP, DummyBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_FB_STATS, DummyBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_MISC_INFO, PVRSRVGetMiscInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_MISC_INFO, DummyBW);
++
++
++#if defined (SUPPORT_OVERLAY_ROTATE_BLIT)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_INIT_3D_OVL_BLT_RES, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DEINIT_3D_OVL_BLT_RES, DummyBW);
++#endif
++
++
++
++#if defined(PDUMP)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_INIT, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_MEMPOL, PDumpMemPolBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPMEM, PDumpMemBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REG, PDumpRegWithFlagsBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REGPOL, PDumpRegPolBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_COMMENT, PDumpCommentBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SETFRAME, PDumpSetFrameBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_ISCAPTURING, PDumpIsCaptureFrameBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPBITMAP, PDumpBitmapBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPREADREG, PDumpReadRegBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SYNCPOL, PDumpSyncPolBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPSYNC, PDumpSyncDumpBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DRIVERINFO, PDumpDriverInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_PDREG, PDumpPDRegBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR, PDumpPDDevPAddrBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ, PDumpCycleCountRegReadBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_STARTINITPHASE, PDumpStartInitPhaseBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_STOPINITPHASE, PDumpStopInitPhaseBW);
++#endif
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_OEMJTABLE, DummyBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_CLASS, PVRSRVEnumerateDCBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE, PVRSRVOpenDCDeviceBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE, PVRSRVCloseDCDeviceBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS, PVRSRVEnumDCFormatsBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS, PVRSRVEnumDCDimsBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER, PVRSRVGetDCSystemBufferBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_INFO, PVRSRVGetDCInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN, PVRSRVCreateDCSwapChainBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN, PVRSRVDestroyDCSwapChainBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT, PVRSRVSetDCDstRectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT, PVRSRVSetDCSrcRectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY, PVRSRVSetDCDstColourKeyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY, PVRSRVSetDCSrcColourKeyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS, PVRSRVGetDCBuffersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER, PVRSRVSwapToDCBufferBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM, PVRSRVSwapToDCSystemBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE, PVRSRVOpenBCDeviceBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE, PVRSRVCloseBCDeviceBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO, PVRSRVGetBCInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER, PVRSRVGetBCBufferBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_EXT_MEMORY, PVRSRVWrapExtMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY, PVRSRVUnwrapExtMemoryBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM, PVRSRVAllocSharedSysMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM, PVRSRVFreeSharedSysMemoryBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEMINFO_MEM, PVRSRVMapMemInfoMemBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR, MMU_GetPDDevPAddrBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_CONNECT, PVRSRVInitSrvConnectBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_DISCONNECT, PVRSRVInitSrvDisconnectBW);
++
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_WAIT, PVRSRVEventObjectWaitBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_OPEN, PVRSRVEventObjectOpenBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE, PVRSRVEventObjectCloseBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MODIFY_PENDING_SYNC_OPS, PVRSRVModifyPendingSyncOpsBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_MODIFY_COMPLETE_SYNC_OPS, PVRSRVModifyCompleteSyncOpsBW);
++
++#if defined (SUPPORT_SGX)
++ SetSGXDispatchTableEntry();
++#endif
++#if defined (SUPPORT_VGX)
++ SetVGXDispatchTableEntry();
++#endif
++#if defined (SUPPORT_MSVDX)
++ SetMSVDXDispatchTableEntry();
++#endif
++
++
++
++
++ for(i=0;i<BRIDGE_DISPATCH_TABLE_ENTRY_COUNT;i++)
++ {
++ if(!g_BridgeDispatchTable[i].pfFunction)
++ {
++ g_BridgeDispatchTable[i].pfFunction = DummyBW;
++#if defined(DEBUG_BRIDGE_KM)
++ g_BridgeDispatchTable[i].pszIOCName = "_PVRSRV_BRIDGE_DUMMY";
++ g_BridgeDispatchTable[i].pszFunctionName = "DummyBW";
++ g_BridgeDispatchTable[i].ui32CallCount = 0;
++ g_BridgeDispatchTable[i].ui32CopyFromUserTotalBytes = 0;
++ g_BridgeDispatchTable[i].ui32CopyToUserTotalBytes = 0;
++#endif
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_INT BridgedDispatchKM(PVRSRV_PER_PROCESS_DATA * psPerProc,
++ PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM)
++{
++
++ IMG_VOID * psBridgeIn;
++ IMG_VOID * psBridgeOut;
++ BridgeWrapperFunction pfBridgeHandler;
++ IMG_UINT32 ui32BridgeID = psBridgePackageKM->ui32BridgeID;
++ IMG_INT err = -EFAULT;
++
++#if defined(DEBUG_TRACE_BRIDGE_KM)
++ PVR_DPF((PVR_DBG_ERROR, "%s: %s",
++ __FUNCTION__,
++ g_BridgeDispatchTable[ui32BridgeID].pszIOCName));
++#endif
++
++#if defined(DEBUG_BRIDGE_KM)
++ g_BridgeDispatchTable[ui32BridgeID].ui32CallCount++;
++ g_BridgeGlobalStats.ui32IOCTLCount++;
++#endif
++
++ if(!psPerProc->bInitProcess)
++ {
++ if(PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RAN))
++ {
++ if(!PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation failed. Driver unusable.",
++ __FUNCTION__));
++ goto return_fault;
++ }
++ }
++ else
++ {
++ if(PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_RUNNING))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation is in progress",
++ __FUNCTION__));
++ goto return_fault;
++ }
++ else
++ {
++
++ switch(ui32BridgeID)
++ {
++ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_CONNECT_SERVICES):
++ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_DISCONNECT_SERVICES):
++ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_CONNECT):
++ case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_DISCONNECT):
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "%s: Driver initialisation not completed yet.",
++ __FUNCTION__));
++ goto return_fault;
++ }
++ }
++ }
++ }
++
++
++
++#if defined(__linux__)
++ {
++
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++
++ psBridgeIn = ((ENV_DATA *)psSysData->pvEnvSpecificData)->pvBridgeData;
++ psBridgeOut = (IMG_PVOID)((IMG_PBYTE)psBridgeIn + PVRSRV_MAX_BRIDGE_IN_SIZE);
++
++ if(psBridgePackageKM->ui32InBufferSize > 0)
++ {
++ if(!OSAccessOK(PVR_VERIFY_READ,
++ psBridgePackageKM->pvParamIn,
++ psBridgePackageKM->ui32InBufferSize))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid pvParamIn pointer", __FUNCTION__));
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ psBridgeIn,
++ psBridgePackageKM->pvParamIn,
++ psBridgePackageKM->ui32InBufferSize)
++ != PVRSRV_OK)
++ {
++ goto return_fault;
++ }
++ }
++ }
++#else
++ psBridgeIn = psBridgePackageKM->pvParamIn;
++ psBridgeOut = psBridgePackageKM->pvParamOut;
++#endif
++
++ if(ui32BridgeID >= (BRIDGE_DISPATCH_TABLE_ENTRY_COUNT))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: ui32BridgeID = %d is out if range!",
++ __FUNCTION__, ui32BridgeID));
++ goto return_fault;
++ }
++ pfBridgeHandler =
++ (BridgeWrapperFunction)g_BridgeDispatchTable[ui32BridgeID].pfFunction;
++ err = pfBridgeHandler(ui32BridgeID,
++ psBridgeIn,
++ psBridgeOut,
++ psPerProc);
++ if(err < 0)
++ {
++ goto return_fault;
++ }
++
++
++#if defined(__linux__)
++
++ if(CopyToUserWrapper(psPerProc,
++ ui32BridgeID,
++ psBridgePackageKM->pvParamOut,
++ psBridgeOut,
++ psBridgePackageKM->ui32OutBufferSize)
++ != PVRSRV_OK)
++ {
++ goto return_fault;
++ }
++#endif
++
++ err = 0;
++return_fault:
++ ReleaseHandleBatch(psPerProc);
++ return err;
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_pvr_bridge.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_pvr_bridge.h
+new file mode 100644
+index 0000000..95a6377
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_pvr_bridge.h
+@@ -0,0 +1,231 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __BRIDGED_PVR_BRIDGE_H__
++#define __BRIDGED_PVR_BRIDGE_H__
++
++#include "pvr_bridge.h"
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#if defined(__linux__)
++#define PVRSRV_GET_BRIDGE_ID(X) _IOC_NR(X)
++#else
++#define PVRSRV_GET_BRIDGE_ID(X) (X - PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST))
++#endif
++
++#ifndef ENOMEM
++#define ENOMEM 12
++#endif
++#ifndef EFAULT
++#define EFAULT 14
++#endif
++#ifndef ENOTTY
++#define ENOTTY 25
++#endif
++
++#if defined(DEBUG_BRIDGE_KM)
++PVRSRV_ERROR
++CopyFromUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData,
++ IMG_UINT32 ui32BridgeID,
++ IMG_VOID *pvDest,
++ IMG_VOID *pvSrc,
++ IMG_UINT32 ui32Size);
++PVRSRV_ERROR
++CopyToUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData,
++ IMG_UINT32 ui32BridgeID,
++ IMG_VOID *pvDest,
++ IMG_VOID *pvSrc,
++ IMG_UINT32 ui32Size);
++#else
++#define CopyFromUserWrapper(pProcData, ui32BridgeID, pvDest, pvSrc, ui32Size) \
++ OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size)
++#define CopyToUserWrapper(pProcData, ui32BridgeID, pvDest, pvSrc, ui32Size) \
++ OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size)
++#endif
++
++
++#define ASSIGN_AND_RETURN_ON_ERROR(error, src, res) \
++ do \
++ { \
++ (error) = (src); \
++ if ((error) != PVRSRV_OK) \
++ { \
++ return (res); \
++ } \
++ } while (error != PVRSRV_OK)
++
++#define ASSIGN_AND_EXIT_ON_ERROR(error, src) \
++ ASSIGN_AND_RETURN_ON_ERROR(error, src, 0)
++
++#if defined (PVR_SECURE_HANDLES)
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(NewHandleBatch)
++#endif
++static INLINE PVRSRV_ERROR
++NewHandleBatch(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_UINT32 ui32BatchSize)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(!psPerProc->bHandlesBatched);
++
++ eError = PVRSRVNewHandleBatch(psPerProc->psHandleBase, ui32BatchSize);
++
++ if (eError == PVRSRV_OK)
++ {
++ psPerProc->bHandlesBatched = IMG_TRUE;
++ }
++
++ return eError;
++}
++
++#define NEW_HANDLE_BATCH_OR_ERROR(error, psPerProc, ui32BatchSize) \
++ ASSIGN_AND_EXIT_ON_ERROR(error, NewHandleBatch(psPerProc, ui32BatchSize))
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(CommitHandleBatch)
++#endif
++static INLINE PVRSRV_ERROR
++CommitHandleBatch(PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVR_ASSERT(psPerProc->bHandlesBatched);
++
++ psPerProc->bHandlesBatched = IMG_FALSE;
++
++ return PVRSRVCommitHandleBatch(psPerProc->psHandleBase);
++}
++
++
++#define COMMIT_HANDLE_BATCH_OR_ERROR(error, psPerProc) \
++ ASSIGN_AND_EXIT_ON_ERROR(error, CommitHandleBatch(psPerProc))
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(ReleaseHandleBatch)
++#endif
++static INLINE IMG_VOID
++ReleaseHandleBatch(PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ if (psPerProc->bHandlesBatched)
++ {
++ psPerProc->bHandlesBatched = IMG_FALSE;
++
++ PVRSRVReleaseHandleBatch(psPerProc->psHandleBase);
++ }
++}
++#else
++#define NEW_HANDLE_BATCH_OR_ERROR(error, psPerProc, ui32BatchSize)
++#define COMMIT_HANDLE_BATCH_OR_ERROR(error, psPerProc)
++#define ReleaseHandleBatch(psPerProc)
++#endif
++
++IMG_INT
++DummyBW(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ IMG_VOID *psBridgeOut,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++typedef IMG_INT (*BridgeWrapperFunction)(IMG_UINT32 ui32BridgeID,
++ IMG_VOID *psBridgeIn,
++ IMG_VOID *psBridgeOut,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++typedef struct _PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY
++{
++ BridgeWrapperFunction pfFunction;
++#if defined(DEBUG_BRIDGE_KM)
++ const IMG_CHAR *pszIOCName;
++ const IMG_CHAR *pszFunctionName;
++ IMG_UINT32 ui32CallCount;
++ IMG_UINT32 ui32CopyFromUserTotalBytes;
++ IMG_UINT32 ui32CopyToUserTotalBytes;
++#endif
++}PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY;
++
++#if defined(SUPPORT_VGX) || defined(SUPPORT_MSVDX)
++ #if defined(SUPPORT_VGX)
++ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_VGX_CMD+1)
++ #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_VGX_CMD
++ #else
++ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_MSVDX_CMD+1)
++ #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_MSVDX_CMD
++ #endif
++#else
++ #if defined(SUPPORT_SGX)
++ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_SGX_CMD+1)
++ #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_SGX_CMD
++ #else
++ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD+1)
++ #define PVRSRV_BRIDGE_LAST_DEVICE_CMD PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD
++ #endif
++#endif
++
++extern PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
++
++IMG_VOID
++_SetDispatchTableEntry(IMG_UINT32 ui32Index,
++ const IMG_CHAR *pszIOCName,
++ BridgeWrapperFunction pfFunction,
++ const IMG_CHAR *pszFunctionName);
++
++
++#define SetDispatchTableEntry(ui32Index, pfFunction) \
++ _SetDispatchTableEntry(PVRSRV_GET_BRIDGE_ID(ui32Index), #ui32Index, (BridgeWrapperFunction)pfFunction, #pfFunction)
++
++#define DISPATCH_TABLE_GAP_THRESHOLD 5
++
++#if defined(DEBUG)
++#define PVRSRV_BRIDGE_ASSERT_CMD(X, Y) PVR_ASSERT(X == PVRSRV_GET_BRIDGE_ID(Y))
++#else
++#define PVRSRV_BRIDGE_ASSERT_CMD(X, Y) PVR_UNREFERENCED_PARAMETER(X)
++#endif
++
++
++#if defined(DEBUG_BRIDGE_KM)
++typedef struct _PVRSRV_BRIDGE_GLOBAL_STATS
++{
++ IMG_UINT32 ui32IOCTLCount;
++ IMG_UINT32 ui32TotalCopyFromUserBytes;
++ IMG_UINT32 ui32TotalCopyToUserBytes;
++}PVRSRV_BRIDGE_GLOBAL_STATS;
++
++extern PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
++#endif
++
++
++PVRSRV_ERROR CommonBridgeInit(IMG_VOID);
++
++IMG_INT BridgedDispatchKM(PVRSRV_PER_PROCESS_DATA * psPerProc,
++ PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_support.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_support.c
+new file mode 100644
+index 0000000..adc9610
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_support.c
+@@ -0,0 +1,85 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "img_defs.h"
++#include "servicesint.h"
++#include "bridged_support.h"
++
++
++PVRSRV_ERROR
++PVRSRVLookupOSMemHandle(PVRSRV_HANDLE_BASE *psHandleBase, IMG_HANDLE *phOSMemHandle, IMG_HANDLE hMHandle)
++{
++ IMG_HANDLE hMHandleInt;
++ PVRSRV_HANDLE_TYPE eHandleType;
++ PVRSRV_ERROR eError;
++
++
++ eError = PVRSRVLookupHandleAnyType(psHandleBase, &hMHandleInt,
++ &eHandleType,
++ hMHandle);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ switch(eHandleType)
++ {
++#if defined(PVR_SECURE_HANDLES)
++ case PVRSRV_HANDLE_TYPE_MEM_INFO:
++ case PVRSRV_HANDLE_TYPE_MEM_INFO_REF:
++ case PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO:
++ {
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)hMHandleInt;
++
++ *phOSMemHandle = psMemInfo->sMemBlk.hOSMemHandle;
++
++ break;
++ }
++ case PVRSRV_HANDLE_TYPE_SYNC_INFO:
++ {
++ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)hMHandleInt;
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = psSyncInfo->psSyncDataMemInfoKM;
++
++ *phOSMemHandle = psMemInfo->sMemBlk.hOSMemHandle;
++
++ break;
++ }
++ case PVRSRV_HANDLE_TYPE_SOC_TIMER:
++ {
++ *phOSMemHandle = (IMG_VOID *)hMHandleInt;
++ break;
++ }
++#else
++ case PVRSRV_HANDLE_TYPE_NONE:
++ *phOSMemHandle = (IMG_VOID *)hMHandleInt;
++ break;
++#endif
++ default:
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++
++ return PVRSRV_OK;
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_support.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_support.h
+new file mode 100644
+index 0000000..9785d37
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/bridged_support.h
+@@ -0,0 +1,43 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __BRIDGED_SUPPORT_H__
++#define __BRIDGED_SUPPORT_H__
++
++#include "handle.h"
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++PVRSRV_ERROR PVRSRVLookupOSMemHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phOSMemHandle, IMG_HANDLE hMHandle);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/sgx/bridged_sgx_bridge.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/sgx/bridged_sgx_bridge.c
+new file mode 100644
+index 0000000..be7e23d
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/sgx/bridged_sgx_bridge.c
+@@ -0,0 +1,2511 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++
++#include <stddef.h>
++
++#include "img_defs.h"
++
++#if defined(SUPPORT_SGX)
++
++#include "services.h"
++#include "pvr_debug.h"
++#include "pvr_bridge.h"
++#include "sgx_bridge.h"
++#include "perproc.h"
++#include "power.h"
++#include "pvr_bridge_km.h"
++#include "sgx_bridge_km.h"
++
++#if defined(SUPPORT_MSVDX)
++ #include "msvdx_bridge.h"
++#endif
++
++#include "bridged_pvr_bridge.h"
++#include "bridged_sgx_bridge.h"
++#include "sgxutils.h"
++#include "pdump_km.h"
++
++static IMG_INT
++SGXGetClientInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GETCLIENTINFO *psGetClientInfoIN,
++ PVRSRV_BRIDGE_OUT_GETCLIENTINFO *psGetClientInfoOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETCLIENTINFO);
++
++ psGetClientInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psGetClientInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psGetClientInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psGetClientInfoOUT->eError =
++ SGXGetClientInfoKM(hDevCookieInt,
++ &psGetClientInfoOUT->sClientInfo);
++ return 0;
++}
++
++static IMG_INT
++SGXReleaseClientInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_RELEASECLIENTINFO *psReleaseClientInfoIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ IMG_HANDLE hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psReleaseClientInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++ PVR_ASSERT(psDevInfo->ui32ClientRefCount > 0);
++
++ psDevInfo->ui32ClientRefCount--;
++
++ psRetOUT->eError = PVRSRV_OK;
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXGetInternalDevInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO *psSGXGetInternalDevInfoIN,
++ PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO *psSGXGetInternalDevInfoOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO);
++
++ psSGXGetInternalDevInfoOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psSGXGetInternalDevInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psSGXGetInternalDevInfoOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psSGXGetInternalDevInfoOUT->eError =
++ SGXGetInternalDevInfoKM(hDevCookieInt,
++ &psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo);
++
++
++ psSGXGetInternalDevInfoOUT->eError =
++ PVRSRVAllocHandle(psPerProc->psHandleBase,
++ &psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo.hHostCtlKernelMemInfoHandle,
++ psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo.hHostCtlKernelMemInfoHandle,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXDoKickBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_DOKICK *psDoKickIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_UINT32 i;
++ IMG_INT ret = 0;
++ IMG_UINT32 ui32NumDstSyncs;
++ IMG_HANDLE *phKernelSyncInfoHandles = IMG_NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_DOKICK);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psDoKickIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.hCCBKernelMemInfo,
++ psDoKickIN->sCCBKick.hCCBKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if(psDoKickIN->sCCBKick.hTA3DSyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.hTA3DSyncInfo,
++ psDoKickIN->sCCBKick.hTA3DSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if(psDoKickIN->sCCBKick.hTASyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.hTASyncInfo,
++ psDoKickIN->sCCBKick.hTASyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if(psDoKickIN->sCCBKick.h3DSyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.h3DSyncInfo,
++ psDoKickIN->sCCBKick.h3DSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++
++ if (psDoKickIN->sCCBKick.ui32NumTASrcSyncs > SGX_MAX_TA_SRC_SYNCS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++
++ for(i=0; i<psDoKickIN->sCCBKick.ui32NumTASrcSyncs; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ahTASrcKernelSyncInfo[i],
++ psDoKickIN->sCCBKick.ahTASrcKernelSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psDoKickIN->sCCBKick.ui32NumTADstSyncs > SGX_MAX_TA_DST_SYNCS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++
++ for(i=0; i<psDoKickIN->sCCBKick.ui32NumTADstSyncs; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ahTADstKernelSyncInfo[i],
++ psDoKickIN->sCCBKick.ahTADstKernelSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psDoKickIN->sCCBKick.ui32Num3DSrcSyncs > SGX_MAX_3D_SRC_SYNCS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++
++ for(i=0; i<psDoKickIN->sCCBKick.ui32Num3DSrcSyncs; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ah3DSrcKernelSyncInfo[i],
++ psDoKickIN->sCCBKick.ah3DSrcKernelSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++#else
++
++ if (psDoKickIN->sCCBKick.ui32NumSrcSyncs > SGX_MAX_SRC_SYNCS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for(i=0; i<psDoKickIN->sCCBKick.ui32NumSrcSyncs; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ahSrcKernelSyncInfo[i],
++ psDoKickIN->sCCBKick.ahSrcKernelSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++#endif
++
++ if (psDoKickIN->sCCBKick.ui32NumTAStatusVals > SGX_MAX_TA_STATUS_VALS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for (i = 0; i < psDoKickIN->sCCBKick.ui32NumTAStatusVals; i++)
++ {
++ psRetOUT->eError =
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.asTAStatusUpdate[i].hKernelMemInfo,
++ psDoKickIN->sCCBKick.asTAStatusUpdate[i].hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++#else
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ahTAStatusSyncInfo[i],
++ psDoKickIN->sCCBKick.ahTAStatusSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++#endif
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psDoKickIN->sCCBKick.ui32Num3DStatusVals > SGX_MAX_3D_STATUS_VALS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for(i = 0; i < psDoKickIN->sCCBKick.ui32Num3DStatusVals; i++)
++ {
++ psRetOUT->eError =
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.as3DStatusUpdate[i].hKernelMemInfo,
++ psDoKickIN->sCCBKick.as3DStatusUpdate[i].hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++#else
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.ah3DStatusSyncInfo[i],
++ psDoKickIN->sCCBKick.ah3DStatusSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++#endif
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ ui32NumDstSyncs = psDoKickIN->sCCBKick.ui32NumDstSyncObjects;
++
++ if(ui32NumDstSyncs > 0)
++ {
++ if(!OSAccessOK(PVR_VERIFY_READ,
++ psDoKickIN->sCCBKick.pahDstSyncHandles,
++ ui32NumDstSyncs * sizeof(IMG_HANDLE)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: SGXDoKickBW:"
++ " Invalid pasDstSyncHandles pointer", __FUNCTION__));
++ return -EFAULT;
++ }
++
++ psRetOUT->eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32NumDstSyncs * sizeof(IMG_HANDLE),
++ (IMG_VOID **)&phKernelSyncInfoHandles,
++ 0,
++ "Array of Synchronization Info Handles");
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ phKernelSyncInfoHandles,
++ psDoKickIN->sCCBKick.pahDstSyncHandles,
++ ui32NumDstSyncs * sizeof(IMG_HANDLE)) != PVRSRV_OK)
++ {
++ ret = -EFAULT;
++ goto PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT;
++ }
++
++
++ psDoKickIN->sCCBKick.pahDstSyncHandles = phKernelSyncInfoHandles;
++
++ for( i = 0; i < ui32NumDstSyncs; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.pahDstSyncHandles[i],
++ psDoKickIN->sCCBKick.pahDstSyncHandles[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT;
++ }
++
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psDoKickIN->sCCBKick.hKernelHWSyncListMemInfo,
++ psDoKickIN->sCCBKick.hKernelHWSyncListMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT;
++ }
++ }
++
++ psRetOUT->eError =
++ SGXDoKickKM(hDevCookieInt,
++ &psDoKickIN->sCCBKick);
++
++PVRSRV_BRIDGE_SGX_DOKICK_RETURN_RESULT:
++
++ if(phKernelSyncInfoHandles)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32NumDstSyncs * sizeof(IMG_HANDLE),
++ (IMG_VOID *)phKernelSyncInfoHandles,
++ 0);
++
++ }
++
++ return ret;
++}
++
++
++static IMG_INT
++SGXScheduleProcessQueuesBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_SCHEDULE_PROCESS_QUEUES *psScheduleProcQIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psScheduleProcQIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = SGXScheduleProcessQueuesKM(hDevCookieInt);
++
++ return 0;
++}
++
++
++#if defined(TRANSFER_QUEUE)
++static IMG_INT
++SGXSubmitTransferBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SUBMITTRANSFER *psSubmitTransferIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_TRANSFER_SGX_KICK *psKick;
++ IMG_UINT32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SUBMITTRANSFER);
++ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++
++ psKick = &psSubmitTransferIN->sKick;
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSubmitTransferIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->hCCBMemInfo,
++ psKick->hCCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->hTASyncInfo,
++ psKick->hTASyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->h3DSyncInfo,
++ psKick->h3DSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->ui32NumSrcSync > SGX_MAX_TRANSFER_SYNC_OPS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->ahSrcSyncInfo[i],
++ psKick->ahSrcSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->ui32NumDstSync > SGX_MAX_TRANSFER_SYNC_OPS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for (i = 0; i < psKick->ui32NumDstSync; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->ahDstSyncInfo[i],
++ psKick->ahDstSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ psRetOUT->eError = SGXSubmitTransferKM(hDevCookieInt, psKick);
++
++ return 0;
++}
++
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++static IMG_INT
++SGXSubmit2DBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SUBMIT2D *psSubmit2DIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_2D_SGX_KICK *psKick;
++ IMG_UINT32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SUBMIT2D);
++ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSubmit2DIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psKick = &psSubmit2DIN->sKick;
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->hCCBMemInfo,
++ psKick->hCCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->hTASyncInfo,
++ psKick->hTASyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->h3DSyncInfo,
++ psKick->h3DSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->ui32NumSrcSync > SGX_MAX_2D_SRC_SYNC_OPS)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++ return 0;
++ }
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->ahSrcSyncInfo[i],
++ psKick->ahSrcSyncInfo[i],
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ if (psKick->hDstSyncInfo != IMG_NULL)
++ {
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &psKick->hDstSyncInfo,
++ psKick->hDstSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++ psRetOUT->eError =
++ SGXSubmit2DKM(hDevCookieInt, psKick);
++
++ return 0;
++}
++#endif
++#endif
++
++
++static IMG_INT
++SGXGetMiscInfoBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXGETMISCINFO *psSGXGetMiscInfoIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hDevMemContextInt = 0;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ SGX_MISC_INFO sMiscInfo;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++ PVRSRV_BRIDGE_SGX_GETMISCINFO);
++
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXGetMiscInfoIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++
++ if (psSGXGetMiscInfoIN->psMiscInfo->eRequest == SGX_MISC_INFO_REQUEST_MEMREAD)
++ {
++ psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevMemContextInt,
++ psSGXGetMiscInfoIN->psMiscInfo->hDevMemContext,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++#endif
++
++ psDeviceNode = hDevCookieInt;
++ PVR_ASSERT(psDeviceNode != IMG_NULL);
++ if (psDeviceNode == IMG_NULL)
++ {
++ return -EFAULT;
++ }
++
++ psDevInfo = psDeviceNode->pvDevice;
++
++
++ psRetOUT->eError = CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ &sMiscInfo,
++ psSGXGetMiscInfoIN->psMiscInfo,
++ sizeof(SGX_MISC_INFO));
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ return -EFAULT;
++ }
++
++#ifdef SUPPORT_SGX_HWPERF
++ if (sMiscInfo.eRequest == SGX_MISC_INFO_REQUEST_HWPERF_RETRIEVE_CB)
++ {
++
++ IMG_VOID * pAllocated;
++ IMG_HANDLE hAllocatedHandle;
++ IMG_VOID * psTmpUserData;
++ IMG_UINT32 allocatedSize;
++
++ allocatedSize = (IMG_UINT32)(sMiscInfo.uData.sRetrieveCB.ui32ArraySize * sizeof(PVRSRV_SGX_HWPERF_CBDATA));
++
++ ASSIGN_AND_EXIT_ON_ERROR(psRetOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ allocatedSize,
++ &pAllocated,
++ &hAllocatedHandle,
++ "Array of Hardware Performance Circular Buffer Data"));
++
++
++ psTmpUserData = sMiscInfo.uData.sRetrieveCB.psHWPerfData;
++ sMiscInfo.uData.sRetrieveCB.psHWPerfData = pAllocated;
++
++ psRetOUT->eError = SGXGetMiscInfoKM(psDevInfo, &sMiscInfo, psDeviceNode, 0);
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ allocatedSize,
++ pAllocated,
++ hAllocatedHandle);
++
++ return 0;
++ }
++
++
++ psRetOUT->eError = CopyToUserWrapper(psPerProc,
++ ui32BridgeID,
++ psTmpUserData,
++ sMiscInfo.uData.sRetrieveCB.psHWPerfData,
++ allocatedSize);
++
++ sMiscInfo.uData.sRetrieveCB.psHWPerfData = psTmpUserData;
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ allocatedSize,
++ pAllocated,
++ hAllocatedHandle);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ return -EFAULT;
++ }
++ }
++ else
++#endif
++ {
++ psRetOUT->eError = SGXGetMiscInfoKM(psDevInfo, &sMiscInfo, psDeviceNode, hDevMemContextInt);
++
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++ }
++
++
++ psRetOUT->eError = CopyToUserWrapper(psPerProc,
++ ui32BridgeID,
++ psSGXGetMiscInfoIN->psMiscInfo,
++ &sMiscInfo,
++ sizeof(SGX_MISC_INFO));
++ if (psRetOUT->eError != PVRSRV_OK)
++ {
++ return -EFAULT;
++ }
++ return 0;
++}
++
++
++#if defined(SUPPORT_SGX_HWPERF)
++static IMG_INT
++SGXReadDiffCountersBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_READ_DIFF_COUNTERS *psSGXReadDiffCountersIN,
++ PVRSRV_BRIDGE_OUT_SGX_READ_DIFF_COUNTERS *psSGXReadDiffCountersOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_READ_DIFF_COUNTERS);
++
++ psSGXReadDiffCountersOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXReadDiffCountersIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psSGXReadDiffCountersOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psSGXReadDiffCountersOUT->eError = SGXReadDiffCountersKM(hDevCookieInt,
++ psSGXReadDiffCountersIN->ui32Reg,
++ &psSGXReadDiffCountersOUT->ui32Old,
++ psSGXReadDiffCountersIN->bNew,
++ psSGXReadDiffCountersIN->ui32New,
++ psSGXReadDiffCountersIN->ui32NewReset,
++ psSGXReadDiffCountersIN->ui32CountersReg,
++ psSGXReadDiffCountersIN->ui32Reg2,
++ &psSGXReadDiffCountersOUT->bActive,
++ &psSGXReadDiffCountersOUT->sDiffs);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXReadHWPerfCBBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_CB *psSGXReadHWPerfCBIN,
++ PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_CB *psSGXReadHWPerfCBOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_SGX_HWPERF_CB_ENTRY *psAllocated;
++ IMG_HANDLE hAllocatedHandle;
++ IMG_UINT32 ui32AllocatedSize;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_READ_HWPERF_CB);
++
++ psSGXReadHWPerfCBOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXReadHWPerfCBIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psSGXReadHWPerfCBOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ ui32AllocatedSize = psSGXReadHWPerfCBIN->ui32ArraySize *
++ sizeof(psSGXReadHWPerfCBIN->psHWPerfCBData[0]);
++ ASSIGN_AND_EXIT_ON_ERROR(psSGXReadHWPerfCBOUT->eError,
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32AllocatedSize,
++ (IMG_VOID **)&psAllocated,
++ &hAllocatedHandle,
++ "Array of Hardware Performance Circular Buffer Data"));
++
++ psSGXReadHWPerfCBOUT->eError = SGXReadHWPerfCBKM(hDevCookieInt,
++ psSGXReadHWPerfCBIN->ui32ArraySize,
++ psAllocated,
++ &psSGXReadHWPerfCBOUT->ui32DataCount,
++ &psSGXReadHWPerfCBOUT->ui32ClockSpeed,
++ &psSGXReadHWPerfCBOUT->ui32HostTimeStamp);
++ if (psSGXReadHWPerfCBOUT->eError == PVRSRV_OK)
++ {
++ psSGXReadHWPerfCBOUT->eError = CopyToUserWrapper(psPerProc,
++ ui32BridgeID,
++ psSGXReadHWPerfCBIN->psHWPerfCBData,
++ psAllocated,
++ ui32AllocatedSize);
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32AllocatedSize,
++ psAllocated,
++ hAllocatedHandle);
++
++
++ return 0;
++}
++#endif
++
++
++static IMG_INT
++SGXDevInitPart2BW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXDEVINITPART2 *psSGXDevInitPart2IN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_ERROR eError;
++ IMG_BOOL bDissociateFailed = IMG_FALSE;
++ IMG_BOOL bLookupFailed = IMG_FALSE;
++ IMG_BOOL bReleaseFailed = IMG_FALSE;
++ IMG_HANDLE hDummy;
++ IMG_UINT32 i;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_DEVINITPART2);
++
++ if(!psPerProc->bInitProcess)
++ {
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXDevInitPart2IN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SUPPORT_SGX_HWPERF)
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ psSGXDevInitPart2IN->sInitInfo.hKernelTmpDPMStateMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++ {
++ IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++ if (hHandle == IMG_NULL)
++ {
++ continue;
++ }
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDummy,
++ hHandle,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bLookupFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++ }
++
++ if (bLookupFailed)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "DevInitSGXPart2BW: A handle lookup failed"));
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++
++ #if defined(SGX_SUPPORT_HWPROFILING)
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SUPPORT_SGX_HWPERF)
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ &psSGXDevInitPart2IN->sInitInfo.hKernelTmpDPMStateMemInfo,
++ psSGXDevInitPart2IN->sInitInfo.hKernelTmpDPMStateMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++
++ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++ {
++ IMG_HANDLE *phHandle = &psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++ if (*phHandle == IMG_NULL)
++ continue;
++
++ eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase,
++ phHandle,
++ *phHandle,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ bReleaseFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++ }
++
++ if (bReleaseFailed)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "DevInitSGXPart2BW: A handle release failed"));
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++
++ PVR_DBG_BREAK;
++ return 0;
++ }
++
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SUPPORT_SGX_HWPERF)
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelEDMStatusBufferMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelTmpDPMStateMemInfo);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++#endif
++
++ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++ {
++ IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++ if (hHandle == IMG_NULL)
++ continue;
++
++ eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, hHandle);
++ bDissociateFailed |= (IMG_BOOL)(eError != PVRSRV_OK);
++ }
++
++
++ if(bDissociateFailed)
++ {
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXTA3DCtlMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXMiscMemInfo);
++
++ for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++ {
++ IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++ if (hHandle == IMG_NULL)
++ continue;
++
++ PVRSRVFreeDeviceMemKM(hDevCookieInt, (PVRSRV_KERNEL_MEM_INFO *)hHandle);
++
++ }
++
++ PVR_DPF((PVR_DBG_ERROR, "DevInitSGXPart2BW: A dissociate failed"));
++
++ psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++
++
++ PVR_DBG_BREAK;
++ return 0;
++ }
++
++ psRetOUT->eError =
++ DevInitSGXPart2KM(psPerProc,
++ hDevCookieInt,
++ &psSGXDevInitPart2IN->sInitInfo);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXRegisterHWRenderContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT *psSGXRegHWRenderContextIN,
++ PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT *psSGXRegHWRenderContextOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hHWRenderContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHWRenderContextOUT->eError, psPerProc, 1);
++
++ psSGXRegHWRenderContextOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXRegHWRenderContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psSGXRegHWRenderContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ hHWRenderContextInt =
++ SGXRegisterHWRenderContextKM(hDevCookieInt,
++ &psSGXRegHWRenderContextIN->sHWRenderContextDevVAddr,
++ psPerProc);
++
++ if (hHWRenderContextInt == IMG_NULL)
++ {
++ psSGXRegHWRenderContextOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXRegHWRenderContextOUT->hHWRenderContext,
++ hHWRenderContextInt,
++ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHWRenderContextOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXUnregisterHWRenderContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT *psSGXUnregHWRenderContextIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hHWRenderContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hHWRenderContextInt,
++ psSGXUnregHWRenderContextIN->hHWRenderContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = SGXUnregisterHWRenderContextKM(hHWRenderContextInt);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXUnregHWRenderContextIN->hHWRenderContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXRegisterHWTransferContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT *psSGXRegHWTransferContextIN,
++ PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT *psSGXRegHWTransferContextOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hHWTransferContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHWTransferContextOUT->eError, psPerProc, 1);
++
++ psSGXRegHWTransferContextOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXRegHWTransferContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psSGXRegHWTransferContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ hHWTransferContextInt =
++ SGXRegisterHWTransferContextKM(hDevCookieInt,
++ &psSGXRegHWTransferContextIN->sHWTransferContextDevVAddr,
++ psPerProc);
++
++ if (hHWTransferContextInt == IMG_NULL)
++ {
++ psSGXRegHWTransferContextOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXRegHWTransferContextOUT->hHWTransferContext,
++ hHWTransferContextInt,
++ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHWTransferContextOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXUnregisterHWTransferContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT *psSGXUnregHWTransferContextIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hHWTransferContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hHWTransferContextInt,
++ psSGXUnregHWTransferContextIN->hHWTransferContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = SGXUnregisterHWTransferContextKM(hHWTransferContextInt);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXUnregHWTransferContextIN->hHWTransferContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT);
++
++ return 0;
++}
++
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++static IMG_INT
++SGXRegisterHW2DContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT *psSGXRegHW2DContextIN,
++ PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT *psSGXRegHW2DContextOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_HANDLE hHW2DContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXRegHW2DContextOUT->eError, psPerProc, 1);
++
++ psSGXRegHW2DContextOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXRegHW2DContextIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psSGXRegHW2DContextOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ hHW2DContextInt =
++ SGXRegisterHW2DContextKM(hDevCookieInt,
++ &psSGXRegHW2DContextIN->sHW2DContextDevVAddr,
++ psPerProc);
++
++ if (hHW2DContextInt == IMG_NULL)
++ {
++ psSGXRegHW2DContextOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXRegHW2DContextOUT->hHW2DContext,
++ hHW2DContextInt,
++ PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXRegHW2DContextOUT->eError, psPerProc);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXUnregisterHW2DContextBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT *psSGXUnregHW2DContextIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hHW2DContextInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hHW2DContextInt,
++ psSGXUnregHW2DContextIN->hHW2DContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError = SGXUnregisterHW2DContextKM(hHW2DContextInt);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXUnregHW2DContextIN->hHW2DContext,
++ PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT);
++
++ return 0;
++}
++#endif
++
++static IMG_INT
++SGXFlushHWRenderTargetBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET *psSGXFlushHWRenderTargetIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXFlushHWRenderTargetIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ SGXFlushHWRenderTargetKM(hDevCookieInt, psSGXFlushHWRenderTargetIN->sHWRTDataSetDevVAddr);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGX2DQueryBlitsCompleteBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE *ps2DQueryBltsCompleteIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_VOID *pvSyncInfo;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ ps2DQueryBltsCompleteIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo,
++ ps2DQueryBltsCompleteIN->hKernSyncInfo,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++ psRetOUT->eError =
++ SGX2DQueryBlitsCompleteKM(psDevInfo,
++ (PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo,
++ ps2DQueryBltsCompleteIN->bWaitForComplete);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXFindSharedPBDescBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescIN,
++ PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos = IMG_NULL;
++ IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount = 0;
++ IMG_UINT32 i;
++ IMG_HANDLE hSharedPBDesc = IMG_NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXFindSharedPBDescOUT->eError, psPerProc, PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS + 4);
++
++ psSGXFindSharedPBDescOUT->hSharedPBDesc = IMG_NULL;
++
++ psSGXFindSharedPBDescOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXFindSharedPBDescIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++ goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++
++ psSGXFindSharedPBDescOUT->eError =
++ SGXFindSharedPBDescKM(psPerProc, hDevCookieInt,
++ psSGXFindSharedPBDescIN->bLockOnFailure,
++ psSGXFindSharedPBDescIN->ui32TotalPBSize,
++ &hSharedPBDesc,
++ &psSharedPBDescKernelMemInfo,
++ &psHWPBDescKernelMemInfo,
++ &psBlockKernelMemInfo,
++ &psHWBlockKernelMemInfo,
++ &ppsSharedPBDescSubKernelMemInfos,
++ &ui32SharedPBDescSubKernelMemInfosCount);
++ if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++ goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++
++ PVR_ASSERT(ui32SharedPBDescSubKernelMemInfosCount
++ <= PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS);
++
++ psSGXFindSharedPBDescOUT->ui32SharedPBDescSubKernelMemInfoHandlesCount =
++ ui32SharedPBDescSubKernelMemInfosCount;
++
++ if(hSharedPBDesc == IMG_NULL)
++ {
++ psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle = 0;
++
++ goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->hSharedPBDesc,
++ hSharedPBDesc,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle,
++ psSharedPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->hSharedPBDesc);
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->hHWPBDescKernelMemInfoHandle,
++ psHWPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->hSharedPBDesc);
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->hBlockKernelMemInfoHandle,
++ psBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->hSharedPBDesc);
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOUT->hHWBlockKernelMemInfoHandle,
++ psHWBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->hSharedPBDesc);
++
++
++ for(i=0; i<ui32SharedPBDescSubKernelMemInfosCount; i++)
++ {
++ PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescOut =
++ psSGXFindSharedPBDescOUT;
++
++ PVRSRVAllocSubHandleNR(psPerProc->psHandleBase,
++ &psSGXFindSharedPBDescOut->ahSharedPBDescSubKernelMemInfoHandles[i],
++ ppsSharedPBDescSubKernelMemInfos[i],
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle);
++ }
++
++PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT:
++ if (ppsSharedPBDescSubKernelMemInfos != IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *) * ui32SharedPBDescSubKernelMemInfosCount,
++ ppsSharedPBDescSubKernelMemInfos,
++ IMG_NULL);
++ }
++
++ if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++ {
++ if(hSharedPBDesc != IMG_NULL)
++ {
++ SGXUnrefSharedPBDescKM(hSharedPBDesc);
++ }
++ }
++ else
++ {
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXFindSharedPBDescOUT->eError, psPerProc);
++ }
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXUnrefSharedPBDescBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC *psSGXUnrefSharedPBDescIN,
++ PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC *psSGXUnrefSharedPBDescOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hSharedPBDesc;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC);
++
++ psSGXUnrefSharedPBDescOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hSharedPBDesc,
++ psSGXUnrefSharedPBDescIN->hSharedPBDesc,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC);
++ if(psSGXUnrefSharedPBDescOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psSGXUnrefSharedPBDescOUT->eError =
++ SGXUnrefSharedPBDescKM(hSharedPBDesc);
++
++ if(psSGXUnrefSharedPBDescOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psSGXUnrefSharedPBDescOUT->eError =
++ PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXUnrefSharedPBDescIN->hSharedPBDesc,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC);
++
++ return 0;
++}
++
++
++static IMG_INT
++SGXAddSharedPBDescBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC *psSGXAddSharedPBDescIN,
++ PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC *psSGXAddSharedPBDescOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo;
++ IMG_UINT32 ui32KernelMemInfoHandlesCount =
++ psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount;
++ IMG_INT ret = 0;
++ IMG_HANDLE *phKernelMemInfoHandles = IMG_NULL;
++ PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfos = IMG_NULL;
++ IMG_UINT32 i;
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hSharedPBDesc = IMG_NULL;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXAddSharedPBDescOUT->eError, psPerProc, 1);
++
++ psSGXAddSharedPBDescOUT->hSharedPBDesc = IMG_NULL;
++
++ PVR_ASSERT(ui32KernelMemInfoHandlesCount
++ <= PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS);
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &hDevCookieInt,
++ psSGXAddSharedPBDescIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID **)&psSharedPBDescKernelMemInfo,
++ psSGXAddSharedPBDescIN->hSharedPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID **)&psHWPBDescKernelMemInfo,
++ psSGXAddSharedPBDescIN->hHWPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID **)&psBlockKernelMemInfo,
++ psSGXAddSharedPBDescIN->hBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID **)&psHWBlockKernelMemInfo,
++ psSGXAddSharedPBDescIN->hHWBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++
++ if(!OSAccessOK(PVR_VERIFY_READ,
++ psSGXAddSharedPBDescIN->phKernelMemInfoHandles,
++ ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC:"
++ " Invalid phKernelMemInfos pointer", __FUNCTION__));
++ ret = -EFAULT;
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE),
++ (IMG_VOID **)&phKernelMemInfoHandles,
++ 0,
++ "Array of Handles");
++ if (eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ phKernelMemInfoHandles,
++ psSGXAddSharedPBDescIN->phKernelMemInfoHandles,
++ ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE))
++ != PVRSRV_OK)
++ {
++ ret = -EFAULT;
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32KernelMemInfoHandlesCount * sizeof(PVRSRV_KERNEL_MEM_INFO *),
++ (IMG_VOID **)&ppsKernelMemInfos,
++ 0,
++ "Array of pointers to Kernel Memory Info");
++ if (eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ for(i=0; i<ui32KernelMemInfoHandlesCount; i++)
++ {
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ (IMG_VOID **)&ppsKernelMemInfos[i],
++ phKernelMemInfoHandles[i],
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++ }
++
++
++
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXAddSharedPBDescIN->hSharedPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXAddSharedPBDescIN->hHWPBDescKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXAddSharedPBDescIN->hBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ psSGXAddSharedPBDescIN->hHWBlockKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++ for(i=0; i<ui32KernelMemInfoHandlesCount; i++)
++ {
++
++ eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++ phKernelMemInfoHandles[i],
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ PVR_ASSERT(eError == PVRSRV_OK);
++ }
++
++ eError = SGXAddSharedPBDescKM(psPerProc, hDevCookieInt,
++ psSharedPBDescKernelMemInfo,
++ psHWPBDescKernelMemInfo,
++ psBlockKernelMemInfo,
++ psHWBlockKernelMemInfo,
++ psSGXAddSharedPBDescIN->ui32TotalPBSize,
++ &hSharedPBDesc,
++ ppsKernelMemInfos,
++ ui32KernelMemInfoHandlesCount);
++
++
++ if (eError != PVRSRV_OK)
++ {
++ goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++ }
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &psSGXAddSharedPBDescOUT->hSharedPBDesc,
++ hSharedPBDesc,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT:
++
++ if(phKernelMemInfoHandles)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE),
++ (IMG_VOID *)phKernelMemInfoHandles,
++ 0);
++ }
++ if(ppsKernelMemInfos)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount * sizeof(PVRSRV_KERNEL_MEM_INFO *),
++ (IMG_VOID *)ppsKernelMemInfos,
++ 0);
++ }
++
++ if(ret == 0 && eError == PVRSRV_OK)
++ {
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXAddSharedPBDescOUT->eError, psPerProc);
++ }
++
++ psSGXAddSharedPBDescOUT->eError = eError;
++
++ return ret;
++}
++
++static IMG_INT
++SGXGetInfoForSrvinitBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT *psSGXInfoForSrvinitIN,
++ PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT *psSGXInfoForSrvinitOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_HANDLE hDevCookieInt;
++ IMG_UINT32 i;
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT);
++
++ NEW_HANDLE_BATCH_OR_ERROR(psSGXInfoForSrvinitOUT->eError, psPerProc, PVRSRV_MAX_CLIENT_HEAPS);
++
++ if(!psPerProc->bInitProcess)
++ {
++ psSGXInfoForSrvinitOUT->eError = PVRSRV_ERROR_GENERIC;
++ return 0;
++ }
++
++ psSGXInfoForSrvinitOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psSGXInfoForSrvinitIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++ if(psSGXInfoForSrvinitOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psSGXInfoForSrvinitOUT->eError =
++ SGXGetInfoForSrvinitKM(hDevCookieInt,
++ &psSGXInfoForSrvinitOUT->sInitInfo);
++
++ if(psSGXInfoForSrvinitOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ for(i = 0; i < PVRSRV_MAX_CLIENT_HEAPS; i++)
++ {
++ PVRSRV_HEAP_INFO *psHeapInfo;
++
++ psHeapInfo = &psSGXInfoForSrvinitOUT->sInitInfo.asHeapInfo[i];
++
++ if (psHeapInfo->ui32HeapID != (IMG_UINT32)SGX_UNDEFINED_HEAP_ID)
++ {
++ IMG_HANDLE hDevMemHeapExt;
++
++ if (psHeapInfo->hDevMemHeap != IMG_NULL)
++ {
++
++ PVRSRVAllocHandleNR(psPerProc->psHandleBase,
++ &hDevMemHeapExt,
++ psHeapInfo->hDevMemHeap,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++ psHeapInfo->hDevMemHeap = hDevMemHeapExt;
++ }
++ }
++ }
++
++ COMMIT_HANDLE_BATCH_OR_ERROR(psSGXInfoForSrvinitOUT->eError, psPerProc);
++
++ return 0;
++}
++
++#if defined(PDUMP)
++static IMG_VOID
++DumpBufferArray(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ PSGX_KICKTA_DUMP_BUFFER psBufferArray,
++ IMG_UINT32 ui32BufferArrayLength,
++ IMG_BOOL bDumpPolls)
++{
++ IMG_UINT32 i;
++
++ for (i=0; i<ui32BufferArrayLength; i++)
++ {
++ PSGX_KICKTA_DUMP_BUFFER psBuffer;
++ PVRSRV_KERNEL_MEM_INFO *psCtrlMemInfoKM;
++ IMG_CHAR * pszName;
++ IMG_HANDLE hUniqueTag;
++ IMG_UINT32 ui32Offset;
++
++ psBuffer = &psBufferArray[i];
++ pszName = psBuffer->pszName;
++ if (!pszName)
++ {
++ pszName = "Nameless buffer";
++ }
++
++ hUniqueTag = MAKEUNIQUETAG((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hKernelMemInfo);
++
++ #if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ psCtrlMemInfoKM = ((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hCtrlKernelMemInfo);
++ ui32Offset = psBuffer->sCtrlDevVAddr.uiAddr - psCtrlMemInfoKM->sDevVAddr.uiAddr;
++ #else
++ psCtrlMemInfoKM = ((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hKernelMemInfo)->psKernelSyncInfo->psSyncDataMemInfoKM;
++ ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete);
++ #endif
++
++ if (psBuffer->ui32Start <= psBuffer->ui32End)
++ {
++ if (bDumpPolls)
++ {
++ PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName);
++ PDUMPCBP(psCtrlMemInfoKM,
++ ui32Offset,
++ psBuffer->ui32Start,
++ psBuffer->ui32SpaceUsed,
++ psBuffer->ui32BufferSize,
++ 0,
++ MAKEUNIQUETAG(psCtrlMemInfoKM));
++ }
++
++ PDUMPCOMMENTWITHFLAGS(0, "%s\r\n", pszName);
++ PDUMPMEMUM(psPerProc,
++ IMG_NULL,
++ psBuffer->pvLinAddr,
++ (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo,
++ psBuffer->ui32Start,
++ psBuffer->ui32End - psBuffer->ui32Start,
++ 0,
++ hUniqueTag);
++ }
++ else
++ {
++
++
++ if (bDumpPolls)
++ {
++ PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName);
++ PDUMPCBP(psCtrlMemInfoKM,
++ ui32Offset,
++ psBuffer->ui32Start,
++ psBuffer->ui32BackEndLength,
++ psBuffer->ui32BufferSize,
++ 0,
++ MAKEUNIQUETAG(psCtrlMemInfoKM));
++ }
++ PDUMPCOMMENTWITHFLAGS(0, "%s (part 1)\r\n", pszName);
++ PDUMPMEMUM(psPerProc,
++ IMG_NULL,
++ psBuffer->pvLinAddr,
++ (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo,
++ psBuffer->ui32Start,
++ psBuffer->ui32BackEndLength,
++ 0,
++ hUniqueTag);
++
++ if (bDumpPolls)
++ {
++ PDUMPMEMPOL(psCtrlMemInfoKM,
++ ui32Offset,
++ 0,
++ 0xFFFFFFFF,
++ PDUMP_POLL_OPERATOR_NOTEQUAL,
++ 0,
++ MAKEUNIQUETAG(psCtrlMemInfoKM));
++
++ PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName);
++ PDUMPCBP(psCtrlMemInfoKM,
++ ui32Offset,
++ 0,
++ psBuffer->ui32End,
++ psBuffer->ui32BufferSize,
++ 0,
++ MAKEUNIQUETAG(psCtrlMemInfoKM));
++ }
++ PDUMPCOMMENTWITHFLAGS(0, "%s (part 2)\r\n", pszName);
++ PDUMPMEMUM(psPerProc,
++ IMG_NULL,
++ psBuffer->pvLinAddr,
++ (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo,
++ 0,
++ psBuffer->ui32End,
++ 0,
++ hUniqueTag);
++ }
++ }
++}
++static IMG_INT
++SGXPDumpBufferArrayBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY *psPDumpBufferArrayIN,
++ IMG_VOID *psBridgeOut,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_UINT32 i;
++ SGX_KICKTA_DUMP_BUFFER *psKickTADumpBuffer;
++ IMG_UINT32 ui32BufferArrayLength =
++ psPDumpBufferArrayIN->ui32BufferArrayLength;
++ IMG_UINT32 ui32BufferArraySize =
++ ui32BufferArrayLength * sizeof(SGX_KICKTA_DUMP_BUFFER);
++ PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
++
++ PVR_UNREFERENCED_PARAMETER(psBridgeOut);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY);
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32BufferArraySize,
++ (IMG_PVOID *)&psKickTADumpBuffer, 0,
++ "Array of Kick Tile Accelerator Dump Buffer") != PVRSRV_OK)
++ {
++ return -ENOMEM;
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ psKickTADumpBuffer,
++ psPDumpBufferArrayIN->psBufferArray,
++ ui32BufferArraySize) != PVRSRV_OK)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, psKickTADumpBuffer, 0);
++
++ return -EFAULT;
++ }
++
++ for(i = 0; i < ui32BufferArrayLength; i++)
++ {
++ IMG_VOID *pvMemInfo;
++
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo,
++ psKickTADumpBuffer[i].hKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY: "
++ "PVRSRVLookupHandle failed (%d)", eError));
++ break;
++ }
++ psKickTADumpBuffer[i].hKernelMemInfo = pvMemInfo;
++
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++ &pvMemInfo,
++ psKickTADumpBuffer[i].hCtrlKernelMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY: "
++ "PVRSRVLookupHandle failed (%d)", eError));
++ break;
++ }
++ psKickTADumpBuffer[i].hCtrlKernelMemInfo = pvMemInfo;
++#endif
++ }
++
++ if(eError == PVRSRV_OK)
++ {
++ DumpBufferArray(psPerProc,
++ psKickTADumpBuffer,
++ ui32BufferArrayLength,
++ psPDumpBufferArrayIN->bDumpPolls);
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, psKickTADumpBuffer, 0);
++
++
++ return 0;
++}
++
++static IMG_INT
++SGXPDump3DSignatureRegistersBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_3D_SIGNATURE_REGISTERS *psPDump3DSignatureRegistersIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_UINT32 ui32RegisterArraySize = psPDump3DSignatureRegistersIN->ui32NumRegisters * sizeof(IMG_UINT32);
++ IMG_UINT32 *pui32Registers = IMG_NULL;
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++ PVRSRV_SGXDEV_INFO *psDevInfo = IMG_NULL;
++ IMG_HANDLE hDevCookieInt;
++ IMG_UINT32 ui32RegVal = 0;
++#endif
++ IMG_INT ret = -EFAULT;
++
++ PVR_UNREFERENCED_PARAMETER(psRetOUT);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS);
++
++ if (ui32RegisterArraySize == 0)
++ {
++ goto ExitNoError;
++ }
++
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psPDump3DSignatureRegistersIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: hDevCookie lookup failed"));
++ goto Exit;
++ }
++
++ psDevInfo = ((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT);
++#if defined(PDUMP)
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT,
++ psPDump3DSignatureRegistersIN->bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++#endif
++#endif
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32RegisterArraySize,
++ (IMG_PVOID *)&pui32Registers, 0,
++ "Array of Registers") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDump3DSignatureRegistersBW: OSAllocMem failed"));
++ goto Exit;
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ pui32Registers,
++ psPDump3DSignatureRegistersIN->pui32Registers,
++ ui32RegisterArraySize) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDump3DSignatureRegistersBW: CopyFromUserWrapper failed"));
++ goto Exit;
++ }
++
++ PDump3DSignatureRegisters(psPDump3DSignatureRegistersIN->ui32DumpFrameNum,
++ psPDump3DSignatureRegistersIN->bLastFrame,
++ pui32Registers,
++ psPDump3DSignatureRegistersIN->ui32NumRegisters);
++
++ExitNoError:
++ psRetOUT->eError = PVRSRV_OK;
++ ret = 0;
++Exit:
++ if (pui32Registers != IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize, pui32Registers, 0);
++ }
++
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++ if (psDevInfo != IMG_NULL)
++ {
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, ui32RegVal);
++#if defined(PDUMP)
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_CORE, ui32RegVal,
++ psPDump3DSignatureRegistersIN->bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++#endif
++ }
++#endif
++
++ return ret;
++}
++
++static IMG_INT
++SGXPDumpCounterRegistersBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_COUNTER_REGISTERS *psPDumpCounterRegistersIN,
++ IMG_VOID *psBridgeOut,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_UINT32 ui32RegisterArraySize = psPDumpCounterRegistersIN->ui32NumRegisters * sizeof(IMG_UINT32);
++ IMG_UINT32 *pui32Registers = IMG_NULL;
++ IMG_INT ret = -EFAULT;
++
++ PVR_UNREFERENCED_PARAMETER(psBridgeOut);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS);
++
++ if (ui32RegisterArraySize == 0)
++ {
++ goto ExitNoError;
++ }
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32RegisterArraySize,
++ (IMG_PVOID *)&pui32Registers, 0,
++ "Array of Registers") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpCounterRegistersBW: OSAllocMem failed"));
++ ret = -ENOMEM;
++ goto Exit;
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ pui32Registers,
++ psPDumpCounterRegistersIN->pui32Registers,
++ ui32RegisterArraySize) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpCounterRegistersBW: CopyFromUserWrapper failed"));
++ goto Exit;
++ }
++
++ PDumpCounterRegisters(psPDumpCounterRegistersIN->ui32DumpFrameNum,
++ psPDumpCounterRegistersIN->bLastFrame,
++ pui32Registers,
++ psPDumpCounterRegistersIN->ui32NumRegisters);
++
++ExitNoError:
++ ret = 0;
++Exit:
++ if (pui32Registers != IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize, pui32Registers, 0);
++ }
++
++ return ret;
++}
++
++static IMG_INT
++SGXPDumpTASignatureRegistersBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_TA_SIGNATURE_REGISTERS *psPDumpTASignatureRegistersIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ IMG_UINT32 ui32RegisterArraySize = psPDumpTASignatureRegistersIN->ui32NumRegisters * sizeof(IMG_UINT32);
++ IMG_UINT32 *pui32Registers = IMG_NULL;
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++ PVRSRV_SGXDEV_INFO *psDevInfo = IMG_NULL;
++ IMG_HANDLE hDevCookieInt;
++ IMG_UINT32 ui32RegVal = 0;
++#endif
++ IMG_INT ret = -EFAULT;
++
++ PVR_UNREFERENCED_PARAMETER(psRetOUT);
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS);
++
++ if (ui32RegisterArraySize == 0)
++ {
++ goto ExitNoError;
++ }
++
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psPDumpTASignatureRegistersIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: hDevCookie lookup failed"));
++ goto Exit;
++ }
++
++ psDevInfo = ((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT);
++#if defined(PDUMP)
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_CORE, (SGX_FEATURE_MP_CORE_COUNT - 1) << EUR_CR_MASTER_CORE_ENABLE_SHIFT,
++ psPDumpTASignatureRegistersIN->bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++#endif
++#endif
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32RegisterArraySize,
++ (IMG_PVOID *)&pui32Registers, 0,
++ "Array of Registers") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: OSAllocMem failed"));
++ ret = -ENOMEM;
++ goto Exit;
++ }
++
++ if(CopyFromUserWrapper(psPerProc,
++ ui32BridgeID,
++ pui32Registers,
++ psPDumpTASignatureRegistersIN->pui32Registers,
++ ui32RegisterArraySize) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpTASignatureRegistersBW: CopyFromUserWrapper failed"));
++ goto Exit;
++ }
++
++ PDumpTASignatureRegisters(psPDumpTASignatureRegistersIN->ui32DumpFrameNum,
++ psPDumpTASignatureRegistersIN->ui32TAKickCount,
++ psPDumpTASignatureRegistersIN->bLastFrame,
++ pui32Registers,
++ psPDumpTASignatureRegistersIN->ui32NumRegisters);
++
++ExitNoError:
++ psRetOUT->eError = PVRSRV_OK;
++ ret = 0;
++Exit:
++ if (pui32Registers != IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32RegisterArraySize, pui32Registers, 0);
++ }
++
++#if defined(SGX_FEATURE_MP) && defined(FIX_HW_BRN_27270)
++ if (psDevInfo != IMG_NULL)
++ {
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_CORE, ui32RegVal);
++#if defined(PDUMP)
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_CORE, ui32RegVal,
++ psPDumpTASignatureRegistersIN->bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++#endif
++ }
++#endif
++
++ return ret;
++}
++static IMG_INT
++SGXPDumpHWPerfCBBW(IMG_UINT32 ui32BridgeID,
++ PVRSRV_BRIDGE_IN_PDUMP_HWPERFCB *psPDumpHWPerfCBIN,
++ PVRSRV_BRIDGE_RETURN *psRetOUT,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++#if defined(SUPPORT_SGX_HWPERF)
++#if defined(__linux__)
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ IMG_HANDLE hDevCookieInt;
++
++ PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB);
++
++ psRetOUT->eError =
++ PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++ psPDumpHWPerfCBIN->hDevCookie,
++ PVRSRV_HANDLE_TYPE_DEV_NODE);
++ if(psRetOUT->eError != PVRSRV_OK)
++ {
++ return 0;
++ }
++
++ psDevInfo = ((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++ PDumpHWPerfCBKM(&psPDumpHWPerfCBIN->szFileName[0],
++ psPDumpHWPerfCBIN->ui32FileOffset,
++ psDevInfo->psKernelHWPerfCBMemInfo->sDevVAddr,
++ psDevInfo->psKernelHWPerfCBMemInfo->ui32AllocSize,
++ psPDumpHWPerfCBIN->ui32PDumpFlags);
++
++ return 0;
++#else
++ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++ PVR_UNREFERENCED_PARAMETER(psPDumpHWPerfCBIN);
++ PVR_UNREFERENCED_PARAMETER(psRetOUT);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ return 0;
++#endif
++#else
++ PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++ PVR_UNREFERENCED_PARAMETER(psPDumpHWPerfCBIN);
++ PVR_UNREFERENCED_PARAMETER(psRetOUT);
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ return -EFAULT;
++#endif
++}
++
++#endif
++
++
++IMG_VOID SetSGXDispatchTableEntry(IMG_VOID)
++{
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETCLIENTINFO, SGXGetClientInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO, SGXReleaseClientInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO, SGXGetInternalDevInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_DOKICK, SGXDoKickBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETPHYSPAGEADDR, DummyBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READREGISTRYDWORD, DummyBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE, SGX2DQueryBlitsCompleteBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETMMUPDADDR, DummyBW);
++
++#if defined(TRANSFER_QUEUE)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SUBMITTRANSFER, SGXSubmitTransferBW);
++#endif
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETMISCINFO, SGXGetMiscInfoBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT , SGXGetInfoForSrvinitBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_DEVINITPART2, SGXDevInitPart2BW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC, SGXFindSharedPBDescBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC, SGXUnrefSharedPBDescBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC, SGXAddSharedPBDescBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT, SGXRegisterHWRenderContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET, SGXFlushHWRenderTargetBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT, SGXUnregisterHWRenderContextBW);
++#if defined(SGX_FEATURE_2D_HARDWARE)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SUBMIT2D, SGXSubmit2DBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT, SGXRegisterHW2DContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT, SGXUnregisterHW2DContextBW);
++#endif
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT, SGXRegisterHWTransferContextBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT, SGXUnregisterHWTransferContextBW);
++
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SCHEDULE_PROCESS_QUEUES, SGXScheduleProcessQueuesBW);
++
++#if defined(SUPPORT_SGX_HWPERF)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READ_DIFF_COUNTERS, SGXReadDiffCountersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READ_HWPERF_CB, SGXReadHWPerfCBBW);
++#endif
++
++#if defined(PDUMP)
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_BUFFER_ARRAY, SGXPDumpBufferArrayBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_3D_SIGNATURE_REGISTERS, SGXPDump3DSignatureRegistersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_COUNTER_REGISTERS, SGXPDumpCounterRegistersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_TA_SIGNATURE_REGISTERS, SGXPDumpTASignatureRegistersBW);
++ SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_PDUMP_HWPERFCB, SGXPDumpHWPerfCBBW);
++#endif
++}
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/sgx/bridged_sgx_bridge.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/sgx/bridged_sgx_bridge.h
+new file mode 100644
+index 0000000..23f3600
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/bridged/sgx/bridged_sgx_bridge.h
+@@ -0,0 +1,42 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __BRIDGED_SGX_BRIDGE_H__
++#define __BRIDGED_SGX_BRIDGE_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++
++IMG_VOID SetSGXDispatchTableEntry(IMG_VOID);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/.gitignore b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/.gitignore
+new file mode 100644
+index 0000000..2f89523
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/.gitignore
+@@ -0,0 +1,5 @@
++bin_pc_i686*
++tmp_pc_i686*
++host_pc_i686*
++*.o
++*.o.cmd
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/buffer_manager.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/buffer_manager.c
+new file mode 100644
+index 0000000..946fe79
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/buffer_manager.c
+@@ -0,0 +1,2036 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++
++#include "sysconfig.h"
++#include "hash.h"
++#include "ra.h"
++#include "pdump_km.h"
++
++#define MIN(a,b) (a > b ? b : a)
++
++
++#include "lists.h"
++
++DECLARE_LIST_ANY_VA(BM_HEAP);
++DECLARE_LIST_ANY_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK);
++DECLARE_LIST_ANY_VA_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK);
++DECLARE_LIST_FOR_EACH_VA(BM_HEAP);
++DECLARE_LIST_INSERT(BM_HEAP);
++DECLARE_LIST_REMOVE(BM_HEAP);
++
++DECLARE_LIST_FOR_EACH(BM_CONTEXT);
++DECLARE_LIST_ANY_VA(BM_CONTEXT);
++DECLARE_LIST_ANY_VA_2(BM_CONTEXT, IMG_HANDLE, IMG_NULL);
++DECLARE_LIST_INSERT(BM_CONTEXT);
++DECLARE_LIST_REMOVE(BM_CONTEXT);
++
++
++static IMG_BOOL
++ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags);
++static IMG_VOID
++BM_FreeMemory (IMG_VOID *pH, IMG_UINTPTR_T base, BM_MAPPING *psMapping);
++static IMG_BOOL
++BM_ImportMemory(IMG_VOID *pH, IMG_SIZE_T uSize,
++ IMG_SIZE_T *pActualSize, BM_MAPPING **ppsMapping,
++ IMG_UINT32 uFlags, IMG_UINTPTR_T *pBase);
++
++static IMG_BOOL
++DevMemoryAlloc (BM_CONTEXT *pBMContext,
++ BM_MAPPING *pMapping,
++ IMG_SIZE_T *pActualSize,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 dev_vaddr_alignment,
++ IMG_DEV_VIRTADDR *pDevVAddr);
++static IMG_VOID
++DevMemoryFree (BM_MAPPING *pMapping);
++
++static IMG_BOOL
++AllocMemory (BM_CONTEXT *pBMContext,
++ BM_HEAP *psBMHeap,
++ IMG_DEV_VIRTADDR *psDevVAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 uDevVAddrAlignment,
++ BM_BUF *pBuf)
++{
++ BM_MAPPING *pMapping;
++ IMG_UINTPTR_T uOffset;
++ RA_ARENA *pArena = IMG_NULL;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "AllocMemory (pBMContext=%08X, uSize=0x%x, uFlags=0x%x, align=0x%x, pBuf=%08X)",
++ pBMContext, uSize, uFlags, uDevVAddrAlignment, pBuf));
++
++
++
++
++ if(uFlags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
++ {
++ if(uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
++ {
++
++ PVR_DPF ((PVR_DBG_ERROR, "AllocMemory: combination of DevVAddr management and RAM backing mode unsupported"));
++ return IMG_FALSE;
++ }
++
++
++
++
++ if(psBMHeap->ui32Attribs
++ & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
++ |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
++ {
++
++ pArena = psBMHeap->pImportArena;
++ }
++ else
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "AllocMemory: backing store type doesn't match heap"));
++ return IMG_FALSE;
++ }
++
++
++ if (!RA_Alloc(pArena,
++ uSize,
++ IMG_NULL,
++ (IMG_VOID*) &pMapping,
++ uFlags,
++ uDevVAddrAlignment,
++ 0,
++ (IMG_UINTPTR_T *)&(pBuf->DevVAddr.uiAddr)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: RA_Alloc(0x%x) FAILED", uSize));
++ return IMG_FALSE;
++ }
++
++ uOffset = pBuf->DevVAddr.uiAddr - pMapping->DevVAddr.uiAddr;
++ if(pMapping->CpuVAddr)
++ {
++ pBuf->CpuVAddr = (IMG_VOID*) ((IMG_UINTPTR_T)pMapping->CpuVAddr + uOffset);
++ }
++ else
++ {
++ pBuf->CpuVAddr = IMG_NULL;
++ }
++
++ if(uSize == pMapping->uSize)
++ {
++ pBuf->hOSMemHandle = pMapping->hOSMemHandle;
++ }
++ else
++ {
++ if(OSGetSubMemHandle(pMapping->hOSMemHandle,
++ uOffset,
++ uSize,
++ psBMHeap->ui32Attribs,
++ &pBuf->hOSMemHandle)!=PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: OSGetSubMemHandle FAILED"));
++ return IMG_FALSE;
++ }
++ }
++
++
++ pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + uOffset;
++
++ if(uFlags & PVRSRV_MEM_ZERO)
++ {
++ if(!ZeroBuf(pBuf, pMapping, uSize, psBMHeap->ui32Attribs | uFlags))
++ {
++ return IMG_FALSE;
++ }
++ }
++ }
++ else
++ {
++ if(uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
++ {
++
++ PVR_ASSERT(psDevVAddr != IMG_NULL);
++
++ if (psDevVAddr == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: invalid parameter - psDevVAddr"));
++ return IMG_FALSE;
++ }
++
++
++ pBMContext->psDeviceNode->pfnMMUAlloc (psBMHeap->pMMUHeap,
++ uSize,
++ IMG_NULL,
++ PVRSRV_MEM_USER_SUPPLIED_DEVVADDR,
++ uDevVAddrAlignment,
++ psDevVAddr);
++
++
++ pBuf->DevVAddr = *psDevVAddr;
++ }
++ else
++ {
++
++
++
++ pBMContext->psDeviceNode->pfnMMUAlloc (psBMHeap->pMMUHeap,
++ uSize,
++ IMG_NULL,
++ 0,
++ uDevVAddrAlignment,
++ &pBuf->DevVAddr);
++ }
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (struct _BM_MAPPING_),
++ (IMG_PVOID *)&pMapping, IMG_NULL,
++ "Buffer Manager Mapping") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "AllocMemory: OSAllocMem(0x%x) FAILED"));
++ return IMG_FALSE;
++ }
++
++
++ pBuf->CpuVAddr = IMG_NULL;
++ pBuf->hOSMemHandle = 0;
++ pBuf->CpuPAddr.uiAddr = 0;
++
++
++ pMapping->CpuVAddr = IMG_NULL;
++ pMapping->CpuPAddr.uiAddr = 0;
++ pMapping->DevVAddr = pBuf->DevVAddr;
++ pMapping->psSysAddr = IMG_NULL;
++ pMapping->uSize = uSize;
++ pMapping->hOSMemHandle = 0;
++ }
++
++
++ pMapping->pArena = pArena;
++
++
++ pMapping->pBMHeap = psBMHeap;
++ pBuf->pMapping = pMapping;
++
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "AllocMemory: pMapping=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++ pMapping,
++ pMapping->DevVAddr.uiAddr,
++ pMapping->CpuVAddr,
++ pMapping->CpuPAddr.uiAddr,
++ pMapping->uSize));
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "AllocMemory: pBuf=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++ pBuf,
++ pBuf->DevVAddr.uiAddr,
++ pBuf->CpuVAddr,
++ pBuf->CpuPAddr.uiAddr,
++ uSize));
++
++
++ PVR_ASSERT(((pBuf->DevVAddr.uiAddr) & (uDevVAddrAlignment - 1)) == 0);
++
++ return IMG_TRUE;
++}
++
++
++static IMG_BOOL
++WrapMemory (BM_HEAP *psBMHeap,
++ IMG_SIZE_T uSize,
++ IMG_SIZE_T ui32BaseOffset,
++ IMG_BOOL bPhysContig,
++ IMG_SYS_PHYADDR *psAddr,
++ IMG_VOID *pvCPUVAddr,
++ IMG_UINT32 uFlags,
++ BM_BUF *pBuf)
++{
++ IMG_DEV_VIRTADDR DevVAddr = {0};
++ BM_MAPPING *pMapping;
++ IMG_BOOL bResult;
++ IMG_SIZE_T const ui32PageSize = HOST_PAGESIZE();
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "WrapMemory(psBMHeap=%08X, size=0x%x, offset=0x%x, bPhysContig=0x%x, pvCPUVAddr = 0x%x, flags=0x%x, pBuf=%08X)",
++ psBMHeap, uSize, ui32BaseOffset, bPhysContig, pvCPUVAddr, uFlags, pBuf));
++
++ PVR_ASSERT((psAddr->uiAddr & (ui32PageSize - 1)) == 0);
++
++ PVR_ASSERT(((IMG_UINTPTR_T)pvCPUVAddr & (ui32PageSize - 1)) == 0);
++
++ uSize += ui32BaseOffset;
++ uSize = HOST_PAGEALIGN (uSize);
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*pMapping),
++ (IMG_PVOID *)&pMapping, IMG_NULL,
++ "Mocked-up mapping") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSAllocMem(0x%x) FAILED",sizeof(*pMapping)));
++ return IMG_FALSE;
++ }
++
++ OSMemSet(pMapping, 0, sizeof (*pMapping));
++
++ pMapping->uSize = uSize;
++ pMapping->pBMHeap = psBMHeap;
++
++ if(pvCPUVAddr)
++ {
++ pMapping->CpuVAddr = pvCPUVAddr;
++
++ if (bPhysContig)
++ {
++ pMapping->eCpuMemoryOrigin = hm_wrapped_virtaddr;
++ pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]);
++
++ if(OSRegisterMem(pMapping->CpuPAddr,
++ pMapping->CpuVAddr,
++ pMapping->uSize,
++ uFlags,
++ &pMapping->hOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSRegisterMem Phys=0x%08X, CpuVAddr = 0x%08X, Size=%d) failed",
++ pMapping->CpuPAddr, pMapping->CpuVAddr, pMapping->uSize));
++ goto fail_cleanup;
++ }
++ }
++ else
++ {
++ pMapping->eCpuMemoryOrigin = hm_wrapped_scatter_virtaddr;
++ pMapping->psSysAddr = psAddr;
++
++ if(OSRegisterDiscontigMem(pMapping->psSysAddr,
++ pMapping->CpuVAddr,
++ pMapping->uSize,
++ uFlags,
++ &pMapping->hOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSRegisterDiscontigMem CpuVAddr = 0x%08X, Size=%d) failed",
++ pMapping->CpuVAddr, pMapping->uSize));
++ goto fail_cleanup;
++ }
++ }
++ }
++ else
++ {
++ if (bPhysContig)
++ {
++ pMapping->eCpuMemoryOrigin = hm_wrapped;
++ pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]);
++
++ if(OSReservePhys(pMapping->CpuPAddr,
++ pMapping->uSize,
++ uFlags,
++ &pMapping->CpuVAddr,
++ &pMapping->hOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSReservePhys Phys=0x%08X, Size=%d) failed",
++ pMapping->CpuPAddr, pMapping->uSize));
++ goto fail_cleanup;
++ }
++ }
++ else
++ {
++ pMapping->eCpuMemoryOrigin = hm_wrapped_scatter;
++ pMapping->psSysAddr = psAddr;
++
++ if(OSReserveDiscontigPhys(pMapping->psSysAddr,
++ pMapping->uSize,
++ uFlags,
++ &pMapping->CpuVAddr,
++ &pMapping->hOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSReserveDiscontigPhys Size=%d) failed",
++ pMapping->uSize));
++ goto fail_cleanup;
++ }
++ }
++ }
++
++
++ bResult = DevMemoryAlloc(psBMHeap->pBMContext,
++ pMapping,
++ IMG_NULL,
++ uFlags | PVRSRV_MEM_READ | PVRSRV_MEM_WRITE,
++ IMG_CAST_TO_DEVVADDR_UINT(ui32PageSize),
++ &DevVAddr);
++ if (!bResult)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "WrapMemory: DevMemoryAlloc(0x%x) failed",
++ pMapping->uSize));
++ goto fail_cleanup;
++ }
++
++
++ pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + ui32BaseOffset;
++ if(!ui32BaseOffset)
++ {
++ pBuf->hOSMemHandle = pMapping->hOSMemHandle;
++ }
++ else
++ {
++ if(OSGetSubMemHandle(pMapping->hOSMemHandle,
++ ui32BaseOffset,
++ (pMapping->uSize-ui32BaseOffset),
++ uFlags,
++ &pBuf->hOSMemHandle)!=PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSGetSubMemHandle failed"));
++ goto fail_cleanup;
++ }
++ }
++ if(pMapping->CpuVAddr)
++ {
++ pBuf->CpuVAddr = (IMG_VOID*) ((IMG_UINTPTR_T)pMapping->CpuVAddr + ui32BaseOffset);
++ }
++ pBuf->DevVAddr.uiAddr = pMapping->DevVAddr.uiAddr + IMG_CAST_TO_DEVVADDR_UINT(ui32BaseOffset);
++
++ if(uFlags & PVRSRV_MEM_ZERO)
++ {
++ if(!ZeroBuf(pBuf, pMapping, uSize, uFlags))
++ {
++ return IMG_FALSE;
++ }
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "DevVaddr.uiAddr=%08X", DevVAddr.uiAddr));
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "WrapMemory: pMapping=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++ pMapping, pMapping->DevVAddr.uiAddr,
++ pMapping->CpuVAddr, pMapping->CpuPAddr.uiAddr, pMapping->uSize));
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "WrapMemory: pBuf=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++ pBuf, pBuf->DevVAddr.uiAddr,
++ pBuf->CpuVAddr, pBuf->CpuPAddr.uiAddr, uSize));
++
++ pBuf->pMapping = pMapping;
++ return IMG_TRUE;
++
++fail_cleanup:
++ if(ui32BaseOffset && pBuf->hOSMemHandle)
++ {
++ OSReleaseSubMemHandle(pBuf->hOSMemHandle, uFlags);
++ }
++
++ if(pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle))
++ {
++ switch(pMapping->eCpuMemoryOrigin)
++ {
++ case hm_wrapped:
++ OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_virtaddr:
++ OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_scatter:
++ OSUnReserveDiscontigPhys(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_scatter_virtaddr:
++ OSUnRegisterDiscontigMem(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
++ break;
++ default:
++ break;
++ }
++
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
++
++
++ return IMG_FALSE;
++}
++
++
++static IMG_BOOL
++ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags)
++{
++ IMG_VOID *pvCpuVAddr;
++
++ if(pBuf->CpuVAddr)
++ {
++ OSMemSet(pBuf->CpuVAddr, 0, ui32Bytes);
++ }
++ else if(pMapping->eCpuMemoryOrigin == hm_contiguous
++ || pMapping->eCpuMemoryOrigin == hm_wrapped)
++ {
++ pvCpuVAddr = OSMapPhysToLin(pBuf->CpuPAddr,
++ ui32Bytes,
++ PVRSRV_HAP_KERNEL_ONLY
++ | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
++ IMG_NULL);
++ if(!pvCpuVAddr)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ZeroBuf: OSMapPhysToLin for contiguous buffer failed"));
++ return IMG_FALSE;
++ }
++ OSMemSet(pvCpuVAddr, 0, ui32Bytes);
++ OSUnMapPhysToLin(pvCpuVAddr,
++ ui32Bytes,
++ PVRSRV_HAP_KERNEL_ONLY
++ | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
++ IMG_NULL);
++ }
++ else
++ {
++ IMG_SIZE_T ui32BytesRemaining = ui32Bytes;
++ IMG_SIZE_T ui32CurrentOffset = 0;
++ IMG_CPU_PHYADDR CpuPAddr;
++
++
++ PVR_ASSERT(pBuf->hOSMemHandle);
++
++ while(ui32BytesRemaining > 0)
++ {
++ IMG_SIZE_T ui32BlockBytes = MIN(ui32BytesRemaining, HOST_PAGESIZE());
++ CpuPAddr = OSMemHandleToCpuPAddr(pBuf->hOSMemHandle, ui32CurrentOffset);
++
++ if(CpuPAddr.uiAddr & (HOST_PAGESIZE() -1))
++ {
++ ui32BlockBytes =
++ MIN(ui32BytesRemaining, HOST_PAGEALIGN(CpuPAddr.uiAddr) - CpuPAddr.uiAddr);
++ }
++
++ pvCpuVAddr = OSMapPhysToLin(CpuPAddr,
++ ui32BlockBytes,
++ PVRSRV_HAP_KERNEL_ONLY
++ | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
++ IMG_NULL);
++ if(!pvCpuVAddr)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ZeroBuf: OSMapPhysToLin while zeroing non-contiguous memory FAILED"));
++ return IMG_FALSE;
++ }
++ OSMemSet(pvCpuVAddr, 0, ui32BlockBytes);
++ OSUnMapPhysToLin(pvCpuVAddr,
++ ui32BlockBytes,
++ PVRSRV_HAP_KERNEL_ONLY
++ | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
++ IMG_NULL);
++
++ ui32BytesRemaining -= ui32BlockBytes;
++ ui32CurrentOffset += ui32BlockBytes;
++ }
++ }
++
++ return IMG_TRUE;
++}
++
++static IMG_VOID
++FreeBuf (BM_BUF *pBuf, IMG_UINT32 ui32Flags)
++{
++ BM_MAPPING *pMapping;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "FreeBuf: pBuf=%08X: DevVAddr=%08X CpuVAddr=%08X CpuPAddr=%08X",
++ pBuf, pBuf->DevVAddr.uiAddr, pBuf->CpuVAddr, pBuf->CpuPAddr.uiAddr));
++
++
++ pMapping = pBuf->pMapping;
++
++ if(ui32Flags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
++ {
++
++ if(ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
++ {
++
++ PVR_DPF ((PVR_DBG_ERROR, "FreeBuf: combination of DevVAddr management and RAM backing mode unsupported"));
++ }
++ else
++ {
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
++ pBuf->pMapping = IMG_NULL;
++ }
++ }
++ else
++ {
++
++ if(pBuf->hOSMemHandle != pMapping->hOSMemHandle)
++ {
++ OSReleaseSubMemHandle(pBuf->hOSMemHandle, ui32Flags);
++ }
++ if(ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
++ {
++
++
++
++ RA_Free (pBuf->pMapping->pArena, pBuf->DevVAddr.uiAddr, IMG_FALSE);
++ }
++ else
++ {
++ switch (pMapping->eCpuMemoryOrigin)
++ {
++ case hm_wrapped:
++ OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_virtaddr:
++ OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_scatter:
++ OSUnReserveDiscontigPhys(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
++ break;
++ case hm_wrapped_scatter_virtaddr:
++ OSUnRegisterDiscontigMem(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
++ break;
++ default:
++ break;
++ }
++
++ DevMemoryFree (pMapping);
++
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
++ pBuf->pMapping = IMG_NULL;
++ }
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_BUF), pBuf, IMG_NULL);
++
++}
++
++PVRSRV_ERROR BM_DestroyContext_AnyCb(BM_HEAP *psBMHeap)
++{
++ if(psBMHeap->ui32Attribs
++ & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
++ |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
++ {
++ if (psBMHeap->pImportArena)
++ {
++ IMG_BOOL bTestDelete = RA_TestDelete(psBMHeap->pImportArena);
++ if (!bTestDelete)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext_AnyCb: RA_TestDelete failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++ }
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++BM_DestroyContext(IMG_HANDLE hBMContext,
++ IMG_BOOL *pbDestroyed)
++{
++ PVRSRV_ERROR eError;
++ BM_CONTEXT *pBMContext = (BM_CONTEXT*)hBMContext;
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "BM_DestroyContext"));
++
++ if (pbDestroyed != IMG_NULL)
++ {
++ *pbDestroyed = IMG_FALSE;
++ }
++
++
++
++ if (pBMContext == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: Invalid handle"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ pBMContext->ui32RefCount--;
++
++ if (pBMContext->ui32RefCount > 0)
++ {
++
++ return PVRSRV_OK;
++ }
++
++
++
++
++ eError = List_BM_HEAP_PVRSRV_ERROR_Any(pBMContext->psBMHeap, BM_DestroyContext_AnyCb);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: List_BM_HEAP_PVRSRV_ERROR_Any failed"));
++#if 0
++
++
++
++
++ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: Cleaning up with ResManFreeSpecial"));
++ if(ResManFreeSpecial() != PVRSRV_OK)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: ResManFreeSpecial failed %d",eError));
++ }
++
++#endif
++ return eError;
++ }
++ else
++ {
++
++ eError = ResManFreeResByPtr(pBMContext->hResItem);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: ResManFreeResByPtr failed %d",eError));
++ return eError;
++ }
++
++
++ if (pbDestroyed != IMG_NULL)
++ {
++ *pbDestroyed = IMG_TRUE;
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR BM_DestroyContextCallBack_AnyVaCb(BM_HEAP *psBMHeap, va_list va)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ psDeviceNode = va_arg(va, PVRSRV_DEVICE_NODE*);
++
++
++ if(psBMHeap->ui32Attribs
++ & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
++ |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
++ {
++ if (psBMHeap->pImportArena)
++ {
++ RA_Delete (psBMHeap->pImportArena);
++ }
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_DestroyContext: backing store type unsupported"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ psDeviceNode->pfnMMUDelete(psBMHeap->pMMUHeap);
++
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL);
++
++
++ return PVRSRV_OK;
++}
++
++
++static PVRSRV_ERROR BM_DestroyContextCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ BM_CONTEXT *pBMContext = pvParam;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++
++
++ psDeviceNode = pBMContext->psDeviceNode;
++
++
++
++ if(List_BM_HEAP_PVRSRV_ERROR_Any_va(pBMContext->psBMHeap,
++ BM_DestroyContextCallBack_AnyVaCb,
++ psDeviceNode) != PVRSRV_OK)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ if (pBMContext->psMMUContext)
++ {
++ psDeviceNode->pfnMMUFinalise(pBMContext->psMMUContext);
++ }
++
++
++
++ if (pBMContext->pBufferHash)
++ {
++ HASH_Delete(pBMContext->pBufferHash);
++ }
++
++ if (pBMContext == psDeviceNode->sDevMemoryInfo.pBMKernelContext)
++ {
++
++ psDeviceNode->sDevMemoryInfo.pBMKernelContext = IMG_NULL;
++ }
++ else
++ {
++
++ List_BM_CONTEXT_Remove(pBMContext);
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_CONTEXT), pBMContext, IMG_NULL);
++
++
++ return PVRSRV_OK;
++}
++
++
++IMG_HANDLE BM_CreateContext_IncRefCount_AnyVaCb(BM_CONTEXT *pBMContext, va_list va)
++{
++ PRESMAN_CONTEXT hResManContext;
++ hResManContext = va_arg(va, PRESMAN_CONTEXT);
++ if(ResManFindResourceByPtr(hResManContext, pBMContext->hResItem) == PVRSRV_OK)
++ {
++
++ pBMContext->ui32RefCount++;
++ return pBMContext;
++ }
++ return IMG_NULL;
++}
++
++IMG_VOID BM_CreateContext_InsertHeap_ForEachVaCb(BM_HEAP *psBMHeap, va_list va)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ BM_CONTEXT *pBMContext;
++ psDeviceNode = va_arg(va, PVRSRV_DEVICE_NODE*);
++ pBMContext = va_arg(va, BM_CONTEXT*);
++ switch(psBMHeap->sDevArena.DevMemHeapType)
++ {
++ case DEVICE_MEMORY_HEAP_SHARED:
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++
++ psDeviceNode->pfnMMUInsertHeap(pBMContext->psMMUContext, psBMHeap->pMMUHeap);
++ break;
++ }
++ }
++}
++
++IMG_HANDLE
++BM_CreateContext(PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_DEV_PHYADDR *psPDDevPAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_BOOL *pbCreated)
++{
++ BM_CONTEXT *pBMContext;
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ IMG_BOOL bKernelContext;
++ PRESMAN_CONTEXT hResManContext;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_CreateContext"));
++
++ if (psPerProc == IMG_NULL)
++ {
++ bKernelContext = IMG_TRUE;
++ hResManContext = psDeviceNode->hResManContext;
++ }
++ else
++ {
++ bKernelContext = IMG_FALSE;
++ hResManContext = psPerProc->hResManContext;
++ }
++
++ if (pbCreated != IMG_NULL)
++ {
++ *pbCreated = IMG_FALSE;
++ }
++
++
++ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++
++ if (bKernelContext == IMG_FALSE)
++ {
++ IMG_HANDLE res = (IMG_HANDLE) List_BM_CONTEXT_Any_va(psDevMemoryInfo->pBMContext,
++ BM_CreateContext_IncRefCount_AnyVaCb,
++ hResManContext);
++ if (res)
++ {
++ return res;
++ }
++ }
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (struct _BM_CONTEXT_),
++ (IMG_PVOID *)&pBMContext, IMG_NULL,
++ "Buffer Manager Context") != PVRSRV_OK)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: Alloc failed"));
++ return IMG_NULL;
++ }
++ OSMemSet(pBMContext, 0, sizeof (BM_CONTEXT));
++
++
++ pBMContext->psDeviceNode = psDeviceNode;
++
++
++
++ pBMContext->pBufferHash = HASH_Create(32);
++ if (pBMContext->pBufferHash==IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: HASH_Create failed"));
++ goto cleanup;
++ }
++
++ if(psDeviceNode->pfnMMUInitialise(psDeviceNode,
++ &pBMContext->psMMUContext,
++ psPDDevPAddr) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_CreateContext: MMUInitialise failed"));
++ goto cleanup;
++ }
++
++ if(bKernelContext)
++ {
++
++ PVR_ASSERT(psDevMemoryInfo->pBMKernelContext == IMG_NULL);
++ psDevMemoryInfo->pBMKernelContext = pBMContext;
++ }
++ else
++ {
++
++
++
++
++
++ PVR_ASSERT(psDevMemoryInfo->pBMKernelContext);
++
++ if (psDevMemoryInfo->pBMKernelContext == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_CreateContext: psDevMemoryInfo->pBMKernelContext invalid"));
++ goto cleanup;
++ }
++
++ PVR_ASSERT(psDevMemoryInfo->pBMKernelContext->psBMHeap);
++
++
++
++
++
++ pBMContext->psBMSharedHeap = psDevMemoryInfo->pBMKernelContext->psBMHeap;
++
++
++
++
++ List_BM_HEAP_ForEach_va(pBMContext->psBMSharedHeap,
++ BM_CreateContext_InsertHeap_ForEachVaCb,
++ psDeviceNode,
++ pBMContext);
++
++
++ List_BM_CONTEXT_Insert(&psDevMemoryInfo->pBMContext, pBMContext);
++ }
++
++
++ pBMContext->ui32RefCount++;
++
++
++ pBMContext->hResItem = ResManRegisterRes(hResManContext,
++ RESMAN_TYPE_DEVICEMEM_CONTEXT,
++ pBMContext,
++ 0,
++ BM_DestroyContextCallBack);
++ if (pBMContext->hResItem == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: ResManRegisterRes failed"));
++ goto cleanup;
++ }
++
++ if (pbCreated != IMG_NULL)
++ {
++ *pbCreated = IMG_TRUE;
++ }
++ return (IMG_HANDLE)pBMContext;
++
++cleanup:
++ (IMG_VOID)BM_DestroyContextCallBack(pBMContext, 0);
++
++ return IMG_NULL;
++}
++
++
++IMG_VOID *BM_CreateHeap_AnyVaCb(BM_HEAP *psBMHeap, va_list va)
++{
++ DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo;
++ psDevMemHeapInfo = va_arg(va, DEVICE_MEMORY_HEAP_INFO*);
++ if (psBMHeap->sDevArena.ui32HeapID == psDevMemHeapInfo->ui32HeapID)
++ {
++
++ return psBMHeap;
++ }
++ else
++ {
++ return IMG_NULL;
++ }
++}
++
++IMG_HANDLE
++BM_CreateHeap (IMG_HANDLE hBMContext,
++ DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo)
++{
++ BM_CONTEXT *pBMContext = (BM_CONTEXT*)hBMContext;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ BM_HEAP *psBMHeap;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_CreateHeap"));
++
++ if(!pBMContext)
++ {
++ return IMG_NULL;
++ }
++
++ psDeviceNode = pBMContext->psDeviceNode;
++
++
++
++
++
++
++ if(pBMContext->ui32RefCount > 0)
++ {
++ psBMHeap = (BM_HEAP*)List_BM_HEAP_Any_va(pBMContext->psBMHeap,
++ BM_CreateHeap_AnyVaCb,
++ psDevMemHeapInfo);
++
++ if (psBMHeap)
++ {
++ return psBMHeap;
++ }
++ }
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (BM_HEAP),
++ (IMG_PVOID *)&psBMHeap, IMG_NULL,
++ "Buffer Manager Heap") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: Alloc failed"));
++ return IMG_NULL;
++ }
++
++ OSMemSet (psBMHeap, 0, sizeof (BM_HEAP));
++
++ psBMHeap->sDevArena.ui32HeapID = psDevMemHeapInfo->ui32HeapID;
++ psBMHeap->sDevArena.pszName = psDevMemHeapInfo->pszName;
++ psBMHeap->sDevArena.BaseDevVAddr = psDevMemHeapInfo->sDevVAddrBase;
++ psBMHeap->sDevArena.ui32Size = psDevMemHeapInfo->ui32HeapSize;
++ psBMHeap->sDevArena.DevMemHeapType = psDevMemHeapInfo->DevMemHeapType;
++ psBMHeap->sDevArena.ui32DataPageSize = psDevMemHeapInfo->ui32DataPageSize;
++ psBMHeap->sDevArena.psDeviceMemoryHeapInfo = psDevMemHeapInfo;
++ psBMHeap->ui32Attribs = psDevMemHeapInfo->ui32Attribs;
++
++
++ psBMHeap->pBMContext = pBMContext;
++
++ psBMHeap->pMMUHeap = psDeviceNode->pfnMMUCreate (pBMContext->psMMUContext,
++ &psBMHeap->sDevArena,
++ &psBMHeap->pVMArena);
++ if (!psBMHeap->pMMUHeap)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: MMUCreate failed"));
++ goto ErrorExit;
++ }
++
++
++ psBMHeap->pImportArena = RA_Create (psDevMemHeapInfo->pszBSName,
++ 0, 0, IMG_NULL,
++ psBMHeap->sDevArena.ui32DataPageSize,
++ BM_ImportMemory,
++ BM_FreeMemory,
++ IMG_NULL,
++ psBMHeap);
++ if(psBMHeap->pImportArena == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: RA_Create failed"));
++ goto ErrorExit;
++ }
++
++ if(psBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
++ {
++
++
++
++
++ psBMHeap->pLocalDevMemArena = psDevMemHeapInfo->psLocalDevMemArena;
++ if(psBMHeap->pLocalDevMemArena == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: LocalDevMemArena null"));
++ goto ErrorExit;
++ }
++ }
++
++
++ List_BM_HEAP_Insert(&pBMContext->psBMHeap, psBMHeap);
++
++ return (IMG_HANDLE)psBMHeap;
++
++
++ErrorExit:
++
++
++ if (psBMHeap->pMMUHeap != IMG_NULL)
++ {
++ psDeviceNode->pfnMMUDelete (psBMHeap->pMMUHeap);
++ psDeviceNode->pfnMMUFinalise (pBMContext->psMMUContext);
++ }
++
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL);
++
++
++ return IMG_NULL;
++}
++
++IMG_VOID
++BM_DestroyHeap (IMG_HANDLE hDevMemHeap)
++{
++ BM_HEAP* psBMHeap = (BM_HEAP*)hDevMemHeap;
++ PVRSRV_DEVICE_NODE *psDeviceNode = psBMHeap->pBMContext->psDeviceNode;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_DestroyHeap"));
++
++ if(psBMHeap)
++ {
++
++ if(psBMHeap->ui32Attribs
++ & (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
++ |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
++ {
++ if (psBMHeap->pImportArena)
++ {
++ RA_Delete (psBMHeap->pImportArena);
++ }
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_DestroyHeap: backing store type unsupported"));
++ return;
++ }
++
++
++ psDeviceNode->pfnMMUDelete (psBMHeap->pMMUHeap);
++
++
++ List_BM_HEAP_Remove(psBMHeap);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL);
++
++ }
++ else
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyHeap: invalid heap handle"));
++ }
++}
++
++
++IMG_BOOL
++BM_Reinitialise (PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_Reinitialise"));
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++
++
++ return IMG_TRUE;
++}
++
++IMG_BOOL
++BM_Alloc ( IMG_HANDLE hDevMemHeap,
++ IMG_DEV_VIRTADDR *psDevVAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 *pui32Flags,
++ IMG_UINT32 uDevVAddrAlignment,
++ BM_HANDLE *phBuf)
++{
++ BM_BUF *pBuf;
++ BM_CONTEXT *pBMContext;
++ BM_HEAP *psBMHeap;
++ SYS_DATA *psSysData;
++ IMG_UINT32 uFlags;
++
++ if (pui32Flags == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: invalid parameter"));
++ PVR_DBG_BREAK;
++ return IMG_FALSE;
++ }
++
++ uFlags = *pui32Flags;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_Alloc (uSize=0x%x, uFlags=0x%x, uDevVAddrAlignment=0x%x)",
++ uSize, uFlags, uDevVAddrAlignment));
++
++ SysAcquireData(&psSysData);
++
++ psBMHeap = (BM_HEAP*)hDevMemHeap;
++ pBMContext = psBMHeap->pBMContext;
++
++ if(uDevVAddrAlignment == 0)
++ {
++ uDevVAddrAlignment = 1;
++ }
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (BM_BUF),
++ (IMG_PVOID *)&pBuf, IMG_NULL,
++ "Buffer Manager buffer") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: BM_Buf alloc FAILED"));
++ return IMG_FALSE;
++ }
++ OSMemSet(pBuf, 0, sizeof (BM_BUF));
++
++
++ if (AllocMemory(pBMContext,
++ psBMHeap,
++ psDevVAddr,
++ uSize,
++ uFlags,
++ uDevVAddrAlignment,
++ pBuf) != IMG_TRUE)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
++
++ PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: AllocMemory FAILED"));
++ return IMG_FALSE;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_Alloc (uSize=0x%x, uFlags=0x%x)=%08X",
++ uSize, uFlags, pBuf));
++
++
++ pBuf->ui32RefCount = 1;
++ *phBuf = (BM_HANDLE)pBuf;
++ *pui32Flags = uFlags | psBMHeap->ui32Attribs;
++
++
++ if(uFlags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ *pui32Flags &= ~PVRSRV_HAP_CACHETYPE_MASK;
++ *pui32Flags |= (uFlags & PVRSRV_HAP_CACHETYPE_MASK);
++ }
++
++ return IMG_TRUE;
++}
++
++
++
++#if defined(PVR_LMA)
++static IMG_BOOL
++ValidSysPAddrArrayForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR *psSysPAddr, IMG_UINT32 ui32PageCount, IMG_SIZE_T ui32PageSize)
++{
++ IMG_UINT32 i;
++
++ for (i = 0; i < ui32PageCount; i++)
++ {
++ IMG_SYS_PHYADDR sStartSysPAddr = psSysPAddr[i];
++ IMG_SYS_PHYADDR sEndSysPAddr;
++
++ if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sStartSysPAddr))
++ {
++ return IMG_FALSE;
++ }
++
++ sEndSysPAddr.uiAddr = sStartSysPAddr.uiAddr + ui32PageSize;
++
++ if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sEndSysPAddr))
++ {
++ return IMG_FALSE;
++ }
++ }
++
++ return IMG_TRUE;
++}
++
++static IMG_BOOL
++ValidSysPAddrRangeForDev(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_SYS_PHYADDR sStartSysPAddr, IMG_SIZE_T ui32Range)
++{
++ IMG_SYS_PHYADDR sEndSysPAddr;
++
++ if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sStartSysPAddr))
++ {
++ return IMG_FALSE;
++ }
++
++ sEndSysPAddr.uiAddr = sStartSysPAddr.uiAddr + ui32Range;
++
++ if (!SysVerifySysPAddrToDevPAddr(psDeviceNode->sDevId.eDeviceType, sEndSysPAddr))
++ {
++ return IMG_FALSE;
++ }
++
++ return IMG_TRUE;
++}
++
++#define WRAP_MAPPING_SIZE(ui32ByteSize, ui32PageOffset) HOST_PAGEALIGN((ui32ByteSize) + (ui32PageOffset))
++
++#define WRAP_PAGE_COUNT(ui32ByteSize, ui32PageOffset, ui32HostPageSize) (WRAP_MAPPING_SIZE(ui32ByteSize, ui32PageOffset) / (ui32HostPageSize))
++
++#endif
++
++
++IMG_BOOL
++BM_Wrap ( IMG_HANDLE hDevMemHeap,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Offset,
++ IMG_BOOL bPhysContig,
++ IMG_SYS_PHYADDR *psSysAddr,
++ IMG_VOID *pvCPUVAddr,
++ IMG_UINT32 *pui32Flags,
++ BM_HANDLE *phBuf)
++{
++ BM_BUF *pBuf;
++ BM_CONTEXT *psBMContext;
++ BM_HEAP *psBMHeap;
++ SYS_DATA *psSysData;
++ IMG_SYS_PHYADDR sHashAddress;
++ IMG_UINT32 uFlags;
++
++ psBMHeap = (BM_HEAP*)hDevMemHeap;
++ psBMContext = psBMHeap->pBMContext;
++
++ uFlags = psBMHeap->ui32Attribs & (PVRSRV_HAP_CACHETYPE_MASK | PVRSRV_HAP_MAPTYPE_MASK);
++
++ if ((pui32Flags != IMG_NULL) && ((*pui32Flags & PVRSRV_HAP_CACHETYPE_MASK) != 0))
++ {
++ uFlags &= ~PVRSRV_HAP_CACHETYPE_MASK;
++ uFlags |= *pui32Flags & PVRSRV_HAP_CACHETYPE_MASK;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_Wrap (uSize=0x%x, uOffset=0x%x, bPhysContig=0x%x, pvCPUVAddr=0x%x, uFlags=0x%x)",
++ ui32Size, ui32Offset, bPhysContig, pvCPUVAddr, uFlags));
++
++ SysAcquireData(&psSysData);
++
++#if defined(PVR_LMA)
++ if (bPhysContig)
++ {
++ if (!ValidSysPAddrRangeForDev(psBMContext->psDeviceNode, *psSysAddr, WRAP_MAPPING_SIZE(ui32Size, ui32Offset)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: System address range invalid for device"));
++ return IMG_FALSE;
++ }
++ }
++ else
++ {
++ IMG_SIZE_T ui32HostPageSize = HOST_PAGESIZE();
++
++ if (!ValidSysPAddrArrayForDev(psBMContext->psDeviceNode, psSysAddr, WRAP_PAGE_COUNT(ui32Size, ui32Offset, ui32HostPageSize), ui32HostPageSize))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: Array of system addresses invalid for device"));
++ return IMG_FALSE;
++ }
++ }
++#endif
++
++ sHashAddress = psSysAddr[0];
++
++
++ sHashAddress.uiAddr += ui32Offset;
++
++
++ pBuf = (BM_BUF *)HASH_Retrieve(psBMContext->pBufferHash, (IMG_UINTPTR_T) sHashAddress.uiAddr);
++
++ if(pBuf)
++ {
++ IMG_SIZE_T ui32MappingSize = HOST_PAGEALIGN (ui32Size + ui32Offset);
++
++
++ if(pBuf->pMapping->uSize == ui32MappingSize && (pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped ||
++ pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr))
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "BM_Wrap (Matched previous Wrap! uSize=0x%x, uOffset=0x%x, SysAddr=%08X)",
++ ui32Size, ui32Offset, sHashAddress.uiAddr));
++
++ pBuf->ui32RefCount++;
++ *phBuf = (BM_HANDLE)pBuf;
++ if(pui32Flags)
++ *pui32Flags = uFlags;
++
++ return IMG_TRUE;
++ }
++ }
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (BM_BUF),
++ (IMG_PVOID *)&pBuf, IMG_NULL,
++ "Buffer Manager buffer") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: BM_Buf alloc FAILED"));
++ return IMG_FALSE;
++ }
++ OSMemSet(pBuf, 0, sizeof (BM_BUF));
++
++
++ if (WrapMemory (psBMHeap, ui32Size, ui32Offset, bPhysContig, psSysAddr, pvCPUVAddr, uFlags, pBuf) != IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: WrapMemory FAILED"));
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
++
++ return IMG_FALSE;
++ }
++
++
++ if(pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
++ {
++
++ PVR_ASSERT(SysSysPAddrToCpuPAddr(sHashAddress).uiAddr == pBuf->CpuPAddr.uiAddr);
++
++ if (!HASH_Insert (psBMContext->pBufferHash, (IMG_UINTPTR_T) sHashAddress.uiAddr, (IMG_UINTPTR_T)pBuf))
++ {
++ FreeBuf (pBuf, uFlags);
++ PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: HASH_Insert FAILED"));
++ return IMG_FALSE;
++ }
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_Wrap (uSize=0x%x, uFlags=0x%x)=%08X(devVAddr=%08X)",
++ ui32Size, uFlags, pBuf, pBuf->DevVAddr.uiAddr));
++
++
++ pBuf->ui32RefCount = 1;
++ *phBuf = (BM_HANDLE)pBuf;
++ if(pui32Flags)
++ {
++
++ *pui32Flags = (uFlags & ~PVRSRV_HAP_MAPTYPE_MASK) | PVRSRV_HAP_MULTI_PROCESS;
++ }
++
++ return IMG_TRUE;
++}
++
++
++IMG_VOID
++BM_Free (BM_HANDLE hBuf,
++ IMG_UINT32 ui32Flags)
++{
++ BM_BUF *pBuf = (BM_BUF *)hBuf;
++ SYS_DATA *psSysData;
++ IMG_SYS_PHYADDR sHashAddr;
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "BM_Free (h=%08X)", hBuf));
++ PVR_ASSERT (pBuf!=IMG_NULL);
++
++ if (pBuf == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_Free: invalid parameter"));
++ return;
++ }
++
++ SysAcquireData(&psSysData);
++
++ pBuf->ui32RefCount--;
++
++ if(pBuf->ui32RefCount == 0)
++ {
++ if(pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
++ {
++ sHashAddr = SysCpuPAddrToSysPAddr(pBuf->CpuPAddr);
++
++ HASH_Remove (pBuf->pMapping->pBMHeap->pBMContext->pBufferHash, (IMG_UINTPTR_T)sHashAddr.uiAddr);
++ }
++ FreeBuf (pBuf, ui32Flags);
++ }
++}
++
++
++IMG_CPU_VIRTADDR
++BM_HandleToCpuVaddr (BM_HANDLE hBuf)
++{
++ BM_BUF *pBuf = (BM_BUF *)hBuf;
++
++ PVR_ASSERT (pBuf != IMG_NULL);
++ if (pBuf == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_HandleToCpuVaddr: invalid parameter"));
++ return IMG_NULL;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_HandleToCpuVaddr(h=%08X)=%08X",
++ hBuf, pBuf->CpuVAddr));
++ return pBuf->CpuVAddr;
++}
++
++
++IMG_DEV_VIRTADDR
++BM_HandleToDevVaddr (BM_HANDLE hBuf)
++{
++ BM_BUF *pBuf = (BM_BUF *)hBuf;
++
++ PVR_ASSERT (pBuf != IMG_NULL);
++ if (pBuf == IMG_NULL)
++ {
++ IMG_DEV_VIRTADDR DevVAddr = {0};
++ PVR_DPF((PVR_DBG_ERROR, "BM_HandleToDevVaddr: invalid parameter"));
++ return DevVAddr;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "BM_HandleToDevVaddr(h=%08X)=%08X", hBuf, pBuf->DevVAddr));
++ return pBuf->DevVAddr;
++}
++
++
++IMG_SYS_PHYADDR
++BM_HandleToSysPaddr (BM_HANDLE hBuf)
++{
++ BM_BUF *pBuf = (BM_BUF *)hBuf;
++
++ PVR_ASSERT (pBuf != IMG_NULL);
++
++ if (pBuf == IMG_NULL)
++ {
++ IMG_SYS_PHYADDR PhysAddr = {0};
++ PVR_DPF((PVR_DBG_ERROR, "BM_HandleToSysPaddr: invalid parameter"));
++ return PhysAddr;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "BM_HandleToSysPaddr(h=%08X)=%08X", hBuf, pBuf->CpuPAddr.uiAddr));
++ return SysCpuPAddrToSysPAddr (pBuf->CpuPAddr);
++}
++
++IMG_HANDLE
++BM_HandleToOSMemHandle(BM_HANDLE hBuf)
++{
++ BM_BUF *pBuf = (BM_BUF *)hBuf;
++
++ PVR_ASSERT (pBuf != IMG_NULL);
++
++ if (pBuf == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_HandleToOSMemHandle: invalid parameter"));
++ return IMG_NULL;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_HandleToOSMemHandle(h=%08X)=%08X",
++ hBuf, pBuf->hOSMemHandle));
++ return pBuf->hOSMemHandle;
++}
++
++IMG_BOOL
++BM_ContiguousStatistics (IMG_UINT32 uFlags,
++ IMG_UINT32 *pTotalBytes,
++ IMG_UINT32 *pAvailableBytes)
++{
++ if (pAvailableBytes || pTotalBytes || uFlags);
++ return IMG_FALSE;
++}
++
++
++static IMG_BOOL
++DevMemoryAlloc (BM_CONTEXT *pBMContext,
++ BM_MAPPING *pMapping,
++ IMG_SIZE_T *pActualSize,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 dev_vaddr_alignment,
++ IMG_DEV_VIRTADDR *pDevVAddr)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++#ifdef PDUMP
++ IMG_UINT32 ui32PDumpSize = pMapping->uSize;
++#endif
++
++ psDeviceNode = pBMContext->psDeviceNode;
++
++ if(uFlags & PVRSRV_MEM_INTERLEAVED)
++ {
++
++ pMapping->uSize *= 2;
++ }
++
++#ifdef PDUMP
++ if(uFlags & PVRSRV_MEM_DUMMY)
++ {
++
++ ui32PDumpSize = pMapping->pBMHeap->sDevArena.ui32DataPageSize;
++ }
++#endif
++
++
++ if (!psDeviceNode->pfnMMUAlloc (pMapping->pBMHeap->pMMUHeap,
++ pMapping->uSize,
++ pActualSize,
++ 0,
++ dev_vaddr_alignment,
++ &(pMapping->DevVAddr)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "DevMemoryAlloc ERROR MMU_Alloc"));
++ return IMG_FALSE;
++ }
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ EnableHostAccess(pBMContext->psMMUContext);
++#endif
++
++
++
++ PDUMPMALLOCPAGES(psDeviceNode->sDevId.eDeviceType, pMapping->DevVAddr.uiAddr, pMapping->CpuVAddr, pMapping->hOSMemHandle, ui32PDumpSize, pMapping->pBMHeap->sDevArena.ui32DataPageSize, (IMG_HANDLE)pMapping);
++
++ switch (pMapping->eCpuMemoryOrigin)
++ {
++ case hm_wrapped:
++ case hm_wrapped_virtaddr:
++ case hm_contiguous:
++ {
++ psDeviceNode->pfnMMUMapPages ( pMapping->pBMHeap->pMMUHeap,
++ pMapping->DevVAddr,
++ SysCpuPAddrToSysPAddr (pMapping->CpuPAddr),
++ pMapping->uSize,
++ uFlags,
++ (IMG_HANDLE)pMapping);
++
++ *pDevVAddr = pMapping->DevVAddr;
++ break;
++ }
++ case hm_env:
++ {
++ psDeviceNode->pfnMMUMapShadow ( pMapping->pBMHeap->pMMUHeap,
++ pMapping->DevVAddr,
++ pMapping->uSize,
++ pMapping->CpuVAddr,
++ pMapping->hOSMemHandle,
++ pDevVAddr,
++ uFlags,
++ (IMG_HANDLE)pMapping);
++ break;
++ }
++ case hm_wrapped_scatter:
++ case hm_wrapped_scatter_virtaddr:
++ {
++ psDeviceNode->pfnMMUMapScatter (pMapping->pBMHeap->pMMUHeap,
++ pMapping->DevVAddr,
++ pMapping->psSysAddr,
++ pMapping->uSize,
++ uFlags,
++ (IMG_HANDLE)pMapping);
++
++ *pDevVAddr = pMapping->DevVAddr;
++ break;
++ }
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "Illegal value %d for pMapping->eCpuMemoryOrigin",
++ pMapping->eCpuMemoryOrigin));
++ return IMG_FALSE;
++ }
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ DisableHostAccess(pBMContext->psMMUContext);
++#endif
++
++ return IMG_TRUE;
++}
++
++static IMG_VOID
++DevMemoryFree (BM_MAPPING *pMapping)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++#ifdef PDUMP
++ IMG_UINT32 ui32PSize;
++#endif
++
++#ifdef PDUMP
++
++ if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++ {
++
++ ui32PSize = pMapping->pBMHeap->sDevArena.ui32DataPageSize;
++ }
++ else
++ {
++ ui32PSize = pMapping->uSize;
++ }
++
++ PDUMPFREEPAGES(pMapping->pBMHeap,
++ pMapping->DevVAddr,
++ ui32PSize,
++ pMapping->pBMHeap->sDevArena.ui32DataPageSize,
++ (IMG_HANDLE)pMapping,
++ (pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) ? IMG_TRUE : IMG_FALSE);
++#endif
++
++ psDeviceNode = pMapping->pBMHeap->pBMContext->psDeviceNode;
++
++ psDeviceNode->pfnMMUFree (pMapping->pBMHeap->pMMUHeap, pMapping->DevVAddr, IMG_CAST_TO_DEVVADDR_UINT(pMapping->uSize));
++}
++
++static IMG_BOOL
++BM_ImportMemory (IMG_VOID *pH,
++ IMG_SIZE_T uRequestSize,
++ IMG_SIZE_T *pActualSize,
++ BM_MAPPING **ppsMapping,
++ IMG_UINT32 uFlags,
++ IMG_UINTPTR_T *pBase)
++{
++ BM_MAPPING *pMapping;
++ BM_HEAP *pBMHeap = pH;
++ BM_CONTEXT *pBMContext = pBMHeap->pBMContext;
++ IMG_BOOL bResult;
++ IMG_SIZE_T uSize;
++ IMG_SIZE_T uPSize;
++ IMG_UINT32 uDevVAddrAlignment = 0;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_ImportMemory (pBMContext=%08X, uRequestSize=0x%x, uFlags=0x%x, uAlign=0x%x)",
++ pBMContext, uRequestSize, uFlags, uDevVAddrAlignment));
++
++ PVR_ASSERT (ppsMapping != IMG_NULL);
++ PVR_ASSERT (pBMContext != IMG_NULL);
++
++ if (ppsMapping == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: invalid parameter"));
++ goto fail_exit;
++ }
++
++ uSize = HOST_PAGEALIGN (uRequestSize);
++ PVR_ASSERT (uSize >= uRequestSize);
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (BM_MAPPING),
++ (IMG_PVOID *)&pMapping, IMG_NULL,
++ "Buffer Manager Mapping") != PVRSRV_OK)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "BM_ImportMemory: failed BM_MAPPING alloc"));
++ goto fail_exit;
++ }
++
++ pMapping->hOSMemHandle = 0;
++ pMapping->CpuVAddr = 0;
++ pMapping->DevVAddr.uiAddr = 0;
++ pMapping->CpuPAddr.uiAddr = 0;
++ pMapping->uSize = uSize;
++ pMapping->pBMHeap = pBMHeap;
++ pMapping->ui32Flags = uFlags;
++
++
++ if (pActualSize)
++ {
++ *pActualSize = uSize;
++ }
++
++
++ if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++ {
++ uPSize = pBMHeap->sDevArena.ui32DataPageSize;
++ }
++ else
++ {
++ uPSize = pMapping->uSize;
++ }
++
++
++
++ if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
++ {
++ IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs;
++
++
++ if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
++ ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
++ }
++
++
++ if (OSAllocPages(ui32Attribs,
++ uPSize,
++ pBMHeap->sDevArena.ui32DataPageSize,
++ (IMG_VOID **)&pMapping->CpuVAddr,
++ &pMapping->hOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_ImportMemory: OSAllocPages(0x%x) failed",
++ uPSize));
++ goto fail_mapping_alloc;
++ }
++
++
++ pMapping->eCpuMemoryOrigin = hm_env;
++ }
++ else if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++ IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs;
++
++
++ if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
++ ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
++ }
++
++
++ PVR_ASSERT(pBMHeap->pLocalDevMemArena != IMG_NULL);
++
++ if (!RA_Alloc (pBMHeap->pLocalDevMemArena,
++ uPSize,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ pBMHeap->sDevArena.ui32DataPageSize,
++ 0,
++ (IMG_UINTPTR_T *)&sSysPAddr.uiAddr))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: RA_Alloc(0x%x) FAILED", uPSize));
++ goto fail_mapping_alloc;
++ }
++
++
++ pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++ if(OSReservePhys(pMapping->CpuPAddr,
++ uPSize,
++ ui32Attribs,
++ &pMapping->CpuVAddr,
++ &pMapping->hOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: OSReservePhys failed"));
++ goto fail_dev_mem_alloc;
++ }
++
++
++ pMapping->eCpuMemoryOrigin = hm_contiguous;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: Invalid backing store type"));
++ goto fail_mapping_alloc;
++ }
++
++
++ bResult = DevMemoryAlloc (pBMContext,
++ pMapping,
++ IMG_NULL,
++ uFlags,
++ uDevVAddrAlignment,
++ &pMapping->DevVAddr);
++ if (!bResult)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "BM_ImportMemory: DevMemoryAlloc(0x%x) failed",
++ pMapping->uSize));
++ goto fail_dev_mem_alloc;
++ }
++
++
++
++ PVR_ASSERT (uDevVAddrAlignment>1?(pMapping->DevVAddr.uiAddr%uDevVAddrAlignment)==0:1);
++
++ *pBase = pMapping->DevVAddr.uiAddr;
++ *ppsMapping = pMapping;
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "BM_ImportMemory: IMG_TRUE"));
++ return IMG_TRUE;
++
++fail_dev_mem_alloc:
++ if (pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle))
++ {
++
++ if(pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED)
++ {
++ pMapping->uSize /= 2;
++ }
++
++ if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++ {
++ uPSize = pBMHeap->sDevArena.ui32DataPageSize;
++ }
++ else
++ {
++ uPSize = pMapping->uSize;
++ }
++
++ if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
++ {
++ OSFreePages(pBMHeap->ui32Attribs,
++ uPSize,
++ (IMG_VOID *)pMapping->CpuVAddr,
++ pMapping->hOSMemHandle);
++ }
++ else
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++
++ if(pMapping->CpuVAddr)
++ {
++ OSUnReservePhys(pMapping->CpuVAddr,
++ uPSize,
++ pBMHeap->ui32Attribs,
++ pMapping->hOSMemHandle);
++ }
++ sSysPAddr = SysCpuPAddrToSysPAddr(pMapping->CpuPAddr);
++ RA_Free (pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++ }
++ }
++fail_mapping_alloc:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
++
++fail_exit:
++ return IMG_FALSE;
++}
++
++
++static IMG_VOID
++BM_FreeMemory (IMG_VOID *h, IMG_UINTPTR_T _base, BM_MAPPING *psMapping)
++{
++ BM_HEAP *pBMHeap = h;
++ IMG_SIZE_T uPSize;
++
++ PVR_UNREFERENCED_PARAMETER (_base);
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "BM_FreeMemory (h=%08X, base=0x%x, psMapping=0x%x)", h, _base, psMapping));
++
++ PVR_ASSERT (psMapping != IMG_NULL);
++
++ if (psMapping == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_FreeMemory: invalid parameter"));
++ return;
++ }
++
++ DevMemoryFree (psMapping);
++
++
++ if((psMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) != 0)
++ {
++ psMapping->uSize /= 2;
++ }
++
++ if(psMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++ {
++ uPSize = psMapping->pBMHeap->sDevArena.ui32DataPageSize;
++ }
++ else
++ {
++ uPSize = psMapping->uSize;
++ }
++
++ if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
++ {
++ OSFreePages(pBMHeap->ui32Attribs,
++ uPSize,
++ (IMG_VOID *) psMapping->CpuVAddr,
++ psMapping->hOSMemHandle);
++ }
++ else if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++
++ OSUnReservePhys(psMapping->CpuVAddr, uPSize, pBMHeap->ui32Attribs, psMapping->hOSMemHandle);
++
++ sSysPAddr = SysCpuPAddrToSysPAddr(psMapping->CpuPAddr);
++
++ RA_Free (pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "BM_FreeMemory: Invalid backing store type"));
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), psMapping, IMG_NULL);
++
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "..BM_FreeMemory (h=%08X, base=0x%x, psMapping=0x%x)",
++ h, _base, psMapping));
++}
++
++IMG_VOID BM_GetPhysPageAddr(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_DEV_VIRTADDR sDevVPageAddr,
++ IMG_DEV_PHYADDR *psDevPAddr)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "BM_GetPhysPageAddr"));
++
++ PVR_ASSERT (psMemInfo && psDevPAddr)
++
++
++ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
++
++ psDeviceNode = ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->pBMContext->psDeviceNode;
++
++ *psDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->pMMUHeap,
++ sDevVPageAddr);
++}
++
++
++PVRSRV_ERROR BM_GetHeapInfo(IMG_HANDLE hDevMemHeap, PVRSRV_HEAP_INFO *psHeapInfo)
++{
++ BM_HEAP *psBMHeap = (BM_HEAP *)hDevMemHeap;
++
++ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetHeapInfo"));
++
++ psHeapInfo->hDevMemHeap = hDevMemHeap;
++ psHeapInfo->sDevVAddrBase = psBMHeap->sDevArena.BaseDevVAddr;
++ psHeapInfo->ui32HeapByteSize = psBMHeap->sDevArena.ui32Size;
++ psHeapInfo->ui32Attribs = psBMHeap->ui32Attribs;
++
++ return PVRSRV_OK;
++}
++
++
++MMU_CONTEXT* BM_GetMMUContext(IMG_HANDLE hDevMemHeap)
++{
++ BM_HEAP *pBMHeap = (BM_HEAP*)hDevMemHeap;
++
++ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUContext"));
++
++ return pBMHeap->pBMContext->psMMUContext;
++}
++
++MMU_CONTEXT* BM_GetMMUContextFromMemContext(IMG_HANDLE hDevMemContext)
++{
++ BM_CONTEXT *pBMContext = (BM_CONTEXT*)hDevMemContext;
++
++ PVR_DPF ((PVR_DBG_VERBOSE, "BM_GetMMUContextFromMemContext"));
++
++ return pBMContext->psMMUContext;
++}
++
++IMG_HANDLE BM_GetMMUHeap(IMG_HANDLE hDevMemHeap)
++{
++ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUHeap"));
++
++ return (IMG_HANDLE)((BM_HEAP*)hDevMemHeap)->pMMUHeap;
++}
++
++
++PVRSRV_DEVICE_NODE* BM_GetDeviceNode(IMG_HANDLE hDevMemContext)
++{
++ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetDeviceNode"));
++
++ return ((BM_CONTEXT*)hDevMemContext)->psDeviceNode;
++}
++
++
++IMG_HANDLE BM_GetMappingHandle(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMappingHandle"));
++
++ return ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->hOSMemHandle;
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/deviceclass.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/deviceclass.c
+new file mode 100644
+index 0000000..3340dd8
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/deviceclass.c
+@@ -0,0 +1,1937 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "kernelbuffer.h"
++#include "pvr_bridge_km.h"
++
++#include "lists.h"
++DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_INSERT(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_REMOVE(PVRSRV_DEVICE_NODE);
++
++IMG_VOID* MatchDeviceKM_AnyVaCb(PVRSRV_DEVICE_NODE* psDeviceNode, va_list va);
++
++PVRSRV_ERROR AllocateDeviceID(SYS_DATA *psSysData, IMG_UINT32 *pui32DevID);
++PVRSRV_ERROR FreeDeviceID(SYS_DATA *psSysData, IMG_UINT32 ui32DevID);
++
++#if defined(SUPPORT_MISR_IN_THREAD)
++void OSVSyncMISR(IMG_HANDLE, IMG_BOOL);
++#endif
++
++
++typedef struct PVRSRV_DC_SRV2DISP_KMJTABLE_TAG *PPVRSRV_DC_SRV2DISP_KMJTABLE;
++
++typedef struct PVRSRV_DC_BUFFER_TAG
++{
++
++ PVRSRV_DEVICECLASS_BUFFER sDeviceClassBuffer;
++
++ struct PVRSRV_DISPLAYCLASS_INFO_TAG *psDCInfo;
++ struct PVRSRV_DC_SWAPCHAIN_TAG *psSwapChain;
++} PVRSRV_DC_BUFFER;
++
++typedef struct PVRSRV_DC_SWAPCHAIN_TAG
++{
++ IMG_HANDLE hExtSwapChain;
++ IMG_UINT32 ui32SwapChainID;
++ IMG_UINT32 ui32Flags;
++ IMG_UINT32 ui32RefCount;
++ PVRSRV_QUEUE_INFO *psQueue;
++ PVRSRV_DC_BUFFER asBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++ IMG_UINT32 ui32BufferCount;
++ PVRSRV_DC_BUFFER *psLastFlipBuffer;
++ struct PVRSRV_DC_SWAPCHAIN_TAG *psNext;
++ struct PVRSRV_DISPLAYCLASS_INFO_TAG *psDCInfo;
++ //IMG_HANDLE hResItem;
++} PVRSRV_DC_SWAPCHAIN;
++
++typedef struct PVRSRV_DC_SWAPCHAIN_REF_TAG
++{
++ struct PVRSRV_DC_SWAPCHAIN_TAG *psSwapChain;
++ IMG_HANDLE hResItem;
++} PVRSRV_DC_SWAPCHAIN_REF;
++
++
++typedef struct PVRSRV_DISPLAYCLASS_INFO_TAG
++{
++ IMG_UINT32 ui32RefCount;
++ IMG_UINT32 ui32DeviceID;
++ IMG_HANDLE hExtDevice;
++ PPVRSRV_DC_SRV2DISP_KMJTABLE psFuncTable;
++ IMG_HANDLE hDevMemContext;
++ PVRSRV_DC_BUFFER sSystemBuffer;
++ struct PVRSRV_DC_SWAPCHAIN_TAG *psDCSwapChainShared;
++} PVRSRV_DISPLAYCLASS_INFO;
++
++
++typedef struct PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO_TAG
++{
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ PRESMAN_ITEM hResItem;
++} PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO;
++
++
++typedef struct PVRSRV_BC_SRV2BUFFER_KMJTABLE_TAG *PPVRSRV_BC_SRV2BUFFER_KMJTABLE;
++
++typedef struct PVRSRV_BC_BUFFER_TAG
++{
++
++ PVRSRV_DEVICECLASS_BUFFER sDeviceClassBuffer;
++
++ struct PVRSRV_BUFFERCLASS_INFO_TAG *psBCInfo;
++} PVRSRV_BC_BUFFER;
++
++
++typedef struct PVRSRV_BUFFERCLASS_INFO_TAG
++{
++ IMG_UINT32 ui32RefCount;
++ IMG_UINT32 ui32DeviceID;
++ IMG_HANDLE hExtDevice;
++ PPVRSRV_BC_SRV2BUFFER_KMJTABLE psFuncTable;
++ IMG_HANDLE hDevMemContext;
++
++ IMG_UINT32 ui32BufferCount;
++ PVRSRV_BC_BUFFER *psBuffer;
++
++} PVRSRV_BUFFERCLASS_INFO;
++
++
++typedef struct PVRSRV_BUFFERCLASS_PERCONTEXT_INFO_TAG
++{
++ PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++ IMG_HANDLE hResItem;
++} PVRSRV_BUFFERCLASS_PERCONTEXT_INFO;
++
++
++static PVRSRV_DISPLAYCLASS_INFO* DCDeviceHandleToDCInfo (IMG_HANDLE hDeviceKM)
++{
++ PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++
++ psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)hDeviceKM;
++
++ return psDCPerContextInfo->psDCInfo;
++}
++
++
++static PVRSRV_BUFFERCLASS_INFO* BCDeviceHandleToBCInfo (IMG_HANDLE hDeviceKM)
++{
++ PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++
++ psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)hDeviceKM;
++
++ return psBCPerContextInfo->psBCInfo;
++}
++
++IMG_VOID PVRSRVEnumerateDCKM_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
++{
++ IMG_UINT *pui32DevCount;
++ IMG_UINT32 **ppui32DevID;
++ PVRSRV_DEVICE_CLASS peDeviceClass;
++
++ pui32DevCount = va_arg(va, IMG_UINT*);
++ ppui32DevID = va_arg(va, IMG_UINT32**);
++ peDeviceClass = va_arg(va, PVRSRV_DEVICE_CLASS);
++
++ if ((psDeviceNode->sDevId.eDeviceClass == peDeviceClass)
++ && (psDeviceNode->sDevId.eDeviceType == PVRSRV_DEVICE_TYPE_EXT))
++ {
++ (*pui32DevCount)++;
++ if(*ppui32DevID)
++ {
++ *(*ppui32DevID)++ = psDeviceNode->sDevId.ui32DeviceIndex;
++ }
++ }
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVEnumerateDCKM (PVRSRV_DEVICE_CLASS DeviceClass,
++ IMG_UINT32 *pui32DevCount,
++ IMG_UINT32 *pui32DevID )
++{
++
++ IMG_UINT ui32DevCount = 0;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++
++ List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList,
++ PVRSRVEnumerateDCKM_ForEachVaCb,
++ &ui32DevCount,
++ &pui32DevID,
++ DeviceClass);
++
++ if(pui32DevCount)
++ {
++ *pui32DevCount = ui32DevCount;
++ }
++ else if(pui32DevID == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumerateDCKM: Invalid parameters"));
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++/* Register an external display-class device with Services.
++   Allocates the display-class info and a device node, copies the caller's
++   jump table, assigns a unique device index and links the node into the
++   system device list.  The new device ID is optionally returned via
++   pui32DeviceID.  Returns PVRSRV_ERROR_OUT_OF_MEMORY on any failure. */
++PVRSRV_ERROR PVRSRVRegisterDCDeviceKM (PVRSRV_DC_SRV2DISP_KMJTABLE *psFuncTable,
++									   IMG_UINT32	*pui32DeviceID)
++{
++	PVRSRV_DISPLAYCLASS_INFO 	*psDCInfo = IMG_NULL;
++	PVRSRV_DEVICE_NODE			*psDeviceNode = IMG_NULL;
++	SYS_DATA					*psSysData;
++
++	SysAcquireData(&psSysData);
++
++	/* Allocate and zero the display-class info structure. */
++	if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++					 sizeof(*psDCInfo),
++					 (IMG_VOID **)&psDCInfo, IMG_NULL,
++					 "Display Class Info") != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psDCInfo alloc"));
++		return PVRSRV_ERROR_OUT_OF_MEMORY;
++	}
++	OSMemSet (psDCInfo, 0, sizeof(*psDCInfo));
++
++	/* Take a private copy of the caller's jump table. */
++	if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++					 sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE),
++					 (IMG_VOID **)&psDCInfo->psFuncTable, IMG_NULL,
++					 "Function table for SRVKM->DISPLAY") != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psFuncTable alloc"));
++		goto ErrorExit;
++	}
++	OSMemSet (psDCInfo->psFuncTable, 0, sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE));
++
++	*psDCInfo->psFuncTable = *psFuncTable;
++
++	/* Allocate and initialise the device node. */
++	if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++					 sizeof(PVRSRV_DEVICE_NODE),
++					 (IMG_VOID **)&psDeviceNode, IMG_NULL,
++					 "Device Node") != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psDeviceNode alloc"));
++		goto ErrorExit;
++	}
++	OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE));
++
++	psDeviceNode->pvDevice = (IMG_VOID*)psDCInfo;
++	psDeviceNode->ui32pvDeviceSize = sizeof(*psDCInfo);
++	psDeviceNode->ui32RefCount = 1;
++	psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_EXT;
++	psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_DISPLAY;
++	psDeviceNode->psSysData = psSysData;
++
++	/* Assign a unique device index for this node. */
++	if (AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex) != PVRSRV_OK)
++	{
++		/* BUG FIX: message previously named PVRSRVRegisterBCDeviceKM (copy/paste). */
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed to allocate Device ID"));
++		goto ErrorExit;
++	}
++	psDCInfo->ui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++	if (pui32DeviceID)
++	{
++		*pui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++	}
++
++	/* Tell the system layer about the new external device. */
++	SysRegisterExternalDevice(psDeviceNode);
++
++	/* Link the node into the global device list. */
++	List_PVRSRV_DEVICE_NODE_Insert(&psSysData->psDeviceNodeList, psDeviceNode);
++
++	return PVRSRV_OK;
++
++ErrorExit:
++
++	/* BUG FIX: free the device node too - it was previously leaked when
++	   AllocateDeviceID failed after the node had been allocated. */
++	if(psDeviceNode)
++	{
++		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL);
++	}
++
++	if(psDCInfo->psFuncTable)
++	{
++		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE), psDCInfo->psFuncTable, IMG_NULL);
++		psDCInfo->psFuncTable = IMG_NULL;
++	}
++
++	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DISPLAYCLASS_INFO), psDCInfo, IMG_NULL);
++
++	return PVRSRV_ERROR_OUT_OF_MEMORY;
++}
++
++/* Unregister and destroy a display-class device previously registered via
++   PVRSRVRegisterDCDeviceKM.  Fails (PVRSRV_ERROR_GENERIC) if the device
++   index is unknown or Services connections to the device are still open. */
++PVRSRV_ERROR PVRSRVRemoveDCDeviceKM(IMG_UINT32 ui32DevIndex)
++{
++	SYS_DATA					*psSysData;
++	PVRSRV_DEVICE_NODE		*psDeviceNode;
++	PVRSRV_DISPLAYCLASS_INFO	*psDCInfo;
++
++	SysAcquireData(&psSysData);
++
++	/* Look up the device node by index among display-class devices only. */
++	psDeviceNode = (PVRSRV_DEVICE_NODE*)
++		List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++									   MatchDeviceKM_AnyVaCb,
++									   ui32DevIndex,
++									   IMG_FALSE,
++									   PVRSRV_DEVICE_CLASS_DISPLAY);
++	if (!psDeviceNode)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveDCDeviceKM: requested device %d not present", ui32DevIndex));
++		return PVRSRV_ERROR_GENERIC;
++	}
++
++	psDCInfo = (PVRSRV_DISPLAYCLASS_INFO*)psDeviceNode->pvDevice;
++
++	/* Only tear down when no per-process connections remain open. */
++	if(psDCInfo->ui32RefCount == 0)
++	{
++		/* Unlink from the global device list before freeing anything. */
++		List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode);
++
++		/* Undo the SysRegisterExternalDevice done at registration time. */
++		SysRemoveExternalDevice(psDeviceNode);
++
++		/* Release the device index, then the jump table, the info block
++		   and finally the node itself (reverse of allocation order). */
++		PVR_ASSERT(psDCInfo->ui32RefCount == 0);
++		(IMG_VOID)FreeDeviceID(psSysData, ui32DevIndex);
++		(IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE), psDCInfo->psFuncTable, IMG_NULL);
++		psDCInfo->psFuncTable = IMG_NULL;
++		(IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DISPLAYCLASS_INFO), psDCInfo, IMG_NULL);
++
++		(IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL);
++
++	}
++	else
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveDCDeviceKM: failed as %d Services DC API connections are still open", psDCInfo->ui32RefCount));
++		return PVRSRV_ERROR_GENERIC;
++	}
++
++	return PVRSRV_OK;
++}
++
++
++/* Register an external buffer-class device with Services.  Mirrors
++   PVRSRVRegisterDCDeviceKM for buffer-class devices (note: no
++   SysRegisterExternalDevice call is made for buffer-class nodes).
++   Returns PVRSRV_ERROR_OUT_OF_MEMORY on any failure. */
++PVRSRV_ERROR PVRSRVRegisterBCDeviceKM (PVRSRV_BC_SRV2BUFFER_KMJTABLE *psFuncTable,
++									   IMG_UINT32	*pui32DeviceID)
++{
++	PVRSRV_BUFFERCLASS_INFO	*psBCInfo = IMG_NULL;
++	PVRSRV_DEVICE_NODE		*psDeviceNode = IMG_NULL;
++	SYS_DATA				*psSysData;
++
++	SysAcquireData(&psSysData);
++
++	/* Allocate and zero the buffer-class info structure. */
++	if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++					 sizeof(*psBCInfo),
++					 (IMG_VOID **)&psBCInfo, IMG_NULL,
++					 "Buffer Class Info") != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psBCInfo alloc"));
++		return PVRSRV_ERROR_OUT_OF_MEMORY;
++	}
++	OSMemSet (psBCInfo, 0, sizeof(*psBCInfo));
++
++	/* Take a private copy of the caller's jump table. */
++	if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++					 sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE),
++					 (IMG_VOID **)&psBCInfo->psFuncTable, IMG_NULL,
++					 "Function table for SRVKM->BUFFER") != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psFuncTable alloc"));
++		goto ErrorExit;
++	}
++	OSMemSet (psBCInfo->psFuncTable, 0, sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE));
++
++	*psBCInfo->psFuncTable = *psFuncTable;
++
++	/* Allocate and initialise the device node. */
++	if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++					 sizeof(PVRSRV_DEVICE_NODE),
++					 (IMG_VOID **)&psDeviceNode, IMG_NULL,
++					 "Device Node") != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psDeviceNode alloc"));
++		goto ErrorExit;
++	}
++	OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE));
++
++	psDeviceNode->pvDevice = (IMG_VOID*)psBCInfo;
++	psDeviceNode->ui32pvDeviceSize = sizeof(*psBCInfo);
++	psDeviceNode->ui32RefCount = 1;
++	psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_EXT;
++	psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_BUFFER;
++	psDeviceNode->psSysData = psSysData;
++
++	/* Assign a unique device index for this node. */
++	if (AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex) != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed to allocate Device ID"));
++		goto ErrorExit;
++	}
++	psBCInfo->ui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++	if (pui32DeviceID)
++	{
++		*pui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++	}
++
++	/* Link the node into the global device list. */
++	List_PVRSRV_DEVICE_NODE_Insert(&psSysData->psDeviceNodeList, psDeviceNode);
++
++	return PVRSRV_OK;
++
++ErrorExit:
++
++	/* BUG FIX: free the device node too - it was previously leaked when
++	   AllocateDeviceID failed after the node had been allocated. */
++	if(psDeviceNode)
++	{
++		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL);
++	}
++
++	if(psBCInfo->psFuncTable)
++	{
++		/* BUG FIX: was sizeof(PPVRSRV_BC_SRV2BUFFER_KMJTABLE) - the size of a
++		   POINTER to the jump table - not the struct size used at allocation. */
++		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE), psBCInfo->psFuncTable, IMG_NULL);
++		psBCInfo->psFuncTable = IMG_NULL;
++	}
++
++	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BUFFERCLASS_INFO), psBCInfo, IMG_NULL);
++
++	return PVRSRV_ERROR_OUT_OF_MEMORY;
++}
++
++
++/* Unregister and destroy a buffer-class device previously registered via
++   PVRSRVRegisterBCDeviceKM.  Fails (PVRSRV_ERROR_GENERIC) if the device
++   index is unknown or Services connections to the device are still open. */
++PVRSRV_ERROR PVRSRVRemoveBCDeviceKM(IMG_UINT32 ui32DevIndex)
++{
++	SYS_DATA					*psSysData;
++	PVRSRV_DEVICE_NODE		*psDevNode;
++	PVRSRV_BUFFERCLASS_INFO	*psBCInfo;
++
++	SysAcquireData(&psSysData);
++
++	/* Look up the device node by index among buffer-class devices only. */
++	psDevNode = (PVRSRV_DEVICE_NODE*)
++		List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++									   MatchDeviceKM_AnyVaCb,
++									   ui32DevIndex,
++									   IMG_FALSE,
++									   PVRSRV_DEVICE_CLASS_BUFFER);
++
++	if (!psDevNode)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveBCDeviceKM: requested device %d not present", ui32DevIndex));
++		return PVRSRV_ERROR_GENERIC;
++	}
++
++	psBCInfo = (PVRSRV_BUFFERCLASS_INFO*)psDevNode->pvDevice;
++
++	/* Only tear down when no per-process connections remain open. */
++	if(psBCInfo->ui32RefCount == 0)
++	{
++		/* Unlink from the global device list before freeing anything. */
++		List_PVRSRV_DEVICE_NODE_Remove(psDevNode);
++
++		/* Release the device index, then the jump table, the info block
++		   and finally the node itself (reverse of allocation order). */
++		(IMG_VOID)FreeDeviceID(psSysData, ui32DevIndex);
++
++		(IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE), psBCInfo->psFuncTable, IMG_NULL);
++		psBCInfo->psFuncTable = IMG_NULL;
++		(IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BUFFERCLASS_INFO), psBCInfo, IMG_NULL);
++
++		(IMG_VOID)OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DEVICE_NODE), psDevNode, IMG_NULL);
++
++	}
++	else
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveBCDeviceKM: failed as %d Services BC API connections are still open", psBCInfo->ui32RefCount));
++		return PVRSRV_ERROR_GENERIC;
++	}
++
++	return PVRSRV_OK;
++}
++
++
++
++/* Close a per-process display-class connection.  The actual teardown is
++   delegated to the resource manager, which invokes CloseDCDeviceCallBack
++   on the registered resource item. */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVCloseDCDeviceKM (IMG_HANDLE hDeviceKM,
++									IMG_BOOL bResManCallback)
++{
++	PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psContext =
++							(PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)hDeviceKM;
++
++	PVR_UNREFERENCED_PARAMETER(bResManCallback);
++
++	/* Releasing the resman item triggers the close callback. */
++	return ResManFreeResByPtr(psContext->hResItem);
++}
++
++
++/* Resource-manager callback run when a per-process DC connection is freed.
++   Drops the device reference count; the last close also closes the
++   external display device and releases the system-buffer sync object. */
++static PVRSRV_ERROR CloseDCDeviceCallBack(IMG_PVOID		pvParam,
++										  IMG_UINT32	ui32Param)
++{
++	PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++
++	PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++	psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)pvParam;
++	psDCInfo = psDCPerContextInfo->psDCInfo;
++
++	psDCInfo->ui32RefCount--;
++	if(psDCInfo->ui32RefCount == 0)
++	{
++		/* Last close: shut down the external display device. */
++		psDCInfo->psFuncTable->pfnCloseDCDevice(psDCInfo->hExtDevice);
++
++		/* Drop the sync-info reference taken at first open. */
++		if (--psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount == 0)
++		{
++			PVRSRVFreeSyncInfoKM(psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo);
++		}
++
++		psDCInfo->hDevMemContext = IMG_NULL;
++		psDCInfo->hExtDevice = IMG_NULL;
++	}
++
++	/* Free the per-process context record itself. */
++	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO), psDCPerContextInfo, IMG_NULL);
++
++	return PVRSRV_OK;
++}
++
++
++/* Open a per-process connection to a display-class device.  The first open
++   binds the kernel memory context, creates the system-buffer sync object
++   and opens the external device; subsequent opens just add a reference.
++   Returns a handle to the per-context info via *phDeviceKM. */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVOpenDCDeviceKM (PVRSRV_PER_PROCESS_DATA	*psPerProc,
++								   IMG_UINT32				ui32DeviceID,
++								   IMG_HANDLE				hDevCookie,
++								   IMG_HANDLE				*phDeviceKM)
++{
++	PVRSRV_DISPLAYCLASS_INFO	*psDCInfo;
++	PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO	*psDCPerContextInfo;
++	PVRSRV_DEVICE_NODE			*psDeviceNode;
++	SYS_DATA					*psSysData;
++	PVRSRV_ERROR				eError;
++
++	if(!phDeviceKM || !hDevCookie)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Invalid params"));
++		return PVRSRV_ERROR_GENERIC;
++	}
++
++	SysAcquireData(&psSysData);
++
++	/* Find the display-class device node for this device index. */
++	psDeviceNode = (PVRSRV_DEVICE_NODE*)
++			List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++										   MatchDeviceKM_AnyVaCb,
++										   ui32DeviceID,
++										   IMG_FALSE,
++										   PVRSRV_DEVICE_CLASS_DISPLAY);
++	if (!psDeviceNode)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: no devnode matching index %d", ui32DeviceID));
++		return PVRSRV_ERROR_GENERIC;
++	}
++	psDCInfo = (PVRSRV_DISPLAYCLASS_INFO*)psDeviceNode->pvDevice;
++
++	/* Allocate the per-process context record. */
++	if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++				  sizeof(*psDCPerContextInfo),
++				  (IMG_VOID **)&psDCPerContextInfo, IMG_NULL,
++				  "Display Class per Context Info") != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed psDCPerContextInfo alloc"));
++		return PVRSRV_ERROR_OUT_OF_MEMORY;
++	}
++	OSMemSet(psDCPerContextInfo, 0, sizeof(*psDCPerContextInfo));
++
++	if(psDCInfo->ui32RefCount++ == 0)
++	{
++		/* First open: take the kernel memory context from the GPU device
++		   node passed in via hDevCookie. */
++		psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
++
++		psDCInfo->hDevMemContext = (IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext;
++
++		/* Create the sync object backing the device's system buffer. */
++		eError = PVRSRVAllocSyncInfoKM(IMG_NULL,
++									(IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext,
++									&psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo);
++		if(eError != PVRSRV_OK)
++		{
++			PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed sync info alloc"));
++			psDCInfo->ui32RefCount--;
++			/* BUG FIX: the per-context record was previously leaked here. */
++			OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psDCPerContextInfo), psDCPerContextInfo, IMG_NULL);
++			return eError;
++		}
++
++		/* Open the third-party display device itself. */
++		eError = psDCInfo->psFuncTable->pfnOpenDCDevice(ui32DeviceID,
++														&psDCInfo->hExtDevice,
++														(PVRSRV_SYNC_DATA*)psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM);
++		if(eError != PVRSRV_OK)
++		{
++			PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed to open external DC device"));
++			psDCInfo->ui32RefCount--;
++			PVRSRVFreeSyncInfoKM(psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo);
++			/* BUG FIX: the per-context record was previously leaked here. */
++			OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psDCPerContextInfo), psDCPerContextInfo, IMG_NULL);
++			return eError;
++		}
++
++		psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount++;
++	}
++
++	/* Register with resman so abnormal process exit still closes us. */
++	psDCPerContextInfo->psDCInfo = psDCInfo;
++	psDCPerContextInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext,
++													 RESMAN_TYPE_DISPLAYCLASS_DEVICE,
++													 psDCPerContextInfo,
++													 0,
++													 CloseDCDeviceCallBack);
++
++	*phDeviceKM = (IMG_HANDLE)psDCPerContextInfo;
++
++	return PVRSRV_OK;
++}
++
++
++/* Enumerate the pixel formats supported by a display-class device by
++   forwarding straight to the external driver's EnumDCFormats entry. */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVEnumDCFormatsKM (IMG_HANDLE hDeviceKM,
++									IMG_UINT32 *pui32Count,
++									DISPLAY_FORMAT *psFormat)
++{
++	PVRSRV_DISPLAYCLASS_INFO *psInfo;
++
++	if((hDeviceKM == IMG_NULL) || (pui32Count == IMG_NULL) || (psFormat == IMG_NULL))
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumDCFormatsKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++	return psInfo->psFuncTable->pfnEnumDCFormats(psInfo->hExtDevice, pui32Count, psFormat);
++}
++
++
++
++/* Enumerate the display dimensions supported for a given pixel format.
++   psDim is deliberately not NULL-checked here (matches existing
++   behaviour); it is passed through to the external driver as-is. */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVEnumDCDimsKM (IMG_HANDLE hDeviceKM,
++								 DISPLAY_FORMAT *psFormat,
++								 IMG_UINT32 *pui32Count,
++								 DISPLAY_DIMS *psDim)
++{
++	PVRSRV_DISPLAYCLASS_INFO *psInfo;
++
++	if((hDeviceKM == IMG_NULL) || (pui32Count == IMG_NULL) || (psFormat == IMG_NULL))
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumDCDimsKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++	return psInfo->psFuncTable->pfnEnumDCDims(psInfo->hExtDevice, psFormat, pui32Count, psDim);
++}
++
++
++/* Query the external display driver for its system (primary) buffer and
++   wrap it in the Services bookkeeping held in the display-class info.
++   Returns a handle to the wrapped buffer via *phBuffer. */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetDCSystemBufferKM (IMG_HANDLE hDeviceKM,
++										IMG_HANDLE *phBuffer)
++{
++	PVRSRV_DISPLAYCLASS_INFO *psInfo;
++	PVRSRV_DC_BUFFER *psSysBuf;
++	IMG_HANDLE hExtBuffer;
++	PVRSRV_ERROR eError;
++
++	if((hDeviceKM == IMG_NULL) || (phBuffer == IMG_NULL))
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCSystemBufferKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++	/* Ask the external driver for its system buffer handle. */
++	eError = psInfo->psFuncTable->pfnGetDCSystemBuffer(psInfo->hExtDevice, &hExtBuffer);
++	if(eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCSystemBufferKM: Failed to get valid buffer handle from external driver"));
++		return eError;
++	}
++
++	/* Record how to resolve this buffer's device address later. */
++	psSysBuf = &psInfo->sSystemBuffer;
++	psSysBuf->sDeviceClassBuffer.pfnGetBufferAddr = psInfo->psFuncTable->pfnGetBufferAddr;
++	psSysBuf->sDeviceClassBuffer.hDevMemContext = psInfo->hDevMemContext;
++	psSysBuf->sDeviceClassBuffer.hExtDevice = psInfo->hExtDevice;
++	psSysBuf->sDeviceClassBuffer.hExtBuffer = hExtBuffer;
++
++	psSysBuf->psDCInfo = psInfo;
++
++	*phBuffer = (IMG_HANDLE)psSysBuf;
++
++	return PVRSRV_OK;
++}
++
++
++/* Fetch DISPLAY_INFO from the external driver, clamping the reported
++   maximum swap-chain length to what Services can track. */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetDCInfoKM (IMG_HANDLE hDeviceKM,
++								DISPLAY_INFO *psDisplayInfo)
++{
++	PVRSRV_DISPLAYCLASS_INFO *psInfo;
++	PVRSRV_ERROR eResult;
++
++	if((hDeviceKM == IMG_NULL) || (psDisplayInfo == IMG_NULL))
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCInfoKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++	eResult = psInfo->psFuncTable->pfnGetDCInfo(psInfo->hExtDevice, psDisplayInfo);
++	if (eResult != PVRSRV_OK)
++	{
++		return eResult;
++	}
++
++	/* Never advertise more buffers than PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS. */
++	if (psDisplayInfo->ui32MaxSwapChainBuffers > PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS)
++	{
++		psDisplayInfo->ui32MaxSwapChainBuffers = PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS;
++	}
++
++	return PVRSRV_OK;
++}
++
++
++/* Drop a reference to (and possibly destroy) a swap chain via the resource
++   manager; DestroyDCSwapChainRefCallBack does the real work. */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVDestroyDCSwapChainKM(IMG_HANDLE hSwapChainRef)
++{
++	PVRSRV_DC_SWAPCHAIN_REF *psRef;
++
++	if(hSwapChainRef == IMG_NULL)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVDestroyDCSwapChainKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psRef = (PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef;
++
++	/* Releasing the resman item triggers the destroy callback. */
++	return ResManFreeResByPtr(psRef->hResItem);
++}
++
++
++/* Tear down a swap chain once its last reference is gone: unlink it from
++   the device's shared-chain list, destroy its command queue, ask the
++   external driver to destroy its side, release the per-buffer sync
++   objects and finally free the swap-chain structure itself. */
++static PVRSRV_ERROR DestroyDCSwapChain(PVRSRV_DC_SWAPCHAIN *psSwapChain)
++{
++	PVRSRV_ERROR				eError;
++	PVRSRV_DISPLAYCLASS_INFO	*psDCInfo = psSwapChain->psDCInfo;
++	IMG_UINT32 i;
++
++	/* Unlink from the shared swap-chain list (head or interior node). */
++	if( psDCInfo->psDCSwapChainShared )
++	{
++		if( psDCInfo->psDCSwapChainShared == psSwapChain )
++		{
++			psDCInfo->psDCSwapChainShared = psSwapChain->psNext;
++		}
++		else
++		{
++			PVRSRV_DC_SWAPCHAIN *psCurrentSwapChain;
++			psCurrentSwapChain = psDCInfo->psDCSwapChainShared;
++			while( psCurrentSwapChain->psNext )
++			{
++				if( psCurrentSwapChain->psNext != psSwapChain )
++				{
++					psCurrentSwapChain = psCurrentSwapChain->psNext;
++					continue;
++				}
++				psCurrentSwapChain->psNext = psSwapChain->psNext;
++				break;
++			}
++		}
++	}
++
++	/* Destroy the flip command queue before the external swap chain. */
++	PVRSRVDestroyCommandQueueKM(psSwapChain->psQueue);
++
++	/* Ask the third-party driver to destroy its side of the chain. */
++	eError = psDCInfo->psFuncTable->pfnDestroyDCSwapChain(psDCInfo->hExtDevice,
++															psSwapChain->hExtSwapChain);
++
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"DestroyDCSwapChainCallBack: Failed to destroy DC swap chain"));
++		return eError;
++	}
++
++	/* Drop the sync-info reference taken per buffer at creation time. */
++	for(i=0; i<psSwapChain->ui32BufferCount; i++)
++	{
++		if(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
++		{
++			if (--psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount == 0)
++			{
++				PVRSRVFreeSyncInfoKM(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++			}
++		}
++	}
++
++	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN), psSwapChain, IMG_NULL);
++
++	/* eError is PVRSRV_OK here (error case returned above). */
++	return eError;
++}
++
++
++/* Resman callback for a swap-chain reference: drop one reference and
++   destroy the underlying swap chain when the last reference goes away,
++   then free the reference record itself. */
++static PVRSRV_ERROR DestroyDCSwapChainRefCallBack(IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++	PVRSRV_DC_SWAPCHAIN_REF *psRef = (PVRSRV_DC_SWAPCHAIN_REF *) pvParam;
++	PVRSRV_ERROR eResult = PVRSRV_OK;
++
++	PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++	psRef->psSwapChain->ui32RefCount--;
++	if(psRef->psSwapChain->ui32RefCount == 0)
++	{
++		eResult = DestroyDCSwapChain(psRef->psSwapChain);
++	}
++
++	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN_REF), psRef, IMG_NULL);
++	return eResult;
++}
++
++/* Walk the device's shared swap-chain list looking for a chain with the
++   given ID; returns IMG_NULL when no match is found. */
++static PVRSRV_DC_SWAPCHAIN* PVRSRVFindSharedDCSwapChainKM(PVRSRV_DISPLAYCLASS_INFO *psDCInfo,
++											 IMG_UINT32 ui32SwapChainID)
++{
++	PVRSRV_DC_SWAPCHAIN *psChain = psDCInfo->psDCSwapChainShared;
++
++	while(psChain != IMG_NULL)
++	{
++		if(psChain->ui32SwapChainID == ui32SwapChainID)
++		{
++			return psChain;
++		}
++		psChain = psChain->psNext;
++	}
++	return IMG_NULL;
++}
++
++/* Allocate a per-process reference to an existing swap chain, bump the
++   chain's reference count and register the reference with the resource
++   manager so process death releases it automatically. */
++static PVRSRV_ERROR PVRSRVCreateDCSwapChainRefKM(PVRSRV_PER_PROCESS_DATA	*psPerProc,
++												 PVRSRV_DC_SWAPCHAIN 		*psSwapChain,
++												 PVRSRV_DC_SWAPCHAIN_REF 	**ppsSwapChainRef)
++{
++	PVRSRV_DC_SWAPCHAIN_REF *psRef = IMG_NULL;
++
++	if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++				   sizeof(PVRSRV_DC_SWAPCHAIN_REF),
++				   (IMG_VOID **)&psRef, IMG_NULL,
++				   "Display Class Swapchain Reference") != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainRefKM: Failed psSwapChainRef alloc"));
++		return PVRSRV_ERROR_OUT_OF_MEMORY;
++	}
++	OSMemSet (psRef, 0, sizeof(PVRSRV_DC_SWAPCHAIN_REF));
++
++	/* The new reference keeps the chain alive. */
++	psSwapChain->ui32RefCount++;
++
++	psRef->psSwapChain = psSwapChain;
++	psRef->hResItem = ResManRegisterRes(psPerProc->hResManContext,
++										RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN_REF,
++										psRef,
++										0,
++										&DestroyDCSwapChainRefCallBack);
++	*ppsSwapChainRef = psRef;
++
++	return PVRSRV_OK;
++}
++
++/* Create a display-class swap chain (or, with PVRSRV_CREATE_SWAPCHAIN_QUERY,
++   look up an existing shared one by *pui32SwapChainID and just add a
++   reference).  Allocates the chain, its command queue and one sync object
++   per buffer, then asks the external driver to create its side.  Returns
++   a swap-chain reference handle via *phSwapChainRef and the chain ID via
++   *pui32SwapChainID. */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVCreateDCSwapChainKM (PVRSRV_PER_PROCESS_DATA	*psPerProc,
++										IMG_HANDLE				hDeviceKM,
++										IMG_UINT32				ui32Flags,
++										DISPLAY_SURF_ATTRIBUTES	*psDstSurfAttrib,
++										DISPLAY_SURF_ATTRIBUTES	*psSrcSurfAttrib,
++										IMG_UINT32				ui32BufferCount,
++										IMG_UINT32				ui32OEMFlags,
++										IMG_HANDLE				*phSwapChainRef,
++										IMG_UINT32				*pui32SwapChainID)
++{
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_DC_SWAPCHAIN *psSwapChain = IMG_NULL;
++	PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef = IMG_NULL;
++	PVRSRV_SYNC_DATA *apsSyncData[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++	PVRSRV_QUEUE_INFO *psQueue = IMG_NULL;
++	PVRSRV_ERROR eError;
++	IMG_UINT32 i;
++	DISPLAY_INFO sDisplayInfo;
++
++	if(!hDeviceKM
++	|| !psDstSurfAttrib
++	|| !psSrcSurfAttrib
++	|| !phSwapChainRef
++	|| !pui32SwapChainID)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	if (ui32BufferCount > PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Too many buffers"));
++		return PVRSRV_ERROR_TOOMANYBUFFERS;
++	}
++
++	if (ui32BufferCount < 2)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Too few buffers"));
++		return PVRSRV_ERROR_TOO_FEW_BUFFERS;
++	}
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++	if( ui32Flags & PVRSRV_CREATE_SWAPCHAIN_QUERY )
++	{
++		/* Query mode: attach to an existing shared chain with this ID. */
++		psSwapChain = PVRSRVFindSharedDCSwapChainKM(psDCInfo, *pui32SwapChainID );
++		if( psSwapChain )
++		{
++			PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: found query"));
++
++			eError = PVRSRVCreateDCSwapChainRefKM(psPerProc,
++												  psSwapChain,
++												  &psSwapChainRef);
++			if( eError != PVRSRV_OK )
++			{
++				PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Couldn't create swap chain reference"));
++				return eError;
++			}
++
++			*phSwapChainRef = (IMG_HANDLE)psSwapChainRef;
++			return PVRSRV_OK;
++		}
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: No shared SwapChain found for query"));
++		return PVRSRV_ERROR_FLIP_CHAIN_EXISTS;
++	}
++
++	/* Allocate and zero the swap-chain structure. */
++	if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++					 sizeof(PVRSRV_DC_SWAPCHAIN),
++					 (IMG_VOID **)&psSwapChain, IMG_NULL,
++					 "Display Class Swapchain") != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed psSwapChain alloc"));
++		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++		goto ErrorExit;
++	}
++	OSMemSet (psSwapChain, 0, sizeof(PVRSRV_DC_SWAPCHAIN));
++
++	/* Create the command queue used to serialise flips on this chain. */
++	eError = PVRSRVCreateCommandQueueKM(1024, &psQueue);
++	if(eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to create CmdQueue"));
++		goto ErrorExit;
++	}
++
++	psSwapChain->psQueue = psQueue;
++
++	/* One kernel sync object per buffer in the chain. */
++	for(i=0; i<ui32BufferCount; i++)
++	{
++		eError = PVRSRVAllocSyncInfoKM(IMG_NULL,
++										psDCInfo->hDevMemContext,
++										&psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++		if(eError != PVRSRV_OK)
++		{
++			PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to alloc syninfo for psSwapChain"));
++			goto ErrorExit;
++		}
++
++		psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount++;
++
++		/* Record how to resolve each buffer's device address later. */
++		psSwapChain->asBuffer[i].sDeviceClassBuffer.pfnGetBufferAddr = psDCInfo->psFuncTable->pfnGetBufferAddr;
++		psSwapChain->asBuffer[i].sDeviceClassBuffer.hDevMemContext = psDCInfo->hDevMemContext;
++		psSwapChain->asBuffer[i].sDeviceClassBuffer.hExtDevice = psDCInfo->hExtDevice;
++
++		psSwapChain->asBuffer[i].psDCInfo = psDCInfo;
++		psSwapChain->asBuffer[i].psSwapChain = psSwapChain;
++
++		/* Sync data pointers handed to the external driver below. */
++		apsSyncData[i] = (PVRSRV_SYNC_DATA*)psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM;
++	}
++
++	psSwapChain->ui32BufferCount = ui32BufferCount;
++	psSwapChain->psDCInfo = psDCInfo;
++
++	eError = psDCInfo->psFuncTable->pfnGetDCInfo(psDCInfo->hExtDevice, &sDisplayInfo);
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to get DC info"));
++		/* BUG FIX: was "return eError" - leaked queue, sync infos and chain. */
++		goto ErrorExit;
++	}
++
++	/* Ask the third-party driver to create its side of the chain. */
++	eError = psDCInfo->psFuncTable->pfnCreateDCSwapChain(psDCInfo->hExtDevice,
++														ui32Flags,
++														psDstSurfAttrib,
++														psSrcSurfAttrib,
++														ui32BufferCount,
++														apsSyncData,
++														ui32OEMFlags,
++														&psSwapChain->hExtSwapChain,
++														&psSwapChain->ui32SwapChainID);
++	if(eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to create 3rd party SwapChain"));
++		goto ErrorExit;
++	}
++
++	/* Hand a per-process reference to the caller. */
++	eError = PVRSRVCreateDCSwapChainRefKM(psPerProc,
++										  psSwapChain,
++										  &psSwapChainRef);
++	if( eError != PVRSRV_OK )
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Couldn't create swap chain reference"));
++		goto ErrorExit;
++	}
++
++	psSwapChain->ui32RefCount = 1;
++	psSwapChain->ui32Flags = ui32Flags;
++
++	/* Shared chains are linked at the head of the device's shared list. */
++	if( ui32Flags & PVRSRV_CREATE_SWAPCHAIN_SHARED )
++	{
++		if(! psDCInfo->psDCSwapChainShared )
++		{
++			psDCInfo->psDCSwapChainShared = psSwapChain;
++		}
++		else
++		{
++			PVRSRV_DC_SWAPCHAIN *psOldHead = psDCInfo->psDCSwapChainShared;
++			psDCInfo->psDCSwapChainShared = psSwapChain;
++			psSwapChain->psNext = psOldHead;
++		}
++	}
++
++	*pui32SwapChainID = psSwapChain->ui32SwapChainID;
++
++	*phSwapChainRef= (IMG_HANDLE)psSwapChainRef;
++
++	return eError;
++
++ErrorExit:
++
++	/* BUG FIX: guard on psSwapChain - previously this loop dereferenced a
++	   NULL psSwapChain when the initial allocation itself failed. */
++	if(psSwapChain)
++	{
++		for(i=0; i<ui32BufferCount; i++)
++		{
++			if(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
++			{
++				if (--psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount == 0)
++				{
++					PVRSRVFreeSyncInfoKM(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++				}
++			}
++		}
++	}
++
++	if(psQueue)
++	{
++		PVRSRVDestroyCommandQueueKM(psQueue);
++	}
++
++	if(psSwapChain)
++	{
++		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_SWAPCHAIN), psSwapChain, IMG_NULL);
++
++	}
++
++	return eError;
++}
++
++/* Forward a destination-rectangle update for a swap chain to the external
++   display driver. */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDCDstRectKM(IMG_HANDLE	hDeviceKM,
++								  IMG_HANDLE	hSwapChainRef,
++								  IMG_RECT	*psRect)
++{
++	PVRSRV_DISPLAYCLASS_INFO *psInfo;
++	PVRSRV_DC_SWAPCHAIN *psChain;
++
++	if((hDeviceKM == IMG_NULL) || (hSwapChainRef == IMG_NULL))
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCDstRectKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++	psChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain;
++
++	return psInfo->psFuncTable->pfnSetDCDstRect(psInfo->hExtDevice,
++												psChain->hExtSwapChain,
++												psRect);
++}
++
++
++/* Forward a source-rectangle update for a swap chain to the external
++   display driver. */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDCSrcRectKM(IMG_HANDLE	hDeviceKM,
++								  IMG_HANDLE	hSwapChainRef,
++								  IMG_RECT	*psRect)
++{
++	PVRSRV_DISPLAYCLASS_INFO *psInfo;
++	PVRSRV_DC_SWAPCHAIN *psChain;
++
++	if((hDeviceKM == IMG_NULL) || (hSwapChainRef == IMG_NULL))
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCSrcRectKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++	psChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain;
++
++	return psInfo->psFuncTable->pfnSetDCSrcRect(psInfo->hExtDevice,
++												psChain->hExtSwapChain,
++												psRect);
++}
++
++
++/* Forward a destination colour-key update for a swap chain to the external
++   display driver. */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDCDstColourKeyKM(IMG_HANDLE	hDeviceKM,
++									   IMG_HANDLE	hSwapChainRef,
++									   IMG_UINT32	ui32CKColour)
++{
++	PVRSRV_DISPLAYCLASS_INFO *psInfo;
++	PVRSRV_DC_SWAPCHAIN *psChain;
++
++	if((hDeviceKM == IMG_NULL) || (hSwapChainRef == IMG_NULL))
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCDstColourKeyKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++	psChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain;
++
++	return psInfo->psFuncTable->pfnSetDCDstColourKey(psInfo->hExtDevice,
++													 psChain->hExtSwapChain,
++													 ui32CKColour);
++}
++
++
++/* Forward a source colour-key update for a swap chain to the external
++   display driver. */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDCSrcColourKeyKM(IMG_HANDLE	hDeviceKM,
++									   IMG_HANDLE	hSwapChainRef,
++									   IMG_UINT32	ui32CKColour)
++{
++	PVRSRV_DISPLAYCLASS_INFO *psInfo;
++	PVRSRV_DC_SWAPCHAIN *psChain;
++
++	if((hDeviceKM == IMG_NULL) || (hSwapChainRef == IMG_NULL))
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCSrcColourKeyKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++	psChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain;
++
++	return psInfo->psFuncTable->pfnSetDCSrcColourKey(psInfo->hExtDevice,
++													 psChain->hExtSwapChain,
++													 ui32CKColour);
++}
++
++
++/* Retrieve handles for all buffers in a swap chain.  Queries the external
++   driver for its buffer handles, records them in the chain's buffer
++   wrappers, and returns Services handles to those wrappers in phBuffer
++   (count in *pui32BufferCount). */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetDCBuffersKM(IMG_HANDLE	hDeviceKM,
++								  IMG_HANDLE	hSwapChainRef,
++								  IMG_UINT32	*pui32BufferCount,
++								  IMG_HANDLE	*phBuffer)
++{
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_DC_SWAPCHAIN *psSwapChain;
++	IMG_HANDLE ahExtBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++	PVRSRV_ERROR eError;
++	IMG_UINT32 i;
++
++	/* pui32BufferCount is now validated too - it is written below. */
++	if(!hDeviceKM || !hSwapChainRef || !phBuffer || !pui32BufferCount)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCBuffersKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++	psSwapChain = ((PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef)->psSwapChain;
++
++	eError = psDCInfo->psFuncTable->pfnGetDCBuffers(psDCInfo->hExtDevice,
++													psSwapChain->hExtSwapChain,
++													pui32BufferCount,
++													ahExtBuffer);
++	/* BUG FIX: bail out before consuming the outputs if the external driver
++	   failed - previously *pui32BufferCount/ahExtBuffer were used anyway. */
++	if (eError != PVRSRV_OK)
++	{
++		return eError;
++	}
++
++	PVR_ASSERT(*pui32BufferCount <= PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS);
++
++	/* Associate each external buffer handle with its Services wrapper. */
++	for(i=0; i<*pui32BufferCount; i++)
++	{
++		psSwapChain->asBuffer[i].sDeviceClassBuffer.hExtBuffer = ahExtBuffer[i];
++		phBuffer[i] = (IMG_HANDLE)&psSwapChain->asBuffer[i];
++	}
++
++	return eError;
++}
++
++/* Queue a flip to the given swap-chain buffer.  Builds a
++   DISPLAYCLASS_FLIP_COMMAND on the chain's command queue, dependent on the
++   target buffer's sync object and (when different) on the last-flipped
++   buffer's sync object, submits it and kicks queue processing.  A full
++   queue is reported as PVRSRV_ERROR_RETRY so the caller can try again. */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSwapToDCBufferKM(IMG_HANDLE	hDeviceKM,
++									IMG_HANDLE	hBuffer,
++									IMG_UINT32	ui32SwapInterval,
++									IMG_HANDLE	hPrivateTag,
++									IMG_UINT32	ui32ClipRectCount,
++									IMG_RECT	*psClipRect)
++{
++	PVRSRV_ERROR eError;
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_DC_BUFFER *psBuffer;
++	PVRSRV_QUEUE_INFO *psQueue;
++	DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
++	IMG_UINT32 i;
++	IMG_UINT32 ui32NumSrcSyncs = 1;
++	PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2];
++	PVRSRV_COMMAND *psCommand;
++
++	if(!hDeviceKM || !hBuffer || !psClipRect)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++#if defined(SUPPORT_LMA)
++	/* On LMA systems the power lock is held across queue manipulation. */
++	eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE);
++	if(eError != PVRSRV_OK)
++	{
++		return eError;
++	}
++#endif
++
++	psBuffer = (PVRSRV_DC_BUFFER*)hBuffer;
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++	psQueue = psBuffer->psSwapChain->psQueue;
++
++	/* First dependency: the buffer being flipped to. */
++	apsSrcSync[0] = psBuffer->sDeviceClassBuffer.psKernelSyncInfo;
++
++	/* Second dependency: the previously flipped buffer, if different. */
++	if(psBuffer->psSwapChain->psLastFlipBuffer &&
++		psBuffer != psBuffer->psSwapChain->psLastFlipBuffer)
++	{
++		apsSrcSync[1] = psBuffer->psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo;
++
++		ui32NumSrcSyncs++;
++	}
++
++	/* Reserve queue space for the flip command plus its clip rects. */
++	eError = PVRSRVInsertCommandKM (psQueue,
++									&psCommand,
++									psDCInfo->ui32DeviceID,
++									DC_FLIP_COMMAND,
++									0,
++									IMG_NULL,
++									ui32NumSrcSyncs,
++									apsSrcSync,
++									sizeof(DISPLAYCLASS_FLIP_COMMAND) + (sizeof(IMG_RECT) * ui32ClipRectCount));
++	if(eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to get space in queue"));
++		goto Exit;
++	}
++
++	psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)psCommand->pvData;
++
++	psFlipCmd->hExtDevice = psDCInfo->hExtDevice;
++
++	psFlipCmd->hExtSwapChain = psBuffer->psSwapChain->hExtSwapChain;
++
++	psFlipCmd->hExtBuffer = psBuffer->sDeviceClassBuffer.hExtBuffer;
++
++	psFlipCmd->hPrivateTag = hPrivateTag;
++
++	psFlipCmd->ui32ClipRectCount = ui32ClipRectCount;
++
++	/* Clip rects live immediately after the command structure. */
++	psFlipCmd->psClipRect = (IMG_RECT*)((IMG_UINT8*)psFlipCmd + sizeof(DISPLAYCLASS_FLIP_COMMAND));
++
++	for(i=0; i<ui32ClipRectCount; i++)
++	{
++		psFlipCmd->psClipRect[i] = psClipRect[i];
++	}
++
++	psFlipCmd->ui32SwapInterval = ui32SwapInterval;
++
++	eError = PVRSRVSubmitCommandKM (psQueue, psCommand);
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to submit command"));
++		goto Exit;
++	}
++
++	/* Kick queue processing; retry while processing is blocked until the
++	   hardware timeout expires. */
++	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++	{
++		if(PVRSRVProcessQueues(KERNEL_ID, IMG_FALSE) != PVRSRV_ERROR_PROCESSING_BLOCKED)
++		{
++			goto ProcessedQueues;
++		}
++		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++	} END_LOOP_UNTIL_TIMEOUT();
++
++	PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to process queues"));
++
++	eError = PVRSRV_ERROR_GENERIC;
++	goto Exit;
++
++ProcessedQueues:
++	/* Remember this buffer so the next flip can depend on it. */
++	psBuffer->psSwapChain->psLastFlipBuffer = psBuffer;
++
++Exit:
++
++	/* Translate "queue full" into a retryable error for the caller. */
++	if(eError == PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE)
++	{
++		eError = PVRSRV_ERROR_RETRY;
++	}
++
++#if defined(SUPPORT_LMA)
++	PVRSRVPowerUnlock(KERNEL_ID);
++#endif
++	return eError;
++}
++
++
++/* Queue a flip back to the display's system (primary) buffer.  Same
++   mechanics as PVRSRVSwapToDCBufferKM, but the target is the device's
++   system buffer, no clip rects are supplied and the swap interval is
++   fixed at 1.  A full queue is reported as PVRSRV_ERROR_RETRY. */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSwapToDCSystemKM(IMG_HANDLE	hDeviceKM,
++									IMG_HANDLE	hSwapChainRef)
++{
++	PVRSRV_ERROR eError;
++	PVRSRV_QUEUE_INFO *psQueue;
++	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++	PVRSRV_DC_SWAPCHAIN *psSwapChain;
++	PVRSRV_DC_SWAPCHAIN_REF *psSwapChainRef;
++	DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
++	IMG_UINT32 ui32NumSrcSyncs = 1;
++	PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2];
++	PVRSRV_COMMAND *psCommand;
++
++	if(!hDeviceKM || !hSwapChainRef)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Invalid parameters"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++#if defined(SUPPORT_LMA)
++	/* On LMA systems the power lock is held across queue manipulation. */
++	eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE);
++	if(eError != PVRSRV_OK)
++	{
++		return eError;
++	}
++#endif
++
++	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++	psSwapChainRef = (PVRSRV_DC_SWAPCHAIN_REF*)hSwapChainRef;
++	psSwapChain = psSwapChainRef->psSwapChain;
++
++	psQueue = psSwapChain->psQueue;
++
++	/* First dependency: the system buffer's sync object. */
++	apsSrcSync[0] = psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo;
++
++	/* Second dependency: the previously flipped buffer, if different. */
++	if(psSwapChain->psLastFlipBuffer)
++	{
++		if (apsSrcSync[0] != psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo)
++		{
++			apsSrcSync[1] = psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo;
++
++			ui32NumSrcSyncs++;
++		}
++	}
++
++	/* Reserve queue space for the flip command (no clip rects). */
++	eError = PVRSRVInsertCommandKM (psQueue,
++									&psCommand,
++									psDCInfo->ui32DeviceID,
++									DC_FLIP_COMMAND,
++									0,
++									IMG_NULL,
++									ui32NumSrcSyncs,
++									apsSrcSync,
++									sizeof(DISPLAYCLASS_FLIP_COMMAND));
++	if(eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to get space in queue"));
++		goto Exit;
++	}
++
++	psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)psCommand->pvData;
++
++	psFlipCmd->hExtDevice = psDCInfo->hExtDevice;
++
++	psFlipCmd->hExtSwapChain = psSwapChain->hExtSwapChain;
++
++	/* Target is the system buffer rather than a swap-chain buffer. */
++	psFlipCmd->hExtBuffer = psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer;
++
++	psFlipCmd->hPrivateTag = IMG_NULL;
++
++	psFlipCmd->ui32ClipRectCount = 0;
++
++	psFlipCmd->ui32SwapInterval = 1;
++
++	eError = PVRSRVSubmitCommandKM (psQueue, psCommand);
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to submit command"));
++		goto Exit;
++	}
++
++	/* Kick queue processing; retry while processing is blocked until the
++	   hardware timeout expires. */
++	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++	{
++		if(PVRSRVProcessQueues(KERNEL_ID, IMG_FALSE) != PVRSRV_ERROR_PROCESSING_BLOCKED)
++		{
++			goto ProcessedQueues;
++		}
++
++		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++	} END_LOOP_UNTIL_TIMEOUT();
++
++	PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to process queues"));
++	eError = PVRSRV_ERROR_GENERIC;
++	goto Exit;
++
++ProcessedQueues:
++	/* Remember the system buffer so the next flip can depend on it. */
++	psSwapChain->psLastFlipBuffer = &psDCInfo->sSystemBuffer;
++
++	eError = PVRSRV_OK;
++
++Exit:
++
++	/* Translate "queue full" into a retryable error for the caller. */
++	if(eError == PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE)
++	{
++		eError = PVRSRV_ERROR_RETRY;
++	}
++
++#if defined(SUPPORT_LMA)
++	PVRSRVPowerUnlock(KERNEL_ID);
++#endif
++	return eError;
++}
++
++
++PVRSRV_ERROR PVRSRVRegisterSystemISRHandler (PFN_ISR_HANDLER pfnISRHandler,
++ IMG_VOID *pvISRHandlerData,
++ IMG_UINT32 ui32ISRSourceMask,
++ IMG_UINT32 ui32DeviceID)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_DEVICE_NODE *psDevNode;
++
++ PVR_UNREFERENCED_PARAMETER(ui32ISRSourceMask);
++
++ SysAcquireData(&psSysData);
++
++
++ psDevNode = (PVRSRV_DEVICE_NODE*)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ MatchDeviceKM_AnyVaCb,
++ ui32DeviceID,
++ IMG_TRUE);
++
++ if (psDevNode == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterSystemISRHandler: Failed to get psDevNode"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ psDevNode->pvISRData = (IMG_VOID*) pvISRHandlerData;
++
++
++ psDevNode->pfnDeviceISR = pfnISRHandler;
++
++ return PVRSRV_OK;
++}
++
++IMG_VOID PVRSRVSetDCState_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
++{
++ PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++ IMG_UINT32 ui32State;
++ ui32State = va_arg(va, IMG_UINT32);
++
++ if (psDeviceNode->sDevId.eDeviceClass == PVRSRV_DEVICE_CLASS_DISPLAY)
++ {
++ psDCInfo = (PVRSRV_DISPLAYCLASS_INFO *)psDeviceNode->pvDevice;
++ if (psDCInfo->psFuncTable->pfnSetDCState && psDCInfo->hExtDevice)
++ {
++ psDCInfo->psFuncTable->pfnSetDCState(psDCInfo->hExtDevice, ui32State);
++ }
++ }
++}
++
++
++IMG_VOID IMG_CALLCONV PVRSRVSetDCState(IMG_UINT32 ui32State)
++{
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList,
++ PVRSRVSetDCState_ForEachVaCb,
++ ui32State);
++}
++
++
++IMG_EXPORT
++IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable)
++{
++ psJTable->ui32TableSize = sizeof(PVRSRV_DC_DISP2SRV_KMJTABLE);
++ psJTable->pfnPVRSRVRegisterDCDevice = PVRSRVRegisterDCDeviceKM;
++ psJTable->pfnPVRSRVRemoveDCDevice = PVRSRVRemoveDCDeviceKM;
++ psJTable->pfnPVRSRVOEMFunction = SysOEMFunction;
++ psJTable->pfnPVRSRVRegisterCmdProcList = PVRSRVRegisterCmdProcListKM;
++ psJTable->pfnPVRSRVRemoveCmdProcList = PVRSRVRemoveCmdProcListKM;
++#if defined(SUPPORT_MISR_IN_THREAD)
++ psJTable->pfnPVRSRVCmdComplete = OSVSyncMISR;
++#else
++ psJTable->pfnPVRSRVCmdComplete = PVRSRVCommandCompleteKM;
++#endif
++ psJTable->pfnPVRSRVRegisterSystemISRHandler = PVRSRVRegisterSystemISRHandler;
++ psJTable->pfnPVRSRVRegisterPowerDevice = PVRSRVRegisterPowerDevice;
++
++ return IMG_TRUE;
++}
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVCloseBCDeviceKM (IMG_HANDLE hDeviceKM,
++ IMG_BOOL bResManCallback)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++
++ PVR_UNREFERENCED_PARAMETER(bResManCallback);
++
++ psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)hDeviceKM;
++
++
++ eError = ResManFreeResByPtr(psBCPerContextInfo->hResItem);
++
++ return eError;
++}
++
++
++static PVRSRV_ERROR CloseBCDeviceCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++ PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)pvParam;
++ psBCInfo = psBCPerContextInfo->psBCInfo;
++
++ psBCInfo->ui32RefCount--;
++ if(psBCInfo->ui32RefCount == 0)
++ {
++ IMG_UINT32 i;
++
++
++ psBCInfo->psFuncTable->pfnCloseBCDevice(psBCInfo->hExtDevice);
++
++
++ for(i=0; i<psBCInfo->ui32BufferCount; i++)
++ {
++ if(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
++ {
++ if (--psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount == 0)
++ {
++ PVRSRVFreeSyncInfoKM(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++ }
++ }
++ }
++
++
++ if(psBCInfo->psBuffer)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BC_BUFFER), psBCInfo->psBuffer, IMG_NULL);
++ psBCInfo->psBuffer = IMG_NULL;
++ }
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BUFFERCLASS_PERCONTEXT_INFO), psBCPerContextInfo, IMG_NULL);
++
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVOpenBCDeviceKM (PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_UINT32 ui32DeviceID,
++ IMG_HANDLE hDevCookie,
++ IMG_HANDLE *phDeviceKM)
++{
++ PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++ PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SYS_DATA *psSysData;
++ IMG_UINT32 i;
++ PVRSRV_ERROR eError;
++
++ if(!phDeviceKM || !hDevCookie)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Invalid params"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ SysAcquireData(&psSysData);
++
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE*)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ MatchDeviceKM_AnyVaCb,
++ ui32DeviceID,
++ IMG_FALSE,
++ PVRSRV_DEVICE_CLASS_BUFFER);
++ if (!psDeviceNode)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: No devnode matching index %d", ui32DeviceID));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ psBCInfo = (PVRSRV_BUFFERCLASS_INFO*)psDeviceNode->pvDevice;
++
++
++
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*psBCPerContextInfo),
++ (IMG_VOID **)&psBCPerContextInfo, IMG_NULL,
++ "Buffer Class per Context Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed psBCPerContextInfo alloc"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ OSMemSet(psBCPerContextInfo, 0, sizeof(*psBCPerContextInfo));
++
++ if(psBCInfo->ui32RefCount++ == 0)
++ {
++ BUFFER_INFO sBufferInfo;
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
++
++
++ psBCInfo->hDevMemContext = (IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext;
++
++
++ eError = psBCInfo->psFuncTable->pfnOpenBCDevice(&psBCInfo->hExtDevice);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to open external BC device"));
++ return eError;
++ }
++
++
++ eError = psBCInfo->psFuncTable->pfnGetBCInfo(psBCInfo->hExtDevice, &sBufferInfo);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM : Failed to get BC Info"));
++ return eError;
++ }
++
++
++ psBCInfo->ui32BufferCount = sBufferInfo.ui32BufferCount;
++
++
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_BC_BUFFER) * sBufferInfo.ui32BufferCount,
++ (IMG_VOID **)&psBCInfo->psBuffer,
++ IMG_NULL,
++ "Array of Buffer Class Buffer");
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to allocate BC buffers"));
++ return eError;
++ }
++ OSMemSet (psBCInfo->psBuffer,
++ 0,
++ sizeof(PVRSRV_BC_BUFFER) * sBufferInfo.ui32BufferCount);
++
++ for(i=0; i<psBCInfo->ui32BufferCount; i++)
++ {
++
++ eError = PVRSRVAllocSyncInfoKM(IMG_NULL,
++ psBCInfo->hDevMemContext,
++ &psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed sync info alloc"));
++ goto ErrorExit;
++ }
++
++ psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount++;
++
++
++
++
++ eError = psBCInfo->psFuncTable->pfnGetBCBuffer(psBCInfo->hExtDevice,
++ i,
++ psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->psSyncData,
++ &psBCInfo->psBuffer[i].sDeviceClassBuffer.hExtBuffer);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to get BC buffers"));
++ goto ErrorExit;
++ }
++
++
++ psBCInfo->psBuffer[i].sDeviceClassBuffer.pfnGetBufferAddr = psBCInfo->psFuncTable->pfnGetBufferAddr;
++ psBCInfo->psBuffer[i].sDeviceClassBuffer.hDevMemContext = psBCInfo->hDevMemContext;
++ psBCInfo->psBuffer[i].sDeviceClassBuffer.hExtDevice = psBCInfo->hExtDevice;
++ }
++ }
++
++ psBCPerContextInfo->psBCInfo = psBCInfo;
++ psBCPerContextInfo->hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_BUFFERCLASS_DEVICE,
++ psBCPerContextInfo,
++ 0,
++ CloseBCDeviceCallBack);
++
++
++ *phDeviceKM = (IMG_HANDLE)psBCPerContextInfo;
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++
++ for(i=0; i<psBCInfo->ui32BufferCount; i++)
++ {
++ if(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
++ {
++ if (--psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->ui32RefCount == 0)
++ {
++ PVRSRVFreeSyncInfoKM(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++ }
++ }
++ }
++
++
++ if(psBCInfo->psBuffer)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_BC_BUFFER), psBCInfo->psBuffer, IMG_NULL);
++ psBCInfo->psBuffer = IMG_NULL;
++ }
++
++ return eError;
++}
++
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetBCInfoKM (IMG_HANDLE hDeviceKM,
++ BUFFER_INFO *psBufferInfo)
++{
++ PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++ PVRSRV_ERROR eError;
++
++ if(!hDeviceKM || !psBufferInfo)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCInfoKM: Invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psBCInfo = BCDeviceHandleToBCInfo(hDeviceKM);
++
++ eError = psBCInfo->psFuncTable->pfnGetBCInfo(psBCInfo->hExtDevice, psBufferInfo);
++
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCInfoKM : Failed to get BC Info"));
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetBCBufferKM (IMG_HANDLE hDeviceKM,
++ IMG_UINT32 ui32BufferIndex,
++ IMG_HANDLE *phBuffer)
++{
++ PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++
++ if(!hDeviceKM || !phBuffer)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCBufferKM: Invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psBCInfo = BCDeviceHandleToBCInfo(hDeviceKM);
++
++ if(ui32BufferIndex < psBCInfo->ui32BufferCount)
++ {
++ *phBuffer = (IMG_HANDLE)&psBCInfo->psBuffer[ui32BufferIndex];
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCBufferKM: Buffer index %d out of range (%d)", ui32BufferIndex,psBCInfo->ui32BufferCount));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++IMG_BOOL PVRGetBufferClassJTable(PVRSRV_BC_BUFFER2SRV_KMJTABLE *psJTable)
++{
++ psJTable->ui32TableSize = sizeof(PVRSRV_BC_BUFFER2SRV_KMJTABLE);
++
++ psJTable->pfnPVRSRVRegisterBCDevice = PVRSRVRegisterBCDeviceKM;
++ psJTable->pfnPVRSRVRemoveBCDevice = PVRSRVRemoveBCDeviceKM;
++
++ return IMG_TRUE;
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/devicemem.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/devicemem.c
+new file mode 100644
+index 0000000..ed60870
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/devicemem.c
+@@ -0,0 +1,1448 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "pdump_km.h"
++#include "pvr_bridge_km.h"
++
++static PVRSRV_ERROR AllocDeviceMem(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemHeap,
++ IMG_UINT32 ui32Flags,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Alignment,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
++
++typedef struct _RESMAN_MAP_DEVICE_MEM_DATA_
++{
++
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++
++ PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo;
++} RESMAN_MAP_DEVICE_MEM_DATA;
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapsKM(IMG_HANDLE hDevCookie,
++ PVRSRV_HEAP_INFO *psHeapInfo)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_UINT32 ui32HeapCount;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ IMG_UINT32 i;
++
++ if (hDevCookie == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetDeviceMemHeapsKM: hDevCookie invalid"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
++
++
++ ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
++ psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++
++
++ PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS);
++
++
++ for(i=0; i<ui32HeapCount; i++)
++ {
++
++ psHeapInfo[i].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[i].hDevMemHeap = psDeviceMemoryHeap[i].hDevMemHeap;
++ psHeapInfo[i].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[i].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[i].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
++ }
++
++ for(; i < PVRSRV_MAX_CLIENT_HEAPS; i++)
++ {
++ OSMemSet(psHeapInfo + i, 0, sizeof(*psHeapInfo));
++ psHeapInfo[i].ui32HeapID = (IMG_UINT32)PVRSRV_UNDEFINED_HEAP_ID;
++ }
++
++ return PVRSRV_OK;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContextKM(IMG_HANDLE hDevCookie,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE *phDevMemContext,
++ IMG_UINT32 *pui32ClientHeapCount,
++ PVRSRV_HEAP_INFO *psHeapInfo,
++ IMG_BOOL *pbCreated,
++ IMG_BOOL *pbShared)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_UINT32 ui32HeapCount, ui32ClientHeapCount=0;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ IMG_HANDLE hDevMemContext;
++ IMG_HANDLE hDevMemHeap;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ IMG_UINT32 i;
++
++#if !defined(PVR_SECURE_HANDLES)
++ PVR_UNREFERENCED_PARAMETER(pbShared);
++#endif
++
++ if (hDevCookie == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVCreateDeviceMemContextKM: hDevCookie invalid"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
++
++
++
++ ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
++ psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++
++
++
++ PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS);
++
++
++
++ hDevMemContext = BM_CreateContext(psDeviceNode,
++ &sPDDevPAddr,
++ psPerProc,
++ pbCreated);
++ if (hDevMemContext == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDeviceMemContextKM: Failed BM_CreateContext"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ for(i=0; i<ui32HeapCount; i++)
++ {
++ switch(psDeviceMemoryHeap[i].DevMemHeapType)
++ {
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++
++ psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[ui32ClientHeapCount].hDevMemHeap = psDeviceMemoryHeap[i].hDevMemHeap;
++ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
++#if defined(PVR_SECURE_HANDLES)
++ pbShared[ui32ClientHeapCount] = IMG_TRUE;
++#endif
++ ui32ClientHeapCount++;
++ break;
++ }
++ case DEVICE_MEMORY_HEAP_PERCONTEXT:
++ {
++ hDevMemHeap = BM_CreateHeap(hDevMemContext,
++ &psDeviceMemoryHeap[i]);
++
++
++ psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[ui32ClientHeapCount].hDevMemHeap = hDevMemHeap;
++ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
++#if defined(PVR_SECURE_HANDLES)
++ pbShared[ui32ClientHeapCount] = IMG_FALSE;
++#endif
++
++ ui32ClientHeapCount++;
++ break;
++ }
++ }
++ }
++
++
++ *pui32ClientHeapCount = ui32ClientHeapCount;
++ *phDevMemContext = hDevMemContext;
++
++ return PVRSRV_OK;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContextKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ IMG_BOOL *pbDestroyed)
++{
++ PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++ return BM_DestroyContext(hDevMemContext, pbDestroyed);
++}
++
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapInfoKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ IMG_UINT32 *pui32ClientHeapCount,
++ PVRSRV_HEAP_INFO *psHeapInfo,
++ IMG_BOOL *pbShared)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_UINT32 ui32HeapCount, ui32ClientHeapCount=0;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ IMG_HANDLE hDevMemHeap;
++ IMG_UINT32 i;
++
++#if !defined(PVR_SECURE_HANDLES)
++ PVR_UNREFERENCED_PARAMETER(pbShared);
++#endif
++
++ if (hDevCookie == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetDeviceMemHeapInfoKM: hDevCookie invalid"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
++
++
++
++ ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
++ psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++
++
++
++ PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS);
++
++
++ for(i=0; i<ui32HeapCount; i++)
++ {
++ switch(psDeviceMemoryHeap[i].DevMemHeapType)
++ {
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++
++ psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[ui32ClientHeapCount].hDevMemHeap = psDeviceMemoryHeap[i].hDevMemHeap;
++ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
++#if defined(PVR_SECURE_HANDLES)
++ pbShared[ui32ClientHeapCount] = IMG_TRUE;
++#endif
++ ui32ClientHeapCount++;
++ break;
++ }
++ case DEVICE_MEMORY_HEAP_PERCONTEXT:
++ {
++ hDevMemHeap = BM_CreateHeap(hDevMemContext,
++ &psDeviceMemoryHeap[i]);
++
++
++ psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
++ psHeapInfo[ui32ClientHeapCount].hDevMemHeap = hDevMemHeap;
++ psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
++ psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
++ psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
++#if defined(PVR_SECURE_HANDLES)
++ pbShared[ui32ClientHeapCount] = IMG_FALSE;
++#endif
++
++ ui32ClientHeapCount++;
++ break;
++ }
++ }
++ }
++
++
++ *pui32ClientHeapCount = ui32ClientHeapCount;
++
++ return PVRSRV_OK;
++}
++
++
++static PVRSRV_ERROR AllocDeviceMem(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemHeap,
++ IMG_UINT32 ui32Flags,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Alignment,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ BM_HANDLE hBuffer;
++
++ PVRSRV_MEMBLK *psMemBlock;
++ IMG_BOOL bBMError;
++
++ PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++ *ppsMemInfo = IMG_NULL;
++
++ if(OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ (IMG_VOID **)&psMemInfo, IMG_NULL,
++ "Kernel Memory Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"AllocDeviceMem: Failed to alloc memory for block"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++
++ OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
++
++ psMemBlock = &(psMemInfo->sMemBlk);
++
++
++ psMemInfo->ui32Flags = ui32Flags | PVRSRV_MEM_RAM_BACKED_ALLOCATION;
++
++ bBMError = BM_Alloc (hDevMemHeap,
++ IMG_NULL,
++ ui32Size,
++ &psMemInfo->ui32Flags,
++ IMG_CAST_TO_DEVVADDR_UINT(ui32Alignment),
++ &hBuffer);
++
++ if (!bBMError)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"AllocDeviceMem: BM_Alloc Failed"));
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++
++
++ psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
++
++
++
++ psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
++
++ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++
++ psMemInfo->ui32AllocSize = ui32Size;
++
++
++ psMemInfo->pvSysBackupBuffer = IMG_NULL;
++
++
++ *ppsMemInfo = psMemInfo;
++
++
++ return (PVRSRV_OK);
++}
++
++
++static PVRSRV_ERROR FreeDeviceMem(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ BM_HANDLE hBuffer;
++
++ if (!psMemInfo)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ hBuffer = psMemInfo->sMemBlk.hBuffer;
++
++
++ BM_Free(hBuffer, psMemInfo->ui32Flags);
++
++ if(psMemInfo->pvSysBackupBuffer)
++ {
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, psMemInfo->ui32AllocSize, psMemInfo->pvSysBackupBuffer, IMG_NULL);
++ psMemInfo->pvSysBackupBuffer = IMG_NULL;
++ }
++
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
++
++
++ return(PVRSRV_OK);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocSyncInfoKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ PVRSRV_KERNEL_SYNC_INFO **ppsKernelSyncInfo)
++{
++ IMG_HANDLE hSyncDevMemHeap;
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ BM_CONTEXT *pBMContext;
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++ PVRSRV_SYNC_DATA *psSyncData;
++
++ eError = OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_KERNEL_SYNC_INFO),
++ (IMG_VOID **)&psKernelSyncInfo, IMG_NULL,
++ "Kernel Synchronization Info");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSyncInfoKM: Failed to alloc memory"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ psKernelSyncInfo->ui32RefCount = 0;
++
++
++ pBMContext = (BM_CONTEXT*)hDevMemContext;
++ psDevMemoryInfo = &pBMContext->psDeviceNode->sDevMemoryInfo;
++
++
++ hSyncDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[psDevMemoryInfo->ui32SyncHeapID].hDevMemHeap;
++
++
++
++
++ eError = AllocDeviceMem(hDevCookie,
++ hSyncDevMemHeap,
++ PVRSRV_MEM_CACHE_CONSISTENT,
++ sizeof(PVRSRV_SYNC_DATA),
++ sizeof(IMG_UINT32),
++ &psKernelSyncInfo->psSyncDataMemInfoKM);
++
++ if (eError != PVRSRV_OK)
++ {
++
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSyncInfoKM: Failed to alloc memory"));
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_SYNC_INFO), psKernelSyncInfo, IMG_NULL);
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ psKernelSyncInfo->psSyncData = psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM;
++ psSyncData = psKernelSyncInfo->psSyncData;
++
++ psSyncData->ui32WriteOpsPending = 0;
++ psSyncData->ui32WriteOpsComplete = 0;
++ psSyncData->ui32ReadOpsPending = 0;
++ psSyncData->ui32ReadOpsComplete = 0;
++ psSyncData->ui32LastOpDumpVal = 0;
++ psSyncData->ui32LastReadOpDumpVal = 0;
++
++#if defined(PDUMP)
++ PDUMPMEM(psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM,
++ psKernelSyncInfo->psSyncDataMemInfoKM,
++ 0,
++ psKernelSyncInfo->psSyncDataMemInfoKM->ui32AllocSize,
++ PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psKernelSyncInfo->psSyncDataMemInfoKM));
++#endif
++
++ psKernelSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr = psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr + offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete);
++ psKernelSyncInfo->sReadOpsCompleteDevVAddr.uiAddr = psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr + offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete);
++
++
++ psKernelSyncInfo->psSyncDataMemInfoKM->psKernelSyncInfo = IMG_NULL;
++
++
++ psKernelSyncInfo->hResItem = IMG_NULL;
++
++
++ *ppsKernelSyncInfo = psKernelSyncInfo;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo)
++{
++ PVRSRV_ERROR eError;
++
++ if (psKernelSyncInfo->ui32RefCount != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "oops: sync info ref count not zero at destruction"));
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ eError = FreeDeviceMem(psKernelSyncInfo->psSyncDataMemInfoKM);
++ (IMG_VOID)OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_SYNC_INFO), psKernelSyncInfo, IMG_NULL);
++
++
++ return eError;
++}
++
++
++static PVRSRV_ERROR FreeDeviceMemCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++
++ psMemInfo->ui32RefCount--;
++
++
++ if(psMemInfo->ui32Flags & PVRSRV_MEM_EXPORTED)
++ {
++ IMG_HANDLE hMemInfo = IMG_NULL;
++
++
++ if (psMemInfo->ui32RefCount != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeDeviceMemCallBack: mappings are open in other processes"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ eError = PVRSRVFindHandle(KERNEL_HANDLE_BASE,
++ &hMemInfo,
++ psMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeDeviceMemCallBack: can't find exported meminfo in the global handle list"));
++ return eError;
++ }
++
++
++ eError = PVRSRVReleaseHandle(KERNEL_HANDLE_BASE,
++ hMemInfo,
++ PVRSRV_HANDLE_TYPE_MEM_INFO);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeDeviceMemCallBack: PVRSRVReleaseHandle failed for exported meminfo"));
++ return eError;
++ }
++ }
++
++ PVR_ASSERT(psMemInfo->ui32RefCount == 0);
++
++ if (psMemInfo->psKernelSyncInfo)
++ {
++ psMemInfo->psKernelSyncInfo->ui32RefCount--;
++
++ if (psMemInfo->psKernelSyncInfo->ui32RefCount == 0)
++ {
++ eError = PVRSRVFreeSyncInfoKM(psMemInfo->psKernelSyncInfo);
++ }
++ }
++
++ if (eError == PVRSRV_OK)
++ {
++ eError = FreeDeviceMem(psMemInfo);
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMemKM(IMG_HANDLE hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++ if (!psMemInfo)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ if (psMemInfo->sMemBlk.hResItem != IMG_NULL)
++ {
++ eError = ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem);
++ }
++ else
++ {
++
++ eError = FreeDeviceMemCallBack(psMemInfo, 0);
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV _PVRSRVAllocDeviceMemKM(IMG_HANDLE hDevCookie,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevMemHeap,
++ IMG_UINT32 ui32Flags,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Alignment,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ PVRSRV_ERROR eError;
++ BM_HEAP *psBMHeap;
++ IMG_HANDLE hDevMemContext;
++
++ if (!hDevMemHeap ||
++ (ui32Size == 0))
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ if (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ if (((ui32Size % HOST_PAGESIZE()) != 0) ||
++ ((ui32Alignment % HOST_PAGESIZE()) != 0))
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++
++ eError = AllocDeviceMem(hDevCookie,
++ hDevMemHeap,
++ ui32Flags,
++ ui32Size,
++ ui32Alignment,
++ &psMemInfo);
++
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ if (ui32Flags & PVRSRV_MEM_NO_SYNCOBJ)
++ {
++ psMemInfo->psKernelSyncInfo = IMG_NULL;
++ }
++ else
++ {
++
++
++
++ psBMHeap = (BM_HEAP*)hDevMemHeap;
++ hDevMemContext = (IMG_HANDLE)psBMHeap->pBMContext;
++ eError = PVRSRVAllocSyncInfoKM(hDevCookie,
++ hDevMemContext,
++ &psMemInfo->psKernelSyncInfo);
++ if(eError != PVRSRV_OK)
++ {
++ goto free_mainalloc;
++ }
++ psMemInfo->psKernelSyncInfo->ui32RefCount++;
++ }
++
++
++ *ppsMemInfo = psMemInfo;
++
++ if (ui32Flags & PVRSRV_MEM_NO_RESMAN)
++ {
++ psMemInfo->sMemBlk.hResItem = IMG_NULL;
++ }
++ else
++ {
++
++ psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DEVICEMEM_ALLOCATION,
++ psMemInfo,
++ 0,
++ FreeDeviceMemCallBack);
++ if (psMemInfo->sMemBlk.hResItem == IMG_NULL)
++ {
++
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto free_mainalloc;
++ }
++ }
++
++
++ psMemInfo->ui32RefCount++;
++
++
++ return (PVRSRV_OK);
++
++free_mainalloc:
++ FreeDeviceMem(psMemInfo);
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDissociateDeviceMemKM(IMG_HANDLE hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevCookie;
++
++ PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++ if (!psMemInfo)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ eError = ResManDissociateRes(psMemInfo->sMemBlk.hResItem, psDeviceNode->hResManContext);
++
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetFreeDeviceMemKM(IMG_UINT32 ui32Flags,
++ IMG_SIZE_T *pui32Total,
++ IMG_SIZE_T *pui32Free,
++ IMG_SIZE_T *pui32LargestBlock)
++{
++
++
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++ PVR_UNREFERENCED_PARAMETER(pui32Total);
++ PVR_UNREFERENCED_PARAMETER(pui32Free);
++ PVR_UNREFERENCED_PARAMETER(pui32LargestBlock);
++
++ return PVRSRV_OK;
++}
++
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemoryKM (PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ if (!psMemInfo)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem);
++}
++
++
++static PVRSRV_ERROR UnwrapExtMemoryCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = pvParam;
++ IMG_HANDLE hOSWrapMem;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ hOSWrapMem = psMemInfo->sMemBlk.hOSWrapMem;
++
++ if (psMemInfo->psKernelSyncInfo)
++ {
++ psMemInfo->psKernelSyncInfo->ui32RefCount--;
++ if (psMemInfo->psKernelSyncInfo->ui32RefCount == 0)
++ {
++ eError = PVRSRVFreeSyncInfoKM(psMemInfo->psKernelSyncInfo);
++ }
++ }
++
++
++ if(psMemInfo->sMemBlk.psIntSysPAddr)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(IMG_SYS_PHYADDR), psMemInfo->sMemBlk.psIntSysPAddr, IMG_NULL);
++ psMemInfo->sMemBlk.psIntSysPAddr = IMG_NULL;
++ }
++
++ if (eError == PVRSRV_OK)
++ {
++
++ psMemInfo->ui32RefCount--;
++
++ eError = FreeDeviceMem(psMemInfo);
++ }
++
++ if(hOSWrapMem)
++ {
++ OSReleasePhysPageAddr(hOSWrapMem);
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemoryKM(IMG_HANDLE hDevCookie,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevMemContext,
++ IMG_SIZE_T ui32ByteSize,
++ IMG_SIZE_T ui32PageOffset,
++ IMG_BOOL bPhysContig,
++ IMG_SYS_PHYADDR *psExtSysPAddr,
++ IMG_VOID *pvLinAddr,
++ IMG_UINT32 ui32Flags,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = IMG_NULL;
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ IMG_SIZE_T ui32HostPageSize = HOST_PAGESIZE();
++ IMG_HANDLE hDevMemHeap = IMG_NULL;
++ PVRSRV_DEVICE_NODE* psDeviceNode;
++ BM_HANDLE hBuffer;
++ PVRSRV_MEMBLK *psMemBlock;
++ IMG_BOOL bBMError;
++ BM_HEAP *psBMHeap;
++ PVRSRV_ERROR eError;
++ IMG_VOID *pvPageAlignedCPUVAddr;
++ IMG_SYS_PHYADDR *psIntSysPAddr = IMG_NULL;
++ IMG_HANDLE hOSWrapMem = IMG_NULL;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ IMG_SIZE_T ui32PageCount = 0;
++ IMG_UINT32 i;
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE*)hDevCookie;
++ PVR_ASSERT(psDeviceNode != IMG_NULL);
++
++ if (psDeviceNode == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVWrapExtMemoryKM: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ if(pvLinAddr)
++ {
++
++ ui32PageOffset = (IMG_UINTPTR_T)pvLinAddr & (ui32HostPageSize - 1);
++
++
++ ui32PageCount = HOST_PAGEALIGN(ui32ByteSize + ui32PageOffset) / ui32HostPageSize;
++ pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)pvLinAddr - ui32PageOffset);
++
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32PageCount * sizeof(IMG_SYS_PHYADDR),
++ (IMG_VOID **)&psIntSysPAddr, IMG_NULL,
++ "Array of Page Addresses") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ eError = OSAcquirePhysPageAddr(pvPageAlignedCPUVAddr,
++ ui32PageCount * ui32HostPageSize,
++ psIntSysPAddr,
++ &hOSWrapMem,
++ (ui32Flags != 0) ? IMG_TRUE : IMG_FALSE);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block"));
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorExitPhase1;
++ }
++
++
++ psExtSysPAddr = psIntSysPAddr;
++
++
++
++ bPhysContig = IMG_FALSE;
++ }
++ else
++ {
++
++ }
++
++
++ psDevMemoryInfo = &((BM_CONTEXT*)hDevMemContext)->psDeviceNode->sDevMemoryInfo;
++ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++ for(i=0; i<PVRSRV_MAX_CLIENT_HEAPS; i++)
++ {
++ if(HEAP_IDX(psDeviceMemoryHeap[i].ui32HeapID) == psDevMemoryInfo->ui32MappingHeapID)
++ {
++ if(psDeviceMemoryHeap[i].DevMemHeapType == DEVICE_MEMORY_HEAP_PERCONTEXT)
++ {
++
++ hDevMemHeap = BM_CreateHeap(hDevMemContext, &psDeviceMemoryHeap[i]);
++ }
++ else
++ {
++ hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[i].hDevMemHeap;
++ }
++ break;
++ }
++ }
++
++ if(hDevMemHeap == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: unable to find mapping heap"));
++ eError = PVRSRV_ERROR_GENERIC;
++ goto ErrorExitPhase2;
++ }
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ (IMG_VOID **)&psMemInfo, IMG_NULL,
++ "Kernel Memory Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block"));
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorExitPhase2;
++ }
++
++ OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
++ psMemInfo->ui32Flags = ui32Flags;
++
++ psMemBlock = &(psMemInfo->sMemBlk);
++
++ bBMError = BM_Wrap(hDevMemHeap,
++ ui32ByteSize,
++ ui32PageOffset,
++ bPhysContig,
++ psExtSysPAddr,
++ IMG_NULL,
++ &psMemInfo->ui32Flags,
++ &hBuffer);
++ if (!bBMError)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: BM_Wrap Failed"));
++ eError = PVRSRV_ERROR_BAD_MAPPING;
++ goto ErrorExitPhase3;
++ }
++
++
++ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++ psMemBlock->hOSWrapMem = hOSWrapMem;
++ psMemBlock->psIntSysPAddr = psIntSysPAddr;
++
++
++ psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
++
++
++ psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
++ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++ psMemInfo->ui32AllocSize = ui32ByteSize;
++
++
++
++ psMemInfo->pvSysBackupBuffer = IMG_NULL;
++
++
++
++
++ psBMHeap = (BM_HEAP*)hDevMemHeap;
++ hDevMemContext = (IMG_HANDLE)psBMHeap->pBMContext;
++ eError = PVRSRVAllocSyncInfoKM(hDevCookie,
++ hDevMemContext,
++ &psMemInfo->psKernelSyncInfo);
++ if(eError != PVRSRV_OK)
++ {
++ goto ErrorExitPhase4;
++ }
++
++ psMemInfo->psKernelSyncInfo->ui32RefCount++;
++
++
++ psMemInfo->ui32RefCount++;
++
++
++ psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DEVICEMEM_WRAP,
++ psMemInfo,
++ 0,
++ UnwrapExtMemoryCallBack);
++
++
++ *ppsMemInfo = psMemInfo;
++
++ return PVRSRV_OK;
++
++
++
++ErrorExitPhase4:
++ if(psMemInfo)
++ {
++ FreeDeviceMem(psMemInfo);
++
++
++
++ psMemInfo = IMG_NULL;
++ }
++
++ErrorExitPhase3:
++ if(psMemInfo)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
++
++ }
++
++ErrorExitPhase2:
++ if(psIntSysPAddr)
++ {
++ OSReleasePhysPageAddr(hOSWrapMem);
++ }
++
++ErrorExitPhase1:
++ if(psIntSysPAddr)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32PageCount * sizeof(IMG_SYS_PHYADDR), psIntSysPAddr, IMG_NULL);
++
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemoryKM (PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ if (!psMemInfo)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem);
++}
++
++
++static PVRSRV_ERROR UnmapDeviceMemoryCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ PVRSRV_ERROR eError;
++ RESMAN_MAP_DEVICE_MEM_DATA *psMapData = pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ if(psMapData->psMemInfo->sMemBlk.psIntSysPAddr)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(IMG_SYS_PHYADDR), psMapData->psMemInfo->sMemBlk.psIntSysPAddr, IMG_NULL);
++ psMapData->psMemInfo->sMemBlk.psIntSysPAddr = IMG_NULL;
++ }
++
++ psMapData->psMemInfo->psKernelSyncInfo->ui32RefCount--;
++ if (psMapData->psMemInfo->psKernelSyncInfo->ui32RefCount == 0)
++ {
++ eError = PVRSRVFreeSyncInfoKM(psMapData->psMemInfo->psKernelSyncInfo);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"UnmapDeviceMemoryCallBack: Failed to free sync info"));
++ return eError;
++ }
++ }
++
++ eError = FreeDeviceMem(psMapData->psMemInfo);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"UnmapDeviceMemoryCallBack: Failed to free DST meminfo"));
++ return eError;
++ }
++
++
++ psMapData->psSrcMemInfo->ui32RefCount--;
++
++ if (psMapData->psSrcMemInfo->ui32RefCount == 1 &&
++ psMapData->psSrcMemInfo->bPendingFree == IMG_TRUE)
++ {
++
++
++
++ if (psMapData->psSrcMemInfo->sMemBlk.hResItem != IMG_NULL)
++ {
++
++
++ eError = ResManFreeResByPtr(psMapData->psSrcMemInfo->sMemBlk.hResItem);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"UnmapDeviceMemoryCallBack: Failed to free SRC meminfo"));
++ PVR_DBG_BREAK;
++ }
++ }
++ else
++ {
++
++ eError = FreeDeviceMemCallBack(psMapData->psSrcMemInfo, 0);
++ }
++ }
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_MAP_DEVICE_MEM_DATA), psMapData, IMG_NULL);
++
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo,
++ IMG_HANDLE hDstDevMemHeap,
++ PVRSRV_KERNEL_MEM_INFO **ppsDstMemInfo)
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 i;
++ IMG_SIZE_T ui32PageCount, ui32PageOffset;
++ IMG_SIZE_T ui32HostPageSize = HOST_PAGESIZE();
++ IMG_SYS_PHYADDR *psSysPAddr = IMG_NULL;
++ IMG_DEV_PHYADDR sDevPAddr;
++ BM_BUF *psBuf;
++ IMG_DEV_VIRTADDR sDevVAddr;
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = IMG_NULL;
++ BM_HANDLE hBuffer;
++ PVRSRV_MEMBLK *psMemBlock;
++ IMG_BOOL bBMError;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_VOID *pvPageAlignedCPUVAddr;
++ RESMAN_MAP_DEVICE_MEM_DATA *psMapData = IMG_NULL;
++
++
++ if(!psSrcMemInfo || !hDstDevMemHeap || !ppsDstMemInfo)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ *ppsDstMemInfo = IMG_NULL;
++
++ ui32PageOffset = psSrcMemInfo->sDevVAddr.uiAddr & (ui32HostPageSize - 1);
++ ui32PageCount = HOST_PAGEALIGN(psSrcMemInfo->ui32AllocSize + ui32PageOffset) / ui32HostPageSize;
++ pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)psSrcMemInfo->pvLinAddrKM - ui32PageOffset);
++
++
++
++
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32PageCount*sizeof(IMG_SYS_PHYADDR),
++ (IMG_VOID **)&psSysPAddr, IMG_NULL,
++ "Array of Page Addresses") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc memory for block"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ psBuf = psSrcMemInfo->sMemBlk.hBuffer;
++
++
++ psDeviceNode = psBuf->pMapping->pBMHeap->pBMContext->psDeviceNode;
++
++
++ sDevVAddr.uiAddr = psSrcMemInfo->sDevVAddr.uiAddr - IMG_CAST_TO_DEVVADDR_UINT(ui32PageOffset);
++ for(i=0; i<ui32PageCount; i++)
++ {
++ BM_GetPhysPageAddr(psSrcMemInfo, sDevVAddr, &sDevPAddr);
++
++
++ psSysPAddr[i] = SysDevPAddrToSysPAddr (psDeviceNode->sDevId.eDeviceType, sDevPAddr);
++
++
++ sDevVAddr.uiAddr += IMG_CAST_TO_DEVVADDR_UINT(ui32HostPageSize);
++ }
++
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(RESMAN_MAP_DEVICE_MEM_DATA),
++ (IMG_VOID **)&psMapData, IMG_NULL,
++ "Resource Manager Map Data") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc resman map data"));
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorExit;
++ }
++
++
++ if(OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ (IMG_VOID **)&psMemInfo, IMG_NULL,
++ "Kernel Memory Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc memory for block"));
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorExit;
++ }
++
++ OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
++ psMemInfo->ui32Flags = psSrcMemInfo->ui32Flags;
++
++ psMemBlock = &(psMemInfo->sMemBlk);
++
++ bBMError = BM_Wrap(hDstDevMemHeap,
++ psSrcMemInfo->ui32AllocSize,
++ ui32PageOffset,
++ IMG_FALSE,
++ psSysPAddr,
++ pvPageAlignedCPUVAddr,
++ &psMemInfo->ui32Flags,
++ &hBuffer);
++
++ if (!bBMError)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: BM_Wrap Failed"));
++ eError = PVRSRV_ERROR_BAD_MAPPING;
++ goto ErrorExit;
++ }
++
++
++ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++
++
++ psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
++
++
++ psMemBlock->psIntSysPAddr = psSysPAddr;
++
++
++ psMemInfo->pvLinAddrKM = psSrcMemInfo->pvLinAddrKM;
++
++
++ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++ psMemInfo->ui32AllocSize = psSrcMemInfo->ui32AllocSize;
++ psMemInfo->psKernelSyncInfo = psSrcMemInfo->psKernelSyncInfo;
++
++
++ psMemInfo->psKernelSyncInfo->ui32RefCount++;
++
++
++
++ psMemInfo->pvSysBackupBuffer = IMG_NULL;
++
++
++ psSrcMemInfo->ui32RefCount++;
++
++
++ psMapData->psMemInfo = psMemInfo;
++ psMapData->psSrcMemInfo = psSrcMemInfo;
++
++
++ psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DEVICEMEM_MAPPING,
++ psMapData,
++ 0,
++ UnmapDeviceMemoryCallBack);
++
++ *ppsDstMemInfo = psMemInfo;
++
++ return PVRSRV_OK;
++
++
++
++ErrorExit:
++
++ if(psSysPAddr)
++ {
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(IMG_SYS_PHYADDR), psSysPAddr, IMG_NULL);
++
++ }
++
++ if(psMemInfo)
++ {
++
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
++
++ }
++
++ if(psMapData)
++ {
++
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(RESMAN_MAP_DEVICE_MEM_DATA), psMapData, IMG_NULL);
++
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceClassMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++ if (!psMemInfo)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem);
++}
++
++
++static PVRSRV_ERROR UnmapDeviceClassMemoryCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo = pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ return FreeDeviceMem(psMemInfo);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevMemContext,
++ IMG_HANDLE hDeviceClassBuffer,
++ PVRSRV_KERNEL_MEM_INFO **ppsMemInfo,
++ IMG_HANDLE *phOSMapInfo)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++ PVRSRV_DEVICECLASS_BUFFER *psDeviceClassBuffer;
++ IMG_SYS_PHYADDR *psSysPAddr;
++ IMG_VOID *pvCPUVAddr, *pvPageAlignedCPUVAddr;
++ IMG_BOOL bPhysContig;
++ BM_CONTEXT *psBMContext;
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ IMG_HANDLE hDevMemHeap = IMG_NULL;
++ IMG_SIZE_T ui32ByteSize;
++ IMG_SIZE_T ui32Offset;
++ IMG_SIZE_T ui32PageSize = HOST_PAGESIZE();
++ BM_HANDLE hBuffer;
++ PVRSRV_MEMBLK *psMemBlock;
++ IMG_BOOL bBMError;
++ IMG_UINT32 i;
++
++ if(!hDeviceClassBuffer || !ppsMemInfo || !phOSMapInfo || !hDevMemContext)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDeviceClassBuffer = (PVRSRV_DEVICECLASS_BUFFER*)hDeviceClassBuffer;
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++ eError = psDeviceClassBuffer->pfnGetBufferAddr(psDeviceClassBuffer->hExtDevice,
++ psDeviceClassBuffer->hExtBuffer,
++ &psSysPAddr,
++ &ui32ByteSize,
++ &pvCPUVAddr,
++ phOSMapInfo,
++ &bPhysContig);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: unable to get buffer address"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ psBMContext = (BM_CONTEXT*)psDeviceClassBuffer->hDevMemContext;
++ psDevMemoryInfo = &psBMContext->psDeviceNode->sDevMemoryInfo;
++ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++ for(i=0; i<PVRSRV_MAX_CLIENT_HEAPS; i++)
++ {
++ if(HEAP_IDX(psDeviceMemoryHeap[i].ui32HeapID) == psDevMemoryInfo->ui32MappingHeapID)
++ {
++ if(psDeviceMemoryHeap[i].DevMemHeapType == DEVICE_MEMORY_HEAP_PERCONTEXT)
++ {
++
++ hDevMemHeap = BM_CreateHeap(hDevMemContext, &psDeviceMemoryHeap[i]);
++ }
++ else
++ {
++ hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[i].hDevMemHeap;
++ }
++ break;
++ }
++ }
++
++ if(hDevMemHeap == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: unable to find mapping heap"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ ui32Offset = ((IMG_UINTPTR_T)pvCPUVAddr) & (ui32PageSize - 1);
++ pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)pvCPUVAddr - ui32Offset);
++
++ if(OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ (IMG_VOID **)&psMemInfo, IMG_NULL,
++ "Kernel Memory Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: Failed to alloc memory for block"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++
++ OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
++
++ psMemBlock = &(psMemInfo->sMemBlk);
++
++ bBMError = BM_Wrap(hDevMemHeap,
++ ui32ByteSize,
++ ui32Offset,
++ bPhysContig,
++ psSysPAddr,
++ pvPageAlignedCPUVAddr,
++ &psMemInfo->ui32Flags,
++ &hBuffer);
++
++ if (!bBMError)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: BM_Wrap Failed"));
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
++
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++
++
++ psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++ psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++
++
++ psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
++
++
++
++ psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
++
++
++ psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++ psMemInfo->ui32AllocSize = ui32ByteSize;
++ psMemInfo->psKernelSyncInfo = psDeviceClassBuffer->psKernelSyncInfo;
++
++
++
++ psMemInfo->pvSysBackupBuffer = IMG_NULL;
++
++
++ psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_DEVICECLASSMEM_MAPPING,
++ psMemInfo,
++ 0,
++ UnmapDeviceClassMemoryCallBack);
++
++
++ *ppsMemInfo = psMemInfo;
++
++ return PVRSRV_OK;
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/handle.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/handle.c
+new file mode 100644
+index 0000000..6ac016a
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/handle.c
+@@ -0,0 +1,1547 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifdef PVR_SECURE_HANDLES
++#include <stddef.h>
++
++#include "services_headers.h"
++#include "handle.h"
++
++#ifdef DEBUG
++#define HANDLE_BLOCK_SIZE 1
++#else
++#define HANDLE_BLOCK_SIZE 256
++#endif
++
++#define HANDLE_HASH_TAB_INIT_SIZE 32
++
++#define DEFAULT_MAX_INDEX_PLUS_ONE 0xfffffffful
++#define DEFAULT_MAX_HANDLE DEFAULT_MAX_INDEX_PLUS_ONE
++
++#define INDEX_IS_VALID(psBase, i) ((i) < (psBase)->ui32TotalHandCount)
++
++#define INDEX_TO_HANDLE(psBase, idx) ((IMG_HANDLE)((idx) + 1))
++#define HANDLE_TO_INDEX(psBase, hand) ((IMG_UINT32)(hand) - 1)
++
++#define INDEX_TO_HANDLE_PTR(psBase, i) (((psBase)->psHandleArray) + (i))
++#define HANDLE_TO_HANDLE_PTR(psBase, h) (INDEX_TO_HANDLE_PTR(psBase, HANDLE_TO_INDEX(psBase, h)))
++
++#define HANDLE_PTR_TO_INDEX(psBase, psHandle) (IMG_UINT32)((psHandle) - ((psBase)->psHandleArray))
++#define HANDLE_PTR_TO_HANDLE(psBase, psHandle) \
++ INDEX_TO_HANDLE(psBase, HANDLE_PTR_TO_INDEX(psBase, psHandle))
++
++#define ROUND_UP_TO_MULTIPLE(a, b) ((((a) + (b) - 1) / (b)) * (b))
++
++#define HANDLES_BATCHED(psBase) ((psBase)->ui32HandBatchSize != 0)
++
++#define SET_FLAG(v, f) ((IMG_VOID)((v) |= (f)))
++#define CLEAR_FLAG(v, f) ((IMG_VOID)((v) &= ~(f)))
++#define TEST_FLAG(v, f) ((IMG_BOOL)(((v) & (f)) != 0))
++
++#define TEST_ALLOC_FLAG(psHandle, f) TEST_FLAG((psHandle)->eFlag, f)
++
++#define SET_INTERNAL_FLAG(psHandle, f) SET_FLAG((psHandle)->eInternalFlag, f)
++#define CLEAR_INTERNAL_FLAG(psHandle, f) CLEAR_FLAG((psHandle)->eInternalFlag, f)
++#define TEST_INTERNAL_FLAG(psHandle, f) TEST_FLAG((psHandle)->eInternalFlag, f)
++
++#define BATCHED_HANDLE(psHandle) TEST_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED)
++
++#define SET_BATCHED_HANDLE(psHandle) SET_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED)
++
++#define SET_UNBATCHED_HANDLE(psHandle) CLEAR_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED)
++
++#define BATCHED_HANDLE_PARTIALLY_FREE(psHandle) TEST_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE)
++
++#define SET_BATCHED_HANDLE_PARTIALLY_FREE(psHandle) SET_INTERNAL_FLAG(psHandle, INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE)
++
++#define HANDLE_STRUCT_IS_FREE(psHandle) ((psHandle)->eType == PVRSRV_HANDLE_TYPE_NONE && (psHandle)->eInternalFlag == INTERNAL_HANDLE_FLAG_NONE)
++
++#ifdef MIN
++#undef MIN
++#endif
++
++#define MIN(x, y) (((x) < (y)) ? (x) : (y))
++
++struct sHandleList
++{
++ IMG_UINT32 ui32Prev;
++ IMG_UINT32 ui32Next;
++ IMG_HANDLE hParent;
++};
++
++enum ePVRSRVInternalHandleFlag
++{
++ INTERNAL_HANDLE_FLAG_NONE = 0x00,
++ INTERNAL_HANDLE_FLAG_BATCHED = 0x01,
++ INTERNAL_HANDLE_FLAG_BATCHED_PARTIALLY_FREE = 0x02,
++};
++
++struct sHandle
++{
++
++ PVRSRV_HANDLE_TYPE eType;
++
++
++ IMG_VOID *pvData;
++
++
++ IMG_UINT32 ui32NextIndexPlusOne;
++
++
++ enum ePVRSRVInternalHandleFlag eInternalFlag;
++
++
++ PVRSRV_HANDLE_ALLOC_FLAG eFlag;
++
++
++ IMG_UINT32 ui32Index;
++
++
++ struct sHandleList sChildren;
++
++
++ struct sHandleList sSiblings;
++};
++
++struct _PVRSRV_HANDLE_BASE_
++{
++
++ IMG_HANDLE hBaseBlockAlloc;
++
++
++ IMG_HANDLE hHandBlockAlloc;
++
++
++ struct sHandle *psHandleArray;
++
++
++ HASH_TABLE *psHashTab;
++
++
++ IMG_UINT32 ui32FreeHandCount;
++
++
++ IMG_UINT32 ui32FirstFreeIndex;
++
++
++ IMG_UINT32 ui32MaxIndexPlusOne;
++
++
++ IMG_UINT32 ui32TotalHandCount;
++
++
++ IMG_UINT32 ui32LastFreeIndexPlusOne;
++
++
++ IMG_UINT32 ui32HandBatchSize;
++
++
++ IMG_UINT32 ui32TotalHandCountPreBatch;
++
++
++ IMG_UINT32 ui32FirstBatchIndexPlusOne;
++
++
++ IMG_UINT32 ui32BatchHandAllocFailures;
++
++
++ IMG_BOOL bPurgingEnabled;
++};
++
++enum eHandKey {
++ HAND_KEY_DATA = 0,
++ HAND_KEY_TYPE,
++ HAND_KEY_PARENT,
++ HAND_KEY_LEN
++};
++
++PVRSRV_HANDLE_BASE *gpsKernelHandleBase = IMG_NULL;
++
++typedef IMG_UINTPTR_T HAND_KEY[HAND_KEY_LEN];
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListInit)
++#endif
++static INLINE
++IMG_VOID HandleListInit(IMG_UINT32 ui32Index, struct sHandleList *psList, IMG_HANDLE hParent)
++{
++ psList->ui32Next = ui32Index;
++ psList->ui32Prev = ui32Index;
++ psList->hParent = hParent;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(InitParentList)
++#endif
++static INLINE
++IMG_VOID InitParentList(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++ IMG_UINT32 ui32Parent = HANDLE_PTR_TO_INDEX(psBase, psHandle);
++
++ HandleListInit(ui32Parent, &psHandle->sChildren, INDEX_TO_HANDLE(psBase, ui32Parent));
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(InitChildEntry)
++#endif
++static INLINE
++IMG_VOID InitChildEntry(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++ HandleListInit(HANDLE_PTR_TO_INDEX(psBase, psHandle), &psHandle->sSiblings, IMG_NULL);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListIsEmpty)
++#endif
++static INLINE
++IMG_BOOL HandleListIsEmpty(IMG_UINT32 ui32Index, struct sHandleList *psList)
++{
++ IMG_BOOL bIsEmpty;
++
++ bIsEmpty = (IMG_BOOL)(psList->ui32Next == ui32Index);
++
++#ifdef DEBUG
++ {
++ IMG_BOOL bIsEmpty2;
++
++ bIsEmpty2 = (IMG_BOOL)(psList->ui32Prev == ui32Index);
++ PVR_ASSERT(bIsEmpty == bIsEmpty2);
++ }
++#endif
++
++ return bIsEmpty;
++}
++
++#ifdef DEBUG
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(NoChildren)
++#endif
++static INLINE
++IMG_BOOL NoChildren(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++ PVR_ASSERT(psHandle->sChildren.hParent == HANDLE_PTR_TO_HANDLE(psBase, psHandle));
++
++ return HandleListIsEmpty(HANDLE_PTR_TO_INDEX(psBase, psHandle), &psHandle->sChildren);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(NoParent)
++#endif
++static INLINE
++IMG_BOOL NoParent(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++ if (HandleListIsEmpty(HANDLE_PTR_TO_INDEX(psBase, psHandle), &psHandle->sSiblings))
++ {
++ PVR_ASSERT(psHandle->sSiblings.hParent == IMG_NULL);
++
++ return IMG_TRUE;
++ }
++ else
++ {
++ PVR_ASSERT(psHandle->sSiblings.hParent != IMG_NULL);
++ }
++ return IMG_FALSE;
++}
++#endif
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(ParentHandle)
++#endif
++static INLINE
++IMG_HANDLE ParentHandle(struct sHandle *psHandle)
++{
++ return psHandle->sSiblings.hParent;
++}
++
++#define LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, i, p, po, eo) \
++ ((struct sHandleList *)((IMG_CHAR *)(INDEX_TO_HANDLE_PTR(psBase, i)) + (((i) == (p)) ? (po) : (eo))))
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListInsertBefore)
++#endif
++static INLINE
++IMG_VOID HandleListInsertBefore(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32InsIndex, struct sHandleList *psIns, IMG_SIZE_T uiParentOffset, IMG_UINT32 ui32EntryIndex, struct sHandleList *psEntry, IMG_SIZE_T uiEntryOffset, IMG_UINT32 ui32ParentIndex)
++{
++
++ struct sHandleList *psPrevIns = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psIns->ui32Prev, ui32ParentIndex, uiParentOffset, uiEntryOffset);
++
++ PVR_ASSERT(psEntry->hParent == IMG_NULL);
++ PVR_ASSERT(ui32InsIndex == psPrevIns->ui32Next);
++ PVR_ASSERT(LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, ui32ParentIndex, ui32ParentIndex, uiParentOffset, uiParentOffset)->hParent == INDEX_TO_HANDLE(psBase, ui32ParentIndex));
++
++ psEntry->ui32Prev = psIns->ui32Prev;
++ psIns->ui32Prev = ui32EntryIndex;
++ psEntry->ui32Next = ui32InsIndex;
++ psPrevIns->ui32Next = ui32EntryIndex;
++
++ psEntry->hParent = INDEX_TO_HANDLE(psBase, ui32ParentIndex);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(AdoptChild)
++#endif
++static INLINE
++IMG_VOID AdoptChild(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psParent, struct sHandle *psChild)
++{
++ IMG_UINT32 ui32Parent = HANDLE_TO_INDEX(psBase, psParent->sChildren.hParent);
++
++ PVR_ASSERT(ui32Parent == HANDLE_PTR_TO_INDEX(psBase, psParent));
++
++ HandleListInsertBefore(psBase, ui32Parent, &psParent->sChildren, offsetof(struct sHandle, sChildren), HANDLE_PTR_TO_INDEX(psBase, psChild), &psChild->sSiblings, offsetof(struct sHandle, sSiblings), ui32Parent);
++
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListRemove)
++#endif
++static INLINE
++IMG_VOID HandleListRemove(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32EntryIndex, struct sHandleList *psEntry, IMG_SIZE_T uiEntryOffset, IMG_SIZE_T uiParentOffset)
++{
++ if (!HandleListIsEmpty(ui32EntryIndex, psEntry))
++ {
++
++ struct sHandleList *psPrev = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psEntry->ui32Prev, HANDLE_TO_INDEX(psBase, psEntry->hParent), uiParentOffset, uiEntryOffset);
++ struct sHandleList *psNext = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psEntry->ui32Next, HANDLE_TO_INDEX(psBase, psEntry->hParent), uiParentOffset, uiEntryOffset);
++
++
++ PVR_ASSERT(psEntry->hParent != IMG_NULL);
++
++ psPrev->ui32Next = psEntry->ui32Next;
++ psNext->ui32Prev = psEntry->ui32Prev;
++
++ HandleListInit(ui32EntryIndex, psEntry, IMG_NULL);
++ }
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(UnlinkFromParent)
++#endif
++static INLINE
++IMG_VOID UnlinkFromParent(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++ HandleListRemove(psBase, HANDLE_PTR_TO_INDEX(psBase, psHandle), &psHandle->sSiblings, offsetof(struct sHandle, sSiblings), offsetof(struct sHandle, sChildren));
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListIterate)
++#endif
++static INLINE
++PVRSRV_ERROR HandleListIterate(PVRSRV_HANDLE_BASE *psBase, struct sHandleList *psHead, IMG_SIZE_T uiParentOffset, IMG_SIZE_T uiEntryOffset, PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, struct sHandle *))
++{
++ IMG_UINT32 ui32Index;
++ IMG_UINT32 ui32Parent = HANDLE_TO_INDEX(psBase, psHead->hParent);
++
++ PVR_ASSERT(psHead->hParent != IMG_NULL);
++
++
++ for(ui32Index = psHead->ui32Next; ui32Index != ui32Parent; )
++ {
++ struct sHandle *psHandle = INDEX_TO_HANDLE_PTR(psBase, ui32Index);
++
++ struct sHandleList *psEntry = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, ui32Index, ui32Parent, uiParentOffset, uiEntryOffset);
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(psEntry->hParent == psHead->hParent);
++
++ ui32Index = psEntry->ui32Next;
++
++ eError = (*pfnIterFunc)(psBase, psHandle);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(IterateOverChildren)
++#endif
++static INLINE
++PVRSRV_ERROR IterateOverChildren(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psParent, PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, struct sHandle *))
++{
++ return HandleListIterate(psBase, &psParent->sChildren, offsetof(struct sHandle, sChildren), offsetof(struct sHandle, sSiblings), pfnIterFunc);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(GetHandleStructure)
++#endif
++static INLINE
++PVRSRV_ERROR GetHandleStructure(PVRSRV_HANDLE_BASE *psBase, struct sHandle **ppsHandle, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ IMG_UINT32 ui32Index = HANDLE_TO_INDEX(psBase, hHandle);
++ struct sHandle *psHandle;
++
++
++ if (!INDEX_IS_VALID(psBase, ui32Index))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle index out of range (%u >= %u)", ui32Index, psBase->ui32TotalHandCount));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psHandle = INDEX_TO_HANDLE_PTR(psBase, ui32Index);
++ if (psHandle->eType == PVRSRV_HANDLE_TYPE_NONE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle not allocated (index: %u)", ui32Index));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ if (eType != PVRSRV_HANDLE_TYPE_NONE && eType != psHandle->eType)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle type mismatch (%d != %d)", eType, psHandle->eType));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ *ppsHandle = psHandle;
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(ParentIfPrivate)
++#endif
++static INLINE
++IMG_HANDLE ParentIfPrivate(struct sHandle *psHandle)
++{
++ return TEST_ALLOC_FLAG(psHandle, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ?
++ ParentHandle(psHandle) : IMG_NULL;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(InitKey)
++#endif
++static INLINE
++IMG_VOID InitKey(HAND_KEY aKey, PVRSRV_HANDLE_BASE *psBase, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hParent)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ aKey[HAND_KEY_DATA] = (IMG_UINTPTR_T)pvData;
++ aKey[HAND_KEY_TYPE] = (IMG_UINTPTR_T)eType;
++ aKey[HAND_KEY_PARENT] = (IMG_UINTPTR_T)hParent;
++}
++
++static PVRSRV_ERROR FreeHandleArray(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (psBase->psHandleArray != IMG_NULL)
++ {
++ eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ psBase->ui32TotalHandCount * sizeof(struct sHandle),
++ psBase->psHandleArray,
++ psBase->hHandBlockAlloc);
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeHandleArray: Error freeing memory (%d)", eError));
++ }
++ else
++ {
++ psBase->psHandleArray = IMG_NULL;
++ }
++ }
++
++ return eError;
++}
++
++static PVRSRV_ERROR FreeHandle(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++ HAND_KEY aKey;
++ IMG_UINT32 ui32Index = HANDLE_PTR_TO_INDEX(psBase, psHandle);
++ PVRSRV_ERROR eError;
++
++
++ InitKey(aKey, psBase, psHandle->pvData, psHandle->eType, ParentIfPrivate(psHandle));
++
++ if (!TEST_ALLOC_FLAG(psHandle, PVRSRV_HANDLE_ALLOC_FLAG_MULTI) && !BATCHED_HANDLE_PARTIALLY_FREE(psHandle))
++ {
++ IMG_HANDLE hHandle;
++ hHandle = (IMG_HANDLE) HASH_Remove_Extended(psBase->psHashTab, aKey);
++
++ PVR_ASSERT(hHandle != IMG_NULL);
++ PVR_ASSERT(hHandle == INDEX_TO_HANDLE(psBase, ui32Index));
++ PVR_UNREFERENCED_PARAMETER(hHandle);
++ }
++
++
++ UnlinkFromParent(psBase, psHandle);
++
++
++ eError = IterateOverChildren(psBase, psHandle, FreeHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeHandle: Error whilst freeing subhandles (%d)", eError));
++ return eError;
++ }
++
++
++ psHandle->eType = PVRSRV_HANDLE_TYPE_NONE;
++
++ if (BATCHED_HANDLE(psHandle) && !BATCHED_HANDLE_PARTIALLY_FREE(psHandle))
++ {
++ SET_BATCHED_HANDLE_PARTIALLY_FREE(psHandle);
++
++ return PVRSRV_OK;
++ }
++
++
++ if (!psBase->bPurgingEnabled)
++ {
++ if (psBase->ui32FreeHandCount == 0)
++ {
++ PVR_ASSERT(psBase->ui32FirstFreeIndex == 0);
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0);
++
++ psBase->ui32FirstFreeIndex = ui32Index;
++ }
++ else
++ {
++
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0);
++ PVR_ASSERT(INDEX_TO_HANDLE_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne == 0);
++ INDEX_TO_HANDLE_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne = ui32Index + 1;
++ }
++
++ PVR_ASSERT(psHandle->ui32NextIndexPlusOne == 0);
++
++
++ psBase->ui32LastFreeIndexPlusOne = ui32Index + 1;
++ }
++
++ psBase->ui32FreeHandCount++;
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR FreeAllHandles(PVRSRV_HANDLE_BASE *psBase)
++{
++ IMG_UINT32 i;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (psBase->ui32FreeHandCount == psBase->ui32TotalHandCount)
++ {
++ return eError;
++ }
++
++ for (i = 0; i < psBase->ui32TotalHandCount; i++)
++ {
++ struct sHandle *psHandle;
++
++ psHandle = INDEX_TO_HANDLE_PTR(psBase, i);
++
++ if (psHandle->eType != PVRSRV_HANDLE_TYPE_NONE)
++ {
++ eError = FreeHandle(psBase, psHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeAllHandles: FreeHandle failed (%d)", eError));
++ break;
++ }
++
++
++ if (psBase->ui32FreeHandCount == psBase->ui32TotalHandCount)
++ {
++ break;
++ }
++ }
++ }
++
++ return eError;
++}
++
++static PVRSRV_ERROR FreeHandleBase(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVRSRV_ERROR eError;
++
++ if (HANDLES_BATCHED(psBase))
++ {
++ PVR_DPF((PVR_DBG_WARNING, "FreeHandleBase: Uncommitted/Unreleased handle batch"));
++ PVRSRVReleaseHandleBatch(psBase);
++ }
++
++
++ eError = FreeAllHandles(psBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handles (%d)", eError));
++ return eError;
++ }
++
++
++ eError = FreeHandleArray(psBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handle array (%d)", eError));
++ return eError;
++ }
++
++ if (psBase->psHashTab != IMG_NULL)
++ {
++
++ HASH_Delete(psBase->psHashTab);
++ }
++
++ eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*psBase),
++ psBase,
++ psBase->hBaseBlockAlloc);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handle base (%d)", eError));
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(FindHandle)
++#endif
++static INLINE
++IMG_HANDLE FindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hParent)
++{
++ HAND_KEY aKey;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ InitKey(aKey, psBase, pvData, eType, hParent);
++
++ return (IMG_HANDLE) HASH_Retrieve_Extended(psBase->psHashTab, aKey);
++}
++
++static PVRSRV_ERROR ReallocMem(IMG_PVOID *ppvMem, IMG_HANDLE *phBlockAlloc, IMG_UINT32 ui32NewSize, IMG_UINT32 ui32OldSize)
++{
++ IMG_VOID *pvOldMem = *ppvMem;
++ IMG_HANDLE hOldBlockAlloc = *phBlockAlloc;
++ IMG_UINT32 ui32CopySize = MIN(ui32NewSize, ui32OldSize);
++ IMG_VOID *pvNewMem = IMG_NULL;
++ IMG_HANDLE hNewBlockAlloc = IMG_NULL;
++ PVRSRV_ERROR eError;
++
++ if (ui32NewSize == ui32OldSize)
++ {
++ return (PVRSRV_OK);
++ }
++
++ if (ui32NewSize != 0)
++ {
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32NewSize,
++ &pvNewMem,
++ &hNewBlockAlloc,
++ "Memory Area");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ReallocMem: Couldn't allocate new memory area (%d)", eError));
++ return eError;
++ }
++ }
++
++ if (ui32CopySize != 0)
++ {
++
++ OSMemCopy(pvNewMem, pvOldMem, ui32CopySize);
++ }
++
++ if (ui32OldSize != 0)
++ {
++
++ eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32OldSize,
++ pvOldMem,
++ hOldBlockAlloc);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ReallocMem: Couldn't free old memory area (%d)", eError));
++ }
++ }
++
++ *ppvMem = pvNewMem;
++ *phBlockAlloc = hNewBlockAlloc;
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(ReallocHandleArray)
++#endif
++static INLINE
++PVRSRV_ERROR ReallocHandleArray(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32NewCount, IMG_UINT32 ui32OldCount)
++{
++ return ReallocMem((IMG_PVOID *)&psBase->psHandleArray,
++ &psBase->hHandBlockAlloc,
++ ui32NewCount * sizeof(struct sHandle),
++ ui32OldCount * sizeof(struct sHandle));
++}
++
++static PVRSRV_ERROR IncreaseHandleArraySize(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32Delta)
++{
++ PVRSRV_ERROR eError;
++ struct sHandle *psHandle;
++ IMG_UINT32 ui32DeltaAdjusted = ROUND_UP_TO_MULTIPLE(ui32Delta, HANDLE_BLOCK_SIZE);
++ IMG_UINT32 ui32NewTotalHandCount = psBase->ui32TotalHandCount + ui32DeltaAdjusted;
++;
++
++ PVR_ASSERT(ui32Delta != 0);
++
++
++ if (ui32NewTotalHandCount > psBase->ui32MaxIndexPlusOne || ui32NewTotalHandCount <= psBase->ui32TotalHandCount)
++ {
++ ui32NewTotalHandCount = psBase->ui32MaxIndexPlusOne;
++
++ ui32DeltaAdjusted = ui32NewTotalHandCount - psBase->ui32TotalHandCount;
++
++ if (ui32DeltaAdjusted < ui32Delta)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "IncreaseHandleArraySize: Maximum handle limit reached (%d)", psBase->ui32MaxIndexPlusOne));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ }
++
++ PVR_ASSERT(ui32DeltaAdjusted >= ui32Delta);
++
++
++ eError = ReallocHandleArray(psBase, ui32NewTotalHandCount, psBase->ui32TotalHandCount);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "IncreaseHandleArraySize: ReallocHandleArray failed (%d)", eError));
++ return eError;
++ }
++
++
++ for(psHandle = psBase->psHandleArray + psBase->ui32TotalHandCount;
++ psHandle < psBase->psHandleArray + ui32NewTotalHandCount;
++ psHandle++)
++ {
++ psHandle->eType = PVRSRV_HANDLE_TYPE_NONE;
++ psHandle->eInternalFlag = INTERNAL_HANDLE_FLAG_NONE;
++ psHandle->ui32NextIndexPlusOne = 0;
++ }
++
++
++ psBase->ui32FreeHandCount += ui32DeltaAdjusted;
++
++ if (psBase->ui32FirstFreeIndex == 0)
++ {
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0);
++
++ psBase->ui32FirstFreeIndex = psBase->ui32TotalHandCount;
++ }
++ else
++ {
++ if (!psBase->bPurgingEnabled)
++ {
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0)
++ PVR_ASSERT(INDEX_TO_HANDLE_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne == 0);
++
++ INDEX_TO_HANDLE_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne = psBase->ui32TotalHandCount + 1;
++ }
++ }
++
++ if (!psBase->bPurgingEnabled)
++ {
++ psBase->ui32LastFreeIndexPlusOne = ui32NewTotalHandCount;
++ }
++
++ psBase->ui32TotalHandCount = ui32NewTotalHandCount;
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR EnsureFreeHandles(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32Free)
++{
++ PVRSRV_ERROR eError;
++
++ if (ui32Free > psBase->ui32FreeHandCount)
++ {
++ IMG_UINT32 ui32FreeHandDelta = ui32Free - psBase->ui32FreeHandCount;
++ eError = IncreaseHandleArraySize(psBase, ui32FreeHandDelta);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "EnsureFreeHandles: Couldn't allocate %u handles to ensure %u free handles (IncreaseHandleArraySize failed with error %d)", ui32FreeHandDelta, ui32Free, eError));
++
++ return eError;
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR AllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent)
++{
++ IMG_UINT32 ui32NewIndex;
++ struct sHandle *psNewHandle = IMG_NULL;
++ IMG_HANDLE hHandle;
++ HAND_KEY aKey;
++ PVRSRV_ERROR eError;
++
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ PVR_ASSERT(psBase->psHashTab != IMG_NULL);
++
++ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++ {
++
++ PVR_ASSERT(FindHandle(psBase, pvData, eType, hParent) == IMG_NULL);
++ }
++
++ if (psBase->ui32FreeHandCount == 0 && HANDLES_BATCHED(psBase))
++ {
++ PVR_DPF((PVR_DBG_WARNING, "AllocHandle: Handle batch size (%u) was too small, allocating additional space", psBase->ui32HandBatchSize));
++ }
++
++
++ eError = EnsureFreeHandles(psBase, 1);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "AllocHandle: EnsureFreeHandles failed (%d)", eError));
++ return eError;
++ }
++ PVR_ASSERT(psBase->ui32FreeHandCount != 0)
++
++ if (!psBase->bPurgingEnabled)
++ {
++
++ ui32NewIndex = psBase->ui32FirstFreeIndex;
++
++
++ psNewHandle = INDEX_TO_HANDLE_PTR(psBase, ui32NewIndex);
++ }
++ else
++ {
++
++ for(ui32NewIndex = psBase->ui32FirstFreeIndex; ui32NewIndex < psBase->ui32TotalHandCount; ui32NewIndex++)
++ {
++ psNewHandle = INDEX_TO_HANDLE_PTR(psBase, ui32NewIndex);
++ if (HANDLE_STRUCT_IS_FREE(psNewHandle))
++ {
++ break;
++ }
++
++ }
++ psBase->ui32FirstFreeIndex = 0;
++ PVR_ASSERT(ui32NewIndex < psBase->ui32TotalHandCount);
++ }
++ PVR_ASSERT(psNewHandle != IMG_NULL);
++
++
++ hHandle = INDEX_TO_HANDLE(psBase, ui32NewIndex);
++
++
++ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++ {
++
++ InitKey(aKey, psBase, pvData, eType, hParent);
++
++
++ if (!HASH_Insert_Extended(psBase->psHashTab, aKey, (IMG_UINTPTR_T)hHandle))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "AllocHandle: Couldn't add handle to hash table"));
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ psBase->ui32FreeHandCount--;
++
++
++ if (!psBase->bPurgingEnabled)
++ {
++
++ if (psBase->ui32FreeHandCount == 0)
++ {
++ PVR_ASSERT(psBase->ui32FirstFreeIndex == ui32NewIndex);
++ PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == (ui32NewIndex + 1));
++
++ psBase->ui32LastFreeIndexPlusOne = 0;
++ psBase->ui32FirstFreeIndex = 0;
++ }
++ else
++ {
++
++ psBase->ui32FirstFreeIndex = (psNewHandle->ui32NextIndexPlusOne == 0) ?
++ ui32NewIndex + 1 :
++ psNewHandle->ui32NextIndexPlusOne - 1;
++ }
++ }
++
++
++ psNewHandle->eType = eType;
++ psNewHandle->pvData = pvData;
++ psNewHandle->eInternalFlag = INTERNAL_HANDLE_FLAG_NONE;
++ psNewHandle->eFlag = eFlag;
++ psNewHandle->ui32Index = ui32NewIndex;
++
++ InitParentList(psBase, psNewHandle);
++#if defined(DEBUG)
++ PVR_ASSERT(NoChildren(psBase, psNewHandle));
++#endif
++
++ InitChildEntry(psBase, psNewHandle);
++#if defined(DEBUG)
++ PVR_ASSERT(NoParent(psBase, psNewHandle));
++#endif
++
++ if (HANDLES_BATCHED(psBase))
++ {
++
++ psNewHandle->ui32NextIndexPlusOne = psBase->ui32FirstBatchIndexPlusOne;
++
++ psBase->ui32FirstBatchIndexPlusOne = ui32NewIndex + 1;
++
++ SET_BATCHED_HANDLE(psNewHandle);
++ }
++ else
++ {
++ psNewHandle->ui32NextIndexPlusOne = 0;
++ }
++
++
++ *phHandle = hHandle;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag)
++{
++ IMG_HANDLE hHandle;
++ PVRSRV_ERROR eError;
++
++ *phHandle = IMG_NULL;
++
++ if (HANDLES_BATCHED(psBase))
++ {
++
++ psBase->ui32BatchHandAllocFailures++;
++ }
++
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++ {
++
++ hHandle = FindHandle(psBase, pvData, eType, IMG_NULL);
++ if (hHandle != IMG_NULL)
++ {
++ struct sHandle *psHandle;
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandle: Lookup of existing handle failed"));
++ return eError;
++ }
++
++
++ if (TEST_FLAG(psHandle->eFlag & eFlag, PVRSRV_HANDLE_ALLOC_FLAG_SHARED))
++ {
++ *phHandle = hHandle;
++ eError = PVRSRV_OK;
++ goto exit_ok;
++ }
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ eError = AllocHandle(psBase, phHandle, pvData, eType, eFlag, IMG_NULL);
++
++exit_ok:
++ if (HANDLES_BATCHED(psBase) && (eError == PVRSRV_OK))
++ {
++ psBase->ui32BatchHandAllocFailures--;
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent)
++{
++ struct sHandle *psPHand;
++ struct sHandle *psCHand;
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hParentKey;
++ IMG_HANDLE hHandle;
++
++ *phHandle = IMG_NULL;
++
++ if (HANDLES_BATCHED(psBase))
++ {
++
++ psBase->ui32BatchHandAllocFailures++;
++ }
++
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ hParentKey = TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ?
++ hParent : IMG_NULL;
++
++
++ eError = GetHandleStructure(psBase, &psPHand, hParent, PVRSRV_HANDLE_TYPE_NONE);
++ if (eError != PVRSRV_OK)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++ {
++
++ hHandle = FindHandle(psBase, pvData, eType, hParentKey);
++ if (hHandle != IMG_NULL)
++ {
++ struct sHandle *psCHandle;
++ PVRSRV_ERROR eErr;
++
++ eErr = GetHandleStructure(psBase, &psCHandle, hHandle, eType);
++ if (eErr != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Lookup of existing handle failed"));
++ return eErr;
++ }
++
++ PVR_ASSERT(hParentKey != IMG_NULL && ParentHandle(HANDLE_TO_HANDLE_PTR(psBase, hHandle)) == hParent);
++
++
++ if (TEST_FLAG(psCHandle->eFlag & eFlag, PVRSRV_HANDLE_ALLOC_FLAG_SHARED) && ParentHandle(HANDLE_TO_HANDLE_PTR(psBase, hHandle)) == hParent)
++ {
++ *phHandle = hHandle;
++ goto exit_ok;
++ }
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ eError = AllocHandle(psBase, &hHandle, pvData, eType, eFlag, hParentKey);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++
++ psPHand = HANDLE_TO_HANDLE_PTR(psBase, hParent);
++
++ psCHand = HANDLE_TO_HANDLE_PTR(psBase, hHandle);
++
++ AdoptChild(psBase, psPHand, psCHand);
++
++ *phHandle = hHandle;
++
++exit_ok:
++ if (HANDLES_BATCHED(psBase))
++ {
++ psBase->ui32BatchHandAllocFailures--;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType)
++{
++ IMG_HANDLE hHandle;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++
++ hHandle = (IMG_HANDLE) FindHandle(psBase, pvData, eType, IMG_NULL);
++ if (hHandle == IMG_NULL)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ *phHandle = hHandle;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle)
++{
++ struct sHandle *psHandle;
++ PVRSRV_ERROR eError;
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, PVRSRV_HANDLE_TYPE_NONE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupHandleAnyType: Error looking up handle (%d)", eError));
++ return eError;
++ }
++
++ *ppvData = psHandle->pvData;
++ *peType = psHandle->eType;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ struct sHandle *psHandle;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupHandle: Error looking up handle (%d)", eError));
++ return eError;
++ }
++
++ *ppvData = psHandle->pvData;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor)
++{
++ struct sHandle *psPHand;
++ struct sHandle *psCHand;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psCHand, hHandle, eType);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupSubHandle: Error looking up subhandle (%d)", eError));
++ return eError;
++ }
++
++
++ for (psPHand = psCHand; ParentHandle(psPHand) != hAncestor; )
++ {
++ eError = GetHandleStructure(psBase, &psPHand, ParentHandle(psPHand), PVRSRV_HANDLE_TYPE_NONE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupSubHandle: Subhandle doesn't belong to given ancestor"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ *ppvData = psCHand->pvData;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ struct sHandle *psHandle;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetParentHandle: Error looking up subhandle (%d)", eError));
++ return eError;
++ }
++
++ *phParent = ParentHandle(psHandle);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ struct sHandle *psHandle;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupAndReleaseHandle: Error looking up handle (%d)", eError));
++ return eError;
++ }
++
++ *ppvData = psHandle->pvData;
++
++ eError = FreeHandle(psBase, psHandle);
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ struct sHandle *psHandle;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++ eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVReleaseHandle: Error looking up handle (%d)", eError));
++ return eError;
++ }
++
++ eError = FreeHandle(psBase, psHandle);
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32BatchSize)
++{
++ PVRSRV_ERROR eError;
++
++ if (HANDLES_BATCHED(psBase))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVNewHandleBatch: There is a handle batch already in use (size %u)", psBase->ui32HandBatchSize));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (ui32BatchSize == 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVNewHandleBatch: Invalid batch size (%u)", ui32BatchSize));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ eError = EnsureFreeHandles(psBase, ui32BatchSize);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVNewHandleBatch: EnsureFreeHandles failed (error %d)", eError));
++ return eError;
++ }
++
++ psBase->ui32HandBatchSize = ui32BatchSize;
++
++
++ psBase->ui32TotalHandCountPreBatch = psBase->ui32TotalHandCount;
++
++ PVR_ASSERT(psBase->ui32BatchHandAllocFailures == 0);
++
++ PVR_ASSERT(psBase->ui32FirstBatchIndexPlusOne == 0);
++
++ PVR_ASSERT(HANDLES_BATCHED(psBase));
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR PVRSRVHandleBatchCommitOrRelease(PVRSRV_HANDLE_BASE *psBase, IMG_BOOL bCommit)
++{
++
++ IMG_UINT32 ui32IndexPlusOne;
++ IMG_BOOL bCommitBatch = bCommit;
++
++ if (!HANDLES_BATCHED(psBase))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleBatchCommitOrRelease: There is no handle batch"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++
++ }
++
++ if (psBase->ui32BatchHandAllocFailures != 0)
++ {
++ if (bCommit)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleBatchCommitOrRelease: Attempting to commit batch with handle allocation failures."));
++ }
++ bCommitBatch = IMG_FALSE;
++ }
++
++ PVR_ASSERT(psBase->ui32BatchHandAllocFailures == 0 || !bCommit);
++
++ ui32IndexPlusOne = psBase->ui32FirstBatchIndexPlusOne;
++ while(ui32IndexPlusOne != 0)
++ {
++ struct sHandle *psHandle = INDEX_TO_HANDLE_PTR(psBase, ui32IndexPlusOne - 1);
++ IMG_UINT32 ui32NextIndexPlusOne = psHandle->ui32NextIndexPlusOne;
++ PVR_ASSERT(BATCHED_HANDLE(psHandle));
++
++ psHandle->ui32NextIndexPlusOne = 0;
++
++ if (!bCommitBatch || BATCHED_HANDLE_PARTIALLY_FREE(psHandle))
++ {
++ PVRSRV_ERROR eError;
++
++
++ if (!BATCHED_HANDLE_PARTIALLY_FREE(psHandle))
++ {
++ SET_UNBATCHED_HANDLE(psHandle);
++ }
++
++ eError = FreeHandle(psBase, psHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleBatchCommitOrRelease: Error freeing handle (%d)", eError));
++ }
++ PVR_ASSERT(eError == PVRSRV_OK);
++ }
++ else
++ {
++ SET_UNBATCHED_HANDLE(psHandle);
++ }
++
++ ui32IndexPlusOne = ui32NextIndexPlusOne;
++ }
++
++#ifdef DEBUG
++ if (psBase->ui32TotalHandCountPreBatch != psBase->ui32TotalHandCount)
++ {
++ IMG_UINT32 ui32Delta = psBase->ui32TotalHandCount - psBase->ui32TotalHandCountPreBatch;
++
++ PVR_ASSERT(psBase->ui32TotalHandCount > psBase->ui32TotalHandCountPreBatch);
++
++ PVR_DPF((PVR_DBG_WARNING, "PVRSRVHandleBatchCommitOrRelease: The batch size was too small. Batch size was %u, but needs to be %u", psBase->ui32HandBatchSize, psBase->ui32HandBatchSize + ui32Delta));
++
++ }
++#endif
++
++ psBase->ui32HandBatchSize = 0;
++ psBase->ui32FirstBatchIndexPlusOne = 0;
++ psBase->ui32TotalHandCountPreBatch = 0;
++ psBase->ui32BatchHandAllocFailures = 0;
++
++ if (psBase->ui32BatchHandAllocFailures != 0 && bCommit)
++ {
++ PVR_ASSERT(!bCommitBatch);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE *psBase)
++{
++ return PVRSRVHandleBatchCommitOrRelease(psBase, IMG_TRUE);
++}
++
++IMG_VOID PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE *psBase)
++{
++ (IMG_VOID) PVRSRVHandleBatchCommitOrRelease(psBase, IMG_FALSE);
++}
++
++PVRSRV_ERROR PVRSRVSetMaxHandle(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32MaxHandle)
++{
++ if (HANDLES_BATCHED(psBase))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSetMaxHandle: Limit cannot be set whilst in batch mode"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ if (ui32MaxHandle == 0 || ui32MaxHandle >= DEFAULT_MAX_HANDLE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSetMaxHandle: Limit must be between %u and %u, inclusive", 0, DEFAULT_MAX_HANDLE));
++
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ if (psBase->ui32TotalHandCount != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSetMaxHandle: Limit cannot be set becuase handles have already been allocated"));
++
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psBase->ui32MaxIndexPlusOne = ui32MaxHandle;
++
++ return PVRSRV_OK;
++}
++
++IMG_UINT32 PVRSRVGetMaxHandle(PVRSRV_HANDLE_BASE *psBase)
++{
++ return psBase->ui32MaxIndexPlusOne;
++}
++
++PVRSRV_ERROR PVRSRVEnableHandlePurging(PVRSRV_HANDLE_BASE *psBase)
++{
++ if (psBase->bPurgingEnabled)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "PVRSRVEnableHandlePurging: Purging already enabled"));
++ return PVRSRV_OK;
++ }
++
++
++ if (psBase->ui32TotalHandCount != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVEnableHandlePurging: Handles have already been allocated"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psBase->bPurgingEnabled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase)
++{
++ IMG_UINT32 ui32Handle;
++ IMG_UINT32 ui32NewHandCount;
++
++ if (!psBase->bPurgingEnabled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPurgeHandles: Purging not enabled for this handle base"));
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++ }
++
++ if (HANDLES_BATCHED(psBase))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPurgeHandles: Purging not allowed whilst in batch mode"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ for (ui32Handle = psBase->ui32TotalHandCount; ui32Handle != 0; ui32Handle--)
++ {
++ struct sHandle *psHandle = HANDLE_TO_HANDLE_PTR(psBase, ui32Handle);
++ if (!HANDLE_STRUCT_IS_FREE(psHandle))
++ {
++ break;
++ }
++ }
++
++ ui32NewHandCount = ROUND_UP_TO_MULTIPLE(ui32Handle, HANDLE_BLOCK_SIZE);
++
++
++ if (ui32NewHandCount >= ui32Handle && ui32NewHandCount <= (psBase->ui32TotalHandCount/2))
++ {
++ IMG_UINT32 ui32Delta = psBase->ui32TotalHandCount - ui32NewHandCount;
++ PVRSRV_ERROR eError;
++
++
++
++ eError = ReallocHandleArray(psBase, ui32NewHandCount, psBase->ui32TotalHandCount);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++
++ psBase->ui32TotalHandCount = ui32NewHandCount;
++ psBase->ui32FreeHandCount -= ui32Delta;
++ psBase->ui32FirstFreeIndex = 0;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase)
++{
++ PVRSRV_HANDLE_BASE *psBase;
++ IMG_HANDLE hBlockAlloc;
++ PVRSRV_ERROR eError;
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*psBase),
++ (IMG_PVOID *)&psBase,
++ &hBlockAlloc,
++ "Handle Base");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't allocate handle base (%d)", eError));
++ return eError;
++ }
++ OSMemSet(psBase, 0, sizeof(*psBase));
++
++
++ psBase->psHashTab = HASH_Create_Extended(HANDLE_HASH_TAB_INIT_SIZE, sizeof(HAND_KEY), HASH_Func_Default, HASH_Key_Comp_Default);
++ if (psBase->psHashTab == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't create data pointer hash table\n"));
++ goto failure;
++ }
++
++ psBase->hBaseBlockAlloc = hBlockAlloc;
++
++ psBase->ui32MaxIndexPlusOne = DEFAULT_MAX_INDEX_PLUS_ONE;
++
++ *ppsBase = psBase;
++
++ return PVRSRV_OK;
++failure:
++ (IMG_VOID)PVRSRVFreeHandleBase(psBase);
++ return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(psBase != gpsKernelHandleBase);
++
++ eError = FreeHandleBase(psBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVFreeHandleBase: FreeHandleBase failed (%d)", eError));
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(gpsKernelHandleBase == IMG_NULL);
++
++ eError = PVRSRVAllocHandleBase(&gpsKernelHandleBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleInit: PVRSRVAllocHandleBase failed (%d)", eError));
++ goto error;
++ }
++
++ eError = PVRSRVEnableHandlePurging(gpsKernelHandleBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleInit: PVRSRVEnableHandlePurging failed (%d)", eError));
++ goto error;
++ }
++
++ return PVRSRV_OK;
++error:
++ (IMG_VOID) PVRSRVHandleDeInit();
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if (gpsKernelHandleBase != IMG_NULL)
++ {
++ eError = FreeHandleBase(gpsKernelHandleBase);
++ if (eError == PVRSRV_OK)
++ {
++ gpsKernelHandleBase = IMG_NULL;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVHandleDeInit: FreeHandleBase failed (%d)", eError));
++ }
++ }
++
++ return eError;
++}
++#else
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/hash.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/hash.c
+new file mode 100644
+index 0000000..489a9c5
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/hash.c
+@@ -0,0 +1,463 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "pvr_debug.h"
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "hash.h"
++#include "osfunc.h"
++
++#define PRIVATE_MAX(a,b) ((a)>(b)?(a):(b))
++
++#define KEY_TO_INDEX(pHash, key, uSize) \
++ ((pHash)->pfnHashFunc((pHash)->uKeySize, key, uSize) % uSize)
++
++#define KEY_COMPARE(pHash, pKey1, pKey2) \
++ ((pHash)->pfnKeyComp((pHash)->uKeySize, pKey1, pKey2))
++
++struct _BUCKET_
++{
++
++ struct _BUCKET_ *pNext;
++
++
++ IMG_UINTPTR_T v;
++
++
++ IMG_UINTPTR_T k[];
++};
++typedef struct _BUCKET_ BUCKET;
++
++struct _HASH_TABLE_
++{
++
++ BUCKET **ppBucketTable;
++
++
++ IMG_UINT32 uSize;
++
++
++ IMG_UINT32 uCount;
++
++
++ IMG_UINT32 uMinimumSize;
++
++
++ IMG_UINT32 uKeySize;
++
++
++ HASH_FUNC *pfnHashFunc;
++
++
++ HASH_KEY_COMP *pfnKeyComp;
++};
++
++IMG_UINT32
++HASH_Func_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen)
++{
++ IMG_UINTPTR_T *p = (IMG_UINTPTR_T *)pKey;
++ IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINTPTR_T);
++ IMG_UINT32 ui;
++ IMG_UINT32 uHashKey = 0;
++
++ PVR_UNREFERENCED_PARAMETER(uHashTabLen);
++
++ PVR_ASSERT((uKeySize % sizeof(IMG_UINTPTR_T)) == 0);
++
++ for (ui = 0; ui < uKeyLen; ui++)
++ {
++ IMG_UINT32 uHashPart = (IMG_UINT32)*p++;
++
++ uHashPart += (uHashPart << 12);
++ uHashPart ^= (uHashPart >> 22);
++ uHashPart += (uHashPart << 4);
++ uHashPart ^= (uHashPart >> 9);
++ uHashPart += (uHashPart << 10);
++ uHashPart ^= (uHashPart >> 2);
++ uHashPart += (uHashPart << 7);
++ uHashPart ^= (uHashPart >> 12);
++
++ uHashKey += uHashPart;
++ }
++
++ return uHashKey;
++}
++
++IMG_BOOL
++HASH_Key_Comp_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2)
++{
++ IMG_UINTPTR_T *p1 = (IMG_UINTPTR_T *)pKey1;
++ IMG_UINTPTR_T *p2 = (IMG_UINTPTR_T *)pKey2;
++ IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINTPTR_T);
++ IMG_UINT32 ui;
++
++ PVR_ASSERT((uKeySize % sizeof(IMG_UINTPTR_T)) == 0);
++
++ for (ui = 0; ui < uKeyLen; ui++)
++ {
++ if (*p1++ != *p2++)
++ return IMG_FALSE;
++ }
++
++ return IMG_TRUE;
++}
++
++static PVRSRV_ERROR
++_ChainInsert (HASH_TABLE *pHash, BUCKET *pBucket, BUCKET **ppBucketTable, IMG_UINT32 uSize)
++{
++ IMG_UINT32 uIndex;
++
++ PVR_ASSERT (pBucket != IMG_NULL);
++ PVR_ASSERT (ppBucketTable != IMG_NULL);
++ PVR_ASSERT (uSize != 0);
++
++ if ((pBucket == IMG_NULL) || (ppBucketTable == IMG_NULL) || (uSize == 0))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_ChainInsert: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ uIndex = KEY_TO_INDEX(pHash, pBucket->k, uSize);
++ pBucket->pNext = ppBucketTable[uIndex];
++ ppBucketTable[uIndex] = pBucket;
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR
++_Rehash (HASH_TABLE *pHash,
++ BUCKET **ppOldTable, IMG_UINT32 uOldSize,
++ BUCKET **ppNewTable, IMG_UINT32 uNewSize)
++{
++ IMG_UINT32 uIndex;
++ for (uIndex=0; uIndex< uOldSize; uIndex++)
++ {
++ BUCKET *pBucket;
++ pBucket = ppOldTable[uIndex];
++ while (pBucket != IMG_NULL)
++ {
++ BUCKET *pNextBucket = pBucket->pNext;
++ if (_ChainInsert (pHash, pBucket, ppNewTable, uNewSize) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_Rehash: call to _ChainInsert failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ pBucket = pNextBucket;
++ }
++ }
++ return PVRSRV_OK;
++}
++
++static IMG_BOOL
++_Resize (HASH_TABLE *pHash, IMG_UINT32 uNewSize)
++{
++ if (uNewSize != pHash->uSize)
++ {
++ BUCKET **ppNewTable;
++ IMG_UINT32 uIndex;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "HASH_Resize: oldsize=0x%x newsize=0x%x count=0x%x",
++ pHash->uSize, uNewSize, pHash->uCount));
++
++ OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof (BUCKET *) * uNewSize,
++ (IMG_PVOID*)&ppNewTable, IMG_NULL,
++ "Hash Table Buckets");
++ if (ppNewTable == IMG_NULL)
++ return IMG_FALSE;
++
++ for (uIndex=0; uIndex<uNewSize; uIndex++)
++ ppNewTable[uIndex] = IMG_NULL;
++
++ if (_Rehash (pHash, pHash->ppBucketTable, pHash->uSize, ppNewTable, uNewSize) != PVRSRV_OK)
++ {
++ return IMG_FALSE;
++ }
++
++ OSFreeMem (PVRSRV_PAGEABLE_SELECT, sizeof(BUCKET *)*pHash->uSize, pHash->ppBucketTable, IMG_NULL);
++
++ pHash->ppBucketTable = ppNewTable;
++ pHash->uSize = uNewSize;
++ }
++ return IMG_TRUE;
++}
++
++
++HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, IMG_SIZE_T uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp)
++{
++ HASH_TABLE *pHash;
++ IMG_UINT32 uIndex;
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Create_Extended: InitialSize=0x%x", uInitialLen));
++
++ if(OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(HASH_TABLE),
++ (IMG_VOID **)&pHash, IMG_NULL,
++ "Hash Table") != PVRSRV_OK)
++ {
++ return IMG_NULL;
++ }
++
++ pHash->uCount = 0;
++ pHash->uSize = uInitialLen;
++ pHash->uMinimumSize = uInitialLen;
++ pHash->uKeySize = uKeySize;
++ pHash->pfnHashFunc = pfnHashFunc;
++ pHash->pfnKeyComp = pfnKeyComp;
++
++ OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof (BUCKET *) * pHash->uSize,
++ (IMG_PVOID*)&pHash->ppBucketTable, IMG_NULL,
++ "Hash Table Buckets");
++
++ if (pHash->ppBucketTable == IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(HASH_TABLE), pHash, IMG_NULL);
++
++ return IMG_NULL;
++ }
++
++ for (uIndex=0; uIndex<pHash->uSize; uIndex++)
++ pHash->ppBucketTable[uIndex] = IMG_NULL;
++ return pHash;
++}
++
++HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen)
++{
++ return HASH_Create_Extended(uInitialLen, sizeof(IMG_UINTPTR_T),
++ &HASH_Func_Default, &HASH_Key_Comp_Default);
++}
++
++IMG_VOID
++HASH_Delete (HASH_TABLE *pHash)
++{
++ if (pHash != IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Delete"));
++
++ PVR_ASSERT (pHash->uCount==0);
++ if(pHash->uCount != 0)
++ {
++ PVR_DPF ((PVR_DBG_ERROR, "HASH_Delete: leak detected in hash table!"));
++ PVR_DPF ((PVR_DBG_ERROR, "Likely Cause: client drivers not freeing alocations before destroying devmemcontext"));
++ }
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(BUCKET *)*pHash->uSize, pHash->ppBucketTable, IMG_NULL);
++ pHash->ppBucketTable = IMG_NULL;
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(HASH_TABLE), pHash, IMG_NULL);
++
++ }
++}
++
++IMG_BOOL
++HASH_Insert_Extended (HASH_TABLE *pHash, IMG_VOID *pKey, IMG_UINTPTR_T v)
++{
++ BUCKET *pBucket;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "HASH_Insert_Extended: Hash=%08X, pKey=%08X, v=0x%x", pHash, pKey, v));
++
++ PVR_ASSERT (pHash != IMG_NULL);
++
++ if (pHash == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "HASH_Insert_Extended: invalid parameter"));
++ return IMG_FALSE;
++ }
++
++ if(OSAllocMem(PVRSRV_PAGEABLE_SELECT,
++ sizeof(BUCKET) + pHash->uKeySize,
++ (IMG_VOID **)&pBucket, IMG_NULL,
++ "Hash Table entry") != PVRSRV_OK)
++ {
++ return IMG_FALSE;
++ }
++
++ pBucket->v = v;
++
++ OSMemCopy(pBucket->k, pKey, pHash->uKeySize);
++ if (_ChainInsert (pHash, pBucket, pHash->ppBucketTable, pHash->uSize) != PVRSRV_OK)
++ {
++ return IMG_FALSE;
++ }
++
++ pHash->uCount++;
++
++
++ if (pHash->uCount << 1 > pHash->uSize)
++ {
++
++
++ _Resize (pHash, pHash->uSize << 1);
++ }
++
++
++ return IMG_TRUE;
++}
++
++IMG_BOOL
++HASH_Insert (HASH_TABLE *pHash, IMG_UINTPTR_T k, IMG_UINTPTR_T v)
++{
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "HASH_Insert: Hash=%08X, k=0x%x, v=0x%x", pHash, k, v));
++
++ return HASH_Insert_Extended(pHash, &k, v);
++}
++
++IMG_UINTPTR_T
++HASH_Remove_Extended(HASH_TABLE *pHash, IMG_VOID *pKey)
++{
++ BUCKET **ppBucket;
++ IMG_UINT32 uIndex;
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Remove_Extended: Hash=%08X, pKey=%08X", pHash, pKey));
++
++ PVR_ASSERT (pHash != IMG_NULL);
++
++ if (pHash == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "HASH_Remove_Extended: Null hash table"));
++ return 0;
++ }
++
++ uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
++
++ for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != IMG_NULL; ppBucket = &((*ppBucket)->pNext))
++ {
++
++ if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
++ {
++ BUCKET *pBucket = *ppBucket;
++ IMG_UINTPTR_T v = pBucket->v;
++ (*ppBucket) = pBucket->pNext;
++
++ OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(BUCKET) + pHash->uKeySize, pBucket, IMG_NULL);
++
++
++ pHash->uCount--;
++
++
++ if (pHash->uSize > (pHash->uCount << 2) &&
++ pHash->uSize > pHash->uMinimumSize)
++ {
++
++
++ _Resize (pHash,
++ PRIVATE_MAX (pHash->uSize >> 1,
++ pHash->uMinimumSize));
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "HASH_Remove_Extended: Hash=%08X, pKey=%08X = 0x%x",
++ pHash, pKey, v));
++ return v;
++ }
++ }
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "HASH_Remove_Extended: Hash=%08X, pKey=%08X = 0x0 !!!!", pHash, pKey));
++ return 0;
++}
++
++IMG_UINTPTR_T
++HASH_Remove (HASH_TABLE *pHash, IMG_UINTPTR_T k)
++{
++ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Remove: Hash=%08X, k=0x%x", pHash, k));
++
++ return HASH_Remove_Extended(pHash, &k);
++}
++
++IMG_UINTPTR_T
++HASH_Retrieve_Extended (HASH_TABLE *pHash, IMG_VOID *pKey)
++{
++ BUCKET **ppBucket;
++ IMG_UINT32 uIndex;
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Retrieve_Extended: Hash=%08X, pKey=%08X", pHash,pKey));
++
++ PVR_ASSERT (pHash != IMG_NULL);
++
++ if (pHash == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "HASH_Retrieve_Extended: Null hash table"));
++ return 0;
++ }
++
++ uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
++
++ for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != IMG_NULL; ppBucket = &((*ppBucket)->pNext))
++ {
++
++ if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
++ {
++ BUCKET *pBucket = *ppBucket;
++ IMG_UINTPTR_T v = pBucket->v;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "HASH_Retrieve: Hash=%08X, pKey=%08X = 0x%x",
++ pHash, pKey, v));
++ return v;
++ }
++ }
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "HASH_Retrieve: Hash=%08X, pKey=%08X = 0x0 !!!!", pHash, pKey));
++ return 0;
++}
++
++IMG_UINTPTR_T
++HASH_Retrieve (HASH_TABLE *pHash, IMG_UINTPTR_T k)
++{
++ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Retrieve: Hash=%08X, k=0x%x", pHash,k));
++ return HASH_Retrieve_Extended(pHash, &k);
++}
++
++#ifdef HASH_TRACE
++IMG_VOID
++HASH_Dump (HASH_TABLE *pHash)
++{
++ IMG_UINT32 uIndex;
++ IMG_UINT32 uMaxLength=0;
++ IMG_UINT32 uEmptyCount=0;
++
++ PVR_ASSERT (pHash != IMG_NULL);
++ for (uIndex=0; uIndex<pHash->uSize; uIndex++)
++ {
++ BUCKET *pBucket;
++ IMG_UINT32 uLength = 0;
++ if (pHash->ppBucketTable[uIndex] == IMG_NULL)
++ uEmptyCount++;
++ for (pBucket=pHash->ppBucketTable[uIndex];
++ pBucket != IMG_NULL;
++ pBucket = pBucket->pNext)
++ uLength++;
++ uMaxLength = PRIVATE_MAX (uMaxLength, uLength);
++ }
++
++ PVR_TRACE(("hash table: uMinimumSize=%d size=%d count=%d",
++ pHash->uMinimumSize, pHash->uSize, pHash->uCount));
++ PVR_TRACE((" empty=%d max=%d", uEmptyCount, uMaxLength));
++}
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/lists.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/lists.c
+new file mode 100644
+index 0000000..cb54071
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/lists.c
+@@ -0,0 +1,99 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "lists.h"
++#include "services_headers.h"
++
++IMPLEMENT_LIST_ANY_VA(BM_HEAP)
++IMPLEMENT_LIST_ANY_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK)
++IMPLEMENT_LIST_ANY_VA_2(BM_HEAP, PVRSRV_ERROR, PVRSRV_OK)
++IMPLEMENT_LIST_FOR_EACH_VA(BM_HEAP)
++IMPLEMENT_LIST_REMOVE(BM_HEAP)
++IMPLEMENT_LIST_INSERT(BM_HEAP)
++
++IMPLEMENT_LIST_ANY_VA(BM_CONTEXT)
++IMPLEMENT_LIST_ANY_VA_2(BM_CONTEXT, IMG_HANDLE, IMG_NULL)
++IMPLEMENT_LIST_ANY_VA_2(BM_CONTEXT, PVRSRV_ERROR, PVRSRV_OK)
++IMPLEMENT_LIST_FOR_EACH(BM_CONTEXT)
++IMPLEMENT_LIST_REMOVE(BM_CONTEXT)
++IMPLEMENT_LIST_INSERT(BM_CONTEXT)
++
++IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK)
++IMPLEMENT_LIST_ANY_VA(PVRSRV_DEVICE_NODE)
++IMPLEMENT_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK)
++IMPLEMENT_LIST_FOR_EACH(PVRSRV_DEVICE_NODE)
++IMPLEMENT_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE)
++IMPLEMENT_LIST_INSERT(PVRSRV_DEVICE_NODE)
++IMPLEMENT_LIST_REMOVE(PVRSRV_DEVICE_NODE)
++
++IMPLEMENT_LIST_ANY_VA(PVRSRV_POWER_DEV)
++IMPLEMENT_LIST_ANY_VA_2(PVRSRV_POWER_DEV, PVRSRV_ERROR, PVRSRV_OK)
++IMPLEMENT_LIST_INSERT(PVRSRV_POWER_DEV)
++IMPLEMENT_LIST_REMOVE(PVRSRV_POWER_DEV)
++
++
++IMG_VOID* MatchDeviceKM_AnyVaCb(PVRSRV_DEVICE_NODE* psDeviceNode, va_list va)
++{
++ IMG_UINT32 ui32DevIndex;
++ IMG_BOOL bIgnoreClass;
++ PVRSRV_DEVICE_CLASS eDevClass;
++
++ ui32DevIndex = va_arg(va, IMG_UINT32);
++ bIgnoreClass = va_arg(va, IMG_BOOL);
++ if (!bIgnoreClass)
++ {
++ eDevClass = va_arg(va, PVRSRV_DEVICE_CLASS);
++ }
++ else
++ {
++
++
++ eDevClass = PVRSRV_DEVICE_CLASS_FORCE_I32;
++ }
++
++ if ((bIgnoreClass || psDeviceNode->sDevId.eDeviceClass == eDevClass) &&
++ psDeviceNode->sDevId.ui32DeviceIndex == ui32DevIndex)
++ {
++ return psDeviceNode;
++ }
++ return IMG_NULL;
++}
++
++IMG_VOID* MatchPowerDeviceIndex_AnyVaCb(PVRSRV_POWER_DEV *psPowerDev, va_list va)
++{
++ IMG_UINT32 ui32DeviceIndex;
++
++ ui32DeviceIndex = va_arg(va, IMG_UINT32);
++
++ if (psPowerDev->ui32DeviceIndex == ui32DeviceIndex)
++ {
++ return psPowerDev;
++ }
++ else
++ {
++ return IMG_NULL;
++ }
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/mem.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/mem.c
+new file mode 100644
+index 0000000..ad2ec50
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/mem.c
+@@ -0,0 +1,151 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "pvr_bridge_km.h"
++
++
++static PVRSRV_ERROR
++FreeSharedSysMemCallBack(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ OSFreePages(psKernelMemInfo->ui32Flags,
++ psKernelMemInfo->ui32AllocSize,
++ psKernelMemInfo->pvLinAddrKM,
++ psKernelMemInfo->sMemBlk.hOSMemHandle);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ psKernelMemInfo,
++ IMG_NULL);
++
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT PVRSRV_ERROR
++PVRSRVAllocSharedSysMemoryKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_UINT32 ui32Flags,
++ IMG_SIZE_T ui32Size,
++ PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo)
++{
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ (IMG_VOID **)&psKernelMemInfo, IMG_NULL,
++ "Kernel Memory Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for meminfo"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ OSMemSet(psKernelMemInfo, 0, sizeof(*psKernelMemInfo));
++
++ ui32Flags &= ~PVRSRV_HAP_MAPTYPE_MASK;
++ ui32Flags |= PVRSRV_HAP_MULTI_PROCESS;
++ psKernelMemInfo->ui32Flags = ui32Flags;
++ psKernelMemInfo->ui32AllocSize = ui32Size;
++
++ if(OSAllocPages(psKernelMemInfo->ui32Flags,
++ psKernelMemInfo->ui32AllocSize,
++ HOST_PAGESIZE(),
++ &psKernelMemInfo->pvLinAddrKM,
++ &psKernelMemInfo->sMemBlk.hOSMemHandle)
++ != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for block"));
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO),
++ psKernelMemInfo,
++ 0);
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ psKernelMemInfo->sMemBlk.hResItem =
++ ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_MEM_INFO,
++ psKernelMemInfo,
++ 0,
++ FreeSharedSysMemCallBack);
++
++ *ppsKernelMemInfo = psKernelMemInfo;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT PVRSRV_ERROR
++PVRSRVFreeSharedSysMemoryKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
++{
++ PVRSRV_ERROR eError;
++
++ if(psKernelMemInfo->sMemBlk.hResItem)
++ {
++ eError = ResManFreeResByPtr(psKernelMemInfo->sMemBlk.hResItem);
++ }
++ else
++ {
++ eError = FreeSharedSysMemCallBack(psKernelMemInfo, 0);
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT PVRSRV_ERROR
++PVRSRVDissociateMemFromResmanKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(!psKernelMemInfo)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ if(psKernelMemInfo->sMemBlk.hResItem)
++ {
++ eError = ResManDissociateRes(psKernelMemInfo->sMemBlk.hResItem, IMG_NULL);
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDissociateMemFromResmanKM: ResManDissociateRes failed"));
++ PVR_DBG_BREAK;
++ return eError;
++ }
++
++ psKernelMemInfo->sMemBlk.hResItem = IMG_NULL;
++ }
++
++ return eError;
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/mem_debug.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/mem_debug.c
+new file mode 100644
+index 0000000..eeb86ae
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/mem_debug.c
+@@ -0,0 +1,250 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef MEM_DEBUG_C
++#define MEM_DEBUG_C
++
++#if defined(PVRSRV_DEBUG_OS_MEMORY)
++
++#include "img_types.h"
++#include "services_headers.h"
++
++#if defined (__cplusplus)
++extern "C"
++{
++#endif
++
++#define STOP_ON_ERROR 0
++
++
++
++
++
++
++
++
++
++ IMG_BOOL MemCheck(const IMG_PVOID pvAddr, const IMG_UINT8 ui8Pattern, IMG_SIZE_T uSize)
++ {
++ IMG_UINT8 *pui8Addr;
++ for (pui8Addr = (IMG_UINT8*)pvAddr; uSize > 0; uSize--, pui8Addr++)
++ {
++ if (*pui8Addr != ui8Pattern)
++ {
++ return IMG_FALSE;
++ }
++ }
++ return IMG_TRUE;
++ }
++
++
++
++ IMG_VOID OSCheckMemDebug(IMG_PVOID pvCpuVAddr, IMG_SIZE_T uSize, const IMG_CHAR *pszFileName, const IMG_UINT32 uLine)
++ {
++ OSMEM_DEBUG_INFO const *psInfo = (OSMEM_DEBUG_INFO *)((IMG_UINT32)pvCpuVAddr - TEST_BUFFER_PADDING_STATUS);
++
++
++ if (pvCpuVAddr == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : null pointer"
++ " - referenced %s:%d - allocated %s:%d",
++ pvCpuVAddr,
++ pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ while (STOP_ON_ERROR);
++ }
++
++
++ if (((IMG_UINT32)pvCpuVAddr&3) != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : invalid alignment"
++ " - referenced %s:%d - allocated %s:%d",
++ pvCpuVAddr,
++ pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ while (STOP_ON_ERROR);
++ }
++
++
++ if (!MemCheck((IMG_PVOID)psInfo->sGuardRegionBefore, 0xB1, sizeof(psInfo->sGuardRegionBefore)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : guard region before overwritten"
++ " - referenced %s:%d - allocated %s:%d",
++ pvCpuVAddr,
++ pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ while (STOP_ON_ERROR);
++ }
++
++
++ if (uSize != psInfo->uSize)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "Pointer 0x%X : supplied size was different to stored size (0x%X != 0x%X)"
++ " - referenced %s:%d - allocated %s:%d",
++ pvCpuVAddr, uSize, psInfo->uSize,
++ pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ while (STOP_ON_ERROR);
++ }
++
++
++ if ((0x01234567 ^ psInfo->uSizeParityCheck) != psInfo->uSize)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "Pointer 0x%X : stored size parity error (0x%X != 0x%X)"
++ " - referenced %s:%d - allocated %s:%d",
++ pvCpuVAddr, psInfo->uSize, 0x01234567 ^ psInfo->uSizeParityCheck,
++ pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ while (STOP_ON_ERROR);
++ }
++ else
++ {
++
++ uSize = psInfo->uSize;
++ }
++
++
++ if (uSize)
++ {
++ if (!MemCheck((IMG_VOID*)((IMG_UINT32)pvCpuVAddr + uSize), 0xB2, TEST_BUFFER_PADDING_AFTER))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : guard region after overwritten"
++ " - referenced from %s:%d - allocated from %s:%d",
++ pvCpuVAddr,
++ pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ }
++ }
++
++
++ if (psInfo->eValid != isAllocated)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Pointer 0x%X : not allocated (freed? %d)"
++ " - referenced %s:%d - freed %s:%d",
++ pvCpuVAddr, psInfo->eValid == isFree,
++ pszFileName, uLine,
++ psInfo->sFileName, psInfo->uLineNo));
++ while (STOP_ON_ERROR);
++ }
++ }
++
++ IMG_VOID debug_strcpy(IMG_CHAR *pDest, const IMG_CHAR *pSrc)
++ {
++ IMG_SIZE_T i = 0;
++
++ for (; i < 128; i++)
++ {
++ *pDest = *pSrc;
++ if (*pSrc == '\0') break;
++ pDest++;
++ pSrc++;
++ }
++ }
++
++ PVRSRV_ERROR OSAllocMem_Debug_Wrapper(IMG_UINT32 ui32Flags,
++ IMG_UINT32 ui32Size,
++ IMG_PVOID *ppvCpuVAddr,
++ IMG_HANDLE *phBlockAlloc,
++ IMG_CHAR *pszFilename,
++ IMG_UINT32 ui32Line)
++ {
++ OSMEM_DEBUG_INFO *psInfo;
++
++ PVRSRV_ERROR eError;
++
++ eError = OSAllocMem_Debug_Linux_Memory_Allocations(ui32Flags,
++ ui32Size + TEST_BUFFER_PADDING,
++ ppvCpuVAddr,
++ phBlockAlloc,
++ pszFilename,
++ ui32Line);
++
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++
++ OSMemSet((IMG_CHAR *)(*ppvCpuVAddr) + TEST_BUFFER_PADDING_STATUS, 0xBB, ui32Size);
++ OSMemSet((IMG_CHAR *)(*ppvCpuVAddr) + ui32Size + TEST_BUFFER_PADDING_STATUS, 0xB2, TEST_BUFFER_PADDING_AFTER);
++
++
++ psInfo = (OSMEM_DEBUG_INFO *)(*ppvCpuVAddr);
++
++ OSMemSet(psInfo->sGuardRegionBefore, 0xB1, sizeof(psInfo->sGuardRegionBefore));
++ debug_strcpy(psInfo->sFileName, pszFilename);
++ psInfo->uLineNo = ui32Line;
++ psInfo->eValid = isAllocated;
++ psInfo->uSize = ui32Size;
++ psInfo->uSizeParityCheck = 0x01234567 ^ ui32Size;
++
++
++ *ppvCpuVAddr = (IMG_PVOID) ((IMG_UINT32)*ppvCpuVAddr)+TEST_BUFFER_PADDING_STATUS;
++
++#ifdef PVRSRV_LOG_MEMORY_ALLOCS
++
++ PVR_TRACE(("Allocated pointer (after debug info): 0x%X from %s:%d", *ppvCpuVAddr, pszFilename, ui32Line));
++#endif
++
++ return PVRSRV_OK;
++ }
++
++ PVRSRV_ERROR OSFreeMem_Debug_Wrapper(IMG_UINT32 ui32Flags,
++ IMG_UINT32 ui32Size,
++ IMG_PVOID pvCpuVAddr,
++ IMG_HANDLE hBlockAlloc,
++ IMG_CHAR *pszFilename,
++ IMG_UINT32 ui32Line)
++ {
++ OSMEM_DEBUG_INFO *psInfo;
++
++
++ OSCheckMemDebug(pvCpuVAddr, ui32Size, pszFilename, ui32Line);
++
++
++ OSMemSet(pvCpuVAddr, 0xBF, ui32Size + TEST_BUFFER_PADDING_AFTER);
++
++
++ psInfo = (OSMEM_DEBUG_INFO *)((IMG_UINT32) pvCpuVAddr - TEST_BUFFER_PADDING_STATUS);
++
++
++ psInfo->uSize = 0;
++ psInfo->uSizeParityCheck = 0;
++ psInfo->eValid = isFree;
++ psInfo->uLineNo = ui32Line;
++ debug_strcpy(psInfo->sFileName, pszFilename);
++
++ return OSFreeMem_Debug_Linux_Memory_Allocations(ui32Flags, ui32Size + TEST_BUFFER_PADDING, psInfo, hBlockAlloc, pszFilename, ui32Line);
++ }
++
++#if defined (__cplusplus)
++
++}
++#endif
++
++#endif
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/metrics.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/metrics.c
+new file mode 100644
+index 0000000..216696e
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/metrics.c
+@@ -0,0 +1,160 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "metrics.h"
++
++#if defined(SUPPORT_VGX)
++#include "vgxapi_km.h"
++#endif
++
++#if defined(SUPPORT_SGX)
++#include "sgxapi_km.h"
++#endif
++
++#if defined(DEBUG) || defined(TIMING)
++
++static volatile IMG_UINT32 *pui32TimerRegister = 0;
++
++#define PVRSRV_TIMER_TOTAL_IN_TICKS(X) asTimers[X].ui32Total
++#define PVRSRV_TIMER_TOTAL_IN_MS(X) ((1000*asTimers[X].ui32Total)/ui32TicksPerMS)
++#define PVRSRV_TIMER_COUNT(X) asTimers[X].ui32Count
++
++
++Temporal_Data asTimers[PVRSRV_NUM_TIMERS];
++
++
++IMG_UINT32 PVRSRVTimeNow(IMG_VOID)
++{
++ if (!pui32TimerRegister)
++ {
++ static IMG_BOOL bFirstTime = IMG_TRUE;
++
++ if (bFirstTime)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVTimeNow: No timer register set up"));
++
++ bFirstTime = IMG_FALSE;
++ }
++
++ return 0;
++ }
++
++#if defined(__sh__)
++
++ return (0xffffffff-*pui32TimerRegister);
++
++#else
++
++ return 0;
++
++#endif
++}
++
++
++static IMG_UINT32 PVRSRVGetCPUFreq(IMG_VOID)
++{
++ IMG_UINT32 ui32Time1, ui32Time2;
++
++ ui32Time1 = PVRSRVTimeNow();
++
++ OSWaitus(1000000);
++
++ ui32Time2 = PVRSRVTimeNow();
++
++ PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetCPUFreq: timer frequency = %d Hz", ui32Time2 - ui32Time1));
++
++ return (ui32Time2 - ui32Time1);
++}
++
++
++IMG_VOID PVRSRVSetupMetricTimers(IMG_VOID *pvDevInfo)
++{
++ IMG_UINT32 ui32Loop;
++
++ PVR_UNREFERENCED_PARAMETER(pvDevInfo);
++
++ for(ui32Loop=0; ui32Loop < (PVRSRV_NUM_TIMERS); ui32Loop++)
++ {
++ asTimers[ui32Loop].ui32Total = 0;
++ asTimers[ui32Loop].ui32Count = 0;
++ }
++
++
++ #if defined(__sh__)
++
++
++
++
++
++ *TCR_2 = TIMER_DIVISOR;
++
++
++ *TCOR_2 = *TCNT_2 = (IMG_UINT)0xffffffff;
++
++
++ *TST_REG |= (IMG_UINT8)0x04;
++
++ pui32TimerRegister = (IMG_UINT32 *)TCNT_2;
++
++ #else
++
++ pui32TimerRegister = 0;
++
++ #endif
++
++}
++
++
++IMG_VOID PVRSRVOutputMetricTotals(IMG_VOID)
++{
++ IMG_UINT32 ui32TicksPerMS, ui32Loop;
++
++ ui32TicksPerMS = PVRSRVGetCPUFreq();
++
++ if (!ui32TicksPerMS)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVOutputMetricTotals: Failed to get CPU Freq"));
++ return;
++ }
++
++ for(ui32Loop=0; ui32Loop < (PVRSRV_NUM_TIMERS); ui32Loop++)
++ {
++ if (asTimers[ui32Loop].ui32Count & 0x80000000L)
++ {
++ PVR_DPF((PVR_DBG_WARNING,"PVRSRVOutputMetricTotals: Timer %u is still ON", ui32Loop));
++ }
++ }
++#if 0
++
++ PVR_DPF((PVR_DBG_ERROR," Timer(%u): Total = %u",PVRSRV_TIMER_EXAMPLE_1, PVRSRV_TIMER_TOTAL_IN_TICKS(PVRSRV_TIMER_EXAMPLE_1)));
++ PVR_DPF((PVR_DBG_ERROR," Timer(%u): Time = %ums",PVRSRV_TIMER_EXAMPLE_1, PVRSRV_TIMER_TOTAL_IN_MS(PVRSRV_TIMER_EXAMPLE_1)));
++ PVR_DPF((PVR_DBG_ERROR," Timer(%u): Count = %u",PVRSRV_TIMER_EXAMPLE_1, PVRSRV_TIMER_COUNT(PVRSRV_TIMER_EXAMPLE_1)));
++#endif
++}
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/pdump_common.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/pdump_common.c
+new file mode 100644
+index 0000000..94bfc09
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/pdump_common.c
+@@ -0,0 +1,1723 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(PDUMP)
++#include <stdarg.h>
++
++#include "services_headers.h"
++#if defined(SUPPORT_SGX)
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#endif
++#include "pdump_km.h"
++
++#if !defined(PDUMP_TEMP_BUFFER_SIZE)
++#define PDUMP_TEMP_BUFFER_SIZE (64 * 1024L)
++#endif
++
++#if 1
++#define PDUMP_DBG(a) PDumpOSDebugPrintf a
++#else
++#define PDUMP_DBG(a)
++#endif
++
++#define PDUMP_DATAMASTER_PIXEL (1)
++#define PDUMP_DATAMASTER_EDM (3)
++
++#define MIN(x, y) (((x) < (y)) ? (x) : (y))
++#define PTR_PLUS(t, p, x) ((t *)(((IMG_CHAR *)(p)) + (x)))
++#define VPTR_PLUS(p, x) PTR_PLUS(IMG_VOID, p, x)
++#define VPTR_INC(p, x) (p = VPTR_PLUS(p, x))
++#define MAX_PDUMP_MMU_CONTEXTS (32)
++static IMG_VOID *gpvTempBuffer = IMG_NULL;
++static IMG_HANDLE ghTempBufferBlockAlloc;
++static IMG_UINT16 gui16MMUContextUsage = 0;
++
++
++
++static IMG_VOID *GetTempBuffer(IMG_VOID)
++{
++
++ if (gpvTempBuffer == IMG_NULL)
++ {
++ PVRSRV_ERROR eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ PDUMP_TEMP_BUFFER_SIZE,
++ &gpvTempBuffer,
++ &ghTempBufferBlockAlloc,
++ "PDUMP Temporary Buffer");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "GetTempBuffer: OSAllocMem failed: %d", eError));
++ }
++ }
++
++ return gpvTempBuffer;
++}
++
++static IMG_VOID FreeTempBuffer(IMG_VOID)
++{
++
++ if (gpvTempBuffer != IMG_NULL)
++ {
++ PVRSRV_ERROR eError = OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ PDUMP_TEMP_BUFFER_SIZE,
++ gpvTempBuffer,
++ ghTempBufferBlockAlloc);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeTempBuffer: OSFreeMem failed: %d", eError));
++ }
++ else
++ {
++ gpvTempBuffer = IMG_NULL;
++ }
++ }
++}
++
++IMG_VOID PDumpInitCommon(IMG_VOID)
++{
++
++ (IMG_VOID) GetTempBuffer();
++
++
++ PDumpInit();
++}
++
++IMG_VOID PDumpDeInitCommon(IMG_VOID)
++{
++
++ FreeTempBuffer();
++
++
++ PDumpDeInit();
++}
++
++#if defined(SGX_SUPPORT_COMMON_PDUMP)
++
++IMG_BOOL PDumpIsSuspended(IMG_VOID)
++{
++ return PDumpOSIsSuspended();
++}
++
++PVRSRV_ERROR PDumpRegWithFlagsKM(IMG_UINT32 ui32Reg, IMG_UINT32 ui32Data, IMG_UINT32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING()
++ PDUMP_DBG(("PDumpRegWithFlagsKM"));
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :SGXREG:0x%8.8lX 0x%8.8lX\r\n", ui32Reg, ui32Data);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpRegKM(IMG_UINT32 ui32Reg,IMG_UINT32 ui32Data)
++{
++ return PDumpRegWithFlagsKM(ui32Reg, ui32Data, PDUMP_FLAGS_CONTINUOUS);
++}
++
++PVRSRV_ERROR PDumpRegPolWithFlagsKM(IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue, IMG_UINT32 ui32Mask, IMG_UINT32 ui32Flags)
++{
++
++ #define POLL_DELAY 1000UL
++ #define POLL_COUNT_LONG (2000000000UL / POLL_DELAY)
++ #define POLL_COUNT_SHORT (1000000UL / POLL_DELAY)
++
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32PollCount;
++
++ PDUMP_GET_SCRIPT_STRING();
++ PDUMP_DBG(("PDumpRegPolWithFlagsKM"));
++
++ if (((ui32RegAddr == EUR_CR_EVENT_STATUS) &&
++ (ui32RegValue & ui32Mask & EUR_CR_EVENT_STATUS_TA_FINISHED_MASK) != 0) ||
++ ((ui32RegAddr == EUR_CR_EVENT_STATUS) &&
++ (ui32RegValue & ui32Mask & EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK) != 0) ||
++ ((ui32RegAddr == EUR_CR_EVENT_STATUS) &&
++ (ui32RegValue & ui32Mask & EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK) != 0))
++ {
++ ui32PollCount = POLL_COUNT_LONG;
++ }
++ else
++ {
++ ui32PollCount = POLL_COUNT_SHORT;
++ }
++
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "POL :SGXREG:0x%8.8lX 0x%8.8lX 0x%8.8lX %d %lu %d\r\n",
++ ui32RegAddr, ui32RegValue, ui32Mask, 0, ui32PollCount, POLL_DELAY);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PDumpRegPolKM(IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue, IMG_UINT32 ui32Mask)
++{
++ return PDumpRegPolWithFlagsKM(ui32RegAddr, ui32RegValue, ui32Mask, PDUMP_FLAGS_CONTINUOUS);
++}
++
++PVRSRV_ERROR PDumpMallocPages (PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_UINT32 ui32DevVAddr,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32NumBytes,
++ IMG_UINT32 ui32PageSize,
++ IMG_HANDLE hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ IMG_PUINT8 pui8LinAddr;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32NumPages;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_UINT32 ui32Page;
++
++ PDUMP_GET_SCRIPT_STRING();
++
++#if defined(LINUX)
++ PVR_ASSERT(hOSMemHandle);
++#else
++
++ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
++ PVR_ASSERT(((IMG_UINT32) pvLinAddr & (SGX_MMU_PAGE_MASK)) == 0);
++#endif
++
++ PVR_ASSERT(((IMG_UINT32) ui32DevVAddr & (SGX_MMU_PAGE_MASK)) == 0);
++ PVR_ASSERT(((IMG_UINT32) ui32NumBytes & (SGX_MMU_PAGE_MASK)) == 0);
++
++
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- MALLOC :SGXMEM:VA_%8.8lX 0x%8.8lX %lu\r\n",
++ ui32DevVAddr, ui32NumBytes, ui32PageSize);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++
++
++ pui8LinAddr = (IMG_PUINT8) pvLinAddr;
++ ui32Offset = 0;
++ ui32NumPages = ui32NumBytes / ui32PageSize;
++ while (ui32NumPages)
++ {
++ ui32NumPages--;
++
++
++ PDumpOSCPUVAddrToDevPAddr(eDeviceType,
++ hOSMemHandle,
++ ui32Offset,
++ pui8LinAddr,
++ ui32PageSize,
++ &sDevPAddr);
++ ui32Page = sDevPAddr.uiAddr / ui32PageSize;
++
++ pui8LinAddr += ui32PageSize;
++ ui32Offset += ui32PageSize;
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC :SGXMEM:PA_%8.8lX%8.8lX %lu %lu 0x%8.8lX\r\n",
++ (IMG_UINT32) hUniqueTag,
++ ui32Page * ui32PageSize,
++ ui32PageSize,
++ ui32PageSize,
++ ui32Page * ui32PageSize);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpMallocPageTable (PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32PTSize,
++ IMG_HANDLE hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_UINT32 ui32Page;
++
++ PDUMP_GET_SCRIPT_STRING();
++
++ PVR_ASSERT(((IMG_UINT32) pvLinAddr & (ui32PTSize - 1)) == 0);
++
++
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- MALLOC :SGXMEM:PAGE_TABLE 0x%8.8lX %lu\r\n", ui32PTSize, SGX_MMU_PAGE_SIZE);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++ {
++
++
++ PDumpOSCPUVAddrToDevPAddr(eDeviceType,
++ IMG_NULL,
++ 0,
++ (IMG_PUINT8) pvLinAddr,
++ SGX_MMU_PAGE_SIZE,
++ &sDevPAddr);
++ ui32Page = sDevPAddr.uiAddr >> SGX_MMU_PAGE_SHIFT;
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC :SGXMEM:PA_%8.8lX%8.8lX 0x%lX %lu 0x%8.8lX\r\n",
++ (IMG_UINT32) hUniqueTag,
++ ui32Page * SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ ui32Page * SGX_MMU_PAGE_SIZE);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpFreePages (BM_HEAP *psBMHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_UINT32 ui32NumBytes,
++ IMG_UINT32 ui32PageSize,
++ IMG_HANDLE hUniqueTag,
++ IMG_BOOL bInterleaved)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32NumPages, ui32PageCounter;
++ IMG_DEV_PHYADDR sDevPAddr;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ PDUMP_GET_SCRIPT_STRING();
++
++ PVR_ASSERT(((IMG_UINT32) sDevVAddr.uiAddr & (ui32PageSize - 1)) == 0);
++ PVR_ASSERT(((IMG_UINT32) ui32NumBytes & (ui32PageSize - 1)) == 0);
++
++
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- FREE :SGXMEM:VA_%8.8lX\r\n", sDevVAddr.uiAddr);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++
++
++ ui32NumPages = ui32NumBytes / ui32PageSize;
++ psDeviceNode = psBMHeap->pBMContext->psDeviceNode;
++ for (ui32PageCounter = 0; ui32PageCounter < ui32NumPages; ui32PageCounter++)
++ {
++ if (!bInterleaved || (ui32PageCounter % 2) == 0)
++ {
++ sDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(psBMHeap->pMMUHeap, sDevVAddr);
++ {
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE :SGXMEM:PA_%8.8lX%8.8lX\r\n", (IMG_UINT32) hUniqueTag, sDevPAddr.uiAddr);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ }
++ }
++ else
++ {
++
++ }
++
++ sDevVAddr.uiAddr += ui32PageSize;
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpFreePageTable (PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32PTSize,
++ IMG_HANDLE hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_UINT32 ui32Page;
++
++ PDUMP_GET_SCRIPT_STRING();
++
++ PVR_UNREFERENCED_PARAMETER(ui32PTSize);
++
++
++ PVR_ASSERT(((IMG_UINT32) pvLinAddr & (ui32PTSize-1UL)) == 0);
++
++
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- FREE :SGXMEM:PAGE_TABLE\r\n");
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++
++
++
++
++
++
++
++
++
++
++
++ {
++ PDumpOSCPUVAddrToDevPAddr(eDeviceType,
++ IMG_NULL,
++ 0,
++ (IMG_PUINT8) pvLinAddr,
++ SGX_MMU_PAGE_SIZE,
++ &sDevPAddr);
++ ui32Page = sDevPAddr.uiAddr >> SGX_MMU_PAGE_SHIFT;
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE :SGXMEM:PA_%8.8lX%8.8lX\r\n", (IMG_UINT32) hUniqueTag, ui32Page * SGX_MMU_PAGE_SIZE);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpPDRegWithFlags(IMG_UINT32 ui32Reg,
++ IMG_UINT32 ui32Data,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING()
++
++
++
++#if defined(SGX_FEATURE_36BIT_MMU)
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen,
++ "WRW :SGXMEM:$1 :SGXMEM:PA_%8.8lX%8.8lX:0x0\r\n",
++ (IMG_UINT32)hUniqueTag,
++ (ui32Data & SGX_MMU_PDE_ADDR_MASK) << SGX_MMU_PDE_ADDR_ALIGNSHIFT);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "SHR :SGXMEM:$1 :SGXMEM:$1 0x4\r\n");
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen,
++ "WRW :SGXREG:0x%8.8lX: SGXMEM:$1\r\n",
++ ui32Reg);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++#else
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXREG:0x%8.8lX :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX\r\n",
++ ui32Reg,
++ (IMG_UINT32) hUniqueTag,
++ (ui32Data & SGX_MMU_PDE_ADDR_MASK) << SGX_MMU_PDE_ADDR_ALIGNSHIFT,
++ ui32Data & ~SGX_MMU_PDE_ADDR_MASK);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++#endif
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpPDReg (IMG_UINT32 ui32Reg,
++ IMG_UINT32 ui32Data,
++ IMG_HANDLE hUniqueTag)
++{
++ return PDumpPDRegWithFlags(ui32Reg, ui32Data, PDUMP_FLAGS_CONTINUOUS, hUniqueTag);
++}
++
++PVRSRV_ERROR PDumpMemPolKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ PDUMP_POLL_OPERATOR eOperator,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag)
++{
++ #define MEMPOLL_DELAY (1000)
++ #define MEMPOLL_COUNT (2000000000 / MEMPOLL_DELAY)
++
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32PageOffset;
++ IMG_UINT8 *pui8LinAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_DEV_VIRTADDR sDevVPageAddr;
++ PDUMP_GET_SCRIPT_STRING();
++
++
++ PVR_ASSERT((ui32Offset + sizeof(IMG_UINT32)) <= psMemInfo->ui32AllocSize);
++
++
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "-- POL :SGXMEM:VA_%8.8lX 0x%8.8lX 0x%8.8lX %d %d %d\r\n",
++ psMemInfo->sDevVAddr.uiAddr + ui32Offset,
++ ui32Value,
++ ui32Mask,
++ eOperator,
++ MEMPOLL_COUNT,
++ MEMPOLL_DELAY);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++
++ pui8LinAddr = psMemInfo->pvLinAddrKM;
++
++
++ pui8LinAddr += ui32Offset;
++
++
++
++
++ PDumpOSCPUVAddrToPhysPages(psMemInfo->sMemBlk.hOSMemHandle,
++ ui32Offset,
++ pui8LinAddr,
++ &ui32PageOffset);
++
++
++ sDevVPageAddr.uiAddr = psMemInfo->sDevVAddr.uiAddr + ui32Offset - ui32PageOffset;
++
++ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
++
++
++ BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
++
++
++ sDevPAddr.uiAddr += ui32PageOffset;
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "POL :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %d %d %d\r\n",
++ (IMG_UINT32) hUniqueTag,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
++ ui32Value,
++ ui32Mask,
++ eOperator,
++ MEMPOLL_COUNT,
++ MEMPOLL_DELAY);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpMemKM(IMG_PVOID pvAltLinAddr,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32NumPages;
++ IMG_UINT32 ui32PageByteOffset;
++ IMG_UINT32 ui32BlockBytes;
++ IMG_UINT8* pui8LinAddr;
++ IMG_UINT8* pui8DataLinAddr = IMG_NULL;
++ IMG_DEV_VIRTADDR sDevVPageAddr;
++ IMG_DEV_VIRTADDR sDevVAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_UINT32 ui32ParamOutPos;
++
++ PDUMP_GET_SCRIPT_AND_FILE_STRING();
++
++
++ PVR_ASSERT((ui32Offset + ui32Bytes) <= psMemInfo->ui32AllocSize);
++
++ if (!PDumpOSJTInitialised())
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (ui32Bytes == 0 || PDumpOSIsSuspended())
++ {
++ return PVRSRV_OK;
++ }
++
++
++ if(pvAltLinAddr)
++ {
++ pui8DataLinAddr = pvAltLinAddr;
++ }
++ else if(psMemInfo->pvLinAddrKM)
++ {
++ pui8DataLinAddr = (IMG_UINT8 *)psMemInfo->pvLinAddrKM + ui32Offset;
++ }
++ pui8LinAddr = (IMG_UINT8 *)psMemInfo->pvLinAddrKM;
++ sDevVAddr = psMemInfo->sDevVAddr;
++
++
++ sDevVAddr.uiAddr += ui32Offset;
++ pui8LinAddr += ui32Offset;
++
++ PVR_ASSERT(pui8DataLinAddr);
++
++ PDumpOSCheckForSplitting(PDumpOSGetStream(PDUMP_STREAM_PARAM2), ui32Bytes, ui32Flags);
++
++ ui32ParamOutPos = PDumpOSGetStreamOffset(PDUMP_STREAM_PARAM2);
++
++
++
++ if(!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_PARAM2),
++ pui8DataLinAddr,
++ ui32Bytes,
++ ui32Flags))
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (PDumpOSGetParamFileNum() == 0)
++ {
++ eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%.prm");
++ }
++ else
++ {
++ eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%%lu.prm", PDumpOSGetParamFileNum());
++ }
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "-- LDB :SGXMEM:VA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n",
++ (IMG_UINT32)hUniqueTag,
++ psMemInfo->sDevVAddr.uiAddr,
++ ui32Offset,
++ ui32Bytes,
++ ui32ParamOutPos,
++ pszFileName);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++
++
++
++ PDumpOSCPUVAddrToPhysPages(psMemInfo->sMemBlk.hOSMemHandle,
++ ui32Offset,
++ pui8LinAddr,
++ &ui32PageByteOffset);
++ ui32NumPages = (ui32PageByteOffset + ui32Bytes + HOST_PAGESIZE() - 1) / HOST_PAGESIZE();
++
++ while(ui32NumPages)
++ {
++#if 0
++ IMG_UINT32 ui32BlockBytes = MIN(ui32BytesRemaining, PAGE_SIZE);
++ CpuPAddr = OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle,
++ ui32CurrentOffset);
++#endif
++ ui32NumPages--;
++
++
++ sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageByteOffset;
++
++ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
++
++
++ BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
++
++
++ sDevPAddr.uiAddr += ui32PageByteOffset;
++#if 0
++ if(ui32PageByteOffset)
++ {
++ ui32BlockBytes =
++ MIN(ui32BytesRemaining, PAGE_ALIGN(CpuPAddr.uiAddr) - CpuPAddr.uiAddr);
++
++ ui32PageByteOffset = 0;
++ }
++#endif
++
++ if (ui32PageByteOffset + ui32Bytes > HOST_PAGESIZE())
++ {
++
++ ui32BlockBytes = HOST_PAGESIZE() - ui32PageByteOffset;
++ }
++ else
++ {
++
++ ui32BlockBytes = ui32Bytes;
++ }
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "LDB :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n",
++ (IMG_UINT32) hUniqueTag,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
++ ui32BlockBytes,
++ ui32ParamOutPos,
++ pszFileName);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++
++
++
++
++ ui32PageByteOffset = 0;
++
++ ui32Bytes -= ui32BlockBytes;
++
++ sDevVAddr.uiAddr += ui32BlockBytes;
++
++ pui8LinAddr += ui32BlockBytes;
++
++ ui32ParamOutPos += ui32BlockBytes;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpMem2KM(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_BOOL bInitialisePages,
++ IMG_HANDLE hUniqueTag1,
++ IMG_HANDLE hUniqueTag2)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32NumPages;
++ IMG_UINT32 ui32PageOffset;
++ IMG_UINT32 ui32BlockBytes;
++ IMG_UINT8* pui8LinAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT32 ui32ParamOutPos;
++
++ PDUMP_GET_SCRIPT_AND_FILE_STRING();
++
++ if (!pvLinAddr || !PDumpOSJTInitialised())
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (PDumpOSIsSuspended())
++ {
++ return PVRSRV_OK;
++ }
++
++ PDumpOSCheckForSplitting(PDumpOSGetStream(PDUMP_STREAM_PARAM2), ui32Bytes, ui32Flags);
++
++ ui32ParamOutPos = PDumpOSGetStreamOffset(PDUMP_STREAM_PARAM2);
++
++ if (bInitialisePages)
++ {
++
++
++
++ if (!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_PARAM2),
++ pvLinAddr,
++ ui32Bytes,
++ PDUMP_FLAGS_CONTINUOUS))
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (PDumpOSGetParamFileNum() == 0)
++ {
++ eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%.prm");
++ }
++ else
++ {
++ eErr = PDumpOSSprintf(pszFileName, ui32MaxLenFileName, "%%0%%%lu.prm", PDumpOSGetParamFileNum());
++ }
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ }
++
++
++
++
++ ui32PageOffset = (IMG_UINT32) pvLinAddr & (HOST_PAGESIZE() - 1);
++ ui32NumPages = (ui32PageOffset + ui32Bytes + HOST_PAGESIZE() - 1) / HOST_PAGESIZE();
++ pui8LinAddr = (IMG_UINT8*) pvLinAddr;
++
++ while (ui32NumPages)
++ {
++ ui32NumPages--;
++ sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr);
++ sDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++
++
++ if (ui32PageOffset + ui32Bytes > HOST_PAGESIZE())
++ {
++
++ ui32BlockBytes = HOST_PAGESIZE() - ui32PageOffset;
++ }
++ else
++ {
++
++ ui32BlockBytes = ui32Bytes;
++ }
++
++
++
++ if (bInitialisePages)
++ {
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "LDB :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n",
++ (IMG_UINT32) hUniqueTag1,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
++ ui32BlockBytes,
++ ui32ParamOutPos,
++ pszFileName);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ }
++ else
++ {
++ for (ui32Offset = 0; ui32Offset < ui32BlockBytes; ui32Offset += sizeof(IMG_UINT32))
++ {
++ IMG_UINT32 ui32PTE = *((IMG_UINT32 *) (pui8LinAddr + ui32Offset));
++
++ if ((ui32PTE & SGX_MMU_PDE_ADDR_MASK) != 0)
++ {
++#if defined(SGX_FEATURE_36BIT_MMU)
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "WRW :SGXMEM:$1 :SGXMEM:PA_%8.8lX%8.8lX:0x0\r\n",
++ (IMG_UINT32)hUniqueTag2,
++ (ui32PTE & SGX_MMU_PDE_ADDR_MASK) << SGX_MMU_PTE_ADDR_ALIGNSHIFT);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLenScript, "SHR :SGXMEM:$1 :SGXMEM:$1 0x4\r\n");
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLenScript, "OR :SGXMEM:$1 :SGXMEM:$1 0x%8.8lX\r\n", ui32PTE & ~SGX_MMU_PDE_ADDR_MASK);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX :SGXMEM:$1\r\n",
++ (IMG_UINT32)hUniqueTag1,
++ (sDevPAddr.uiAddr + ui32Offset) & ~(SGX_MMU_PAGE_MASK),
++ (sDevPAddr.uiAddr + ui32Offset) & (SGX_MMU_PAGE_MASK));
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++#else
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX\r\n",
++ (IMG_UINT32) hUniqueTag1,
++ (sDevPAddr.uiAddr + ui32Offset) & ~(SGX_MMU_PAGE_MASK),
++ (sDevPAddr.uiAddr + ui32Offset) & (SGX_MMU_PAGE_MASK),
++ (IMG_UINT32) hUniqueTag2,
++ (ui32PTE & SGX_MMU_PDE_ADDR_MASK) << SGX_MMU_PTE_ADDR_ALIGNSHIFT,
++ ui32PTE & ~SGX_MMU_PDE_ADDR_MASK);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++#endif
++ }
++ else
++ {
++ PVR_ASSERT((ui32PTE & SGX_MMU_PTE_VALID) == 0UL);
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLenScript,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX%8.8lX\r\n",
++ (IMG_UINT32) hUniqueTag1,
++ (sDevPAddr.uiAddr + ui32Offset) & ~(SGX_MMU_PAGE_MASK),
++ (sDevPAddr.uiAddr + ui32Offset) & (SGX_MMU_PAGE_MASK),
++ (ui32PTE << SGX_MMU_PTE_ADDR_ALIGNSHIFT),
++ (IMG_UINT32) hUniqueTag2);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++ }
++ }
++
++
++
++
++ ui32PageOffset = 0;
++
++ ui32Bytes -= ui32BlockBytes;
++
++ pui8LinAddr += ui32BlockBytes;
++
++ ui32ParamOutPos += ui32BlockBytes;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpPDDevPAddrKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_DEV_PHYADDR sPDDevPAddr,
++ IMG_HANDLE hUniqueTag1,
++ IMG_HANDLE hUniqueTag2)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32PageByteOffset;
++ IMG_DEV_VIRTADDR sDevVAddr;
++ IMG_DEV_VIRTADDR sDevVPageAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++
++ PDUMP_GET_SCRIPT_STRING();
++
++ if(!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_PARAM2),
++ (IMG_UINT8 *)&sPDDevPAddr,
++ sizeof(IMG_DEV_PHYADDR),
++ PDUMP_FLAGS_CONTINUOUS))
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ sDevVAddr = psMemInfo->sDevVAddr;
++ ui32PageByteOffset = sDevVAddr.uiAddr & (SGX_MMU_PAGE_MASK);
++
++ sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageByteOffset;
++ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
++
++ BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
++ sDevPAddr.uiAddr += ui32PageByteOffset + ui32Offset;
++
++ if ((sPDDevPAddr.uiAddr & SGX_MMU_PDE_ADDR_MASK) != 0UL)
++ {
++#if defined(SGX_FEATURE_36BIT_MMU)
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXMEM:$1 :SGXMEM:PA_%8.8lX%8.8lX:0x0\r\n",
++ (IMG_UINT32)hUniqueTag2,
++ sPDDevPAddr.uiAddr);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "AND :SGXMEM:$2 :SGXMEM:$1 0xFFFFFFFF\r\n");
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX :SGXMEM:$2\r\n",
++ (IMG_UINT32)hUniqueTag1,
++ (sDevPAddr.uiAddr) & ~(SGX_MMU_PAGE_MASK),
++ (sDevPAddr.uiAddr) & (SGX_MMU_PAGE_MASK));
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "SHR :SGXMEM:$2 :SGXMEM:$1 0x20\r\n");
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX :SGXMEM:$2\r\n",
++ (IMG_UINT32)hUniqueTag1,
++ (sDevPAddr.uiAddr + 4) & ~(SGX_MMU_PAGE_MASK),
++ (sDevPAddr.uiAddr + 4) & (SGX_MMU_PAGE_MASK));
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++#else
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX\r\n",
++ (IMG_UINT32) hUniqueTag1,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
++ (IMG_UINT32) hUniqueTag2,
++ sPDDevPAddr.uiAddr & SGX_MMU_PDE_ADDR_MASK,
++ sPDDevPAddr.uiAddr & ~SGX_MMU_PDE_ADDR_MASK);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++#endif
++ }
++ else
++ {
++ PVR_ASSERT(!(sDevPAddr.uiAddr & SGX_MMU_PTE_VALID));
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX\r\n",
++ (IMG_UINT32) hUniqueTag1,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
++ sPDDevPAddr.uiAddr);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ }
++ PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_MSG_STRING();
++ PDUMP_DBG(("PDumpCommentKM"));
++
++
++ if (!PDumpOSWriteString2("-- ", ui32Flags))
++ {
++ if(ui32Flags & PDUMP_FLAGS_CONTINUOUS)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ else
++ {
++ return PVRSRV_ERROR_CMD_NOT_PROCESSED;
++ }
++ }
++
++
++ eErr = PDumpOSBufprintf(hMsg, ui32MaxLen, "%s", pszComment);
++ if( (eErr != PVRSRV_OK) &&
++ (eErr != PVRSRV_ERROR_PDUMP_BUF_OVERFLOW))
++ {
++ return eErr;
++ }
++
++
++ PDumpOSVerifyLineEnding(hMsg, ui32MaxLen);
++ PDumpOSWriteString2(hMsg, ui32Flags);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32 ui32Flags, IMG_CHAR * pszFormat, ...)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_va_list ap;
++ PDUMP_GET_MSG_STRING();
++
++
++ PDUMP_va_start(ap, pszFormat);
++ eErr = PDumpOSVSprintf(hMsg, ui32MaxLen, pszFormat, ap);
++ PDUMP_va_end(ap);
++
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ return PDumpCommentKM(hMsg, ui32Flags);
++}
++
++PVRSRV_ERROR PDumpComment(IMG_CHAR *pszFormat, ...)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_va_list ap;
++ PDUMP_GET_MSG_STRING();
++
++
++ PDUMP_va_start(ap, pszFormat);
++ eErr = PDumpOSVSprintf(hMsg, ui32MaxLen, pszFormat, ap);
++ PDUMP_va_end(ap);
++
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ return PDumpCommentKM(hMsg, PDUMP_FLAGS_CONTINUOUS);
++}
++
++PVRSRV_ERROR PDumpDriverInfoKM(IMG_CHAR *pszString, IMG_UINT32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32MsgLen;
++ PDUMP_GET_MSG_STRING();
++
++
++ eErr = PDumpOSBufprintf(hMsg, ui32MaxLen, "%s", pszString);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++
++ PDumpOSVerifyLineEnding(hMsg, ui32MaxLen);
++ ui32MsgLen = PDumpOSBuflen(hMsg, ui32MaxLen);
++
++ if (!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_DRIVERINFO),
++ (IMG_UINT8 *)hMsg,
++ ui32MsgLen,
++ ui32Flags))
++ {
++ if (ui32Flags & PDUMP_FLAGS_CONTINUOUS)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ else
++ {
++ return PVRSRV_ERROR_CMD_NOT_PROCESSED;
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpBitmapKM( IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_UINT32 ui32Width,
++ IMG_UINT32 ui32Height,
++ IMG_UINT32 ui32StrideInBytes,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ IMG_UINT32 ui32Size,
++ PDUMP_PIXEL_FORMAT ePixelFormat,
++ PDUMP_MEM_FORMAT eMemFormat,
++ IMG_UINT32 ui32PDumpFlags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++ PDumpCommentWithFlags(ui32PDumpFlags, "\r\n-- Dump bitmap of render\r\n");
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "SII %s %s.bin :SGXMEM:v%x:0x%08lX 0x%08lX 0x%08lX 0x%08X 0x%08lX 0x%08lX 0x%08lX 0x%08X\r\n",
++ pszFileName,
++ pszFileName,
++ PDUMP_DATAMASTER_PIXEL,
++ sDevBaseAddr.uiAddr,
++ ui32Size,
++ ui32FileOffset,
++ ePixelFormat,
++ ui32Width,
++ ui32Height,
++ ui32StrideInBytes,
++ eMemFormat);
++#else
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "SII %s %s.bin :SGXMEM:v:0x%08lX 0x%08lX 0x%08lX 0x%08X 0x%08lX 0x%08lX 0x%08lX 0x%08X\r\n",
++ pszFileName,
++ pszFileName,
++ sDevBaseAddr.uiAddr,
++ ui32Size,
++ ui32FileOffset,
++ ePixelFormat,
++ ui32Width,
++ ui32Height,
++ ui32StrideInBytes,
++ eMemFormat);
++#endif
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++ PDumpOSWriteString2( hScript, ui32PDumpFlags);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpReadRegKM ( IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_UINT32 ui32Address,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PDumpFlags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "SAB :SGXREG:0x%08lX 0x%08lX %s\r\n",
++ ui32Address,
++ ui32FileOffset,
++ pszFileName);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++ PDumpOSWriteString2( hScript, ui32PDumpFlags);
++
++ return PVRSRV_OK;
++}
++
++IMG_BOOL PDumpTestNextFrame(IMG_UINT32 ui32CurrentFrame)
++{
++ IMG_BOOL bFrameDumped;
++
++
++
++ (IMG_VOID) PDumpSetFrameKM(ui32CurrentFrame + 1);
++ bFrameDumped = PDumpIsCaptureFrameKM();
++ (IMG_VOID) PDumpSetFrameKM(ui32CurrentFrame);
++
++ return bFrameDumped;
++}
++
++static PVRSRV_ERROR PDumpSignatureRegister (IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Address,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 *pui32FileOffset,
++ IMG_UINT32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "SAB :SGXREG:0x%08X 0x%08X %s\r\n",
++ ui32Address,
++ *pui32FileOffset,
++ pszFileName);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++ PDumpOSWriteString2(hScript, ui32Flags);
++ *pui32FileOffset += ui32Size;
++ return PVRSRV_OK;
++}
++
++static IMG_VOID PDumpRegisterRange(IMG_CHAR *pszFileName,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters,
++ IMG_UINT32 *pui32FileOffset,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32Flags)
++{
++ IMG_UINT32 i;
++ for (i = 0; i < ui32NumRegisters; i++)
++ {
++ PDumpSignatureRegister(pszFileName, pui32Registers[i], ui32Size, pui32FileOffset, ui32Flags);
++ }
++}
++
++PVRSRV_ERROR PDump3DSignatureRegisters(IMG_UINT32 ui32DumpFrameNum,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32FileOffset, ui32Flags;
++
++ PDUMP_GET_FILE_STRING();
++
++ ui32Flags = bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0;
++ ui32FileOffset = 0;
++
++ PDumpCommentWithFlags(ui32Flags, "\r\n-- Dump 3D signature registers\r\n");
++ eErr = PDumpOSSprintf(pszFileName, ui32MaxLen, "out%lu_3d.sig", ui32DumpFrameNum);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++ PDumpRegisterRange(pszFileName, pui32Registers, ui32NumRegisters, &ui32FileOffset, sizeof(IMG_UINT32), ui32Flags);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpTASignatureRegisters (IMG_UINT32 ui32DumpFrameNum,
++ IMG_UINT32 ui32TAKickCount,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32FileOffset, ui32Flags;
++
++ PDUMP_GET_FILE_STRING();
++
++ ui32Flags = bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0;
++ ui32FileOffset = ui32TAKickCount * ui32NumRegisters * sizeof(IMG_UINT32);
++
++ PDumpCommentWithFlags(ui32Flags, "\r\n-- Dump TA signature registers\r\n");
++ eErr = PDumpOSSprintf(pszFileName, ui32MaxLen, "out%lu_ta.sig", ui32DumpFrameNum);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++ PDumpRegisterRange(pszFileName, pui32Registers, ui32NumRegisters, &ui32FileOffset, sizeof(IMG_UINT32), ui32Flags);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpCounterRegisters (IMG_UINT32 ui32DumpFrameNum,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32FileOffset, ui32Flags;
++
++ PDUMP_GET_FILE_STRING();
++
++ ui32Flags = bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0UL;
++ ui32FileOffset = 0UL;
++
++ PDumpCommentWithFlags(ui32Flags, "\r\n-- Dump counter registers\r\n");
++ eErr = PDumpOSSprintf(pszFileName, ui32MaxLen, "out%lu.perf", ui32DumpFrameNum);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++ PDumpRegisterRange(pszFileName, pui32Registers, ui32NumRegisters, &ui32FileOffset, sizeof(IMG_UINT32), ui32Flags);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpRegRead(const IMG_UINT32 ui32RegOffset, IMG_UINT32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "RDW :SGXREG:0x%lX\r\n", ui32RegOffset);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpCycleCountRegRead(const IMG_UINT32 ui32RegOffset, IMG_BOOL bLastFrame)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "RDW :SGXREG:0x%lX\r\n", ui32RegOffset);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpHWPerfCBKM (IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PDumpFlags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++ PDumpCommentWithFlags(ui32PDumpFlags, "\r\n-- Dump Hardware Performance Circular Buffer\r\n");
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ "SAB :SGXMEM:v%x:0x%08lX 0x%08lX 0x%08lX %s.bin\r\n",
++ PDUMP_DATAMASTER_EDM,
++#else
++ "SAB :SGXMEM:v:0x%08lX 0x%08lX 0x%08lX %s.bin\r\n",
++#endif
++ sDevBaseAddr.uiAddr,
++ ui32Size,
++ ui32FileOffset,
++ pszFileName);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++
++ PDumpOSWriteString2(hScript, ui32PDumpFlags);
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PDumpCBP(PPVRSRV_KERNEL_MEM_INFO psROffMemInfo,
++ IMG_UINT32 ui32ROffOffset,
++ IMG_UINT32 ui32WPosVal,
++ IMG_UINT32 ui32PacketSize,
++ IMG_UINT32 ui32BufferSize,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag)
++{
++ PVRSRV_ERROR eErr;
++ IMG_UINT32 ui32PageOffset;
++ IMG_UINT8 *pui8LinAddr;
++ IMG_DEV_VIRTADDR sDevVAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_DEV_VIRTADDR sDevVPageAddr;
++
++
++ PDUMP_GET_SCRIPT_STRING();
++
++
++ PVR_ASSERT((ui32ROffOffset + sizeof(IMG_UINT32)) <= psROffMemInfo->ui32AllocSize);
++
++ pui8LinAddr = psROffMemInfo->pvLinAddrKM;
++ sDevVAddr = psROffMemInfo->sDevVAddr;
++
++
++ pui8LinAddr += ui32ROffOffset;
++ sDevVAddr.uiAddr += ui32ROffOffset;
++
++
++
++
++
++
++ PDumpOSCPUVAddrToPhysPages(psROffMemInfo->sMemBlk.hOSMemHandle,
++ ui32ROffOffset,
++ pui8LinAddr,
++ &ui32PageOffset);
++
++
++ sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageOffset;
++
++ PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
++
++
++ BM_GetPhysPageAddr(psROffMemInfo, sDevVPageAddr, &sDevPAddr);
++
++
++ sDevPAddr.uiAddr += ui32PageOffset;
++
++ eErr = PDumpOSBufprintf(hScript,
++ ui32MaxLen,
++ "CBP :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX 0x%8.8lX\r\n",
++ (IMG_UINT32) hUniqueTag,
++ sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
++ sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
++ ui32WPosVal,
++ ui32PacketSize,
++ ui32BufferSize);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags)
++{
++ PVRSRV_ERROR eErr;
++ PDUMP_GET_SCRIPT_STRING();
++ PDUMP_DBG(("PDumpIDLWithFlags"));
++
++ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "IDL %lu\r\n", ui32Clocks);
++ if(eErr != PVRSRV_OK)
++ {
++ return eErr;
++ }
++ PDumpOSWriteString2(hScript, ui32Flags);
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks)
++{
++ return PDumpIDLWithFlags(ui32Clocks, PDUMP_FLAGS_CONTINUOUS);
++}
++#endif
++
++
++PVRSRV_ERROR PDumpMemUM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_PVOID pvAltLinAddrUM,
++ IMG_PVOID pvLinAddrUM,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag)
++{
++ IMG_VOID *pvAddrUM;
++ IMG_VOID *pvAddrKM;
++ IMG_UINT32 ui32BytesDumped;
++ IMG_UINT32 ui32CurrentOffset;
++
++ if (psMemInfo->pvLinAddrKM != IMG_NULL && pvAltLinAddrUM == IMG_NULL)
++ {
++
++ return PDumpMemKM(IMG_NULL,
++ psMemInfo,
++ ui32Offset,
++ ui32Bytes,
++ ui32Flags,
++ hUniqueTag);
++ }
++
++ pvAddrUM = (pvAltLinAddrUM != IMG_NULL) ? pvAltLinAddrUM : ((pvLinAddrUM != IMG_NULL) ? VPTR_PLUS(pvLinAddrUM, ui32Offset) : IMG_NULL);
++
++ pvAddrKM = GetTempBuffer();
++
++
++ PVR_ASSERT(pvAddrUM != IMG_NULL && pvAddrKM != IMG_NULL);
++ if (pvAddrUM == IMG_NULL || pvAddrKM == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpMemUM: Nothing to dump"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (ui32Bytes > PDUMP_TEMP_BUFFER_SIZE)
++ {
++ PDumpCommentWithFlags(ui32Flags, "Dumping 0x%8.8lx bytes of memory, in blocks of 0x%8.8lx bytes", ui32Bytes, (IMG_UINT32)PDUMP_TEMP_BUFFER_SIZE);
++ }
++
++ ui32CurrentOffset = ui32Offset;
++ for (ui32BytesDumped = 0; ui32BytesDumped < ui32Bytes;)
++ {
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32BytesToDump = MIN(PDUMP_TEMP_BUFFER_SIZE, ui32Bytes - ui32BytesDumped);
++
++ eError = OSCopyFromUser(psPerProc,
++ pvAddrKM,
++ pvAddrUM,
++ ui32BytesToDump);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpMemUM: OSCopyFromUser failed (%d), eError"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ eError = PDumpMemKM(pvAddrKM,
++ psMemInfo,
++ ui32CurrentOffset,
++ ui32BytesToDump,
++ ui32Flags,
++ hUniqueTag);
++
++ if (eError != PVRSRV_OK)
++ {
++
++ if (ui32BytesDumped != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpMemUM: PDumpMemKM failed (%d)", eError));
++ }
++ PVR_ASSERT(ui32BytesDumped == 0);
++ return eError;
++ }
++
++ VPTR_INC(pvAddrUM, ui32BytesToDump);
++ ui32CurrentOffset += ui32BytesToDump;
++ ui32BytesDumped += ui32BytesToDump;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++static PVRSRV_ERROR _PdumpAllocMMUContext(IMG_UINT32 *pui32MMUContextID)
++{
++ IMG_UINT32 i;
++
++
++ for(i=0; i<MAX_PDUMP_MMU_CONTEXTS; i++)
++ {
++ if((gui16MMUContextUsage & (1U << i)) == 0)
++ {
++
++ gui16MMUContextUsage |= 1U << i;
++ *pui32MMUContextID = i;
++ return PVRSRV_OK;
++ }
++ }
++
++ PVR_DPF((PVR_DBG_ERROR, "_PdumpAllocMMUContext: no free MMU context ids"));
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++
++static PVRSRV_ERROR _PdumpFreeMMUContext(IMG_UINT32 ui32MMUContextID)
++{
++ if(ui32MMUContextID < MAX_PDUMP_MMU_CONTEXTS)
++ {
++
++ gui16MMUContextUsage &= ~(1U << ui32MMUContextID);
++ return PVRSRV_OK;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR, "_PdumpFreeMMUContext: MMU context ids invalid"));
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++
++PVRSRV_ERROR PDumpSetMMUContext(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CHAR *pszMemSpace,
++ IMG_UINT32 *pui32MMUContextID,
++ IMG_UINT32 ui32MMUType,
++ IMG_HANDLE hUniqueTag1,
++ IMG_VOID *pvPDCPUAddr)
++{
++ IMG_UINT8 *pui8LinAddr = (IMG_UINT8 *)pvPDCPUAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_UINT32 ui32MMUContextID;
++ PVRSRV_ERROR eError;
++
++ eError = _PdumpAllocMMUContext(&ui32MMUContextID);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpSetMMUContext: _PdumpAllocMMUContext failed: %d", eError));
++ return eError;
++ }
++
++
++ sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr);
++ sDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++
++ sDevPAddr.uiAddr &= ~((PVRSRV_4K_PAGE_SIZE) -1);
++
++ PDumpComment("Set MMU Context\r\n");
++
++ PDumpComment("MMU :%s:v%d %d :%s:PA_%8.8lX%8.8lX\r\n",
++ pszMemSpace,
++ ui32MMUContextID,
++ ui32MMUType,
++ pszMemSpace,
++ hUniqueTag1,
++ sDevPAddr.uiAddr);
++
++
++ *pui32MMUContextID = ui32MMUContextID;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PDumpClearMMUContext(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CHAR *pszMemSpace,
++ IMG_UINT32 ui32MMUContextID,
++ IMG_UINT32 ui32MMUType)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++
++ PDumpComment("Clear MMU Context for memory space %s\r\n", pszMemSpace);
++
++ PDumpComment("MMU :%s:v%d %d\r\n",
++ pszMemSpace,
++ ui32MMUContextID,
++ ui32MMUType);
++
++ eError = _PdumpFreeMMUContext(ui32MMUContextID);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PDumpClearMMUContext: _PdumpFreeMMUContext failed: %d", eError));
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++#else
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/perproc.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/perproc.c
+new file mode 100644
+index 0000000..982c31f
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/perproc.c
+@@ -0,0 +1,283 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "resman.h"
++#include "handle.h"
++#include "perproc.h"
++#include "osperproc.h"
++
++#define HASH_TAB_INIT_SIZE 32
++
++static HASH_TABLE *psHashTab = IMG_NULL;
++
++static PVRSRV_ERROR FreePerProcessData(PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_ERROR eError;
++ IMG_UINTPTR_T uiPerProc;
++
++ PVR_ASSERT(psPerProc != IMG_NULL);
++
++ if (psPerProc == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ uiPerProc = HASH_Remove(psHashTab, (IMG_UINTPTR_T)psPerProc->ui32PID);
++ if (uiPerProc == 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't find process in per-process data hash table"));
++
++ PVR_ASSERT(psPerProc->ui32PID == 0);
++ }
++ else
++ {
++ PVR_ASSERT((PVRSRV_PER_PROCESS_DATA *)uiPerProc == psPerProc);
++ PVR_ASSERT(((PVRSRV_PER_PROCESS_DATA *)uiPerProc)->ui32PID == psPerProc->ui32PID);
++ }
++
++
++ if (psPerProc->psHandleBase != IMG_NULL)
++ {
++ eError = PVRSRVFreeHandleBase(psPerProc->psHandleBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't free handle base for process (%d)", eError));
++ return eError;
++ }
++ }
++
++
++ if (psPerProc->hPerProcData != IMG_NULL)
++ {
++ eError = PVRSRVReleaseHandle(KERNEL_HANDLE_BASE, psPerProc->hPerProcData, PVRSRV_HANDLE_TYPE_PERPROC_DATA);
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't release per-process data handle (%d)", eError));
++ return eError;
++ }
++ }
++
++
++ eError = OSPerProcessPrivateDataDeInit(psPerProc->hOsPrivateData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: OSPerProcessPrivateDataDeInit failed (%d)", eError));
++ return eError;
++ }
++
++ eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(*psPerProc),
++ psPerProc,
++ psPerProc->hBlockAlloc);
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't free per-process data (%d)", eError));
++ return eError;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_PER_PROCESS_DATA *PVRSRVPerProcessData(IMG_UINT32 ui32PID)
++{
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++
++ PVR_ASSERT(psHashTab != IMG_NULL);
++
++
++ psPerProc = (PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab, (IMG_UINTPTR_T)ui32PID);
++ return psPerProc;
++}
++
++
++PVRSRV_ERROR PVRSRVPerProcessDataConnect(IMG_UINT32 ui32PID)
++{
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++ IMG_HANDLE hBlockAlloc;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ PVR_ASSERT(psHashTab != IMG_NULL);
++
++
++ psPerProc = (PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab, (IMG_UINTPTR_T)ui32PID);
++
++ if (psPerProc == IMG_NULL)
++ {
++
++ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(*psPerProc),
++ (IMG_PVOID *)&psPerProc,
++ &hBlockAlloc,
++ "Per Process Data");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't allocate per-process data (%d)", eError));
++ return eError;
++ }
++ OSMemSet(psPerProc, 0, sizeof(*psPerProc));
++ psPerProc->hBlockAlloc = hBlockAlloc;
++
++ if (!HASH_Insert(psHashTab, (IMG_UINTPTR_T)ui32PID, (IMG_UINTPTR_T)psPerProc))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't insert per-process data into hash table"));
++ eError = PVRSRV_ERROR_GENERIC;
++ goto failure;
++ }
++
++ psPerProc->ui32PID = ui32PID;
++ psPerProc->ui32RefCount = 0;
++
++
++ eError = OSPerProcessPrivateDataInit(&psPerProc->hOsPrivateData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: OSPerProcessPrivateDataInit failed (%d)", eError));
++ goto failure;
++ }
++
++
++ eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE,
++ &psPerProc->hPerProcData,
++ psPerProc,
++ PVRSRV_HANDLE_TYPE_PERPROC_DATA,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't allocate handle for per-process data (%d)", eError));
++ goto failure;
++ }
++
++
++ eError = PVRSRVAllocHandleBase(&psPerProc->psHandleBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't allocate handle base for process (%d)", eError));
++ goto failure;
++ }
++
++
++ eError = OSPerProcessSetHandleOptions(psPerProc->psHandleBase);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't set handle options (%d)", eError));
++ goto failure;
++ }
++
++
++ eError = PVRSRVResManConnect(psPerProc, &psPerProc->hResManContext);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't register with the resource manager"));
++ goto failure;
++ }
++ }
++
++ psPerProc->ui32RefCount++;
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "PVRSRVPerProcessDataConnect: Process 0x%x has ref-count %d",
++ ui32PID, psPerProc->ui32RefCount));
++
++ return eError;
++
++failure:
++ (IMG_VOID)FreePerProcessData(psPerProc);
++ return eError;
++}
++
++
++IMG_VOID PVRSRVPerProcessDataDisconnect(IMG_UINT32 ui32PID)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++
++ PVR_ASSERT(psHashTab != IMG_NULL);
++
++ psPerProc = (PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab, (IMG_UINTPTR_T)ui32PID);
++ if (psPerProc == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataDealloc: Couldn't locate per-process data for PID %u", ui32PID));
++ }
++ else
++ {
++ psPerProc->ui32RefCount--;
++ if (psPerProc->ui32RefCount == 0)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVPerProcessDataDisconnect: "
++ "Last close from process 0x%x received", ui32PID));
++
++
++ PVRSRVResManDisconnect(psPerProc->hResManContext, IMG_FALSE);
++
++
++ eError = FreePerProcessData(psPerProc);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataDisconnect: Error freeing per-process data"));
++ }
++ }
++ }
++
++ eError = PVRSRVPurgeHandles(KERNEL_HANDLE_BASE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataDisconnect: Purge of global handle pool failed (%d)", eError));
++ }
++}
++
++
++PVRSRV_ERROR PVRSRVPerProcessDataInit(IMG_VOID)
++{
++ PVR_ASSERT(psHashTab == IMG_NULL);
++
++
++ psHashTab = HASH_Create(HASH_TAB_INIT_SIZE);
++ if (psHashTab == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataInit: Couldn't create per-process data hash table"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVPerProcessDataDeInit(IMG_VOID)
++{
++
++ if (psHashTab != IMG_NULL)
++ {
++
++ HASH_Delete(psHashTab);
++ psHashTab = IMG_NULL;
++ }
++
++ return PVRSRV_OK;
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/power.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/power.c
+new file mode 100644
+index 0000000..826aaa2
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/power.c
+@@ -0,0 +1,818 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "pdump_km.h"
++
++#include "lists.h"
++
++DECLARE_LIST_ANY_VA(PVRSRV_POWER_DEV);
++DECLARE_LIST_ANY_VA_2(PVRSRV_POWER_DEV, PVRSRV_ERROR, PVRSRV_OK);
++DECLARE_LIST_INSERT(PVRSRV_POWER_DEV);
++DECLARE_LIST_REMOVE(PVRSRV_POWER_DEV);
++
++IMG_VOID* MatchPowerDeviceIndex_AnyVaCb(PVRSRV_POWER_DEV *psPowerDev, va_list va);
++
++
++static IMG_BOOL gbInitServerRunning = IMG_FALSE;
++static IMG_BOOL gbInitServerRan = IMG_FALSE;
++static IMG_BOOL gbInitSuccessful = IMG_FALSE;
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState, IMG_BOOL bState)
++{
++
++ switch(eInitServerState)
++ {
++ case PVRSRV_INIT_SERVER_RUNNING:
++ gbInitServerRunning = bState;
++ break;
++ case PVRSRV_INIT_SERVER_RAN:
++ gbInitServerRan = bState;
++ break;
++ case PVRSRV_INIT_SERVER_SUCCESSFUL:
++ gbInitSuccessful = bState;
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVSetInitServerState : Unknown state %lx", eInitServerState));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++IMG_EXPORT
++IMG_BOOL PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState)
++{
++ IMG_BOOL bReturnVal;
++
++ switch(eInitServerState)
++ {
++ case PVRSRV_INIT_SERVER_RUNNING:
++ bReturnVal = gbInitServerRunning;
++ break;
++ case PVRSRV_INIT_SERVER_RAN:
++ bReturnVal = gbInitServerRan;
++ break;
++ case PVRSRV_INIT_SERVER_SUCCESSFUL:
++ bReturnVal = gbInitSuccessful;
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVGetInitServerState : Unknown state %lx", eInitServerState));
++ bReturnVal = IMG_FALSE;
++ }
++
++ return bReturnVal;
++}
++
++static IMG_BOOL _IsSystemStatePowered(PVRSRV_SYS_POWER_STATE eSystemPowerState)
++{
++ return (IMG_BOOL)(eSystemPowerState < PVRSRV_SYS_POWER_STATE_D2);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVPowerLock(IMG_UINT32 ui32CallerID,
++ IMG_BOOL bSystemPowerEvent)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++#if !defined(SYS_NO_POWER_LOCK_TIMEOUT)
++ IMG_UINT32 ui32Timeout = 1000000;
++
++#if defined(SUPPORT_LMA)
++ ui32Timeout *= 60;
++#endif
++#endif
++ SysAcquireData(&psSysData);
++
++#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
++ eError = SysPowerLockWrap(psSysData);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++#endif
++ do
++ {
++ eError = OSLockResource(&psSysData->sPowerStateChangeResource,
++ ui32CallerID);
++ if (eError == PVRSRV_OK)
++ {
++ break;
++ }
++ else if (ui32CallerID == ISR_ID)
++ {
++
++
++ eError = PVRSRV_ERROR_RETRY;
++ break;
++ }
++
++ OSWaitus(1);
++#if defined(SYS_NO_POWER_LOCK_TIMEOUT)
++ } while (1);
++#else
++ ui32Timeout--;
++ } while (ui32Timeout > 0);
++#endif
++
++#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
++ if (eError != PVRSRV_OK)
++ {
++ SysPowerLockUnwrap(psSysData);
++ }
++#endif
++ if ((eError == PVRSRV_OK) &&
++ !bSystemPowerEvent &&
++ !_IsSystemStatePowered(psSysData->eCurrentPowerState))
++ {
++
++ PVRSRVPowerUnlock(ui32CallerID);
++ eError = PVRSRV_ERROR_RETRY;
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++IMG_VOID PVRSRVPowerUnlock(IMG_UINT32 ui32CallerID)
++{
++ OSUnlockResource(&gpsSysData->sPowerStateChangeResource, ui32CallerID);
++#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
++ SysPowerLockUnwrap(gpsSysData);
++#endif
++}
++
++
++PVRSRV_ERROR PVRSRVDevicePrePowerStateKM_AnyVaCb(PVRSRV_POWER_DEV *psPowerDevice, va_list va)
++{
++ PVRSRV_DEV_POWER_STATE eNewDevicePowerState;
++ PVRSRV_ERROR eError;
++
++
++ IMG_BOOL bAllDevices;
++ IMG_UINT32 ui32DeviceIndex;
++ PVRSRV_DEV_POWER_STATE eNewPowerState;
++
++
++ bAllDevices = va_arg(va, IMG_BOOL);
++ ui32DeviceIndex = va_arg(va, IMG_UINT32);
++ eNewPowerState = va_arg(va, PVRSRV_DEV_POWER_STATE);
++
++ if (bAllDevices || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex))
++ {
++ eNewDevicePowerState = (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ?
++ psPowerDevice->eDefaultPowerState : eNewPowerState;
++
++ if (psPowerDevice->eCurrentPowerState != eNewDevicePowerState)
++ {
++ if (psPowerDevice->pfnPrePower != IMG_NULL)
++ {
++
++ eError = psPowerDevice->pfnPrePower(psPowerDevice->hDevCookie,
++ eNewDevicePowerState,
++ psPowerDevice->eCurrentPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++
++
++ eError = SysDevicePrePowerState(psPowerDevice->ui32DeviceIndex,
++ eNewDevicePowerState,
++ psPowerDevice->eCurrentPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(IMG_BOOL bAllDevices,
++ IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++
++ eError = List_PVRSRV_POWER_DEV_PVRSRV_ERROR_Any_va(psSysData->psPowerDeviceList,
++ PVRSRVDevicePrePowerStateKM_AnyVaCb,
++ bAllDevices,
++ ui32DeviceIndex,
++ eNewPowerState);
++
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVDevicePostPowerStateKM_AnyVaCb(PVRSRV_POWER_DEV *psPowerDevice, va_list va)
++{
++ PVRSRV_DEV_POWER_STATE eNewDevicePowerState;
++ PVRSRV_ERROR eError;
++
++
++ IMG_BOOL bAllDevices;
++ IMG_UINT32 ui32DeviceIndex;
++ PVRSRV_DEV_POWER_STATE eNewPowerState;
++
++
++ bAllDevices = va_arg(va, IMG_BOOL);
++ ui32DeviceIndex = va_arg(va, IMG_UINT32);
++ eNewPowerState = va_arg(va, PVRSRV_DEV_POWER_STATE);
++
++ if (bAllDevices || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex))
++ {
++ eNewDevicePowerState = (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ?
++ psPowerDevice->eDefaultPowerState : eNewPowerState;
++
++ if (psPowerDevice->eCurrentPowerState != eNewDevicePowerState)
++ {
++
++ eError = SysDevicePostPowerState(psPowerDevice->ui32DeviceIndex,
++ eNewDevicePowerState,
++ psPowerDevice->eCurrentPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ if (psPowerDevice->pfnPostPower != IMG_NULL)
++ {
++
++ eError = psPowerDevice->pfnPostPower(psPowerDevice->hDevCookie,
++ eNewDevicePowerState,
++ psPowerDevice->eCurrentPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++
++ psPowerDevice->eCurrentPowerState = eNewDevicePowerState;
++ }
++ }
++ return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR PVRSRVDevicePostPowerStateKM(IMG_BOOL bAllDevices,
++ IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++
++ eError = List_PVRSRV_POWER_DEV_PVRSRV_ERROR_Any_va(psSysData->psPowerDeviceList,
++ PVRSRVDevicePostPowerStateKM_AnyVaCb,
++ bAllDevices,
++ ui32DeviceIndex,
++ eNewPowerState);
++
++ return eError;
++}
++
++
++PVRSRV_ERROR PVRSRVSetDevicePowerStateCoreKM(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError;
++ eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState);
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ IMG_UINT32 ui32CallerID,
++ IMG_BOOL bRetainMutex)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ #if defined(PDUMP)
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT)
++ {
++
++
++
++
++ eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, PVRSRV_DEV_POWER_STATE_ON);
++ if(eError != PVRSRV_OK)
++ {
++ goto Exit;
++ }
++
++ eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, PVRSRV_DEV_POWER_STATE_ON);
++
++ if (eError != PVRSRV_OK)
++ {
++ goto Exit;
++ }
++
++ PDUMPSUSPEND();
++ }
++ #endif
++
++ eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState);
++ if(eError != PVRSRV_OK)
++ {
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT)
++ {
++ PDUMPRESUME();
++ }
++ goto Exit;
++ }
++
++ eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState);
++
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT)
++ {
++ PDUMPRESUME();
++ }
++
++Exit:
++
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVSetDevicePowerStateKM : Transition to %d FAILED 0x%x", eNewPowerState, eError));
++ }
++
++ if (!bRetainMutex || (eError != PVRSRV_OK))
++ {
++ PVRSRVPowerUnlock(ui32CallerID);
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSystemPrePowerStateKM(PVRSRV_SYS_POWER_STATE eNewSysPowerState)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ PVRSRV_DEV_POWER_STATE eNewDevicePowerState;
++
++ SysAcquireData(&psSysData);
++
++
++ eError = PVRSRVPowerLock(KERNEL_ID, IMG_TRUE);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ if (_IsSystemStatePowered(eNewSysPowerState) !=
++ _IsSystemStatePowered(psSysData->eCurrentPowerState))
++ {
++ if (_IsSystemStatePowered(eNewSysPowerState))
++ {
++
++ eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_DEFAULT;
++ }
++ else
++ {
++ eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_OFF;
++ }
++
++
++ eError = PVRSRVDevicePrePowerStateKM(IMG_TRUE, 0, eNewDevicePowerState);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++ }
++
++ if (eNewSysPowerState != psSysData->eCurrentPowerState)
++ {
++
++ eError = SysSystemPrePowerState(eNewSysPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++ }
++
++ return eError;
++
++ErrorExit:
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVSystemPrePowerStateKM: Transition from %d to %d FAILED 0x%x",
++ psSysData->eCurrentPowerState, eNewSysPowerState, eError));
++
++
++ psSysData->eFailedPowerState = eNewSysPowerState;
++
++ PVRSRVPowerUnlock(KERNEL_ID);
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSystemPostPowerStateKM(PVRSRV_SYS_POWER_STATE eNewSysPowerState)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ SYS_DATA *psSysData;
++ PVRSRV_DEV_POWER_STATE eNewDevicePowerState;
++
++ SysAcquireData(&psSysData);
++
++ if (eNewSysPowerState != psSysData->eCurrentPowerState)
++ {
++
++ eError = SysSystemPostPowerState(eNewSysPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ goto Exit;
++ }
++ }
++
++ if (_IsSystemStatePowered(eNewSysPowerState) !=
++ _IsSystemStatePowered(psSysData->eCurrentPowerState))
++ {
++ if (_IsSystemStatePowered(eNewSysPowerState))
++ {
++
++ eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_DEFAULT;
++ }
++ else
++ {
++ eNewDevicePowerState = PVRSRV_DEV_POWER_STATE_OFF;
++ }
++
++
++ eError = PVRSRVDevicePostPowerStateKM(IMG_TRUE, 0, eNewDevicePowerState);
++ if (eError != PVRSRV_OK)
++ {
++ goto Exit;
++ }
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "PVRSRVSystemPostPowerStateKM: System Power Transition from %d to %d OK",
++ psSysData->eCurrentPowerState, eNewSysPowerState));
++
++ psSysData->eCurrentPowerState = eNewSysPowerState;
++
++Exit:
++
++ PVRSRVPowerUnlock(KERNEL_ID);
++
++ if (_IsSystemStatePowered(eNewSysPowerState) &&
++ PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL))
++ {
++
++
++
++ PVRSRVCommandCompleteCallbacks();
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE eNewSysPowerState)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ eError = PVRSRVSystemPrePowerStateKM(eNewSysPowerState);
++ if(eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++ eError = PVRSRVSystemPostPowerStateKM(eNewSysPowerState);
++ if(eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++
++ psSysData->eFailedPowerState = PVRSRV_SYS_POWER_STATE_Unspecified;
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVSetPowerStateKM: Transition from %d to %d FAILED 0x%x",
++ psSysData->eCurrentPowerState, eNewSysPowerState, eError));
++
++
++ psSysData->eFailedPowerState = eNewSysPowerState;
++
++ return eError;
++}
++
++
++PVRSRV_ERROR PVRSRVRegisterPowerDevice(IMG_UINT32 ui32DeviceIndex,
++ PFN_PRE_POWER pfnPrePower,
++ PFN_POST_POWER pfnPostPower,
++ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange,
++ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange,
++ IMG_HANDLE hDevCookie,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
++ PVRSRV_DEV_POWER_STATE eDefaultPowerState)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDevice;
++
++ if (pfnPrePower == IMG_NULL &&
++ pfnPostPower == IMG_NULL)
++ {
++ return PVRSRVRemovePowerDevice(ui32DeviceIndex);
++ }
++
++ SysAcquireData(&psSysData);
++
++ eError = OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_POWER_DEV),
++ (IMG_VOID **)&psPowerDevice, IMG_NULL,
++ "Power Device");
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterPowerDevice: Failed to alloc PVRSRV_POWER_DEV"));
++ return eError;
++ }
++
++
++ psPowerDevice->pfnPrePower = pfnPrePower;
++ psPowerDevice->pfnPostPower = pfnPostPower;
++ psPowerDevice->pfnPreClockSpeedChange = pfnPreClockSpeedChange;
++ psPowerDevice->pfnPostClockSpeedChange = pfnPostClockSpeedChange;
++ psPowerDevice->hDevCookie = hDevCookie;
++ psPowerDevice->ui32DeviceIndex = ui32DeviceIndex;
++ psPowerDevice->eCurrentPowerState = eCurrentPowerState;
++ psPowerDevice->eDefaultPowerState = eDefaultPowerState;
++
++
++ List_PVRSRV_POWER_DEV_Insert(&(psSysData->psPowerDeviceList), psPowerDevice);
++
++ return (PVRSRV_OK);
++}
++
++
++PVRSRV_ERROR PVRSRVRemovePowerDevice (IMG_UINT32 ui32DeviceIndex)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDev;
++
++ SysAcquireData(&psSysData);
++
++
++ psPowerDev = (PVRSRV_POWER_DEV*)
++ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
++ MatchPowerDeviceIndex_AnyVaCb,
++ ui32DeviceIndex);
++
++ if (psPowerDev)
++ {
++ List_PVRSRV_POWER_DEV_Remove(psPowerDev);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_POWER_DEV), psPowerDev, IMG_NULL);
++
++ }
++
++ return (PVRSRV_OK);
++}
++
++
++IMG_EXPORT
++IMG_BOOL PVRSRVIsDevicePowered(IMG_UINT32 ui32DeviceIndex)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDevice;
++
++ SysAcquireData(&psSysData);
++
++ if (OSIsResourceLocked(&psSysData->sPowerStateChangeResource, KERNEL_ID) ||
++ OSIsResourceLocked(&psSysData->sPowerStateChangeResource, ISR_ID))
++ {
++ return IMG_FALSE;
++ }
++
++ psPowerDevice = (PVRSRV_POWER_DEV*)
++ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
++ MatchPowerDeviceIndex_AnyVaCb,
++ ui32DeviceIndex);
++ return (psPowerDevice && (psPowerDevice->eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON))
++ ? IMG_TRUE : IMG_FALSE;
++}
++
++
++PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(IMG_UINT32 ui32DeviceIndex,
++ IMG_BOOL bIdleDevice,
++ IMG_VOID *pvInfo)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDevice;
++
++ PVR_UNREFERENCED_PARAMETER(pvInfo);
++
++ SysAcquireData(&psSysData);
++
++ if (bIdleDevice)
++ {
++
++ eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVDevicePreClockSpeedChange : failed to acquire lock, error:0x%lx", eError));
++ return eError;
++ }
++ }
++
++
++ psPowerDevice = (PVRSRV_POWER_DEV*)
++ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
++ MatchPowerDeviceIndex_AnyVaCb,
++ ui32DeviceIndex);
++
++	if (psPowerDevice && psPowerDevice->pfnPreClockSpeedChange)
++ {
++ eError = psPowerDevice->pfnPreClockSpeedChange(psPowerDevice->hDevCookie,
++ bIdleDevice,
++ psPowerDevice->eCurrentPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVDevicePreClockSpeedChange : Device %lu failed, error:0x%lx",
++ ui32DeviceIndex, eError));
++ }
++ }
++
++ if (bIdleDevice && eError != PVRSRV_OK)
++ {
++ PVRSRVPowerUnlock(KERNEL_ID);
++ }
++
++ return eError;
++}
++
++
++IMG_VOID PVRSRVDevicePostClockSpeedChange(IMG_UINT32 ui32DeviceIndex,
++ IMG_BOOL bIdleDevice,
++ IMG_VOID *pvInfo)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDevice;
++
++ PVR_UNREFERENCED_PARAMETER(pvInfo);
++
++ SysAcquireData(&psSysData);
++
++
++ psPowerDevice = (PVRSRV_POWER_DEV*)
++ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
++ MatchPowerDeviceIndex_AnyVaCb,
++ ui32DeviceIndex);
++
++ if (psPowerDevice && psPowerDevice->pfnPostClockSpeedChange)
++ {
++ eError = psPowerDevice->pfnPostClockSpeedChange(psPowerDevice->hDevCookie,
++ bIdleDevice,
++ psPowerDevice->eCurrentPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVDevicePostClockSpeedChange : Device %lu failed, error:0x%lx",
++ ui32DeviceIndex, eError));
++ }
++ }
++
++
++ if (bIdleDevice)
++ {
++
++ PVRSRVPowerUnlock(KERNEL_ID);
++ }
++}
++
++
++/*
++ * PVRSRVPowerOnSystemWithDevice
++ *
++ * Description: Power on the System if it is off, but instead of powering all
++ * of the devices to their "default" state, only turn on the specified
++ * device index.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVPowerOnSystemWithDevice(IMG_UINT32 ui32DeviceIndex,
++ IMG_UINT32 ui32CallerID,
++ IMG_BOOL bRetainMutex)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ eError = PVRSRVPowerLock(ui32CallerID, IMG_TRUE);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, PVRSRV_DEV_POWER_STATE_ON);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++ if (!_IsSystemStatePowered(psSysData->eCurrentPowerState))
++ {
++ eError = SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE_D0);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++ eError = SysSystemPostPowerState(PVRSRV_SYS_POWER_STATE_D0);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++ psSysData->eCurrentPowerState = PVRSRV_SYS_POWER_STATE_D0;
++ }
++
++ eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, PVRSRV_DEV_POWER_STATE_ON);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++ErrorExit:
++
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVPowerOnSystemWithDevice : FAILED 0x%x", eError));
++ }
++
++ if (!bRetainMutex || (eError != PVRSRV_OK))
++ {
++ PVRSRVPowerUnlock(ui32CallerID);
++ }
++
++ return eError;
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/pvrsrv.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/pvrsrv.c
+new file mode 100644
+index 0000000..2c33fce
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/pvrsrv.c
+@@ -0,0 +1,1195 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "handle.h"
++#include "perproc.h"
++#include "pdump_km.h"
++#include "ra.h"
++
++#include "pvrversion.h"
++
++#include "lists.h"
++
++DECLARE_LIST_ANY_VA_2(BM_CONTEXT, PVRSRV_ERROR, PVRSRV_OK);
++
++DECLARE_LIST_FOR_EACH_VA(BM_HEAP);
++
++DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK);
++DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK);
++DECLARE_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_INSERT(PVRSRV_DEVICE_NODE);
++DECLARE_LIST_REMOVE(PVRSRV_DEVICE_NODE);
++
++IMG_VOID* MatchDeviceKM_AnyVaCb(PVRSRV_DEVICE_NODE* psDeviceNode, va_list va);
++
++
++PVRSRV_ERROR AllocateDeviceID(SYS_DATA *psSysData, IMG_UINT32 *pui32DevID)
++{
++ SYS_DEVICE_ID* psDeviceWalker;
++ SYS_DEVICE_ID* psDeviceEnd;
++
++ psDeviceWalker = &psSysData->sDeviceID[0];
++ psDeviceEnd = psDeviceWalker + psSysData->ui32NumDevices;
++
++
++ while (psDeviceWalker < psDeviceEnd)
++ {
++ if (!psDeviceWalker->bInUse)
++ {
++ psDeviceWalker->bInUse = IMG_TRUE;
++ *pui32DevID = psDeviceWalker->uiID;
++ return PVRSRV_OK;
++ }
++ psDeviceWalker++;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR,"AllocateDeviceID: No free and valid device IDs available!"));
++
++
++ PVR_ASSERT(psDeviceWalker < psDeviceEnd);
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++
++PVRSRV_ERROR FreeDeviceID(SYS_DATA *psSysData, IMG_UINT32 ui32DevID)
++{
++ SYS_DEVICE_ID* psDeviceWalker;
++ SYS_DEVICE_ID* psDeviceEnd;
++
++ psDeviceWalker = &psSysData->sDeviceID[0];
++ psDeviceEnd = psDeviceWalker + psSysData->ui32NumDevices;
++
++
++ while (psDeviceWalker < psDeviceEnd)
++ {
++
++ if (
++ (psDeviceWalker->uiID == ui32DevID) &&
++ (psDeviceWalker->bInUse)
++ )
++ {
++ psDeviceWalker->bInUse = IMG_FALSE;
++ return PVRSRV_OK;
++ }
++ psDeviceWalker++;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR,"FreeDeviceID: no matching dev ID that is in use!"));
++
++
++ PVR_ASSERT(psDeviceWalker < psDeviceEnd);
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++
++#ifndef ReadHWReg
++IMG_EXPORT
++IMG_UINT32 ReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset)
++{
++ return *(volatile IMG_UINT32*)((IMG_UINTPTR_T)pvLinRegBaseAddr+ui32Offset);
++}
++#endif
++
++
++#ifndef WriteHWReg
++IMG_EXPORT
++IMG_VOID WriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
++{
++ PVR_DPF((PVR_DBG_MESSAGE,"WriteHWReg Base:%x, Offset: %x, Value %x",pvLinRegBaseAddr,ui32Offset,ui32Value));
++
++ *(IMG_UINT32*)((IMG_UINTPTR_T)pvLinRegBaseAddr+ui32Offset) = ui32Value;
++}
++#endif
++
++
++#ifndef WriteHWRegs
++IMG_EXPORT
++IMG_VOID WriteHWRegs(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Count, PVRSRV_HWREG *psHWRegs)
++{
++ while (ui32Count)
++ {
++ WriteHWReg (pvLinRegBaseAddr, psHWRegs->ui32RegAddr, psHWRegs->ui32RegVal);
++ psHWRegs++;
++ ui32Count--;
++ }
++}
++#endif
++
++IMG_VOID PVRSRVEnumerateDevicesKM_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
++{
++ IMG_UINT *pui32DevCount;
++ PVRSRV_DEVICE_IDENTIFIER **ppsDevIdList;
++
++ pui32DevCount = va_arg(va, IMG_UINT*);
++ ppsDevIdList = va_arg(va, PVRSRV_DEVICE_IDENTIFIER**);
++
++ if (psDeviceNode->sDevId.eDeviceType != PVRSRV_DEVICE_TYPE_EXT)
++ {
++ *(*ppsDevIdList) = psDeviceNode->sDevId;
++ (*ppsDevIdList)++;
++ (*pui32DevCount)++;
++ }
++}
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevicesKM(IMG_UINT32 *pui32NumDevices,
++ PVRSRV_DEVICE_IDENTIFIER *psDevIdList)
++{
++ SYS_DATA *psSysData;
++ IMG_UINT32 i;
++
++ if (!pui32NumDevices || !psDevIdList)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumerateDevicesKM: Invalid params"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ SysAcquireData(&psSysData);
++
++
++
++ for (i=0; i<PVRSRV_MAX_DEVICES; i++)
++ {
++ psDevIdList[i].eDeviceType = PVRSRV_DEVICE_TYPE_UNKNOWN;
++ }
++
++
++ *pui32NumDevices = 0;
++
++
++
++
++
++ List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList,
++ PVRSRVEnumerateDevicesKM_ForEachVaCb,
++ pui32NumDevices,
++ &psDevIdList);
++
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInit(PSYS_DATA psSysData)
++{
++ PVRSRV_ERROR eError;
++
++
++ eError = ResManInit();
++ if (eError != PVRSRV_OK)
++ {
++ goto Error;
++ }
++
++ eError = PVRSRVPerProcessDataInit();
++ if(eError != PVRSRV_OK)
++ {
++ goto Error;
++ }
++
++
++ eError = PVRSRVHandleInit();
++ if(eError != PVRSRV_OK)
++ {
++ goto Error;
++ }
++
++
++ eError = OSCreateResource(&psSysData->sPowerStateChangeResource);
++ if (eError != PVRSRV_OK)
++ {
++ goto Error;
++ }
++
++
++ psSysData->eCurrentPowerState = PVRSRV_SYS_POWER_STATE_D0;
++ psSysData->eFailedPowerState = PVRSRV_SYS_POWER_STATE_Unspecified;
++
++
++ if(OSAllocMem( PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_EVENTOBJECT) ,
++ (IMG_VOID **)&psSysData->psGlobalEventObject, 0,
++ "Event Object") != PVRSRV_OK)
++ {
++
++ goto Error;
++ }
++
++ if(OSEventObjectCreate("PVRSRV_GLOBAL_EVENTOBJECT", psSysData->psGlobalEventObject) != PVRSRV_OK)
++ {
++ goto Error;
++ }
++
++ return eError;
++
++Error:
++ PVRSRVDeInit(psSysData);
++ return eError;
++}
++
++
++
++IMG_VOID IMG_CALLCONV PVRSRVDeInit(PSYS_DATA psSysData)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++
++ if (psSysData == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PVRSRVHandleDeInit failed - invalid param"));
++ return;
++ }
++
++
++ if(psSysData->psGlobalEventObject)
++ {
++ OSEventObjectDestroy(psSysData->psGlobalEventObject);
++ OSFreeMem( PVRSRV_PAGEABLE_SELECT,
++ sizeof(PVRSRV_EVENTOBJECT),
++ psSysData->psGlobalEventObject,
++ 0);
++ psSysData->psGlobalEventObject = IMG_NULL;
++ }
++
++ eError = PVRSRVHandleDeInit();
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PVRSRVHandleDeInit failed"));
++ }
++
++ eError = PVRSRVPerProcessDataDeInit();
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PVRSRVPerProcessDataDeInit failed"));
++ }
++
++ ResManDeInit();
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVRegisterDevice(PSYS_DATA psSysData,
++ PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*),
++ IMG_UINT32 ui32SOCInterruptBit,
++ IMG_UINT32 *pui32DeviceIndex)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++
++ if(OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DEVICE_NODE),
++ (IMG_VOID **)&psDeviceNode, IMG_NULL,
++ "Device Node") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDevice : Failed to alloc memory for psDeviceNode"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++ OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE));
++
++ eError = pfnRegisterDevice(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL);
++
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDevice : Failed to register device"));
++ return (PVRSRV_ERROR_DEVICE_REGISTER_FAILED);
++ }
++
++
++
++
++
++
++ psDeviceNode->ui32RefCount = 1;
++ psDeviceNode->psSysData = psSysData;
++ psDeviceNode->ui32SOCInterruptBit = ui32SOCInterruptBit;
++
++
++ AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex);
++
++
++ List_PVRSRV_DEVICE_NODE_Insert(&psSysData->psDeviceNodeList, psDeviceNode);
++
++
++ *pui32DeviceIndex = psDeviceNode->sDevId.ui32DeviceIndex;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInitialiseDevice (IMG_UINT32 ui32DevIndex)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInitialiseDevice"));
++
++ SysAcquireData(&psSysData);
++
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE*)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ MatchDeviceKM_AnyVaCb,
++ ui32DevIndex,
++ IMG_TRUE);
++ if(!psDeviceNode)
++ {
++
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: requested device is not present"));
++ return PVRSRV_ERROR_INIT_FAILURE;
++ }
++ PVR_ASSERT (psDeviceNode->ui32RefCount > 0);
++
++
++
++ eError = PVRSRVResManConnect(IMG_NULL, &psDeviceNode->hResManContext);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: Failed PVRSRVResManConnect call"));
++ return eError;
++ }
++
++
++ if(psDeviceNode->pfnInitDevice != IMG_NULL)
++ {
++ eError = psDeviceNode->pfnInitDevice(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: Failed InitDevice call"));
++ return eError;
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PVRSRVFinaliseSystem_SetPowerState_AnyCb(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE_DEFAULT,
++ KERNEL_ID, IMG_FALSE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed PVRSRVSetDevicePowerStateKM call (device index: %d)", psDeviceNode->sDevId.ui32DeviceIndex));
++ }
++ return eError;
++}
++
++PVRSRV_ERROR PVRSRVFinaliseSystem_CompatCheck_AnyCb(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ eError = PVRSRVDevInitCompatCheck(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed PVRSRVDevInitCompatCheck call (device index: %d)", psDeviceNode->sDevId.ui32DeviceIndex));
++ }
++ return eError;
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFinaliseSystem(IMG_BOOL bInitSuccessful)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVFinaliseSystem"));
++
++ SysAcquireData(&psSysData);
++
++ if (bInitSuccessful)
++ {
++ eError = SysFinalise();
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: SysFinalise failed (%d)", eError));
++ return eError;
++ }
++
++
++ eError = List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any(psSysData->psDeviceNodeList,
++ PVRSRVFinaliseSystem_SetPowerState_AnyCb);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++
++ eError = List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any(psSysData->psDeviceNodeList,
++ PVRSRVFinaliseSystem_CompatCheck_AnyCb);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++
++
++
++
++
++
++
++
++#if !defined(SUPPORT_PDUMP_DELAYED_INITPHASE_TERMINATION)
++ PDUMPENDINITPHASE();
++#endif
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++
++ if (psDeviceNode->pfnInitDeviceCompatCheck)
++ return psDeviceNode->pfnInitDeviceCompatCheck(psDeviceNode);
++ else
++ return PVRSRV_OK;
++}
++
++IMG_VOID * PVRSRVAcquireDeviceDataKM_Match_AnyVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
++{
++ PVRSRV_DEVICE_TYPE eDeviceType;
++ IMG_UINT32 ui32DevIndex;
++
++ eDeviceType = va_arg(va, PVRSRV_DEVICE_TYPE);
++ ui32DevIndex = va_arg(va, IMG_UINT32);
++
++ if ((eDeviceType != PVRSRV_DEVICE_TYPE_UNKNOWN &&
++ psDeviceNode->sDevId.eDeviceType == eDeviceType) ||
++ (eDeviceType == PVRSRV_DEVICE_TYPE_UNKNOWN &&
++ psDeviceNode->sDevId.ui32DeviceIndex == ui32DevIndex))
++ {
++ return psDeviceNode;
++ }
++ else
++ {
++ return IMG_NULL;
++ }
++}
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceDataKM (IMG_UINT32 ui32DevIndex,
++ PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_HANDLE *phDevCookie)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SYS_DATA *psSysData;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVAcquireDeviceDataKM"));
++
++ SysAcquireData(&psSysData);
++
++
++ psDeviceNode = List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ PVRSRVAcquireDeviceDataKM_Match_AnyVaCb,
++ eDeviceType,
++ ui32DevIndex);
++
++
++ if (!psDeviceNode)
++ {
++
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVAcquireDeviceDataKM: requested device is not present"));
++ return PVRSRV_ERROR_INIT_FAILURE;
++ }
++
++ PVR_ASSERT (psDeviceNode->ui32RefCount > 0);
++
++
++ if (phDevCookie)
++ {
++ *phDevCookie = (IMG_HANDLE)psDeviceNode;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++
++ SysAcquireData(&psSysData);
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE*)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ MatchDeviceKM_AnyVaCb,
++ ui32DevIndex,
++ IMG_TRUE);
++
++ if (!psDeviceNode)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: requested device %d is not present", ui32DevIndex));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++
++ eError = PVRSRVSetDevicePowerStateKM(ui32DevIndex,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ KERNEL_ID,
++ IMG_FALSE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed PVRSRVSetDevicePowerStateKM call"));
++ return eError;
++ }
++
++
++
++ eError = ResManFreeResByCriteria(psDeviceNode->hResManContext,
++ RESMAN_CRITERIA_RESTYPE,
++ RESMAN_TYPE_DEVICEMEM_ALLOCATION,
++ IMG_NULL, 0);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed ResManFreeResByCriteria call"));
++ return eError;
++ }
++
++
++
++ if(psDeviceNode->pfnDeInitDevice != IMG_NULL)
++ {
++ eError = psDeviceNode->pfnDeInitDevice(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed DeInitDevice call"));
++ return eError;
++ }
++ }
++
++
++
++ PVRSRVResManDisconnect(psDeviceNode->hResManContext, IMG_TRUE);
++ psDeviceNode->hResManContext = IMG_NULL;
++
++
++ List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode);
++
++
++ (IMG_VOID)FreeDeviceID(psSysData, ui32DevIndex);
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_DEVICE_NODE), psDeviceNode, IMG_NULL);
++
++
++ return (PVRSRV_OK);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PollForValueKM (volatile IMG_UINT32* pui32LinMemAddr,
++ IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Waitus,
++ IMG_UINT32 ui32Tries)
++{
++ {
++ IMG_UINT32 uiMaxTime = ui32Tries * ui32Waitus;
++
++ LOOP_UNTIL_TIMEOUT(uiMaxTime)
++ {
++ if((*pui32LinMemAddr & ui32Mask) == ui32Value)
++ {
++ return PVRSRV_OK;
++ }
++ OSWaitus(ui32Waitus);
++ } END_LOOP_UNTIL_TIMEOUT();
++ }
++
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++
++#if defined (USING_ISR_INTERRUPTS)
++
++extern IMG_UINT32 gui32EventStatusServicesByISR;
++
++PVRSRV_ERROR PollForInterruptKM (IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Waitus,
++ IMG_UINT32 ui32Tries)
++{
++ IMG_UINT32 uiMaxTime;
++
++ uiMaxTime = ui32Tries * ui32Waitus;
++
++
++ LOOP_UNTIL_TIMEOUT(uiMaxTime)
++ {
++ if ((gui32EventStatusServicesByISR & ui32Mask) == ui32Value)
++ {
++ gui32EventStatusServicesByISR = 0;
++ return PVRSRV_OK;
++ }
++ OSWaitus(ui32Waitus);
++ } END_LOOP_UNTIL_TIMEOUT();
++
++ return PVRSRV_ERROR_GENERIC;
++}
++#endif
++
++IMG_VOID PVRSRVGetMiscInfoKM_RA_GetStats_ForEachVaCb(BM_HEAP *psBMHeap, va_list va)
++{
++ IMG_CHAR **ppszStr;
++ IMG_UINT32 *pui32StrLen;
++
++ ppszStr = va_arg(va, IMG_CHAR**);
++ pui32StrLen = va_arg(va, IMG_UINT32*);
++
++ if(psBMHeap->pImportArena)
++ {
++ RA_GetStats(psBMHeap->pImportArena,
++ ppszStr,
++ pui32StrLen);
++ }
++
++ if(psBMHeap->pVMArena)
++ {
++ RA_GetStats(psBMHeap->pVMArena,
++ ppszStr,
++ pui32StrLen);
++ }
++}
++
++PVRSRV_ERROR PVRSRVGetMiscInfoKM_BMContext_AnyVaCb(BM_CONTEXT *psBMContext, va_list va)
++{
++
++ IMG_UINT32 *pui32StrLen;
++ IMG_INT32 *pi32Count;
++ IMG_CHAR **ppszStr;
++
++ pui32StrLen = va_arg(va, IMG_UINT32*);
++ pi32Count = va_arg(va, IMG_INT32*);
++ ppszStr = va_arg(va, IMG_CHAR**);
++
++ CHECK_SPACE(*pui32StrLen);
++ *pi32Count = OSSNPrintf(*ppszStr, 100, "\nApplication Context (hDevMemContext) 0x%08X:\n",
++ (IMG_HANDLE)psBMContext);
++ UPDATE_SPACE(*ppszStr, *pi32Count, *pui32StrLen);
++
++ List_BM_HEAP_ForEach_va(psBMContext->psBMHeap,
++ PVRSRVGetMiscInfoKM_RA_GetStats_ForEachVaCb,
++ ppszStr,
++ pui32StrLen);
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PVRSRVGetMiscInfoKM_Device_AnyVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
++{
++ IMG_UINT32 *pui32StrLen;
++ IMG_INT32 *pi32Count;
++ IMG_CHAR **ppszStr;
++
++ pui32StrLen = va_arg(va, IMG_UINT32*);
++ pi32Count = va_arg(va, IMG_INT32*);
++ ppszStr = va_arg(va, IMG_CHAR**);
++
++ CHECK_SPACE(*pui32StrLen);
++ *pi32Count = OSSNPrintf(*ppszStr, 100, "\n\nDevice Type %d:\n", psDeviceNode->sDevId.eDeviceType);
++ UPDATE_SPACE(*ppszStr, *pi32Count, *pui32StrLen);
++
++
++ if(psDeviceNode->sDevMemoryInfo.pBMKernelContext)
++ {
++ CHECK_SPACE(*pui32StrLen);
++ *pi32Count = OSSNPrintf(*ppszStr, 100, "\nKernel Context:\n");
++ UPDATE_SPACE(*ppszStr, *pi32Count, *pui32StrLen);
++
++
++ List_BM_HEAP_ForEach_va(psDeviceNode->sDevMemoryInfo.pBMKernelContext->psBMHeap,
++ PVRSRVGetMiscInfoKM_RA_GetStats_ForEachVaCb,
++ ppszStr,
++ pui32StrLen);
++ }
++
++
++ return List_BM_CONTEXT_PVRSRV_ERROR_Any_va(psDeviceNode->sDevMemoryInfo.pBMContext,
++ PVRSRVGetMiscInfoKM_BMContext_AnyVaCb,
++ pui32StrLen,
++ pi32Count,
++ ppszStr);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO *psMiscInfo)
++{
++ SYS_DATA *psSysData;
++
++ if(!psMiscInfo)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psMiscInfo->ui32StatePresent = 0;
++
++
++ if(psMiscInfo->ui32StateRequest & ~(PVRSRV_MISC_INFO_TIMER_PRESENT
++ |PVRSRV_MISC_INFO_CLOCKGATE_PRESENT
++ |PVRSRV_MISC_INFO_MEMSTATS_PRESENT
++ |PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT
++ |PVRSRV_MISC_INFO_DDKVERSION_PRESENT
++ |PVRSRV_MISC_INFO_CPUCACHEFLUSH_PRESENT
++ |PVRSRV_MISC_INFO_RESET_PRESENT))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid state request flags"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ SysAcquireData(&psSysData);
++
++
++ if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_TIMER_PRESENT) != 0UL) &&
++ (psSysData->pvSOCTimerRegisterKM != IMG_NULL))
++ {
++ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_TIMER_PRESENT;
++ psMiscInfo->pvSOCTimerRegisterKM = psSysData->pvSOCTimerRegisterKM;
++ psMiscInfo->hSOCTimerRegisterOSMemHandle = psSysData->hSOCTimerRegisterOSMemHandle;
++ }
++ else
++ {
++ psMiscInfo->pvSOCTimerRegisterKM = IMG_NULL;
++ psMiscInfo->hSOCTimerRegisterOSMemHandle = IMG_NULL;
++ }
++
++
++ if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CLOCKGATE_PRESENT) != 0UL) &&
++ (psSysData->pvSOCClockGateRegsBase != IMG_NULL))
++ {
++ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_CLOCKGATE_PRESENT;
++ psMiscInfo->pvSOCClockGateRegs = psSysData->pvSOCClockGateRegsBase;
++ psMiscInfo->ui32SOCClockGateRegsSize = psSysData->ui32SOCClockGateRegsSize;
++ }
++
++
++ if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0UL) &&
++ (psMiscInfo->pszMemoryStr != IMG_NULL))
++ {
++ RA_ARENA **ppArena;
++ IMG_CHAR *pszStr;
++ IMG_UINT32 ui32StrLen;
++ IMG_INT32 i32Count;
++
++ pszStr = psMiscInfo->pszMemoryStr;
++ ui32StrLen = psMiscInfo->ui32MemoryStrLen;
++
++ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_MEMSTATS_PRESENT;
++
++
++ ppArena = &psSysData->apsLocalDevMemArena[0];
++ while(*ppArena)
++ {
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "\nLocal Backing Store:\n");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ RA_GetStats(*ppArena,
++ &pszStr,
++ &ui32StrLen);
++
++ ppArena++;
++ }
++
++
++
++ List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psSysData->psDeviceNodeList,
++ PVRSRVGetMiscInfoKM_Device_AnyVaCb,
++ &ui32StrLen,
++ &i32Count,
++ &pszStr);
++
++
++ i32Count = OSSNPrintf(pszStr, 100, "\n\0");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++
++ if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT) != 0UL) &&
++ (psSysData->psGlobalEventObject != IMG_NULL))
++ {
++ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT;
++ psMiscInfo->sGlobalEventObject = *psSysData->psGlobalEventObject;
++ }
++
++
++
++ if (((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0UL)
++ && ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) == 0UL)
++ && (psMiscInfo->pszMemoryStr != IMG_NULL))
++ {
++ IMG_CHAR *pszStr;
++ IMG_UINT32 ui32StrLen;
++ IMG_UINT32 ui32LenStrPerNum = 12;
++ IMG_INT32 i32Count;
++ IMG_INT i;
++ psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_DDKVERSION_PRESENT;
++
++
++ psMiscInfo->aui32DDKVersion[0] = PVRVERSION_MAJ;
++ psMiscInfo->aui32DDKVersion[1] = PVRVERSION_MIN;
++ psMiscInfo->aui32DDKVersion[2] = PVRVERSION_BRANCH;
++ psMiscInfo->aui32DDKVersion[3] = PVRVERSION_BUILD;
++
++ pszStr = psMiscInfo->pszMemoryStr;
++ ui32StrLen = psMiscInfo->ui32MemoryStrLen;
++
++ for (i=0; i<4; i++)
++ {
++ if (ui32StrLen < ui32LenStrPerNum)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ i32Count = OSSNPrintf(pszStr, ui32LenStrPerNum, "%ld", psMiscInfo->aui32DDKVersion[i]);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ if (i != 3)
++ {
++ i32Count = OSSNPrintf(pszStr, 2, ".");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++ }
++ }
++
++#if defined(SUPPORT_CPU_CACHED_BUFFERS)
++ if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CPUCACHEFLUSH_PRESENT) != 0UL)
++ {
++ if(psMiscInfo->bDeferCPUCacheFlush)
++ {
++
++ if(!psMiscInfo->bCPUCacheFlushAll)
++ {
++
++
++
++ PVR_DPF((PVR_DBG_MESSAGE,"PVRSRVGetMiscInfoKM: don't support deferred range flushes"));
++ PVR_DPF((PVR_DBG_MESSAGE," using deferred flush all instead"));
++ }
++
++ psSysData->bFlushAll = IMG_TRUE;
++ }
++ else
++ {
++
++ if(psMiscInfo->bCPUCacheFlushAll)
++ {
++
++ OSFlushCPUCacheKM();
++
++ psSysData->bFlushAll = IMG_FALSE;
++ }
++ else
++ {
++
++ OSFlushCPUCacheRangeKM(psMiscInfo->pvRangeAddrStart, psMiscInfo->pvRangeAddrEnd);
++ }
++ }
++ }
++#endif
++
++#if defined(PVRSRV_RESET_ON_HWTIMEOUT)
++ if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_RESET_PRESENT) != 0UL)
++ {
++ PVR_LOG(("User requested OS reset"));
++ OSPanic();
++ }
++#endif
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetFBStatsKM(IMG_UINT32 *pui32Total,
++ IMG_UINT32 *pui32Available)
++{
++ IMG_UINT32 ui32Total = 0, i = 0;
++ IMG_UINT32 ui32Available = 0;
++
++ *pui32Total = 0;
++ *pui32Available = 0;
++
++
++ while(BM_ContiguousStatistics(i, &ui32Total, &ui32Available) == IMG_TRUE)
++ {
++ *pui32Total += ui32Total;
++ *pui32Available += ui32Available;
++
++ i++;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_BOOL IMG_CALLCONV PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ SYS_DATA *psSysData;
++ IMG_BOOL bStatus = IMG_FALSE;
++ IMG_UINT32 ui32InterruptSource;
++
++ if(!psDeviceNode)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVDeviceLISR: Invalid params\n"));
++ goto out;
++ }
++ psSysData = psDeviceNode->psSysData;
++
++
++ ui32InterruptSource = SysGetInterruptSource(psSysData, psDeviceNode);
++ if(ui32InterruptSource & psDeviceNode->ui32SOCInterruptBit)
++ {
++ if(psDeviceNode->pfnDeviceISR != IMG_NULL)
++ {
++ bStatus = (*psDeviceNode->pfnDeviceISR)(psDeviceNode->pvISRData);
++ }
++
++ SysClearInterrupts(psSysData, psDeviceNode->ui32SOCInterruptBit);
++ }
++
++out:
++ return bStatus;
++}
++
++IMG_VOID PVRSRVSystemLISR_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, va_list va)
++{
++
++ IMG_BOOL *pbStatus;
++ IMG_UINT32 *pui32InterruptSource;
++ IMG_UINT32 *pui32ClearInterrupts;
++
++ pbStatus = va_arg(va, IMG_BOOL*);
++ pui32InterruptSource = va_arg(va, IMG_UINT32*);
++ pui32ClearInterrupts = va_arg(va, IMG_UINT32*);
++
++
++ if(psDeviceNode->pfnDeviceISR != IMG_NULL)
++ {
++ if(*pui32InterruptSource & psDeviceNode->ui32SOCInterruptBit)
++ {
++ if((*psDeviceNode->pfnDeviceISR)(psDeviceNode->pvISRData))
++ {
++
++ *pbStatus = IMG_TRUE;
++ }
++
++ *pui32ClearInterrupts |= psDeviceNode->ui32SOCInterruptBit;
++ }
++ }
++}
++
++IMG_BOOL IMG_CALLCONV PVRSRVSystemLISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = pvSysData;
++ IMG_BOOL bStatus = IMG_FALSE;
++ IMG_UINT32 ui32InterruptSource;
++ IMG_UINT32 ui32ClearInterrupts = 0;
++ if(!psSysData)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSystemLISR: Invalid params\n"));
++ }
++ else
++ {
++
++ ui32InterruptSource = SysGetInterruptSource(psSysData, IMG_NULL);
++
++
++ if(ui32InterruptSource)
++ {
++
++ List_PVRSRV_DEVICE_NODE_ForEach_va(psSysData->psDeviceNodeList,
++ PVRSRVSystemLISR_ForEachVaCb,
++ &bStatus,
++ &ui32InterruptSource,
++ &ui32ClearInterrupts);
++
++ SysClearInterrupts(psSysData, ui32ClearInterrupts);
++ }
++ }
++ return bStatus;
++}
++
++
++IMG_VOID PVRSRVMISR_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ if(psDeviceNode->pfnDeviceMISR != IMG_NULL)
++ {
++ (*psDeviceNode->pfnDeviceMISR)(psDeviceNode->pvISRData);
++ }
++}
++
++IMG_VOID IMG_CALLCONV PVRSRVMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = pvSysData;
++ if(!psSysData)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVMISR: Invalid params\n"));
++ return;
++ }
++
++
++ List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList,
++ PVRSRVMISR_ForEachCb);
++
++
++ if (PVRSRVProcessQueues(ISR_ID, IMG_FALSE) == PVRSRV_ERROR_PROCESSING_BLOCKED)
++ {
++ PVRSRVProcessQueues(ISR_ID, IMG_FALSE);
++ }
++
++
++ if (psSysData->psGlobalEventObject)
++ {
++ IMG_HANDLE hOSEventKM = psSysData->psGlobalEventObject->hOSEventKM;
++ if(hOSEventKM)
++ {
++ OSEventObjectSignal(hOSEventKM);
++ }
++ }
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVProcessConnect(IMG_UINT32 ui32PID)
++{
++ return PVRSRVPerProcessDataConnect(ui32PID);
++}
++
++
++IMG_EXPORT
++IMG_VOID IMG_CALLCONV PVRSRVProcessDisconnect(IMG_UINT32 ui32PID)
++{
++ PVRSRVPerProcessDataDisconnect(ui32PID);
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer,
++ IMG_SIZE_T *puiBufSize, IMG_BOOL bSave)
++{
++ IMG_SIZE_T uiBytesSaved = 0;
++ IMG_PVOID pvLocalMemCPUVAddr;
++ RA_SEGMENT_DETAILS sSegDetails;
++
++ if (hArena == IMG_NULL)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ sSegDetails.uiSize = 0;
++ sSegDetails.sCpuPhyAddr.uiAddr = 0;
++ sSegDetails.hSegment = 0;
++
++
++ while (RA_GetNextLiveSegment(hArena, &sSegDetails))
++ {
++ if (pbyBuffer == IMG_NULL)
++ {
++
++ uiBytesSaved += sizeof(sSegDetails.uiSize) + sSegDetails.uiSize;
++ }
++ else
++ {
++ if ((uiBytesSaved + sizeof(sSegDetails.uiSize) + sSegDetails.uiSize) > *puiBufSize)
++ {
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVSaveRestoreLiveSegments: Base %08x size %08x", sSegDetails.sCpuPhyAddr.uiAddr, sSegDetails.uiSize));
++
++
++ pvLocalMemCPUVAddr = OSMapPhysToLin(sSegDetails.sCpuPhyAddr,
++ sSegDetails.uiSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ if (pvLocalMemCPUVAddr == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSaveRestoreLiveSegments: Failed to map local memory to host"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++
++ if (bSave)
++ {
++
++ OSMemCopy(pbyBuffer, &sSegDetails.uiSize, sizeof(sSegDetails.uiSize));
++ pbyBuffer += sizeof(sSegDetails.uiSize);
++
++ OSMemCopy(pbyBuffer, pvLocalMemCPUVAddr, sSegDetails.uiSize);
++ pbyBuffer += sSegDetails.uiSize;
++ }
++ else
++ {
++ IMG_UINT32 uiSize;
++
++ OSMemCopy(&uiSize, pbyBuffer, sizeof(sSegDetails.uiSize));
++
++ if (uiSize != sSegDetails.uiSize)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSaveRestoreLiveSegments: Segment size error"));
++ }
++ else
++ {
++ pbyBuffer += sizeof(sSegDetails.uiSize);
++
++ OSMemCopy(pvLocalMemCPUVAddr, pbyBuffer, sSegDetails.uiSize);
++ pbyBuffer += sSegDetails.uiSize;
++ }
++ }
++
++
++ uiBytesSaved += sizeof(sSegDetails.uiSize) + sSegDetails.uiSize;
++
++ OSUnMapPhysToLin(pvLocalMemCPUVAddr,
++ sSegDetails.uiSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ }
++ }
++
++ if (pbyBuffer == IMG_NULL)
++ {
++ *puiBufSize = uiBytesSaved;
++ }
++
++ return (PVRSRV_OK);
++}
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/queue.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/queue.c
+new file mode 100644
+index 0000000..e535ddd
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/queue.c
+@@ -0,0 +1,1137 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++
++#include "lists.h"
++
++DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE);
++
++#if defined(__linux__) && defined(__KERNEL__)
++
++#include "proc.h"
++
++static IMG_INT
++QueuePrintCommands (PVRSRV_QUEUE_INFO * psQueue, IMG_CHAR * buffer, size_t size)
++{
++ off_t off = 0;
++ IMG_INT cmds = 0;
++ IMG_SIZE_T ui32ReadOffset = psQueue->ui32ReadOffset;
++ IMG_SIZE_T ui32WriteOffset = psQueue->ui32WriteOffset;
++ PVRSRV_COMMAND * psCmd;
++
++ while (ui32ReadOffset != ui32WriteOffset)
++ {
++ psCmd= (PVRSRV_COMMAND *)((IMG_UINTPTR_T)psQueue->pvLinQueueKM + ui32ReadOffset);
++
++ off = printAppend(buffer, size, off, "%p %p %5lu %6lu %3lu %5lu %2lu %2lu %3lu \n",
++ psQueue,
++ psCmd,
++ psCmd->ui32ProcessID,
++ psCmd->CommandType,
++ psCmd->ui32CmdSize,
++ psCmd->ui32DevIndex,
++ psCmd->ui32DstSyncCount,
++ psCmd->ui32SrcSyncCount,
++ psCmd->ui32DataSize);
++
++ ui32ReadOffset += psCmd->ui32CmdSize;
++ ui32ReadOffset &= psQueue->ui32QueueSize - 1;
++ cmds++;
++ }
++ if (cmds == 0)
++ off = printAppend(buffer, size, off, "%p <empty>\n", psQueue);
++ return off;
++}
++
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++void ProcSeqShowQueue(struct seq_file *sfile,void* el)
++{
++ PVRSRV_QUEUE_INFO * psQueue = (PVRSRV_QUEUE_INFO*)el;
++ IMG_INT cmds = 0;
++ IMG_SIZE_T ui32ReadOffset;
++ IMG_SIZE_T ui32WriteOffset;
++ PVRSRV_COMMAND * psCmd;
++
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++ seq_printf( sfile,
++ "Command Queues\n"
++ "Queue CmdPtr Pid Command Size DevInd DSC SSC #Data ...\n");
++ return;
++ }
++
++ ui32ReadOffset = psQueue->ui32ReadOffset;
++ ui32WriteOffset = psQueue->ui32WriteOffset;
++
++ while (ui32ReadOffset != ui32WriteOffset)
++ {
++ psCmd= (PVRSRV_COMMAND *)((IMG_UINTPTR_T)psQueue->pvLinQueueKM + ui32ReadOffset);
++
++ seq_printf(sfile, "%p %p %5lu %6lu %3lu %5lu %2lu %2lu %3lu \n",
++ psQueue,
++ psCmd,
++ psCmd->ui32ProcessID,
++ psCmd->CommandType,
++ psCmd->ui32CmdSize,
++ psCmd->ui32DevIndex,
++ psCmd->ui32DstSyncCount,
++ psCmd->ui32SrcSyncCount,
++ psCmd->ui32DataSize);
++
++ ui32ReadOffset += psCmd->ui32CmdSize;
++ ui32ReadOffset &= psQueue->ui32QueueSize - 1;
++ cmds++;
++ }
++
++ if (cmds == 0)
++ seq_printf(sfile, "%p <empty>\n", psQueue);
++}
++
++void* ProcSeqOff2ElementQueue(struct seq_file * sfile, loff_t off)
++{
++ PVRSRV_QUEUE_INFO * psQueue;
++ SYS_DATA * psSysData;
++
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++
++ SysAcquireData(&psSysData);
++
++ for (psQueue = psSysData->psQueueList; (((--off) > 0) && (psQueue != IMG_NULL)); psQueue = psQueue->psNextKM);
++ return psQueue;
++}
++
++#endif
++
++off_t
++QueuePrintQueues (IMG_CHAR * buffer, size_t size, off_t off)
++{
++ SYS_DATA * psSysData;
++ PVRSRV_QUEUE_INFO * psQueue;
++
++ SysAcquireData(&psSysData);
++
++ if (!off)
++ return printAppend (buffer, size, 0,
++ "Command Queues\n"
++ "Queue CmdPtr Pid Command Size DevInd DSC SSC #Data ...\n");
++
++
++
++ for (psQueue = psSysData->psQueueList; (((--off) > 0) && (psQueue != IMG_NULL)); psQueue = psQueue->psNextKM)
++ ;
++
++ return psQueue ? QueuePrintCommands (psQueue, buffer, size) : END_OF_FILE;
++}
++#endif
++
++#define GET_SPACE_IN_CMDQ(psQueue) \
++ (((psQueue->ui32ReadOffset - psQueue->ui32WriteOffset) \
++ + (psQueue->ui32QueueSize - 1)) & (psQueue->ui32QueueSize - 1))
++
++#define UPDATE_QUEUE_WOFF(psQueue, ui32Size) \
++ psQueue->ui32WriteOffset = (psQueue->ui32WriteOffset + ui32Size) \
++ & (psQueue->ui32QueueSize - 1);
++
++#define SYNCOPS_STALE(ui32OpsComplete, ui32OpsPending) \
++ (ui32OpsComplete >= ui32OpsPending)
++
++
++DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE);
++
++static IMG_VOID QueueDumpCmdComplete(COMMAND_COMPLETE_DATA *psCmdCompleteData,
++ IMG_UINT32 i,
++ IMG_BOOL bIsSrc)
++{
++ PVRSRV_SYNC_OBJECT *psSyncObject;
++
++ psSyncObject = bIsSrc ? psCmdCompleteData->psSrcSync : psCmdCompleteData->psDstSync;
++
++ if (psCmdCompleteData->bInUse)
++ {
++ PVR_LOG(("\t%s %lu: ROC DevVAddr:0x%lX ROP:0x%lx ROC:0x%lx, WOC DevVAddr:0x%lX WOP:0x%lx WOC:0x%lx",
++ bIsSrc ? "SRC" : "DEST", i,
++ psSyncObject[i].psKernelSyncInfoKM->sReadOpsCompleteDevVAddr.uiAddr,
++ psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32ReadOpsPending,
++ psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32ReadOpsComplete,
++ psSyncObject[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
++ psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsPending,
++ psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsComplete));
++ }
++ else
++ {
++ PVR_LOG(("\t%s %lu: (Not in use)", bIsSrc ? "SRC" : "DEST", i));
++ }
++}
++
++
++static IMG_VOID QueueDumpDebugInfo_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ if (psDeviceNode->sDevId.eDeviceClass == PVRSRV_DEVICE_CLASS_DISPLAY)
++ {
++ IMG_UINT32 i;
++ SYS_DATA *psSysData;
++ COMMAND_COMPLETE_DATA **ppsCmdCompleteData;
++ COMMAND_COMPLETE_DATA *psCmdCompleteData;
++
++ SysAcquireData(&psSysData);
++
++ ppsCmdCompleteData = psSysData->ppsCmdCompleteData[psDeviceNode->sDevId.ui32DeviceIndex];
++
++ if (ppsCmdCompleteData != IMG_NULL)
++ {
++ psCmdCompleteData = ppsCmdCompleteData[DC_FLIP_COMMAND];
++
++ PVR_LOG(("Command Complete Data for display device %lu:", psDeviceNode->sDevId.ui32DeviceIndex));
++
++ for (i = 0; i < psCmdCompleteData->ui32SrcSyncCount; i++)
++ {
++ QueueDumpCmdComplete(psCmdCompleteData, i, IMG_TRUE);
++ }
++
++ for (i = 0; i < psCmdCompleteData->ui32DstSyncCount; i++)
++ {
++ QueueDumpCmdComplete(psCmdCompleteData, i, IMG_FALSE);
++ }
++ }
++ else
++ {
++ PVR_LOG(("There is no Command Complete Data for display device %u", psDeviceNode->sDevId.ui32DeviceIndex));
++ }
++ }
++}
++
++
++IMG_VOID QueueDumpDebugInfo(IMG_VOID)
++{
++ SYS_DATA *psSysData;
++ SysAcquireData(&psSysData);
++ List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList, QueueDumpDebugInfo_ForEachCb);
++}
++
++
++IMG_SIZE_T NearestPower2(IMG_SIZE_T ui32Value)
++{
++ IMG_SIZE_T ui32Temp, ui32Result = 1;
++
++ if(!ui32Value)
++ return 0;
++
++ ui32Temp = ui32Value - 1;
++ while(ui32Temp)
++ {
++ ui32Result <<= 1;
++ ui32Temp >>= 1;
++ }
++
++ return ui32Result;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_SIZE_T ui32QueueSize,
++ PVRSRV_QUEUE_INFO **ppsQueueInfo)
++{
++ PVRSRV_QUEUE_INFO *psQueueInfo;
++ IMG_SIZE_T ui32Power2QueueSize = NearestPower2(ui32QueueSize);
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hMemBlock;
++
++ SysAcquireData(&psSysData);
++
++
++ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_QUEUE_INFO),
++ (IMG_VOID **)&psQueueInfo, &hMemBlock,
++ "Queue Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateCommandQueueKM: Failed to alloc queue struct"));
++ goto ErrorExit;
++ }
++ OSMemSet(psQueueInfo, 0, sizeof(PVRSRV_QUEUE_INFO));
++
++ psQueueInfo->hMemBlock[0] = hMemBlock;
++ psQueueInfo->ui32ProcessID = OSGetCurrentProcessIDKM();
++
++
++ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32Power2QueueSize + PVRSRV_MAX_CMD_SIZE,
++ &psQueueInfo->pvLinQueueKM, &hMemBlock,
++ "Command Queue") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateCommandQueueKM: Failed to alloc queue buffer"));
++ goto ErrorExit;
++ }
++
++ psQueueInfo->hMemBlock[1] = hMemBlock;
++ psQueueInfo->pvLinQueueUM = psQueueInfo->pvLinQueueKM;
++
++
++ PVR_ASSERT(psQueueInfo->ui32ReadOffset == 0);
++ PVR_ASSERT(psQueueInfo->ui32WriteOffset == 0);
++
++ psQueueInfo->ui32QueueSize = ui32Power2QueueSize;
++
++
++ if (psSysData->psQueueList == IMG_NULL)
++ {
++ eError = OSCreateResource(&psSysData->sQProcessResource);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++ }
++
++
++ if (OSLockResource(&psSysData->sQProcessResource,
++ KERNEL_ID) != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++ psQueueInfo->psNextKM = psSysData->psQueueList;
++ psSysData->psQueueList = psQueueInfo;
++
++ if (OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID) != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++ *ppsQueueInfo = psQueueInfo;
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++ if(psQueueInfo)
++ {
++ if(psQueueInfo->pvLinQueueKM)
++ {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ psQueueInfo->ui32QueueSize,
++ psQueueInfo->pvLinQueueKM,
++ psQueueInfo->hMemBlock[1]);
++ psQueueInfo->pvLinQueueKM = IMG_NULL;
++ }
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_QUEUE_INFO),
++ psQueueInfo,
++ psQueueInfo->hMemBlock[0]);
++
++ }
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo)
++{
++ PVRSRV_QUEUE_INFO *psQueue;
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++ IMG_BOOL bTimeout = IMG_TRUE;
++
++ SysAcquireData(&psSysData);
++
++ psQueue = psSysData->psQueueList;
++
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++ {
++ if(psQueueInfo->ui32ReadOffset == psQueueInfo->ui32WriteOffset)
++ {
++ bTimeout = IMG_FALSE;
++ break;
++ }
++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++ } END_LOOP_UNTIL_TIMEOUT();
++
++ if (bTimeout)
++ {
++
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDestroyCommandQueueKM : Failed to empty queue"));
++ eError = PVRSRV_ERROR_CANNOT_FLUSH_QUEUE;
++ goto ErrorExit;
++ }
++
++
++ eError = OSLockResource(&psSysData->sQProcessResource,
++ KERNEL_ID);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++ if(psQueue == psQueueInfo)
++ {
++ psSysData->psQueueList = psQueueInfo->psNextKM;
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ NearestPower2(psQueueInfo->ui32QueueSize) + PVRSRV_MAX_CMD_SIZE,
++ psQueueInfo->pvLinQueueKM,
++ psQueueInfo->hMemBlock[1]);
++ psQueueInfo->pvLinQueueKM = IMG_NULL;
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_QUEUE_INFO),
++ psQueueInfo,
++ psQueueInfo->hMemBlock[0]);
++ psQueueInfo = IMG_NULL;
++ }
++ else
++ {
++ while(psQueue)
++ {
++ if(psQueue->psNextKM == psQueueInfo)
++ {
++ psQueue->psNextKM = psQueueInfo->psNextKM;
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ psQueueInfo->ui32QueueSize,
++ psQueueInfo->pvLinQueueKM,
++ psQueueInfo->hMemBlock[1]);
++ psQueueInfo->pvLinQueueKM = IMG_NULL;
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_QUEUE_INFO),
++ psQueueInfo,
++ psQueueInfo->hMemBlock[0]);
++ psQueueInfo = IMG_NULL;
++ break;
++ }
++ psQueue = psQueue->psNextKM;
++ }
++
++ if(!psQueue)
++ {
++ eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ goto ErrorExit;
++ }
++ }
++
++
++ eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++
++
++ if (psSysData->psQueueList == IMG_NULL)
++ {
++ eError = OSDestroyResource(&psSysData->sQProcessResource);
++ if (eError != PVRSRV_OK)
++ {
++ goto ErrorExit;
++ }
++ }
++
++ErrorExit:
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetQueueSpaceKM(PVRSRV_QUEUE_INFO *psQueue,
++ IMG_SIZE_T ui32ParamSize,
++ IMG_VOID **ppvSpace)
++{
++ IMG_BOOL bTimeout = IMG_TRUE;
++
++
++ ui32ParamSize = (ui32ParamSize+3) & 0xFFFFFFFC;
++
++ if (ui32ParamSize > PVRSRV_MAX_CMD_SIZE)
++ {
++ PVR_DPF((PVR_DBG_WARNING,"PVRSRVGetQueueSpace: max command size is %d bytes", PVRSRV_MAX_CMD_SIZE));
++ return PVRSRV_ERROR_CMD_TOO_BIG;
++ }
++
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++ {
++ if (GET_SPACE_IN_CMDQ(psQueue) > ui32ParamSize)
++ {
++ bTimeout = IMG_FALSE;
++ break;
++ }
++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++ } END_LOOP_UNTIL_TIMEOUT();
++
++ if (bTimeout == IMG_TRUE)
++ {
++ *ppvSpace = IMG_NULL;
++
++ return PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE;
++ }
++ else
++ {
++ *ppvSpace = (IMG_VOID *)((IMG_UINTPTR_T)psQueue->pvLinQueueUM + psQueue->ui32WriteOffset);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInsertCommandKM(PVRSRV_QUEUE_INFO *psQueue,
++ PVRSRV_COMMAND **ppsCommand,
++ IMG_UINT32 ui32DevIndex,
++ IMG_UINT16 CommandType,
++ IMG_UINT32 ui32DstSyncCount,
++ PVRSRV_KERNEL_SYNC_INFO *apsDstSync[],
++ IMG_UINT32 ui32SrcSyncCount,
++ PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[],
++ IMG_SIZE_T ui32DataByteSize )
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_COMMAND *psCommand;
++ IMG_SIZE_T ui32CommandSize;
++ IMG_UINT32 i;
++
++
++ ui32DataByteSize = (ui32DataByteSize + 3UL) & ~3UL;
++
++
++ ui32CommandSize = sizeof(PVRSRV_COMMAND)
++ + ((ui32DstSyncCount + ui32SrcSyncCount) * sizeof(PVRSRV_SYNC_OBJECT))
++ + ui32DataByteSize;
++
++
++ eError = PVRSRVGetQueueSpaceKM (psQueue, ui32CommandSize, (IMG_VOID**)&psCommand);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ psCommand->ui32ProcessID = OSGetCurrentProcessIDKM();
++
++
++ psCommand->ui32CmdSize = ui32CommandSize;
++ psCommand->ui32DevIndex = ui32DevIndex;
++ psCommand->CommandType = CommandType;
++ psCommand->ui32DstSyncCount = ui32DstSyncCount;
++ psCommand->ui32SrcSyncCount = ui32SrcSyncCount;
++
++
++ psCommand->psDstSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psCommand) + sizeof(PVRSRV_COMMAND));
++
++
++ psCommand->psSrcSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psCommand->psDstSync)
++ + (ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
++
++ psCommand->pvData = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psCommand->psSrcSync)
++ + (ui32SrcSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
++ psCommand->ui32DataSize = ui32DataByteSize;
++
++
++ for (i=0; i<ui32DstSyncCount; i++)
++ {
++ psCommand->psDstSync[i].psKernelSyncInfoKM = apsDstSync[i];
++ psCommand->psDstSync[i].ui32WriteOpsPending = PVRSRVGetWriteOpsPending(apsDstSync[i], IMG_FALSE);
++ psCommand->psDstSync[i].ui32ReadOpsPending = PVRSRVGetReadOpsPending(apsDstSync[i], IMG_FALSE);
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInsertCommandKM: Dst %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i, psCommand->psDstSync[i].psKernelSyncInfoKM->sReadOpsCompleteDevVAddr.uiAddr,
++ psCommand->psDstSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
++ psCommand->psDstSync[i].ui32ReadOpsPending,
++ psCommand->psDstSync[i].ui32WriteOpsPending));
++ }
++
++
++ for (i=0; i<ui32SrcSyncCount; i++)
++ {
++ psCommand->psSrcSync[i].psKernelSyncInfoKM = apsSrcSync[i];
++ psCommand->psSrcSync[i].ui32WriteOpsPending = PVRSRVGetWriteOpsPending(apsSrcSync[i], IMG_TRUE);
++ psCommand->psSrcSync[i].ui32ReadOpsPending = PVRSRVGetReadOpsPending(apsSrcSync[i], IMG_TRUE);
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInsertCommandKM: Src %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i, psCommand->psSrcSync[i].psKernelSyncInfoKM->sReadOpsCompleteDevVAddr.uiAddr,
++ psCommand->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
++ psCommand->psSrcSync[i].ui32ReadOpsPending,
++ psCommand->psSrcSync[i].ui32WriteOpsPending));
++ }
++
++
++ *ppsCommand = psCommand;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSubmitCommandKM(PVRSRV_QUEUE_INFO *psQueue,
++ PVRSRV_COMMAND *psCommand)
++{
++
++
++
++ if (psCommand->ui32DstSyncCount > 0)
++ {
++ psCommand->psDstSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psQueue->pvLinQueueKM)
++ + psQueue->ui32WriteOffset + sizeof(PVRSRV_COMMAND));
++ }
++
++ if (psCommand->ui32SrcSyncCount > 0)
++ {
++ psCommand->psSrcSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psQueue->pvLinQueueKM)
++ + psQueue->ui32WriteOffset + sizeof(PVRSRV_COMMAND)
++ + (psCommand->ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
++ }
++
++ psCommand->pvData = (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psQueue->pvLinQueueKM)
++ + psQueue->ui32WriteOffset + sizeof(PVRSRV_COMMAND)
++ + (psCommand->ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT))
++ + (psCommand->ui32SrcSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
++
++
++ UPDATE_QUEUE_WOFF(psQueue, psCommand->ui32CmdSize);
++
++ return PVRSRV_OK;
++}
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVProcessCommand(SYS_DATA *psSysData,
++ PVRSRV_COMMAND *psCommand,
++ IMG_BOOL bFlush)
++{
++ PVRSRV_SYNC_OBJECT *psWalkerObj;
++ PVRSRV_SYNC_OBJECT *psEndObj;
++ IMG_UINT32 i;
++ COMMAND_COMPLETE_DATA *psCmdCompleteData;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ IMG_UINT32 ui32WriteOpsComplete;
++ IMG_UINT32 ui32ReadOpsComplete;
++
++
++ psWalkerObj = psCommand->psDstSync;
++ psEndObj = psWalkerObj + psCommand->ui32DstSyncCount;
++ while (psWalkerObj < psEndObj)
++ {
++ PVRSRV_SYNC_DATA *psSyncData = psWalkerObj->psKernelSyncInfoKM->psSyncData;
++
++ ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete;
++ ui32ReadOpsComplete = psSyncData->ui32ReadOpsComplete;
++
++ if ((ui32WriteOpsComplete != psWalkerObj->ui32WriteOpsPending)
++ || (ui32ReadOpsComplete != psWalkerObj->ui32ReadOpsPending))
++ {
++ if (!bFlush ||
++ !SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) ||
++ !SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOpsPending))
++ {
++ return PVRSRV_ERROR_FAILED_DEPENDENCIES;
++ }
++ }
++
++ psWalkerObj++;
++ }
++
++
++ psWalkerObj = psCommand->psSrcSync;
++ psEndObj = psWalkerObj + psCommand->ui32SrcSyncCount;
++ while (psWalkerObj < psEndObj)
++ {
++ PVRSRV_SYNC_DATA *psSyncData = psWalkerObj->psKernelSyncInfoKM->psSyncData;
++
++ ui32ReadOpsComplete = psSyncData->ui32ReadOpsComplete;
++ ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete;
++
++ if ((ui32WriteOpsComplete != psWalkerObj->ui32WriteOpsPending)
++ || (ui32ReadOpsComplete != psWalkerObj->ui32ReadOpsPending))
++ {
++ if (!bFlush &&
++ SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) &&
++ SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOpsPending))
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "PVRSRVProcessCommand: Stale syncops psSyncData:0x%x ui32WriteOpsComplete:0x%x ui32WriteOpsPending:0x%x",
++ psSyncData, ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending));
++ }
++
++ if (!bFlush ||
++ !SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) ||
++ !SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOpsPending))
++ {
++ return PVRSRV_ERROR_FAILED_DEPENDENCIES;
++ }
++ }
++ psWalkerObj++;
++ }
++
++
++ if (psCommand->ui32DevIndex >= SYS_DEVICE_COUNT)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVProcessCommand: invalid DeviceType 0x%x",
++ psCommand->ui32DevIndex));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ psCmdCompleteData = psSysData->ppsCmdCompleteData[psCommand->ui32DevIndex][psCommand->CommandType];
++ if (psCmdCompleteData->bInUse)
++ {
++
++ return PVRSRV_ERROR_FAILED_DEPENDENCIES;
++ }
++
++
++ psCmdCompleteData->bInUse = IMG_TRUE;
++
++
++ psCmdCompleteData->ui32DstSyncCount = psCommand->ui32DstSyncCount;
++ for (i=0; i<psCommand->ui32DstSyncCount; i++)
++ {
++ psCmdCompleteData->psDstSync[i] = psCommand->psDstSync[i];
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVProcessCommand: Dst %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i, psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sReadOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psDstSync[i].ui32ReadOpsPending,
++ psCmdCompleteData->psDstSync[i].ui32WriteOpsPending));
++ }
++
++
++ psCmdCompleteData->ui32SrcSyncCount = psCommand->ui32SrcSyncCount;
++ for (i=0; i<psCommand->ui32SrcSyncCount; i++)
++ {
++ psCmdCompleteData->psSrcSync[i] = psCommand->psSrcSync[i];
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVProcessCommand: Src %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i, psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sReadOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psSrcSync[i].ui32ReadOpsPending,
++ psCmdCompleteData->psSrcSync[i].ui32WriteOpsPending));
++ }
++
++
++
++
++
++
++
++
++
++
++
++ if (psSysData->ppfnCmdProcList[psCommand->ui32DevIndex][psCommand->CommandType]((IMG_HANDLE)psCmdCompleteData,
++ psCommand->ui32DataSize,
++ psCommand->pvData) == IMG_FALSE)
++ {
++
++
++
++ psCmdCompleteData->bInUse = IMG_FALSE;
++ eError = PVRSRV_ERROR_CMD_NOT_PROCESSED;
++ }
++
++ return eError;
++}
++
++
++IMG_VOID PVRSRVProcessQueues_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ if (psDeviceNode->bReProcessDeviceCommandComplete &&
++ psDeviceNode->pfnDeviceCommandComplete != IMG_NULL)
++ {
++ (*psDeviceNode->pfnDeviceCommandComplete)(psDeviceNode);
++ }
++}
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVProcessQueues(IMG_UINT32 ui32CallerID,
++ IMG_BOOL bFlush)
++{
++ PVRSRV_QUEUE_INFO *psQueue;
++ SYS_DATA *psSysData;
++ PVRSRV_COMMAND *psCommand;
++ PVRSRV_ERROR eError;
++
++ SysAcquireData(&psSysData);
++
++
++ psSysData->bReProcessQueues = IMG_FALSE;
++
++
++ eError = OSLockResource(&psSysData->sQProcessResource,
++ ui32CallerID);
++ if(eError != PVRSRV_OK)
++ {
++
++ psSysData->bReProcessQueues = IMG_TRUE;
++
++
++ if(ui32CallerID == ISR_ID)
++ {
++ if (bFlush)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVProcessQueues: Couldn't acquire queue processing lock for FLUSH"));
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"PVRSRVProcessQueues: Couldn't acquire queue processing lock"));
++ }
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"PVRSRVProcessQueues: Queue processing lock-acquire failed when called from the Services driver."));
++ PVR_DPF((PVR_DBG_MESSAGE," This is due to MISR queue processing being interrupted by the Services driver."));
++ }
++
++ return PVRSRV_OK;
++ }
++
++ psQueue = psSysData->psQueueList;
++
++ if(!psQueue)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"No Queues installed - cannot process commands"));
++ }
++
++ if (bFlush)
++ {
++ PVRSRVSetDCState(DC_STATE_FLUSH_COMMANDS);
++ }
++
++ while (psQueue)
++ {
++ while (psQueue->ui32ReadOffset != psQueue->ui32WriteOffset)
++ {
++ psCommand = (PVRSRV_COMMAND*)((IMG_UINTPTR_T)psQueue->pvLinQueueKM + psQueue->ui32ReadOffset);
++
++ if (PVRSRVProcessCommand(psSysData, psCommand, bFlush) == PVRSRV_OK)
++ {
++
++ UPDATE_QUEUE_ROFF(psQueue, psCommand->ui32CmdSize)
++
++ if (bFlush)
++ {
++ continue;
++ }
++ }
++
++ break;
++ }
++ psQueue = psQueue->psNextKM;
++ }
++
++ if (bFlush)
++ {
++ PVRSRVSetDCState(DC_STATE_NO_FLUSH_COMMANDS);
++ }
++
++
++ List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList,
++ PVRSRVProcessQueues_ForEachCb);
++
++
++
++ OSUnlockResource(&psSysData->sQProcessResource, ui32CallerID);
++
++
++ if(psSysData->bReProcessQueues)
++ {
++ return PVRSRV_ERROR_PROCESSING_BLOCKED;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++IMG_VOID PVRSRVCommandCompleteKM(IMG_HANDLE hCmdCookie,
++ IMG_BOOL bScheduleMISR)
++{
++ IMG_UINT32 i;
++ COMMAND_COMPLETE_DATA *psCmdCompleteData = (COMMAND_COMPLETE_DATA *)hCmdCookie;
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++
++ for (i=0; i<psCmdCompleteData->ui32DstSyncCount; i++)
++ {
++ psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsComplete++;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVCommandCompleteKM: Dst %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i, psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sReadOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psDstSync[i].ui32ReadOpsPending,
++ psCmdCompleteData->psDstSync[i].ui32WriteOpsPending));
++ }
++
++
++ for (i=0; i<psCmdCompleteData->ui32SrcSyncCount; i++)
++ {
++ psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->psSyncData->ui32ReadOpsComplete++;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVCommandCompleteKM: Src %lu RO-VA:0x%lx WO-VA:0x%lx ROP:0x%lx WOP:0x%lx",
++ i, psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sReadOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
++ psCmdCompleteData->psSrcSync[i].ui32ReadOpsPending,
++ psCmdCompleteData->psSrcSync[i].ui32WriteOpsPending));
++ }
++
++
++ psCmdCompleteData->bInUse = IMG_FALSE;
++
++
++ PVRSRVCommandCompleteCallbacks();
++
++#if defined(SYS_USING_INTERRUPTS)
++ if(bScheduleMISR)
++ {
++ OSScheduleMISR(psSysData);
++ }
++#else
++ PVR_UNREFERENCED_PARAMETER(bScheduleMISR);
++#endif
++}
++
++
++IMG_VOID PVRSRVCommandCompleteCallbacks_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ if(psDeviceNode->pfnDeviceCommandComplete != IMG_NULL)
++ {
++
++ (*psDeviceNode->pfnDeviceCommandComplete)(psDeviceNode);
++ }
++}
++
++IMG_VOID PVRSRVCommandCompleteCallbacks(IMG_VOID)
++{
++ SYS_DATA *psSysData;
++ SysAcquireData(&psSysData);
++
++
++ List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList,
++ PVRSRVCommandCompleteCallbacks_ForEachCb);
++}
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVRegisterCmdProcListKM(IMG_UINT32 ui32DevIndex,
++ PFN_CMD_PROC *ppfnCmdProcList,
++ IMG_UINT32 ui32MaxSyncsPerCmd[][2],
++ IMG_UINT32 ui32CmdCount)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_ERROR eError;
++ IMG_UINT32 i;
++ IMG_SIZE_T ui32AllocSize;
++ PFN_CMD_PROC *ppfnCmdProc;
++ COMMAND_COMPLETE_DATA *psCmdCompleteData;
++
++
++ if(ui32DevIndex >= SYS_DEVICE_COUNT)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRegisterCmdProcListKM: invalid DeviceType 0x%x",
++ ui32DevIndex));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ SysAcquireData(&psSysData);
++
++
++ eError = OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++ ui32CmdCount * sizeof(PFN_CMD_PROC),
++ (IMG_VOID **)&psSysData->ppfnCmdProcList[ui32DevIndex], IMG_NULL,
++ "Internal Queue Info structure");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: Failed to alloc queue"));
++ return eError;
++ }
++
++
++ ppfnCmdProc = psSysData->ppfnCmdProcList[ui32DevIndex];
++
++
++ for (i=0; i<ui32CmdCount; i++)
++ {
++ ppfnCmdProc[i] = ppfnCmdProcList[i];
++ }
++
++
++ ui32AllocSize = ui32CmdCount * sizeof(COMMAND_COMPLETE_DATA*);
++ eError = OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32AllocSize,
++ (IMG_VOID **)&psSysData->ppsCmdCompleteData[ui32DevIndex], IMG_NULL,
++ "Array of Pointers for Command Store");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: Failed to alloc CC data"));
++ goto ErrorExit;
++ }
++
++ for (i=0; i<ui32CmdCount; i++)
++ {
++
++
++ ui32AllocSize = sizeof(COMMAND_COMPLETE_DATA)
++ + ((ui32MaxSyncsPerCmd[i][0]
++ + ui32MaxSyncsPerCmd[i][1])
++ * sizeof(PVRSRV_SYNC_OBJECT));
++
++ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32AllocSize,
++ (IMG_VOID **)&psSysData->ppsCmdCompleteData[ui32DevIndex][i],
++ IMG_NULL,
++ "Command Complete Data");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: Failed to alloc cmd %d",i));
++ goto ErrorExit;
++ }
++
++
++ OSMemSet(psSysData->ppsCmdCompleteData[ui32DevIndex][i], 0x00, ui32AllocSize);
++
++ psCmdCompleteData = psSysData->ppsCmdCompleteData[ui32DevIndex][i];
++
++
++ psCmdCompleteData->psDstSync = (PVRSRV_SYNC_OBJECT*)
++ (((IMG_UINTPTR_T)psCmdCompleteData)
++ + sizeof(COMMAND_COMPLETE_DATA));
++ psCmdCompleteData->psSrcSync = (PVRSRV_SYNC_OBJECT*)
++ (((IMG_UINTPTR_T)psCmdCompleteData->psDstSync)
++ + (sizeof(PVRSRV_SYNC_OBJECT) * ui32MaxSyncsPerCmd[i][0]));
++
++ psCmdCompleteData->ui32AllocSize = ui32AllocSize;
++ }
++
++ return PVRSRV_OK;
++
++ErrorExit:
++
++
++
++ if(psSysData->ppsCmdCompleteData[ui32DevIndex] != IMG_NULL)
++ {
++ for (i=0; i<ui32CmdCount; i++)
++ {
++ if (psSysData->ppsCmdCompleteData[ui32DevIndex][i] != IMG_NULL)
++ {
++ ui32AllocSize = sizeof(COMMAND_COMPLETE_DATA)
++ + ((ui32MaxSyncsPerCmd[i][0]
++ + ui32MaxSyncsPerCmd[i][1])
++ * sizeof(PVRSRV_SYNC_OBJECT));
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, ui32AllocSize, psSysData->ppsCmdCompleteData[ui32DevIndex][i], IMG_NULL);
++ psSysData->ppsCmdCompleteData[ui32DevIndex][i] = IMG_NULL;
++ }
++ }
++ ui32AllocSize = ui32CmdCount * sizeof(COMMAND_COMPLETE_DATA*);
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, ui32AllocSize, psSysData->ppsCmdCompleteData[ui32DevIndex], IMG_NULL);
++ psSysData->ppsCmdCompleteData[ui32DevIndex] = IMG_NULL;
++ }
++
++ if(psSysData->ppfnCmdProcList[ui32DevIndex] != IMG_NULL)
++ {
++ ui32AllocSize = ui32CmdCount * sizeof(PFN_CMD_PROC);
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, ui32AllocSize, psSysData->ppfnCmdProcList[ui32DevIndex], IMG_NULL);
++ psSysData->ppfnCmdProcList[ui32DevIndex] = IMG_NULL;
++ }
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVRemoveCmdProcListKM(IMG_UINT32 ui32DevIndex,
++ IMG_UINT32 ui32CmdCount)
++{
++ SYS_DATA *psSysData;
++ IMG_UINT32 i;
++
++
++ if(ui32DevIndex >= SYS_DEVICE_COUNT)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "PVRSRVRemoveCmdProcListKM: invalid DeviceType 0x%x",
++ ui32DevIndex));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ SysAcquireData(&psSysData);
++
++ if(psSysData->ppsCmdCompleteData[ui32DevIndex] == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveCmdProcListKM: Invalid command array"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ else
++ {
++ for(i=0; i<ui32CmdCount; i++)
++ {
++
++ if(psSysData->ppsCmdCompleteData[ui32DevIndex][i] != IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ psSysData->ppsCmdCompleteData[ui32DevIndex][i]->ui32AllocSize,
++ psSysData->ppsCmdCompleteData[ui32DevIndex][i],
++ IMG_NULL);
++ psSysData->ppsCmdCompleteData[ui32DevIndex][i] = IMG_NULL;
++ }
++ }
++
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32CmdCount * sizeof(COMMAND_COMPLETE_DATA*),
++ psSysData->ppsCmdCompleteData[ui32DevIndex],
++ IMG_NULL);
++ psSysData->ppsCmdCompleteData[ui32DevIndex] = IMG_NULL;
++ }
++
++
++ if(psSysData->ppfnCmdProcList[ui32DevIndex] != IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ ui32CmdCount * sizeof(PFN_CMD_PROC),
++ psSysData->ppfnCmdProcList[ui32DevIndex],
++ IMG_NULL);
++ psSysData->ppfnCmdProcList[ui32DevIndex] = IMG_NULL;
++ }
++
++ return PVRSRV_OK;
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/ra.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/ra.c
+new file mode 100644
+index 0000000..d4eab59
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/ra.c
+@@ -0,0 +1,1871 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "hash.h"
++#include "ra.h"
++#include "buffer_manager.h"
++#include "osfunc.h"
++
++#ifdef __linux__
++#include <linux/kernel.h>
++#include "proc.h"
++#endif
++
++#ifdef USE_BM_FREESPACE_CHECK
++#include <stdio.h>
++#endif
++
++#define MINIMUM_HASH_SIZE (64)
++
++#if defined(VALIDATE_ARENA_TEST)
++
++typedef enum RESOURCE_DESCRIPTOR_TAG {
++
++ RESOURCE_SPAN_LIVE = 10,
++ RESOURCE_SPAN_FREE,
++ IMPORTED_RESOURCE_SPAN_START,
++ IMPORTED_RESOURCE_SPAN_LIVE,
++ IMPORTED_RESOURCE_SPAN_FREE,
++ IMPORTED_RESOURCE_SPAN_END,
++
++} RESOURCE_DESCRIPTOR;
++
++typedef enum RESOURCE_TYPE_TAG {
++
++ IMPORTED_RESOURCE_TYPE = 20,
++ NON_IMPORTED_RESOURCE_TYPE
++
++} RESOURCE_TYPE;
++
++
++static IMG_UINT32 ui32BoundaryTagID = 0;
++
++IMG_UINT32 ValidateArena(RA_ARENA *pArena);
++#endif
++
++struct _BT_
++{
++ enum bt_type
++ {
++ btt_span,
++ btt_free,
++ btt_live
++ } type;
++
++
++ IMG_UINTPTR_T base;
++ IMG_SIZE_T uSize;
++
++
++ struct _BT_ *pNextSegment;
++ struct _BT_ *pPrevSegment;
++
++ struct _BT_ *pNextFree;
++ struct _BT_ *pPrevFree;
++
++ BM_MAPPING *psMapping;
++
++#if defined(VALIDATE_ARENA_TEST)
++ RESOURCE_DESCRIPTOR eResourceSpan;
++ RESOURCE_TYPE eResourceType;
++
++
++ IMG_UINT32 ui32BoundaryTagID;
++#endif
++
++};
++typedef struct _BT_ BT;
++
++
++struct _RA_ARENA_
++{
++
++ IMG_CHAR *name;
++
++
++ IMG_SIZE_T uQuantum;
++
++
++ IMG_BOOL (*pImportAlloc)(IMG_VOID *,
++ IMG_SIZE_T uSize,
++ IMG_SIZE_T *pActualSize,
++ BM_MAPPING **ppsMapping,
++ IMG_UINT32 uFlags,
++ IMG_UINTPTR_T *pBase);
++ IMG_VOID (*pImportFree) (IMG_VOID *,
++ IMG_UINTPTR_T,
++ BM_MAPPING *psMapping);
++ IMG_VOID (*pBackingStoreFree) (IMG_VOID *, IMG_SIZE_T, IMG_SIZE_T, IMG_HANDLE);
++
++
++ IMG_VOID *pImportHandle;
++
++
++#define FREE_TABLE_LIMIT 32
++
++
++ BT *aHeadFree [FREE_TABLE_LIMIT];
++
++
++ BT *pHeadSegment;
++ BT *pTailSegment;
++
++
++ HASH_TABLE *pSegmentHash;
++
++#ifdef RA_STATS
++ RA_STATISTICS sStatistics;
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++#define PROC_NAME_SIZE 32
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ struct proc_dir_entry* pProcInfo;
++ struct proc_dir_entry* pProcSegs;
++#else
++ IMG_CHAR szProcInfoName[PROC_NAME_SIZE];
++ IMG_CHAR szProcSegsName[PROC_NAME_SIZE];
++#endif
++
++ IMG_BOOL bInitProcEntry;
++#endif
++};
++#if defined(ENABLE_RA_DUMP)
++IMG_VOID RA_Dump (RA_ARENA *pArena);
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void RA_ProcSeqShowInfo(struct seq_file *sfile, void* el);
++static void* RA_ProcSeqOff2ElementInfo(struct seq_file * sfile, loff_t off);
++
++static void RA_ProcSeqShowRegs(struct seq_file *sfile, void* el);
++static void* RA_ProcSeqOff2ElementRegs(struct seq_file * sfile, loff_t off);
++
++#else
++static IMG_INT
++RA_DumpSegs(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data);
++static IMG_INT
++RA_DumpInfo(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data);
++#endif
++
++#endif
++
++#ifdef USE_BM_FREESPACE_CHECK
++IMG_VOID CheckBMFreespace(IMG_VOID);
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++static IMG_CHAR *ReplaceSpaces(IMG_CHAR * const pS)
++{
++ IMG_CHAR *pT;
++
++ for(pT = pS; *pT != 0; pT++)
++ {
++ if (*pT == ' ' || *pT == '\t')
++ {
++ *pT = '_';
++ }
++ }
++
++ return pS;
++}
++#endif
++
++static IMG_BOOL
++_RequestAllocFail (IMG_VOID *_h,
++ IMG_SIZE_T _uSize,
++ IMG_SIZE_T *_pActualSize,
++ BM_MAPPING **_ppsMapping,
++ IMG_UINT32 _uFlags,
++ IMG_UINTPTR_T *_pBase)
++{
++ PVR_UNREFERENCED_PARAMETER (_h);
++ PVR_UNREFERENCED_PARAMETER (_uSize);
++ PVR_UNREFERENCED_PARAMETER (_pActualSize);
++ PVR_UNREFERENCED_PARAMETER (_ppsMapping);
++ PVR_UNREFERENCED_PARAMETER (_uFlags);
++ PVR_UNREFERENCED_PARAMETER (_pBase);
++
++ return IMG_FALSE;
++}
++
++static IMG_UINT32
++pvr_log2 (IMG_SIZE_T n)
++{
++ IMG_UINT32 l = 0;
++ n>>=1;
++ while (n>0)
++ {
++ n>>=1;
++ l++;
++ }
++ return l;
++}
++
++static PVRSRV_ERROR
++_SegmentListInsertAfter (RA_ARENA *pArena,
++ BT *pInsertionPoint,
++ BT *pBT)
++{
++ PVR_ASSERT (pArena != IMG_NULL);
++ PVR_ASSERT (pInsertionPoint != IMG_NULL);
++
++ if ((pInsertionPoint == IMG_NULL) || (pArena == IMG_NULL))
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_SegmentListInsertAfter: invalid parameters"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ pBT->pNextSegment = pInsertionPoint->pNextSegment;
++ pBT->pPrevSegment = pInsertionPoint;
++ if (pInsertionPoint->pNextSegment == IMG_NULL)
++ pArena->pTailSegment = pBT;
++ else
++ pInsertionPoint->pNextSegment->pPrevSegment = pBT;
++ pInsertionPoint->pNextSegment = pBT;
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR
++_SegmentListInsert (RA_ARENA *pArena, BT *pBT)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++
++ if (pArena->pHeadSegment == IMG_NULL)
++ {
++ pArena->pHeadSegment = pArena->pTailSegment = pBT;
++ pBT->pNextSegment = pBT->pPrevSegment = IMG_NULL;
++ }
++ else
++ {
++ BT *pBTScan;
++
++ if (pBT->base < pArena->pHeadSegment->base)
++ {
++
++ pBT->pNextSegment = pArena->pHeadSegment;
++ pArena->pHeadSegment->pPrevSegment = pBT;
++ pArena->pHeadSegment = pBT;
++ pBT->pPrevSegment = IMG_NULL;
++ }
++ else
++ {
++
++
++
++
++ pBTScan = pArena->pHeadSegment;
++
++ while ((pBTScan->pNextSegment != IMG_NULL) && (pBT->base >= pBTScan->pNextSegment->base))
++ {
++ pBTScan = pBTScan->pNextSegment;
++ }
++
++ eError = _SegmentListInsertAfter (pArena, pBTScan, pBT);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++ }
++ return eError;
++}
++
++static IMG_VOID
++_SegmentListRemove (RA_ARENA *pArena, BT *pBT)
++{
++ if (pBT->pPrevSegment == IMG_NULL)
++ pArena->pHeadSegment = pBT->pNextSegment;
++ else
++ pBT->pPrevSegment->pNextSegment = pBT->pNextSegment;
++
++ if (pBT->pNextSegment == IMG_NULL)
++ pArena->pTailSegment = pBT->pPrevSegment;
++ else
++ pBT->pNextSegment->pPrevSegment = pBT->pPrevSegment;
++}
++
++static BT *
++_SegmentSplit (RA_ARENA *pArena, BT *pBT, IMG_SIZE_T uSize)
++{
++ BT *pNeighbour;
++
++ PVR_ASSERT (pArena != IMG_NULL);
++
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_SegmentSplit: invalid parameter - pArena"));
++ return IMG_NULL;
++ }
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(BT),
++ (IMG_VOID **)&pNeighbour, IMG_NULL,
++ "Boundary Tag") != PVRSRV_OK)
++ {
++ return IMG_NULL;
++ }
++
++ OSMemSet(pNeighbour, 0, sizeof(BT));
++
++#if defined(VALIDATE_ARENA_TEST)
++ pNeighbour->ui32BoundaryTagID = ++ui32BoundaryTagID;
++#endif
++
++ pNeighbour->pPrevSegment = pBT;
++ pNeighbour->pNextSegment = pBT->pNextSegment;
++ if (pBT->pNextSegment == IMG_NULL)
++ pArena->pTailSegment = pNeighbour;
++ else
++ pBT->pNextSegment->pPrevSegment = pNeighbour;
++ pBT->pNextSegment = pNeighbour;
++
++ pNeighbour->type = btt_free;
++ pNeighbour->uSize = pBT->uSize - uSize;
++ pNeighbour->base = pBT->base + uSize;
++ pNeighbour->psMapping = pBT->psMapping;
++ pBT->uSize = uSize;
++
++#if defined(VALIDATE_ARENA_TEST)
++ if (pNeighbour->pPrevSegment->eResourceType == IMPORTED_RESOURCE_TYPE)
++ {
++ pNeighbour->eResourceType = IMPORTED_RESOURCE_TYPE;
++ pNeighbour->eResourceSpan = IMPORTED_RESOURCE_SPAN_FREE;
++ }
++ else if (pNeighbour->pPrevSegment->eResourceType == NON_IMPORTED_RESOURCE_TYPE)
++ {
++ pNeighbour->eResourceType = NON_IMPORTED_RESOURCE_TYPE;
++ pNeighbour->eResourceSpan = RESOURCE_SPAN_FREE;
++ }
++ else
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_SegmentSplit: pNeighbour->pPrevSegment->eResourceType unrecognized"));
++ PVR_DBG_BREAK;
++ }
++#endif
++
++ return pNeighbour;
++}
++
++static IMG_VOID
++_FreeListInsert (RA_ARENA *pArena, BT *pBT)
++{
++ IMG_UINT32 uIndex;
++ uIndex = pvr_log2 (pBT->uSize);
++ pBT->type = btt_free;
++ pBT->pNextFree = pArena->aHeadFree [uIndex];
++ pBT->pPrevFree = IMG_NULL;
++ if (pArena->aHeadFree[uIndex] != IMG_NULL)
++ pArena->aHeadFree[uIndex]->pPrevFree = pBT;
++ pArena->aHeadFree [uIndex] = pBT;
++}
++
++static IMG_VOID
++_FreeListRemove (RA_ARENA *pArena, BT *pBT)
++{
++ IMG_UINT32 uIndex;
++ uIndex = pvr_log2 (pBT->uSize);
++ if (pBT->pNextFree != IMG_NULL)
++ pBT->pNextFree->pPrevFree = pBT->pPrevFree;
++ if (pBT->pPrevFree == IMG_NULL)
++ pArena->aHeadFree[uIndex] = pBT->pNextFree;
++ else
++ pBT->pPrevFree->pNextFree = pBT->pNextFree;
++}
++
++static BT *
++_BuildSpanMarker (IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++ BT *pBT;
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(BT),
++ (IMG_VOID **)&pBT, IMG_NULL,
++ "Boundary Tag") != PVRSRV_OK)
++ {
++ return IMG_NULL;
++ }
++
++ OSMemSet(pBT, 0, sizeof(BT));
++
++#if defined(VALIDATE_ARENA_TEST)
++ pBT->ui32BoundaryTagID = ++ui32BoundaryTagID;
++#endif
++
++ pBT->type = btt_span;
++ pBT->base = base;
++ pBT->uSize = uSize;
++ pBT->psMapping = IMG_NULL;
++
++ return pBT;
++}
++
++static BT *
++_BuildBT (IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++ BT *pBT;
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(BT),
++ (IMG_VOID **)&pBT, IMG_NULL,
++ "Boundary Tag") != PVRSRV_OK)
++ {
++ return IMG_NULL;
++ }
++
++ OSMemSet(pBT, 0, sizeof(BT));
++
++#if defined(VALIDATE_ARENA_TEST)
++ pBT->ui32BoundaryTagID = ++ui32BoundaryTagID;
++#endif
++
++ pBT->type = btt_free;
++ pBT->base = base;
++ pBT->uSize = uSize;
++
++ return pBT;
++}
++
++static BT *
++_InsertResource (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++ BT *pBT;
++ PVR_ASSERT (pArena!=IMG_NULL);
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_InsertResource: invalid parameter - pArena"));
++ return IMG_NULL;
++ }
++
++ pBT = _BuildBT (base, uSize);
++ if (pBT != IMG_NULL)
++ {
++
++#if defined(VALIDATE_ARENA_TEST)
++ pBT->eResourceSpan = RESOURCE_SPAN_FREE;
++ pBT->eResourceType = NON_IMPORTED_RESOURCE_TYPE;
++#endif
++
++ if (_SegmentListInsert (pArena, pBT) != PVRSRV_OK)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_InsertResource: call to _SegmentListInsert failed"));
++ return IMG_NULL;
++ }
++ _FreeListInsert (pArena, pBT);
++#ifdef RA_STATS
++ pArena->sStatistics.uTotalResourceCount+=uSize;
++ pArena->sStatistics.uFreeResourceCount+=uSize;
++ pArena->sStatistics.uSpanCount++;
++#endif
++ }
++ return pBT;
++}
++
++static BT *
++_InsertResourceSpan (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++ PVRSRV_ERROR eError;
++ BT *pSpanStart;
++ BT *pSpanEnd;
++ BT *pBT;
++
++ PVR_ASSERT (pArena != IMG_NULL);
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_InsertResourceSpan: invalid parameter - pArena"));
++ return IMG_NULL;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_InsertResourceSpan: arena='%s', base=0x%x, size=0x%x",
++ pArena->name, base, uSize));
++
++ pSpanStart = _BuildSpanMarker (base, uSize);
++ if (pSpanStart == IMG_NULL)
++ {
++ goto fail_start;
++ }
++
++#if defined(VALIDATE_ARENA_TEST)
++ pSpanStart->eResourceSpan = IMPORTED_RESOURCE_SPAN_START;
++ pSpanStart->eResourceType = IMPORTED_RESOURCE_TYPE;
++#endif
++
++ pSpanEnd = _BuildSpanMarker (base + uSize, 0);
++ if (pSpanEnd == IMG_NULL)
++ {
++ goto fail_end;
++ }
++
++#if defined(VALIDATE_ARENA_TEST)
++ pSpanEnd->eResourceSpan = IMPORTED_RESOURCE_SPAN_END;
++ pSpanEnd->eResourceType = IMPORTED_RESOURCE_TYPE;
++#endif
++
++ pBT = _BuildBT (base, uSize);
++ if (pBT == IMG_NULL)
++ {
++ goto fail_bt;
++ }
++
++#if defined(VALIDATE_ARENA_TEST)
++ pBT->eResourceSpan = IMPORTED_RESOURCE_SPAN_FREE;
++ pBT->eResourceType = IMPORTED_RESOURCE_TYPE;
++#endif
++
++ eError = _SegmentListInsert (pArena, pSpanStart);
++ if (eError != PVRSRV_OK)
++ {
++ goto fail_SegListInsert;
++ }
++
++ eError = _SegmentListInsertAfter (pArena, pSpanStart, pBT);
++ if (eError != PVRSRV_OK)
++ {
++ goto fail_SegListInsert;
++ }
++
++ _FreeListInsert (pArena, pBT);
++
++ eError = _SegmentListInsertAfter (pArena, pBT, pSpanEnd);
++ if (eError != PVRSRV_OK)
++ {
++ goto fail_SegListInsert;
++ }
++
++#ifdef RA_STATS
++ pArena->sStatistics.uTotalResourceCount+=uSize;
++#endif
++ return pBT;
++
++ fail_SegListInsert:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL);
++
++ fail_bt:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pSpanEnd, IMG_NULL);
++
++ fail_end:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pSpanStart, IMG_NULL);
++
++ fail_start:
++ return IMG_NULL;
++}
++
++static IMG_VOID
++_FreeBT (RA_ARENA *pArena, BT *pBT, IMG_BOOL bFreeBackingStore)
++{
++ BT *pNeighbour;
++ IMG_UINTPTR_T uOrigBase;
++ IMG_SIZE_T uOrigSize;
++
++ PVR_ASSERT (pArena!=IMG_NULL);
++ PVR_ASSERT (pBT!=IMG_NULL);
++
++ if ((pArena == IMG_NULL) || (pBT == IMG_NULL))
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_FreeBT: invalid parameter"));
++ return;
++ }
++
++#ifdef RA_STATS
++ pArena->sStatistics.uLiveSegmentCount--;
++ pArena->sStatistics.uFreeSegmentCount++;
++ pArena->sStatistics.uFreeResourceCount+=pBT->uSize;
++#endif
++
++ uOrigBase = pBT->base;
++ uOrigSize = pBT->uSize;
++
++
++ pNeighbour = pBT->pPrevSegment;
++ if (pNeighbour!=IMG_NULL
++ && pNeighbour->type == btt_free
++ && pNeighbour->base + pNeighbour->uSize == pBT->base)
++ {
++ _FreeListRemove (pArena, pNeighbour);
++ _SegmentListRemove (pArena, pNeighbour);
++ pBT->base = pNeighbour->base;
++ pBT->uSize += pNeighbour->uSize;
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pNeighbour, IMG_NULL);
++
++#ifdef RA_STATS
++ pArena->sStatistics.uFreeSegmentCount--;
++#endif
++ }
++
++
++ pNeighbour = pBT->pNextSegment;
++ if (pNeighbour!=IMG_NULL
++ && pNeighbour->type == btt_free
++ && pBT->base + pBT->uSize == pNeighbour->base)
++ {
++ _FreeListRemove (pArena, pNeighbour);
++ _SegmentListRemove (pArena, pNeighbour);
++ pBT->uSize += pNeighbour->uSize;
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pNeighbour, IMG_NULL);
++
++#ifdef RA_STATS
++ pArena->sStatistics.uFreeSegmentCount--;
++#endif
++ }
++
++
++ if (pArena->pBackingStoreFree != IMG_NULL && bFreeBackingStore)
++ {
++ IMG_UINTPTR_T uRoundedStart, uRoundedEnd;
++
++
++ uRoundedStart = (uOrigBase / pArena->uQuantum) * pArena->uQuantum;
++
++ if (uRoundedStart < pBT->base)
++ {
++ uRoundedStart += pArena->uQuantum;
++ }
++
++
++ uRoundedEnd = ((uOrigBase + uOrigSize + pArena->uQuantum - 1) / pArena->uQuantum) * pArena->uQuantum;
++
++ if (uRoundedEnd > (pBT->base + pBT->uSize))
++ {
++ uRoundedEnd -= pArena->uQuantum;
++ }
++
++ if (uRoundedStart < uRoundedEnd)
++ {
++ pArena->pBackingStoreFree(pArena->pImportHandle, uRoundedStart, uRoundedEnd, (IMG_HANDLE)0);
++ }
++ }
++
++ if (pBT->pNextSegment!=IMG_NULL && pBT->pNextSegment->type == btt_span
++ && pBT->pPrevSegment!=IMG_NULL && pBT->pPrevSegment->type == btt_span)
++ {
++ BT *next = pBT->pNextSegment;
++ BT *prev = pBT->pPrevSegment;
++ _SegmentListRemove (pArena, next);
++ _SegmentListRemove (pArena, prev);
++ _SegmentListRemove (pArena, pBT);
++ pArena->pImportFree (pArena->pImportHandle, pBT->base, pBT->psMapping);
++#ifdef RA_STATS
++ pArena->sStatistics.uSpanCount--;
++ pArena->sStatistics.uExportCount++;
++ pArena->sStatistics.uFreeSegmentCount--;
++ pArena->sStatistics.uFreeResourceCount-=pBT->uSize;
++ pArena->sStatistics.uTotalResourceCount-=pBT->uSize;
++#endif
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), next, IMG_NULL);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), prev, IMG_NULL);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL);
++
++ }
++ else
++ _FreeListInsert (pArena, pBT);
++}
++
++
++static IMG_BOOL
++_AttemptAllocAligned (RA_ARENA *pArena,
++ IMG_SIZE_T uSize,
++ BM_MAPPING **ppsMapping,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 uAlignment,
++ IMG_UINT32 uAlignmentOffset,
++ IMG_UINTPTR_T *base)
++{
++ IMG_UINT32 uIndex;
++ PVR_ASSERT (pArena!=IMG_NULL);
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned: invalid parameter - pArena"));
++ return IMG_FALSE;
++ }
++
++ if (uAlignment>1)
++ uAlignmentOffset %= uAlignment;
++
++
++
++ uIndex = pvr_log2 (uSize);
++
++#if 0
++
++ if (1u<<uIndex < uSize)
++ uIndex++;
++#endif
++
++ while (uIndex < FREE_TABLE_LIMIT && pArena->aHeadFree[uIndex]==IMG_NULL)
++ uIndex++;
++
++ while (uIndex < FREE_TABLE_LIMIT)
++ {
++ if (pArena->aHeadFree[uIndex]!=IMG_NULL)
++ {
++
++ BT *pBT;
++
++ pBT = pArena->aHeadFree [uIndex];
++ while (pBT!=IMG_NULL)
++ {
++ IMG_UINTPTR_T aligned_base;
++
++ if (uAlignment>1)
++ aligned_base = (pBT->base + uAlignmentOffset + uAlignment - 1) / uAlignment * uAlignment - uAlignmentOffset;
++ else
++ aligned_base = pBT->base;
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_AttemptAllocAligned: pBT-base=0x%x "
++ "pBT-size=0x%x alignedbase=0x%x size=0x%x",
++ pBT->base, pBT->uSize, aligned_base, uSize));
++
++ if (pBT->base + pBT->uSize >= aligned_base + uSize)
++ {
++ if(!pBT->psMapping || pBT->psMapping->ui32Flags == uFlags)
++ {
++ _FreeListRemove (pArena, pBT);
++
++ PVR_ASSERT (pBT->type == btt_free);
++
++#ifdef RA_STATS
++ pArena->sStatistics.uLiveSegmentCount++;
++ pArena->sStatistics.uFreeSegmentCount--;
++ pArena->sStatistics.uFreeResourceCount-=pBT->uSize;
++#endif
++
++
++ if (aligned_base > pBT->base)
++ {
++ BT *pNeighbour;
++
++ pNeighbour = _SegmentSplit (pArena, pBT, aligned_base-pBT->base);
++
++ if (pNeighbour==IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned: Front split failed"));
++
++ _FreeListInsert (pArena, pBT);
++ return IMG_FALSE;
++ }
++
++ _FreeListInsert (pArena, pBT);
++ #ifdef RA_STATS
++ pArena->sStatistics.uFreeSegmentCount++;
++ pArena->sStatistics.uFreeResourceCount+=pBT->uSize;
++ #endif
++ pBT = pNeighbour;
++ }
++
++
++ if (pBT->uSize > uSize)
++ {
++ BT *pNeighbour;
++ pNeighbour = _SegmentSplit (pArena, pBT, uSize);
++
++ if (pNeighbour==IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned: Back split failed"));
++
++ _FreeListInsert (pArena, pBT);
++ return IMG_FALSE;
++ }
++
++ _FreeListInsert (pArena, pNeighbour);
++ #ifdef RA_STATS
++ pArena->sStatistics.uFreeSegmentCount++;
++ pArena->sStatistics.uFreeResourceCount+=pNeighbour->uSize;
++ #endif
++ }
++
++ pBT->type = btt_live;
++
++#if defined(VALIDATE_ARENA_TEST)
++ if (pBT->eResourceType == IMPORTED_RESOURCE_TYPE)
++ {
++ pBT->eResourceSpan = IMPORTED_RESOURCE_SPAN_LIVE;
++ }
++ else if (pBT->eResourceType == NON_IMPORTED_RESOURCE_TYPE)
++ {
++ pBT->eResourceSpan = RESOURCE_SPAN_LIVE;
++ }
++ else
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned ERROR: pBT->eResourceType unrecognized"));
++ PVR_DBG_BREAK;
++ }
++#endif
++ if (!HASH_Insert (pArena->pSegmentHash, pBT->base, (IMG_UINTPTR_T) pBT))
++ {
++ _FreeBT (pArena, pBT, IMG_FALSE);
++ return IMG_FALSE;
++ }
++
++ if (ppsMapping!=IMG_NULL)
++ *ppsMapping = pBT->psMapping;
++
++ *base = pBT->base;
++
++ return IMG_TRUE;
++ }
++ else
++ {
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "AttemptAllocAligned: mismatch in flags. Import has %x, request was %x", pBT->psMapping->ui32Flags, uFlags));
++
++ }
++ }
++ pBT = pBT->pNextFree;
++ }
++
++ }
++ uIndex++;
++ }
++
++ return IMG_FALSE;
++}
++
++
++
++RA_ARENA *
++RA_Create (IMG_CHAR *name,
++ IMG_UINTPTR_T base,
++ IMG_SIZE_T uSize,
++ BM_MAPPING *psMapping,
++ IMG_SIZE_T uQuantum,
++ IMG_BOOL (*imp_alloc)(IMG_VOID *, IMG_SIZE_T uSize, IMG_SIZE_T *pActualSize,
++ BM_MAPPING **ppsMapping, IMG_UINT32 _flags, IMG_UINTPTR_T *pBase),
++ IMG_VOID (*imp_free) (IMG_VOID *, IMG_UINTPTR_T, BM_MAPPING *),
++ IMG_VOID (*backingstore_free) (IMG_VOID*, IMG_SIZE_T, IMG_SIZE_T, IMG_HANDLE),
++ IMG_VOID *pImportHandle)
++{
++ RA_ARENA *pArena;
++ BT *pBT;
++ IMG_INT i;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Create: name='%s', base=0x%x, uSize=0x%x, alloc=0x%x, free=0x%x",
++ name, base, uSize, imp_alloc, imp_free));
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (*pArena),
++ (IMG_VOID **)&pArena, IMG_NULL,
++ "Resource Arena") != PVRSRV_OK)
++ {
++ goto arena_fail;
++ }
++
++ pArena->name = name;
++ pArena->pImportAlloc = (imp_alloc!=IMG_NULL) ? imp_alloc : _RequestAllocFail;
++ pArena->pImportFree = imp_free;
++ pArena->pBackingStoreFree = backingstore_free;
++ pArena->pImportHandle = pImportHandle;
++ for (i=0; i<FREE_TABLE_LIMIT; i++)
++ pArena->aHeadFree[i] = IMG_NULL;
++ pArena->pHeadSegment = IMG_NULL;
++ pArena->pTailSegment = IMG_NULL;
++ pArena->uQuantum = uQuantum;
++
++#ifdef RA_STATS
++ pArena->sStatistics.uSpanCount = 0;
++ pArena->sStatistics.uLiveSegmentCount = 0;
++ pArena->sStatistics.uFreeSegmentCount = 0;
++ pArena->sStatistics.uFreeResourceCount = 0;
++ pArena->sStatistics.uTotalResourceCount = 0;
++ pArena->sStatistics.uCumulativeAllocs = 0;
++ pArena->sStatistics.uCumulativeFrees = 0;
++ pArena->sStatistics.uImportCount = 0;
++ pArena->sStatistics.uExportCount = 0;
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++ if(strcmp(pArena->name,"") != 0)
++ {
++
++#ifndef PVR_PROC_USE_SEQ_FILE
++ IMG_INT ret;
++ IMG_INT (*pfnCreateProcEntry)(const IMG_CHAR *, read_proc_t, write_proc_t, IMG_VOID *);
++
++ pArena->bInitProcEntry = !PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL);
++
++
++ pfnCreateProcEntry = pArena->bInitProcEntry ? CreateProcEntry : CreatePerProcessProcEntry;
++
++ ret = snprintf(pArena->szProcInfoName, sizeof(pArena->szProcInfoName), "ra_info_%s", pArena->name);
++ if (ret > 0 && ret < sizeof(pArena->szProcInfoName))
++ {
++ (IMG_VOID) pfnCreateProcEntry(ReplaceSpaces(pArena->szProcInfoName), RA_DumpInfo, 0, pArena);
++ }
++ else
++ {
++ pArena->szProcInfoName[0] = 0;
++ PVR_DPF((PVR_DBG_ERROR, "RA_Create: couldn't create ra_info proc entry for arena %s", pArena->name));
++ }
++
++ ret = snprintf(pArena->szProcSegsName, sizeof(pArena->szProcSegsName), "ra_segs_%s", pArena->name);
++ if (ret > 0 && ret < sizeof(pArena->szProcSegsName))
++ {
++ (IMG_VOID) pfnCreateProcEntry(ReplaceSpaces(pArena->szProcSegsName), RA_DumpSegs, 0, pArena);
++ }
++ else
++ {
++ pArena->szProcSegsName[0] = 0;
++ PVR_DPF((PVR_DBG_ERROR, "RA_Create: couldn't create ra_segs proc entry for arena %s", pArena->name));
++ }
++#else
++
++ IMG_INT ret;
++ IMG_CHAR szProcInfoName[PROC_NAME_SIZE];
++ IMG_CHAR szProcSegsName[PROC_NAME_SIZE];
++ struct proc_dir_entry* (*pfnCreateProcEntrySeq)(const IMG_CHAR *,
++ IMG_VOID*,
++ pvr_next_proc_seq_t,
++ pvr_show_proc_seq_t,
++ pvr_off2element_proc_seq_t,
++ pvr_startstop_proc_seq_t,
++ write_proc_t);
++
++ pArena->bInitProcEntry = !PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL);
++
++
++ pfnCreateProcEntrySeq = pArena->bInitProcEntry ? CreateProcEntrySeq : CreatePerProcessProcEntrySeq;
++
++ ret = snprintf(szProcInfoName, sizeof(szProcInfoName), "ra_info_%s", pArena->name);
++ if (ret > 0 && ret < sizeof(szProcInfoName))
++ {
++ pArena->pProcInfo = pfnCreateProcEntrySeq(ReplaceSpaces(szProcInfoName), pArena, NULL,
++ RA_ProcSeqShowInfo, RA_ProcSeqOff2ElementInfo, NULL, NULL);
++ }
++ else
++ {
++ pArena->pProcInfo = 0;
++ PVR_DPF((PVR_DBG_ERROR, "RA_Create: couldn't create ra_info proc entry for arena %s", pArena->name));
++ }
++
++ ret = snprintf(szProcSegsName, sizeof(szProcSegsName), "ra_segs_%s", pArena->name);
++ if (ret > 0 && ret < sizeof(szProcInfoName))
++ {
++ pArena->pProcSegs = pfnCreateProcEntrySeq(ReplaceSpaces(szProcSegsName), pArena, NULL,
++ RA_ProcSeqShowRegs, RA_ProcSeqOff2ElementRegs, NULL, NULL);
++ }
++ else
++ {
++ pArena->pProcSegs = 0;
++ PVR_DPF((PVR_DBG_ERROR, "RA_Create: couldn't create ra_segs proc entry for arena %s", pArena->name));
++ }
++
++#endif
++
++ }
++#endif
++
++ pArena->pSegmentHash = HASH_Create (MINIMUM_HASH_SIZE);
++ if (pArena->pSegmentHash==IMG_NULL)
++ {
++ goto hash_fail;
++ }
++ if (uSize>0)
++ {
++ uSize = (uSize + uQuantum - 1) / uQuantum * uQuantum;
++ pBT = _InsertResource (pArena, base, uSize);
++ if (pBT == IMG_NULL)
++ {
++ goto insert_fail;
++ }
++ pBT->psMapping = psMapping;
++
++ }
++ return pArena;
++
++insert_fail:
++ HASH_Delete (pArena->pSegmentHash);
++hash_fail:
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RA_ARENA), pArena, IMG_NULL);
++
++arena_fail:
++ return IMG_NULL;
++}
++
++IMG_VOID
++RA_Delete (RA_ARENA *pArena)
++{
++ IMG_UINT32 uIndex;
++
++ PVR_ASSERT(pArena != IMG_NULL);
++
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"RA_Delete: invalid parameter - pArena"));
++ return;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Delete: name='%s'", pArena->name));
++
++ for (uIndex=0; uIndex<FREE_TABLE_LIMIT; uIndex++)
++ pArena->aHeadFree[uIndex] = IMG_NULL;
++
++ while (pArena->pHeadSegment != IMG_NULL)
++ {
++ BT *pBT = pArena->pHeadSegment;
++
++ if (pBT->type != btt_free)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"RA_Delete: allocations still exist in the arena that is being destroyed"));
++ PVR_DPF ((PVR_DBG_ERROR,"Likely Cause: client drivers not freeing alocations before destroying devmemcontext"));
++ PVR_DPF ((PVR_DBG_ERROR,"RA_Delete: base = 0x%x size=0x%x", pBT->base, pBT->uSize));
++ }
++
++ _SegmentListRemove (pArena, pBT);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL);
++
++#ifdef RA_STATS
++ pArena->sStatistics.uSpanCount--;
++#endif
++ }
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++ {
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ IMG_VOID (*pfnRemoveProcEntrySeq)(struct proc_dir_entry*);
++
++ pfnRemoveProcEntrySeq = pArena->bInitProcEntry ? RemoveProcEntrySeq : RemovePerProcessProcEntrySeq;
++
++ if (pArena->pProcInfo != 0)
++ {
++ pfnRemoveProcEntrySeq( pArena->pProcInfo );
++ }
++
++ if (pArena->pProcSegs != 0)
++ {
++ pfnRemoveProcEntrySeq( pArena->pProcSegs );
++ }
++
++#else
++ IMG_VOID (*pfnRemoveProcEntry)(const IMG_CHAR *);
++
++ pfnRemoveProcEntry = pArena->bInitProcEntry ? RemoveProcEntry : RemovePerProcessProcEntry;
++
++ if (pArena->szProcInfoName[0] != 0)
++ {
++ pfnRemoveProcEntry(pArena->szProcInfoName);
++ }
++
++ if (pArena->szProcSegsName[0] != 0)
++ {
++ pfnRemoveProcEntry(pArena->szProcSegsName);
++ }
++
++#endif
++ }
++#endif
++ HASH_Delete (pArena->pSegmentHash);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RA_ARENA), pArena, IMG_NULL);
++
++}
++
++IMG_BOOL
++RA_TestDelete (RA_ARENA *pArena)
++{
++ PVR_ASSERT(pArena != IMG_NULL);
++
++ if (pArena != IMG_NULL)
++ {
++ while (pArena->pHeadSegment != IMG_NULL)
++ {
++ BT *pBT = pArena->pHeadSegment;
++ if (pBT->type != btt_free)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"RA_TestDelete: detected resource leak!"));
++ PVR_DPF ((PVR_DBG_ERROR,"RA_TestDelete: base = 0x%x size=0x%x", pBT->base, pBT->uSize));
++ return IMG_FALSE;
++ }
++ }
++ }
++
++ return IMG_TRUE;
++}
++
++IMG_BOOL
++RA_Add (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++ PVR_ASSERT (pArena != IMG_NULL);
++
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"RA_Add: invalid parameter - pArena"));
++ return IMG_FALSE;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Add: name='%s', base=0x%x, size=0x%x", pArena->name, base, uSize));
++
++ uSize = (uSize + pArena->uQuantum - 1) / pArena->uQuantum * pArena->uQuantum;
++ return ((IMG_BOOL)(_InsertResource (pArena, base, uSize) != IMG_NULL));
++}
++
++IMG_BOOL
++RA_Alloc (RA_ARENA *pArena,
++ IMG_SIZE_T uRequestSize,
++ IMG_SIZE_T *pActualSize,
++ BM_MAPPING **ppsMapping,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 uAlignment,
++ IMG_UINT32 uAlignmentOffset,
++ IMG_UINTPTR_T *base)
++{
++ IMG_BOOL bResult;
++ IMG_SIZE_T uSize = uRequestSize;
++
++ PVR_ASSERT (pArena!=IMG_NULL);
++
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"RA_Alloc: invalid parameter - pArena"));
++ return IMG_FALSE;
++ }
++
++#if defined(VALIDATE_ARENA_TEST)
++ ValidateArena(pArena);
++#endif
++
++#ifdef USE_BM_FREESPACE_CHECK
++ CheckBMFreespace();
++#endif
++
++ if (pActualSize != IMG_NULL)
++ {
++ *pActualSize = uSize;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Alloc: arena='%s', size=0x%x(0x%x), alignment=0x%x, offset=0x%x",
++ pArena->name, uSize, uRequestSize, uAlignment, uAlignmentOffset));
++
++
++
++ bResult = _AttemptAllocAligned (pArena, uSize, ppsMapping, uFlags,
++ uAlignment, uAlignmentOffset, base);
++ if (!bResult)
++ {
++ BM_MAPPING *psImportMapping;
++ IMG_UINTPTR_T import_base;
++ IMG_SIZE_T uImportSize = uSize;
++
++
++
++
++ if (uAlignment > pArena->uQuantum)
++ {
++ uImportSize += (uAlignment - 1);
++ }
++
++
++ uImportSize = ((uImportSize + pArena->uQuantum - 1)/pArena->uQuantum)*pArena->uQuantum;
++
++ bResult =
++ pArena->pImportAlloc (pArena->pImportHandle, uImportSize, &uImportSize,
++ &psImportMapping, uFlags, &import_base);
++ if (bResult)
++ {
++ BT *pBT;
++ pBT = _InsertResourceSpan (pArena, import_base, uImportSize);
++
++ if (pBT == IMG_NULL)
++ {
++
++ pArena->pImportFree(pArena->pImportHandle, import_base,
++ psImportMapping);
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Alloc: name='%s', size=0x%x failed!",
++ pArena->name, uSize));
++
++ return IMG_FALSE;
++ }
++ pBT->psMapping = psImportMapping;
++#ifdef RA_STATS
++ pArena->sStatistics.uFreeSegmentCount++;
++ pArena->sStatistics.uFreeResourceCount += uImportSize;
++ pArena->sStatistics.uImportCount++;
++ pArena->sStatistics.uSpanCount++;
++#endif
++ bResult = _AttemptAllocAligned(pArena, uSize, ppsMapping, uFlags,
++ uAlignment, uAlignmentOffset,
++ base);
++ if (!bResult)
++ {
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Alloc: name='%s' uAlignment failed!",
++ pArena->name));
++ }
++ }
++ }
++#ifdef RA_STATS
++ if (bResult)
++ pArena->sStatistics.uCumulativeAllocs++;
++#endif
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Alloc: name='%s', size=0x%x, *base=0x%x = %d",
++ pArena->name, uSize, *base, bResult));
++
++
++
++#if defined(VALIDATE_ARENA_TEST)
++ ValidateArena(pArena);
++#endif
++
++ return bResult;
++}
++
++
++#if defined(VALIDATE_ARENA_TEST)
++
++IMG_UINT32 ValidateArena(RA_ARENA *pArena)
++{
++ BT* pSegment;
++ RESOURCE_DESCRIPTOR eNextSpan;
++
++ pSegment = pArena->pHeadSegment;
++
++ if (pSegment == IMG_NULL)
++ {
++ return 0;
++ }
++
++ if (pSegment->eResourceType == IMPORTED_RESOURCE_TYPE)
++ {
++ PVR_ASSERT(pSegment->eResourceSpan == IMPORTED_RESOURCE_SPAN_START);
++
++ while (pSegment->pNextSegment)
++ {
++ eNextSpan = pSegment->pNextSegment->eResourceSpan;
++
++ switch (pSegment->eResourceSpan)
++ {
++ case IMPORTED_RESOURCE_SPAN_LIVE:
++
++ if (!((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) ||
++ (eNextSpan == IMPORTED_RESOURCE_SPAN_FREE) ||
++ (eNextSpan == IMPORTED_RESOURCE_SPAN_END)))
++ {
++
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++ case IMPORTED_RESOURCE_SPAN_FREE:
++
++ if (!((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) ||
++ (eNextSpan == IMPORTED_RESOURCE_SPAN_END)))
++ {
++
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++ case IMPORTED_RESOURCE_SPAN_END:
++
++ if ((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) ||
++ (eNextSpan == IMPORTED_RESOURCE_SPAN_FREE) ||
++ (eNextSpan == IMPORTED_RESOURCE_SPAN_END))
++ {
++
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++
++ case IMPORTED_RESOURCE_SPAN_START:
++
++ if (!((eNextSpan == IMPORTED_RESOURCE_SPAN_LIVE) ||
++ (eNextSpan == IMPORTED_RESOURCE_SPAN_FREE)))
++ {
++
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ break;
++ }
++ pSegment = pSegment->pNextSegment;
++ }
++ }
++ else if (pSegment->eResourceType == NON_IMPORTED_RESOURCE_TYPE)
++ {
++ PVR_ASSERT((pSegment->eResourceSpan == RESOURCE_SPAN_FREE) || (pSegment->eResourceSpan == RESOURCE_SPAN_LIVE));
++
++ while (pSegment->pNextSegment)
++ {
++ eNextSpan = pSegment->pNextSegment->eResourceSpan;
++
++ switch (pSegment->eResourceSpan)
++ {
++ case RESOURCE_SPAN_LIVE:
++
++ if (!((eNextSpan == RESOURCE_SPAN_FREE) ||
++ (eNextSpan == RESOURCE_SPAN_LIVE)))
++ {
++
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++ case RESOURCE_SPAN_FREE:
++
++ if (!((eNextSpan == RESOURCE_SPAN_FREE) ||
++ (eNextSpan == RESOURCE_SPAN_LIVE)))
++ {
++
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ }
++ break;
++
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "ValidateArena ERROR: adjacent boundary tags %d (base=0x%x) and %d (base=0x%x) are incompatible (arena: %s)",
++ pSegment->ui32BoundaryTagID, pSegment->base, pSegment->pNextSegment->ui32BoundaryTagID, pSegment->pNextSegment->base, pArena->name));
++
++ PVR_DBG_BREAK;
++ break;
++ }
++ pSegment = pSegment->pNextSegment;
++ }
++
++ }
++ else
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"ValidateArena ERROR: pSegment->eResourceType unrecognized"));
++
++ PVR_DBG_BREAK;
++ }
++
++ return 0;
++}
++
++#endif
++
++
++IMG_VOID
++RA_Free (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_BOOL bFreeBackingStore)
++{
++ BT *pBT;
++
++ PVR_ASSERT (pArena != IMG_NULL);
++
++ if (pArena == IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_ERROR,"RA_Free: invalid parameter - pArena"));
++ return;
++ }
++
++#ifdef USE_BM_FREESPACE_CHECK
++ CheckBMFreespace();
++#endif
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "RA_Free: name='%s', base=0x%x", pArena->name, base));
++
++ pBT = (BT *) HASH_Remove (pArena->pSegmentHash, base);
++ PVR_ASSERT (pBT != IMG_NULL);
++
++ if (pBT)
++ {
++ PVR_ASSERT (pBT->base == base);
++
++#ifdef RA_STATS
++ pArena->sStatistics.uCumulativeFrees++;
++#endif
++
++#ifdef USE_BM_FREESPACE_CHECK
++{
++ IMG_BYTE* p;
++ IMG_BYTE* endp;
++
++ p = (IMG_BYTE*)pBT->base + SysGetDevicePhysOffset();
++ endp = (IMG_BYTE*)((IMG_UINT32)(p + pBT->uSize));
++ while ((IMG_UINT32)p & 3)
++ {
++ *p++ = 0xAA;
++ }
++ while (p < (IMG_BYTE*)((IMG_UINT32)endp & 0xfffffffc))
++ {
++ *(IMG_UINT32*)p = 0xAAAAAAAA;
++ p += sizeof(IMG_UINT32);
++ }
++ while (p < endp)
++ {
++ *p++ = 0xAA;
++ }
++ PVR_DPF((PVR_DBG_MESSAGE,"BM_FREESPACE_CHECK: RA_Free Cleared %08X to %08X (size=0x%x)",(IMG_BYTE*)pBT->base + SysGetDevicePhysOffset(),endp-1,pBT->uSize));
++}
++#endif
++ _FreeBT (pArena, pBT, bFreeBackingStore);
++ }
++}
++
++
++IMG_BOOL RA_GetNextLiveSegment(IMG_HANDLE hArena, RA_SEGMENT_DETAILS *psSegDetails)
++{
++ BT *pBT;
++
++ if (psSegDetails->hSegment)
++ {
++ pBT = (BT *)psSegDetails->hSegment;
++ }
++ else
++ {
++ RA_ARENA *pArena = (RA_ARENA *)hArena;
++
++ pBT = pArena->pHeadSegment;
++ }
++
++ while (pBT != IMG_NULL)
++ {
++ if (pBT->type == btt_live)
++ {
++ psSegDetails->uiSize = pBT->uSize;
++ psSegDetails->sCpuPhyAddr.uiAddr = pBT->base;
++ psSegDetails->hSegment = (IMG_HANDLE)pBT->pNextSegment;
++
++ return IMG_TRUE;
++ }
++
++ pBT = pBT->pNextSegment;
++ }
++
++ psSegDetails->uiSize = 0;
++ psSegDetails->sCpuPhyAddr.uiAddr = 0;
++ psSegDetails->hSegment = (IMG_HANDLE)-1;
++
++ return IMG_FALSE;
++}
++
++
++#ifdef USE_BM_FREESPACE_CHECK
++RA_ARENA* pJFSavedArena = IMG_NULL;
++
++IMG_VOID CheckBMFreespace(IMG_VOID)
++{
++ BT *pBT;
++ IMG_BYTE* p;
++ IMG_BYTE* endp;
++
++ if (pJFSavedArena != IMG_NULL)
++ {
++ for (pBT=pJFSavedArena->pHeadSegment; pBT!=IMG_NULL; pBT=pBT->pNextSegment)
++ {
++ if (pBT->type == btt_free)
++ {
++ p = (IMG_BYTE*)pBT->base + SysGetDevicePhysOffset();
++ endp = (IMG_BYTE*)((IMG_UINT32)(p + pBT->uSize) & 0xfffffffc);
++
++ while ((IMG_UINT32)p & 3)
++ {
++ if (*p++ != 0xAA)
++ {
++ fprintf(stderr,"BM_FREESPACE_CHECK: Blank space at %08X has changed to 0x%x\n",p,*(IMG_UINT32*)p);
++ for (;;);
++ break;
++ }
++ }
++ while (p < endp)
++ {
++ if (*(IMG_UINT32*)p != 0xAAAAAAAA)
++ {
++ fprintf(stderr,"BM_FREESPACE_CHECK: Blank space at %08X has changed to 0x%x\n",p,*(IMG_UINT32*)p);
++ for (;;);
++ break;
++ }
++ p += 4;
++ }
++ }
++ }
++ }
++}
++#endif
++
++
++#if (defined(CONFIG_PROC_FS) && defined(DEBUG)) || defined (RA_STATS)
++static IMG_CHAR *
++_BTType (IMG_INT eType)
++{
++ switch (eType)
++ {
++ case btt_span: return "span";
++ case btt_free: return "free";
++ case btt_live: return "live";
++ }
++ return "junk";
++}
++#endif
++
++#if defined(ENABLE_RA_DUMP)
++IMG_VOID
++RA_Dump (RA_ARENA *pArena)
++{
++ BT *pBT;
++ PVR_ASSERT (pArena != IMG_NULL);
++ PVR_DPF ((PVR_DBG_MESSAGE,"Arena '%s':", pArena->name));
++ PVR_DPF ((PVR_DBG_MESSAGE," alloc=%08X free=%08X handle=%08X quantum=%d",
++ pArena->pImportAlloc, pArena->pImportFree, pArena->pImportHandle,
++ pArena->uQuantum));
++ PVR_DPF ((PVR_DBG_MESSAGE," segment Chain:"));
++ if (pArena->pHeadSegment != IMG_NULL &&
++ pArena->pHeadSegment->pPrevSegment != IMG_NULL)
++ PVR_DPF ((PVR_DBG_MESSAGE," error: head boundary tag has invalid pPrevSegment"));
++ if (pArena->pTailSegment != IMG_NULL &&
++ pArena->pTailSegment->pNextSegment != IMG_NULL)
++ PVR_DPF ((PVR_DBG_MESSAGE," error: tail boundary tag has invalid pNextSegment"));
++
++ for (pBT=pArena->pHeadSegment; pBT!=IMG_NULL; pBT=pBT->pNextSegment)
++ {
++ PVR_DPF ((PVR_DBG_MESSAGE,"\tbase=0x%x size=0x%x type=%s ref=%08X",
++ (IMG_UINT32) pBT->base, pBT->uSize, _BTType (pBT->type),
++ pBT->pRef));
++ }
++
++#ifdef HASH_TRACE
++ HASH_Dump (pArena->pSegmentHash);
++#endif
++}
++#endif
++
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void RA_ProcSeqShowInfo(struct seq_file *sfile, void* el)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)sfile->private;
++ RA_ARENA *pArena = (RA_ARENA *)handlers->data;
++ IMG_INT off = (IMG_INT)el;
++
++ switch (off)
++ {
++ case 1:
++ seq_printf(sfile, "quantum\t\t\t%lu\n", pArena->uQuantum);
++ break;
++ case 2:
++ seq_printf(sfile, "import_handle\t\t%08X\n", (IMG_UINT)pArena->pImportHandle);
++ break;
++#ifdef RA_STATS
++ case 3:
++ seq_printf(sfile,"span count\t\t%lu\n", pArena->sStatistics.uSpanCount);
++ break;
++ case 4:
++ seq_printf(sfile, "live segment count\t%lu\n", pArena->sStatistics.uLiveSegmentCount);
++ break;
++ case 5:
++ seq_printf(sfile, "free segment count\t%lu\n", pArena->sStatistics.uFreeSegmentCount);
++ break;
++ case 6:
++ seq_printf(sfile, "free resource count\t%lu (0x%x)\n",
++ pArena->sStatistics.uFreeResourceCount,
++ (IMG_UINT)pArena->sStatistics.uFreeResourceCount);
++ break;
++ case 7:
++ seq_printf(sfile, "total allocs\t\t%lu\n", pArena->sStatistics.uCumulativeAllocs);
++ break;
++ case 8:
++ seq_printf(sfile, "total frees\t\t%lu\n", pArena->sStatistics.uCumulativeFrees);
++ break;
++ case 9:
++ seq_printf(sfile, "import count\t\t%lu\n", pArena->sStatistics.uImportCount);
++ break;
++ case 10:
++ seq_printf(sfile, "export count\t\t%lu\n", pArena->sStatistics.uExportCount);
++ break;
++#endif
++ }
++
++}
++
++static void* RA_ProcSeqOff2ElementInfo(struct seq_file * sfile, loff_t off)
++{
++#ifdef RA_STATS
++ if(off <= 9)
++#else
++ if(off <= 1)
++#endif
++ return (void*)(IMG_INT)(off+1);
++ return 0;
++}
++
++static void RA_ProcSeqShowRegs(struct seq_file *sfile, void* el)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)sfile->private;
++ RA_ARENA *pArena = (RA_ARENA *)handlers->data;
++ BT *pBT = (BT*)el;
++
++ if (el == PVR_PROC_SEQ_START_TOKEN)
++ {
++ seq_printf(sfile, "Arena \"%s\"\nBase Size Type Ref\n", pArena->name);
++ return;
++ }
++
++ if (pBT)
++ {
++ seq_printf(sfile, "%08x %8x %4s %08x\n",
++ (IMG_UINT)pBT->base, (IMG_UINT)pBT->uSize, _BTType (pBT->type),
++ (IMG_UINT)pBT->psMapping);
++ }
++}
++
++static void* RA_ProcSeqOff2ElementRegs(struct seq_file * sfile, loff_t off)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)sfile->private;
++ RA_ARENA *pArena = (RA_ARENA *)handlers->data;
++ BT *pBT = 0;
++
++ if(off == 0)
++ return PVR_PROC_SEQ_START_TOKEN;
++
++ for (pBT=pArena->pHeadSegment; --off && pBT; pBT=pBT->pNextSegment);
++
++ return (void*)pBT;
++}
++
++
++
++#else
++static IMG_INT
++RA_DumpSegs(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data)
++{
++ BT *pBT = 0;
++ IMG_INT len = 0;
++ RA_ARENA *pArena = (RA_ARENA *)data;
++
++ if (count < 80)
++ {
++ *start = (IMG_CHAR *)0;
++ return (0);
++ }
++ *eof = 0;
++ *start = (IMG_CHAR *)1;
++ if (off == 0)
++ {
++ return printAppend(page, count, 0, "Arena \"%s\"\nBase Size Type Ref\n", pArena->name);
++ }
++ for (pBT=pArena->pHeadSegment; --off && pBT; pBT=pBT->pNextSegment)
++ ;
++ if (pBT)
++ {
++ len = printAppend(page, count, 0, "%08x %8x %4s %08x\n",
++ (IMG_UINT)pBT->base, (IMG_UINT)pBT->uSize, _BTType (pBT->type),
++ (IMG_UINT)pBT->psMapping);
++ }
++ else
++ {
++ *eof = 1;
++ }
++ return (len);
++}
++
++static IMG_INT
++RA_DumpInfo(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data)
++{
++ IMG_INT len = 0;
++ RA_ARENA *pArena = (RA_ARENA *)data;
++
++ if (count < 80)
++ {
++ *start = (IMG_CHAR *)0;
++ return (0);
++ }
++ *eof = 0;
++ switch (off)
++ {
++ case 0:
++ len = printAppend(page, count, 0, "quantum\t\t\t%lu\n", pArena->uQuantum);
++ break;
++ case 1:
++ len = printAppend(page, count, 0, "import_handle\t\t%08X\n", (IMG_UINT)pArena->pImportHandle);
++ break;
++#ifdef RA_STATS
++ case 2:
++ len = printAppend(page, count, 0, "span count\t\t%lu\n", pArena->sStatistics.uSpanCount);
++ break;
++ case 3:
++ len = printAppend(page, count, 0, "live segment count\t%lu\n", pArena->sStatistics.uLiveSegmentCount);
++ break;
++ case 4:
++ len = printAppend(page, count, 0, "free segment count\t%lu\n", pArena->sStatistics.uFreeSegmentCount);
++ break;
++ case 5:
++ len = printAppend(page, count, 0, "free resource count\t%lu (0x%x)\n",
++ pArena->sStatistics.uFreeResourceCount,
++ (IMG_UINT)pArena->sStatistics.uFreeResourceCount);
++ break;
++ case 6:
++ len = printAppend(page, count, 0, "total allocs\t\t%lu\n", pArena->sStatistics.uCumulativeAllocs);
++ break;
++ case 7:
++ len = printAppend(page, count, 0, "total frees\t\t%lu\n", pArena->sStatistics.uCumulativeFrees);
++ break;
++ case 8:
++ len = printAppend(page, count, 0, "import count\t\t%lu\n", pArena->sStatistics.uImportCount);
++ break;
++ case 9:
++ len = printAppend(page, count, 0, "export count\t\t%lu\n", pArena->sStatistics.uExportCount);
++ break;
++#endif
++
++ default:
++ *eof = 1;
++ }
++ *start = (IMG_CHAR *)1;
++ return (len);
++}
++#endif
++#endif
++
++
++#ifdef RA_STATS
++PVRSRV_ERROR RA_GetStats(RA_ARENA *pArena,
++ IMG_CHAR **ppszStr,
++ IMG_UINT32 *pui32StrLen)
++{
++ IMG_CHAR *pszStr = *ppszStr;
++ IMG_UINT32 ui32StrLen = *pui32StrLen;
++ IMG_INT32 i32Count;
++ BT *pBT;
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "\nArena '%s':\n", pArena->name);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, " allocCB=%08X freeCB=%08X handle=%08X quantum=%d\n",
++ pArena->pImportAlloc,
++ pArena->pImportFree,
++ pArena->pImportHandle,
++ pArena->uQuantum);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "span count\t\t%lu\n", pArena->sStatistics.uSpanCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "live segment count\t%lu\n", pArena->sStatistics.uLiveSegmentCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "free segment count\t%lu\n", pArena->sStatistics.uFreeSegmentCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "free resource count\t%lu (0x%x)\n",
++ pArena->sStatistics.uFreeResourceCount,
++ (IMG_UINT)pArena->sStatistics.uFreeResourceCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "total allocs\t\t%lu\n", pArena->sStatistics.uCumulativeAllocs);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "total frees\t\t%lu\n", pArena->sStatistics.uCumulativeFrees);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "import count\t\t%lu\n", pArena->sStatistics.uImportCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "export count\t\t%lu\n", pArena->sStatistics.uExportCount);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, " segment Chain:\n");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++ if (pArena->pHeadSegment != IMG_NULL &&
++ pArena->pHeadSegment->pPrevSegment != IMG_NULL)
++ {
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, " error: head boundary tag has invalid pPrevSegment\n");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++
++ if (pArena->pTailSegment != IMG_NULL &&
++ pArena->pTailSegment->pNextSegment != IMG_NULL)
++ {
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, " error: tail boundary tag has invalid pNextSegment\n");
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++
++ for (pBT=pArena->pHeadSegment; pBT!=IMG_NULL; pBT=pBT->pNextSegment)
++ {
++ CHECK_SPACE(ui32StrLen);
++ i32Count = OSSNPrintf(pszStr, 100, "\tbase=0x%x size=0x%x type=%s ref=%08X\n",
++ (IMG_UINT32) pBT->base,
++ pBT->uSize,
++ _BTType(pBT->type),
++ pBT->psMapping);
++ UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++ }
++
++ *ppszStr = pszStr;
++ *pui32StrLen = ui32StrLen;
++
++ return PVRSRV_OK;
++}
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/resman.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/resman.c
+new file mode 100644
+index 0000000..3f7ff03
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/common/resman.c
+@@ -0,0 +1,717 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "resman.h"
++
++#ifdef __linux__
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/sched.h>
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
++#include <linux/hardirq.h>
++#else
++#include <asm/hardirq.h>
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
++#include <linux/semaphore.h>
++#else
++#include <asm/semaphore.h>
++#endif
++
++static DECLARE_MUTEX(lock);
++
++#define ACQUIRE_SYNC_OBJ do { \
++ if (in_interrupt()) { \
++ printk ("ISR cannot take RESMAN mutex\n"); \
++ BUG(); \
++ } \
++ else down (&lock); \
++} while (0)
++#define RELEASE_SYNC_OBJ up (&lock)
++
++#else
++
++#define ACQUIRE_SYNC_OBJ
++#define RELEASE_SYNC_OBJ
++
++#endif
++
++#define RESMAN_SIGNATURE 0x12345678
++
++typedef struct _RESMAN_ITEM_
++{
++#ifdef DEBUG
++ IMG_UINT32 ui32Signature;
++#endif
++ struct _RESMAN_ITEM_ **ppsThis;
++ struct _RESMAN_ITEM_ *psNext;
++
++ IMG_UINT32 ui32Flags;
++ IMG_UINT32 ui32ResType;
++
++ IMG_PVOID pvParam;
++ IMG_UINT32 ui32Param;
++
++ RESMAN_FREE_FN pfnFreeResource;
++} RESMAN_ITEM;
++
++
++typedef struct _RESMAN_CONTEXT_
++{
++#ifdef DEBUG
++ IMG_UINT32 ui32Signature;
++#endif
++ struct _RESMAN_CONTEXT_ **ppsThis;
++ struct _RESMAN_CONTEXT_ *psNext;
++
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++
++ RESMAN_ITEM *psResItemList;
++
++} RESMAN_CONTEXT;
++
++
++typedef struct
++{
++ RESMAN_CONTEXT *psContextList;
++
++} RESMAN_LIST, *PRESMAN_LIST;
++
++
++PRESMAN_LIST gpsResList = IMG_NULL;
++
++#include "lists.h"
++
++static IMPLEMENT_LIST_ANY_VA(RESMAN_ITEM)
++static IMPLEMENT_LIST_ANY_VA_2(RESMAN_ITEM, IMG_BOOL, IMG_FALSE)
++static IMPLEMENT_LIST_INSERT(RESMAN_ITEM)
++static IMPLEMENT_LIST_REMOVE(RESMAN_ITEM)
++
++static IMPLEMENT_LIST_REMOVE(RESMAN_CONTEXT)
++static IMPLEMENT_LIST_INSERT(RESMAN_CONTEXT)
++
++
++#define PRINT_RESLIST(x, y, z)
++
++static PVRSRV_ERROR FreeResourceByPtr(RESMAN_ITEM *psItem, IMG_BOOL bExecuteCallback);
++
++static PVRSRV_ERROR FreeResourceByCriteria(PRESMAN_CONTEXT psContext,
++ IMG_UINT32 ui32SearchCriteria,
++ IMG_UINT32 ui32ResType,
++ IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param,
++ IMG_BOOL bExecuteCallback);
++
++
++#ifdef DEBUG
++ static IMG_VOID ValidateResList(PRESMAN_LIST psResList);
++ #define VALIDATERESLIST() ValidateResList(gpsResList)
++#else
++ #define VALIDATERESLIST()
++#endif
++
++
++
++
++
++
++PVRSRV_ERROR ResManInit(IMG_VOID)
++{
++ if (gpsResList == IMG_NULL)
++ {
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(*gpsResList),
++ (IMG_VOID **)&gpsResList, IMG_NULL,
++ "Resource Manager List") != PVRSRV_OK)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ gpsResList->psContextList = IMG_NULL;
++
++
++ VALIDATERESLIST();
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_VOID ResManDeInit(IMG_VOID)
++{
++ if (gpsResList != IMG_NULL)
++ {
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*gpsResList), gpsResList, IMG_NULL);
++ gpsResList = IMG_NULL;
++ }
++}
++
++
++PVRSRV_ERROR PVRSRVResManConnect(IMG_HANDLE hPerProc,
++ PRESMAN_CONTEXT *phResManContext)
++{
++ PVRSRV_ERROR eError;
++ PRESMAN_CONTEXT psResManContext;
++
++
++ ACQUIRE_SYNC_OBJ;
++
++
++ VALIDATERESLIST();
++
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psResManContext),
++ (IMG_VOID **)&psResManContext, IMG_NULL,
++ "Resource Manager Context");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVResManConnect: ERROR allocating new RESMAN context struct"));
++
++
++ VALIDATERESLIST();
++
++
++ RELEASE_SYNC_OBJ;
++
++ return eError;
++ }
++
++#ifdef DEBUG
++ psResManContext->ui32Signature = RESMAN_SIGNATURE;
++#endif
++ psResManContext->psResItemList = IMG_NULL;
++ psResManContext->psPerProc = hPerProc;
++
++
++ List_RESMAN_CONTEXT_Insert(&gpsResList->psContextList, psResManContext);
++
++
++ VALIDATERESLIST();
++
++
++ RELEASE_SYNC_OBJ;
++
++ *phResManContext = psResManContext;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_VOID PVRSRVResManDisconnect(PRESMAN_CONTEXT psResManContext,
++ IMG_BOOL bKernelContext)
++{
++
++ ACQUIRE_SYNC_OBJ;
++
++
++ VALIDATERESLIST();
++
++
++ PRINT_RESLIST(gpsResList, psResManContext, IMG_TRUE);
++
++
++
++ if (!bKernelContext)
++ {
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_OS_USERMODE_MAPPING, 0, 0, IMG_TRUE);
++
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_EVENT_OBJECT, 0, 0, IMG_TRUE);
++
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_MODIFY_SYNC_OPS, 0, 0, IMG_TRUE);
++
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_RENDER_CONTEXT, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_TRANSFER_CONTEXT, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_2D_CONTEXT, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_TRANSFER_CONTEXT, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_SHARED_PB_DESC, 0, 0, IMG_TRUE);
++
++
++
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN_REF, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DISPLAYCLASS_DEVICE, 0, 0, IMG_TRUE);
++
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_BUFFERCLASS_DEVICE, 0, 0, IMG_TRUE);
++
++
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICECLASSMEM_MAPPING, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_WRAP, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_MAPPING, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_ALLOCATION, 0, 0, IMG_TRUE);
++ FreeResourceByCriteria(psResManContext, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_CONTEXT, 0, 0, IMG_TRUE);
++ }
++
++
++ PVR_ASSERT(psResManContext->psResItemList == IMG_NULL);
++
++
++ List_RESMAN_CONTEXT_Remove(psResManContext);
++
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_CONTEXT), psResManContext, IMG_NULL);
++
++
++
++
++ VALIDATERESLIST();
++
++
++ PRINT_RESLIST(gpsResList, psResManContext, IMG_FALSE);
++
++
++ RELEASE_SYNC_OBJ;
++}
++
++
++PRESMAN_ITEM ResManRegisterRes(PRESMAN_CONTEXT psResManContext,
++ IMG_UINT32 ui32ResType,
++ IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param,
++ RESMAN_FREE_FN pfnFreeResource)
++{
++ PRESMAN_ITEM psNewResItem;
++
++ PVR_ASSERT(psResManContext != IMG_NULL);
++ PVR_ASSERT(ui32ResType != 0);
++
++ if (psResManContext == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ResManRegisterRes: invalid parameter - psResManContext"));
++ return (PRESMAN_ITEM) IMG_NULL;
++ }
++
++
++ ACQUIRE_SYNC_OBJ;
++
++
++ VALIDATERESLIST();
++
++ PVR_DPF((PVR_DBG_MESSAGE, "ResManRegisterRes: register resource "
++ "Context 0x%x, ResType 0x%x, pvParam 0x%x, ui32Param 0x%x, "
++ "FreeFunc %08X",
++ psResManContext, ui32ResType, (IMG_UINT32)pvParam,
++ ui32Param, pfnFreeResource));
++
++
++ if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(RESMAN_ITEM), (IMG_VOID **)&psNewResItem,
++ IMG_NULL,
++ "Resource Manager Item") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ResManRegisterRes: "
++ "ERROR allocating new resource item"));
++
++
++ RELEASE_SYNC_OBJ;
++
++ return((PRESMAN_ITEM)IMG_NULL);
++ }
++
++
++#ifdef DEBUG
++ psNewResItem->ui32Signature = RESMAN_SIGNATURE;
++#endif
++ psNewResItem->ui32ResType = ui32ResType;
++ psNewResItem->pvParam = pvParam;
++ psNewResItem->ui32Param = ui32Param;
++ psNewResItem->pfnFreeResource = pfnFreeResource;
++ psNewResItem->ui32Flags = 0;
++
++
++ List_RESMAN_ITEM_Insert(&psResManContext->psResItemList, psNewResItem);
++
++
++ VALIDATERESLIST();
++
++
++ RELEASE_SYNC_OBJ;
++
++ return(psNewResItem);
++}
++
++PVRSRV_ERROR ResManFreeResByPtr(RESMAN_ITEM *psResItem)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(psResItem != IMG_NULL);
++
++ if (psResItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByPtr: NULL ptr - nothing to do"));
++ return PVRSRV_OK;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByPtr: freeing resource at %08X", psResItem));
++
++
++ ACQUIRE_SYNC_OBJ;
++
++
++ VALIDATERESLIST();
++
++
++ eError = FreeResourceByPtr(psResItem, IMG_TRUE);
++
++
++ VALIDATERESLIST();
++
++
++ RELEASE_SYNC_OBJ;
++
++ return(eError);
++}
++
++
++PVRSRV_ERROR ResManFreeResByCriteria(PRESMAN_CONTEXT psResManContext,
++ IMG_UINT32 ui32SearchCriteria,
++ IMG_UINT32 ui32ResType,
++ IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(psResManContext != IMG_NULL);
++
++
++ ACQUIRE_SYNC_OBJ;
++
++
++ VALIDATERESLIST();
++
++ PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByCriteria: "
++ "Context 0x%x, Criteria 0x%x, Type 0x%x, Addr 0x%x, Param 0x%x",
++ psResManContext, ui32SearchCriteria, ui32ResType,
++ (IMG_UINT32)pvParam, ui32Param));
++
++
++ eError = FreeResourceByCriteria(psResManContext, ui32SearchCriteria,
++ ui32ResType, pvParam, ui32Param,
++ IMG_TRUE);
++
++
++ VALIDATERESLIST();
++
++
++ RELEASE_SYNC_OBJ;
++
++ return eError;
++}
++
++
++PVRSRV_ERROR ResManDissociateRes(RESMAN_ITEM *psResItem,
++ PRESMAN_CONTEXT psNewResManContext)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ PVR_ASSERT(psResItem != IMG_NULL);
++
++ if (psResItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ResManDissociateRes: invalid parameter - psResItem"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++#ifdef DEBUG
++ PVR_ASSERT(psResItem->ui32Signature == RESMAN_SIGNATURE);
++#endif
++
++ if (psNewResManContext != IMG_NULL)
++ {
++
++ List_RESMAN_ITEM_Remove(psResItem);
++
++
++ List_RESMAN_ITEM_Insert(&psNewResManContext->psResItemList, psResItem);
++
++ }
++ else
++ {
++ eError = FreeResourceByPtr(psResItem, IMG_FALSE);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ResManDissociateRes: failed to free resource by pointer"));
++ return eError;
++ }
++ }
++
++ return eError;
++}
++
++IMG_BOOL ResManFindResourceByPtr_AnyVaCb(RESMAN_ITEM *psCurItem, va_list va)
++{
++ RESMAN_ITEM *psItem;
++
++ psItem = va_arg(va, RESMAN_ITEM*);
++
++ return (IMG_BOOL)(psCurItem == psItem);
++}
++
++
++IMG_INTERNAL PVRSRV_ERROR ResManFindResourceByPtr(PRESMAN_CONTEXT psResManContext,
++ RESMAN_ITEM *psItem)
++{
++ PVRSRV_ERROR eResult;
++
++ PVR_ASSERT(psResManContext != IMG_NULL);
++ PVR_ASSERT(psItem != IMG_NULL);
++
++ if ((psItem == IMG_NULL) || (psResManContext == IMG_NULL))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "ResManFindResourceByPtr: invalid parameter"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++#ifdef DEBUG
++ PVR_ASSERT(psItem->ui32Signature == RESMAN_SIGNATURE);
++#endif
++
++
++ ACQUIRE_SYNC_OBJ;
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "FindResourceByPtr: psItem=%08X, psItem->psNext=%08X",
++ psItem, psItem->psNext));
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "FindResourceByPtr: Resource Ctx 0x%x, Type 0x%x, Addr 0x%x, "
++ "Param 0x%x, FnCall %08X, Flags 0x%x",
++ psResManContext,
++ psItem->ui32ResType, (IMG_UINT32)psItem->pvParam, psItem->ui32Param,
++ psItem->pfnFreeResource, psItem->ui32Flags));
++
++
++ if(List_RESMAN_ITEM_IMG_BOOL_Any_va(psResManContext->psResItemList,
++ ResManFindResourceByPtr_AnyVaCb,
++ psItem))
++ {
++ eResult = PVRSRV_OK;
++ }
++ else
++ {
++ eResult = PVRSRV_ERROR_NOT_OWNER;
++ }
++
++
++ RELEASE_SYNC_OBJ;
++
++ return eResult;
++}
++
++static PVRSRV_ERROR FreeResourceByPtr(RESMAN_ITEM *psItem,
++ IMG_BOOL bExecuteCallback)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ PVR_ASSERT(psItem != IMG_NULL);
++
++ if (psItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeResourceByPtr: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++#ifdef DEBUG
++ PVR_ASSERT(psItem->ui32Signature == RESMAN_SIGNATURE);
++#endif
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "FreeResourceByPtr: psItem=%08X, psItem->psNext=%08X",
++ psItem, psItem->psNext));
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "FreeResourceByPtr: Type 0x%x, Addr 0x%x, "
++ "Param 0x%x, FnCall %08X, Flags 0x%x",
++ psItem->ui32ResType, (IMG_UINT32)psItem->pvParam, psItem->ui32Param,
++ psItem->pfnFreeResource, psItem->ui32Flags));
++
++
++ List_RESMAN_ITEM_Remove(psItem);
++
++
++
++ RELEASE_SYNC_OBJ;
++
++
++ if (bExecuteCallback)
++ {
++ eError = psItem->pfnFreeResource(psItem->pvParam, psItem->ui32Param);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeResourceByPtr: ERROR calling FreeResource function"));
++ }
++ }
++
++
++ ACQUIRE_SYNC_OBJ;
++
++
++ if(OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_ITEM), psItem, IMG_NULL) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "FreeResourceByPtr: ERROR freeing resource list item memory"));
++ eError = PVRSRV_ERROR_GENERIC;
++ }
++
++ return(eError);
++}
++
++IMG_VOID* FreeResourceByCriteria_AnyVaCb(RESMAN_ITEM *psCurItem, va_list va)
++{
++ IMG_UINT32 ui32SearchCriteria;
++ IMG_UINT32 ui32ResType;
++ IMG_PVOID pvParam;
++ IMG_UINT32 ui32Param;
++
++ ui32SearchCriteria = va_arg(va, IMG_UINT32);
++ ui32ResType = va_arg(va, IMG_UINT32);
++ pvParam = va_arg(va, IMG_PVOID);
++ ui32Param = va_arg(va, IMG_UINT32);
++
++
++ if(
++
++ (((ui32SearchCriteria & RESMAN_CRITERIA_RESTYPE) == 0UL) ||
++ (psCurItem->ui32ResType == ui32ResType))
++ &&
++
++ (((ui32SearchCriteria & RESMAN_CRITERIA_PVOID_PARAM) == 0UL) ||
++ (psCurItem->pvParam == pvParam))
++ &&
++
++ (((ui32SearchCriteria & RESMAN_CRITERIA_UI32_PARAM) == 0UL) ||
++ (psCurItem->ui32Param == ui32Param))
++ )
++ {
++ return psCurItem;
++ }
++ else
++ {
++ return IMG_NULL;
++ }
++}
++
++static PVRSRV_ERROR FreeResourceByCriteria(PRESMAN_CONTEXT psResManContext,
++ IMG_UINT32 ui32SearchCriteria,
++ IMG_UINT32 ui32ResType,
++ IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param,
++ IMG_BOOL bExecuteCallback)
++{
++ PRESMAN_ITEM psCurItem;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++
++
++ while((psCurItem = (PRESMAN_ITEM)
++ List_RESMAN_ITEM_Any_va(psResManContext->psResItemList,
++ FreeResourceByCriteria_AnyVaCb,
++ ui32SearchCriteria,
++ ui32ResType,
++ pvParam,
++ ui32Param)) != IMG_NULL
++ && eError == PVRSRV_OK)
++ {
++ eError = FreeResourceByPtr(psCurItem, bExecuteCallback);
++ }
++
++ return eError;
++}
++
++
++#ifdef DEBUG
++static IMG_VOID ValidateResList(PRESMAN_LIST psResList)
++{
++ PRESMAN_ITEM psCurItem, *ppsThisItem;
++ PRESMAN_CONTEXT psCurContext, *ppsThisContext;
++
++
++ if (psResList == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "ValidateResList: resman not initialised yet"));
++ return;
++ }
++
++ psCurContext = psResList->psContextList;
++ ppsThisContext = &psResList->psContextList;
++
++
++ while(psCurContext != IMG_NULL)
++ {
++
++ PVR_ASSERT(psCurContext->ui32Signature == RESMAN_SIGNATURE);
++ if (psCurContext->ppsThis != ppsThisContext)
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "psCC=%08X psCC->ppsThis=%08X psCC->psNext=%08X ppsTC=%08X",
++ psCurContext, psCurContext->ppsThis,
++ psCurContext->psNext, ppsThisContext));
++ PVR_ASSERT(psCurContext->ppsThis == ppsThisContext);
++ }
++
++
++ psCurItem = psCurContext->psResItemList;
++ ppsThisItem = &psCurContext->psResItemList;
++ while(psCurItem != IMG_NULL)
++ {
++
++ PVR_ASSERT(psCurItem->ui32Signature == RESMAN_SIGNATURE);
++ if (psCurItem->ppsThis != ppsThisItem)
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "psCurItem=%08X psCurItem->ppsThis=%08X psCurItem->psNext=%08X ppsThisItem=%08X",
++ psCurItem, psCurItem->ppsThis, psCurItem->psNext, ppsThisItem));
++ PVR_ASSERT(psCurItem->ppsThis == ppsThisItem);
++ }
++
++
++ ppsThisItem = &psCurItem->psNext;
++ psCurItem = psCurItem->psNext;
++ }
++
++
++ ppsThisContext = &psCurContext->psNext;
++ psCurContext = psCurContext->psNext;
++ }
++}
++#endif
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/.gitignore b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/.gitignore
+new file mode 100644
+index 0000000..2f89523
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/.gitignore
+@@ -0,0 +1,5 @@
++bin_pc_i686*
++tmp_pc_i686*
++host_pc_i686*
++*.o
++*.o.cmd
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/mmu.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/mmu.c
+new file mode 100644
+index 0000000..7408661
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/mmu.c
+@@ -0,0 +1,2776 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "hash.h"
++#include "ra.h"
++#include "pdump_km.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "mmu.h"
++#include "sgxconfig.h"
++
++#define UINT32_MAX_VALUE 0xFFFFFFFFUL
++
++#define SGX_MAX_PD_ENTRIES (1<<(SGX_FEATURE_ADDRESS_SPACE_SIZE - SGX_MMU_PT_SHIFT - SGX_MMU_PAGE_SHIFT))
++
++typedef struct _MMU_PT_INFO_
++{
++
++ IMG_VOID *hPTPageOSMemHandle;
++ IMG_CPU_VIRTADDR PTPageCpuVAddr;
++ IMG_UINT32 ui32ValidPTECount;
++} MMU_PT_INFO;
++
++struct _MMU_CONTEXT_
++{
++
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++
++ IMG_CPU_VIRTADDR pvPDCpuVAddr;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++
++ IMG_VOID *hPDOSMemHandle;
++
++
++ MMU_PT_INFO *apsPTInfoList[SGX_MAX_PD_ENTRIES];
++
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++
++#if defined(PDUMP)
++ IMG_UINT32 ui32PDumpMMUContextID;
++#endif
++
++ struct _MMU_CONTEXT_ *psNext;
++};
++
++struct _MMU_HEAP_
++{
++
++ MMU_CONTEXT *psMMUContext;
++
++
++
++
++ IMG_UINT32 ui32PDBaseIndex;
++
++ IMG_UINT32 ui32PageTableCount;
++
++ IMG_UINT32 ui32PTETotal;
++
++ IMG_UINT32 ui32PDEPageSizeCtrl;
++
++
++
++
++ IMG_UINT32 ui32DataPageSize;
++
++ IMG_UINT32 ui32DataPageBitWidth;
++
++ IMG_UINT32 ui32DataPageMask;
++
++
++
++
++ IMG_UINT32 ui32PTShift;
++
++ IMG_UINT32 ui32PTBitWidth;
++
++ IMG_UINT32 ui32PTMask;
++
++ IMG_UINT32 ui32PTSize;
++
++ IMG_UINT32 ui32PTECount;
++
++
++
++
++ IMG_UINT32 ui32PDShift;
++
++ IMG_UINT32 ui32PDBitWidth;
++
++ IMG_UINT32 ui32PDMask;
++
++
++
++ RA_ARENA *psVMArena;
++ DEV_ARENA_DESCRIPTOR *psDevArena;
++};
++
++
++
++#if defined (SUPPORT_SGX_MMU_DUMMY_PAGE)
++#define DUMMY_DATA_PAGE_SIGNATURE 0xDEADBEEF
++#endif
++
++#if defined(PDUMP)
++static IMG_VOID
++MMU_PDumpPageTables (MMU_HEAP *pMMUHeap,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_SIZE_T uSize,
++ IMG_BOOL bForUnmap,
++ IMG_HANDLE hUniqueTag);
++#endif
++
++#define PAGE_TEST 0
++#if PAGE_TEST
++static IMG_VOID PageTest(IMG_VOID* pMem, IMG_DEV_PHYADDR sDevPAddr);
++#endif
++
++#define PT_DEBUG 0
++#if PT_DEBUG
++static IMG_VOID DumpPT(MMU_PT_INFO *psPTInfoList)
++{
++ IMG_UINT32 *p = (IMG_UINT32*)psPTInfoList->PTPageCpuVAddr;
++ IMG_UINT32 i;
++
++
++ for(i = 0; i < 1024; i += 8)
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "%.8lx %.8lx %.8lx %.8lx %.8lx %.8lx %.8lx %.8lx\n",
++ p[i + 0], p[i + 1], p[i + 2], p[i + 3],
++ p[i + 4], p[i + 5], p[i + 6], p[i + 7]));
++ }
++}
++
++static IMG_VOID CheckPT(MMU_PT_INFO *psPTInfoList)
++{
++ IMG_UINT32 *p = (IMG_UINT32*) psPTInfoList->PTPageCpuVAddr;
++ IMG_UINT32 i, ui32Count = 0;
++
++
++ for(i = 0; i < 1024; i++)
++ if(p[i] & SGX_MMU_PTE_VALID)
++ ui32Count++;
++
++ if(psPTInfoList->ui32ValidPTECount != ui32Count)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "ui32ValidPTECount: %lu ui32Count: %lu\n",
++ psPTInfoList->ui32ValidPTECount, ui32Count));
++ DumpPT(psPTInfoList);
++ BUG();
++ }
++}
++#else
++static INLINE IMG_VOID DumpPT(MMU_PT_INFO *psPTInfoList)
++{
++ PVR_UNREFERENCED_PARAMETER(psPTInfoList);
++}
++
++static INLINE IMG_VOID CheckPT(MMU_PT_INFO *psPTInfoList)
++{
++ PVR_UNREFERENCED_PARAMETER(psPTInfoList);
++}
++#endif
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++IMG_VOID
++EnableHostAccess (MMU_CONTEXT *psMMUContext)
++{
++ IMG_UINT32 ui32RegVal;
++ IMG_VOID *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM;
++
++
++
++
++ ui32RegVal = OSReadHWReg(pvRegsBaseKM, EUR_CR_BIF_CTRL);
++
++ OSWriteHWReg(pvRegsBaseKM,
++ EUR_CR_BIF_CTRL,
++ ui32RegVal | EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
++
++ PDUMPREG(EUR_CR_BIF_CTRL, EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
++}
++
++IMG_VOID
++DisableHostAccess (MMU_CONTEXT *psMMUContext)
++{
++ IMG_UINT32 ui32RegVal;
++ IMG_VOID *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM;
++
++
++
++
++
++ OSWriteHWReg(pvRegsBaseKM,
++ EUR_CR_BIF_CTRL,
++ ui32RegVal & ~EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
++
++ PDUMPREG(EUR_CR_BIF_CTRL, 0);
++}
++#endif
++
++
++IMG_VOID MMU_InvalidateSystemLevelCache(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ #if defined(SGX_FEATURE_MP)
++ psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_SLCACHE;
++ #else
++
++ PVR_UNREFERENCED_PARAMETER(psDevInfo);
++ #endif
++}
++
++
++IMG_VOID MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PDCACHE;
++ #if defined(SGX_FEATURE_SYSTEM_CACHE)
++ MMU_InvalidateSystemLevelCache(psDevInfo);
++ #endif
++}
++
++
++IMG_VOID MMU_InvalidatePageTableCache(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PTCACHE;
++ #if defined(SGX_FEATURE_SYSTEM_CACHE)
++ MMU_InvalidateSystemLevelCache(psDevInfo);
++ #endif
++}
++
++
++static IMG_BOOL
++_AllocPageTableMemory (MMU_HEAP *pMMUHeap,
++ MMU_PT_INFO *psPTInfoList,
++ IMG_DEV_PHYADDR *psDevPAddr)
++{
++ IMG_DEV_PHYADDR sDevPAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++
++
++
++ if(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena == IMG_NULL)
++ {
++
++ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ pMMUHeap->ui32PTSize,
++ SGX_MMU_PAGE_SIZE,
++ (IMG_VOID **)&psPTInfoList->PTPageCpuVAddr,
++ &psPTInfoList->hPTPageOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR call to OSAllocPages failed"));
++ return IMG_FALSE;
++ }
++
++
++ if(psPTInfoList->PTPageCpuVAddr)
++ {
++ sCpuPAddr = OSMapLinToCPUPhys(psPTInfoList->PTPageCpuVAddr);
++ }
++ else
++ {
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(psPTInfoList->hPTPageOSMemHandle, 0);
++ }
++
++ sDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++ }
++ else
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++
++
++
++
++
++ if(RA_Alloc(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0,
++ &(sSysPAddr.uiAddr))!= IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR call to RA_Alloc failed"));
++ return IMG_FALSE;
++ }
++
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++
++ psPTInfoList->PTPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ &psPTInfoList->hPTPageOSMemHandle);
++ if(!psPTInfoList->PTPageCpuVAddr)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_AllocPageTableMemory: ERROR failed to map page tables"));
++ return IMG_FALSE;
++ }
++
++
++ sDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++ #if PAGE_TEST
++ PageTest(psPTInfoList->PTPageCpuVAddr, sDevPAddr);
++ #endif
++ }
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++ {
++ IMG_UINT32 *pui32Tmp;
++ IMG_UINT32 i;
++
++ pui32Tmp = (IMG_UINT32*)psPTInfoList->PTPageCpuVAddr;
++
++ for(i=0; i<pMMUHeap->ui32PTECount; i++)
++ {
++ pui32Tmp[i] = (pMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++ }
++ }
++#else
++
++ OSMemSet(psPTInfoList->PTPageCpuVAddr, 0, pMMUHeap->ui32PTSize);
++#endif
++
++
++ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psPTInfoList->PTPageCpuVAddr, pMMUHeap->ui32PTSize, PDUMP_PT_UNIQUETAG);
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, psPTInfoList->PTPageCpuVAddr, pMMUHeap->ui32PTSize, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++
++ *psDevPAddr = sDevPAddr;
++
++ return IMG_TRUE;
++}
++
++
++static IMG_VOID
++_FreePageTableMemory (MMU_HEAP *pMMUHeap, MMU_PT_INFO *psPTInfoList)
++{
++
++
++
++
++ if(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena == IMG_NULL)
++ {
++
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ pMMUHeap->ui32PTSize,
++ psPTInfoList->PTPageCpuVAddr,
++ psPTInfoList->hPTPageOSMemHandle);
++ }
++ else
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++
++ sCpuPAddr = OSMapLinToCPUPhys(psPTInfoList->PTPageCpuVAddr);
++ sSysPAddr = SysCpuPAddrToSysPAddr (sCpuPAddr);
++
++
++
++ OSUnMapPhysToLin(psPTInfoList->PTPageCpuVAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ psPTInfoList->hPTPageOSMemHandle);
++
++
++
++
++ RA_Free (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++ }
++}
++
++
++
++static IMG_VOID
++_DeferredFreePageTable (MMU_HEAP *pMMUHeap, IMG_UINT32 ui32PTIndex, IMG_BOOL bOSFreePT)
++{
++ IMG_UINT32 *pui32PDEntry;
++ IMG_UINT32 i;
++ IMG_UINT32 ui32PDIndex;
++ SYS_DATA *psSysData;
++ MMU_PT_INFO **ppsPTInfoList;
++
++ SysAcquireData(&psSysData);
++
++
++ ui32PDIndex = pMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++ {
++#if PT_DEBUG
++ if(ppsPTInfoList[ui32PTIndex] && ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount > 0)
++ {
++ DumpPT(ppsPTInfoList[ui32PTIndex]);
++
++ }
++#endif
++
++
++ PVR_ASSERT(ppsPTInfoList[ui32PTIndex] == IMG_NULL || ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount == 0);
++ }
++
++
++ PDUMPCOMMENT("Free page table (page count == %08X)", pMMUHeap->ui32PageTableCount);
++ if(ppsPTInfoList[ui32PTIndex] && ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr)
++ {
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr, pMMUHeap->ui32PTSize, PDUMP_PT_UNIQUETAG);
++ }
++
++ switch(pMMUHeap->psDevArena->DevMemHeapType)
++ {
++ case DEVICE_MEMORY_HEAP_SHARED :
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
++ {
++
++ MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
++
++ while(psMMUContext)
++ {
++
++ pui32PDEntry = (IMG_UINT32*)psMMUContext->pvPDCpuVAddr;
++ pui32PDEntry += ui32PDIndex;
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ pui32PDEntry[ui32PTIndex] = (psMMUContext->psDevInfo->sDummyPTDevPAddr.uiAddr
++ >>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_PAGE_SIZE_4K
++ | SGX_MMU_PDE_VALID;
++#else
++
++ if(bOSFreePT)
++ {
++ pui32PDEntry[ui32PTIndex] = 0;
++ }
++#endif
++
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID*)&pui32PDEntry[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++
++ psMMUContext = psMMUContext->psNext;
++ }
++ break;
++ }
++ case DEVICE_MEMORY_HEAP_PERCONTEXT :
++ case DEVICE_MEMORY_HEAP_KERNEL :
++ {
++
++ pui32PDEntry = (IMG_UINT32*)pMMUHeap->psMMUContext->pvPDCpuVAddr;
++ pui32PDEntry += ui32PDIndex;
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ pui32PDEntry[ui32PTIndex] = (pMMUHeap->psMMUContext->psDevInfo->sDummyPTDevPAddr.uiAddr
++ >>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_PAGE_SIZE_4K
++ | SGX_MMU_PDE_VALID;
++#else
++
++ if(bOSFreePT)
++ {
++ pui32PDEntry[ui32PTIndex] = 0;
++ }
++#endif
++
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID*)&pui32PDEntry[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_DeferredFreePagetable: ERROR invalid heap type"));
++ return;
++ }
++ }
++
++
++ if(ppsPTInfoList[ui32PTIndex] != IMG_NULL)
++ {
++ if(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr != IMG_NULL)
++ {
++ IMG_PUINT32 pui32Tmp;
++
++ pui32Tmp = (IMG_UINT32*)ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr;
++
++
++ for(i=0;
++ (i<pMMUHeap->ui32PTETotal) && (i<pMMUHeap->ui32PTECount);
++ i++)
++ {
++ pui32Tmp[i] = 0;
++ }
++
++
++
++ if(bOSFreePT)
++ {
++ _FreePageTableMemory(pMMUHeap, ppsPTInfoList[ui32PTIndex]);
++ }
++
++
++
++
++ pMMUHeap->ui32PTETotal -= i;
++ }
++ else
++ {
++
++ pMMUHeap->ui32PTETotal -= pMMUHeap->ui32PTECount;
++ }
++
++ if(bOSFreePT)
++ {
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(MMU_PT_INFO),
++ ppsPTInfoList[ui32PTIndex],
++ IMG_NULL);
++ ppsPTInfoList[ui32PTIndex] = IMG_NULL;
++ }
++ }
++ else
++ {
++
++ pMMUHeap->ui32PTETotal -= pMMUHeap->ui32PTECount;
++ }
++
++ PDUMPCOMMENT("Finished free page table (page count == %08X)", pMMUHeap->ui32PageTableCount);
++}
++
++static IMG_VOID
++_DeferredFreePageTables (MMU_HEAP *pMMUHeap)
++{
++ IMG_UINT32 i;
++
++ for(i=0; i<pMMUHeap->ui32PageTableCount; i++)
++ {
++ _DeferredFreePageTable(pMMUHeap, i, IMG_TRUE);
++ }
++ MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
++}
++
++
++static IMG_BOOL
++_DeferredAllocPagetables(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size)
++{
++ IMG_UINT32 ui32PageTableCount;
++ IMG_UINT32 ui32PDIndex;
++ IMG_UINT32 i;
++ IMG_UINT32 *pui32PDEntry;
++ MMU_PT_INFO **ppsPTInfoList;
++ SYS_DATA *psSysData;
++ IMG_DEV_VIRTADDR sHighDevVAddr;
++
++
++#if SGX_FEATURE_ADDRESS_SPACE_SIZE < 32
++ PVR_ASSERT(DevVAddr.uiAddr < (1<<SGX_FEATURE_ADDRESS_SPACE_SIZE));
++#endif
++
++
++ SysAcquireData(&psSysData);
++
++
++ ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++
++
++ if((UINT32_MAX_VALUE - DevVAddr.uiAddr)
++ < (ui32Size + pMMUHeap->ui32DataPageMask + pMMUHeap->ui32PTMask))
++ {
++
++ sHighDevVAddr.uiAddr = UINT32_MAX_VALUE;
++ }
++ else
++ {
++ sHighDevVAddr.uiAddr = DevVAddr.uiAddr
++ + ui32Size
++ + pMMUHeap->ui32DataPageMask
++ + pMMUHeap->ui32PTMask;
++ }
++
++ ui32PageTableCount = sHighDevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++ ui32PageTableCount -= ui32PDIndex;
++
++
++ pui32PDEntry = (IMG_UINT32*)pMMUHeap->psMMUContext->pvPDCpuVAddr;
++ pui32PDEntry += ui32PDIndex;
++
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++ PDUMPCOMMENT("Alloc page table (page count == %08X)", ui32PageTableCount);
++ PDUMPCOMMENT("Page directory mods (page count == %08X)", ui32PageTableCount);
++
++
++ for(i=0; i<ui32PageTableCount; i++)
++ {
++ if(ppsPTInfoList[i] == IMG_NULL)
++ {
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (MMU_PT_INFO),
++ (IMG_VOID **)&ppsPTInfoList[i], IMG_NULL,
++ "MMU Page Table Info");
++ if (ppsPTInfoList[i] == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to OSAllocMem failed"));
++ return IMG_FALSE;
++ }
++ OSMemSet (ppsPTInfoList[i], 0, sizeof(MMU_PT_INFO));
++ }
++
++ if(ppsPTInfoList[i]->hPTPageOSMemHandle == IMG_NULL
++ && ppsPTInfoList[i]->PTPageCpuVAddr == IMG_NULL)
++ {
++ IMG_DEV_PHYADDR sDevPAddr;
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++ IMG_UINT32 *pui32Tmp;
++ IMG_UINT32 j;
++#else
++
++ PVR_ASSERT(pui32PDEntry[i] == 0);
++#endif
++
++ if(_AllocPageTableMemory (pMMUHeap, ppsPTInfoList[i], &sDevPAddr) != IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to _AllocPageTableMemory failed"));
++ return IMG_FALSE;
++ }
++
++ switch(pMMUHeap->psDevArena->DevMemHeapType)
++ {
++ case DEVICE_MEMORY_HEAP_SHARED :
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
++ {
++
++ MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
++
++ while(psMMUContext)
++ {
++
++ pui32PDEntry = (IMG_UINT32*)psMMUContext->pvPDCpuVAddr;
++ pui32PDEntry += ui32PDIndex;
++
++
++ pui32PDEntry[i] = (sDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | pMMUHeap->ui32PDEPageSizeCtrl
++ | SGX_MMU_PDE_VALID;
++
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID*)&pui32PDEntry[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++
++ psMMUContext = psMMUContext->psNext;
++ }
++ break;
++ }
++ case DEVICE_MEMORY_HEAP_PERCONTEXT :
++ case DEVICE_MEMORY_HEAP_KERNEL :
++ {
++
++ pui32PDEntry[i] = (sDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | pMMUHeap->ui32PDEPageSizeCtrl
++ | SGX_MMU_PDE_VALID;
++
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID*)&pui32PDEntry[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR invalid heap type"));
++ return IMG_FALSE;
++ }
++ }
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++
++
++
++
++ MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
++#endif
++ }
++ else
++ {
++
++ PVR_ASSERT(pui32PDEntry[i] != 0);
++ }
++ }
++
++ #if defined(SGX_FEATURE_SYSTEM_CACHE)
++ MMU_InvalidateSystemLevelCache(pMMUHeap->psMMUContext->psDevInfo);
++ #endif
++
++ return IMG_TRUE;
++}
++
++
++PVRSRV_ERROR
++MMU_Initialise (PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT **ppsMMUContext, IMG_DEV_PHYADDR *psPDDevPAddr)
++{
++ IMG_UINT32 *pui32Tmp;
++ IMG_UINT32 i;
++ IMG_CPU_VIRTADDR pvPDCpuVAddr;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++ MMU_CONTEXT *psMMUContext;
++ IMG_HANDLE hPDOSMemHandle;
++ SYS_DATA *psSysData;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Initialise"));
++
++ SysAcquireData(&psSysData);
++
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (MMU_CONTEXT),
++ (IMG_VOID **)&psMMUContext, IMG_NULL,
++ "MMU Context");
++ if (psMMUContext == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocMem failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ OSMemSet (psMMUContext, 0, sizeof(MMU_CONTEXT));
++
++
++ psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++ psMMUContext->psDevInfo = psDevInfo;
++
++
++ psMMUContext->psDeviceNode = psDeviceNode;
++
++
++ if(psDeviceNode->psLocalDevMemArena == IMG_NULL)
++ {
++ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ &pvPDCpuVAddr,
++ &hPDOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if(pvPDCpuVAddr)
++ {
++ sCpuPAddr = OSMapLinToCPUPhys(pvPDCpuVAddr);
++ }
++ else
++ {
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(hPDOSMemHandle, 0);
++ }
++ sPDDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++ #if PAGE_TEST
++ PageTest(pvPDCpuVAddr, sPDDevPAddr);
++ #endif
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ if(!psDevInfo->pvMMUContextList)
++ {
++
++ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ &psDevInfo->pvDummyPTPageCpuVAddr,
++ &psDevInfo->hDummyPTPageOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if(psDevInfo->pvDummyPTPageCpuVAddr)
++ {
++ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->pvDummyPTPageCpuVAddr);
++ }
++ else
++ {
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hDummyPTPageOSMemHandle, 0);
++ }
++ psDevInfo->sDummyPTDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++
++ if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ &psDevInfo->pvDummyDataPageCpuVAddr,
++ &psDevInfo->hDummyDataPageOSMemHandle) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if(psDevInfo->pvDummyDataPageCpuVAddr)
++ {
++ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->pvDummyDataPageCpuVAddr);
++ }
++ else
++ {
++ sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hDummyDataPageOSMemHandle, 0);
++ }
++ psDevInfo->sDummyDataDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++ }
++#endif
++ }
++ else
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++
++
++ if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0,
++ &(sSysPAddr.uiAddr))!= IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++ sPDDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
++ pvPDCpuVAddr = OSMapPhysToLin(sCpuPAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ &hPDOSMemHandle);
++ if(!pvPDCpuVAddr)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ #if PAGE_TEST
++ PageTest(pvPDCpuVAddr, sPDDevPAddr);
++ #endif
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ if(!psDevInfo->pvMMUContextList)
++ {
++
++ if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0,
++ &(sSysPAddr.uiAddr))!= IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++ psDevInfo->sDummyPTDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
++ psDevInfo->pvDummyPTPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ &psDevInfo->hDummyPTPageOSMemHandle);
++ if(!psDevInfo->pvDummyPTPageCpuVAddr)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0,
++ &(sSysPAddr.uiAddr))!= IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++ psDevInfo->sDummyDataDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
++ psDevInfo->pvDummyDataPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ &psDevInfo->hDummyDataPageOSMemHandle);
++ if(!psDevInfo->pvDummyDataPageCpuVAddr)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++#endif
++ }
++
++
++ PDUMPCOMMENT("Alloc page directory");
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ EnableHostAccess(psMMUContext);
++#endif
++
++ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PD_UNIQUETAG);
++
++ if (pvPDCpuVAddr)
++ {
++ pui32Tmp = (IMG_UINT32 *)pvPDCpuVAddr;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: pvPDCpuVAddr invalid"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ for(i=0; i<SGX_MMU_PD_SIZE; i++)
++ {
++ pui32Tmp[i] = (psDevInfo->sDummyPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_PAGE_SIZE_4K
++ | SGX_MMU_PDE_VALID;
++ }
++
++ if(!psDevInfo->pvMMUContextList)
++ {
++
++
++
++ pui32Tmp = (IMG_UINT32 *)psDevInfo->pvDummyPTPageCpuVAddr;
++ for(i=0; i<SGX_MMU_PT_SIZE; i++)
++ {
++ pui32Tmp[i] = (psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++ }
++
++ PDUMPCOMMENT("Dummy Page table contents");
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++
++
++ pui32Tmp = (IMG_UINT32 *)psDevInfo->pvDummyDataPageCpuVAddr;
++ for(i=0; i<(SGX_MMU_PAGE_SIZE/4); i++)
++ {
++ pui32Tmp[i] = DUMMY_DATA_PAGE_SIGNATURE;
++ }
++
++ PDUMPCOMMENT("Dummy Data Page contents");
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pvDummyDataPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ }
++#else
++
++ for(i=0; i<SGX_MMU_PD_SIZE; i++)
++ {
++
++ pui32Tmp[i] = 0;
++ }
++#endif
++
++
++ PDUMPCOMMENT("Page directory contents");
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++
++#if defined(PDUMP)
++ if(PDumpSetMMUContext(PVRSRV_DEVICE_TYPE_SGX,
++ "SGXMEM",
++ &psMMUContext->ui32PDumpMMUContextID,
++ 2,
++ PDUMP_PT_UNIQUETAG,
++ pvPDCpuVAddr) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to PDumpSetMMUContext failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++#endif
++
++
++ psMMUContext->pvPDCpuVAddr = pvPDCpuVAddr;
++ psMMUContext->sPDDevPAddr = sPDDevPAddr;
++ psMMUContext->hPDOSMemHandle = hPDOSMemHandle;
++
++
++ *ppsMMUContext = psMMUContext;
++
++
++ *psPDDevPAddr = sPDDevPAddr;
++
++
++ psMMUContext->psNext = (MMU_CONTEXT*)psDevInfo->pvMMUContextList;
++ psDevInfo->pvMMUContextList = (IMG_VOID*)psMMUContext;
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ DisableHostAccess(psMMUContext);
++#endif
++
++ return PVRSRV_OK;
++}
++
++IMG_VOID
++MMU_Finalise (MMU_CONTEXT *psMMUContext)
++{
++ IMG_UINT32 *pui32Tmp, i;
++ SYS_DATA *psSysData;
++ MMU_CONTEXT **ppsMMUContext;
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psMMUContext->psDevInfo;
++ MMU_CONTEXT *psMMUContextList = (MMU_CONTEXT*)psDevInfo->pvMMUContextList;
++#endif
++
++ SysAcquireData(&psSysData);
++
++
++ PDUMPCLEARMMUCONTEXT(PVRSRV_DEVICE_TYPE_SGX, "SGXMEM", psMMUContext->ui32PDumpMMUContextID, 2);
++
++
++ PDUMPCOMMENT("Free page directory");
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psMMUContext->pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pvDummyDataPageCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++#endif
++
++ pui32Tmp = (IMG_UINT32 *)psMMUContext->pvPDCpuVAddr;
++
++
++ for(i=0; i<SGX_MMU_PD_SIZE; i++)
++ {
++
++ pui32Tmp[i] = 0;
++ }
++
++
++
++
++
++ if(psMMUContext->psDeviceNode->psLocalDevMemArena == IMG_NULL)
++ {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ psMMUContext->pvPDCpuVAddr,
++ psMMUContext->hPDOSMemHandle);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ if(!psMMUContextList->psNext)
++ {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ psDevInfo->pvDummyPTPageCpuVAddr,
++ psDevInfo->hDummyPTPageOSMemHandle);
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ psDevInfo->pvDummyDataPageCpuVAddr,
++ psDevInfo->hDummyDataPageOSMemHandle);
++ }
++#endif
++ }
++ else
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++
++ sCpuPAddr = OSMapLinToCPUPhys(psMMUContext->pvPDCpuVAddr);
++ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
++
++
++ OSUnMapPhysToLin(psMMUContext->pvPDCpuVAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ psMMUContext->hPDOSMemHandle);
++
++ RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ if(!psMMUContextList->psNext)
++ {
++
++ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->pvDummyPTPageCpuVAddr);
++ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
++
++
++ OSUnMapPhysToLin(psDevInfo->pvDummyPTPageCpuVAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ psDevInfo->hDummyPTPageOSMemHandle);
++
++ RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++
++
++ sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->pvDummyDataPageCpuVAddr);
++ sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
++
++
++ OSUnMapPhysToLin(psDevInfo->pvDummyDataPageCpuVAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ psDevInfo->hDummyDataPageOSMemHandle);
++
++ RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++ }
++#endif
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Finalise"));
++
++
++ ppsMMUContext = (MMU_CONTEXT**)&psMMUContext->psDevInfo->pvMMUContextList;
++ while(*ppsMMUContext)
++ {
++ if(*ppsMMUContext == psMMUContext)
++ {
++
++ *ppsMMUContext = psMMUContext->psNext;
++ break;
++ }
++
++
++ ppsMMUContext = &((*ppsMMUContext)->psNext);
++ }
++
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_CONTEXT), psMMUContext, IMG_NULL);
++
++}
++
++
++IMG_VOID
++MMU_InsertHeap(MMU_CONTEXT *psMMUContext, MMU_HEAP *psMMUHeap)
++{
++ IMG_UINT32 *pui32PDCpuVAddr = (IMG_UINT32 *) psMMUContext->pvPDCpuVAddr;
++ IMG_UINT32 *pui32KernelPDCpuVAddr = (IMG_UINT32 *) psMMUHeap->psMMUContext->pvPDCpuVAddr;
++ IMG_UINT32 ui32PDEntry;
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
++#endif
++
++
++ pui32PDCpuVAddr += psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
++ pui32KernelPDCpuVAddr += psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
++
++
++
++
++ PDUMPCOMMENT("Page directory shared heap range copy");
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ EnableHostAccess(psMMUContext);
++#endif
++
++ for (ui32PDEntry = 0; ui32PDEntry < psMMUHeap->ui32PageTableCount; ui32PDEntry++)
++ {
++#if !defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ PVR_ASSERT(pui32PDCpuVAddr[ui32PDEntry] == 0);
++#endif
++
++
++ pui32PDCpuVAddr[ui32PDEntry] = pui32KernelPDCpuVAddr[ui32PDEntry];
++ if (pui32PDCpuVAddr[ui32PDEntry])
++ {
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID *) &pui32PDCpuVAddr[ui32PDEntry], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ bInvalidateDirectoryCache = IMG_TRUE;
++#endif
++ }
++ }
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ DisableHostAccess(psMMUContext);
++#endif
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ if (bInvalidateDirectoryCache)
++ {
++
++
++
++
++ MMU_InvalidateDirectoryCache(psMMUContext->psDevInfo);
++ }
++#endif
++}
++
++
++static IMG_VOID
++MMU_UnmapPagesAndFreePTs (MMU_HEAP *psMMUHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_UINT32 ui32PageCount,
++ IMG_HANDLE hUniqueTag)
++{
++ IMG_DEV_VIRTADDR sTmpDevVAddr;
++ IMG_UINT32 i;
++ IMG_UINT32 ui32PDIndex;
++ IMG_UINT32 ui32PTIndex;
++ IMG_UINT32 *pui32Tmp;
++ IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
++
++#if !defined (PDUMP)
++ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++ sTmpDevVAddr = sDevVAddr;
++
++ for(i=0; i<ui32PageCount; i++)
++ {
++ MMU_PT_INFO **ppsPTInfoList;
++
++
++ ui32PDIndex = sTmpDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
++
++
++ ppsPTInfoList = &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++ {
++
++ ui32PTIndex = (sTmpDevVAddr.uiAddr & psMMUHeap->ui32PTMask) >> psMMUHeap->ui32PTShift;
++
++
++ if (!ppsPTInfoList[0])
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "MMU_UnmapPagesAndFreePTs: Invalid PT for alloc at VAddr:0x%08lX (VaddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,i, ui32PDIndex, ui32PTIndex ));
++
++
++ sTmpDevVAddr.uiAddr += psMMUHeap->ui32DataPageSize;
++
++
++ continue;
++ }
++
++
++ pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
++
++
++ if (!pui32Tmp)
++ {
++ continue;
++ }
++
++ CheckPT(ppsPTInfoList[0]);
++
++
++ if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
++ {
++ ppsPTInfoList[0]->ui32ValidPTECount--;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "MMU_UnmapPagesAndFreePTs: Page is already invalid for alloc at VAddr:0x%08lX (VAddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,i, ui32PDIndex, ui32PTIndex ));
++ }
++
++
++ PVR_ASSERT((IMG_INT32)ppsPTInfoList[0]->ui32ValidPTECount >= 0);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ pui32Tmp[ui32PTIndex] = (psMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++#else
++
++ pui32Tmp[ui32PTIndex] = 0;
++#endif
++
++ CheckPT(ppsPTInfoList[0]);
++ }
++
++
++
++ if (ppsPTInfoList[0] && ppsPTInfoList[0]->ui32ValidPTECount == 0)
++ {
++ _DeferredFreePageTable(psMMUHeap, ui32PDIndex - psMMUHeap->ui32PDBaseIndex, IMG_TRUE);
++ bInvalidateDirectoryCache = IMG_TRUE;
++ }
++
++
++ sTmpDevVAddr.uiAddr += psMMUHeap->ui32DataPageSize;
++ }
++
++ if(bInvalidateDirectoryCache)
++ {
++ MMU_InvalidateDirectoryCache(psMMUHeap->psMMUContext->psDevInfo);
++ }
++ else
++ {
++ MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
++ }
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables(psMMUHeap,
++ sDevVAddr,
++ psMMUHeap->ui32DataPageSize * ui32PageCount,
++ IMG_TRUE,
++ hUniqueTag);
++#endif
++}
++
++
++IMG_VOID MMU_FreePageTables(IMG_PVOID pvMMUHeap,
++ IMG_SIZE_T ui32Start,
++ IMG_SIZE_T ui32End,
++ IMG_HANDLE hUniqueTag)
++{
++ MMU_HEAP *pMMUHeap = (MMU_HEAP*)pvMMUHeap;
++ IMG_DEV_VIRTADDR Start;
++
++ Start.uiAddr = ui32Start;
++
++ MMU_UnmapPagesAndFreePTs(pMMUHeap, Start, (ui32End - ui32Start) >> pMMUHeap->ui32PTShift, hUniqueTag);
++}
++
++MMU_HEAP *
++MMU_Create (MMU_CONTEXT *psMMUContext,
++ DEV_ARENA_DESCRIPTOR *psDevArena,
++ RA_ARENA **ppsVMArena)
++{
++ MMU_HEAP *pMMUHeap;
++ IMG_UINT32 ui32ScaleSize;
++
++ PVR_ASSERT (psDevArena != IMG_NULL);
++
++ if (psDevArena == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Create: invalid parameter"));
++ return IMG_NULL;
++ }
++
++ OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof (MMU_HEAP),
++ (IMG_VOID **)&pMMUHeap, IMG_NULL,
++ "MMU Heap");
++ if (pMMUHeap == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to OSAllocMem failed"));
++ return IMG_NULL;
++ }
++
++ pMMUHeap->psMMUContext = psMMUContext;
++ pMMUHeap->psDevArena = psDevArena;
++
++
++
++
++ switch(pMMUHeap->psDevArena->ui32DataPageSize)
++ {
++ case 0x1000:
++ ui32ScaleSize = 0;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_4K;
++ break;
++#if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE)
++ case 0x4000:
++ ui32ScaleSize = 2;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_16K;
++ break;
++ case 0x10000:
++ ui32ScaleSize = 4;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_64K;
++ break;
++ case 0x40000:
++ ui32ScaleSize = 6;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_256K;
++ break;
++ case 0x100000:
++ ui32ScaleSize = 8;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_1M;
++ break;
++ case 0x400000:
++ ui32ScaleSize = 10;
++ pMMUHeap->ui32PDEPageSizeCtrl = SGX_MMU_PDE_PAGE_SIZE_4M;
++ break;
++#endif
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Create: invalid data page size"));
++ goto ErrorFreeHeap;
++ }
++
++
++ pMMUHeap->ui32DataPageSize = psDevArena->ui32DataPageSize;
++ pMMUHeap->ui32DataPageBitWidth = SGX_MMU_PAGE_SHIFT + ui32ScaleSize;
++ pMMUHeap->ui32DataPageMask = pMMUHeap->ui32DataPageSize - 1;
++
++ pMMUHeap->ui32PTShift = pMMUHeap->ui32DataPageBitWidth;
++ pMMUHeap->ui32PTBitWidth = SGX_MMU_PT_SHIFT - ui32ScaleSize;
++ pMMUHeap->ui32PTMask = SGX_MMU_PT_MASK & (SGX_MMU_PT_MASK<<ui32ScaleSize);
++ pMMUHeap->ui32PTSize = (1UL<<pMMUHeap->ui32PTBitWidth) * sizeof(IMG_UINT32);
++
++ if(pMMUHeap->ui32PTSize < 4 * sizeof(IMG_UINT32))
++ {
++ pMMUHeap->ui32PTSize = 4 * sizeof(IMG_UINT32);
++ }
++ pMMUHeap->ui32PTECount = pMMUHeap->ui32PTSize >> 2;
++
++
++ pMMUHeap->ui32PDShift = pMMUHeap->ui32PTBitWidth + pMMUHeap->ui32PTShift;
++ pMMUHeap->ui32PDBitWidth = SGX_FEATURE_ADDRESS_SPACE_SIZE - pMMUHeap->ui32PTBitWidth - pMMUHeap->ui32DataPageBitWidth;
++ pMMUHeap->ui32PDMask = SGX_MMU_PD_MASK & (SGX_MMU_PD_MASK>>(32-SGX_FEATURE_ADDRESS_SPACE_SIZE));
++
++
++
++
++
++ if(psDevArena->BaseDevVAddr.uiAddr > (pMMUHeap->ui32DataPageMask | pMMUHeap->ui32PTMask))
++ {
++
++
++
++ PVR_ASSERT ((psDevArena->BaseDevVAddr.uiAddr
++ & (pMMUHeap->ui32DataPageMask
++ | pMMUHeap->ui32PTMask)) == 0);
++ }
++
++
++ pMMUHeap->ui32PTETotal = pMMUHeap->psDevArena->ui32Size >> pMMUHeap->ui32PTShift;
++
++
++ pMMUHeap->ui32PDBaseIndex = (pMMUHeap->psDevArena->BaseDevVAddr.uiAddr & pMMUHeap->ui32PDMask) >> pMMUHeap->ui32PDShift;
++
++
++
++
++ pMMUHeap->ui32PageTableCount = (pMMUHeap->ui32PTETotal + pMMUHeap->ui32PTECount - 1)
++ >> pMMUHeap->ui32PTBitWidth;
++
++
++ pMMUHeap->psVMArena = RA_Create(psDevArena->pszName,
++ psDevArena->BaseDevVAddr.uiAddr,
++ psDevArena->ui32Size,
++ IMG_NULL,
++ pMMUHeap->ui32DataPageSize,
++ IMG_NULL,
++ IMG_NULL,
++ MMU_FreePageTables,
++ pMMUHeap);
++
++ if (pMMUHeap->psVMArena == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to RA_Create failed"));
++ goto ErrorFreePagetables;
++ }
++
++#if 0
++
++ if(psDevArena->ui32HeapID == SGX_TILED_HEAP_ID)
++ {
++ IMG_UINT32 ui32RegVal;
++ IMG_UINT32 ui32XTileStride;
++
++
++
++
++
++
++ ui32XTileStride = 2;
++
++ ui32RegVal = (EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK
++ & ((psDevArena->BaseDevVAddr.uiAddr>>20)
++ << EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT))
++ |(EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK
++ & (((psDevArena->BaseDevVAddr.uiAddr+psDevArena->ui32Size)>>20)
++ << EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT))
++ |(EUR_CR_BIF_TILE0_CFG_MASK
++ & (((ui32XTileStride<<1)|8) << EUR_CR_BIF_TILE0_CFG_SHIFT));
++ PDUMPREG(EUR_CR_BIF_TILE0, ui32RegVal);
++ }
++#endif
++
++
++
++ *ppsVMArena = pMMUHeap->psVMArena;
++
++ return pMMUHeap;
++
++
++ErrorFreePagetables:
++ _DeferredFreePageTables (pMMUHeap);
++
++ErrorFreeHeap:
++ OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_HEAP), pMMUHeap, IMG_NULL);
++
++
++ return IMG_NULL;
++}
++
++IMG_VOID
++MMU_Delete (MMU_HEAP *pMMUHeap)
++{
++ if (pMMUHeap != IMG_NULL)
++ {
++ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Delete"));
++
++ if(pMMUHeap->psVMArena)
++ {
++ RA_Delete (pMMUHeap->psVMArena);
++ }
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ EnableHostAccess(pMMUHeap->psMMUContext);
++#endif
++ _DeferredFreePageTables (pMMUHeap);
++#ifdef SUPPORT_SGX_MMU_BYPASS
++ DisableHostAccess(pMMUHeap->psMMUContext);
++#endif
++
++ OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_HEAP), pMMUHeap, IMG_NULL);
++
++ }
++}
++
++IMG_BOOL
++MMU_Alloc (MMU_HEAP *pMMUHeap,
++ IMG_SIZE_T uSize,
++ IMG_SIZE_T *pActualSize,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 uDevVAddrAlignment,
++ IMG_DEV_VIRTADDR *psDevVAddr)
++{
++ IMG_BOOL bStatus;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "MMU_Alloc: uSize=0x%x, flags=0x%x, align=0x%x",
++ uSize, uFlags, uDevVAddrAlignment));
++
++
++
++ if((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
++ {
++ IMG_UINTPTR_T uiAddr;
++
++ bStatus = RA_Alloc (pMMUHeap->psVMArena,
++ uSize,
++ pActualSize,
++ IMG_NULL,
++ 0,
++ uDevVAddrAlignment,
++ 0,
++ &uiAddr);
++ if(!bStatus)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: RA_Alloc of VMArena failed"));
++ return bStatus;
++ }
++
++ psDevVAddr->uiAddr = IMG_CAST_TO_DEVVADDR_UINT(uiAddr);
++ }
++
++ #ifdef SUPPORT_SGX_MMU_BYPASS
++ EnableHostAccess(pMMUHeap->psMMUContext);
++ #endif
++
++
++ bStatus = _DeferredAllocPagetables(pMMUHeap, *psDevVAddr, uSize);
++
++ #ifdef SUPPORT_SGX_MMU_BYPASS
++ DisableHostAccess(pMMUHeap->psMMUContext);
++ #endif
++
++ if (!bStatus)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: _DeferredAllocPagetables failed"));
++ if((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
++ {
++
++ RA_Free (pMMUHeap->psVMArena, psDevVAddr->uiAddr, IMG_FALSE);
++ }
++ }
++
++ return bStatus;
++}
++
++IMG_VOID
++MMU_Free (MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size)
++{
++ PVR_ASSERT (pMMUHeap != IMG_NULL);
++
++ if (pMMUHeap == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_Free: invalid parameter"));
++ return;
++ }
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "MMU_Free: mmu=%08X, dev_vaddr=%08X", pMMUHeap, DevVAddr.uiAddr));
++
++ if((DevVAddr.uiAddr >= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr) &&
++ (DevVAddr.uiAddr + ui32Size <= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr + pMMUHeap->psDevArena->ui32Size))
++ {
++ RA_Free (pMMUHeap->psVMArena, DevVAddr.uiAddr, IMG_TRUE);
++ return;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR,"MMU_Free: Couldn't find DevVAddr %08X in a DevArena",DevVAddr.uiAddr));
++}
++
++IMG_VOID
++MMU_Enable (MMU_HEAP *pMMUHeap)
++{
++ PVR_UNREFERENCED_PARAMETER(pMMUHeap);
++
++}
++
++IMG_VOID
++MMU_Disable (MMU_HEAP *pMMUHeap)
++{
++ PVR_UNREFERENCED_PARAMETER(pMMUHeap);
++
++}
++
++#if defined(PDUMP)
++static IMG_VOID
++MMU_PDumpPageTables (MMU_HEAP *pMMUHeap,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_SIZE_T uSize,
++ IMG_BOOL bForUnmap,
++ IMG_HANDLE hUniqueTag)
++{
++ IMG_UINT32 ui32NumPTEntries;
++ IMG_UINT32 ui32PTIndex;
++ IMG_UINT32 *pui32PTEntry;
++
++ MMU_PT_INFO **ppsPTInfoList;
++ IMG_UINT32 ui32PDIndex;
++ IMG_UINT32 ui32PTDumpCount;
++
++
++ ui32NumPTEntries = (uSize + pMMUHeap->ui32DataPageMask) >> pMMUHeap->ui32PTShift;
++
++
++ ui32PDIndex = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++
++ ui32PTIndex = (DevVAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift;
++
++
++ PDUMPCOMMENT("Page table mods (num entries == %08X) %s", ui32NumPTEntries, bForUnmap ? "(for unmap)" : "");
++
++
++ while(ui32NumPTEntries > 0)
++ {
++ MMU_PT_INFO* psPTInfo = *ppsPTInfoList++;
++
++ if(ui32NumPTEntries <= pMMUHeap->ui32PTECount - ui32PTIndex)
++ {
++ ui32PTDumpCount = ui32NumPTEntries;
++ }
++ else
++ {
++ ui32PTDumpCount = pMMUHeap->ui32PTECount - ui32PTIndex;
++ }
++
++ if (psPTInfo)
++ {
++ pui32PTEntry = (IMG_UINT32*)psPTInfo->PTPageCpuVAddr;
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID *) &pui32PTEntry[ui32PTIndex], ui32PTDumpCount * sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, hUniqueTag);
++ }
++
++
++ ui32NumPTEntries -= ui32PTDumpCount;
++
++
++ ui32PTIndex = 0;
++ }
++
++ PDUMPCOMMENT("Finished page table mods %s", bForUnmap ? "(for unmap)" : "");
++}
++#endif
++
++
++static IMG_VOID
++MMU_MapPage (MMU_HEAP *pMMUHeap,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_DEV_PHYADDR DevPAddr,
++ IMG_UINT32 ui32MemFlags)
++{
++ IMG_UINT32 ui32Index;
++ IMG_UINT32 *pui32Tmp;
++ IMG_UINT32 ui32MMUFlags = 0;
++ MMU_PT_INFO **ppsPTInfoList;
++
++
++ PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
++
++
++
++ if(((PVRSRV_MEM_READ|PVRSRV_MEM_WRITE) & ui32MemFlags) == (PVRSRV_MEM_READ|PVRSRV_MEM_WRITE))
++ {
++
++ ui32MMUFlags = 0;
++ }
++ else if(PVRSRV_MEM_READ & ui32MemFlags)
++ {
++
++ ui32MMUFlags |= SGX_MMU_PTE_READONLY;
++ }
++ else if(PVRSRV_MEM_WRITE & ui32MemFlags)
++ {
++
++ ui32MMUFlags |= SGX_MMU_PTE_WRITEONLY;
++ }
++
++
++ if(PVRSRV_MEM_CACHE_CONSISTENT & ui32MemFlags)
++ {
++ ui32MMUFlags |= SGX_MMU_PTE_CACHECONSISTENT;
++ }
++
++#if !defined(FIX_HW_BRN_25503)
++
++ if(PVRSRV_MEM_EDM_PROTECT & ui32MemFlags)
++ {
++ ui32MMUFlags |= SGX_MMU_PTE_EDMPROTECT;
++ }
++#endif
++
++
++
++
++
++ ui32Index = DevVAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
++
++ CheckPT(ppsPTInfoList[0]);
++
++
++ ui32Index = (DevVAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift;
++
++
++ pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
++
++#if !defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ if (pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Page is already valid for alloc at VAddr:0x%08lX PDIdx:%u PTIdx:%u",
++ DevVAddr.uiAddr,
++ DevVAddr.uiAddr >> pMMUHeap->ui32PDShift,
++ ui32Index ));
++ PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Page table entry value: 0x%08lX", pui32Tmp[ui32Index]));
++ PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Physical page to map: 0x%08lX", DevPAddr.uiAddr));
++ }
++
++ PVR_ASSERT((pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID) == 0);
++#endif
++
++
++ ppsPTInfoList[0]->ui32ValidPTECount++;
++
++
++ pui32Tmp[ui32Index] = ((DevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ & ((~pMMUHeap->ui32DataPageMask)>>SGX_MMU_PTE_ADDR_ALIGNSHIFT))
++ | SGX_MMU_PTE_VALID
++ | ui32MMUFlags;
++
++ CheckPT(ppsPTInfoList[0]);
++}
++
++
++IMG_VOID
++MMU_MapScatter (MMU_HEAP *pMMUHeap,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_SYS_PHYADDR *psSysAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag)
++{
++#if defined(PDUMP)
++ IMG_DEV_VIRTADDR MapBaseDevVAddr;
++#endif
++ IMG_UINT32 uCount, i;
++ IMG_DEV_PHYADDR DevPAddr;
++
++ PVR_ASSERT (pMMUHeap != IMG_NULL);
++
++#if defined(PDUMP)
++ MapBaseDevVAddr = DevVAddr;
++#else
++ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++ for (i=0, uCount=0; uCount<uSize; i++, uCount+=pMMUHeap->ui32DataPageSize)
++ {
++ IMG_SYS_PHYADDR sSysAddr;
++
++ sSysAddr = psSysAddr[i];
++
++
++
++ PVR_ASSERT((sSysAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
++
++ DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysAddr);
++
++ MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
++ DevVAddr.uiAddr += pMMUHeap->ui32DataPageSize;
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "MMU_MapScatter: devVAddr=%08X, SysAddr=%08X, size=0x%x/0x%x",
++ DevVAddr.uiAddr, sSysAddr.uiAddr, uCount, uSize));
++ }
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, hUniqueTag);
++#endif
++}
++
++IMG_VOID
++MMU_MapPages (MMU_HEAP *pMMUHeap,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_SYS_PHYADDR SysPAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag)
++{
++ IMG_DEV_PHYADDR DevPAddr;
++#if defined(PDUMP)
++ IMG_DEV_VIRTADDR MapBaseDevVAddr;
++#endif
++ IMG_UINT32 uCount;
++ IMG_UINT32 ui32VAdvance;
++ IMG_UINT32 ui32PAdvance;
++
++ PVR_ASSERT (pMMUHeap != IMG_NULL);
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "MMU_MapPages: mmu=%08X, devVAddr=%08X, SysPAddr=%08X, size=0x%x",
++ pMMUHeap, DevVAddr.uiAddr, SysPAddr.uiAddr, uSize));
++
++
++ ui32VAdvance = pMMUHeap->ui32DataPageSize;
++ ui32PAdvance = pMMUHeap->ui32DataPageSize;
++
++#if defined(PDUMP)
++ MapBaseDevVAddr = DevVAddr;
++#else
++ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++ DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr);
++
++
++ PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
++
++#if defined(FIX_HW_BRN_23281)
++ if(ui32MemFlags & PVRSRV_MEM_INTERLEAVED)
++ {
++ ui32VAdvance *= 2;
++ }
++#endif
++
++
++
++
++ if(ui32MemFlags & PVRSRV_MEM_DUMMY)
++ {
++ ui32PAdvance = 0;
++ }
++
++ for (uCount=0; uCount<uSize; uCount+=ui32VAdvance)
++ {
++ MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
++ DevVAddr.uiAddr += ui32VAdvance;
++ DevPAddr.uiAddr += ui32PAdvance;
++ }
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, hUniqueTag);
++#endif
++}
++
++IMG_VOID
++MMU_MapShadow (MMU_HEAP *pMMUHeap,
++ IMG_DEV_VIRTADDR MapBaseDevVAddr,
++ IMG_SIZE_T uByteSize,
++ IMG_CPU_VIRTADDR CpuVAddr,
++ IMG_HANDLE hOSMemHandle,
++ IMG_DEV_VIRTADDR *pDevVAddr,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag)
++{
++ IMG_UINT32 i;
++ IMG_UINT32 uOffset = 0;
++ IMG_DEV_VIRTADDR MapDevVAddr;
++ IMG_UINT32 ui32VAdvance;
++ IMG_UINT32 ui32PAdvance;
++
++#if !defined (PDUMP)
++ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "MMU_MapShadow: %08X, 0x%x, %08X",
++ MapBaseDevVAddr.uiAddr,
++ uByteSize,
++ CpuVAddr));
++
++
++ ui32VAdvance = pMMUHeap->ui32DataPageSize;
++ ui32PAdvance = pMMUHeap->ui32DataPageSize;
++
++
++ PVR_ASSERT(((IMG_UINT32)CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++ PVR_ASSERT(((IMG_UINT32)uByteSize & pMMUHeap->ui32DataPageMask) == 0);
++ pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr;
++
++#if defined(FIX_HW_BRN_23281)
++ if(ui32MemFlags & PVRSRV_MEM_INTERLEAVED)
++ {
++ ui32VAdvance *= 2;
++ }
++#endif
++
++
++
++
++ if(ui32MemFlags & PVRSRV_MEM_DUMMY)
++ {
++ ui32PAdvance = 0;
++ }
++
++
++ MapDevVAddr = MapBaseDevVAddr;
++ for (i=0; i<uByteSize; i+=ui32VAdvance)
++ {
++ IMG_CPU_PHYADDR CpuPAddr;
++ IMG_DEV_PHYADDR DevPAddr;
++
++ if(CpuVAddr)
++ {
++ CpuPAddr = OSMapLinToCPUPhys ((IMG_VOID *)((IMG_UINT32)CpuVAddr + uOffset));
++ }
++ else
++ {
++ CpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, uOffset);
++ }
++ DevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, CpuPAddr);
++
++
++ PVR_ASSERT((DevPAddr.uiAddr & pMMUHeap->ui32DataPageMask) == 0);
++
++ PVR_DPF ((PVR_DBG_MESSAGE,
++ "0x%x: CpuVAddr=%08X, CpuPAddr=%08X, DevVAddr=%08X, DevPAddr=%08X",
++ uOffset,
++ (IMG_UINTPTR_T)CpuVAddr + uOffset,
++ CpuPAddr.uiAddr,
++ MapDevVAddr.uiAddr,
++ DevPAddr.uiAddr));
++
++ MMU_MapPage (pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags);
++
++
++ MapDevVAddr.uiAddr += ui32VAdvance;
++ uOffset += ui32PAdvance;
++ }
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uByteSize, IMG_FALSE, hUniqueTag);
++#endif
++}
++
++
++IMG_VOID
++MMU_UnmapPages (MMU_HEAP *psMMUHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_UINT32 ui32PageCount,
++ IMG_HANDLE hUniqueTag)
++{
++ IMG_UINT32 uPageSize = psMMUHeap->ui32DataPageSize;
++ IMG_DEV_VIRTADDR sTmpDevVAddr;
++ IMG_UINT32 i;
++ IMG_UINT32 ui32PDIndex;
++ IMG_UINT32 ui32PTIndex;
++ IMG_UINT32 *pui32Tmp;
++
++#if !defined (PDUMP)
++ PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++
++ sTmpDevVAddr = sDevVAddr;
++
++ for(i=0; i<ui32PageCount; i++)
++ {
++ MMU_PT_INFO **ppsPTInfoList;
++
++
++ ui32PDIndex = sTmpDevVAddr.uiAddr >> psMMUHeap->ui32PDShift;
++
++
++ ppsPTInfoList = &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++
++ ui32PTIndex = (sTmpDevVAddr.uiAddr & psMMUHeap->ui32PTMask) >> psMMUHeap->ui32PTShift;
++
++
++ if (!ppsPTInfoList[0])
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: ERROR Invalid PT for alloc at VAddr:0x%08lX (VaddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",
++ sTmpDevVAddr.uiAddr,
++ sDevVAddr.uiAddr,
++ i,
++ ui32PDIndex,
++ ui32PTIndex));
++
++
++ sTmpDevVAddr.uiAddr += uPageSize;
++
++
++ continue;
++ }
++
++ CheckPT(ppsPTInfoList[0]);
++
++
++ pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
++
++
++ if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
++ {
++ ppsPTInfoList[0]->ui32ValidPTECount--;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Page is already invalid for alloc at VAddr:0x%08lX (VAddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",
++ sTmpDevVAddr.uiAddr,
++ sDevVAddr.uiAddr,
++ i,
++ ui32PDIndex,
++ ui32PTIndex));
++ PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Page table entry value: 0x%08lX", pui32Tmp[ui32PTIndex]));
++ }
++
++
++ PVR_ASSERT((IMG_INT32)ppsPTInfoList[0]->ui32ValidPTECount >= 0);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ pui32Tmp[ui32PTIndex] = (psMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++#else
++
++ pui32Tmp[ui32PTIndex] = 0;
++#endif
++
++ CheckPT(ppsPTInfoList[0]);
++
++
++ sTmpDevVAddr.uiAddr += uPageSize;
++ }
++
++ MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
++
++#if defined(PDUMP)
++ MMU_PDumpPageTables (psMMUHeap, sDevVAddr, uPageSize*ui32PageCount, IMG_TRUE, hUniqueTag);
++#endif
++}
++
++
++IMG_DEV_PHYADDR
++MMU_GetPhysPageAddr(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr)
++{
++ IMG_UINT32 *pui32PageTable;
++ IMG_UINT32 ui32Index;
++ IMG_DEV_PHYADDR sDevPAddr;
++ MMU_PT_INFO **ppsPTInfoList;
++
++
++ ui32Index = sDevVPageAddr.uiAddr >> pMMUHeap->ui32PDShift;
++
++
++ ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
++ if (!ppsPTInfoList[0])
++ {
++ PVR_DPF((PVR_DBG_ERROR,"MMU_GetPhysPageAddr: Not mapped in at 0x%08x", sDevVPageAddr.uiAddr));
++ sDevPAddr.uiAddr = 0;
++ return sDevPAddr;
++ }
++
++
++ ui32Index = (sDevVPageAddr.uiAddr & pMMUHeap->ui32PTMask) >> pMMUHeap->ui32PTShift;
++
++
++ pui32PageTable = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
++
++
++ sDevPAddr.uiAddr = pui32PageTable[ui32Index];
++
++
++ sDevPAddr.uiAddr &= ~(pMMUHeap->ui32DataPageMask>>SGX_MMU_PTE_ADDR_ALIGNSHIFT);
++
++
++ sDevPAddr.uiAddr <<= SGX_MMU_PTE_ADDR_ALIGNSHIFT;
++
++ return sDevPAddr;
++}
++
++
++IMG_DEV_PHYADDR MMU_GetPDDevPAddr(MMU_CONTEXT *pMMUContext)
++{
++ return (pMMUContext->sPDDevPAddr);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR SGXGetPhysPageAddrKM (IMG_HANDLE hDevMemHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_DEV_PHYADDR *pDevPAddr,
++ IMG_CPU_PHYADDR *pCpuPAddr)
++{
++ MMU_HEAP *pMMUHeap;
++ IMG_DEV_PHYADDR DevPAddr;
++
++
++
++ pMMUHeap = (MMU_HEAP*)BM_GetMMUHeap(hDevMemHeap);
++
++ DevPAddr = MMU_GetPhysPageAddr(pMMUHeap, sDevVAddr);
++ pCpuPAddr->uiAddr = DevPAddr.uiAddr;
++ pDevPAddr->uiAddr = DevPAddr.uiAddr;
++
++ return (pDevPAddr->uiAddr != 0) ? PVRSRV_OK : PVRSRV_ERROR_INVALID_PARAMS;
++}
++
++
++PVRSRV_ERROR SGXGetMMUPDAddrKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ IMG_DEV_PHYADDR *psPDDevPAddr)
++{
++ if (!hDevCookie || !hDevMemContext || !psPDDevPAddr)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ *psPDDevPAddr = ((BM_CONTEXT*)hDevMemContext)->psMMUContext->sPDDevPAddr;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++ IMG_HANDLE hOSMemHandle = IMG_NULL;
++ IMG_BYTE *pui8MemBlock = IMG_NULL;
++ IMG_SYS_PHYADDR sMemBlockSysPAddr;
++ IMG_CPU_PHYADDR sMemBlockCpuPAddr;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++
++ if(psLocalDevMemArena == IMG_NULL)
++ {
++
++ eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ 3 * SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ (IMG_VOID **)&pui8MemBlock,
++ &hOSMemHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to OSAllocPages failed"));
++ return eError;
++ }
++
++
++ if(pui8MemBlock)
++ {
++ sMemBlockCpuPAddr = OSMapLinToCPUPhys(pui8MemBlock);
++ }
++ else
++ {
++
++ sMemBlockCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, 0);
++ }
++ }
++ else
++ {
++
++
++ if(RA_Alloc(psLocalDevMemArena,
++ 3 * SGX_MMU_PAGE_SIZE,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0,
++ &(sMemBlockSysPAddr.uiAddr)) != IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ sMemBlockCpuPAddr = SysSysPAddrToCpuPAddr(sMemBlockSysPAddr);
++ pui8MemBlock = OSMapPhysToLin(sMemBlockCpuPAddr,
++ SGX_MMU_PAGE_SIZE * 3,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ &hOSMemHandle);
++ if(!pui8MemBlock)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ }
++
++ psDevInfo->hBIFResetPDOSMemHandle = hOSMemHandle;
++ psDevInfo->sBIFResetPDDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sMemBlockCpuPAddr);
++ psDevInfo->sBIFResetPTDevPAddr.uiAddr = psDevInfo->sBIFResetPDDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
++ psDevInfo->sBIFResetPageDevPAddr.uiAddr = psDevInfo->sBIFResetPTDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
++
++
++ psDevInfo->pui32BIFResetPD = (IMG_UINT32 *)pui8MemBlock;
++ psDevInfo->pui32BIFResetPT = (IMG_UINT32 *)(pui8MemBlock + SGX_MMU_PAGE_SIZE);
++
++
++ OSMemSet(psDevInfo->pui32BIFResetPD, 0, SGX_MMU_PAGE_SIZE);
++ OSMemSet(psDevInfo->pui32BIFResetPT, 0, SGX_MMU_PAGE_SIZE);
++
++ OSMemSet(pui8MemBlock + (2 * SGX_MMU_PAGE_SIZE), 0xDB, SGX_MMU_PAGE_SIZE);
++
++ return PVRSRV_OK;
++}
++
++IMG_VOID MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++ IMG_SYS_PHYADDR sPDSysPAddr;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++
++ if(psLocalDevMemArena == IMG_NULL)
++ {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ 3 * SGX_MMU_PAGE_SIZE,
++ psDevInfo->pui32BIFResetPD,
++ psDevInfo->hBIFResetPDOSMemHandle);
++ }
++ else
++ {
++ OSUnMapPhysToLin(psDevInfo->pui32BIFResetPD,
++ 3 * SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ psDevInfo->hBIFResetPDOSMemHandle);
++
++ sPDSysPAddr = SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->sBIFResetPDDevPAddr);
++ RA_Free(psLocalDevMemArena, sPDSysPAddr.uiAddr, IMG_FALSE);
++ }
++}
++
++
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++PVRSRV_ERROR WorkaroundBRN22997Alloc(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++ IMG_HANDLE hPTPageOSMemHandle = IMG_NULL;
++ IMG_HANDLE hPDPageOSMemHandle = IMG_NULL;
++ IMG_UINT32 *pui32PD = IMG_NULL;
++ IMG_UINT32 *pui32PT = IMG_NULL;
++ IMG_CPU_PHYADDR sCpuPAddr;
++ IMG_DEV_PHYADDR sPTDevPAddr;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++
++ if(psLocalDevMemArena == IMG_NULL)
++ {
++
++ eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ (IMG_VOID **)&pui32PT,
++ &hPTPageOSMemHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WorkaroundBRN22997: ERROR call to OSAllocPages failed"));
++ return eError;
++ }
++
++ eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ (IMG_VOID **)&pui32PD,
++ &hPDPageOSMemHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WorkaroundBRN22997: ERROR call to OSAllocPages failed"));
++ return eError;
++ }
++
++
++ if(pui32PT)
++ {
++ sCpuPAddr = OSMapLinToCPUPhys(pui32PT);
++ }
++ else
++ {
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(hPTPageOSMemHandle, 0);
++ }
++ sPTDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++ if(pui32PD)
++ {
++ sCpuPAddr = OSMapLinToCPUPhys(pui32PD);
++ }
++ else
++ {
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(hPDPageOSMemHandle, 0);
++ }
++ sPDDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++ }
++ else
++ {
++
++
++ if(RA_Alloc(psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE * 2,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0,
++ &(psDevInfo->sBRN22997SysPAddr.uiAddr))!= IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WorkaroundBRN22997: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(psDevInfo->sBRN22997SysPAddr);
++ pui32PT = OSMapPhysToLin(sCpuPAddr,
++ SGX_MMU_PAGE_SIZE * 2,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ &hPTPageOSMemHandle);
++ if(!pui32PT)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "WorkaroundBRN22997: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++
++
++ sPTDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++ pui32PD = pui32PT + 1024;
++ sPDDevPAddr.uiAddr = sPTDevPAddr.uiAddr + 4096;
++ }
++
++ OSMemSet(pui32PD, 0, SGX_MMU_PAGE_SIZE);
++ OSMemSet(pui32PT, 0, SGX_MMU_PAGE_SIZE);
++
++
++ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE, PDUMP_PD_UNIQUETAG);
++ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
++
++ psDevInfo->hBRN22997PTPageOSMemHandle = hPTPageOSMemHandle;
++ psDevInfo->hBRN22997PDPageOSMemHandle = hPDPageOSMemHandle;
++ psDevInfo->sBRN22997PTDevPAddr = sPTDevPAddr;
++ psDevInfo->sBRN22997PDDevPAddr = sPDDevPAddr;
++ psDevInfo->pui32BRN22997PD = pui32PD;
++ psDevInfo->pui32BRN22997PT = pui32PT;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_VOID WorkaroundBRN22997ReadHostPort(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ IMG_UINT32 *pui32PD = psDevInfo->pui32BRN22997PD;
++ IMG_UINT32 *pui32PT = psDevInfo->pui32BRN22997PT;
++ IMG_UINT32 ui32PDIndex;
++ IMG_UINT32 ui32PTIndex;
++ IMG_DEV_VIRTADDR sDevVAddr;
++ volatile IMG_UINT32 *pui32HostPort;
++ IMG_UINT32 ui32BIFCtrl;
++
++
++
++
++ pui32HostPort = (volatile IMG_UINT32*)(((IMG_UINT8*)psDevInfo->pvHostPortBaseKM) + SYS_SGX_HOSTPORT_BRN23030_OFFSET);
++
++
++ sDevVAddr.uiAddr = SYS_SGX_HOSTPORT_BASE_DEVVADDR + SYS_SGX_HOSTPORT_BRN23030_OFFSET;
++
++ ui32PDIndex = (sDevVAddr.uiAddr & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++ ui32PTIndex = (sDevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++
++ pui32PD[ui32PDIndex] = (psDevInfo->sBRN22997PTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_VALID;
++
++ pui32PT[ui32PTIndex] = (psDevInfo->sBRN22997PTDevPAddr.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
++
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0,
++ psDevInfo->sBRN22997PDDevPAddr.uiAddr);
++ PDUMPPDREG(EUR_CR_BIF_DIR_LIST_BASE0, psDevInfo->sBRN22997PDDevPAddr.uiAddr, PDUMP_PD_UNIQUETAG);
++
++
++ ui32BIFCtrl = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
++ PDUMPREG(EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl);
++ PDUMPREG(EUR_CR_BIF_CTRL, ui32BIFCtrl);
++
++
++ if (pui32HostPort)
++ {
++
++ IMG_UINT32 ui32Tmp;
++ ui32Tmp = *pui32HostPort;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"Host Port not present for BRN22997 workaround"));
++ }
++
++
++
++
++
++
++
++ PDUMPCOMMENT("RDW :SGXMEM:v4:%08lX\r\n", sDevVAddr.uiAddr);
++
++ PDUMPCOMMENT("SAB :SGXMEM:v4:%08lX 4 0 hostport.bin", sDevVAddr.uiAddr);
++
++
++ pui32PD[ui32PDIndex] = 0;
++ pui32PT[ui32PTIndex] = 0;
++
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
++ PDUMPREG(EUR_CR_BIF_CTRL, ui32BIFCtrl | EUR_CR_BIF_CTRL_INVALDC_MASK);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BIFCtrl);
++ PDUMPREG(EUR_CR_BIF_CTRL, ui32BIFCtrl);
++}
++
++
++IMG_VOID WorkaroundBRN22997Free(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pui32BRN22997PD, SGX_MMU_PAGE_SIZE, PDUMP_PD_UNIQUETAG);
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pui32BRN22997PT, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++
++
++ if(psLocalDevMemArena == IMG_NULL)
++ {
++ if (psDevInfo->pui32BRN22997PD != IMG_NULL)
++ {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ psDevInfo->pui32BRN22997PD,
++ psDevInfo->hBRN22997PDPageOSMemHandle);
++ }
++
++ if (psDevInfo->pui32BRN22997PT != IMG_NULL)
++ {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ psDevInfo->pui32BRN22997PT,
++ psDevInfo->hBRN22997PTPageOSMemHandle);
++ }
++ }
++ else
++ {
++ if (psDevInfo->pui32BRN22997PT != IMG_NULL)
++ {
++ OSUnMapPhysToLin(psDevInfo->pui32BRN22997PT,
++ SGX_MMU_PAGE_SIZE * 2,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ psDevInfo->hBRN22997PTPageOSMemHandle);
++
++
++ RA_Free(psLocalDevMemArena, psDevInfo->sBRN22997SysPAddr.uiAddr, IMG_FALSE);
++ }
++ }
++}
++#endif
++
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++PVRSRV_ERROR MMU_MapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++ IMG_HANDLE hPTPageOSMemHandle = IMG_NULL;
++ IMG_UINT32 *pui32PD;
++ IMG_UINT32 *pui32PT = IMG_NULL;
++ IMG_CPU_PHYADDR sCpuPAddr;
++ IMG_DEV_PHYADDR sPTDevPAddr;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ IMG_UINT32 ui32PDIndex;
++ IMG_UINT32 ui32PTIndex;
++
++ psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++ pui32PD = (IMG_UINT32*)psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->pvPDCpuVAddr;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++
++ if(psLocalDevMemArena == IMG_NULL)
++ {
++
++ eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ SGX_MMU_PAGE_SIZE,
++ (IMG_VOID **)&pui32PT,
++ &hPTPageOSMemHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_MapExtSystemCacheRegs: ERROR call to OSAllocPages failed"));
++ return eError;
++ }
++
++
++ if(pui32PT)
++ {
++ sCpuPAddr = OSMapLinToCPUPhys(pui32PT);
++ }
++ else
++ {
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(hPTPageOSMemHandle, 0);
++ }
++ sPTDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++ }
++ else
++ {
++ IMG_SYS_PHYADDR sSysPAddr;
++
++
++ if(RA_Alloc(psLocalDevMemArena,
++ SGX_MMU_PAGE_SIZE,
++ IMG_NULL,
++ IMG_NULL,
++ 0,
++ SGX_MMU_PAGE_SIZE,
++ 0,
++ &(sSysPAddr.uiAddr))!= IMG_TRUE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_MapExtSystemCacheRegs: ERROR call to RA_Alloc failed"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++ pui32PT = OSMapPhysToLin(sCpuPAddr,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ &hPTPageOSMemHandle);
++ if(!pui32PT)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "MMU_MapExtSystemCacheRegs: ERROR failed to map page tables"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++
++
++ sPTDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++
++ psDevInfo->sExtSystemCacheRegsPTSysPAddr = sSysPAddr;
++ }
++
++ OSMemSet(pui32PT, 0, SGX_MMU_PAGE_SIZE);
++
++ ui32PDIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++ ui32PTIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++
++ pui32PD[ui32PDIndex] = (sPTDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_VALID;
++
++ pui32PT[ui32PTIndex] = (psDevInfo->sExtSysCacheRegsDevPBase.uiAddr>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++
++
++ PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PT, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PD_UNIQUETAG);
++
++
++ psDevInfo->pui32ExtSystemCacheRegsPT = pui32PT;
++ psDevInfo->hExtSystemCacheRegsPTPageOSMemHandle = hPTPageOSMemHandle;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR MMU_UnmapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ SYS_DATA *psSysData;
++ RA_ARENA *psLocalDevMemArena;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ IMG_UINT32 ui32PDIndex;
++ IMG_UINT32 *pui32PD;
++
++ psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++ pui32PD = (IMG_UINT32*)psDeviceNode->sDevMemoryInfo.pBMKernelContext->psMMUContext->pvPDCpuVAddr;
++
++ SysAcquireData(&psSysData);
++
++ psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++
++ ui32PDIndex = (SGX_EXT_SYSTEM_CACHE_REGS_DEVVADDR_BASE & SGX_MMU_PD_MASK) >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++ pui32PD[ui32PDIndex] = 0;
++
++ PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pui32PD, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++ PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pui32ExtSystemCacheRegsPT, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++
++
++ if(psLocalDevMemArena == IMG_NULL)
++ {
++ if (psDevInfo->pui32ExtSystemCacheRegsPT != IMG_NULL)
++ {
++ OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++ SGX_MMU_PAGE_SIZE,
++ psDevInfo->pui32ExtSystemCacheRegsPT,
++ psDevInfo->hExtSystemCacheRegsPTPageOSMemHandle);
++ }
++ }
++ else
++ {
++ if (psDevInfo->pui32ExtSystemCacheRegsPT != IMG_NULL)
++ {
++ OSUnMapPhysToLin(psDevInfo->pui32ExtSystemCacheRegsPT,
++ SGX_MMU_PAGE_SIZE,
++ PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++ psDevInfo->hExtSystemCacheRegsPTPageOSMemHandle);
++
++ RA_Free(psLocalDevMemArena, psDevInfo->sExtSystemCacheRegsPTSysPAddr.uiAddr, IMG_FALSE);
++ }
++ }
++
++ return PVRSRV_OK;
++}
++#endif
++
++
++#if PAGE_TEST
++static IMG_VOID PageTest(IMG_VOID* pMem, IMG_DEV_PHYADDR sDevPAddr)
++{
++ volatile IMG_UINT32 ui32WriteData;
++ volatile IMG_UINT32 ui32ReadData;
++ volatile IMG_UINT32 *pMem32 = (volatile IMG_UINT32 *)pMem;
++ IMG_INT n;
++ IMG_BOOL bOK=IMG_TRUE;
++
++ ui32WriteData = 0xffffffff;
++
++ for (n=0; n<1024; n++)
++ {
++ pMem32[n] = ui32WriteData;
++ ui32ReadData = pMem32[n];
++
++ if (ui32WriteData != ui32ReadData)
++ {
++
++ PVR_DPF ((PVR_DBG_ERROR, "Error - memory page test failed at device phys address 0x%08X", sDevPAddr.uiAddr + (n<<2) ));
++ PVR_DBG_BREAK;
++ bOK = IMG_FALSE;
++ }
++ }
++
++ ui32WriteData = 0;
++
++ for (n=0; n<1024; n++)
++ {
++ pMem32[n] = ui32WriteData;
++ ui32ReadData = pMem32[n];
++
++ if (ui32WriteData != ui32ReadData)
++ {
++
++ PVR_DPF ((PVR_DBG_ERROR, "Error - memory page test failed at device phys address 0x%08X", sDevPAddr.uiAddr + (n<<2) ));
++ PVR_DBG_BREAK;
++ bOK = IMG_FALSE;
++ }
++ }
++
++ if (bOK)
++ {
++ PVR_DPF ((PVR_DBG_VERBOSE, "MMU Page 0x%08X is OK", sDevPAddr.uiAddr));
++ }
++ else
++ {
++ PVR_DPF ((PVR_DBG_VERBOSE, "MMU Page 0x%08X *** FAILED ***", sDevPAddr.uiAddr));
++ }
++}
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/mmu.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/mmu.h
+new file mode 100644
+index 0000000..7313769
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/mmu.h
+@@ -0,0 +1,139 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _MMU_H_
++#define _MMU_H_
++
++#include "sgxinfokm.h"
++
++PVRSRV_ERROR
++MMU_Initialise (PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT **ppsMMUContext, IMG_DEV_PHYADDR *psPDDevPAddr);
++
++IMG_VOID
++MMU_Finalise (MMU_CONTEXT *psMMUContext);
++
++
++IMG_VOID
++MMU_InsertHeap(MMU_CONTEXT *psMMUContext, MMU_HEAP *psMMUHeap);
++
++MMU_HEAP *
++MMU_Create (MMU_CONTEXT *psMMUContext,
++ DEV_ARENA_DESCRIPTOR *psDevArena,
++ RA_ARENA **ppsVMArena);
++
++IMG_VOID
++MMU_Delete (MMU_HEAP *pMMU);
++
++IMG_BOOL
++MMU_Alloc (MMU_HEAP *pMMU,
++ IMG_SIZE_T uSize,
++ IMG_SIZE_T *pActualSize,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 uDevVAddrAlignment,
++ IMG_DEV_VIRTADDR *pDevVAddr);
++
++IMG_VOID
++MMU_Free (MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_UINT32 ui32Size);
++
++IMG_VOID
++MMU_Enable (MMU_HEAP *pMMU);
++
++IMG_VOID
++MMU_Disable (MMU_HEAP *pMMU);
++
++IMG_VOID
++MMU_MapPages (MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR devVAddr,
++ IMG_SYS_PHYADDR SysPAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag);
++
++IMG_VOID
++MMU_MapShadow (MMU_HEAP * pMMU,
++ IMG_DEV_VIRTADDR MapBaseDevVAddr,
++ IMG_SIZE_T uSize,
++ IMG_CPU_VIRTADDR CpuVAddr,
++ IMG_HANDLE hOSMemHandle,
++ IMG_DEV_VIRTADDR * pDevVAddr,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag);
++
++IMG_VOID
++MMU_UnmapPages (MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR dev_vaddr,
++ IMG_UINT32 ui32PageCount,
++ IMG_HANDLE hUniqueTag);
++
++IMG_VOID
++MMU_MapScatter (MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_SYS_PHYADDR *psSysAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag);
++
++
++IMG_DEV_PHYADDR
++MMU_GetPhysPageAddr(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr);
++
++
++IMG_DEV_PHYADDR
++MMU_GetPDDevPAddr(MMU_CONTEXT *pMMUContext);
++
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++IMG_VOID
++EnableHostAccess (MMU_CONTEXT *psMMUContext);
++
++
++IMG_VOID
++DisableHostAccess (MMU_CONTEXT *psMMUContext);
++#endif
++
++IMG_VOID MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++IMG_VOID MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++PVRSRV_ERROR WorkaroundBRN22997Alloc(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++IMG_VOID WorkaroundBRN22997ReadHostPort(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++IMG_VOID WorkaroundBRN22997Free(PVRSRV_SGXDEV_INFO *psDevInfo);
++#endif
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++PVRSRV_ERROR MMU_MapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++PVRSRV_ERROR MMU_UnmapExtSystemCacheRegs(PVRSRV_DEVICE_NODE *psDeviceNode);
++#endif
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/pb.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/pb.c
+new file mode 100644
+index 0000000..afeb78a
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/pb.c
+@@ -0,0 +1,458 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "services_headers.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "pvr_bridge_km.h"
++#include "pdump_km.h"
++#include "sgxutils.h"
++
++#ifndef __linux__
++#pragma message("TODO: Review use of OS_PAGEABLE vs OS_NON_PAGEABLE")
++#endif
++
++#include "lists.h"
++
++static IMPLEMENT_LIST_INSERT(PVRSRV_STUB_PBDESC)
++static IMPLEMENT_LIST_REMOVE(PVRSRV_STUB_PBDESC)
++
++static PRESMAN_ITEM psResItemCreateSharedPB = IMG_NULL;
++static PVRSRV_PER_PROCESS_DATA *psPerProcCreateSharedPB = IMG_NULL;
++
++static PVRSRV_ERROR SGXCleanupSharedPBDescCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param);
++static PVRSRV_ERROR SGXCleanupSharedPBDescCreateLockCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param);
++
++IMG_EXPORT PVRSRV_ERROR
++SGXFindSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevCookie,
++ IMG_BOOL bLockOnFailure,
++ IMG_UINT32 ui32TotalPBSize,
++ IMG_HANDLE *phSharedPBDesc,
++ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsHWBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos,
++ IMG_UINT32 *ui32SharedPBDescSubKernelMemInfosCount)
++{
++ PVRSRV_STUB_PBDESC *psStubPBDesc;
++ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos=IMG_NULL;
++ PVRSRV_SGXDEV_INFO *psSGXDevInfo;
++ PVRSRV_ERROR eError;
++
++ psSGXDevInfo = ((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++ psStubPBDesc = psSGXDevInfo->psStubPBDescListKM;
++ if (psStubPBDesc != IMG_NULL)
++ {
++ IMG_UINT32 i;
++ PRESMAN_ITEM psResItem;
++
++ if(psStubPBDesc->ui32TotalPBSize != ui32TotalPBSize)
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "SGXFindSharedPBDescKM: Shared PB requested with different size (0x%x) from existing shared PB (0x%x) - requested size ignored",
++ ui32TotalPBSize, psStubPBDesc->ui32TotalPBSize));
++ }
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *)
++ * psStubPBDesc->ui32SubKernelMemInfosCount,
++ (IMG_VOID **)&ppsSharedPBDescSubKernelMemInfos,
++ IMG_NULL,
++ "Array of Kernel Memory Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: OSAllocMem failed"));
++
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ExitNotFound;
++ }
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_PB_DESC,
++ psStubPBDesc,
++ 0,
++ &SGXCleanupSharedPBDescCallback);
++
++ if (psResItem == IMG_NULL)
++ {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *) * psStubPBDesc->ui32SubKernelMemInfosCount,
++ ppsSharedPBDescSubKernelMemInfos,
++ 0);
++
++
++ PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: ResManRegisterRes failed"));
++
++ eError = PVRSRV_ERROR_GENERIC;
++ goto ExitNotFound;
++ }
++
++ *ppsSharedPBDescKernelMemInfo = psStubPBDesc->psSharedPBDescKernelMemInfo;
++ *ppsHWPBDescKernelMemInfo = psStubPBDesc->psHWPBDescKernelMemInfo;
++ *ppsBlockKernelMemInfo = psStubPBDesc->psBlockKernelMemInfo;
++ *ppsHWBlockKernelMemInfo = psStubPBDesc->psHWBlockKernelMemInfo;
++
++ *ui32SharedPBDescSubKernelMemInfosCount =
++ psStubPBDesc->ui32SubKernelMemInfosCount;
++
++ *pppsSharedPBDescSubKernelMemInfos = ppsSharedPBDescSubKernelMemInfos;
++
++ for(i=0; i<psStubPBDesc->ui32SubKernelMemInfosCount; i++)
++ {
++ ppsSharedPBDescSubKernelMemInfos[i] =
++ psStubPBDesc->ppsSubKernelMemInfos[i];
++ }
++
++ psStubPBDesc->ui32RefCount++;
++ *phSharedPBDesc = (IMG_HANDLE)psResItem;
++ return PVRSRV_OK;
++ }
++
++ eError = PVRSRV_OK;
++ if (bLockOnFailure)
++ {
++ if (psResItemCreateSharedPB == IMG_NULL)
++ {
++ psResItemCreateSharedPB = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK,
++ psPerProc,
++ 0,
++ &SGXCleanupSharedPBDescCreateLockCallback);
++
++ if (psResItemCreateSharedPB == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: ResManRegisterRes failed"));
++
++ eError = PVRSRV_ERROR_GENERIC;
++ goto ExitNotFound;
++ }
++ PVR_ASSERT(psPerProcCreateSharedPB == IMG_NULL);
++ psPerProcCreateSharedPB = psPerProc;
++ }
++ else
++ {
++ eError = PVRSRV_ERROR_PROCESSING_BLOCKED;
++ }
++ }
++ExitNotFound:
++ *phSharedPBDesc = IMG_NULL;
++
++ return eError;
++}
++
++
++static PVRSRV_ERROR
++SGXCleanupSharedPBDescKM(PVRSRV_STUB_PBDESC *psStubPBDescIn)
++{
++
++ IMG_UINT32 i;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE*)psStubPBDescIn->hDevCookie;
++
++
++
++
++ psStubPBDescIn->ui32RefCount--;
++ if (psStubPBDescIn->ui32RefCount == 0)
++ {
++ List_PVRSRV_STUB_PBDESC_Remove(psStubPBDescIn);
++ for(i=0 ; i<psStubPBDescIn->ui32SubKernelMemInfosCount; i++)
++ {
++
++ PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie,
++ psStubPBDescIn->ppsSubKernelMemInfos[i]);
++ }
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *) * psStubPBDescIn->ui32SubKernelMemInfosCount,
++ psStubPBDescIn->ppsSubKernelMemInfos,
++ 0);
++ psStubPBDescIn->ppsSubKernelMemInfos = IMG_NULL;
++
++ PVRSRVFreeSharedSysMemoryKM(psStubPBDescIn->psBlockKernelMemInfo);
++
++ PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie, psStubPBDescIn->psHWBlockKernelMemInfo);
++
++ PVRSRVFreeDeviceMemKM(psStubPBDescIn->hDevCookie, psStubPBDescIn->psHWPBDescKernelMemInfo);
++
++ PVRSRVFreeSharedSysMemoryKM(psStubPBDescIn->psSharedPBDescKernelMemInfo);
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_STUB_PBDESC),
++ psStubPBDescIn,
++ 0);
++
++
++
++ SGXCleanupRequest(psDeviceNode,
++ IMG_NULL,
++ PVRSRV_CLEANUPCMD_PB);
++ }
++ return PVRSRV_OK;
++
++}
++
++static PVRSRV_ERROR SGXCleanupSharedPBDescCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++ PVRSRV_STUB_PBDESC *psStubPBDesc = (PVRSRV_STUB_PBDESC *)pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ return SGXCleanupSharedPBDescKM(psStubPBDesc);
++}
++
++static PVRSRV_ERROR SGXCleanupSharedPBDescCreateLockCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++#ifdef DEBUG
++ PVRSRV_PER_PROCESS_DATA *psPerProc = (PVRSRV_PER_PROCESS_DATA *)pvParam;
++ PVR_ASSERT(psPerProc == psPerProcCreateSharedPB);
++#else
++ PVR_UNREFERENCED_PARAMETER(pvParam);
++#endif
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ psPerProcCreateSharedPB = IMG_NULL;
++ psResItemCreateSharedPB = IMG_NULL;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_EXPORT PVRSRV_ERROR
++SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc)
++{
++ PVR_ASSERT(hSharedPBDesc != IMG_NULL);
++
++ return ResManFreeResByPtr(hSharedPBDesc);
++}
++
++
++IMG_EXPORT PVRSRV_ERROR
++SGXAddSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo,
++ IMG_UINT32 ui32TotalPBSize,
++ IMG_HANDLE *phSharedPBDesc,
++ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos,
++ IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount)
++{
++ PVRSRV_STUB_PBDESC *psStubPBDesc=IMG_NULL;
++ PVRSRV_ERROR eRet = PVRSRV_ERROR_GENERIC;
++ IMG_UINT32 i;
++ PVRSRV_SGXDEV_INFO *psSGXDevInfo;
++ PRESMAN_ITEM psResItem;
++
++
++ if (psPerProcCreateSharedPB != psPerProc)
++ {
++ goto NoAdd;
++ }
++ else
++ {
++ PVR_ASSERT(psResItemCreateSharedPB != IMG_NULL);
++
++ ResManFreeResByPtr(psResItemCreateSharedPB);
++
++ PVR_ASSERT(psResItemCreateSharedPB == IMG_NULL);
++ PVR_ASSERT(psPerProcCreateSharedPB == IMG_NULL);
++ }
++
++ psSGXDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++ psStubPBDesc = psSGXDevInfo->psStubPBDescListKM;
++ if (psStubPBDesc != IMG_NULL)
++ {
++ if(psStubPBDesc->ui32TotalPBSize != ui32TotalPBSize)
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "SGXAddSharedPBDescKM: Shared PB requested with different size (0x%x) from existing shared PB (0x%x) - requested size ignored",
++ ui32TotalPBSize, psStubPBDesc->ui32TotalPBSize));
++
++ }
++
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_PB_DESC,
++ psStubPBDesc,
++ 0,
++ &SGXCleanupSharedPBDescCallback);
++ if (psResItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "SGXAddSharedPBDescKM: "
++ "Failed to register existing shared "
++ "PBDesc with the resource manager"));
++ goto NoAddKeepPB;
++ }
++
++
++ psStubPBDesc->ui32RefCount++;
++
++ *phSharedPBDesc = (IMG_HANDLE)psResItem;
++ eRet = PVRSRV_OK;
++ goto NoAddKeepPB;
++ }
++
++ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_STUB_PBDESC),
++ (IMG_VOID **)&psStubPBDesc,
++ 0,
++ "Stub Parameter Buffer Description") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: Failed to alloc "
++ "StubPBDesc"));
++ eRet = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto NoAdd;
++ }
++
++
++ psStubPBDesc->ppsSubKernelMemInfos = IMG_NULL;
++
++ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *)
++ * ui32SharedPBDescSubKernelMemInfosCount,
++ (IMG_VOID **)&psStubPBDesc->ppsSubKernelMemInfos,
++ 0,
++ "Array of Kernel Memory Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
++ "Failed to alloc "
++ "StubPBDesc->ppsSubKernelMemInfos"));
++ eRet = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto NoAdd;
++ }
++
++ if(PVRSRVDissociateMemFromResmanKM(psSharedPBDescKernelMemInfo)
++ != PVRSRV_OK)
++ {
++ goto NoAdd;
++ }
++
++ if(PVRSRVDissociateMemFromResmanKM(psHWPBDescKernelMemInfo)
++ != PVRSRV_OK)
++ {
++ goto NoAdd;
++ }
++
++ if(PVRSRVDissociateMemFromResmanKM(psBlockKernelMemInfo)
++ != PVRSRV_OK)
++ {
++ goto NoAdd;
++ }
++
++ if(PVRSRVDissociateMemFromResmanKM(psHWBlockKernelMemInfo)
++ != PVRSRV_OK)
++ {
++ goto NoAdd;
++ }
++
++ psStubPBDesc->ui32RefCount = 1;
++ psStubPBDesc->ui32TotalPBSize = ui32TotalPBSize;
++ psStubPBDesc->psSharedPBDescKernelMemInfo = psSharedPBDescKernelMemInfo;
++ psStubPBDesc->psHWPBDescKernelMemInfo = psHWPBDescKernelMemInfo;
++ psStubPBDesc->psBlockKernelMemInfo = psBlockKernelMemInfo;
++ psStubPBDesc->psHWBlockKernelMemInfo = psHWBlockKernelMemInfo;
++
++ psStubPBDesc->ui32SubKernelMemInfosCount =
++ ui32SharedPBDescSubKernelMemInfosCount;
++ for(i=0; i<ui32SharedPBDescSubKernelMemInfosCount; i++)
++ {
++ psStubPBDesc->ppsSubKernelMemInfos[i] = ppsSharedPBDescSubKernelMemInfos[i];
++ if(PVRSRVDissociateMemFromResmanKM(ppsSharedPBDescSubKernelMemInfos[i])
++ != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
++ "Failed to dissociate shared PBDesc "
++ "from process"));
++ goto NoAdd;
++ }
++ }
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_SHARED_PB_DESC,
++ psStubPBDesc,
++ 0,
++ &SGXCleanupSharedPBDescCallback);
++ if (psResItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
++ "Failed to register shared PBDesc "
++ " with the resource manager"));
++ goto NoAdd;
++ }
++ psStubPBDesc->hDevCookie = hDevCookie;
++
++
++ List_PVRSRV_STUB_PBDESC_Insert(&(psSGXDevInfo->psStubPBDescListKM),
++ psStubPBDesc);
++
++ *phSharedPBDesc = (IMG_HANDLE)psResItem;
++
++ return PVRSRV_OK;
++
++NoAdd:
++ if(psStubPBDesc)
++ {
++ if(psStubPBDesc->ppsSubKernelMemInfos)
++ {
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_KERNEL_MEM_INFO *) * ui32SharedPBDescSubKernelMemInfosCount,
++ psStubPBDesc->ppsSubKernelMemInfos,
++ 0);
++ psStubPBDesc->ppsSubKernelMemInfos = IMG_NULL;
++ }
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_STUB_PBDESC),
++ psStubPBDesc,
++ 0);
++
++ }
++
++NoAddKeepPB:
++ for (i = 0; i < ui32SharedPBDescSubKernelMemInfosCount; i++)
++ {
++ PVRSRVFreeDeviceMemKM(hDevCookie, ppsSharedPBDescSubKernelMemInfos[i]);
++ }
++
++ PVRSRVFreeSharedSysMemoryKM(psSharedPBDescKernelMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookie, psHWPBDescKernelMemInfo);
++
++ PVRSRVFreeSharedSysMemoryKM(psBlockKernelMemInfo);
++ PVRSRVFreeDeviceMemKM(hDevCookie, psHWBlockKernelMemInfo);
++
++ return eRet;
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgx_bridge_km.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgx_bridge_km.h
+new file mode 100644
+index 0000000..72f025d
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgx_bridge_km.h
+@@ -0,0 +1,147 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SGX_BRIDGE_KM_H__)
++#define __SGX_BRIDGE_KM_H__
++
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "sgx_bridge.h"
++#include "pvr_bridge.h"
++#include "perproc.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK *psKick);
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++IMG_IMPORT
++PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK *psKick);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle,
++ SGX_CCB_KICK *psCCBKick);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetPhysPageAddrKM(IMG_HANDLE hDevMemHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_DEV_PHYADDR *pDevPAddr,
++ IMG_CPU_PHYADDR *pCpuPAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV SGXGetMMUPDAddrKM(IMG_HANDLE hDevCookie,
++ IMG_HANDLE hDevMemContext,
++ IMG_DEV_PHYADDR *psPDDevPAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE hDevCookie,
++ SGX_CLIENT_INFO* psClientInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO *psDevInfo,
++ SGX_MISC_INFO *psMiscInfo,
++ PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_HANDLE hDevMemContext);
++
++#if defined(SUPPORT_SGX_HWPERF)
++IMG_IMPORT
++PVRSRV_ERROR SGXReadDiffCountersKM(IMG_HANDLE hDevHandle,
++ IMG_UINT32 ui32Reg,
++ IMG_UINT32 *pui32Old,
++ IMG_BOOL bNew,
++ IMG_UINT32 ui32New,
++ IMG_UINT32 ui32NewReset,
++ IMG_UINT32 ui32CountersReg,
++ IMG_UINT32 ui32Reg2,
++ IMG_BOOL *pbActive,
++ PVRSRV_SGXDEV_DIFF_INFO *psDiffs);
++IMG_IMPORT
++PVRSRV_ERROR SGXReadHWPerfCBKM(IMG_HANDLE hDevHandle,
++ IMG_UINT32 ui32ArraySize,
++ PVRSRV_SGX_HWPERF_CB_ENTRY *psHWPerfCBData,
++ IMG_UINT32 *pui32DataCount,
++ IMG_UINT32 *pui32ClockSpeed,
++ IMG_UINT32 *pui32HostTimeStamp);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO *psDevInfo,
++ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo,
++ IMG_BOOL bWaitForComplete);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle,
++ SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR DevInitSGXPart2KM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevHandle,
++ SGX_BRIDGE_INIT_INFO *psInitInfo);
++
++IMG_IMPORT PVRSRV_ERROR
++SGXFindSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevCookie,
++ IMG_BOOL bLockOnFailure,
++ IMG_UINT32 ui32TotalPBSize,
++ IMG_HANDLE *phSharedPBDesc,
++ PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO **ppsHWBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos,
++ IMG_UINT32 *ui32SharedPBDescSubKernelMemInfosCount);
++
++IMG_IMPORT PVRSRV_ERROR
++SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc);
++
++IMG_IMPORT PVRSRV_ERROR
++SGXAddSharedPBDescKM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevCookie,
++ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo,
++ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo,
++ IMG_UINT32 ui32TotalPBSize,
++ IMG_HANDLE *phSharedPBDesc,
++ PVRSRV_KERNEL_MEM_INFO **psSharedPBDescSubKernelMemInfos,
++ IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount);
++
++
++IMG_IMPORT PVRSRV_ERROR
++SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie,
++ SGX_INTERNAL_DEVINFO *psSGXInternalDevInfo);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxconfig.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxconfig.h
+new file mode 100644
+index 0000000..63cd151
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxconfig.h
+@@ -0,0 +1,134 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SGXCONFIG_H__
++#define __SGXCONFIG_H__
++
++#include "sgxdefs.h"
++
++#define DEV_DEVICE_TYPE PVRSRV_DEVICE_TYPE_SGX
++#define DEV_DEVICE_CLASS PVRSRV_DEVICE_CLASS_3D
++
++#define DEV_MAJOR_VERSION 1
++#define DEV_MINOR_VERSION 0
++
++#if SGX_FEATURE_ADDRESS_SPACE_SIZE == 32
++ #if defined(SGX_FEATURE_2D_HARDWARE)
++ #define SGX_2D_HEAP_BASE 0x00100000
++ #define SGX_2D_HEAP_SIZE (0x08000000-0x00100000-0x00001000)
++ #else
++ #if defined(FIX_HW_BRN_26915)
++ #define SGX_CGBUFFER_HEAP_BASE 0x00100000
++ #define SGX_CGBUFFER_HEAP_SIZE (0x08000000-0x00100000-0x00001000)
++ #endif
++ #endif
++
++ #if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
++ #define SGX_GENERAL_MAPPING_HEAP_BASE 0x08000000
++ #define SGX_GENERAL_MAPPING_HEAP_SIZE (0x08000000-0x00001000)
++ #endif
++
++ #define SGX_GENERAL_HEAP_BASE 0x10000000
++ #define SGX_GENERAL_HEAP_SIZE (0xC8000000-0x00001000)
++
++ #define SGX_3DPARAMETERS_HEAP_BASE 0xD8000000
++ #define SGX_3DPARAMETERS_HEAP_SIZE (0x10000000-0x00001000)
++
++ #define SGX_TADATA_HEAP_BASE 0xE8000000
++ #define SGX_TADATA_HEAP_SIZE (0x0D000000-0x00001000)
++
++ #define SGX_SYNCINFO_HEAP_BASE 0xF5000000
++ #define SGX_SYNCINFO_HEAP_SIZE (0x01000000-0x00001000)
++
++ #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE 0xF6000000
++ #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE (0x02000000-0x00001000)
++
++ #define SGX_KERNEL_CODE_HEAP_BASE 0xF8000000
++ #define SGX_KERNEL_CODE_HEAP_SIZE (0x00080000-0x00001000)
++
++ #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE 0xF8400000
++ #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE (0x01C00000-0x00001000)
++
++ #define SGX_KERNEL_DATA_HEAP_BASE 0xFA000000
++ #define SGX_KERNEL_DATA_HEAP_SIZE (0x05000000-0x00001000)
++
++ #define SGX_PIXELSHADER_HEAP_BASE 0xFF000000
++ #define SGX_PIXELSHADER_HEAP_SIZE (0x00500000-0x00001000)
++
++ #define SGX_VERTEXSHADER_HEAP_BASE 0xFF800000
++ #define SGX_VERTEXSHADER_HEAP_SIZE (0x00200000-0x00001000)
++
++
++ #define SGX_CORE_IDENTIFIED
++#endif
++
++#if SGX_FEATURE_ADDRESS_SPACE_SIZE == 28
++ #if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
++ #define SGX_GENERAL_MAPPING_HEAP_BASE 0x00001000
++ #define SGX_GENERAL_MAPPING_HEAP_SIZE (0x01800000-0x00001000-0x00001000)
++ #endif
++
++ #define SGX_GENERAL_HEAP_BASE 0x01800000
++ #define SGX_GENERAL_HEAP_SIZE (0x07000000-0x00001000)
++
++ #define SGX_3DPARAMETERS_HEAP_BASE 0x08800000
++ #define SGX_3DPARAMETERS_HEAP_SIZE (0x04000000-0x00001000)
++
++ #define SGX_TADATA_HEAP_BASE 0x0C800000
++ #define SGX_TADATA_HEAP_SIZE (0x01000000-0x00001000)
++
++ #define SGX_SYNCINFO_HEAP_BASE 0x0D800000
++ #define SGX_SYNCINFO_HEAP_SIZE (0x00400000-0x00001000)
++
++ #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE 0x0DC00000
++ #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE (0x00800000-0x00001000)
++
++ #define SGX_KERNEL_CODE_HEAP_BASE 0x0E400000
++ #define SGX_KERNEL_CODE_HEAP_SIZE (0x00080000-0x00001000)
++
++ #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE 0x0E800000
++ #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE (0x00800000-0x00001000)
++
++ #define SGX_KERNEL_DATA_HEAP_BASE 0x0F000000
++ #define SGX_KERNEL_DATA_HEAP_SIZE (0x00400000-0x00001000)
++
++ #define SGX_PIXELSHADER_HEAP_BASE 0x0F400000
++ #define SGX_PIXELSHADER_HEAP_SIZE (0x00500000-0x00001000)
++
++ #define SGX_VERTEXSHADER_HEAP_BASE 0x0FC00000
++ #define SGX_VERTEXSHADER_HEAP_SIZE (0x00200000-0x00001000)
++
++
++ #define SGX_CORE_IDENTIFIED
++
++#endif
++
++#if !defined(SGX_CORE_IDENTIFIED)
++ #error "sgxconfig.h: ERROR: unspecified SGX Core version"
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxinfokm.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxinfokm.h
+new file mode 100644
+index 0000000..1ddd709
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxinfokm.h
+@@ -0,0 +1,352 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SGXINFOKM_H__
++#define __SGXINFOKM_H__
++
++#include "sgxdefs.h"
++#include "device.h"
++#include "power.h"
++#include "sysconfig.h"
++#include "sgxscript.h"
++#include "sgxinfo.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#define SGX_HOSTPORT_PRESENT 0x00000001UL
++
++
++typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC;
++
++
++typedef struct _PVRSRV_SGX_CCB_INFO_ *PPVRSRV_SGX_CCB_INFO;
++
++typedef struct _PVRSRV_SGXDEV_INFO_
++{
++ PVRSRV_DEVICE_TYPE eDeviceType;
++ PVRSRV_DEVICE_CLASS eDeviceClass;
++
++ IMG_UINT8 ui8VersionMajor;
++ IMG_UINT8 ui8VersionMinor;
++ IMG_UINT32 ui32CoreConfig;
++ IMG_UINT32 ui32CoreFlags;
++
++
++ IMG_PVOID pvRegsBaseKM;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++
++ IMG_PVOID pvHostPortBaseKM;
++
++ IMG_UINT32 ui32HPSize;
++
++ IMG_SYS_PHYADDR sHPSysPAddr;
++#endif
++
++
++ IMG_HANDLE hRegMapping;
++
++
++ IMG_SYS_PHYADDR sRegsPhysBase;
++
++ IMG_UINT32 ui32RegSize;
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++
++ IMG_UINT32 ui32ExtSysCacheRegsSize;
++
++ IMG_DEV_PHYADDR sExtSysCacheRegsDevPBase;
++
++ IMG_UINT32 *pui32ExtSystemCacheRegsPT;
++
++ IMG_HANDLE hExtSystemCacheRegsPTPageOSMemHandle;
++
++ IMG_SYS_PHYADDR sExtSystemCacheRegsPTSysPAddr;
++#endif
++
++
++ IMG_UINT32 ui32CoreClockSpeed;
++ IMG_UINT32 ui32uKernelTimerClock;
++
++ PVRSRV_STUB_PBDESC *psStubPBDescListKM;
++
++
++
++ IMG_DEV_PHYADDR sKernelPDDevPAddr;
++
++ IMG_VOID *pvDeviceMemoryHeap;
++ PPVRSRV_KERNEL_MEM_INFO psKernelCCBMemInfo;
++ PVRSRV_SGX_KERNEL_CCB *psKernelCCB;
++ PPVRSRV_SGX_CCB_INFO psKernelCCBInfo;
++ PPVRSRV_KERNEL_MEM_INFO psKernelCCBCtlMemInfo;
++ PVRSRV_SGX_CCB_CTL *psKernelCCBCtl;
++ PPVRSRV_KERNEL_MEM_INFO psKernelCCBEventKickerMemInfo;
++ IMG_UINT32 *pui32KernelCCBEventKicker;
++#if defined(PDUMP)
++ IMG_UINT32 ui32KernelCCBEventKickerDumpVal;
++#endif
++ PVRSRV_KERNEL_MEM_INFO *psKernelSGXMiscMemInfo;
++ IMG_UINT32 aui32HostKickAddr[SGXMKIF_CMD_MAX];
++#if defined(SGX_SUPPORT_HWPROFILING)
++ PPVRSRV_KERNEL_MEM_INFO psKernelHWProfilingMemInfo;
++#endif
++ IMG_UINT32 ui32KickTACounter;
++ IMG_UINT32 ui32KickTARenderCounter;
++#if defined(SUPPORT_SGX_HWPERF)
++ PPVRSRV_KERNEL_MEM_INFO psKernelHWPerfCBMemInfo;
++ IMG_UINT32 ui32HWGroupRequested;
++ IMG_UINT32 ui32HWReset;
++#endif
++#ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
++ PPVRSRV_KERNEL_MEM_INFO psKernelEDMStatusBufferMemInfo;
++#endif
++#if defined(SGX_FEATURE_OVERLAPPED_SPM)
++ PPVRSRV_KERNEL_MEM_INFO psKernelTmpRgnHeaderMemInfo;
++#endif
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ PPVRSRV_KERNEL_MEM_INFO psKernelTmpDPMStateMemInfo;
++#endif
++
++
++ IMG_UINT32 ui32ClientRefCount;
++
++
++ IMG_UINT32 ui32CacheControl;
++
++
++ IMG_UINT32 ui32ClientBuildOptions;
++
++
++ SGX_MISCINFO_STRUCT_SIZES sSGXStructSizes;
++
++
++
++
++ IMG_VOID *pvMMUContextList;
++
++
++ IMG_BOOL bForcePTOff;
++
++ IMG_UINT32 ui32EDMTaskReg0;
++ IMG_UINT32 ui32EDMTaskReg1;
++
++ IMG_UINT32 ui32ClkGateStatusReg;
++ IMG_UINT32 ui32ClkGateStatusMask;
++#if defined(SGX_FEATURE_MP)
++ IMG_UINT32 ui32MasterClkGateStatusReg;
++ IMG_UINT32 ui32MasterClkGateStatusMask;
++#endif
++ SGX_INIT_SCRIPTS sScripts;
++
++
++ IMG_HANDLE hBIFResetPDOSMemHandle;
++ IMG_DEV_PHYADDR sBIFResetPDDevPAddr;
++ IMG_DEV_PHYADDR sBIFResetPTDevPAddr;
++ IMG_DEV_PHYADDR sBIFResetPageDevPAddr;
++ IMG_UINT32 *pui32BIFResetPD;
++ IMG_UINT32 *pui32BIFResetPT;
++
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++
++ IMG_HANDLE hBRN22997PTPageOSMemHandle;
++ IMG_HANDLE hBRN22997PDPageOSMemHandle;
++ IMG_DEV_PHYADDR sBRN22997PTDevPAddr;
++ IMG_DEV_PHYADDR sBRN22997PDDevPAddr;
++ IMG_UINT32 *pui32BRN22997PT;
++ IMG_UINT32 *pui32BRN22997PD;
++ IMG_SYS_PHYADDR sBRN22997SysPAddr;
++#endif
++
++#if defined(SUPPORT_HW_RECOVERY)
++
++ IMG_HANDLE hTimer;
++
++ IMG_UINT32 ui32TimeStamp;
++#endif
++
++
++ IMG_UINT32 ui32NumResets;
++
++
++ PVRSRV_KERNEL_MEM_INFO *psKernelSGXHostCtlMemInfo;
++ SGXMKIF_HOST_CTL *psSGXHostCtl;
++
++
++ PVRSRV_KERNEL_MEM_INFO *psKernelSGXTA3DCtlMemInfo;
++
++ IMG_UINT32 ui32Flags;
++
++ #if defined(PDUMP)
++ PVRSRV_SGX_PDUMP_CONTEXT sPDContext;
++ #endif
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++
++ IMG_VOID *pvDummyPTPageCpuVAddr;
++ IMG_DEV_PHYADDR sDummyPTDevPAddr;
++ IMG_HANDLE hDummyPTPageOSMemHandle;
++ IMG_VOID *pvDummyDataPageCpuVAddr;
++ IMG_DEV_PHYADDR sDummyDataDevPAddr;
++ IMG_HANDLE hDummyDataPageOSMemHandle;
++#endif
++
++ IMG_UINT32 asSGXDevData[SGX_MAX_DEV_DATA];
++
++} PVRSRV_SGXDEV_INFO;
++
++
++typedef struct _SGX_TIMING_INFORMATION_
++{
++ IMG_UINT32 ui32CoreClockSpeed;
++ IMG_UINT32 ui32HWRecoveryFreq;
++ IMG_BOOL bEnableActivePM;
++ IMG_UINT32 ui32ActivePowManLatencyms;
++ IMG_UINT32 ui32uKernelFreq;
++} SGX_TIMING_INFORMATION;
++
++typedef struct _SGX_DEVICE_MAP_
++{
++ IMG_UINT32 ui32Flags;
++
++
++ IMG_SYS_PHYADDR sRegsSysPBase;
++ IMG_CPU_PHYADDR sRegsCpuPBase;
++ IMG_CPU_VIRTADDR pvRegsCpuVBase;
++ IMG_UINT32 ui32RegsSize;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ IMG_SYS_PHYADDR sHPSysPBase;
++ IMG_CPU_PHYADDR sHPCpuPBase;
++ IMG_UINT32 ui32HPSize;
++#endif
++
++
++ IMG_SYS_PHYADDR sLocalMemSysPBase;
++ IMG_DEV_PHYADDR sLocalMemDevPBase;
++ IMG_CPU_PHYADDR sLocalMemCpuPBase;
++ IMG_UINT32 ui32LocalMemSize;
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++ IMG_UINT32 ui32ExtSysCacheRegsSize;
++ IMG_DEV_PHYADDR sExtSysCacheRegsDevPBase;
++#endif
++
++
++ IMG_UINT32 ui32IRQ;
++
++#if !defined(SGX_DYNAMIC_TIMING_INFO)
++
++ SGX_TIMING_INFORMATION sTimingInfo;
++#endif
++} SGX_DEVICE_MAP;
++
++
++struct _PVRSRV_STUB_PBDESC_
++{
++ IMG_UINT32 ui32RefCount;
++ IMG_UINT32 ui32TotalPBSize;
++ PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO **ppsSubKernelMemInfos;
++ IMG_UINT32 ui32SubKernelMemInfosCount;
++ IMG_HANDLE hDevCookie;
++ PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psHWBlockKernelMemInfo;
++ PVRSRV_STUB_PBDESC *psNext;
++ PVRSRV_STUB_PBDESC **ppsThis;
++};
++
++typedef struct _PVRSRV_SGX_CCB_INFO_
++{
++ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo;
++ PVRSRV_KERNEL_MEM_INFO *psCCBCtlMemInfo;
++ SGXMKIF_COMMAND *psCommands;
++ IMG_UINT32 *pui32WriteOffset;
++ volatile IMG_UINT32 *pui32ReadOffset;
++#if defined(PDUMP)
++ IMG_UINT32 ui32CCBDumpWOff;
++#endif
++} PVRSRV_SGX_CCB_INFO;
++
++PVRSRV_ERROR SGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_VOID SGXOSTimer(IMG_VOID *pvData);
++
++IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32PDUMPFlags);
++
++PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo);
++PVRSRV_ERROR SGXDeinitialise(IMG_HANDLE hDevCookie);
++
++PVRSRV_ERROR SGXPrePowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++PVRSRV_ERROR SGXPostPowerState(IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++PVRSRV_ERROR SGXPreClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++PVRSRV_ERROR SGXPostClockSpeedChange(IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++IMG_VOID SGXPanic(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++PVRSRV_ERROR SGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++#if defined(SGX_DYNAMIC_TIMING_INFO)
++IMG_VOID SysGetSGXTimingInformation(SGX_TIMING_INFORMATION *psSGXTimingInfo);
++#endif
++
++#if defined(NO_HARDWARE)
++static INLINE IMG_VOID NoHardwareGenerateEvent(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32StatusRegister,
++ IMG_UINT32 ui32StatusValue,
++ IMG_UINT32 ui32StatusMask)
++{
++ IMG_UINT32 ui32RegVal;
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32StatusRegister);
++
++ ui32RegVal &= ~ui32StatusMask;
++ ui32RegVal |= (ui32StatusValue & ui32StatusMask);
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32StatusRegister, ui32RegVal);
++}
++#endif
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxinit.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxinit.c
+new file mode 100644
+index 0000000..d8f6aef
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxinit.c
+@@ -0,0 +1,2218 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgx_mkif_km.h"
++#include "sgxconfig.h"
++#include "sysconfig.h"
++#include "pvr_bridge_km.h"
++
++#include "sgx_bridge_km.h"
++
++#include "pdump_km.h"
++#include "ra.h"
++#include "mmu.h"
++#include "handle.h"
++#include "perproc.h"
++
++#include "sgxutils.h"
++#include "pvrversion.h"
++#include "sgx_options.h"
++
++#include "lists.h"
++#include "srvkm.h"
++
++DECLARE_LIST_ANY_VA(PVRSRV_POWER_DEV);
++
++#if defined(SUPPORT_SGX_HWPERF)
++IMG_VOID* MatchPowerDeviceIndex_AnyVaCb(PVRSRV_POWER_DEV *psPowerDev, va_list va);
++#endif
++
++#define VAR(x) #x
++
++#define CHECK_SIZE(NAME) \
++{ \
++ if (psSGXStructSizes->ui32Sizeof_##NAME != psDevInfo->sSGXStructSizes.ui32Sizeof_##NAME) \
++ { \
++ PVR_DPF((PVR_DBG_ERROR, "SGXDevInitCompatCheck: Size check failed for SGXMKIF_%s (client) = %d bytes, (ukernel) = %d bytes\n", \
++ VAR(NAME), \
++ psDevInfo->sSGXStructSizes.ui32Sizeof_##NAME, \
++ psSGXStructSizes->ui32Sizeof_##NAME )); \
++ bStructSizesFailed = IMG_TRUE; \
++ } \
++}
++
++#if defined (SYS_USING_INTERRUPTS)
++IMG_BOOL SGX_ISRHandler(IMG_VOID *pvData);
++#endif
++
++IMG_UINT32 gui32EventStatusServicesByISR = 0;
++
++
++static
++PVRSRV_ERROR SGXGetMiscInfoUkernel(PVRSRV_SGXDEV_INFO *psDevInfo,
++ PVRSRV_DEVICE_NODE *psDeviceNode);
++
++
++static IMG_VOID SGXCommandComplete(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++#if defined(OS_SUPPORTS_IN_LISR)
++ if (OSInLISR(psDeviceNode->psSysData))
++ {
++
++ psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
++ }
++ else
++ {
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++ }
++#else
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++#endif
++}
++
++static IMG_UINT32 DeinitDevInfo(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ if (psDevInfo->psKernelCCBInfo != IMG_NULL)
++ {
++
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_SGX_CCB_INFO), psDevInfo->psKernelCCBInfo, IMG_NULL);
++ }
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR InitDevInfo(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ PVRSRV_DEVICE_NODE *psDeviceNode,
++ SGX_BRIDGE_INIT_INFO *psInitInfo)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++ PVRSRV_ERROR eError;
++
++ PVRSRV_SGX_CCB_INFO *psKernelCCBInfo = IMG_NULL;
++
++ PVR_UNREFERENCED_PARAMETER(psPerProc);
++ psDevInfo->sScripts = psInitInfo->sScripts;
++
++ psDevInfo->psKernelCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBMemInfo;
++ psDevInfo->psKernelCCB = (PVRSRV_SGX_KERNEL_CCB *) psDevInfo->psKernelCCBMemInfo->pvLinAddrKM;
++
++ psDevInfo->psKernelCCBCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBCtlMemInfo;
++ psDevInfo->psKernelCCBCtl = (PVRSRV_SGX_CCB_CTL *) psDevInfo->psKernelCCBCtlMemInfo->pvLinAddrKM;
++
++ psDevInfo->psKernelCCBEventKickerMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBEventKickerMemInfo;
++ psDevInfo->pui32KernelCCBEventKicker = (IMG_UINT32 *)psDevInfo->psKernelCCBEventKickerMemInfo->pvLinAddrKM;
++
++ psDevInfo->psKernelSGXHostCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXHostCtlMemInfo;
++ psDevInfo->psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM;
++
++ psDevInfo->psKernelSGXTA3DCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXTA3DCtlMemInfo;
++
++ psDevInfo->psKernelSGXMiscMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXMiscMemInfo;
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++ psDevInfo->psKernelHWProfilingMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelHWProfilingMemInfo;
++#endif
++#if defined(SUPPORT_SGX_HWPERF)
++ psDevInfo->psKernelHWPerfCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelHWPerfCBMemInfo;
++#endif
++#ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
++ psDevInfo->psKernelEDMStatusBufferMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelEDMStatusBufferMemInfo;
++#endif
++#if defined(SGX_FEATURE_OVERLAPPED_SPM)
++ psDevInfo->psKernelTmpRgnHeaderMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelTmpRgnHeaderMemInfo;
++#endif
++#if defined(SGX_FEATURE_SPM_MODE_0)
++ psDevInfo->psKernelTmpDPMStateMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelTmpDPMStateMemInfo;
++#endif
++
++ psDevInfo->ui32ClientBuildOptions = psInitInfo->ui32ClientBuildOptions;
++
++
++ psDevInfo->sSGXStructSizes = psInitInfo->sSGXStructSizes;
++
++
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(PVRSRV_SGX_CCB_INFO),
++ (IMG_VOID **)&psKernelCCBInfo, 0,
++ "SGX Circular Command Buffer Info");
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"InitDevInfo: Failed to alloc memory"));
++ goto failed_allockernelccb;
++ }
++
++
++ OSMemSet(psKernelCCBInfo, 0, sizeof(PVRSRV_SGX_CCB_INFO));
++ psKernelCCBInfo->psCCBMemInfo = psDevInfo->psKernelCCBMemInfo;
++ psKernelCCBInfo->psCCBCtlMemInfo = psDevInfo->psKernelCCBCtlMemInfo;
++ psKernelCCBInfo->psCommands = psDevInfo->psKernelCCB->asCommands;
++ psKernelCCBInfo->pui32WriteOffset = &psDevInfo->psKernelCCBCtl->ui32WriteOffset;
++ psKernelCCBInfo->pui32ReadOffset = &psDevInfo->psKernelCCBCtl->ui32ReadOffset;
++ psDevInfo->psKernelCCBInfo = psKernelCCBInfo;
++
++
++
++ OSMemCopy(psDevInfo->aui32HostKickAddr, psInitInfo->aui32HostKickAddr,
++ SGXMKIF_CMD_MAX * sizeof(psDevInfo->aui32HostKickAddr[0]));
++
++ psDevInfo->bForcePTOff = IMG_FALSE;
++
++ psDevInfo->ui32CacheControl = psInitInfo->ui32CacheControl;
++
++ psDevInfo->ui32EDMTaskReg0 = psInitInfo->ui32EDMTaskReg0;
++ psDevInfo->ui32EDMTaskReg1 = psInitInfo->ui32EDMTaskReg1;
++ psDevInfo->ui32ClkGateStatusReg = psInitInfo->ui32ClkGateStatusReg;
++ psDevInfo->ui32ClkGateStatusMask = psInitInfo->ui32ClkGateStatusMask;
++#if defined(SGX_FEATURE_MP)
++ psDevInfo->ui32MasterClkGateStatusReg = psInitInfo->ui32MasterClkGateStatusReg;
++ psDevInfo->ui32MasterClkGateStatusMask = psInitInfo->ui32MasterClkGateStatusMask;
++#endif
++
++
++
++ OSMemCopy(&psDevInfo->asSGXDevData, &psInitInfo->asInitDevData, sizeof(psDevInfo->asSGXDevData));
++
++ return PVRSRV_OK;
++
++failed_allockernelccb:
++ DeinitDevInfo(psDevInfo);
++
++ return eError;
++}
++
++
++
++
++static PVRSRV_ERROR SGXRunScript(PVRSRV_SGXDEV_INFO *psDevInfo, SGX_INIT_COMMAND *psScript, IMG_UINT32 ui32NumInitCommands)
++{
++ IMG_UINT32 ui32PC;
++ SGX_INIT_COMMAND *psComm;
++
++ for (ui32PC = 0, psComm = psScript;
++ ui32PC < ui32NumInitCommands;
++ ui32PC++, psComm++)
++ {
++ switch (psComm->eOp)
++ {
++ case SGX_INIT_OP_WRITE_HW_REG:
++ {
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, psComm->sWriteHWReg.ui32Offset, psComm->sWriteHWReg.ui32Value);
++ PDUMPREG(psComm->sWriteHWReg.ui32Offset, psComm->sWriteHWReg.ui32Value);
++ break;
++ }
++#if defined(PDUMP)
++ case SGX_INIT_OP_PDUMP_HW_REG:
++ {
++ PDUMPREG(psComm->sPDumpHWReg.ui32Offset, psComm->sPDumpHWReg.ui32Value);
++ break;
++ }
++#endif
++ case SGX_INIT_OP_HALT:
++ {
++ return PVRSRV_OK;
++ }
++ case SGX_INIT_OP_ILLEGAL:
++
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXRunScript: PC %d: Illegal command: %d", ui32PC, psComm->eOp));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ }
++
++ return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_MEM_INFO *psSGXHostCtlMemInfo = psDevInfo->psKernelSGXHostCtlMemInfo;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = psSGXHostCtlMemInfo->pvLinAddrKM;
++#if defined(PDUMP)
++ static IMG_BOOL bFirstTime = IMG_TRUE;
++#endif
++
++
++
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "SGX initialisation script part 1\n");
++ eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asInitCommandsPart1, SGX_MAX_INIT_COMMANDS);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXInitialise: SGXRunScript (part 1) failed (%d)", eError));
++ return (PVRSRV_ERROR_GENERIC);
++ }
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "End of SGX initialisation script part 1\n");
++
++
++ SGXReset(psDevInfo, PDUMP_FLAGS_CONTINUOUS);
++
++#if defined(EUR_CR_POWER)
++#if defined(SGX531)
++
++
++
++
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_POWER, 1);
++ PDUMPREG(EUR_CR_POWER, 1);
++#else
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_POWER, 0);
++ PDUMPREG(EUR_CR_POWER, 0);
++#endif
++#endif
++
++
++ *psDevInfo->pui32KernelCCBEventKicker = 0;
++#if defined(PDUMP)
++ if (bFirstTime)
++ {
++ psDevInfo->ui32KernelCCBEventKickerDumpVal = 0;
++ PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal,
++ psDevInfo->psKernelCCBEventKickerMemInfo, 0,
++ sizeof(*psDevInfo->pui32KernelCCBEventKicker), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
++ }
++#endif
++
++
++
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "SGX initialisation script part 2\n");
++ eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asInitCommandsPart2, SGX_MAX_INIT_COMMANDS);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXInitialise: SGXRunScript (part 2) failed (%d)", eError));
++ return (PVRSRV_ERROR_GENERIC);
++ }
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "End of SGX initialisation script part 2\n");
++
++
++ psSGXHostCtl->ui32InitStatus = 0;
++#if defined(PDUMP)
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
++ "Reset the SGX microkernel initialisation status\n");
++ PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32InitStatus),
++ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psSGXHostCtlMemInfo));
++#endif
++
++ *psDevInfo->pui32KernelCCBEventKicker = (*psDevInfo->pui32KernelCCBEventKicker + 1) & 0xFF;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
++ SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0),
++ EUR_CR_EVENT_KICK_NOW_MASK);
++
++#if defined(PDUMP)
++
++
++
++
++
++
++ if (bFirstTime)
++ {
++ psDevInfo->ui32KernelCCBEventKickerDumpVal = 1;
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
++ "First increment of the SGX event kicker value\n");
++ PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal,
++ psDevInfo->psKernelCCBEventKickerMemInfo,
++ 0,
++ sizeof(IMG_UINT32),
++ PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
++ PDUMPREG(SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0), EUR_CR_EVENT_KICK_NOW_MASK);
++ bFirstTime = IMG_FALSE;
++ }
++#endif
++
++#if !defined(NO_HARDWARE)
++
++
++ if (PollForValueKM(&psSGXHostCtl->ui32InitStatus,
++ PVRSRV_USSE_EDM_INIT_COMPLETE,
++ PVRSRV_USSE_EDM_INIT_COMPLETE,
++ MAX_HW_TIME_US/WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXInitialise: Wait for uKernel initialisation failed"));
++ PVR_DBG_BREAK;
++ return PVRSRV_ERROR_RETRY;
++ }
++#endif
++
++#if defined(PDUMP)
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
++ "Wait for the SGX microkernel initialisation to complete");
++ PDUMPMEMPOL(psSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32InitStatus),
++ PVRSRV_USSE_EDM_INIT_COMPLETE,
++ PVRSRV_USSE_EDM_INIT_COMPLETE,
++ PDUMP_POLL_OPERATOR_EQUAL,
++ PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psSGXHostCtlMemInfo));
++#endif
++
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++
++
++
++ WorkaroundBRN22997ReadHostPort(psDevInfo);
++#endif
++
++ PVR_ASSERT(psDevInfo->psKernelCCBCtl->ui32ReadOffset == psDevInfo->psKernelCCBCtl->ui32WriteOffset);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SGXDeinitialise(IMG_HANDLE hDevCookie)
++
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *) hDevCookie;
++ PVRSRV_ERROR eError;
++
++
++ if (psDevInfo->pvRegsBaseKM == IMG_NULL)
++ {
++ return PVRSRV_OK;
++ }
++
++ eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asDeinitCommands, SGX_MAX_DEINIT_COMMANDS);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXDeinitialise: SGXRunScript failed (%d)", eError));
++ return (PVRSRV_ERROR_GENERIC);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++static PVRSRV_ERROR DevInitSGXPart1 (IMG_VOID *pvDeviceNode)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ IMG_HANDLE hKernelDevMemContext;
++ IMG_DEV_PHYADDR sPDDevPAddr;
++ IMG_UINT32 i;
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++ PVRSRV_ERROR eError;
++
++ PDUMPCOMMENT("SGX Initialisation Part 1");
++
++
++ PDUMPCOMMENT("SGX Core Version Information: %s", SGX_CORE_FRIENDLY_NAME);
++#ifdef SGX_CORE_REV
++ PDUMPCOMMENT("SGX Core Revision Information: %d", SGX_CORE_REV);
++#else
++ PDUMPCOMMENT("SGX Core Revision Information: head rtl");
++#endif
++
++ #if defined(SGX_FEATURE_SYSTEM_CACHE)
++ PDUMPCOMMENT("SGX System Level Cache is present\r\n");
++ #if defined(SGX_BYPASS_SYSTEM_CACHE)
++ PDUMPCOMMENT("SGX System Level Cache is bypassed\r\n");
++ #endif
++ #endif
++
++
++ if(OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_SGXDEV_INFO),
++ (IMG_VOID **)&psDevInfo, IMG_NULL,
++ "SGX Device Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1 : Failed to alloc memory for DevInfo"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++ OSMemSet (psDevInfo, 0, sizeof(PVRSRV_SGXDEV_INFO));
++
++
++ psDevInfo->eDeviceType = DEV_DEVICE_TYPE;
++ psDevInfo->eDeviceClass = DEV_DEVICE_CLASS;
++
++
++ psDeviceNode->pvDevice = (IMG_PVOID)psDevInfo;
++
++
++ psDevInfo->pvDeviceMemoryHeap = (IMG_VOID*)psDeviceMemoryHeap;
++
++
++ hKernelDevMemContext = BM_CreateContext(psDeviceNode,
++ &sPDDevPAddr,
++ IMG_NULL,
++ IMG_NULL);
++ if (hKernelDevMemContext == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1: Failed BM_CreateContext"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ psDevInfo->sKernelPDDevPAddr = sPDDevPAddr;
++
++
++ for(i=0; i<psDeviceNode->sDevMemoryInfo.ui32HeapCount; i++)
++ {
++ IMG_HANDLE hDevMemHeap;
++
++ switch(psDeviceMemoryHeap[i].DevMemHeapType)
++ {
++ case DEVICE_MEMORY_HEAP_KERNEL:
++ case DEVICE_MEMORY_HEAP_SHARED:
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++ hDevMemHeap = BM_CreateHeap (hKernelDevMemContext,
++ &psDeviceMemoryHeap[i]);
++
++
++
++ psDeviceMemoryHeap[i].hDevMemHeap = hDevMemHeap;
++ break;
++ }
++ }
++ }
++
++ eError = MMU_BIFResetPDAlloc(psDevInfo);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGX : Failed to alloc memory for BIF reset"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle, SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ PVRSRV_ERROR eError;
++
++ PDUMPCOMMENT("SGXGetInfoForSrvinit");
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle;
++ psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++
++ psInitInfo->sPDDevPAddr = psDevInfo->sKernelPDDevPAddr;
++
++ eError = PVRSRVGetDeviceMemHeapsKM(hDevHandle, &psInitInfo->asHeapInfo[0]);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXGetInfoForSrvinit: PVRSRVGetDeviceMemHeapsKM failed (%d)", eError));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return eError;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR DevInitSGXPart2KM (PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hDevHandle,
++ SGX_BRIDGE_INIT_INFO *psInitInfo)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ PVRSRV_ERROR eError;
++ SGX_DEVICE_MAP *psSGXDeviceMap;
++ PVRSRV_DEV_POWER_STATE eDefaultPowerState;
++
++ PDUMPCOMMENT("SGX Initialisation Part 2");
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle;
++ psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++
++
++
++ eError = InitDevInfo(psPerProc, psDeviceNode, psInitInfo);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to load EDM program"));
++ goto failed_init_dev_info;
++ }
++
++
++ eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
++ (IMG_VOID**)&psSGXDeviceMap);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to get device memory map!"));
++ return PVRSRV_ERROR_INIT_FAILURE;
++ }
++
++
++ if (psSGXDeviceMap->pvRegsCpuVBase)
++ {
++ psDevInfo->pvRegsBaseKM = psSGXDeviceMap->pvRegsCpuVBase;
++ }
++ else
++ {
++
++ psDevInfo->pvRegsBaseKM = OSMapPhysToLin(psSGXDeviceMap->sRegsCpuPBase,
++ psSGXDeviceMap->ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ if (!psDevInfo->pvRegsBaseKM)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to map in regs\n"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ }
++ psDevInfo->ui32RegSize = psSGXDeviceMap->ui32RegsSize;
++ psDevInfo->sRegsPhysBase = psSGXDeviceMap->sRegsSysPBase;
++
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ if (psSGXDeviceMap->ui32Flags & SGX_HOSTPORT_PRESENT)
++ {
++
++ psDevInfo->pvHostPortBaseKM = OSMapPhysToLin(psSGXDeviceMap->sHPCpuPBase,
++ psSGXDeviceMap->ui32HPSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ if (!psDevInfo->pvHostPortBaseKM)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to map in host port\n"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ psDevInfo->ui32HPSize = psSGXDeviceMap->ui32HPSize;
++ psDevInfo->sHPSysPAddr = psSGXDeviceMap->sHPSysPBase;
++ }
++#endif
++
++#if defined (SYS_USING_INTERRUPTS)
++
++
++ psDeviceNode->pvISRData = psDeviceNode;
++
++ PVR_ASSERT(psDeviceNode->pfnDeviceISR == SGX_ISRHandler);
++
++#endif
++
++
++ psDevInfo->psSGXHostCtl->ui32PowerStatus |= PVRSRV_USSE_EDM_POWMAN_NO_WORK;
++ eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF;
++
++ eError = PVRSRVRegisterPowerDevice (psDeviceNode->sDevId.ui32DeviceIndex,
++ SGXPrePowerState, SGXPostPowerState,
++ SGXPreClockSpeedChange, SGXPostClockSpeedChange,
++ (IMG_HANDLE)psDeviceNode,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ eDefaultPowerState);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: failed to register device with power manager"));
++ return eError;
++ }
++
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++ eError = WorkaroundBRN22997Alloc(psDevInfo);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXInitialise : Failed to alloc memory for BRN22997 workaround"));
++ return eError;
++ }
++#endif
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++
++ psDevInfo->ui32ExtSysCacheRegsSize = psSGXDeviceMap->ui32ExtSysCacheRegsSize;
++ psDevInfo->sExtSysCacheRegsDevPBase = psSGXDeviceMap->sExtSysCacheRegsDevPBase;
++ eError = MMU_MapExtSystemCacheRegs(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXInitialise : Failed to map external system cache registers"));
++ return eError;
++ }
++#endif
++
++
++
++ OSMemSet(psDevInfo->psKernelCCB, 0, sizeof(PVRSRV_SGX_KERNEL_CCB));
++ OSMemSet(psDevInfo->psKernelCCBCtl, 0, sizeof(PVRSRV_SGX_CCB_CTL));
++ OSMemSet(psDevInfo->pui32KernelCCBEventKicker, 0, sizeof(*psDevInfo->pui32KernelCCBEventKicker));
++ PDUMPCOMMENT("Initialise Kernel CCB");
++ PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBMemInfo, 0, sizeof(PVRSRV_SGX_KERNEL_CCB), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBMemInfo));
++ PDUMPCOMMENT("Initialise Kernel CCB Control");
++ PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBCtlMemInfo, 0, sizeof(PVRSRV_SGX_CCB_CTL), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBCtlMemInfo));
++ PDUMPCOMMENT("Initialise Kernel CCB Event Kicker");
++ PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBEventKickerMemInfo, 0, sizeof(*psDevInfo->pui32KernelCCBEventKicker), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
++
++ return PVRSRV_OK;
++
++failed_init_dev_info:
++ return eError;
++}
++
++static PVRSRV_ERROR DevDeInitSGX (IMG_VOID *pvDeviceNode)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32Heap;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++ SGX_DEVICE_MAP *psSGXDeviceMap;
++
++ if (!psDevInfo)
++ {
++
++ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Null DevInfo"));
++ return PVRSRV_OK;
++ }
++
++#if defined(SUPPORT_HW_RECOVERY)
++ if (psDevInfo->hTimer)
++ {
++ eError = OSRemoveTimer(psDevInfo->hTimer);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to remove timer"));
++ return eError;
++ }
++ psDevInfo->hTimer = IMG_NULL;
++ }
++#endif
++
++#if defined(SUPPORT_EXTERNAL_SYSTEM_CACHE)
++
++ eError = MMU_UnmapExtSystemCacheRegs(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to unmap ext system cache registers"));
++ return eError;
++ }
++#endif
++
++#if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030) && defined(SGX_FEATURE_HOST_PORT)
++ WorkaroundBRN22997Free(psDevInfo);
++#endif
++
++ MMU_BIFResetPDFree(psDevInfo);
++
++
++
++
++ DeinitDevInfo(psDevInfo);
++
++
++ psDeviceMemoryHeap = (DEVICE_MEMORY_HEAP_INFO *)psDevInfo->pvDeviceMemoryHeap;
++ for(ui32Heap=0; ui32Heap<psDeviceNode->sDevMemoryInfo.ui32HeapCount; ui32Heap++)
++ {
++ switch(psDeviceMemoryHeap[ui32Heap].DevMemHeapType)
++ {
++ case DEVICE_MEMORY_HEAP_KERNEL:
++ case DEVICE_MEMORY_HEAP_SHARED:
++ case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++ {
++ if (psDeviceMemoryHeap[ui32Heap].hDevMemHeap != IMG_NULL)
++ {
++ BM_DestroyHeap(psDeviceMemoryHeap[ui32Heap].hDevMemHeap);
++ }
++ break;
++ }
++ }
++ }
++
++
++ eError = BM_DestroyContext(psDeviceNode->sDevMemoryInfo.pBMKernelContext, IMG_NULL);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX : Failed to destroy kernel context"));
++ return eError;
++ }
++
++
++ eError = PVRSRVRemovePowerDevice (((PVRSRV_DEVICE_NODE*)pvDeviceNode)->sDevId.ui32DeviceIndex);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
++ (IMG_VOID**)&psSGXDeviceMap);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to get device memory map!"));
++ return eError;
++ }
++
++
++ if (!psSGXDeviceMap->pvRegsCpuVBase)
++ {
++
++ if (psDevInfo->pvRegsBaseKM != IMG_NULL)
++ {
++ OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM,
++ psDevInfo->ui32RegSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ }
++ }
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ if (psSGXDeviceMap->ui32Flags & SGX_HOSTPORT_PRESENT)
++ {
++
++ if (psDevInfo->pvHostPortBaseKM != IMG_NULL)
++ {
++ OSUnMapPhysToLin(psDevInfo->pvHostPortBaseKM,
++ psDevInfo->ui32HPSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ }
++ }
++#endif
++
++
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_SGXDEV_INFO),
++ psDevInfo,
++ 0);
++
++ psDeviceNode->pvDevice = IMG_NULL;
++
++ if (psDeviceMemoryHeap != IMG_NULL)
++ {
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID,
++ psDeviceMemoryHeap,
++ 0);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_VOID SGXDumpDebugInfo (PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_BOOL bDumpSGXRegs)
++{
++ IMG_UINT ui32RegVal;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++
++ if (bDumpSGXRegs)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGX Register Base Address (Linear): 0x%08X", psDevInfo->pvRegsBaseKM));
++ PVR_DPF((PVR_DBG_ERROR,"SGX Register Base Address (Physical): 0x%08X", psDevInfo->sRegsPhysBase));
++
++
++
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
++ if (ui32RegVal & (EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK | EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK))
++ {
++ PVR_LOG(("DPM out of memory!!"));
++ }
++ PVR_LOG(("EUR_CR_EVENT_STATUS: %x", ui32RegVal));
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2);
++ PVR_LOG(("EUR_CR_EVENT_STATUS2: %x", ui32RegVal));
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL);
++ PVR_LOG(("EUR_CR_BIF_CTRL: %x", ui32RegVal));
++
++ #if defined(EUR_CR_BIF_BANK0)
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0);
++ PVR_LOG(("EUR_CR_BIF_BANK0: %x", ui32RegVal));
++ #endif
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
++ PVR_LOG(("EUR_CR_BIF_INT_STAT: %x", ui32RegVal));
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT);
++ PVR_LOG(("EUR_CR_BIF_FAULT: %x", ui32RegVal));
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_MEM_REQ_STAT);
++ PVR_LOG(("EUR_CR_BIF_MEM_REQ_STAT: %x", ui32RegVal));
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_CLKGATECTL);
++ PVR_LOG(("EUR_CR_CLKGATECTL: %x", ui32RegVal));
++
++ #if defined(EUR_CR_PDS_PC_BASE)
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_PDS_PC_BASE);
++ PVR_LOG(("EUR_CR_PDS_PC_BASE: %x", ui32RegVal));
++ #endif
++
++
++ }
++
++ #if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
++ {
++ IMG_UINT32 *pui32MKTraceBuffer = psDevInfo->psKernelEDMStatusBufferMemInfo->pvLinAddrKM;
++ IMG_UINT32 ui32LastStatusCode, ui32WriteOffset;
++
++ ui32LastStatusCode = *pui32MKTraceBuffer;
++ pui32MKTraceBuffer++;
++ ui32WriteOffset = *pui32MKTraceBuffer;
++ pui32MKTraceBuffer++;
++
++ PVR_LOG(("Last SGX microkernel status code: 0x%x", ui32LastStatusCode));
++
++ #if defined(PVRSRV_DUMP_MK_TRACE)
++
++
++ {
++ IMG_UINT32 ui32LoopCounter;
++
++ for (ui32LoopCounter = 0;
++ ui32LoopCounter < SGXMK_TRACE_BUFFER_SIZE;
++ ui32LoopCounter++)
++ {
++ IMG_UINT32 *pui32BufPtr;
++ pui32BufPtr = pui32MKTraceBuffer +
++ (((ui32WriteOffset + ui32LoopCounter) % SGXMK_TRACE_BUFFER_SIZE) * 4);
++ PVR_LOG(("(MKT%u) %08X %08X %08X %08X", ui32LoopCounter,
++ pui32BufPtr[2], pui32BufPtr[3], pui32BufPtr[1], pui32BufPtr[0]));
++ }
++ }
++ #endif
++ }
++ #endif
++
++ {
++
++
++ IMG_UINT32 *pui32HostCtlBuffer = (IMG_UINT32 *)psDevInfo->psSGXHostCtl;
++ IMG_UINT32 ui32LoopCounter;
++
++ PVR_LOG(("SGX Host control:"));
++
++ for (ui32LoopCounter = 0;
++ ui32LoopCounter < sizeof(*psDevInfo->psSGXHostCtl) / sizeof(*pui32HostCtlBuffer);
++ ui32LoopCounter += 4)
++ {
++ PVR_LOG(("\t0x%X: 0x%08X 0x%08X 0x%08X 0x%08X", ui32LoopCounter * sizeof(*pui32HostCtlBuffer),
++ pui32HostCtlBuffer[ui32LoopCounter + 0], pui32HostCtlBuffer[ui32LoopCounter + 1],
++ pui32HostCtlBuffer[ui32LoopCounter + 2], pui32HostCtlBuffer[ui32LoopCounter + 3]));
++ }
++ }
++
++ {
++
++
++ IMG_UINT32 *pui32TA3DCtlBuffer = psDevInfo->psKernelSGXTA3DCtlMemInfo->pvLinAddrKM;
++ IMG_UINT32 ui32LoopCounter;
++
++ PVR_LOG(("SGX TA/3D control:"));
++
++ for (ui32LoopCounter = 0;
++ ui32LoopCounter < psDevInfo->psKernelSGXTA3DCtlMemInfo->ui32AllocSize / sizeof(*pui32TA3DCtlBuffer);
++ ui32LoopCounter += 4)
++ {
++ PVR_LOG(("\t0x%X: 0x%08X 0x%08X 0x%08X 0x%08X", ui32LoopCounter * sizeof(*pui32TA3DCtlBuffer),
++ pui32TA3DCtlBuffer[ui32LoopCounter + 0], pui32TA3DCtlBuffer[ui32LoopCounter + 1],
++ pui32TA3DCtlBuffer[ui32LoopCounter + 2], pui32TA3DCtlBuffer[ui32LoopCounter + 3]));
++ }
++ }
++
++ QueueDumpDebugInfo();
++}
++
++
++#if defined(SYS_USING_INTERRUPTS) || defined(SUPPORT_HW_RECOVERY)
++static
++IMG_VOID HWRecoveryResetSGX (PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_UINT32 ui32Component,
++ IMG_UINT32 ui32CallerID)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psSGXHostCtl;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Component);
++
++
++
++ eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE);
++ if(eError != PVRSRV_OK)
++ {
++
++
++
++ PVR_DPF((PVR_DBG_WARNING,"HWRecoveryResetSGX: Power transition in progress"));
++ return;
++ }
++
++ psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_HWR;
++
++ PVR_LOG(("HWRecoveryResetSGX: SGX Hardware Recovery triggered"));
++
++ SGXDumpDebugInfo(psDeviceNode, IMG_TRUE);
++
++
++ PDUMPSUSPEND();
++
++
++ eError = SGXInitialise(psDevInfo);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"HWRecoveryResetSGX: SGXInitialise failed (%d)", eError));
++ }
++
++
++ PDUMPRESUME();
++
++ PVRSRVPowerUnlock(ui32CallerID);
++
++
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++
++
++
++ PVRSRVProcessQueues(ui32CallerID, IMG_TRUE);
++}
++#endif
++
++
++#if defined(SUPPORT_HW_RECOVERY)
++IMG_VOID SGXOSTimer(IMG_VOID *pvData)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ static IMG_UINT32 ui32EDMTasks = 0;
++ static IMG_UINT32 ui32LockupCounter = 0;
++ static IMG_UINT32 ui32NumResets = 0;
++ IMG_UINT32 ui32CurrentEDMTasks;
++ IMG_BOOL bLockup = IMG_FALSE;
++ IMG_BOOL bPoweredDown;
++
++
++ psDevInfo->ui32TimeStamp++;
++
++#if defined(NO_HARDWARE)
++ bPoweredDown = IMG_TRUE;
++#else
++ bPoweredDown = SGXIsDevicePowered(psDeviceNode) ? IMG_FALSE : IMG_TRUE;
++#endif
++
++
++
++ if (bPoweredDown)
++ {
++ ui32LockupCounter = 0;
++ }
++ else
++ {
++
++ ui32CurrentEDMTasks = OSReadHWReg(psDevInfo->pvRegsBaseKM, psDevInfo->ui32EDMTaskReg0);
++ if (psDevInfo->ui32EDMTaskReg1 != 0)
++ {
++ ui32CurrentEDMTasks ^= OSReadHWReg(psDevInfo->pvRegsBaseKM, psDevInfo->ui32EDMTaskReg1);
++ }
++ if ((ui32CurrentEDMTasks == ui32EDMTasks) &&
++ (psDevInfo->ui32NumResets == ui32NumResets))
++ {
++ ui32LockupCounter++;
++ if (ui32LockupCounter == 3)
++ {
++ ui32LockupCounter = 0;
++ PVR_DPF((PVR_DBG_ERROR, "SGXOSTimer() detected SGX lockup (0x%x tasks)", ui32EDMTasks));
++
++ bLockup = IMG_TRUE;
++ }
++ }
++ else
++ {
++ ui32LockupCounter = 0;
++ ui32EDMTasks = ui32CurrentEDMTasks;
++ ui32NumResets = psDevInfo->ui32NumResets;
++ }
++ }
++
++ if (bLockup)
++ {
++ SGXMKIF_HOST_CTL *psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psSGXHostCtl;
++
++
++ psSGXHostCtl->ui32HostDetectedLockups ++;
++
++
++ HWRecoveryResetSGX(psDeviceNode, 0, KERNEL_ID);
++ }
++}
++#endif
++
++
++#if defined(SYS_USING_INTERRUPTS)
++
++IMG_BOOL SGX_ISRHandler (IMG_VOID *pvData)
++{
++ IMG_BOOL bInterruptProcessed = IMG_FALSE;
++
++
++
++ {
++ IMG_UINT32 ui32EventStatus, ui32EventEnable;
++ IMG_UINT32 ui32EventClear = 0;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++
++
++ if(pvData == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGX_ISRHandler: Invalid params\n"));
++ return bInterruptProcessed;
++ }
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
++ psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++
++ ui32EventStatus = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
++ ui32EventEnable = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_ENABLE);
++
++
++
++ gui32EventStatusServicesByISR = ui32EventStatus;
++
++
++ ui32EventStatus &= ui32EventEnable;
++
++ if (ui32EventStatus & EUR_CR_EVENT_STATUS_SW_EVENT_MASK)
++ {
++ ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK;
++ }
++
++ if (ui32EventClear)
++ {
++ bInterruptProcessed = IMG_TRUE;
++
++
++ ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK;
++
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32EventClear);
++ }
++ }
++
++ return bInterruptProcessed;
++}
++
++
++IMG_VOID SGX_MISRHandler (IMG_VOID *pvData)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = (SGXMKIF_HOST_CTL *)psDevInfo->psSGXHostCtl;
++
++ if (((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR) != 0UL) &&
++ ((psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR) == 0UL))
++ {
++ HWRecoveryResetSGX(psDeviceNode, 0, ISR_ID);
++ }
++
++#if defined(OS_SUPPORTS_IN_LISR)
++ if (psDeviceNode->bReProcessDeviceCommandComplete)
++ {
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++ }
++#endif
++
++ SGXTestActivePowerEvent(psDeviceNode, ISR_ID);
++}
++#endif
++
++
++PVRSRV_ERROR SGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++
++
++ psDeviceNode->sDevId.eDeviceType = DEV_DEVICE_TYPE;
++ psDeviceNode->sDevId.eDeviceClass = DEV_DEVICE_CLASS;
++
++ psDeviceNode->pfnInitDevice = DevInitSGXPart1;
++ psDeviceNode->pfnDeInitDevice = DevDeInitSGX;
++
++ psDeviceNode->pfnInitDeviceCompatCheck = SGXDevInitCompatCheck;
++
++
++
++ psDeviceNode->pfnMMUInitialise = MMU_Initialise;
++ psDeviceNode->pfnMMUFinalise = MMU_Finalise;
++ psDeviceNode->pfnMMUInsertHeap = MMU_InsertHeap;
++ psDeviceNode->pfnMMUCreate = MMU_Create;
++ psDeviceNode->pfnMMUDelete = MMU_Delete;
++ psDeviceNode->pfnMMUAlloc = MMU_Alloc;
++ psDeviceNode->pfnMMUFree = MMU_Free;
++ psDeviceNode->pfnMMUMapPages = MMU_MapPages;
++ psDeviceNode->pfnMMUMapShadow = MMU_MapShadow;
++ psDeviceNode->pfnMMUUnmapPages = MMU_UnmapPages;
++ psDeviceNode->pfnMMUMapScatter = MMU_MapScatter;
++ psDeviceNode->pfnMMUGetPhysPageAddr = MMU_GetPhysPageAddr;
++ psDeviceNode->pfnMMUGetPDDevPAddr = MMU_GetPDDevPAddr;
++
++#if defined (SYS_USING_INTERRUPTS)
++
++
++ psDeviceNode->pfnDeviceISR = SGX_ISRHandler;
++ psDeviceNode->pfnDeviceMISR = SGX_MISRHandler;
++#endif
++
++
++
++ psDeviceNode->pfnDeviceCommandComplete = SGXCommandComplete;
++
++
++
++ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++
++ psDevMemoryInfo->ui32AddressSpaceSizeLog2 = SGX_FEATURE_ADDRESS_SPACE_SIZE;
++
++
++ psDevMemoryInfo->ui32Flags = 0;
++
++
++ if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID,
++ (IMG_VOID **)&psDevMemoryInfo->psDeviceMemoryHeap, 0,
++ "Array of Device Memory Heap Info") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXRegisterDevice : Failed to alloc memory for DEVICE_MEMORY_HEAP_INFO"));
++ return (PVRSRV_ERROR_OUT_OF_MEMORY);
++ }
++ OSMemSet(psDevMemoryInfo->psDeviceMemoryHeap, 0, sizeof(DEVICE_MEMORY_HEAP_INFO) * SGX_MAX_HEAP_ID);
++
++ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++
++
++
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_GENERAL_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_GENERAL_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_GENERAL_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "General";
++ psDeviceMemoryHeap->pszBSName = "General BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++#if !defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
++
++ psDevMemoryInfo->ui32MappingHeapID = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
++#endif
++ psDeviceMemoryHeap++;
++
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_TADATA_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_TADATA_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_TADATA_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->pszName = "TA Data";
++ psDeviceMemoryHeap->pszBSName = "TA Data BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_KERNEL_CODE_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_KERNEL_CODE_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_KERNEL_CODE_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->pszName = "Kernel Code";
++ psDeviceMemoryHeap->pszBSName = "Kernel Code BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_KERNEL_DATA_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_KERNEL_DATA_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_KERNEL_DATA_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->pszName = "KernelData";
++ psDeviceMemoryHeap->pszBSName = "KernelData BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PIXELSHADER_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PIXELSHADER_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_PIXELSHADER_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "PixelShaderUSSE";
++ psDeviceMemoryHeap->pszBSName = "PixelShaderUSSE BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_VERTEXSHADER_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_VERTEXSHADER_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_VERTEXSHADER_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "VertexShaderUSSE";
++ psDeviceMemoryHeap->pszBSName = "VertexShaderUSSE BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PDSPIXEL_CODEDATA_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PDSPIXEL_CODEDATA_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_PDSPIXEL_CODEDATA_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "PDSPixelCodeData";
++ psDeviceMemoryHeap->pszBSName = "PDSPixelCodeData BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_PDSVERTEX_CODEDATA_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_PDSVERTEX_CODEDATA_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_PDSVERTEX_CODEDATA_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "PDSVertexCodeData";
++ psDeviceMemoryHeap->pszBSName = "PDSVertexCodeData BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_SYNCINFO_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_SYNCINFO_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_SYNCINFO_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->pszName = "CacheCoherent";
++ psDeviceMemoryHeap->pszBSName = "CacheCoherent BS";
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++
++ psDevMemoryInfo->ui32SyncHeapID = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
++ psDeviceMemoryHeap++;
++
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_3DPARAMETERS_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_3DPARAMETERS_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_3DPARAMETERS_HEAP_SIZE;
++ psDeviceMemoryHeap->pszName = "3DParameters";
++ psDeviceMemoryHeap->pszBSName = "3DParameters BS";
++#if defined(SUPPORT_PERCONTEXT_PB)
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++#else
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++#endif
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++
++
++#if defined(SUPPORT_SGX_GENERAL_MAPPING_HEAP)
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_GENERAL_MAPPING_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_GENERAL_MAPPING_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_GENERAL_MAPPING_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_MULTI_PROCESS;
++ psDeviceMemoryHeap->pszName = "GeneralMapping";
++ psDeviceMemoryHeap->pszBSName = "GeneralMapping BS";
++ #if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) && defined(FIX_HW_BRN_23410)
++
++
++
++
++
++
++
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++#else
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++#endif
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++
++ psDevMemoryInfo->ui32MappingHeapID = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
++ psDeviceMemoryHeap++;
++#endif
++
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_2D_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_2D_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_2D_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "2D";
++ psDeviceMemoryHeap->pszBSName = "2D BS";
++
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++#endif
++
++
++#if defined(FIX_HW_BRN_26915)
++
++
++ psDeviceMemoryHeap->ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX, SGX_CGBUFFER_HEAP_ID);
++ psDeviceMemoryHeap->sDevVAddrBase.uiAddr = SGX_CGBUFFER_HEAP_BASE;
++ psDeviceMemoryHeap->ui32HeapSize = SGX_CGBUFFER_HEAP_SIZE;
++ psDeviceMemoryHeap->ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++ | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++ | PVRSRV_HAP_SINGLE_PROCESS;
++ psDeviceMemoryHeap->pszName = "CGBuffer";
++ psDeviceMemoryHeap->pszBSName = "CGBuffer BS";
++
++ psDeviceMemoryHeap->DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++ psDeviceMemoryHeap->ui32DataPageSize = SGX_MMU_PAGE_SIZE;
++ psDeviceMemoryHeap++;
++#endif
++
++
++ psDevMemoryInfo->ui32HeapCount = (IMG_UINT32)(psDeviceMemoryHeap - psDevMemoryInfo->psDeviceMemoryHeap);
++
++ return PVRSRV_OK;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE hDevCookie,
++ SGX_CLIENT_INFO* psClientInfo)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++
++
++ psDevInfo->ui32ClientRefCount++;
++
++#if defined(PDUMP)
++
++ psDevInfo->psKernelCCBInfo->ui32CCBDumpWOff = 0;
++#endif
++
++
++ psClientInfo->ui32ProcessID = OSGetCurrentProcessIDKM();
++
++
++
++ OSMemCopy(&psClientInfo->asDevData, &psDevInfo->asSGXDevData, sizeof(psClientInfo->asDevData));
++
++
++ return PVRSRV_OK;
++}
++
++
++IMG_VOID SGXPanic(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVR_LOG(("SGX panic"));
++ SGXDumpDebugInfo(psDeviceNode, IMG_FALSE);
++ OSPanic();
++}
++
++
++PVRSRV_ERROR SGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++ IMG_UINT32 ui32BuildOptions, ui32BuildOptionsMismatch;
++#if !defined(NO_HARDWARE)
++ PPVRSRV_KERNEL_MEM_INFO psMemInfo;
++ PVRSRV_SGX_MISCINFO_INFO *psSGXMiscInfoInt;
++ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++ SGX_MISCINFO_STRUCT_SIZES *psSGXStructSizes;
++ IMG_BOOL bStructSizesFailed;
++
++
++ IMG_BOOL bCheckCoreRev;
++ const IMG_UINT32 aui32CoreRevExceptions[] = {
++ 0x10100, 0x10101
++ };
++ const IMG_UINT32 ui32NumCoreExceptions = sizeof(aui32CoreRevExceptions) / (2*sizeof(IMG_UINT32));
++ IMG_UINT i;
++#endif
++
++
++ if(psDeviceNode->sDevId.eDeviceType != PVRSRV_DEVICE_TYPE_SGX)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Device not of type SGX"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ goto chk_exit;
++ }
++
++ psDevInfo = psDeviceNode->pvDevice;
++
++
++
++ ui32BuildOptions = (SGX_BUILD_OPTIONS);
++ if (ui32BuildOptions != psDevInfo->ui32ClientBuildOptions)
++ {
++ ui32BuildOptionsMismatch = ui32BuildOptions ^ psDevInfo->ui32ClientBuildOptions;
++ if ( (psDevInfo->ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Mismatch in client-side and KM driver build options; "
++ "extra options present in client-side driver: (0x%lx). Please check sgx_options.h",
++ psDevInfo->ui32ClientBuildOptions & ui32BuildOptionsMismatch ));
++ }
++
++ if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Mismatch in client-side and KM driver build options; "
++ "extra options present in KM: (0x%lx). Please check sgx_options.h",
++ ui32BuildOptions & ui32BuildOptionsMismatch ));
++ }
++ eError = PVRSRV_ERROR_BUILD_MISMATCH;
++ goto chk_exit;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: Client-side and KM driver build options match. [ OK ]"));
++ }
++
++#if !defined (NO_HARDWARE)
++ psMemInfo = psDevInfo->psKernelSGXMiscMemInfo;
++
++
++ psSGXMiscInfoInt = psMemInfo->pvLinAddrKM;
++ psSGXMiscInfoInt->ui32MiscInfoFlags = 0;
++ psSGXMiscInfoInt->ui32MiscInfoFlags |= PVRSRV_USSE_MISCINFO_GET_STRUCT_SIZES;
++ eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode);
++
++
++ if(eError != PVRSRV_OK)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Unable to validate device DDK version"));
++ goto chk_exit;
++ }
++ psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures;
++ if( (psSGXFeatures->ui32DDKVersion !=
++ ((PVRVERSION_MAJ << 16) |
++ (PVRVERSION_MIN << 8) |
++ PVRVERSION_BRANCH) ) ||
++ (psSGXFeatures->ui32DDKBuild != PVRVERSION_BUILD) )
++ {
++ PVR_LOG(("(FAIL) SGXInit: Incompatible driver DDK revision (%ld)/device DDK revision (%ld).",
++ PVRVERSION_BUILD, psSGXFeatures->ui32DDKBuild));
++ eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH;
++ PVR_DBG_BREAK;
++ goto chk_exit;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: driver DDK (%ld) and device DDK (%ld) match. [ OK ]",
++ PVRVERSION_BUILD, psSGXFeatures->ui32DDKBuild));
++ }
++
++
++ if (psSGXFeatures->ui32CoreRevSW == 0)
++ {
++
++
++ PVR_LOG(("SGXInit: HW core rev (%lx) check skipped.",
++ psSGXFeatures->ui32CoreRev));
++ }
++ else
++ {
++
++ bCheckCoreRev = IMG_TRUE;
++ for(i=0; i<ui32NumCoreExceptions; i+=2)
++ {
++ if( (psSGXFeatures->ui32CoreRev==aui32CoreRevExceptions[i]) &&
++ (psSGXFeatures->ui32CoreRevSW==aui32CoreRevExceptions[i+1]) )
++ {
++ PVR_LOG(("SGXInit: HW core rev (%lx), SW core rev (%lx) check skipped.",
++ psSGXFeatures->ui32CoreRev,
++ psSGXFeatures->ui32CoreRevSW));
++ bCheckCoreRev = IMG_FALSE;
++ }
++ }
++
++ if (bCheckCoreRev)
++ {
++ if (psSGXFeatures->ui32CoreRev != psSGXFeatures->ui32CoreRevSW)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Incompatible HW core rev (%lx) and SW core rev (%lx).",
++ psSGXFeatures->ui32CoreRev, psSGXFeatures->ui32CoreRevSW));
++ eError = PVRSRV_ERROR_BUILD_MISMATCH;
++ goto chk_exit;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: HW core rev (%lx) and SW core rev (%lx) match. [ OK ]",
++ psSGXFeatures->ui32CoreRev, psSGXFeatures->ui32CoreRevSW));
++ }
++ }
++ }
++
++
++ psSGXStructSizes = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXStructSizes;
++
++ bStructSizesFailed = IMG_FALSE;
++
++ CHECK_SIZE(HOST_CTL);
++ CHECK_SIZE(COMMAND);
++#if defined(SGX_FEATURE_2D_HARDWARE)
++ CHECK_SIZE(2DCMD);
++ CHECK_SIZE(2DCMD_SHARED);
++#endif
++ CHECK_SIZE(CMDTA);
++ CHECK_SIZE(CMDTA_SHARED);
++ CHECK_SIZE(TRANSFERCMD);
++ CHECK_SIZE(TRANSFERCMD_SHARED);
++
++ CHECK_SIZE(3DREGISTERS);
++ CHECK_SIZE(HWPBDESC);
++ CHECK_SIZE(HWRENDERCONTEXT);
++ CHECK_SIZE(HWRENDERDETAILS);
++ CHECK_SIZE(HWRTDATA);
++ CHECK_SIZE(HWRTDATASET);
++ CHECK_SIZE(HWTRANSFERCONTEXT);
++
++ if (bStructSizesFailed == IMG_TRUE)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Mismatch in SGXMKIF structure sizes."));
++ eError = PVRSRV_ERROR_BUILD_MISMATCH;
++ goto chk_exit;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: SGXMKIF structure sizes match. [ OK ]"));
++ }
++
++
++ ui32BuildOptions = psSGXFeatures->ui32BuildOptions;
++ if (ui32BuildOptions != (SGX_BUILD_OPTIONS))
++ {
++ ui32BuildOptionsMismatch = ui32BuildOptions ^ (SGX_BUILD_OPTIONS);
++ if ( ((SGX_BUILD_OPTIONS) & ui32BuildOptionsMismatch) != 0)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Mismatch in driver and microkernel build options; "
++ "extra options present in driver: (0x%lx). Please check sgx_options.h",
++ (SGX_BUILD_OPTIONS) & ui32BuildOptionsMismatch ));
++ }
++
++ if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
++ {
++ PVR_LOG(("(FAIL) SGXInit: Mismatch in driver and microkernel build options; "
++ "extra options present in microkernel: (0x%lx). Please check sgx_options.h",
++ ui32BuildOptions & ui32BuildOptionsMismatch ));
++ }
++ eError = PVRSRV_ERROR_BUILD_MISMATCH;
++ goto chk_exit;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "SGXInit: Driver and microkernel build options match. [ OK ]"));
++ }
++#endif
++
++ eError = PVRSRV_OK;
++chk_exit:
++#if defined(IGNORE_SGX_INIT_COMPATIBILITY_CHECK)
++ return PVRSRV_OK;
++#else
++ return eError;
++#endif
++}
++
++static
++PVRSRV_ERROR SGXGetMiscInfoUkernel(PVRSRV_SGXDEV_INFO *psDevInfo,
++ PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ SGXMKIF_COMMAND sCommandData;
++ PVRSRV_SGX_MISCINFO_INFO *psSGXMiscInfoInt;
++ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++ SGX_MISCINFO_STRUCT_SIZES *psSGXStructSizes;
++
++ PPVRSRV_KERNEL_MEM_INFO psMemInfo = psDevInfo->psKernelSGXMiscMemInfo;
++
++ if (! psMemInfo->pvLinAddrKM)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoUkernel: Invalid address."));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ psSGXMiscInfoInt = psMemInfo->pvLinAddrKM;
++ psSGXFeatures = &psSGXMiscInfoInt->sSGXFeatures;
++ psSGXStructSizes = &psSGXMiscInfoInt->sSGXStructSizes;
++
++ psSGXMiscInfoInt->ui32MiscInfoFlags &= ~PVRSRV_USSE_MISCINFO_READY;
++
++
++ OSMemSet(psSGXFeatures, 0, sizeof(*psSGXFeatures));
++ OSMemSet(psSGXStructSizes, 0, sizeof(*psSGXStructSizes));
++
++
++ sCommandData.ui32Data[1] = psMemInfo->sDevVAddr.uiAddr;
++
++ eError = SGXScheduleCCBCommandKM(psDeviceNode,
++ SGXMKIF_CMD_GETMISCINFO,
++ &sCommandData,
++ KERNEL_ID,
++ 0);
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoUkernel: SGXScheduleCCBCommandKM failed."));
++ return eError;
++ }
++
++
++#if !defined(NO_HARDWARE)
++ {
++ IMG_BOOL bExit;
++
++ bExit = IMG_FALSE;
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++ {
++ if ((psSGXMiscInfoInt->ui32MiscInfoFlags & PVRSRV_USSE_MISCINFO_READY) != 0)
++ {
++ bExit = IMG_TRUE;
++ break;
++ }
++ } END_LOOP_UNTIL_TIMEOUT();
++
++
++ if (!bExit)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXGetMiscInfoUkernel: Timeout occurred waiting for misc info."));
++ return PVRSRV_ERROR_TIMEOUT;
++ }
++ }
++#endif
++
++ return PVRSRV_OK;
++}
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO *psDevInfo,
++ SGX_MISC_INFO *psMiscInfo,
++ PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_HANDLE hDevMemContext)
++{
++ PPVRSRV_KERNEL_MEM_INFO psMemInfo = psDevInfo->psKernelSGXMiscMemInfo;
++ IMG_UINT32 *pui32MiscInfoFlags = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->ui32MiscInfoFlags;
++
++
++ *pui32MiscInfoFlags = 0;
++
++#if !defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++ PVR_UNREFERENCED_PARAMETER(hDevMemContext);
++#endif
++
++ switch(psMiscInfo->eRequest)
++ {
++#if defined(SGX_FEATURE_DATA_BREAKPOINTS)
++ case SGX_MISC_INFO_REQUEST_SET_BREAKPOINT:
++ {
++ IMG_UINT32 ui32RegOffset;
++ IMG_UINT32 ui32RegVal;
++ IMG_UINT32 ui32BaseRegOffset;
++ IMG_UINT32 ui32BaseRegVal;
++ IMG_UINT32 ui32MaskRegOffset;
++ IMG_UINT32 ui32MaskRegVal;
++
++ switch(psMiscInfo->uData.sSGXBreakpointInfo.ui32BPIndex)
++ {
++ case 0:
++ ui32RegOffset = EUR_CR_BREAKPOINT0;
++ ui32BaseRegOffset = EUR_CR_BREAKPOINT0_BASE;
++ ui32MaskRegOffset = EUR_CR_BREAKPOINT0_MASK;
++ break;
++ case 1:
++ ui32RegOffset = EUR_CR_BREAKPOINT1;
++ ui32BaseRegOffset = EUR_CR_BREAKPOINT1_BASE;
++ ui32MaskRegOffset = EUR_CR_BREAKPOINT1_MASK;
++ break;
++ case 2:
++ ui32RegOffset = EUR_CR_BREAKPOINT2;
++ ui32BaseRegOffset = EUR_CR_BREAKPOINT2_BASE;
++ ui32MaskRegOffset = EUR_CR_BREAKPOINT2_MASK;
++ break;
++ case 3:
++ ui32RegOffset = EUR_CR_BREAKPOINT3;
++ ui32BaseRegOffset = EUR_CR_BREAKPOINT3_BASE;
++ ui32MaskRegOffset = EUR_CR_BREAKPOINT3_MASK;
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,"SGXGetMiscInfoKM: SGX_MISC_INFO_REQUEST_SET_BREAKPOINT invalid BP idx %d", psMiscInfo->uData.sSGXBreakpointInfo.ui32BPIndex));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ if(psMiscInfo->uData.sSGXBreakpointInfo.bBPEnable)
++ {
++
++ IMG_DEV_VIRTADDR sBPDevVAddr = psMiscInfo->uData.sSGXBreakpointInfo.sBPDevVAddr;
++
++
++ ui32MaskRegVal = EUR_CR_BREAKPOINT0_MASK_REGION_MASK | EUR_CR_BREAKPOINT0_MASK_DM_MASK;
++
++
++ ui32BaseRegVal = sBPDevVAddr.uiAddr & EUR_CR_BREAKPOINT0_BASE_ADDRESS_MASK;
++
++
++ ui32RegVal = EUR_CR_BREAKPOINT0_CTRL_WENABLE_MASK
++ | EUR_CR_BREAKPOINT0_CTRL_WENABLE_MASK
++ | EUR_CR_BREAKPOINT0_CTRL_TRAPENABLE_MASK;
++ }
++ else
++ {
++
++ ui32RegVal = ui32BaseRegVal = ui32MaskRegVal = 0;
++ }
++
++
++
++
++
++
++
++
++
++
++ return PVRSRV_OK;
++ }
++#endif
++
++ case SGX_MISC_INFO_REQUEST_CLOCKSPEED:
++ {
++ psMiscInfo->uData.ui32SGXClockSpeed = psDevInfo->ui32CoreClockSpeed;
++ return PVRSRV_OK;
++ }
++
++ case SGX_MISC_INFO_REQUEST_SGXREV:
++ {
++ PVRSRV_ERROR eError;
++ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++
++ eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "An error occurred in SGXGetMiscInfoUkernel: %d\n",
++ eError));
++ return eError;
++ }
++ psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures;
++
++
++ psMiscInfo->uData.sSGXFeatures = *psSGXFeatures;
++
++
++ PVR_DPF((PVR_DBG_MESSAGE, "SGXGetMiscInfoKM: Core 0x%lx, sw ID 0x%lx, sw Rev 0x%lx\n",
++ psSGXFeatures->ui32CoreRev,
++ psSGXFeatures->ui32CoreIdSW,
++ psSGXFeatures->ui32CoreRevSW));
++ PVR_DPF((PVR_DBG_MESSAGE, "SGXGetMiscInfoKM: DDK version 0x%lx, DDK build 0x%lx\n",
++ psSGXFeatures->ui32DDKVersion,
++ psSGXFeatures->ui32DDKBuild));
++
++
++ return PVRSRV_OK;
++ }
++
++ case SGX_MISC_INFO_REQUEST_DRIVER_SGXREV:
++ {
++ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++
++ psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures;
++
++
++ OSMemSet(psMemInfo->pvLinAddrKM, 0,
++ sizeof(PVRSRV_SGX_MISCINFO_INFO));
++
++ psSGXFeatures->ui32DDKVersion =
++ (PVRVERSION_MAJ << 16) |
++ (PVRVERSION_MIN << 8) |
++ PVRVERSION_BRANCH;
++ psSGXFeatures->ui32DDKBuild = PVRVERSION_BUILD;
++
++
++ psSGXFeatures->ui32BuildOptions = (SGX_BUILD_OPTIONS);
++
++
++ psMiscInfo->uData.sSGXFeatures = *psSGXFeatures;
++ return PVRSRV_OK;
++ }
++
++#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
++ case SGX_MISC_INFO_REQUEST_MEMREAD:
++ {
++ PVRSRV_ERROR eError;
++ PPVRSRV_KERNEL_MEM_INFO psMemInfo = psDevInfo->psKernelSGXMiscMemInfo;
++ PVRSRV_SGX_MISCINFO_FEATURES *psSGXFeatures;
++ PVRSRV_SGX_MISCINFO_MEMREAD *psSGXMemReadData;
++
++ psSGXMemReadData = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXMemReadData;
++
++
++ *pui32MiscInfoFlags |= PVRSRV_USSE_MISCINFO_MEMREAD;
++
++
++ if(psMiscInfo->hDevMemContext != IMG_NULL)
++ {
++ SGXGetMMUPDAddrKM( (IMG_HANDLE)psDeviceNode, hDevMemContext, &psSGXMemReadData->sPDDevPAddr);
++ }
++ else
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ if(psMiscInfo->sDevVAddr.uiAddr != 0)
++ {
++ psSGXMemReadData->sDevVAddr = psMiscInfo->sDevVAddr;
++ }
++ else
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ eError = SGXGetMiscInfoUkernel(psDevInfo, psDeviceNode);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "An error occurred in SGXGetMiscInfoUkernel: %d\n",
++ eError));
++ return eError;
++ }
++ psSGXFeatures = &((PVRSRV_SGX_MISCINFO_INFO*)(psMemInfo->pvLinAddrKM))->sSGXFeatures;
++
++#if !defined SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
++ if(*pui32MiscInfoFlags & PVRSRV_USSE_MISCINFO_MEMREAD_FAIL)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++#endif
++
++ psMiscInfo->uData.sSGXFeatures = *psSGXFeatures;
++ return PVRSRV_OK;
++ }
++#endif
++
++#ifdef SUPPORT_SGX_HWPERF
++ case SGX_MISC_INFO_REQUEST_SET_HWPERF_STATUS:
++ {
++ SGXMKIF_HWPERF_CB *psHWPerfCB = psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
++ IMG_UINT ui32MatchingFlags;
++
++
++ if ((psMiscInfo->uData.ui32NewHWPerfStatus & ~(PVRSRV_SGX_HWPERF_GRAPHICS_ON | PVRSRV_SGX_HWPERF_MK_EXECUTION_ON)) != 0)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ ui32MatchingFlags = psMiscInfo->uData.ui32NewHWPerfStatus & psDevInfo->psSGXHostCtl->ui32HWPerfFlags;
++ if((ui32MatchingFlags & PVRSRV_SGX_HWPERF_GRAPHICS_ON) == 0UL)
++ {
++ psHWPerfCB->ui32OrdinalGRAPHICS = 0xffffffff;
++ }
++ if((ui32MatchingFlags & PVRSRV_SGX_HWPERF_MK_EXECUTION_ON) == 0UL)
++ {
++ psHWPerfCB->ui32OrdinalMK_EXECUTION = 0xffffffffUL;
++ }
++
++
++ psDevInfo->psSGXHostCtl->ui32HWPerfFlags = psMiscInfo->uData.ui32NewHWPerfStatus;
++ #if defined(PDUMP)
++ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "SGX ukernel HWPerf status %lu\n",
++ psDevInfo->psSGXHostCtl->ui32HWPerfFlags);
++ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32HWPerfFlags),
++ sizeof(psDevInfo->psSGXHostCtl->ui32HWPerfFlags), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++ #endif
++
++ return PVRSRV_OK;
++ }
++ case SGX_MISC_INFO_REQUEST_HWPERF_CB_ON:
++ {
++
++ SGXMKIF_HWPERF_CB *psHWPerfCB = psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
++ psHWPerfCB->ui32OrdinalGRAPHICS = 0xffffffffUL;
++
++ psDevInfo->psSGXHostCtl->ui32HWPerfFlags |= PVRSRV_SGX_HWPERF_GRAPHICS_ON;
++ return PVRSRV_OK;
++ }
++ case SGX_MISC_INFO_REQUEST_HWPERF_CB_OFF:
++ {
++
++ psDevInfo->psSGXHostCtl->ui32HWPerfFlags = 0;
++ return PVRSRV_OK;
++ }
++ case SGX_MISC_INFO_REQUEST_HWPERF_RETRIEVE_CB:
++ {
++
++ SGX_MISC_INFO_HWPERF_RETRIEVE_CB *psRetrieve = &psMiscInfo->uData.sRetrieveCB;
++ SGXMKIF_HWPERF_CB *psHWPerfCB = psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
++ IMG_UINT i;
++
++ for (i = 0; psHWPerfCB->ui32Woff != psHWPerfCB->ui32Roff && i < psRetrieve->ui32ArraySize; i++)
++ {
++ SGXMKIF_HWPERF_CB_ENTRY *psData = &psHWPerfCB->psHWPerfCBData[psHWPerfCB->ui32Roff];
++
++
++
++ psRetrieve->psHWPerfData[i].ui32FrameNo = psData->ui32FrameNo;
++ psRetrieve->psHWPerfData[i].ui32Type = (psData->ui32Type & PVRSRV_SGX_HWPERF_TYPE_OP_MASK);
++ psRetrieve->psHWPerfData[i].ui32StartTime = psData->ui32Time;
++ psRetrieve->psHWPerfData[i].ui32StartTimeWraps = psData->ui32TimeWraps;
++ psRetrieve->psHWPerfData[i].ui32EndTime = psData->ui32Time;
++ psRetrieve->psHWPerfData[i].ui32EndTimeWraps = psData->ui32TimeWraps;
++ psRetrieve->psHWPerfData[i].ui32ClockSpeed = psDevInfo->ui32CoreClockSpeed;
++ psRetrieve->psHWPerfData[i].ui32TimeMax = psDevInfo->ui32uKernelTimerClock;
++ psHWPerfCB->ui32Roff = (psHWPerfCB->ui32Roff + 1) & (SGXMKIF_HWPERF_CB_SIZE - 1);
++ }
++ psRetrieve->ui32DataCount = i;
++ psRetrieve->ui32Time = OSClockus();
++ return PVRSRV_OK;
++ }
++#endif
++ case SGX_MISC_INFO_DUMP_DEBUG_INFO:
++ {
++ PVR_LOG(("User requested SGX debug info"));
++
++
++ SGXDumpDebugInfo(psDeviceNode, IMG_FALSE);
++
++ return PVRSRV_OK;
++ }
++
++ case SGX_MISC_INFO_PANIC:
++ {
++ PVR_LOG(("User requested SGX panic"));
++
++ SGXPanic(psDeviceNode);
++
++ return PVRSRV_OK;
++ }
++
++ default:
++ {
++
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++}
++
++#if defined(SUPPORT_SGX_HWPERF)
++IMG_EXPORT
++PVRSRV_ERROR SGXReadDiffCountersKM(IMG_HANDLE hDevHandle,
++ IMG_UINT32 ui32Reg,
++ IMG_UINT32 *pui32Old,
++ IMG_BOOL bNew,
++ IMG_UINT32 ui32New,
++ IMG_UINT32 ui32NewReset,
++ IMG_UINT32 ui32CountersReg,
++ IMG_UINT32 ui32Reg2,
++ IMG_BOOL *pbActive,
++ PVRSRV_SGXDEV_DIFF_INFO *psDiffs)
++{
++ PVRSRV_ERROR eError;
++ SYS_DATA *psSysData;
++ PVRSRV_POWER_DEV *psPowerDevice;
++ IMG_BOOL bPowered = IMG_FALSE;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++
++
++ if(bNew)
++ {
++ psDevInfo->ui32HWGroupRequested = ui32New;
++ }
++ psDevInfo->ui32HWReset |= ui32NewReset;
++
++
++ eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++
++ SysAcquireData(&psSysData);
++
++
++ psPowerDevice = (PVRSRV_POWER_DEV*)
++ List_PVRSRV_POWER_DEV_Any_va(psSysData->psPowerDeviceList,
++ MatchPowerDeviceIndex_AnyVaCb,
++ psDeviceNode->sDevId.ui32DeviceIndex);
++
++ if (psPowerDevice)
++ {
++ bPowered = (IMG_BOOL)(psPowerDevice->eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON);
++ }
++
++
++
++ *pbActive = bPowered;
++
++
++
++ {
++ IMG_UINT32 ui32rval = 0;
++
++
++ if(bPowered)
++ {
++ IMG_UINT32 i;
++
++
++ *pui32Old = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32Reg);
++
++ for (i = 0; i < PVRSRV_SGX_DIFF_NUM_COUNTERS; ++i)
++ {
++ psDiffs->aui32Counters[i] = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32CountersReg + (i * 4));
++ }
++
++ if(ui32Reg2)
++ {
++ ui32rval = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32Reg2);
++ }
++
++
++
++ if (psDevInfo->ui32HWGroupRequested != *pui32Old)
++ {
++
++ if(psDevInfo->ui32HWReset != 0)
++ {
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32Reg, psDevInfo->ui32HWGroupRequested | psDevInfo->ui32HWReset);
++ psDevInfo->ui32HWReset = 0;
++ }
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32Reg, psDevInfo->ui32HWGroupRequested);
++ }
++ }
++
++ psDiffs->ui32Time[0] = OSClockus();
++ psDiffs->ui32Time[1] = psDevInfo->psSGXHostCtl->ui32TimeWraps;
++ psDiffs->ui32Time[2] = ui32rval;
++
++ psDiffs->ui32Marker[0] = psDevInfo->ui32KickTACounter;
++ psDiffs->ui32Marker[1] = psDevInfo->ui32KickTARenderCounter;
++ }
++
++
++ PVRSRVPowerUnlock(KERNEL_ID);
++
++ SGXTestActivePowerEvent(psDeviceNode, KERNEL_ID);
++
++ return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR SGXReadHWPerfCBKM(IMG_HANDLE hDevHandle,
++ IMG_UINT32 ui32ArraySize,
++ PVRSRV_SGX_HWPERF_CB_ENTRY *psClientHWPerfEntry,
++ IMG_UINT32 *pui32DataCount,
++ IMG_UINT32 *pui32ClockSpeed,
++ IMG_UINT32 *pui32HostTimeStamp)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ SGXMKIF_HWPERF_CB *psHWPerfCB = psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
++ IMG_UINT i;
++
++ for (i = 0;
++ psHWPerfCB->ui32Woff != psHWPerfCB->ui32Roff && i < ui32ArraySize;
++ i++)
++ {
++ SGXMKIF_HWPERF_CB_ENTRY *psMKPerfEntry = &psHWPerfCB->psHWPerfCBData[psHWPerfCB->ui32Roff];
++
++ psClientHWPerfEntry[i].ui32FrameNo = psMKPerfEntry->ui32FrameNo;
++ psClientHWPerfEntry[i].ui32Type = psMKPerfEntry->ui32Type;
++ psClientHWPerfEntry[i].ui32Ordinal = psMKPerfEntry->ui32Ordinal;
++ psClientHWPerfEntry[i].ui32Clocksx16 = SGXConvertTimeStamp(psDevInfo,
++ psMKPerfEntry->ui32TimeWraps,
++ psMKPerfEntry->ui32Time);
++ OSMemCopy(&psClientHWPerfEntry[i].ui32Counters[0],
++ &psMKPerfEntry->ui32Counters[0],
++ sizeof(psMKPerfEntry->ui32Counters));
++
++ psHWPerfCB->ui32Roff = (psHWPerfCB->ui32Roff + 1) & (SGXMKIF_HWPERF_CB_SIZE - 1);
++ }
++
++ *pui32DataCount = i;
++ *pui32ClockSpeed = psDevInfo->ui32CoreClockSpeed;
++ *pui32HostTimeStamp = OSClockus();
++
++ return eError;
++}
++#else
++#endif
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxkick.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxkick.c
+new file mode 100644
+index 0000000..2848313
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxkick.c
+@@ -0,0 +1,744 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++#include "services_headers.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#if defined (PDUMP)
++#include "sgxapi_km.h"
++#include "pdump_km.h"
++#endif
++#include "sgx_bridge_km.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "sgxutils.h"
++
++IMG_EXPORT
++PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle, SGX_CCB_KICK *psCCBKick)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
++ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *) psCCBKick->hCCBKernelMemInfo;
++ SGXMKIF_CMDTA_SHARED *psTACmd;
++ IMG_UINT32 i;
++#if defined(SUPPORT_SGX_HWPERF)
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ PVRSRV_SGXDEV_INFO *psDevInfo;
++
++ psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle;
++ psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++#endif
++
++#if defined(SUPPORT_SGX_HWPERF)
++ if (psCCBKick->bKickRender)
++ {
++ ++psDevInfo->ui32KickTARenderCounter;
++ }
++ ++psDevInfo->ui32KickTACounter;
++#endif
++
++ if (!CCB_OFFSET_IS_VALID(SGXMKIF_CMDTA_SHARED, psCCBMemInfo, psCCBKick, ui32CCBOffset))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: Invalid CCB offset"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ psTACmd = CCB_DATA_FROM_OFFSET(SGXMKIF_CMDTA_SHARED, psCCBMemInfo, psCCBKick, ui32CCBOffset);
++
++
++ if (psCCBKick->hTA3DSyncInfo)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo;
++ psTACmd->sTA3DDependency.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++ psTACmd->sTA3DDependency.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ if (psCCBKick->bTADependency)
++ {
++ psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++ }
++
++ if (psCCBKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo;
++
++ psTACmd->sTATQSyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ psTACmd->sTATQSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++ psTACmd->ui32TATQSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++ psTACmd->ui32TATQSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++
++ if (psCCBKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo;
++
++ psTACmd->s3DTQSyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ psTACmd->s3DTQSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++ psTACmd->ui323DTQSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++ psTACmd->ui323DTQSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++
++ psTACmd->ui32NumTAStatusVals = psCCBKick->ui32NumTAStatusVals;
++ if (psCCBKick->ui32NumTAStatusVals != 0)
++ {
++
++ for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
++ {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ psTACmd->sCtlTAStatusInfo[i] = psCCBKick->asTAStatusUpdate[i].sCtlStatus;
++#else
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
++ psTACmd->sCtlTAStatusInfo[i].sStatusDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ psTACmd->sCtlTAStatusInfo[i].ui32StatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending;
++#endif
++ }
++ }
++
++ psTACmd->ui32Num3DStatusVals = psCCBKick->ui32Num3DStatusVals;
++ if (psCCBKick->ui32Num3DStatusVals != 0)
++ {
++
++ for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
++ {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ psTACmd->sCtl3DStatusInfo[i] = psCCBKick->as3DStatusUpdate[i].sCtlStatus;
++#else
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
++ psTACmd->sCtl3DStatusInfo[i].sStatusDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ psTACmd->sCtl3DStatusInfo[i].ui32StatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending;
++#endif
++ }
++ }
++
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++
++ psTACmd->ui32NumTASrcSyncs = psCCBKick->ui32NumTASrcSyncs;
++ for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i];
++
++ psTACmd->asTASrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psTACmd->asTASrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++
++ psTACmd->asTASrcSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++
++ psTACmd->asTASrcSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++
++ psTACmd->ui32NumTADstSyncs = psCCBKick->ui32NumTADstSyncs;
++ for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i];
++
++ psTACmd->asTADstSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psTACmd->asTADstSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++
++ psTACmd->asTADstSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psTACmd->asTADstSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++
++ psTACmd->ui32Num3DSrcSyncs = psCCBKick->ui32Num3DSrcSyncs;
++ for (i=0; i<psCCBKick->ui32Num3DSrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i];
++
++ psTACmd->as3DSrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psTACmd->as3DSrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++
++ psTACmd->as3DSrcSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++
++ psTACmd->as3DSrcSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++#else
++
++ psTACmd->ui32NumSrcSyncs = psCCBKick->ui32NumSrcSyncs;
++ for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
++
++ psTACmd->asSrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psTACmd->asSrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++
++ psTACmd->asSrcSyncs[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++
++ psTACmd->asSrcSyncs[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++#endif
++
++ if (psCCBKick->bFirstKickOrResume && psCCBKick->ui32NumDstSyncObjects > 0)
++ {
++ PVRSRV_KERNEL_MEM_INFO *psHWDstSyncListMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *)psCCBKick->hKernelHWSyncListMemInfo;
++ SGXMKIF_HWDEVICE_SYNC_LIST *psHWDeviceSyncList = psHWDstSyncListMemInfo->pvLinAddrKM;
++ IMG_UINT32 ui32NumDstSyncs = psCCBKick->ui32NumDstSyncObjects;
++
++ PVR_ASSERT(((PVRSRV_KERNEL_MEM_INFO *)psCCBKick->hKernelHWSyncListMemInfo)->ui32AllocSize >= (sizeof(SGXMKIF_HWDEVICE_SYNC_LIST) +
++ (sizeof(PVRSRV_DEVICE_SYNC_OBJECT) * ui32NumDstSyncs)));
++
++ psHWDeviceSyncList->ui32NumSyncObjects = ui32NumDstSyncs;
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM())
++ {
++ PDUMPCOMMENT("HWDeviceSyncList for TACmd\r\n");
++ PDUMPMEM(IMG_NULL,
++ psHWDstSyncListMemInfo,
++ 0,
++ sizeof(SGXMKIF_HWDEVICE_SYNC_LIST),
++ 0,
++ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
++ }
++#endif
++
++ for (i=0; i<ui32NumDstSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->pahDstSyncHandles[i];
++
++ if (psSyncInfo)
++ {
++ psHWDeviceSyncList->asSyncData[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psHWDeviceSyncList->asSyncData[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++ psHWDeviceSyncList->asSyncData[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ psHWDeviceSyncList->asSyncData[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++
++ #if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM())
++ {
++ IMG_UINT32 ui32ModifiedValue;
++ IMG_UINT32 ui32SyncOffset = offsetof(SGXMKIF_HWDEVICE_SYNC_LIST, asSyncData)
++ + (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT));
++ IMG_UINT32 ui32WOpsOffset = ui32SyncOffset
++ + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal);
++ IMG_UINT32 ui32ROpsOffset = ui32SyncOffset
++ + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal);
++
++ PDUMPCOMMENT("HWDeviceSyncObject for RT: %i\r\n", i);
++
++ PDUMPMEM(IMG_NULL,
++ psHWDstSyncListMemInfo,
++ ui32SyncOffset,
++ sizeof(PVRSRV_DEVICE_SYNC_OBJECT),
++ 0,
++ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
++
++ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
++ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
++ {
++
++ PDUMPCOMMENT("Init RT ROpsComplete\r\n", i);
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++
++ PDUMPCOMMENT("Init RT WOpsComplete\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++ }
++
++ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
++
++ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastOpDumpVal - 1;
++
++ PDUMPCOMMENT("Modify RT %d WOpPendingVal in HWDevSyncList\r\n", i);
++
++ PDUMPMEM(&ui32ModifiedValue,
++ psHWDstSyncListMemInfo,
++ ui32WOpsOffset,
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
++
++ ui32ModifiedValue = 0;
++ PDUMPCOMMENT("Modify RT %d ROpsPendingVal in HWDevSyncList\r\n", i);
++
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psHWDstSyncListMemInfo,
++ ui32ROpsOffset,
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psHWDstSyncListMemInfo));
++ }
++ #endif
++ }
++ else
++ {
++ psHWDeviceSyncList->asSyncData[i].sWriteOpsCompleteDevVAddr.uiAddr = 0;
++ psHWDeviceSyncList->asSyncData[i].sReadOpsCompleteDevVAddr.uiAddr = 0;
++
++ psHWDeviceSyncList->asSyncData[i].ui32ReadOpsPendingVal = 0;
++ psHWDeviceSyncList->asSyncData[i].ui32WriteOpsPendingVal = 0;
++ }
++ }
++ }
++
++
++
++
++ psTACmd->ui32CtrlFlags |= SGXMKIF_CMDTA_CTRLFLAGS_READY;
++
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM())
++ {
++ PDUMPCOMMENT("Shared part of TA command\r\n");
++
++ PDUMPMEM(psTACmd,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff,
++ sizeof(SGXMKIF_CMDTA_SHARED),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++ for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++)
++ {
++ IMG_UINT32 ui32ModifiedValue;
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i];
++
++ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
++ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
++ {
++
++ PDUMPCOMMENT("Init RT TA-SRC ROpsComplete\r\n", i);
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++
++ PDUMPCOMMENT("Init RT TA-SRC WOpsComplete\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++ }
++
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
++
++ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastReadOpDumpVal - 1;
++
++ PDUMPCOMMENT("Modify TA SrcSync %d ROpsPendingVal\r\n", i);
++
++ PDUMPMEM(&ui32ModifiedValue,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTASrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Modify TA SrcSync %d WOpPendingVal\r\n", i);
++
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTASrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++ }
++
++ for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++)
++ {
++ IMG_UINT32 ui32ModifiedValue;
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i];
++
++ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
++ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
++ {
++
++ PDUMPCOMMENT("Init RT TA-DST ROpsComplete\r\n", i);
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++
++ PDUMPCOMMENT("Init RT TA-DST WOpsComplete\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++ }
++
++ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
++
++ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastOpDumpVal - 1;
++
++ PDUMPCOMMENT("Modify TA DstSync %d WOpPendingVal\r\n", i);
++
++ PDUMPMEM(&ui32ModifiedValue,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTADstSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Modify TA DstSync %d ROpsPendingVal\r\n", i);
++
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asTADstSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++ }
++
++ for (i=0; i<psCCBKick->ui32Num3DSrcSyncs; i++)
++ {
++ IMG_UINT32 ui32ModifiedValue;
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i];
++
++ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
++ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
++ {
++
++ PDUMPCOMMENT("Init RT 3D-SRC ROpsComplete\r\n", i);
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++
++ PDUMPCOMMENT("Init RT 3D-SRC WOpsComplete\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++ }
++
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
++
++ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastReadOpDumpVal - 1;
++
++ PDUMPCOMMENT("Modify 3D SrcSync %d ROpsPendingVal\r\n", i);
++
++ PDUMPMEM(&ui32ModifiedValue,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, as3DSrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Modify 3D SrcSync %d WOpPendingVal\r\n", i);
++
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, as3DSrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++ }
++#else
++ for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
++ {
++ IMG_UINT32 ui32ModifiedValue;
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
++
++ if ((psSyncInfo->psSyncData->ui32LastOpDumpVal == 0) &&
++ (psSyncInfo->psSyncData->ui32LastReadOpDumpVal == 0))
++ {
++
++ PDUMPCOMMENT("Init RT ROpsComplete\r\n", i);
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32ReadOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++
++ PDUMPCOMMENT("Init RT WOpsComplete\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psSyncInfo->psSyncDataMemInfoKM,
++ offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
++ sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
++ 0,
++ MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++ }
++
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
++
++ ui32ModifiedValue = psSyncInfo->psSyncData->ui32LastReadOpDumpVal - 1;
++
++ PDUMPCOMMENT("Modify SrcSync %d ROpsPendingVal\r\n", i);
++
++ PDUMPMEM(&ui32ModifiedValue,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asSrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32ReadOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Modify SrcSync %d WOpPendingVal\r\n", i);
++
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, asSrcSyncs) +
++ (i * sizeof(PVRSRV_DEVICE_SYNC_OBJECT)) + offsetof(PVRSRV_DEVICE_SYNC_OBJECT, ui32WriteOpsPendingVal),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++ }
++#endif
++
++ for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
++ {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PDUMPCOMMENT("Modify TA status value in TA cmd\r\n");
++ PDUMPMEM(&psCCBKick->asTAStatusUpdate[i].ui32LastStatusUpdateDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, sCtlTAStatusInfo[i].ui32StatusValue),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++#else
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
++ PDUMPCOMMENT("Modify TA status value in TA cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, sCtlTAStatusInfo[i].ui32StatusValue),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++#endif
++ }
++
++ for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
++ {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PDUMPCOMMENT("Modify 3D status value in TA cmd\r\n");
++ PDUMPMEM(&psCCBKick->as3DStatusUpdate[i].ui32LastStatusUpdateDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, sCtl3DStatusInfo[i].ui32StatusValue),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++#else
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
++ PDUMPCOMMENT("Modify 3D status value in TA cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psCCBKick->ui32CCBDumpWOff + offsetof(SGXMKIF_CMDTA_SHARED, sCtl3DStatusInfo[i].ui32StatusValue),
++ sizeof(IMG_UINT32),
++ 0,
++ MAKEUNIQUETAG(psCCBMemInfo));
++#endif
++ }
++ }
++#endif
++
++ eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_TA, &psCCBKick->sCommand, KERNEL_ID, 0);
++ if (eError == PVRSRV_ERROR_RETRY)
++ {
++ if (psCCBKick->bFirstKickOrResume && psCCBKick->ui32NumDstSyncObjects > 0)
++ {
++ for (i=0; i < psCCBKick->ui32NumDstSyncObjects; i++)
++ {
++
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->pahDstSyncHandles[i];
++
++ if (psSyncInfo)
++ {
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM())
++ {
++ psSyncInfo->psSyncData->ui32LastOpDumpVal--;
++ }
++#endif
++ }
++ }
++ }
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++ for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsPending--;
++ }
++ for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++ for (i=0; i<psCCBKick->ui32Num3DSrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsPending--;
++ }
++#else
++ for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsPending--;
++ }
++#endif
++
++ return eError;
++ }
++ else if (PVRSRV_OK != eError)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: SGXScheduleCCBCommandKM failed."));
++ return eError;
++ }
++
++
++#if defined(NO_HARDWARE)
++
++
++
++ if (psCCBKick->hTA3DSyncInfo)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo;
++
++ if (psCCBKick->bTADependency)
++ {
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++ }
++
++ if (psCCBKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo;
++
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++
++ if (psCCBKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo;
++
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++
++
++ for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
++ {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = (PVRSRV_KERNEL_MEM_INFO*)psCCBKick->asTAStatusUpdate[i].hKernelMemInfo;
++
++ *(IMG_UINT32*)((IMG_UINTPTR_T)psKernelMemInfo->pvLinAddrKM
++ + (psTACmd->sCtlTAStatusInfo[i].sStatusDevAddr.uiAddr
++ - psKernelMemInfo->sDevVAddr.uiAddr)) = psTACmd->sCtlTAStatusInfo[i].ui32StatusValue;
++#else
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psTACmd->sCtlTAStatusInfo[i].ui32StatusValue;
++#endif
++ }
++
++#if defined(SUPPORT_SGX_GENERALISED_SYNCOBJECTS)
++
++ for (i=0; i<psCCBKick->ui32NumTASrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTASrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++ for (i=0; i<psCCBKick->ui32NumTADstSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahTADstKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++ for (i=0; i<psCCBKick->ui32Num3DSrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ah3DSrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++#else
++
++ for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++#endif
++
++ if (psCCBKick->bTerminateOrAbort)
++ {
++ if (psCCBKick->ui32NumDstSyncObjects > 0)
++ {
++ PVRSRV_KERNEL_MEM_INFO *psHWDstSyncListMemInfo =
++ (PVRSRV_KERNEL_MEM_INFO *)psCCBKick->hKernelHWSyncListMemInfo;
++ SGXMKIF_HWDEVICE_SYNC_LIST *psHWDeviceSyncList = psHWDstSyncListMemInfo->pvLinAddrKM;
++
++ for (i=0; i<psCCBKick->ui32NumDstSyncObjects; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->pahDstSyncHandles[i];
++ if (psSyncInfo)
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psHWDeviceSyncList->asSyncData[i].ui32WriteOpsPendingVal+1;
++ }
++ }
++
++
++ for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
++ {
++#if defined(SUPPORT_SGX_NEW_STATUS_VALS)
++ PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = (PVRSRV_KERNEL_MEM_INFO*)psCCBKick->as3DStatusUpdate[i].hKernelMemInfo;
++
++ *(IMG_UINT32*)((IMG_UINTPTR_T)psKernelMemInfo->pvLinAddrKM
++ + (psTACmd->sCtl3DStatusInfo[i].sStatusDevAddr.uiAddr
++ - psKernelMemInfo->sDevVAddr.uiAddr)) = psTACmd->sCtl3DStatusInfo[i].ui32StatusValue;
++#else
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psTACmd->sCtl3DStatusInfo[i].ui32StatusValue;
++#endif
++ }
++ }
++#endif
++
++ return eError;
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxpower.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxpower.c
+new file mode 100644
+index 0000000..169ae20
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxpower.c
+@@ -0,0 +1,453 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "sgxapi_km.h"
++#include "sgx_mkif_km.h"
++#include "sgxutils.h"
++#include "pdump_km.h"
++
++
++#if defined(SUPPORT_HW_RECOVERY)
++static PVRSRV_ERROR SGXAddTimer(PVRSRV_DEVICE_NODE *psDeviceNode,
++ SGX_TIMING_INFORMATION *psSGXTimingInfo,
++ IMG_HANDLE *phTimer)
++{
++ *phTimer = OSAddTimer(SGXOSTimer, psDeviceNode,
++ 1000 * 50 / psSGXTimingInfo->ui32uKernelFreq);
++ if(*phTimer == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXAddTimer : Failed to register timer callback function"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ return PVRSRV_OK;
++}
++#endif
++
++static PVRSRV_ERROR SGXUpdateTimingInfo(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++#if defined(SGX_DYNAMIC_TIMING_INFO)
++ SGX_TIMING_INFORMATION sSGXTimingInfo = {0};
++#else
++ SGX_DEVICE_MAP *psSGXDeviceMap;
++#endif
++ IMG_UINT32 ui32ActivePowManSampleRate;
++ SGX_TIMING_INFORMATION *psSGXTimingInfo;
++
++
++#if defined(SGX_DYNAMIC_TIMING_INFO)
++ psSGXTimingInfo = &sSGXTimingInfo;
++ SysGetSGXTimingInformation(psSGXTimingInfo);
++#else
++ SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
++ (IMG_VOID**)&psSGXDeviceMap);
++ psSGXTimingInfo = &psSGXDeviceMap->sTimingInfo;
++#endif
++
++#if defined(SUPPORT_HW_RECOVERY)
++ {
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32OlduKernelFreq;
++
++ if (psDevInfo->hTimer != IMG_NULL)
++ {
++ ui32OlduKernelFreq = psDevInfo->ui32CoreClockSpeed / psDevInfo->ui32uKernelTimerClock;
++ if (ui32OlduKernelFreq != psSGXTimingInfo->ui32uKernelFreq)
++ {
++ IMG_HANDLE hNewTimer;
++
++ eError = SGXAddTimer(psDeviceNode, psSGXTimingInfo, &hNewTimer);
++ if (eError == PVRSRV_OK)
++ {
++ eError = OSRemoveTimer(psDevInfo->hTimer);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXUpdateTimingInfo: Failed to remove timer"));
++ }
++ psDevInfo->hTimer = hNewTimer;
++ }
++ else
++ {
++
++ }
++ }
++ }
++ else
++ {
++ eError = SGXAddTimer(psDeviceNode, psSGXTimingInfo, &psDevInfo->hTimer);
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++
++ psDevInfo->psSGXHostCtl->ui32HWRecoverySampleRate =
++ psSGXTimingInfo->ui32uKernelFreq / psSGXTimingInfo->ui32HWRecoveryFreq;
++ }
++#endif
++
++
++ psDevInfo->ui32CoreClockSpeed = psSGXTimingInfo->ui32CoreClockSpeed;
++ psDevInfo->ui32uKernelTimerClock = psSGXTimingInfo->ui32CoreClockSpeed / psSGXTimingInfo->ui32uKernelFreq;
++
++
++ psDevInfo->psSGXHostCtl->ui32uKernelTimerClock = psDevInfo->ui32uKernelTimerClock;
++#if defined(PDUMP)
++ PDUMPCOMMENT("Host Control - Microkernel clock");
++ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32uKernelTimerClock),
++ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++#endif
++
++ if (psSGXTimingInfo->bEnableActivePM)
++ {
++ ui32ActivePowManSampleRate =
++ psSGXTimingInfo->ui32uKernelFreq * psSGXTimingInfo->ui32ActivePowManLatencyms / 1000;
++
++
++
++
++
++
++
++
++ ui32ActivePowManSampleRate += 1;
++ }
++ else
++ {
++ ui32ActivePowManSampleRate = 0;
++ }
++
++ psDevInfo->psSGXHostCtl->ui32ActivePowManSampleRate = ui32ActivePowManSampleRate;
++#if defined(PDUMP)
++ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32ActivePowManSampleRate),
++ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++#endif
++
++ return PVRSRV_OK;
++}
++
++
++static IMG_VOID SGXStartTimer(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++ #if defined(SUPPORT_HW_RECOVERY)
++ PVRSRV_ERROR eError;
++
++ eError = OSEnableTimer(psDevInfo->hTimer);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXStartTimer : Failed to enable host timer"));
++ }
++ #else
++ PVR_UNREFERENCED_PARAMETER(psDevInfo);
++ #endif
++}
++
++
++static IMG_VOID SGXPollForClockGating (PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32Register,
++ IMG_UINT32 ui32RegisterValue,
++ IMG_CHAR *pszComment)
++{
++ PVR_UNREFERENCED_PARAMETER(psDevInfo);
++ PVR_UNREFERENCED_PARAMETER(ui32Register);
++ PVR_UNREFERENCED_PARAMETER(ui32RegisterValue);
++ PVR_UNREFERENCED_PARAMETER(pszComment);
++
++ #if !defined(NO_HARDWARE)
++ PVR_ASSERT(psDevInfo != IMG_NULL);
++
++
++ if (PollForValueKM((IMG_UINT32 *)psDevInfo->pvRegsBaseKM + (ui32Register >> 2),
++ 0,
++ ui32RegisterValue,
++ MAX_HW_TIME_US/WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: %s failed.", pszComment));
++ }
++ #endif
++
++ PDUMPCOMMENT(pszComment);
++ PDUMPREGPOL(ui32Register, 0, ui32RegisterValue);
++}
++
++
++PVRSRV_ERROR SGXPrePowerState (IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON))
++ {
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ IMG_UINT32 ui32PowerCmd, ui32CompleteStatus;
++ SGXMKIF_COMMAND sCommand = {0};
++ IMG_UINT32 ui32Core;
++
++ #if defined(SUPPORT_HW_RECOVERY)
++
++ eError = OSDisableTimer(psDevInfo->hTimer);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Failed to disable timer"));
++ return eError;
++ }
++ #endif
++
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF)
++ {
++
++ ui32PowerCmd = PVRSRV_POWERCMD_POWEROFF;
++ ui32CompleteStatus = PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE;
++ PDUMPCOMMENT("SGX power off request");
++ }
++ else
++ {
++
++ ui32PowerCmd = PVRSRV_POWERCMD_IDLE;
++ ui32CompleteStatus = PVRSRV_USSE_EDM_POWMAN_IDLE_COMPLETE;
++ PDUMPCOMMENT("SGX idle request");
++ }
++
++ sCommand.ui32Data[1] = ui32PowerCmd;
++
++ eError = SGXScheduleCCBCommand(psDevInfo, SGXMKIF_CMD_POWER, &sCommand, KERNEL_ID, 0);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Failed to submit power down command"));
++ return eError;
++ }
++
++
++ #if !defined(NO_HARDWARE)
++ if (PollForValueKM(&psDevInfo->psSGXHostCtl->ui32PowerStatus,
++ ui32CompleteStatus,
++ ui32CompleteStatus,
++ MAX_HW_TIME_US/WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Wait for SGX ukernel power transition failed."));
++ PVR_DBG_BREAK;
++ }
++ #endif
++
++ #if defined(PDUMP)
++ PDUMPCOMMENT("TA/3D CCB Control - Wait for power event on uKernel.");
++ PDUMPMEMPOL(psDevInfo->psKernelSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32PowerStatus),
++ ui32CompleteStatus,
++ ui32CompleteStatus,
++ PDUMP_POLL_OPERATOR_EQUAL,
++ 0,
++ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++ #endif
++
++ for (ui32Core = 0; ui32Core < SGX_FEATURE_MP_CORE_COUNT; ui32Core++)
++ {
++
++ SGXPollForClockGating(psDevInfo,
++ SGX_MP_CORE_SELECT(psDevInfo->ui32ClkGateStatusReg, ui32Core),
++ psDevInfo->ui32ClkGateStatusMask,
++ "Wait for SGX clock gating");
++ }
++
++ #if defined(SGX_FEATURE_MP)
++
++ SGXPollForClockGating(psDevInfo,
++ psDevInfo->ui32MasterClkGateStatusReg,
++ psDevInfo->ui32MasterClkGateStatusMask,
++ "Wait for SGX master clock gating");
++ #endif
++
++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF)
++ {
++
++ eError = SGXDeinitialise(psDevInfo);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: SGXDeinitialise failed: %lu", eError));
++ return eError;
++ }
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SGXPostPowerState (IMG_HANDLE hDevHandle,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON))
++ {
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
++
++
++ psSGXHostCtl->ui32PowerStatus = 0;
++ #if defined(PDUMP)
++ PDUMPCOMMENT("TA/3D CCB Control - Reset power status");
++ PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32PowerStatus),
++ sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS,
++ MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++ #endif
++
++ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF)
++ {
++ eError = SGXUpdateTimingInfo(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState: SGXUpdateTimingInfo failed"));
++ return eError;
++ }
++
++ eError = SGXInitialise(psDevInfo);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState: SGXInitialise failed"));
++ return eError;
++ }
++ }
++ else
++ {
++
++
++ SGXMKIF_COMMAND sCommand = {0};
++
++ sCommand.ui32Data[1] = PVRSRV_POWERCMD_RESUME;
++ eError = SGXScheduleCCBCommand(psDevInfo, SGXMKIF_CMD_POWER, &sCommand, ISR_ID, 0);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState failed to schedule CCB command: %lu", eError));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ SGXStartTimer(psDevInfo);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SGXPreClockSpeedChange (IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++
++ PVR_UNREFERENCED_PARAMETER(psDevInfo);
++
++ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON)
++ {
++ if (bIdleDevice)
++ {
++
++ PDUMPSUSPEND();
++
++ eError = SGXPrePowerState(hDevHandle, PVRSRV_DEV_POWER_STATE_IDLE,
++ PVRSRV_DEV_POWER_STATE_ON);
++
++ if (eError != PVRSRV_OK)
++ {
++ PDUMPRESUME();
++ return eError;
++ }
++ }
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,"SGXPreClockSpeedChange: SGX clock speed was %luHz",
++ psDevInfo->ui32CoreClockSpeed));
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SGXPostClockSpeedChange (IMG_HANDLE hDevHandle,
++ IMG_BOOL bIdleDevice,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ IMG_UINT32 ui32OldClockSpeed = psDevInfo->ui32CoreClockSpeed;
++
++ PVR_UNREFERENCED_PARAMETER(ui32OldClockSpeed);
++
++ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON)
++ {
++ PVRSRV_ERROR eError;
++
++ eError = SGXUpdateTimingInfo(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState: SGXUpdateTimingInfo failed"));
++ return eError;
++ }
++
++ if (bIdleDevice)
++ {
++ eError = SGXPostPowerState(hDevHandle, PVRSRV_DEV_POWER_STATE_ON,
++ PVRSRV_DEV_POWER_STATE_IDLE);
++
++ PDUMPRESUME();
++
++ if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++ else
++ {
++ SGXStartTimer(psDevInfo);
++ }
++
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,"SGXPostClockSpeedChange: SGX clock speed changed from %luHz to %luHz",
++ ui32OldClockSpeed, psDevInfo->ui32CoreClockSpeed));
++
++ return PVRSRV_OK;
++}
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxreset.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxreset.c
+new file mode 100644
+index 0000000..5cf2519
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxreset.c
+@@ -0,0 +1,489 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#include "services_headers.h"
++#include "sgxinfokm.h"
++#include "sgxconfig.h"
++
++#include "pdump_km.h"
++
++
++static IMG_VOID SGXResetSoftReset(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_BOOL bResetBIF,
++ IMG_UINT32 ui32PDUMPFlags,
++ IMG_BOOL bPDump)
++{
++ IMG_UINT32 ui32SoftResetRegVal;
++
++#if defined(SGX_FEATURE_MP)
++ ui32SoftResetRegVal =
++ EUR_CR_MASTER_SOFT_RESET_IPF_RESET_MASK |
++ EUR_CR_MASTER_SOFT_RESET_DPM_RESET_MASK |
++ EUR_CR_MASTER_SOFT_RESET_VDM_RESET_MASK;
++
++#if defined(SGX_FEATURE_SYSTEM_CACHE)
++ ui32SoftResetRegVal |= EUR_CR_MASTER_SOFT_RESET_SLC_RESET_MASK;
++#endif
++
++ if (bResetBIF)
++ {
++ ui32SoftResetRegVal |= EUR_CR_MASTER_SOFT_RESET_BIF_RESET_MASK;
++ }
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SOFT_RESET, ui32SoftResetRegVal);
++ if (bPDump)
++ {
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_SOFT_RESET, ui32SoftResetRegVal, ui32PDUMPFlags);
++ }
++#endif
++
++ ui32SoftResetRegVal =
++
++ EUR_CR_SOFT_RESET_DPM_RESET_MASK |
++ EUR_CR_SOFT_RESET_TA_RESET_MASK |
++ EUR_CR_SOFT_RESET_USE_RESET_MASK |
++ EUR_CR_SOFT_RESET_ISP_RESET_MASK |
++ EUR_CR_SOFT_RESET_TSP_RESET_MASK;
++
++#ifdef EUR_CR_SOFT_RESET_TWOD_RESET_MASK
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TWOD_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_TE_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TE_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_MTE_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_MTE_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_ISP2_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_ISP2_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_PDS_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_PDS_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_PBE_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_PBE_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_CACHEL2_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_CACHEL2_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_TCU_L2_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TCU_L2_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_UCACHEL2_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_MADD_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_MADD_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_ITR_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_ITR_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_TEX_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_TEX_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_IDXFIFO_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_VDM_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_VDM_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_DCU_L2_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_DCU_L2_RESET_MASK;
++#endif
++#if defined(EUR_CR_SOFT_RESET_DCU_L0L1_RESET_MASK)
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_DCU_L0L1_RESET_MASK;
++#endif
++
++#if !defined(PDUMP)
++ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
++#endif
++
++ if (bResetBIF)
++ {
++ ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_BIF_RESET_MASK;
++ }
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32SoftResetRegVal);
++ if (bPDump)
++ {
++ PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32SoftResetRegVal, ui32PDUMPFlags);
++ }
++}
++
++
++static IMG_VOID SGXResetSleep(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32PDUMPFlags,
++ IMG_BOOL bPDump)
++{
++#if !defined(PDUMP)
++ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
++#endif
++
++
++ OSWaitus(1000 * 1000000 / psDevInfo->ui32CoreClockSpeed);
++ if (bPDump)
++ {
++ PDUMPIDLWITHFLAGS(30, ui32PDUMPFlags);
++#if defined(PDUMP)
++ PDumpRegRead(EUR_CR_SOFT_RESET, ui32PDUMPFlags);
++#endif
++ }
++
++
++
++}
++
++
++static IMG_VOID SGXResetInvalDC(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32PDUMPFlags,
++ IMG_BOOL bPDump)
++{
++ IMG_UINT32 ui32RegVal;
++
++
++#if defined(EUR_CR_BIF_CTRL_INVAL)
++ ui32RegVal = EUR_CR_BIF_CTRL_INVAL_ALL_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL_INVAL, ui32RegVal);
++ if (bPDump)
++ {
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL_INVAL, ui32RegVal, ui32PDUMPFlags);
++ }
++#else
++ ui32RegVal = EUR_CR_BIF_CTRL_INVALDC_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ if (bPDump)
++ {
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++ }
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, bPDump);
++
++ ui32RegVal = 0;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ if (bPDump)
++ {
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++ }
++#endif
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, bPDump);
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ {
++
++
++
++ if (PollForValueKM((IMG_UINT32 *)((IMG_UINT8*)psDevInfo->pvRegsBaseKM + EUR_CR_BIF_MEM_REQ_STAT),
++ 0,
++ EUR_CR_BIF_MEM_REQ_STAT_READS_MASK,
++ MAX_HW_TIME_US/WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"Wait for DC invalidate failed."));
++ PVR_DBG_BREAK;
++ }
++
++ if (bPDump)
++ {
++ PDUMPREGPOLWITHFLAGS(EUR_CR_BIF_MEM_REQ_STAT, 0, EUR_CR_BIF_MEM_REQ_STAT_READS_MASK, ui32PDUMPFlags);
++ }
++ }
++#endif
++}
++
++
++IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32PDUMPFlags)
++{
++ IMG_UINT32 ui32RegVal;
++#if defined(EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK)
++ const IMG_UINT32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK;
++#else
++ const IMG_UINT32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_MASK;
++#endif
++
++#ifndef PDUMP
++ PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
++#endif
++
++ psDevInfo->ui32NumResets++;
++
++ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Start of SGX reset sequence\r\n");
++
++#if defined(FIX_HW_BRN_23944)
++
++ ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
++ if (ui32RegVal & ui32BifFaultMask)
++ {
++
++ ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK | EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++ ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++ }
++#endif
++
++
++ SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_TRUE);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++
++
++#if defined(SGX_FEATURE_36BIT_MMU)
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_36BIT_ADDRESSING, EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_36BIT_ADDRESSING, EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK, ui32PDUMPFlags);
++#endif
++
++ ui32RegVal = 0;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++#if defined(SGX_FEATURE_MP)
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BIF_CTRL, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++#endif
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK_SET, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK_SET, ui32RegVal, ui32PDUMPFlags);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags);
++#endif
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal, ui32PDUMPFlags);
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++ {
++ IMG_UINT32 ui32DirList, ui32DirListReg;
++
++ for (ui32DirList = 1;
++ ui32DirList < SGX_FEATURE_BIF_NUM_DIRLISTS;
++ ui32DirList++)
++ {
++ ui32DirListReg = EUR_CR_BIF_DIR_LIST_BASE1 + 4 * (ui32DirList - 1);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32DirListReg, ui32RegVal);
++ PDUMPREGWITHFLAGS(ui32DirListReg, ui32RegVal, ui32PDUMPFlags);
++ }
++ }
++#endif
++
++#if defined(EUR_CR_BIF_MEM_ARB_CONFIG)
++
++
++ ui32RegVal = (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_SHIFT) |
++ (7UL << EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_SHIFT) |
++ (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_SHIFT);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_MEM_ARB_CONFIG, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_MEM_ARB_CONFIG, ui32RegVal, ui32PDUMPFlags);
++#endif
++
++#if defined(SGX_FEATURE_SYSTEM_CACHE)
++#if defined(SGX_FEATURE_MP)
++ #if defined(SGX_BYPASS_SYSTEM_CACHE)
++ #error SGX_BYPASS_SYSTEM_CACHE not supported
++ #else
++ ui32RegVal = EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_REQ0_MASK |
++ (0xC << EUR_CR_MASTER_SLC_CTRL_ARB_PAGE_SIZE_SHIFT);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SLC_CTRL, ui32RegVal);
++ PDUMPREG(EUR_CR_MASTER_SLC_CTRL, ui32RegVal);
++
++ ui32RegVal = EUR_CR_MASTER_SLC_CTRL_BYPASS_BYP_CC_MASK;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SLC_CTRL_BYPASS, ui32RegVal);
++ PDUMPREG(EUR_CR_MASTER_SLC_CTRL_BYPASS, ui32RegVal);
++ #endif
++#else
++ #if defined(SGX_BYPASS_SYSTEM_CACHE)
++
++ ui32RegVal = EUR_CR_MNE_CR_CTRL_BYPASS_ALL_MASK;
++ #else
++ #if defined(FIX_HW_BRN_26620)
++ ui32RegVal = 0;
++ #else
++
++ ui32RegVal = EUR_CR_MNE_CR_CTRL_BYP_CC_MASK;
++ #endif
++ #endif
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MNE_CR_CTRL, ui32RegVal);
++ PDUMPREG(EUR_CR_MNE_CR_CTRL, ui32RegVal);
++#endif
++#endif
++
++
++
++
++
++
++ ui32RegVal = psDevInfo->sBIFResetPDDevPAddr.uiAddr;
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++
++ SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_TRUE);
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++
++
++ for (;;)
++ {
++ IMG_UINT32 ui32BifIntStat = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
++ IMG_DEV_VIRTADDR sBifFault;
++ IMG_UINT32 ui32PDIndex, ui32PTIndex;
++
++ if ((ui32BifIntStat & ui32BifFaultMask) == 0)
++ {
++ break;
++ }
++
++
++
++
++ sBifFault.uiAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT);
++ PVR_DPF((PVR_DBG_WARNING, "SGXReset: Page fault 0x%x/0x%x", ui32BifIntStat, sBifFault.uiAddr));
++ ui32PDIndex = sBifFault.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++ ui32PTIndex = (sBifFault.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++
++ SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_FALSE);
++
++
++ psDevInfo->pui32BIFResetPD[ui32PDIndex] = (psDevInfo->sBIFResetPTDevPAddr.uiAddr
++ >>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PDE_PAGE_SIZE_4K
++ | SGX_MMU_PDE_VALID;
++ psDevInfo->pui32BIFResetPT[ui32PTIndex] = (psDevInfo->sBIFResetPageDevPAddr.uiAddr
++ >>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
++ | SGX_MMU_PTE_VALID;
++
++
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32RegVal);
++ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2);
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2, ui32RegVal);
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++
++ SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_FALSE);
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++
++ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++
++ psDevInfo->pui32BIFResetPD[ui32PDIndex] = 0;
++ psDevInfo->pui32BIFResetPT[ui32PTIndex] = 0;
++ }
++
++
++
++
++ #if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++
++ ui32RegVal = (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT);
++
++ #if defined(SGX_FEATURE_2D_HARDWARE)
++
++ ui32RegVal |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_2D_SHIFT);
++ #endif
++
++ #if defined(FIX_HW_BRN_23410)
++
++ ui32RegVal |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_TA_SHIFT);
++ #endif
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags);
++ #endif
++
++ {
++ IMG_UINT32 ui32EDMDirListReg;
++
++
++ #if (SGX_BIF_DIR_LIST_INDEX_EDM == 0)
++ ui32EDMDirListReg = EUR_CR_BIF_DIR_LIST_BASE0;
++ #else
++
++ ui32EDMDirListReg = EUR_CR_BIF_DIR_LIST_BASE1 + 4 * (SGX_BIF_DIR_LIST_INDEX_EDM - 1);
++ #endif
++
++#if defined(FIX_HW_BRN_28011)
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, psDevInfo->sKernelPDDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT);
++ PDUMPPDREGWITHFLAGS(EUR_CR_BIF_DIR_LIST_BASE0, psDevInfo->sKernelPDDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
++#endif
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32EDMDirListReg, psDevInfo->sKernelPDDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT);
++ PDUMPPDREGWITHFLAGS(ui32EDMDirListReg, psDevInfo->sKernelPDDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
++ }
++
++#ifdef SGX_FEATURE_2D_HARDWARE
++
++ #if ((SGX_2D_HEAP_BASE & ~EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK) != 0)
++ #error "SGXReset: SGX_2D_HEAP_BASE doesn't match EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK alignment"
++ #endif
++
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE);
++ PDUMPREGWITHFLAGS(EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE, ui32PDUMPFlags);
++#endif
++
++
++ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++ PVR_DPF((PVR_DBG_MESSAGE,"Soft Reset of SGX"));
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++
++ ui32RegVal = 0;
++#if defined(SGX_FEATURE_MP)
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SOFT_RESET, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_MASTER_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
++#endif
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
++ PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
++
++
++ SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "End of SGX reset sequence\r\n");
++}
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxtransfer.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxtransfer.c
+new file mode 100644
+index 0000000..f851b75
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxtransfer.c
+@@ -0,0 +1,543 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(TRANSFER_QUEUE)
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "sgxinfo.h"
++#include "sysconfig.h"
++#include "regpaths.h"
++#include "pdump_km.h"
++#include "mmu.h"
++#include "pvr_bridge.h"
++#include "sgx_bridge_km.h"
++#include "sgxinfokm.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "sgxutils.h"
++
++IMG_EXPORT PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK *psKick)
++{
++ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psKick->hCCBMemInfo;
++ SGXMKIF_COMMAND sCommand = {0};
++ SGXMKIF_TRANSFERCMD_SHARED *psSharedTransferCmd;
++ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
++ PVRSRV_ERROR eError;
++
++
++ if (!CCB_OFFSET_IS_VALID(SGXMKIF_TRANSFERCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXSubmitTransferKM: Invalid CCB offset"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ psSharedTransferCmd = CCB_DATA_FROM_OFFSET(SGXMKIF_TRANSFERCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset);
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++
++ psSharedTransferCmd->ui32TASyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ psSharedTransferCmd->ui32TASyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psSharedTransferCmd->sTASyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psSharedTransferCmd->sTASyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++ else
++ {
++ psSharedTransferCmd->sTASyncWriteOpsCompleteDevVAddr.uiAddr = 0;
++ psSharedTransferCmd->sTASyncReadOpsCompleteDevVAddr.uiAddr = 0;
++ }
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++
++ psSharedTransferCmd->ui323DSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ psSharedTransferCmd->ui323DSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psSharedTransferCmd->s3DSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psSharedTransferCmd->s3DSyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++ else
++ {
++ psSharedTransferCmd->s3DSyncWriteOpsCompleteDevVAddr.uiAddr = 0;
++ psSharedTransferCmd->s3DSyncReadOpsCompleteDevVAddr.uiAddr = 0;
++ }
++
++ if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL)
++ {
++ if (psKick->ui32NumSrcSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[0];
++
++ psSharedTransferCmd->ui32SrcWriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ psSharedTransferCmd->ui32SrcReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psSharedTransferCmd->sSrcWriteOpsCompleteDevAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psSharedTransferCmd->sSrcReadOpsCompleteDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++ }
++
++ if (psKick->ui32NumDstSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[0];
++
++ psSharedTransferCmd->ui32DstWriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ psSharedTransferCmd->ui32DstReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ psSharedTransferCmd->sDstWriteOpsCompleteDevAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ psSharedTransferCmd->sDstReadOpsCompleteDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++ }
++
++
++ if (psKick->ui32NumSrcSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[0];
++ psSyncInfo->psSyncData->ui32ReadOpsPending++;
++ }
++ if (psKick->ui32NumDstSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[0];
++ psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++ }
++
++
++ if (psKick->ui32NumDstSync > 1 || psKick->ui32NumSrcSync > 1)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "Transfer command doesn't support more than 1 sync object per src/dst\ndst: %d, src: %d",
++ psKick->ui32NumDstSync, psKick->ui32NumSrcSync));
++ }
++
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM()
++ || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
++ {
++ PDUMPCOMMENT("Shared part of transfer command\r\n");
++ PDUMPMEM(psSharedTransferCmd,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff,
++ sizeof(SGXMKIF_TRANSFERCMD_SHARED),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ if((psKick->ui32NumSrcSync > 0) && ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL))
++ {
++ psSyncInfo = psKick->ahSrcSyncInfo[0];
++
++ PDUMPCOMMENT("Hack src surface write op in transfer cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_TRANSFERCMD_SHARED, ui32SrcWriteOpPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Hack src surface read op in transfer cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_TRANSFERCMD_SHARED, ui32SrcReadOpPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ }
++ if((psKick->ui32NumDstSync > 0) && ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL))
++ {
++ psSyncInfo = psKick->ahDstSyncInfo[0];
++
++ PDUMPCOMMENT("Hack dest surface write op in transfer cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_TRANSFERCMD_SHARED, ui32DstWriteOpPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Hack dest surface read op in transfer cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_TRANSFERCMD_SHARED, ui32DstReadOpPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ }
++
++
++ if((psKick->ui32NumSrcSync > 0) && ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING)== 0UL))
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[0];
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
++ }
++
++ if((psKick->ui32NumDstSync > 0) && ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL))
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[0];
++ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
++ }
++ }
++#endif
++
++ sCommand.ui32Data[1] = psKick->sHWTransferContextDevVAddr.uiAddr;
++
++ eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_TRANSFER, &sCommand, KERNEL_ID, psKick->ui32PDumpFlags);
++
++ if (eError == PVRSRV_ERROR_RETRY)
++ {
++
++ if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_KEEPPENDING) == 0UL)
++ {
++ if (psKick->ui32NumSrcSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[0];
++ psSyncInfo->psSyncData->ui32ReadOpsPending--;
++ }
++ if (psKick->ui32NumDstSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[0];
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM()
++ || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
++ {
++ if (psKick->ui32NumSrcSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[0];
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal--;
++ }
++ if (psKick->ui32NumDstSync > 0)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[0];
++ psSyncInfo->psSyncData->ui32LastOpDumpVal--;
++ }
++ }
++#endif
++ }
++
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++ }
++ else if (PVRSRV_OK != eError)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXSubmitTransferKM: SGXScheduleCCBCommandKM failed."));
++ return eError;
++ }
++
++
++#if defined(NO_HARDWARE)
++ if ((psKick->ui32Flags & SGXMKIF_TQFLAGS_NOSYNCUPDATE) == 0)
++ {
++ IMG_UINT32 i;
++
++
++ for(i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++
++ for(i = 0; i < psKick->ui32NumDstSync; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[i];
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ }
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++ }
++#endif
++
++ return eError;
++}
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++IMG_EXPORT PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK *psKick)
++
++{
++ PVRSRV_KERNEL_MEM_INFO *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psKick->hCCBMemInfo;
++ SGXMKIF_COMMAND sCommand = {0};
++ SGXMKIF_2DCMD_SHARED *ps2DCmd;
++ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
++ PVRSRV_ERROR eError;
++ IMG_UINT32 i;
++
++ if (!CCB_OFFSET_IS_VALID(SGXMKIF_2DCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXSubmit2DKM: Invalid CCB offset"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++
++ ps2DCmd = CCB_DATA_FROM_OFFSET(SGXMKIF_2DCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset);
++
++ OSMemSet(ps2DCmd, 0, sizeof(*ps2DCmd));
++
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++
++ ps2DCmd->sTASyncData.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ ps2DCmd->sTASyncData.ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ ps2DCmd->sTASyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ ps2DCmd->sTASyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++
++ ps2DCmd->s3DSyncData.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ ps2DCmd->s3DSyncData.ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ ps2DCmd->s3DSyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ ps2DCmd->s3DSyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++
++
++ ps2DCmd->ui32NumSrcSync = psKick->ui32NumSrcSync;
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++
++ ps2DCmd->sSrcSyncData[i].ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ ps2DCmd->sSrcSyncData[i].ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ ps2DCmd->sSrcSyncData[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ ps2DCmd->sSrcSyncData[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++
++ if (psKick->hDstSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = psKick->hDstSyncInfo;
++
++ ps2DCmd->sDstSyncData.ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ ps2DCmd->sDstSyncData.ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++ ps2DCmd->sDstSyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++ ps2DCmd->sDstSyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++ }
++
++
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsPending++;
++ }
++
++ if (psKick->hDstSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = psKick->hDstSyncInfo;
++ psSyncInfo->psSyncData->ui32WriteOpsPending++;
++ }
++
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM()
++ || ((psKick->ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
++ {
++
++ PDUMPCOMMENT("Shared part of 2D command\r\n");
++ PDUMPMEM(ps2DCmd,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff,
++ sizeof(SGXMKIF_2DCMD_SHARED),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++
++ PDUMPCOMMENT("Hack src surface write op in 2D cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_2DCMD_SHARED, sSrcSyncData[i].ui32WriteOpsPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Hack src surface read op in 2D cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_2DCMD_SHARED, sSrcSyncData[i].ui32ReadOpsPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++ }
++
++ if (psKick->hDstSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = psKick->hDstSyncInfo;
++
++ PDUMPCOMMENT("Hack dest surface write op in 2D cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_2DCMD_SHARED, sDstSyncData.ui32WriteOpsPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++
++ PDUMPCOMMENT("Hack dest surface read op in 2D cmd\r\n");
++ PDUMPMEM(&psSyncInfo->psSyncData->ui32LastReadOpDumpVal,
++ psCCBMemInfo,
++ psKick->ui32CCBDumpWOff + offsetof(SGXMKIF_2DCMD_SHARED, sDstSyncData.ui32ReadOpsPendingVal),
++ sizeof(psSyncInfo->psSyncData->ui32LastReadOpDumpVal),
++ psKick->ui32PDumpFlags,
++ MAKEUNIQUETAG(psCCBMemInfo));
++ }
++
++
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal++;
++ }
++
++ if (psKick->hDstSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = psKick->hDstSyncInfo;
++ psSyncInfo->psSyncData->ui32LastOpDumpVal++;
++ }
++ }
++#endif
++
++ sCommand.ui32Data[1] = psKick->sHW2DContextDevVAddr.uiAddr;
++
++ eError = SGXScheduleCCBCommandKM(hDevHandle, SGXMKIF_CMD_2D, &sCommand, KERNEL_ID, psKick->ui32PDumpFlags);
++
++ if (eError == PVRSRV_ERROR_RETRY)
++ {
++#if defined(PDUMP)
++ if (PDumpIsCaptureFrameKM())
++ {
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32LastReadOpDumpVal--;
++ }
++
++ if (psKick->hDstSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = psKick->hDstSyncInfo;
++ psSyncInfo->psSyncData->ui32LastOpDumpVal--;
++ }
++ }
++#endif
++
++ for (i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = psKick->ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsPending--;
++ }
++
++ if (psKick->hDstSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = psKick->hDstSyncInfo;
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsPending--;
++ }
++ }
++
++#if defined(NO_HARDWARE)
++
++ for(i = 0; i < psKick->ui32NumSrcSync; i++)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[i];
++ psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ }
++
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hDstSyncInfo;
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ if (psKick->hTASyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++
++ if (psKick->h3DSyncInfo != IMG_NULL)
++ {
++ psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++
++ psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++ }
++#endif
++
++ return eError;
++}
++#endif
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxutils.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxutils.c
+new file mode 100644
+index 0000000..2c31d22
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxutils.c
+@@ -0,0 +1,928 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgx_mkif_km.h"
++#include "sysconfig.h"
++#include "pdump_km.h"
++#include "mmu.h"
++#include "pvr_bridge_km.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "sgxutils.h"
++
++#ifdef __linux__
++#include <linux/tty.h>
++#else
++#include <stdio.h>
++#endif
++#include "ospm_power.h"
++
++#if defined(SYS_CUSTOM_POWERDOWN)
++PVRSRV_ERROR SysPowerDownMISR(PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32CallerID);
++#endif
++
++
++
++IMG_VOID SGXPostActivePowerEvent(PVRSRV_DEVICE_NODE * psDeviceNode,
++ IMG_UINT32 ui32CallerID)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
++
++
++ psSGXHostCtl->ui32NumActivePowerEvents++;
++
++ if ((psSGXHostCtl->ui32PowerStatus & PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE) != 0)
++ {
++
++
++
++ if (ui32CallerID == ISR_ID)
++ {
++ psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
++ }
++ else
++ {
++ SGXScheduleProcessQueuesKM(psDeviceNode);
++ }
++ }
++}
++
++
++IMG_VOID SGXTestActivePowerEvent (PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_UINT32 ui32CallerID)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
++
++ if (((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) != 0) &&
++ ((psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) == 0))
++ {
++
++ psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER;
++
++
++ PDUMPSUSPEND();
++
++#if defined(SYS_CUSTOM_POWERDOWN)
++
++
++
++ eError = SysPowerDownMISR(psDeviceNode, ui32CallerID);
++#else
++ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ ui32CallerID, IMG_FALSE);
++ if (eError == PVRSRV_OK)
++ {
++ SGXPostActivePowerEvent(psDeviceNode, ui32CallerID);
++ }
++#endif
++ if (eError == PVRSRV_ERROR_RETRY)
++ {
++
++
++ psSGXHostCtl->ui32InterruptClearFlags &= ~PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER;
++ eError = PVRSRV_OK;
++ }
++
++
++ PDUMPRESUME();
++ }
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXTestActivePowerEvent error:%lu", eError));
++ }
++}
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGXAcquireKernelCCBSlot)
++#endif
++static INLINE SGXMKIF_COMMAND * SGXAcquireKernelCCBSlot(PVRSRV_SGX_CCB_INFO *psCCB)
++{
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++ {
++ if(((*psCCB->pui32WriteOffset + 1) & 255) != *psCCB->pui32ReadOffset)
++ {
++ return &psCCB->psCommands[*psCCB->pui32WriteOffset];
++ }
++
++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++ } END_LOOP_UNTIL_TIMEOUT();
++
++
++ return IMG_NULL;
++}
++
++PVRSRV_ERROR SGXScheduleCCBCommand(PVRSRV_SGXDEV_INFO *psDevInfo,
++ SGXMKIF_CMD_TYPE eCmdType,
++ SGXMKIF_COMMAND *psCommandData,
++ IMG_UINT32 ui32CallerID,
++ IMG_UINT32 ui32PDumpFlags)
++{
++ PVRSRV_SGX_CCB_INFO *psKernelCCB;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ SGXMKIF_COMMAND *psSGXCommand;
++#if defined(PDUMP)
++ IMG_VOID *pvDumpCommand;
++ IMG_BOOL bPDumpIsSuspended = PDumpIsSuspended();
++#else
++ PVR_UNREFERENCED_PARAMETER(ui32CallerID);
++ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
++#endif
++
++ psKernelCCB = psDevInfo->psKernelCCBInfo;
++
++ psSGXCommand = SGXAcquireKernelCCBSlot(psKernelCCB);
++
++
++ if(!psSGXCommand)
++ {
++ eError = PVRSRV_ERROR_TIMEOUT;
++ goto Exit;
++ }
++
++
++ psCommandData->ui32CacheControl = psDevInfo->ui32CacheControl;
++
++#if defined(PDUMP)
++
++ psDevInfo->sPDContext.ui32CacheControl |= psDevInfo->ui32CacheControl;
++#endif
++
++
++ psDevInfo->ui32CacheControl = 0;
++
++
++ *psSGXCommand = *psCommandData;
++
++ if (eCmdType >= SGXMKIF_CMD_MAX)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXScheduleCCBCommandKM: Unknown command type: %d", eCmdType)) ;
++ eError = PVRSRV_ERROR_GENERIC;
++ goto Exit;
++ }
++
++#if defined(SUPPORT_CPU_CACHED_BUFFERS)
++ {
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++
++ if (psSysData->bFlushAll)
++ {
++ OSFlushCPUCacheKM();
++
++ psSysData->bFlushAll = IMG_FALSE;
++ }
++ }
++#endif
++
++ psSGXCommand->ui32ServiceAddress = psDevInfo->aui32HostKickAddr[eCmdType];
++
++#if defined(PDUMP)
++ if ((ui32CallerID != ISR_ID) && (bPDumpIsSuspended == IMG_FALSE))
++ {
++
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Poll for space in the Kernel CCB\r\n");
++ PDUMPMEMPOL(psKernelCCB->psCCBCtlMemInfo,
++ offsetof(PVRSRV_SGX_CCB_CTL, ui32ReadOffset),
++ (psKernelCCB->ui32CCBDumpWOff + 1) & 0xff,
++ 0xff,
++ PDUMP_POLL_OPERATOR_NOTEQUAL,
++ ui32PDumpFlags,
++ MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
++
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kernel CCB command\r\n");
++ pvDumpCommand = (IMG_VOID *)((IMG_UINT8 *)psKernelCCB->psCCBMemInfo->pvLinAddrKM + (*psKernelCCB->pui32WriteOffset * sizeof(SGXMKIF_COMMAND)));
++
++ PDUMPMEM(pvDumpCommand,
++ psKernelCCB->psCCBMemInfo,
++ psKernelCCB->ui32CCBDumpWOff * sizeof(SGXMKIF_COMMAND),
++ sizeof(SGXMKIF_COMMAND),
++ ui32PDumpFlags,
++ MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo));
++
++
++ PDUMPMEM(&psDevInfo->sPDContext.ui32CacheControl,
++ psKernelCCB->psCCBMemInfo,
++ psKernelCCB->ui32CCBDumpWOff * sizeof(SGXMKIF_COMMAND) +
++ offsetof(SGXMKIF_COMMAND, ui32CacheControl),
++ sizeof(IMG_UINT32),
++ ui32PDumpFlags,
++ MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo));
++
++ if (PDumpIsCaptureFrameKM()
++ || ((ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
++ {
++
++ psDevInfo->sPDContext.ui32CacheControl = 0;
++ }
++ }
++#endif
++
++#if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
++
++ eError = PollForValueKM (psKernelCCB->pui32ReadOffset,
++ *psKernelCCB->pui32WriteOffset,
++ 0xFF,
++ MAX_HW_TIME_US/WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT);
++ if (eError != PVRSRV_OK)
++ {
++ eError = PVRSRV_ERROR_TIMEOUT;
++ goto Exit;
++ }
++#endif
++
++
++
++ *psKernelCCB->pui32WriteOffset = (*psKernelCCB->pui32WriteOffset + 1) & 255;
++
++#if defined(PDUMP)
++ if ((ui32CallerID != ISR_ID) && (bPDumpIsSuspended == IMG_FALSE))
++ {
++ #if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Poll for previous Kernel CCB CMD to be read\r\n");
++ PDUMPMEMPOL(psKernelCCB->psCCBCtlMemInfo,
++ offsetof(PVRSRV_SGX_CCB_CTL, ui32ReadOffset),
++ (psKernelCCB->ui32CCBDumpWOff),
++ 0xFF,
++ PDUMP_POLL_OPERATOR_EQUAL,
++ ui32PDumpFlags,
++ MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
++ #endif
++
++ if (PDumpIsCaptureFrameKM()
++ || ((ui32PDumpFlags & PDUMP_FLAGS_CONTINUOUS) != 0))
++ {
++ psKernelCCB->ui32CCBDumpWOff = (psKernelCCB->ui32CCBDumpWOff + 1) & 0xFF;
++ psDevInfo->ui32KernelCCBEventKickerDumpVal = (psDevInfo->ui32KernelCCBEventKickerDumpVal + 1) & 0xFF;
++ }
++
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kernel CCB write offset\r\n");
++ PDUMPMEM(&psKernelCCB->ui32CCBDumpWOff,
++ psKernelCCB->psCCBCtlMemInfo,
++ offsetof(PVRSRV_SGX_CCB_CTL, ui32WriteOffset),
++ sizeof(IMG_UINT32),
++ ui32PDumpFlags,
++ MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kernel CCB event kicker\r\n");
++ PDUMPMEM(&psDevInfo->ui32KernelCCBEventKickerDumpVal,
++ psDevInfo->psKernelCCBEventKickerMemInfo,
++ 0,
++ sizeof(IMG_UINT32),
++ ui32PDumpFlags,
++ MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
++ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "Kick the SGX microkernel\r\n");
++ #if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
++ PDUMPREGWITHFLAGS(SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0), EUR_CR_EVENT_KICK2_NOW_MASK, ui32PDumpFlags);
++ #else
++ PDUMPREGWITHFLAGS(SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0), EUR_CR_EVENT_KICK_NOW_MASK, ui32PDumpFlags);
++ #endif
++ }
++#endif
++
++ *psDevInfo->pui32KernelCCBEventKicker = (*psDevInfo->pui32KernelCCBEventKicker + 1) & 0xFF;
++#if defined(FIX_HW_BRN_26620) && defined(SGX_FEATURE_SYSTEM_CACHE) && !defined(SGX_BYPASS_SYSTEM_CACHE)
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
++ SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK2, 0),
++ EUR_CR_EVENT_KICK2_NOW_MASK);
++#else
++ OSWriteHWReg(psDevInfo->pvRegsBaseKM,
++ SGX_MP_CORE_SELECT(EUR_CR_EVENT_KICK, 0),
++ EUR_CR_EVENT_KICK_NOW_MASK);
++#endif
++
++#if defined(NO_HARDWARE)
++
++ *psKernelCCB->pui32ReadOffset = (*psKernelCCB->pui32ReadOffset + 1) & 255;
++#endif
++
++Exit:
++ return eError;
++}
++
++
++PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE *psDeviceNode,
++ SGXMKIF_CMD_TYPE eCmdType,
++ SGXMKIF_COMMAND *psCommandData,
++ IMG_UINT32 ui32CallerID,
++ IMG_UINT32 ui32PDumpFlags)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++
++
++ PDUMPSUSPEND();
++
++ ospm_power_using_hw_begin(OSPM_GRAPHICS_ISLAND, OSPM_UHB_IGNORE_POWER_OFF);
++
++ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE_ON,
++ ui32CallerID,
++ IMG_TRUE);
++
++ PDUMPRESUME();
++
++ if (eError == PVRSRV_OK)
++ {
++ psDeviceNode->bReProcessDeviceCommandComplete = IMG_FALSE;
++ }
++ else
++ {
++ if (eError == PVRSRV_ERROR_RETRY)
++ {
++ if (ui32CallerID == ISR_ID)
++ {
++
++
++
++ psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
++ eError = PVRSRV_OK;
++ }
++ else
++ {
++
++
++ }
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXScheduleCCBCommandKM failed to acquire lock - "
++ "ui32CallerID:%ld eError:%lu", ui32CallerID, eError));
++ }
++
++ ospm_power_using_hw_end(OSPM_GRAPHICS_ISLAND);
++ return eError;
++ }
++
++ eError = SGXScheduleCCBCommand(psDevInfo, eCmdType, psCommandData, ui32CallerID, ui32PDumpFlags);
++
++ PVRSRVPowerUnlock(ui32CallerID);
++
++ ospm_power_using_hw_end(OSPM_GRAPHICS_ISLAND);
++
++ if (ui32CallerID != ISR_ID)
++ {
++
++
++
++ SGXTestActivePowerEvent(psDeviceNode, ui32CallerID);
++ }
++
++ return eError;
++}
++
++
++PVRSRV_ERROR SGXScheduleProcessQueuesKM(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ SGXMKIF_HOST_CTL *psHostCtl = psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM;
++ IMG_UINT32 ui32PowerStatus;
++ SGXMKIF_COMMAND sCommand = {0};
++
++ ui32PowerStatus = psHostCtl->ui32PowerStatus;
++ if ((ui32PowerStatus & PVRSRV_USSE_EDM_POWMAN_NO_WORK) != 0)
++ {
++
++ return PVRSRV_OK;
++ }
++
++ eError = SGXScheduleCCBCommandKM(psDeviceNode, SGXMKIF_CMD_PROCESS_QUEUES, &sCommand, ISR_ID, 0);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXScheduleProcessQueuesKM failed to schedule CCB command: %lu", eError));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++IMG_BOOL SGXIsDevicePowered(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ return PVRSRVIsDevicePowered(psDeviceNode->sDevId.ui32DeviceIndex);
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie,
++ SGX_INTERNAL_DEVINFO *psSGXInternalDevInfo)
++{
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++ psSGXInternalDevInfo->ui32Flags = psDevInfo->ui32Flags;
++ psSGXInternalDevInfo->bForcePTOff = (IMG_BOOL)psDevInfo->bForcePTOff;
++
++
++ psSGXInternalDevInfo->hHostCtlKernelMemInfoHandle =
++ (IMG_HANDLE)psDevInfo->psKernelSGXHostCtlMemInfo;
++
++ return PVRSRV_OK;
++}
++
++
++IMG_VOID SGXCleanupRequest(PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_DEV_VIRTADDR *psHWDataDevVAddr,
++ IMG_UINT32 ui32CleanupType)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_SGXDEV_INFO *psSGXDevInfo = psDeviceNode->pvDevice;
++ PVRSRV_KERNEL_MEM_INFO *psSGXHostCtlMemInfo = psSGXDevInfo->psKernelSGXHostCtlMemInfo;
++ SGXMKIF_HOST_CTL *psSGXHostCtl = psSGXHostCtlMemInfo->pvLinAddrKM;
++
++ if ((psSGXHostCtl->ui32PowerStatus & PVRSRV_USSE_EDM_POWMAN_NO_WORK) != 0)
++ {
++
++ }
++ else
++ {
++ SGXMKIF_COMMAND sCommand = {0};
++
++ PDUMPCOMMENTWITHFLAGS(0, "Request ukernel resouce clean-up");
++ sCommand.ui32Data[0] = ui32CleanupType;
++ sCommand.ui32Data[1] = (psHWDataDevVAddr == IMG_NULL) ? 0 : psHWDataDevVAddr->uiAddr;
++
++ eError = SGXScheduleCCBCommandKM(psDeviceNode, SGXMKIF_CMD_CLEANUP, &sCommand, KERNEL_ID, 0);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXCleanupRequest: Failed to submit clean-up command"));
++ PVR_DBG_BREAK;
++ }
++
++
++ #if !defined(NO_HARDWARE)
++ if(PollForValueKM(&psSGXHostCtl->ui32CleanupStatus,
++ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE,
++ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE,
++ MAX_HW_TIME_US/WAIT_TRY_COUNT,
++ WAIT_TRY_COUNT) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SGXCleanupRequest: Wait for uKernel to clean up failed"));
++ PVR_DBG_BREAK;
++ }
++ #endif
++
++ #if defined(PDUMP)
++
++ PDUMPCOMMENTWITHFLAGS(0, "Host Control - Poll for clean-up request to complete");
++ PDUMPMEMPOL(psSGXHostCtlMemInfo,
++ offsetof(SGXMKIF_HOST_CTL, ui32CleanupStatus),
++ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE,
++ PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE,
++ PDUMP_POLL_OPERATOR_EQUAL,
++ 0,
++ MAKEUNIQUETAG(psSGXHostCtlMemInfo));
++ #endif
++
++ psSGXHostCtl->ui32CleanupStatus &= ~(PVRSRV_USSE_EDM_CLEANUPCMD_COMPLETE);
++ PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo, offsetof(SGXMKIF_HOST_CTL, ui32CleanupStatus), sizeof(IMG_UINT32), 0, MAKEUNIQUETAG(psSGXHostCtlMemInfo));
++ }
++}
++
++
++typedef struct _SGX_HW_RENDER_CONTEXT_CLEANUP_
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_DEV_VIRTADDR sHWRenderContextDevVAddr;
++ IMG_HANDLE hBlockAlloc;
++ PRESMAN_ITEM psResItem;
++} SGX_HW_RENDER_CONTEXT_CLEANUP;
++
++
++static PVRSRV_ERROR SGXCleanupHWRenderContextCallback(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup = pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ SGXCleanupRequest(psCleanup->psDeviceNode,
++ &psCleanup->sHWRenderContextDevVAddr,
++ PVRSRV_CLEANUPCMD_RC);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
++ psCleanup,
++ psCleanup->hBlockAlloc);
++
++
++ return PVRSRV_OK;
++}
++
++typedef struct _SGX_HW_TRANSFER_CONTEXT_CLEANUP_
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
++ IMG_HANDLE hBlockAlloc;
++ PRESMAN_ITEM psResItem;
++} SGX_HW_TRANSFER_CONTEXT_CLEANUP;
++
++
++static PVRSRV_ERROR SGXCleanupHWTransferContextCallback(IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param)
++{
++ SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *)pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ SGXCleanupRequest(psCleanup->psDeviceNode,
++ &psCleanup->sHWTransferContextDevVAddr,
++ PVRSRV_CLEANUPCMD_TC);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
++ psCleanup,
++ psCleanup->hBlockAlloc);
++
++
++ return PVRSRV_OK;
++}
++
++IMG_EXPORT
++IMG_HANDLE SGXRegisterHWRenderContextKM(IMG_HANDLE psDeviceNode,
++ IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hBlockAlloc;
++ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
++ PRESMAN_ITEM psResItem;
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
++ (IMG_VOID **)&psCleanup,
++ &hBlockAlloc,
++ "SGX Hardware Render Context Cleanup");
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Couldn't allocate memory for SGX_HW_RENDER_CONTEXT_CLEANUP structure"));
++ return IMG_NULL;
++ }
++
++ psCleanup->hBlockAlloc = hBlockAlloc;
++ psCleanup->psDeviceNode = psDeviceNode;
++ psCleanup->sHWRenderContextDevVAddr = *psHWRenderContextDevVAddr;
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_HW_RENDER_CONTEXT,
++ (IMG_VOID *)psCleanup,
++ 0,
++ &SGXCleanupHWRenderContextCallback);
++
++ if (psResItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: ResManRegisterRes failed"));
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
++ psCleanup,
++ psCleanup->hBlockAlloc);
++
++
++ return IMG_NULL;
++ }
++
++ psCleanup->psResItem = psResItem;
++
++ return (IMG_HANDLE)psCleanup;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext)
++{
++ PVRSRV_ERROR eError;
++ SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
++
++ PVR_ASSERT(hHWRenderContext != IMG_NULL);
++
++ psCleanup = (SGX_HW_RENDER_CONTEXT_CLEANUP *)hHWRenderContext;
++
++ if (psCleanup == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXUnregisterHWRenderContextKM: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ eError = ResManFreeResByPtr(psCleanup->psResItem);
++
++ return eError;
++}
++
++
++IMG_EXPORT
++IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE psDeviceNode,
++ IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hBlockAlloc;
++ SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup;
++ PRESMAN_ITEM psResItem;
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
++ (IMG_VOID **)&psCleanup,
++ &hBlockAlloc,
++ "SGX Hardware Transfer Context Cleanup");
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Couldn't allocate memory for SGX_HW_TRANSFER_CONTEXT_CLEANUP structure"));
++ return IMG_NULL;
++ }
++
++ psCleanup->hBlockAlloc = hBlockAlloc;
++ psCleanup->psDeviceNode = psDeviceNode;
++ psCleanup->sHWTransferContextDevVAddr = *psHWTransferContextDevVAddr;
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_HW_TRANSFER_CONTEXT,
++ psCleanup,
++ 0,
++ &SGXCleanupHWTransferContextCallback);
++
++ if (psResItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: ResManRegisterRes failed"));
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
++ psCleanup,
++ psCleanup->hBlockAlloc);
++
++
++ return IMG_NULL;
++ }
++
++ psCleanup->psResItem = psResItem;
++
++ return (IMG_HANDLE)psCleanup;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXUnregisterHWTransferContextKM(IMG_HANDLE hHWTransferContext)
++{
++ PVRSRV_ERROR eError;
++ SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup;
++
++ PVR_ASSERT(hHWTransferContext != IMG_NULL);
++
++ psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *)hHWTransferContext;
++
++ if (psCleanup == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXUnregisterHWTransferContextKM: invalid parameter"));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ eError = ResManFreeResByPtr(psCleanup->psResItem);
++
++ return eError;
++}
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++typedef struct _SGX_HW_2D_CONTEXT_CLEANUP_
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_DEV_VIRTADDR sHW2DContextDevVAddr;
++ IMG_HANDLE hBlockAlloc;
++ PRESMAN_ITEM psResItem;
++} SGX_HW_2D_CONTEXT_CLEANUP;
++
++static PVRSRV_ERROR SGXCleanupHW2DContextCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++ SGX_HW_2D_CONTEXT_CLEANUP *psCleanup = (SGX_HW_2D_CONTEXT_CLEANUP *)pvParam;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ SGXCleanupRequest(psCleanup->psDeviceNode,
++ &psCleanup->sHW2DContextDevVAddr,
++ PVRSRV_CLEANUPCMD_2DC);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
++ psCleanup,
++ psCleanup->hBlockAlloc);
++
++
++ return PVRSRV_OK;
++}
++
++IMG_EXPORT
++IMG_HANDLE SGXRegisterHW2DContextKM(IMG_HANDLE psDeviceNode,
++ IMG_DEV_VIRTADDR *psHW2DContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hBlockAlloc;
++ SGX_HW_2D_CONTEXT_CLEANUP *psCleanup;
++ PRESMAN_ITEM psResItem;
++
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
++ (IMG_VOID **)&psCleanup,
++ &hBlockAlloc,
++ "SGX Hardware 2D Context Cleanup");
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Couldn't allocate memory for SGX_HW_2D_CONTEXT_CLEANUP structure"));
++ return IMG_NULL;
++ }
++
++ psCleanup->hBlockAlloc = hBlockAlloc;
++ psCleanup->psDeviceNode = psDeviceNode;
++ psCleanup->sHW2DContextDevVAddr = *psHW2DContextDevVAddr;
++
++ psResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_HW_2D_CONTEXT,
++ psCleanup,
++ 0,
++ &SGXCleanupHW2DContextCallback);
++
++ if (psResItem == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: ResManRegisterRes failed"));
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
++ psCleanup,
++ psCleanup->hBlockAlloc);
++
++
++ return IMG_NULL;
++ }
++
++ psCleanup->psResItem = psResItem;
++
++ return (IMG_HANDLE)psCleanup;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXUnregisterHW2DContextKM(IMG_HANDLE hHW2DContext)
++{
++ PVRSRV_ERROR eError;
++ SGX_HW_2D_CONTEXT_CLEANUP *psCleanup;
++
++ PVR_ASSERT(hHW2DContext != IMG_NULL);
++
++ if (hHW2DContext == IMG_NULL)
++ {
++ return (PVRSRV_ERROR_INVALID_PARAMS);
++ }
++
++ psCleanup = (SGX_HW_2D_CONTEXT_CLEANUP *)hHW2DContext;
++
++ eError = ResManFreeResByPtr(psCleanup->psResItem);
++
++ return eError;
++}
++#endif
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGX2DQuerySyncOpsComplete)
++#endif
++static INLINE
++IMG_BOOL SGX2DQuerySyncOpsComplete(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo,
++ IMG_UINT32 ui32ReadOpsPending,
++ IMG_UINT32 ui32WriteOpsPending)
++{
++ PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData;
++
++ return (IMG_BOOL)(
++ (psSyncData->ui32ReadOpsComplete >= ui32ReadOpsPending) &&
++ (psSyncData->ui32WriteOpsComplete >= ui32WriteOpsPending)
++ );
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO *psDevInfo,
++ PVRSRV_KERNEL_SYNC_INFO *psSyncInfo,
++ IMG_BOOL bWaitForComplete)
++{
++ IMG_UINT32 ui32ReadOpsPending, ui32WriteOpsPending;
++
++ PVR_UNREFERENCED_PARAMETER(psDevInfo);
++
++ PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: Start"));
++
++ ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOpsPending;
++ ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++ if(SGX2DQuerySyncOpsComplete(psSyncInfo, ui32ReadOpsPending, ui32WriteOpsPending))
++ {
++
++ PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: No wait. Blits complete."));
++ return PVRSRV_OK;
++ }
++
++
++ if (!bWaitForComplete)
++ {
++
++ PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: No wait. Ops pending."));
++ return PVRSRV_ERROR_CMD_NOT_PROCESSED;
++ }
++
++
++ PVR_DPF((PVR_DBG_MESSAGE, "SGX2DQueryBlitsCompleteKM: Ops pending. Start polling."));
++
++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++ {
++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++
++ if(SGX2DQuerySyncOpsComplete(psSyncInfo, ui32ReadOpsPending, ui32WriteOpsPending))
++ {
++
++ PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: Wait over. Blits complete."));
++ return PVRSRV_OK;
++ }
++
++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++ } END_LOOP_UNTIL_TIMEOUT();
++
++
++ PVR_DPF((PVR_DBG_ERROR,"SGX2DQueryBlitsCompleteKM: Timed out. Ops pending."));
++
++#if defined(DEBUG)
++ {
++ PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData;
++
++ PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Syncinfo: %p, Syncdata: %p", psSyncInfo, psSyncData));
++
++ PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Read ops complete: %d, Read ops pending: %d", psSyncData->ui32ReadOpsComplete, psSyncData->ui32ReadOpsPending));
++ PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Write ops complete: %d, Write ops pending: %d", psSyncData->ui32WriteOpsComplete, psSyncData->ui32WriteOpsPending));
++
++ }
++#endif
++
++ return PVRSRV_ERROR_TIMEOUT;
++}
++
++
++IMG_EXPORT
++IMG_VOID SGXFlushHWRenderTargetKM(IMG_HANDLE psDeviceNode, IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr)
++{
++ PVR_ASSERT(sHWRTDataSetDevVAddr.uiAddr != IMG_NULL);
++
++ SGXCleanupRequest(psDeviceNode,
++ &sHWRTDataSetDevVAddr,
++ PVRSRV_CLEANUPCMD_RT);
++}
++
++
++IMG_UINT32 SGXConvertTimeStamp(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32TimeWraps,
++ IMG_UINT32 ui32Time)
++{
++#if defined(EUR_CR_TIMER)
++ PVR_UNREFERENCED_PARAMETER(psDevInfo);
++ PVR_UNREFERENCED_PARAMETER(ui32TimeWraps);
++ return ui32Time;
++#else
++ IMG_UINT64 ui64Clocks;
++ IMG_UINT32 ui32Clocksx16;
++
++ ui64Clocks = ((IMG_UINT64)ui32TimeWraps * psDevInfo->ui32uKernelTimerClock) +
++ (psDevInfo->ui32uKernelTimerClock - (ui32Time & EUR_CR_EVENT_TIMER_VALUE_MASK));
++ ui32Clocksx16 = (IMG_UINT32)(ui64Clocks / 16);
++
++ return ui32Clocksx16;
++#endif
++}
++
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxutils.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxutils.h
+new file mode 100644
+index 0000000..bc4c053
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/devices/sgx/sgxutils.h
+@@ -0,0 +1,99 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "perproc.h"
++#include "sgxinfokm.h"
++
++#define CCB_OFFSET_IS_VALID(type, psCCBMemInfo, psCCBKick, offset) \
++ ((sizeof(type) <= (psCCBMemInfo)->ui32AllocSize) && \
++ ((psCCBKick)->offset <= (psCCBMemInfo)->ui32AllocSize - sizeof(type)))
++
++#define CCB_DATA_FROM_OFFSET(type, psCCBMemInfo, psCCBKick, offset) \
++ ((type *)(((IMG_CHAR *)(psCCBMemInfo)->pvLinAddrKM) + \
++ (psCCBKick)->offset))
++
++
++IMG_IMPORT
++IMG_VOID SGXTestActivePowerEvent(PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_UINT32 ui32CallerID);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXScheduleCCBCommand(PVRSRV_SGXDEV_INFO *psDevInfo,
++ SGXMKIF_CMD_TYPE eCommandType,
++ SGXMKIF_COMMAND *psCommandData,
++ IMG_UINT32 ui32CallerID,
++ IMG_UINT32 ui32PDumpFlags);
++IMG_IMPORT
++PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE *psDeviceNode,
++ SGXMKIF_CMD_TYPE eCommandType,
++ SGXMKIF_COMMAND *psCommandData,
++ IMG_UINT32 ui32CallerID,
++ IMG_UINT32 ui32PDumpFlags);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXScheduleProcessQueuesKM(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_IMPORT
++IMG_BOOL SGXIsDevicePowered(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_IMPORT
++IMG_HANDLE SGXRegisterHWRenderContextKM(IMG_HANDLE psDeviceNode,
++ IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++IMG_IMPORT
++IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE psDeviceNode,
++ IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++IMG_IMPORT
++IMG_VOID SGXFlushHWRenderTargetKM(IMG_HANDLE psSGXDevInfo, IMG_DEV_VIRTADDR psHWRTDataSetDevVAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXUnregisterHWTransferContextKM(IMG_HANDLE hHWTransferContext);
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++IMG_IMPORT
++IMG_HANDLE SGXRegisterHW2DContextKM(IMG_HANDLE psDeviceNode,
++ IMG_DEV_VIRTADDR *psHW2DContextDevVAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXUnregisterHW2DContextKM(IMG_HANDLE hHW2DContext);
++#endif
++
++IMG_UINT32 SGXConvertTimeStamp(PVRSRV_SGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32TimeWraps,
++ IMG_UINT32 ui32Time);
++
++IMG_VOID SGXCleanupRequest(PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_DEV_VIRTADDR *psHWDataDevVAddr,
++ IMG_UINT32 ui32CleanupType);
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/.gitignore b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/.gitignore
+new file mode 100644
+index 0000000..2f89523
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/.gitignore
+@@ -0,0 +1,5 @@
++bin_pc_i686*
++tmp_pc_i686*
++host_pc_i686*
++*.o
++*.o.cmd
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/env_data.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/env_data.h
+new file mode 100644
+index 0000000..3d41219
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/env_data.h
+@@ -0,0 +1,66 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _ENV_DATA_
++#define _ENV_DATA_
++
++#include <linux/interrupt.h>
++#include <linux/pci.h>
++
++#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
++#include <linux/workqueue.h>
++#endif
++
++#define PVRSRV_MAX_BRIDGE_IN_SIZE 0x1000
++#define PVRSRV_MAX_BRIDGE_OUT_SIZE 0x1000
++
++typedef struct _PVR_PCI_DEV_TAG
++{
++ struct pci_dev *psPCIDev;
++ HOST_PCI_INIT_FLAGS ePCIFlags;
++ IMG_BOOL abPCIResourceInUse[DEVICE_COUNT_RESOURCE];
++} PVR_PCI_DEV;
++
++typedef struct _ENV_DATA_TAG
++{
++ IMG_VOID *pvBridgeData;
++ struct pm_dev *psPowerDevice;
++ IMG_BOOL bLISRInstalled;
++ IMG_BOOL bMISRInstalled;
++ IMG_UINT32 ui32IRQ;
++ IMG_VOID *pvISRCookie;
++#if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
++ struct workqueue_struct *psWorkQueue;
++#endif
++#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
++ struct work_struct sMISRWork;
++ IMG_VOID *pvMISRData;
++#else
++ struct tasklet_struct sMISRTasklet;
++#endif
++} ENV_DATA;
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/env_perproc.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/env_perproc.h
+new file mode 100644
+index 0000000..a6e49db
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/env_perproc.h
+@@ -0,0 +1,56 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __ENV_PERPROC_H__
++#define __ENV_PERPROC_H__
++
++#include <linux/list.h>
++#include <linux/proc_fs.h>
++
++#include "services.h"
++#include "handle.h"
++
++typedef struct _PVRSRV_ENV_PER_PROCESS_DATA_
++{
++ IMG_HANDLE hBlockAlloc;
++ struct proc_dir_entry *psProcDir;
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ struct list_head sDRMAuthListHead;
++#endif
++} PVRSRV_ENV_PER_PROCESS_DATA;
++
++IMG_VOID RemovePerProcessProcDir(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);
++
++PVRSRV_ERROR LinuxMMapPerProcessConnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);
++
++IMG_VOID LinuxMMapPerProcessDisconnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc);
++
++PVRSRV_ERROR LinuxMMapPerProcessHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase);
++
++IMG_HANDLE LinuxTerminatingProcessPrivateData(IMG_VOID);
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/event.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/event.c
+new file mode 100644
+index 0000000..33eca49
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/event.c
+@@ -0,0 +1,273 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <asm/io.h>
++#include <asm/page.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#include <asm/system.h>
++#endif
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/delay.h>
++#include <linux/pci.h>
++
++#include <linux/string.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <asm/hardirq.h>
++#include <linux/timer.h>
++#include <linux/capability.h>
++#include <linux/sched.h>
++#include <asm/uaccess.h>
++
++#include "img_types.h"
++#include "services_headers.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "env_data.h"
++#include "proc.h"
++#include "mutex.h"
++#include "lock.h"
++
++typedef struct PVRSRV_LINUX_EVENT_OBJECT_LIST_TAG
++{
++ rwlock_t sLock;
++ struct list_head sList;
++
++} PVRSRV_LINUX_EVENT_OBJECT_LIST;
++
++
++typedef struct PVRSRV_LINUX_EVENT_OBJECT_TAG
++{
++ atomic_t sTimeStamp;
++ IMG_UINT32 ui32TimeStampPrevious;
++#if defined(DEBUG)
++ IMG_UINT ui32Stats;
++#endif
++ wait_queue_head_t sWait;
++ struct list_head sList;
++ IMG_HANDLE hResItem;
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList;
++} PVRSRV_LINUX_EVENT_OBJECT;
++
++PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList)
++{
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList;
++
++ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT_LIST),
++ (IMG_VOID **)&psEvenObjectList, IMG_NULL,
++ "Linux Event Object List") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectCreate: failed to allocate memory for event list"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ INIT_LIST_HEAD(&psEvenObjectList->sList);
++
++ rwlock_init(&psEvenObjectList->sLock);
++
++ *phEventObjectList = (IMG_HANDLE *) psEvenObjectList;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList)
++{
++
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hEventObjectList ;
++
++ if(psEvenObjectList)
++ {
++ if (!list_empty(&psEvenObjectList->sList))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectListDestroy: Event List is not empty"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT_LIST), psEvenObjectList, IMG_NULL);
++
++ }
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObjectList, IMG_HANDLE hOSEventObject)
++{
++ if(hOSEventObjectList)
++ {
++ if(hOSEventObject)
++ {
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject;
++#if defined(DEBUG)
++ PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectListDelete: Event object waits: %lu", psLinuxEventObject->ui32Stats));
++#endif
++ if(ResManFreeResByPtr(psLinuxEventObject->hResItem) != PVRSRV_OK)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++ }
++ }
++ return PVRSRV_ERROR_GENERIC;
++
++}
++
++static PVRSRV_ERROR LinuxEventObjectDeleteCallback(IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = pvParam;
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++ write_lock_bh(&psLinuxEventObjectList->sLock);
++ list_del(&psLinuxEventObject->sList);
++ write_unlock_bh(&psLinuxEventObjectList->sLock);
++
++#if defined(DEBUG)
++ PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectDeleteCallback: Event object waits: %lu", psLinuxEventObject->ui32Stats));
++#endif
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT), psLinuxEventObject, IMG_NULL);
++
++
++ return PVRSRV_OK;
++}
++PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject)
++ {
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
++ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++
++ psPerProc = PVRSRVPerProcessData(ui32PID);
++ if (psPerProc == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: Couldn't find per-process data"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT),
++ (IMG_VOID **)&psLinuxEventObject, IMG_NULL,
++ "Linux Event Object") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed to allocate memory "));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ INIT_LIST_HEAD(&psLinuxEventObject->sList);
++
++ atomic_set(&psLinuxEventObject->sTimeStamp, 0);
++ psLinuxEventObject->ui32TimeStampPrevious = 0;
++
++#if defined(DEBUG)
++ psLinuxEventObject->ui32Stats = 0;
++#endif
++ init_waitqueue_head(&psLinuxEventObject->sWait);
++
++ psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList;
++
++ psLinuxEventObject->hResItem = ResManRegisterRes(psPerProc->hResManContext,
++ RESMAN_TYPE_EVENT_OBJECT,
++ psLinuxEventObject,
++ 0,
++ &LinuxEventObjectDeleteCallback);
++
++ write_lock_bh(&psLinuxEventObjectList->sLock);
++ list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList);
++ write_unlock_bh(&psLinuxEventObjectList->sLock);
++
++ *phOSEventObject = psLinuxEventObject;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList)
++{
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
++ struct list_head *psListEntry, *psListEntryTemp, *psList;
++ psList = &psLinuxEventObjectList->sList;
++
++ list_for_each_safe(psListEntry, psListEntryTemp, psList)
++ {
++
++ psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)list_entry(psListEntry, PVRSRV_LINUX_EVENT_OBJECT, sList);
++
++ atomic_inc(&psLinuxEventObject->sTimeStamp);
++ wake_up_interruptible(&psLinuxEventObject->sWait);
++ }
++
++ return PVRSRV_OK;
++
++}
++
++PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT32 ui32MSTimeout)
++{
++ IMG_UINT32 ui32TimeStamp;
++ DEFINE_WAIT(sWait);
++
++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject;
++
++ IMG_UINT32 ui32TimeOutJiffies = msecs_to_jiffies(ui32MSTimeout);
++
++ do
++ {
++ prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE);
++ ui32TimeStamp = atomic_read(&psLinuxEventObject->sTimeStamp);
++
++ if(psLinuxEventObject->ui32TimeStampPrevious != ui32TimeStamp)
++ {
++ break;
++ }
++
++ LinuxUnLockMutex(&gPVRSRVLock);
++
++ ui32TimeOutJiffies = (IMG_UINT32)schedule_timeout((IMG_INT32)ui32TimeOutJiffies);
++
++ LinuxLockMutex(&gPVRSRVLock);
++#if defined(DEBUG)
++ psLinuxEventObject->ui32Stats++;
++#endif
++
++
++ } while (ui32TimeOutJiffies);
++
++ finish_wait(&psLinuxEventObject->sWait, &sWait);
++
++ psLinuxEventObject->ui32TimeStampPrevious = ui32TimeStamp;
++
++ return ui32TimeOutJiffies ? PVRSRV_OK : PVRSRV_ERROR_TIMEOUT;
++
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/event.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/event.h
+new file mode 100644
+index 0000000..d07bc97
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/event.h
+@@ -0,0 +1,32 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList);
++PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList);
++PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject);
++PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObjectList, IMG_HANDLE hOSEventObject);
++PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList);
++PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT32 ui32MSTimeout);
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/linkage.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/linkage.h
+new file mode 100644
+index 0000000..1ec2696
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/linkage.h
+@@ -0,0 +1,61 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __LINKAGE_H__
++#define __LINKAGE_H__
++
++#if !defined(SUPPORT_DRI_DRM)
++IMG_INT32 PVRSRV_BridgeDispatchKM(struct file *file, IMG_UINT cmd, IMG_UINT32 arg);
++#endif
++
++IMG_VOID PVRDPFInit(IMG_VOID);
++PVRSRV_ERROR PVROSFuncInit(IMG_VOID);
++IMG_VOID PVROSFuncDeInit(IMG_VOID);
++
++#ifdef DEBUG
++IMG_INT PVRDebugProcSetLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data);
++IMG_VOID PVRDebugSetLevel(IMG_UINT32 uDebugLevel);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void ProcSeqShowDebugLevel(struct seq_file *sfile,void* el);
++#else
++IMG_INT PVRDebugProcGetLevel(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data);
++#endif
++
++#ifdef PVR_MANUAL_POWER_CONTROL
++IMG_INT PVRProcSetPowerLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void ProcSeqShowPowerLevel(struct seq_file *sfile,void* el);
++#else
++IMG_INT PVRProcGetPowerLevel(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data);
++#endif
++
++
++#endif
++#endif
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/lock.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/lock.h
+new file mode 100644
+index 0000000..e0bf5ee
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/lock.h
+@@ -0,0 +1,32 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __LOCK_H__
++#define __LOCK_H__
++
++extern PVRSRV_LINUX_MUTEX gPVRSRVLock;
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mm.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mm.c
+new file mode 100644
+index 0000000..97a4750
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mm.c
+@@ -0,0 +1,2360 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/mm.h>
++#include <linux/vmalloc.h>
++#include <asm/io.h>
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++#include <linux/wrapper.h>
++#endif
++#include <linux/slab.h>
++#include <linux/highmem.h>
++#include <linux/sched.h>
++
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "syscommon.h"
++#include "mutils.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "proc.h"
++#include "mutex.h"
++#include "lock.h"
++
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ #include "lists.h"
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++typedef enum {
++ DEBUG_MEM_ALLOC_TYPE_KMALLOC,
++ DEBUG_MEM_ALLOC_TYPE_VMALLOC,
++ DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES,
++ DEBUG_MEM_ALLOC_TYPE_IOREMAP,
++ DEBUG_MEM_ALLOC_TYPE_IO,
++ DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE,
++ DEBUG_MEM_ALLOC_TYPE_COUNT
++}DEBUG_MEM_ALLOC_TYPE;
++
++typedef struct _DEBUG_MEM_ALLOC_REC
++{
++ DEBUG_MEM_ALLOC_TYPE eAllocType;
++ IMG_VOID *pvKey;
++ IMG_VOID *pvCpuVAddr;
++ IMG_UINT32 ulCpuPAddr;
++ IMG_VOID *pvPrivateData;
++ IMG_UINT32 ui32Bytes;
++ pid_t pid;
++ IMG_CHAR *pszFileName;
++ IMG_UINT32 ui32Line;
++
++ struct _DEBUG_MEM_ALLOC_REC *psNext;
++ struct _DEBUG_MEM_ALLOC_REC **ppsThis;
++}DEBUG_MEM_ALLOC_REC;
++
++static IMPLEMENT_LIST_ANY_VA_2(DEBUG_MEM_ALLOC_REC, IMG_BOOL, IMG_FALSE)
++static IMPLEMENT_LIST_ANY_VA(DEBUG_MEM_ALLOC_REC)
++static IMPLEMENT_LIST_FOR_EACH(DEBUG_MEM_ALLOC_REC)
++static IMPLEMENT_LIST_INSERT(DEBUG_MEM_ALLOC_REC)
++static IMPLEMENT_LIST_REMOVE(DEBUG_MEM_ALLOC_REC)
++
++
++static DEBUG_MEM_ALLOC_REC *g_MemoryRecords;
++
++static IMG_UINT32 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT];
++static IMG_UINT32 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT];
++
++static IMG_UINT32 g_SysRAMWaterMark;
++static IMG_UINT32 g_SysRAMHighWaterMark;
++
++static IMG_UINT32 g_IOMemWaterMark;
++static IMG_UINT32 g_IOMemHighWaterMark;
++
++static IMG_VOID DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE eAllocType,
++ IMG_VOID *pvKey,
++ IMG_VOID *pvCpuVAddr,
++ IMG_UINT32 ulCpuPAddr,
++ IMG_VOID *pvPrivateData,
++ IMG_UINT32 ui32Bytes,
++ IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Line);
++
++static IMG_VOID DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE eAllocType, IMG_VOID *pvKey, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++static IMG_CHAR *DebugMemAllocRecordTypeToString(DEBUG_MEM_ALLOC_TYPE eAllocType);
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++static struct proc_dir_entry *g_SeqFileMemoryRecords =0;
++static void* ProcSeqNextMemoryRecords(struct seq_file *sfile,void* el,loff_t off);
++static void ProcSeqShowMemoryRecords(struct seq_file *sfile,void* el);
++static void* ProcSeqOff2ElementMemoryRecords(struct seq_file * sfile, loff_t off);
++
++#else
++static off_t printMemoryRecords(IMG_CHAR * buffer, size_t size, off_t off);
++#endif
++
++#endif
++
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++typedef struct _DEBUG_LINUX_MEM_AREA_REC
++{
++ LinuxMemArea *psLinuxMemArea;
++ IMG_UINT32 ui32Flags;
++ pid_t pid;
++
++ struct _DEBUG_LINUX_MEM_AREA_REC *psNext;
++ struct _DEBUG_LINUX_MEM_AREA_REC **ppsThis;
++}DEBUG_LINUX_MEM_AREA_REC;
++
++
++static IMPLEMENT_LIST_ANY_VA(DEBUG_LINUX_MEM_AREA_REC)
++static IMPLEMENT_LIST_FOR_EACH(DEBUG_LINUX_MEM_AREA_REC)
++static IMPLEMENT_LIST_INSERT(DEBUG_LINUX_MEM_AREA_REC)
++static IMPLEMENT_LIST_REMOVE(DEBUG_LINUX_MEM_AREA_REC)
++
++
++
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++static PVRSRV_LINUX_MUTEX g_sDebugMutex;
++#endif
++
++static DEBUG_LINUX_MEM_AREA_REC *g_LinuxMemAreaRecords;
++static IMG_UINT32 g_LinuxMemAreaCount;
++static IMG_UINT32 g_LinuxMemAreaWaterMark;
++static IMG_UINT32 g_LinuxMemAreaHighWaterMark;
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++static struct proc_dir_entry *g_SeqFileMemArea=0;
++
++static void* ProcSeqNextMemArea(struct seq_file *sfile,void* el,loff_t off);
++static void ProcSeqShowMemArea(struct seq_file *sfile,void* el);
++static void* ProcSeqOff2ElementMemArea(struct seq_file *sfile, loff_t off);
++
++#else
++static off_t printLinuxMemAreaRecords(IMG_CHAR * buffer, size_t size, off_t off);
++#endif
++
++#endif
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++#if (defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS))
++static void ProcSeqStartstopDebugMutex(struct seq_file *sfile,IMG_BOOL start);
++#endif
++#endif
++
++static LinuxKMemCache *psLinuxMemAreaCache;
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++static IMG_VOID ReservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length);
++static IMG_VOID UnreservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length);
++#endif
++
++static LinuxMemArea *LinuxMemAreaStructAlloc(IMG_VOID);
++static IMG_VOID LinuxMemAreaStructFree(LinuxMemArea *psLinuxMemArea);
++#if defined(DEBUG_LINUX_MEM_AREAS)
++static IMG_VOID DebugLinuxMemAreaRecordAdd(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Flags);
++static DEBUG_LINUX_MEM_AREA_REC *DebugLinuxMemAreaRecordFind(LinuxMemArea *psLinuxMemArea);
++static IMG_VOID DebugLinuxMemAreaRecordRemove(LinuxMemArea *psLinuxMemArea);
++#endif
++
++PVRSRV_ERROR
++LinuxMMInit(IMG_VOID)
++{
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ LinuxInitMutex(&g_sDebugMutex);
++#endif
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ {
++ IMG_INT iStatus;
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_SeqFileMemArea = CreateProcReadEntrySeq(
++ "mem_areas",
++ NULL,
++ ProcSeqNextMemArea,
++ ProcSeqShowMemArea,
++ ProcSeqOff2ElementMemArea,
++ ProcSeqStartstopDebugMutex
++ );
++ iStatus = !g_SeqFileMemArea ? -1 : 0;
++#else
++ iStatus = CreateProcReadEntry("mem_areas", printLinuxMemAreaRecords);
++#endif
++ if(iStatus!=0)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ }
++#endif
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ {
++ IMG_INT iStatus;
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_SeqFileMemoryRecords =CreateProcReadEntrySeq(
++ "meminfo",
++ NULL,
++ ProcSeqNextMemoryRecords,
++ ProcSeqShowMemoryRecords,
++ ProcSeqOff2ElementMemoryRecords,
++ ProcSeqStartstopDebugMutex
++ );
++
++ iStatus = !g_SeqFileMemoryRecords ? -1 : 0;
++#else
++ iStatus = CreateProcReadEntry("meminfo", printMemoryRecords);
++#endif
++ if(iStatus!=0)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ }
++#endif
++
++ psLinuxMemAreaCache = KMemCacheCreateWrapper("img-mm", sizeof(LinuxMemArea), 0, 0);
++ if(!psLinuxMemAreaCache)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate kmem_cache", __FUNCTION__));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ return PVRSRV_OK;
++}
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++IMG_VOID LinuxMMCleanup_MemAreas_ForEachCb(DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ psLinuxMemArea = psCurrentRecord->psLinuxMemArea;
++ PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: Cleaning up Linux memory area (%p), type=%s, size=%ld bytes",
++ __FUNCTION__,
++ psCurrentRecord->psLinuxMemArea,
++ LinuxMemAreaTypeToString(psCurrentRecord->psLinuxMemArea->eAreaType),
++ psCurrentRecord->psLinuxMemArea->ui32ByteSize));
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++}
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++IMG_VOID LinuxMMCleanup_MemRecords_ForEachVa(DEBUG_MEM_ALLOC_REC *psCurrentRecord)
++
++{
++
++ PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: Cleaning up memory: "
++ "type=%s "
++ "CpuVAddr=%p "
++ "CpuPAddr=0x%08lx, "
++ "allocated @ file=%s,line=%d",
++ __FUNCTION__,
++ DebugMemAllocRecordTypeToString(psCurrentRecord->eAllocType),
++ psCurrentRecord->pvCpuVAddr,
++ psCurrentRecord->ulCpuPAddr,
++ psCurrentRecord->pszFileName,
++ psCurrentRecord->ui32Line));
++ switch(psCurrentRecord->eAllocType)
++ {
++ case DEBUG_MEM_ALLOC_TYPE_KMALLOC:
++ KFreeWrapper(psCurrentRecord->pvCpuVAddr);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_IOREMAP:
++ IOUnmapWrapper(psCurrentRecord->pvCpuVAddr);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_IO:
++
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IO, psCurrentRecord->pvKey, __FILE__, __LINE__);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_VMALLOC:
++ VFreeWrapper(psCurrentRecord->pvCpuVAddr);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES:
++
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, psCurrentRecord->pvKey, __FILE__, __LINE__);
++ break;
++ case DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE:
++ KMemCacheFreeWrapper(psCurrentRecord->pvPrivateData, psCurrentRecord->pvCpuVAddr);
++ break;
++ default:
++ PVR_ASSERT(0);
++ }
++}
++#endif
++
++
++IMG_VOID
++LinuxMMCleanup(IMG_VOID)
++{
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ {
++ if(g_LinuxMemAreaCount)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: There are %d LinuxMemArea allocation unfreed (%ld bytes)",
++ __FUNCTION__, g_LinuxMemAreaCount, g_LinuxMemAreaWaterMark));
++ }
++
++ List_DEBUG_LINUX_MEM_AREA_REC_ForEach(g_LinuxMemAreaRecords,
++ LinuxMMCleanup_MemAreas_ForEachCb);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq( g_SeqFileMemArea );
++#else
++ RemoveProcEntry("mem_areas");
++#endif
++ }
++#endif
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ {
++
++
++ List_DEBUG_MEM_ALLOC_REC_ForEach(g_MemoryRecords,
++ LinuxMMCleanup_MemRecords_ForEachVa);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq( g_SeqFileMemoryRecords );
++#else
++ RemoveProcEntry("meminfo");
++#endif
++
++ }
++#endif
++
++ if(psLinuxMemAreaCache)
++ {
++ KMemCacheDestroyWrapper(psLinuxMemAreaCache);
++ psLinuxMemAreaCache=NULL;
++ }
++}
++
++
++IMG_VOID *
++_KMallocWrapper(IMG_UINT32 ui32ByteSize, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
++{
++ IMG_VOID *pvRet;
++ pvRet = kmalloc(ui32ByteSize, GFP_KERNEL);
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ if(pvRet)
++ {
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMALLOC,
++ pvRet,
++ pvRet,
++ 0,
++ NULL,
++ ui32ByteSize,
++ pszFileName,
++ ui32Line
++ );
++ }
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++ return pvRet;
++}
++
++
++IMG_VOID
++_KFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMALLOC, pvCpuVAddr, pszFileName, ui32Line);
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++ kfree(pvCpuVAddr);
++}
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++static IMG_VOID
++DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE eAllocType,
++ IMG_VOID *pvKey,
++ IMG_VOID *pvCpuVAddr,
++ IMG_UINT32 ulCpuPAddr,
++ IMG_VOID *pvPrivateData,
++ IMG_UINT32 ui32Bytes,
++ IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Line)
++{
++ DEBUG_MEM_ALLOC_REC *psRecord;
++
++ LinuxLockMutex(&g_sDebugMutex);
++
++ psRecord = kmalloc(sizeof(DEBUG_MEM_ALLOC_REC), GFP_KERNEL);
++
++ psRecord->eAllocType = eAllocType;
++ psRecord->pvKey = pvKey;
++ psRecord->pvCpuVAddr = pvCpuVAddr;
++ psRecord->ulCpuPAddr = ulCpuPAddr;
++ psRecord->pvPrivateData = pvPrivateData;
++ psRecord->pid = current->pid;
++ psRecord->ui32Bytes = ui32Bytes;
++ psRecord->pszFileName = pszFileName;
++ psRecord->ui32Line = ui32Line;
++
++ List_DEBUG_MEM_ALLOC_REC_Insert(&g_MemoryRecords, psRecord);
++
++ g_WaterMarkData[eAllocType] += ui32Bytes;
++ if(g_WaterMarkData[eAllocType] > g_HighWaterMarkData[eAllocType])
++ {
++ g_HighWaterMarkData[eAllocType] = g_WaterMarkData[eAllocType];
++ }
++
++ if(eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
++ {
++ g_SysRAMWaterMark += ui32Bytes;
++ if(g_SysRAMWaterMark > g_SysRAMHighWaterMark)
++ {
++ g_SysRAMHighWaterMark = g_SysRAMWaterMark;
++ }
++ }
++ else if(eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_IO)
++ {
++ g_IOMemWaterMark += ui32Bytes;
++ if(g_IOMemWaterMark > g_IOMemHighWaterMark)
++ {
++ g_IOMemHighWaterMark = g_IOMemWaterMark;
++ }
++ }
++
++ LinuxUnLockMutex(&g_sDebugMutex);
++}
++
++
++IMG_BOOL DebugMemAllocRecordRemove_AnyVaCb(DEBUG_MEM_ALLOC_REC *psCurrentRecord, va_list va)
++{
++ DEBUG_MEM_ALLOC_TYPE eAllocType;
++ IMG_VOID *pvKey;
++
++ eAllocType = va_arg(va, DEBUG_MEM_ALLOC_TYPE);
++ pvKey = va_arg(va, IMG_VOID*);
++
++ if(psCurrentRecord->eAllocType == eAllocType
++ && psCurrentRecord->pvKey == pvKey)
++ {
++ eAllocType = psCurrentRecord->eAllocType;
++ g_WaterMarkData[eAllocType] -= psCurrentRecord->ui32Bytes;
++
++ if(eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
++ {
++ g_SysRAMWaterMark -= psCurrentRecord->ui32Bytes;
++ }
++ else if(eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP
++ || eAllocType == DEBUG_MEM_ALLOC_TYPE_IO)
++ {
++ g_IOMemWaterMark -= psCurrentRecord->ui32Bytes;
++ }
++
++ List_DEBUG_MEM_ALLOC_REC_Remove(psCurrentRecord);
++ kfree(psCurrentRecord);
++
++ return IMG_TRUE;
++ }
++ else
++ {
++ return IMG_FALSE;
++ }
++}
++
++
++static IMG_VOID
++DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE eAllocType, IMG_VOID *pvKey, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
++{
++ LinuxLockMutex(&g_sDebugMutex);
++
++
++ if(!List_DEBUG_MEM_ALLOC_REC_IMG_BOOL_Any_va(g_MemoryRecords,
++ DebugMemAllocRecordRemove_AnyVaCb,
++ eAllocType,
++ pvKey))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: couldn't find an entry for type=%s with pvKey=%p (called from %s, line %d\n",
++ __FUNCTION__, DebugMemAllocRecordTypeToString(eAllocType), pvKey,
++ pszFileName, ui32Line));
++ }
++
++ LinuxUnLockMutex(&g_sDebugMutex);
++}
++
++
++static IMG_CHAR *
++DebugMemAllocRecordTypeToString(DEBUG_MEM_ALLOC_TYPE eAllocType)
++{
++ IMG_CHAR *apszDebugMemoryRecordTypes[] = {
++ "KMALLOC",
++ "VMALLOC",
++ "ALLOC_PAGES",
++ "IOREMAP",
++ "IO",
++ "KMEM_CACHE_ALLOC"
++ };
++ return apszDebugMemoryRecordTypes[eAllocType];
++}
++#endif
++
++
++
++IMG_VOID *
++_VMallocWrapper(IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32AllocFlags,
++ IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Line)
++{
++ pgprot_t PGProtFlags;
++ IMG_VOID *pvRet;
++
++ switch(ui32AllocFlags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ case PVRSRV_HAP_CACHED:
++ PGProtFlags = PAGE_KERNEL;
++ break;
++ case PVRSRV_HAP_WRITECOMBINE:
++ PGProtFlags = PGPROT_WC(PAGE_KERNEL);
++ break;
++ case PVRSRV_HAP_UNCACHED:
++ PGProtFlags = PGPROT_UC(PAGE_KERNEL);
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "VMAllocWrapper: unknown mapping flags=0x%08lx",
++ ui32AllocFlags));
++ dump_stack();
++ return NULL;
++ }
++
++
++ pvRet = __vmalloc(ui32Bytes, GFP_KERNEL | __GFP_HIGHMEM, PGProtFlags);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ if(pvRet)
++ {
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_VMALLOC,
++ pvRet,
++ pvRet,
++ 0,
++ NULL,
++ PAGE_ALIGN(ui32Bytes),
++ pszFileName,
++ ui32Line
++ );
++ }
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++
++ return pvRet;
++}
++
++
++IMG_VOID
++_VFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_VMALLOC, pvCpuVAddr, pszFileName, ui32Line);
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++ vfree(pvCpuVAddr);
++}
++
++
++LinuxMemArea *
++NewVMallocLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags)
++{
++ LinuxMemArea *psLinuxMemArea;
++ IMG_VOID *pvCpuVAddr;
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if(!psLinuxMemArea)
++ {
++ goto failed;
++ }
++
++ pvCpuVAddr = VMallocWrapper(ui32Bytes, ui32AreaFlags);
++ if(!pvCpuVAddr)
++ {
++ goto failed;
++ }
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++
++ ReservePages(pvCpuVAddr, ui32Bytes);
++#endif
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_VMALLOC;
++ psLinuxMemArea->uData.sVmalloc.pvVmallocAddress = pvCpuVAddr;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++
++failed:
++ PVR_DPF((PVR_DBG_ERROR, "%s: failed!", __FUNCTION__));
++ if(psLinuxMemArea)
++ LinuxMemAreaStructFree(psLinuxMemArea);
++ return NULL;
++}
++
++
++IMG_VOID
++FreeVMallocLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea);
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_VMALLOC);
++ PVR_ASSERT(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++ UnreservePages(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress,
++ psLinuxMemArea->ui32ByteSize);
++#endif
++
++ PVR_DPF((PVR_DBG_MESSAGE,"%s: pvCpuVAddr: %p",
++ __FUNCTION__, psLinuxMemArea->uData.sVmalloc.pvVmallocAddress));
++ VFreeWrapper(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress);
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++static IMG_VOID
++ReservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length)
++{
++ IMG_VOID *pvPage;
++ IMG_VOID *pvEnd = pvAddress + ui32Length;
++
++ for(pvPage = pvAddress; pvPage < pvEnd; pvPage += PAGE_SIZE)
++ {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++ SetPageReserved(vmalloc_to_page(pvPage));
++#else
++ mem_map_reserve(vmalloc_to_page(pvPage));
++#endif
++ }
++}
++
++
++static IMG_VOID
++UnreservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length)
++{
++ IMG_VOID *pvPage;
++ IMG_VOID *pvEnd = pvAddress + ui32Length;
++
++ for(pvPage = pvAddress; pvPage < pvEnd; pvPage += PAGE_SIZE)
++ {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++ ClearPageReserved(vmalloc_to_page(pvPage));
++#else
++ mem_map_unreserve(vmalloc_to_page(pvPage));
++#endif
++ }
++}
++#endif
++
++
++IMG_VOID *
++_IORemapWrapper(IMG_CPU_PHYADDR BasePAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Line)
++{
++ IMG_VOID *pvIORemapCookie;
++
++ switch(ui32MappingFlags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ case PVRSRV_HAP_CACHED:
++ pvIORemapCookie = (IMG_VOID *)IOREMAP(BasePAddr.uiAddr, ui32Bytes);
++ break;
++ case PVRSRV_HAP_WRITECOMBINE:
++ pvIORemapCookie = (IMG_VOID *)IOREMAP_WC(BasePAddr.uiAddr, ui32Bytes);
++ break;
++ case PVRSRV_HAP_UNCACHED:
++ pvIORemapCookie = (IMG_VOID *)IOREMAP_UC(BasePAddr.uiAddr, ui32Bytes);
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "IORemapWrapper: unknown mapping flags"));
++ return NULL;
++ }
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ if(pvIORemapCookie)
++ {
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IOREMAP,
++ pvIORemapCookie,
++ pvIORemapCookie,
++ BasePAddr.uiAddr,
++ NULL,
++ ui32Bytes,
++ pszFileName,
++ ui32Line
++ );
++ }
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++
++ return pvIORemapCookie;
++}
++
++
++IMG_VOID
++_IOUnmapWrapper(IMG_VOID *pvIORemapCookie, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IOREMAP, pvIORemapCookie, pszFileName, ui32Line);
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++ iounmap(pvIORemapCookie);
++}
++
++
++LinuxMemArea *
++NewIORemapLinuxMemArea(IMG_CPU_PHYADDR BasePAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32AreaFlags)
++{
++ LinuxMemArea *psLinuxMemArea;
++ IMG_VOID *pvIORemapCookie;
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if(!psLinuxMemArea)
++ {
++ return NULL;
++ }
++
++ pvIORemapCookie = IORemapWrapper(BasePAddr, ui32Bytes, ui32AreaFlags);
++ if(!pvIORemapCookie)
++ {
++ LinuxMemAreaStructFree(psLinuxMemArea);
++ return NULL;
++ }
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IOREMAP;
++ psLinuxMemArea->uData.sIORemap.pvIORemapCookie = pvIORemapCookie;
++ psLinuxMemArea->uData.sIORemap.CPUPhysAddr = BasePAddr;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++}
++
++
++IMG_VOID
++FreeIORemapLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IOREMAP);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++ IOUnmapWrapper(psLinuxMemArea->uData.sIORemap.pvIORemapCookie);
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++static IMG_BOOL
++TreatExternalPagesAsContiguous(IMG_SYS_PHYADDR *psSysPhysAddr, IMG_UINT32 ui32Bytes, IMG_BOOL bPhysContig)
++{
++ IMG_UINT32 ui32;
++ IMG_UINT32 ui32AddrChk;
++ IMG_UINT32 ui32NumPages = RANGE_TO_PAGES(ui32Bytes);
++
++ for (ui32 = 0, ui32AddrChk = psSysPhysAddr[0].uiAddr;
++ ui32 < ui32NumPages;
++ ui32++, ui32AddrChk = (bPhysContig) ? (ui32AddrChk + PAGE_SIZE) : psSysPhysAddr[ui32].uiAddr)
++ {
++ if (!pfn_valid(PHYS_TO_PFN(ui32AddrChk)))
++ {
++ break;
++ }
++ }
++ if (ui32 == ui32NumPages)
++ {
++ return IMG_FALSE;
++ }
++
++ if (!bPhysContig)
++ {
++ for (ui32 = 0, ui32AddrChk = psSysPhysAddr[0].uiAddr;
++ ui32 < ui32NumPages;
++ ui32++, ui32AddrChk += PAGE_SIZE)
++ {
++ if (psSysPhysAddr[ui32].uiAddr != ui32AddrChk)
++ {
++ return IMG_FALSE;
++ }
++ }
++ }
++
++ return IMG_TRUE;
++}
++
++LinuxMemArea *NewExternalKVLinuxMemArea(IMG_SYS_PHYADDR *pBasePAddr, IMG_VOID *pvCPUVAddr, IMG_UINT32 ui32Bytes, IMG_BOOL bPhysContig, IMG_UINT32 ui32AreaFlags)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if(!psLinuxMemArea)
++ {
++ return NULL;
++ }
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_EXTERNAL_KV;
++ psLinuxMemArea->uData.sExternalKV.pvExternalKV = pvCPUVAddr;
++ psLinuxMemArea->uData.sExternalKV.bPhysContig = (IMG_BOOL)(bPhysContig || TreatExternalPagesAsContiguous(pBasePAddr, ui32Bytes, bPhysContig));
++
++ if (psLinuxMemArea->uData.sExternalKV.bPhysContig)
++ {
++ psLinuxMemArea->uData.sExternalKV.uPhysAddr.SysPhysAddr = *pBasePAddr;
++ }
++ else
++ {
++ psLinuxMemArea->uData.sExternalKV.uPhysAddr.pSysPhysAddr = pBasePAddr;
++ }
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++}
++
++
++IMG_VOID
++FreeExternalKVLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_EXTERNAL_KV);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++LinuxMemArea *
++NewIOLinuxMemArea(IMG_CPU_PHYADDR BasePAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32AreaFlags)
++{
++ LinuxMemArea *psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if(!psLinuxMemArea)
++ {
++ return NULL;
++ }
++
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IO;
++ psLinuxMemArea->uData.sIO.CPUPhysAddr.uiAddr = BasePAddr.uiAddr;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IO,
++ (IMG_VOID *)BasePAddr.uiAddr,
++ 0,
++ BasePAddr.uiAddr,
++ NULL,
++ ui32Bytes,
++ "unknown",
++ 0
++ );
++#endif
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++}
++
++
++IMG_VOID
++FreeIOLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IO);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IO,
++ (IMG_VOID *)psLinuxMemArea->uData.sIO.CPUPhysAddr.uiAddr, __FILE__, __LINE__);
++#endif
++
++
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++LinuxMemArea *
++NewAllocPagesLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags)
++{
++ LinuxMemArea *psLinuxMemArea;
++ IMG_UINT32 ui32PageCount;
++ struct page **pvPageList;
++ IMG_HANDLE hBlockPageList;
++ IMG_INT32 i;
++ PVRSRV_ERROR eError;
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if(!psLinuxMemArea)
++ {
++ goto failed_area_alloc;
++ }
++
++ ui32PageCount = RANGE_TO_PAGES(ui32Bytes);
++ eError = OSAllocMem(0, sizeof(*pvPageList) * ui32PageCount, (IMG_VOID **)&pvPageList, &hBlockPageList,
++ "Array of pages");
++ if(eError != PVRSRV_OK)
++ {
++ goto failed_page_list_alloc;
++ }
++
++ for(i=0; i<(IMG_INT32)ui32PageCount; i++)
++ {
++ pvPageList[i] = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, 0);
++ if(!pvPageList[i])
++ {
++ goto failed_alloc_pages;
++ }
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++ SetPageReserved(pvPageList[i]);
++#else
++ mem_map_reserve(pvPageList[i]);
++#endif
++#endif
++
++ }
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES,
++ pvPageList,
++ 0,
++ 0,
++ NULL,
++ PAGE_ALIGN(ui32Bytes),
++ "unknown",
++ 0
++ );
++#endif
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_ALLOC_PAGES;
++ psLinuxMemArea->uData.sPageList.pvPageList = pvPageList;
++ psLinuxMemArea->uData.sPageList.hBlockPageList = hBlockPageList;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++ return psLinuxMemArea;
++
++failed_alloc_pages:
++ for(i--; i >= 0; i--)
++ {
++ __free_pages(pvPageList[i], 0);
++ }
++ (IMG_VOID) OSFreeMem(0, sizeof(*pvPageList) * ui32PageCount, pvPageList, hBlockPageList);
++ psLinuxMemArea->uData.sPageList.pvPageList = IMG_NULL;
++failed_page_list_alloc:
++ LinuxMemAreaStructFree(psLinuxMemArea);
++failed_area_alloc:
++ PVR_DPF((PVR_DBG_ERROR, "%s: failed", __FUNCTION__));
++
++ return NULL;
++}
++
++
++IMG_VOID
++FreeAllocPagesLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++ IMG_UINT32 ui32PageCount;
++ struct page **pvPageList;
++ IMG_HANDLE hBlockPageList;
++ IMG_INT32 i;
++
++ PVR_ASSERT(psLinuxMemArea);
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_ALLOC_PAGES);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++ ui32PageCount = RANGE_TO_PAGES(psLinuxMemArea->ui32ByteSize);
++ pvPageList = psLinuxMemArea->uData.sPageList.pvPageList;
++ hBlockPageList = psLinuxMemArea->uData.sPageList.hBlockPageList;
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, pvPageList, __FILE__, __LINE__);
++#endif
++
++ for(i=0;i<(IMG_INT32)ui32PageCount;i++)
++ {
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++ ClearPageReserved(pvPageList[i]);
++#else
++ mem_map_reserve(pvPageList[i]);
++#endif
++#endif
++ __free_pages(pvPageList[i], 0);
++ }
++
++ (IMG_VOID) OSFreeMem(0, sizeof(*pvPageList) * ui32PageCount, pvPageList, hBlockPageList);
++ psLinuxMemArea->uData.sPageList.pvPageList = IMG_NULL;
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++struct page*
++LinuxMemAreaOffsetToPage(LinuxMemArea *psLinuxMemArea,
++ IMG_UINT32 ui32ByteOffset)
++{
++ IMG_UINT32 ui32PageIndex;
++ IMG_CHAR *pui8Addr;
++
++ switch(psLinuxMemArea->eAreaType)
++ {
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
++ return psLinuxMemArea->uData.sPageList.pvPageList[ui32PageIndex];
++
++ case LINUX_MEM_AREA_VMALLOC:
++ pui8Addr = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
++ pui8Addr += ui32ByteOffset;
++ return vmalloc_to_page(pui8Addr);
++
++ case LINUX_MEM_AREA_SUB_ALLOC:
++
++ return LinuxMemAreaOffsetToPage(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea,
++ psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset
++ + ui32ByteOffset);
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Unsupported request for struct page from LinuxMemArea with type=%s",
++ __FUNCTION__, LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType)));
++ return NULL;
++ }
++}
++
++
++LinuxKMemCache *
++KMemCacheCreateWrapper(IMG_CHAR *pszName,
++ size_t Size,
++ size_t Align,
++ IMG_UINT32 ui32Flags)
++{
++#if defined(DEBUG_LINUX_SLAB_ALLOCATIONS)
++ ui32Flags |= SLAB_POISON|SLAB_RED_ZONE;
++#endif
++ return kmem_cache_create(pszName, Size, Align, ui32Flags, NULL
++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
++ , NULL
++#endif
++ );
++}
++
++
++IMG_VOID
++KMemCacheDestroyWrapper(LinuxKMemCache *psCache)
++{
++ kmem_cache_destroy(psCache);
++}
++
++
++IMG_VOID *
++_KMemCacheAllocWrapper(LinuxKMemCache *psCache,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14))
++ gfp_t Flags,
++#else
++ IMG_INT Flags,
++#endif
++ IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Line)
++{
++ IMG_VOID *pvRet;
++
++ pvRet = kmem_cache_alloc(psCache, Flags);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE,
++ pvRet,
++ pvRet,
++ 0,
++ psCache,
++ kmem_cache_size(psCache),
++ pszFileName,
++ ui32Line
++ );
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++
++ return pvRet;
++}
++
++
++IMG_VOID
++_KMemCacheFreeWrapper(LinuxKMemCache *psCache, IMG_VOID *pvObject, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE, pvObject, pszFileName, ui32Line);
++#else
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(ui32Line);
++#endif
++
++ kmem_cache_free(psCache, pvObject);
++}
++
++
++const IMG_CHAR *
++KMemCacheNameWrapper(LinuxKMemCache *psCache)
++{
++ PVR_UNREFERENCED_PARAMETER(psCache);
++
++
++ return "";
++}
++
++
++LinuxMemArea *
++NewSubLinuxMemArea(LinuxMemArea *psParentLinuxMemArea,
++ IMG_UINT32 ui32ByteOffset,
++ IMG_UINT32 ui32Bytes)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ PVR_ASSERT((ui32ByteOffset+ui32Bytes) <= psParentLinuxMemArea->ui32ByteSize);
++
++ psLinuxMemArea = LinuxMemAreaStructAlloc();
++ if(!psLinuxMemArea)
++ {
++ return NULL;
++ }
++
++ psLinuxMemArea->eAreaType = LINUX_MEM_AREA_SUB_ALLOC;
++ psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea = psParentLinuxMemArea;
++ psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset = ui32ByteOffset;
++ psLinuxMemArea->ui32ByteSize = ui32Bytes;
++ psLinuxMemArea->ui32AreaFlags = psParentLinuxMemArea->ui32AreaFlags;
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++ INIT_LIST_HEAD(&psLinuxMemArea->sMMapOffsetStructList);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ {
++ DEBUG_LINUX_MEM_AREA_REC *psParentRecord;
++ psParentRecord = DebugLinuxMemAreaRecordFind(psParentLinuxMemArea);
++ DebugLinuxMemAreaRecordAdd(psLinuxMemArea, psParentRecord->ui32Flags);
++ }
++#endif
++
++ return psLinuxMemArea;
++}
++
++
++IMG_VOID
++FreeSubLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++ DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++
++
++ LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++static LinuxMemArea *
++LinuxMemAreaStructAlloc(IMG_VOID)
++{
++#if 0
++ LinuxMemArea *psLinuxMemArea;
++ psLinuxMemArea = kmem_cache_alloc(psLinuxMemAreaCache, GFP_KERNEL);
++ printk(KERN_ERR "%s: psLinuxMemArea=%p\n", __FUNCTION__, psLinuxMemArea);
++ dump_stack();
++ return psLinuxMemArea;
++#else
++ return KMemCacheAllocWrapper(psLinuxMemAreaCache, GFP_KERNEL);
++#endif
++}
++
++
++static IMG_VOID
++LinuxMemAreaStructFree(LinuxMemArea *psLinuxMemArea)
++{
++ KMemCacheFreeWrapper(psLinuxMemAreaCache, psLinuxMemArea);
++
++
++}
++
++
++IMG_VOID
++LinuxMemAreaDeepFree(LinuxMemArea *psLinuxMemArea)
++{
++ switch(psLinuxMemArea->eAreaType)
++ {
++ case LINUX_MEM_AREA_VMALLOC:
++ FreeVMallocLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ FreeAllocPagesLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_IOREMAP:
++ FreeIORemapLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ FreeExternalKVLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_IO:
++ FreeIOLinuxMemArea(psLinuxMemArea);
++ break;
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ FreeSubLinuxMemArea(psLinuxMemArea);
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "%s: Unknown are type (%d)\n",
++ __FUNCTION__, psLinuxMemArea->eAreaType));
++ break;
++ }
++}
++
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++static IMG_VOID
++DebugLinuxMemAreaRecordAdd(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Flags)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psNewRecord;
++ const IMG_CHAR *pi8FlagsString;
++
++ LinuxLockMutex(&g_sDebugMutex);
++
++ if(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
++ {
++ g_LinuxMemAreaWaterMark += psLinuxMemArea->ui32ByteSize;
++ if(g_LinuxMemAreaWaterMark > g_LinuxMemAreaHighWaterMark)
++ {
++ g_LinuxMemAreaHighWaterMark = g_LinuxMemAreaWaterMark;
++ }
++ }
++ g_LinuxMemAreaCount++;
++
++
++ psNewRecord = kmalloc(sizeof(DEBUG_LINUX_MEM_AREA_REC), GFP_KERNEL);
++ if(psNewRecord)
++ {
++
++ psNewRecord->psLinuxMemArea = psLinuxMemArea;
++ psNewRecord->ui32Flags = ui32Flags;
++ psNewRecord->pid = current->pid;
++
++ List_DEBUG_LINUX_MEM_AREA_REC_Insert(&g_LinuxMemAreaRecords, psNewRecord);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: failed to allocate linux memory area record.",
++ __FUNCTION__));
++ }
++
++
++ pi8FlagsString = HAPFlagsToString(ui32Flags);
++ if(strstr(pi8FlagsString, "UNKNOWN"))
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Unexpected flags (0x%08lx) associated with psLinuxMemArea @ 0x%08lx",
++ __FUNCTION__,
++ ui32Flags,
++ psLinuxMemArea));
++
++ }
++
++ LinuxUnLockMutex(&g_sDebugMutex);
++}
++
++
++
++IMG_VOID* MatchLinuxMemArea_AnyVaCb(DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord,
++ va_list va)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ psLinuxMemArea = va_arg(va, LinuxMemArea*);
++ if(psCurrentRecord->psLinuxMemArea == psLinuxMemArea)
++ {
++ return psCurrentRecord;
++ }
++ else
++ {
++ return IMG_NULL;
++ }
++}
++
++
++static DEBUG_LINUX_MEM_AREA_REC *
++DebugLinuxMemAreaRecordFind(LinuxMemArea *psLinuxMemArea)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord;
++
++ LinuxLockMutex(&g_sDebugMutex);
++ psCurrentRecord = List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
++ MatchLinuxMemArea_AnyVaCb,
++ psLinuxMemArea);
++
++ LinuxUnLockMutex(&g_sDebugMutex);
++
++ return psCurrentRecord;
++}
++
++
++static IMG_VOID
++DebugLinuxMemAreaRecordRemove(LinuxMemArea *psLinuxMemArea)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord;
++
++ LinuxLockMutex(&g_sDebugMutex);
++
++ if(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
++ {
++ g_LinuxMemAreaWaterMark -= psLinuxMemArea->ui32ByteSize;
++ }
++ g_LinuxMemAreaCount--;
++
++
++ psCurrentRecord = List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
++ MatchLinuxMemArea_AnyVaCb,
++ psLinuxMemArea);
++ if(psCurrentRecord)
++ {
++
++ List_DEBUG_LINUX_MEM_AREA_REC_Remove(psCurrentRecord);
++ kfree(psCurrentRecord);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: couldn't find an entry for psLinuxMemArea=%p\n",
++ __FUNCTION__, psLinuxMemArea));
++ }
++
++ LinuxUnLockMutex(&g_sDebugMutex);
++}
++#endif
++
++
++IMG_VOID *
++LinuxMemAreaToCpuVAddr(LinuxMemArea *psLinuxMemArea)
++{
++ switch(psLinuxMemArea->eAreaType)
++ {
++ case LINUX_MEM_AREA_VMALLOC:
++ return psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
++ case LINUX_MEM_AREA_IOREMAP:
++ return psLinuxMemArea->uData.sIORemap.pvIORemapCookie;
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ return psLinuxMemArea->uData.sExternalKV.pvExternalKV;
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ {
++ IMG_CHAR *pAddr =
++ LinuxMemAreaToCpuVAddr(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea);
++ if(!pAddr)
++ {
++ return NULL;
++ }
++ return pAddr + psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset;
++ }
++ default:
++ return NULL;
++ }
++}
++
++
++IMG_CPU_PHYADDR
++LinuxMemAreaToCpuPAddr(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32ByteOffset)
++{
++ IMG_CPU_PHYADDR CpuPAddr;
++
++ CpuPAddr.uiAddr = 0;
++
++ switch(psLinuxMemArea->eAreaType)
++ {
++ case LINUX_MEM_AREA_IOREMAP:
++ {
++ CpuPAddr = psLinuxMemArea->uData.sIORemap.CPUPhysAddr;
++ CpuPAddr.uiAddr += ui32ByteOffset;
++ break;
++ }
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ {
++ if (psLinuxMemArea->uData.sExternalKV.bPhysContig)
++ {
++ CpuPAddr = SysSysPAddrToCpuPAddr(psLinuxMemArea->uData.sExternalKV.uPhysAddr.SysPhysAddr);
++ CpuPAddr.uiAddr += ui32ByteOffset;
++ }
++ else
++ {
++ IMG_UINT32 ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
++ IMG_SYS_PHYADDR SysPAddr = psLinuxMemArea->uData.sExternalKV.uPhysAddr.pSysPhysAddr[ui32PageIndex];
++
++ CpuPAddr = SysSysPAddrToCpuPAddr(SysPAddr);
++ CpuPAddr.uiAddr += ADDR_TO_PAGE_OFFSET(ui32ByteOffset);
++ }
++ break;
++ }
++ case LINUX_MEM_AREA_IO:
++ {
++ CpuPAddr = psLinuxMemArea->uData.sIO.CPUPhysAddr;
++ CpuPAddr.uiAddr += ui32ByteOffset;
++ break;
++ }
++ case LINUX_MEM_AREA_VMALLOC:
++ {
++ IMG_CHAR *pCpuVAddr;
++ pCpuVAddr =
++ (IMG_CHAR *)psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
++ pCpuVAddr += ui32ByteOffset;
++ CpuPAddr.uiAddr = VMallocToPhys(pCpuVAddr);
++ break;
++ }
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ {
++ struct page *page;
++ IMG_UINT32 ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
++ page = psLinuxMemArea->uData.sPageList.pvPageList[ui32PageIndex];
++ CpuPAddr.uiAddr = page_to_phys(page);
++ CpuPAddr.uiAddr += ADDR_TO_PAGE_OFFSET(ui32ByteOffset);
++ break;
++ }
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ {
++ CpuPAddr =
++ OSMemHandleToCpuPAddr(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea,
++ psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset
++ + ui32ByteOffset);
++ break;
++ }
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "%s: Unknown LinuxMemArea type (%d)\n",
++ __FUNCTION__, psLinuxMemArea->eAreaType));
++ break;
++ }
++
++ PVR_ASSERT(CpuPAddr.uiAddr);
++ return CpuPAddr;
++}
++
++
++IMG_BOOL
++LinuxMemAreaPhysIsContig(LinuxMemArea *psLinuxMemArea)
++{
++ switch(psLinuxMemArea->eAreaType)
++ {
++ case LINUX_MEM_AREA_IOREMAP:
++ case LINUX_MEM_AREA_IO:
++ return IMG_TRUE;
++
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ return psLinuxMemArea->uData.sExternalKV.bPhysContig;
++
++ case LINUX_MEM_AREA_VMALLOC:
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ return IMG_FALSE;
++
++ case LINUX_MEM_AREA_SUB_ALLOC:
++
++ return LinuxMemAreaPhysIsContig(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea);
++
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "%s: Unknown LinuxMemArea type (%d)\n",
++ __FUNCTION__, psLinuxMemArea->eAreaType));
++ break;
++ }
++ return IMG_FALSE;
++}
++
++
++const IMG_CHAR *
++LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType)
++{
++
++ switch(eMemAreaType)
++ {
++ case LINUX_MEM_AREA_IOREMAP:
++ return "LINUX_MEM_AREA_IOREMAP";
++ case LINUX_MEM_AREA_EXTERNAL_KV:
++ return "LINUX_MEM_AREA_EXTERNAL_KV";
++ case LINUX_MEM_AREA_IO:
++ return "LINUX_MEM_AREA_IO";
++ case LINUX_MEM_AREA_VMALLOC:
++ return "LINUX_MEM_AREA_VMALLOC";
++ case LINUX_MEM_AREA_SUB_ALLOC:
++ return "LINUX_MEM_AREA_SUB_ALLOC";
++ case LINUX_MEM_AREA_ALLOC_PAGES:
++ return "LINUX_MEM_AREA_ALLOC_PAGES";
++ default:
++ PVR_ASSERT(0);
++ }
++
++ return "";
++}
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++static void ProcSeqStartstopDebugMutex(struct seq_file *sfile, IMG_BOOL start)
++{
++ if(start)
++ {
++ LinuxLockMutex(&g_sDebugMutex);
++ }
++ else
++ {
++ LinuxUnLockMutex(&g_sDebugMutex);
++ }
++}
++#endif
++#endif
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++
++IMG_VOID* DecOffMemAreaRec_AnyVaCb(DEBUG_LINUX_MEM_AREA_REC *psNode, va_list va)
++{
++ off_t *pOff = va_arg(va, off_t*);
++ if (--(*pOff))
++ {
++ return IMG_NULL;
++ }
++ else
++ {
++ return psNode;
++ }
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void* ProcSeqNextMemArea(struct seq_file *sfile,void* el,loff_t off)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psRecord;
++ psRecord = (DEBUG_LINUX_MEM_AREA_REC*)
++ List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
++ DecOffMemAreaRec_AnyVaCb,
++ &off);
++ return (void*)psRecord;
++}
++
++static void* ProcSeqOff2ElementMemArea(struct seq_file * sfile, loff_t off)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psRecord;
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ psRecord = (DEBUG_LINUX_MEM_AREA_REC*)
++ List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
++ DecOffMemAreaRec_AnyVaCb,
++ &off);
++ return (void*)psRecord;
++}
++
++
++static void ProcSeqShowMemArea(struct seq_file *sfile,void* el)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psRecord = (DEBUG_LINUX_MEM_AREA_REC*)el;
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ seq_printf( sfile,
++ "Number of Linux Memory Areas: %lu\n"
++ "At the current water mark these areas correspond to %lu bytes (excluding SUB areas)\n"
++ "At the highest water mark these areas corresponded to %lu bytes (excluding SUB areas)\n"
++ "\nDetails for all Linux Memory Areas:\n"
++ "%s %-24s %s %s %-8s %-5s %s\n",
++ g_LinuxMemAreaCount,
++ g_LinuxMemAreaWaterMark,
++ g_LinuxMemAreaHighWaterMark,
++ "psLinuxMemArea",
++ "LinuxMemType",
++ "CpuVAddr",
++ "CpuPAddr",
++ "Bytes",
++ "Pid",
++ "Flags"
++ );
++#else
++ seq_printf( sfile,
++ "<mem_areas_header>\n"
++ "\t<count>%lu</count>\n"
++ "\t<watermark key=\"mar0\" description=\"current\" bytes=\"%lu\"/>\n"
++ "\t<watermark key=\"mar1\" description=\"high\" bytes=\"%lu\"/>\n"
++ "</mem_areas_header>\n",
++ g_LinuxMemAreaCount,
++ g_LinuxMemAreaWaterMark,
++ g_LinuxMemAreaHighWaterMark
++ );
++#endif
++ return;
++ }
++
++ seq_printf( sfile,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%8p %-24s %8p %08lx %-8ld %-5u %08lx=(%s)\n",
++#else
++ "<linux_mem_area>\n"
++ "\t<pointer>%8p</pointer>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%u</pid>\n"
++ "\t<flags>%08lx</flags>\n"
++ "\t<flags_string>%s</flags_string>\n"
++ "</linux_mem_area>\n",
++#endif
++ psRecord->psLinuxMemArea,
++ LinuxMemAreaTypeToString(psRecord->psLinuxMemArea->eAreaType),
++ LinuxMemAreaToCpuVAddr(psRecord->psLinuxMemArea),
++ LinuxMemAreaToCpuPAddr(psRecord->psLinuxMemArea,0).uiAddr,
++ psRecord->psLinuxMemArea->ui32ByteSize,
++ psRecord->pid,
++ psRecord->ui32Flags,
++ HAPFlagsToString(psRecord->ui32Flags)
++ );
++
++}
++
++#else
++
++static off_t
++printLinuxMemAreaRecords(IMG_CHAR * buffer, size_t count, off_t off)
++{
++ DEBUG_LINUX_MEM_AREA_REC *psRecord;
++ off_t Ret;
++
++ LinuxLockMutex(&g_sDebugMutex);
++
++ if(!off)
++ {
++ if(count < 500)
++ {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ Ret = printAppend(buffer, count, 0,
++ "Number of Linux Memory Areas: %lu\n"
++ "At the current water mark these areas correspond to %lu bytes (excluding SUB areas)\n"
++ "At the highest water mark these areas corresponded to %lu bytes (excluding SUB areas)\n"
++ "\nDetails for all Linux Memory Areas:\n"
++ "%s %-24s %s %s %-8s %-5s %s\n",
++ g_LinuxMemAreaCount,
++ g_LinuxMemAreaWaterMark,
++ g_LinuxMemAreaHighWaterMark,
++ "psLinuxMemArea",
++ "LinuxMemType",
++ "CpuVAddr",
++ "CpuPAddr",
++ "Bytes",
++ "Pid",
++ "Flags"
++ );
++#else
++ Ret = printAppend(buffer, count, 0,
++ "<mem_areas_header>\n"
++ "\t<count>%lu</count>\n"
++ "\t<watermark key=\"mar0\" description=\"current\" bytes=\"%lu\"/>\n"
++ "\t<watermark key=\"mar1\" description=\"high\" bytes=\"%lu\"/>\n"
++ "</mem_areas_header>\n",
++ g_LinuxMemAreaCount,
++ g_LinuxMemAreaWaterMark,
++ g_LinuxMemAreaHighWaterMark
++ );
++#endif
++ goto unlock_and_return;
++ }
++
++ psRecord = (DEBUG_LINUX_MEM_AREA_REC*)
++ List_DEBUG_LINUX_MEM_AREA_REC_Any_va(g_LinuxMemAreaRecords,
++ DecOffMemAreaRec_AnyVaCb,
++ &off);
++
++ if(!psRecord)
++ {
++ Ret = END_OF_FILE;
++ goto unlock_and_return;
++ }
++
++ if(count < 500)
++ {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++ Ret = printAppend(buffer, count, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%8p %-24s %8p %08lx %-8ld %-5u %08lx=(%s)\n",
++#else
++ "<linux_mem_area>\n"
++ "\t<pointer>%8p</pointer>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%u</pid>\n"
++ "\t<flags>%08lx</flags>\n"
++ "\t<flags_string>%s</flags_string>\n"
++ "</linux_mem_area>\n",
++#endif
++ psRecord->psLinuxMemArea,
++ LinuxMemAreaTypeToString(psRecord->psLinuxMemArea->eAreaType),
++ LinuxMemAreaToCpuVAddr(psRecord->psLinuxMemArea),
++ LinuxMemAreaToCpuPAddr(psRecord->psLinuxMemArea,0).uiAddr,
++ psRecord->psLinuxMemArea->ui32ByteSize,
++ psRecord->pid,
++ psRecord->ui32Flags,
++ HAPFlagsToString(psRecord->ui32Flags)
++ );
++
++unlock_and_return:
++ LinuxUnLockMutex(&g_sDebugMutex);
++ return Ret;
++}
++#endif
++
++#endif
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++
++IMG_VOID* DecOffMemAllocRec_AnyVaCb(DEBUG_MEM_ALLOC_REC *psNode, va_list va)
++{
++ off_t *pOff = va_arg(va, off_t*);
++ if (--(*pOff))
++ {
++ return IMG_NULL;
++ }
++ else
++ {
++ return psNode;
++ }
++}
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void* ProcSeqNextMemoryRecords(struct seq_file *sfile,void* el,loff_t off)
++{
++ DEBUG_MEM_ALLOC_REC *psRecord;
++ psRecord = (DEBUG_MEM_ALLOC_REC*)
++ List_DEBUG_MEM_ALLOC_REC_Any_va(g_MemoryRecords,
++ DecOffMemAllocRec_AnyVaCb,
++ &off);
++#if defined(DEBUG_LINUX_XML_PROC_FILES)
++ if(!psRecord)
++ {
++ seq_printf( sfile, "</meminfo>\n");
++ }
++#endif
++
++ return (void*)psRecord;
++}
++
++static void* ProcSeqOff2ElementMemoryRecords(struct seq_file *sfile, loff_t off)
++{
++ DEBUG_MEM_ALLOC_REC *psRecord;
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ psRecord = (DEBUG_MEM_ALLOC_REC*)
++ List_DEBUG_MEM_ALLOC_REC_Any_va(g_MemoryRecords,
++ DecOffMemAllocRec_AnyVaCb,
++ &off);
++
++#if defined(DEBUG_LINUX_XML_PROC_FILES)
++ if(!psRecord)
++ {
++ seq_printf( sfile, "</meminfo>\n");
++ }
++#endif
++
++ return (void*)psRecord;
++}
++
++static void ProcSeqShowMemoryRecords(struct seq_file *sfile,void* el)
++{
++ DEBUG_MEM_ALLOC_REC *psRecord = (DEBUG_MEM_ALLOC_REC*)el;
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via kmalloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via kmalloc",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via vmalloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via vmalloc",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via alloc_pages",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via alloc_pages",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via ioremap",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via ioremap",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes reserved for \"IO\" memory areas",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated for \"IO\" memory areas",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via kmem_cache_alloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via kmem_cache_alloc",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ seq_printf( sfile, "\n");
++
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "The Current Water Mark for memory allocated from system RAM",
++ g_SysRAMWaterMark);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "The Highest Water Mark for memory allocated from system RAM",
++ g_SysRAMHighWaterMark);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "The Current Water Mark for memory allocated from IO memory",
++ g_IOMemWaterMark);
++ seq_printf( sfile, "%-60s: %ld bytes\n",
++ "The Highest Water Mark for memory allocated from IO memory",
++ g_IOMemHighWaterMark);
++
++ seq_printf( sfile, "\n");
++
++ seq_printf( sfile, "Details for all known allocations:\n"
++ "%-16s %-8s %-8s %-10s %-5s %-10s %s\n",
++ "Type",
++ "CpuVAddr",
++ "CpuPAddr",
++ "Bytes",
++ "PID",
++ "PrivateData",
++ "Filename:Line");
++
++#else
++
++
++ seq_printf( sfile, "<meminfo>\n<meminfo_header>\n");
++ seq_printf( sfile,
++ "<watermark key=\"mr0\" description=\"kmalloc_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ seq_printf( sfile,
++ "<watermark key=\"mr1\" description=\"kmalloc_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ seq_printf( sfile,
++ "<watermark key=\"mr2\" description=\"vmalloc_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ seq_printf( sfile,
++ "<watermark key=\"mr3\" description=\"vmalloc_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ seq_printf( sfile,
++ "<watermark key=\"mr4\" description=\"alloc_pages_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ seq_printf( sfile,
++ "<watermark key=\"mr5\" description=\"alloc_pages_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ seq_printf( sfile,
++ "<watermark key=\"mr6\" description=\"ioremap_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ seq_printf( sfile,
++ "<watermark key=\"mr7\" description=\"ioremap_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ seq_printf( sfile,
++ "<watermark key=\"mr8\" description=\"io_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ seq_printf( sfile,
++ "<watermark key=\"mr9\" description=\"io_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ seq_printf( sfile,
++ "<watermark key=\"mr10\" description=\"kmem_cache_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ seq_printf( sfile,
++ "<watermark key=\"mr11\" description=\"kmem_cache_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ seq_printf( sfile,"\n" );
++
++ seq_printf( sfile,
++ "<watermark key=\"mr14\" description=\"system_ram_current\" bytes=\"%ld\"/>\n",
++ g_SysRAMWaterMark);
++ seq_printf( sfile,
++ "<watermark key=\"mr15\" description=\"system_ram_high\" bytes=\"%ld\"/>\n",
++ g_SysRAMHighWaterMark);
++ seq_printf( sfile,
++ "<watermark key=\"mr16\" description=\"system_io_current\" bytes=\"%ld\"/>\n",
++ g_IOMemWaterMark);
++ seq_printf( sfile,
++ "<watermark key=\"mr17\" description=\"system_io_high\" bytes=\"%ld\"/>\n",
++ g_IOMemHighWaterMark);
++
++ seq_printf( sfile, "</meminfo_header>\n");
++
++#endif
++ return;
++ }
++
++ if(psRecord->eAllocType != DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
++ {
++ seq_printf( sfile,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-16s %-8p %08lx %-10ld %-5d %-10s %s:%ld\n",
++#else
++ "<allocation>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%-8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%d</pid>\n"
++ "\t<private>%s</private>\n"
++ "\t<filename>%s</filename>\n"
++ "\t<line>%ld</line>\n"
++ "</allocation>\n",
++#endif
++ DebugMemAllocRecordTypeToString(psRecord->eAllocType),
++ psRecord->pvCpuVAddr,
++ psRecord->ulCpuPAddr,
++ psRecord->ui32Bytes,
++ psRecord->pid,
++ "NULL",
++ psRecord->pszFileName,
++ psRecord->ui32Line);
++ }
++ else
++ {
++ seq_printf( sfile,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-16s %-8p %08lx %-10ld %-5d %-10s %s:%ld\n",
++#else
++ "<allocation>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%-8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%d</pid>\n"
++ "\t<private>%s</private>\n"
++ "\t<filename>%s</filename>\n"
++ "\t<line>%ld</line>\n"
++ "</allocation>\n",
++#endif
++ DebugMemAllocRecordTypeToString(psRecord->eAllocType),
++ psRecord->pvCpuVAddr,
++ psRecord->ulCpuPAddr,
++ psRecord->ui32Bytes,
++ psRecord->pid,
++ KMemCacheNameWrapper(psRecord->pvPrivateData),
++ psRecord->pszFileName,
++ psRecord->ui32Line);
++ }
++}
++
++
++
++#else
++
++static off_t
++printMemoryRecords(IMG_CHAR * buffer, size_t count, off_t off)
++{
++ DEBUG_MEM_ALLOC_REC *psRecord;
++ off_t Ret;
++
++ LinuxLockMutex(&g_sDebugMutex);
++
++ if(!off)
++ {
++ if(count < 1000)
++ {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++
++ Ret = printAppend(buffer, count, 0, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via kmalloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via kmalloc",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via vmalloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via vmalloc",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via alloc_pages",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via alloc_pages",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via ioremap",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via ioremap",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes reserved for \"IO\" memory areas",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated for \"IO\" memory areas",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Current Water Mark of bytes allocated via kmem_cache_alloc",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "Highest Water Mark of bytes allocated via kmem_cache_alloc",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ Ret = printAppend(buffer, count, Ret, "\n");
++
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "The Current Water Mark for memory allocated from system RAM",
++ g_SysRAMWaterMark);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "The Highest Water Mark for memory allocated from system RAM",
++ g_SysRAMHighWaterMark);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "The Current Water Mark for memory allocated from IO memory",
++ g_IOMemWaterMark);
++ Ret = printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++ "The Highest Water Mark for memory allocated from IO memory",
++ g_IOMemHighWaterMark);
++
++ Ret = printAppend(buffer, count, Ret, "\n");
++
++ Ret = printAppend(buffer, count, Ret, "Details for all known allocations:\n"
++ "%-16s %-8s %-8s %-10s %-5s %-10s %s\n",
++ "Type",
++ "CpuVAddr",
++ "CpuPAddr",
++ "Bytes",
++ "PID",
++ "PrivateData",
++ "Filename:Line");
++
++#else
++
++
++ Ret = printAppend(buffer, count, 0, "<meminfo>\n<meminfo_header>\n");
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr0\" description=\"kmalloc_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr1\" description=\"kmalloc_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr2\" description=\"vmalloc_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr3\" description=\"vmalloc_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr4\" description=\"alloc_pages_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr5\" description=\"alloc_pages_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr6\" description=\"ioremap_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr7\" description=\"ioremap_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr8\" description=\"io_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr9\" description=\"io_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr10\" description=\"kmem_cache_current\" bytes=\"%ld\"/>\n",
++ g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr11\" description=\"kmem_cache_high\" bytes=\"%ld\"/>\n",
++ g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++ Ret = printAppend(buffer, count, Ret, "\n");
++
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr14\" description=\"system_ram_current\" bytes=\"%ld\"/>\n",
++ g_SysRAMWaterMark);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr15\" description=\"system_ram_high\" bytes=\"%ld\"/>\n",
++ g_SysRAMHighWaterMark);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr16\" description=\"system_io_current\" bytes=\"%ld\"/>\n",
++ g_IOMemWaterMark);
++ Ret = printAppend(buffer, count, Ret,
++ "<watermark key=\"mr17\" description=\"system_io_high\" bytes=\"%ld\"/>\n",
++ g_IOMemHighWaterMark);
++
++ Ret = printAppend(buffer, count, Ret, "</meminfo_header>\n");
++
++#endif
++
++ goto unlock_and_return;
++ }
++
++ if(count < 1000)
++ {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++ psRecord = (DEBUG_MEM_ALLOC_REC*)
++ List_DEBUG_MEM_ALLOC_REC_Any_va(g_MemoryRecords,
++ DecOffMemAllocRec_AnyVaCb,
++ &off);
++ if(!psRecord)
++ {
++#if defined(DEBUG_LINUX_XML_PROC_FILES)
++ if(off == 0)
++ {
++ Ret = printAppend(buffer, count, 0, "</meminfo>\n");
++ goto unlock_and_return;
++ }
++#endif
++ Ret = END_OF_FILE;
++ goto unlock_and_return;
++ }
++
++ if(psRecord->eAllocType != DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
++ {
++ Ret = printAppend(buffer, count, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-16s %-8p %08lx %-10ld %-5d %-10s %s:%ld\n",
++#else
++ "<allocation>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%-8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%d</pid>\n"
++ "\t<private>%s</private>\n"
++ "\t<filename>%s</filename>\n"
++ "\t<line>%ld</line>\n"
++ "</allocation>\n",
++#endif
++ DebugMemAllocRecordTypeToString(psRecord->eAllocType),
++ psRecord->pvCpuVAddr,
++ psRecord->ulCpuPAddr,
++ psRecord->ui32Bytes,
++ psRecord->pid,
++ "NULL",
++ psRecord->pszFileName,
++ psRecord->ui32Line);
++ }
++ else
++ {
++ Ret = printAppend(buffer, count, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-16s %-8p %08lx %-10ld %-5d %-10s %s:%ld\n",
++#else
++ "<allocation>\n"
++ "\t<type>%s</type>\n"
++ "\t<cpu_virtual>%-8p</cpu_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<bytes>%ld</bytes>\n"
++ "\t<pid>%d</pid>\n"
++ "\t<private>%s</private>\n"
++ "\t<filename>%s</filename>\n"
++ "\t<line>%ld</line>\n"
++ "</allocation>\n",
++#endif
++ DebugMemAllocRecordTypeToString(psRecord->eAllocType),
++ psRecord->pvCpuVAddr,
++ psRecord->ulCpuPAddr,
++ psRecord->ui32Bytes,
++ psRecord->pid,
++ KMemCacheNameWrapper(psRecord->pvPrivateData),
++ psRecord->pszFileName,
++ psRecord->ui32Line);
++ }
++
++unlock_and_return:
++ LinuxUnLockMutex(&g_sDebugMutex);
++ return Ret;
++}
++#endif
++#endif
++
++
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MMAP_AREAS)
++const IMG_CHAR *
++HAPFlagsToString(IMG_UINT32 ui32Flags)
++{
++ static IMG_CHAR szFlags[50];
++ IMG_INT32 i32Pos = 0;
++ IMG_UINT32 ui32CacheTypeIndex, ui32MapTypeIndex;
++ IMG_CHAR *apszCacheTypes[] = {
++ "UNCACHED",
++ "CACHED",
++ "WRITECOMBINE",
++ "UNKNOWN"
++ };
++ IMG_CHAR *apszMapType[] = {
++ "KERNEL_ONLY",
++ "SINGLE_PROCESS",
++ "MULTI_PROCESS",
++ "FROM_EXISTING_PROCESS",
++ "NO_CPU_VIRTUAL",
++ "UNKNOWN"
++ };
++
++
++ if(ui32Flags & PVRSRV_HAP_UNCACHED){
++ ui32CacheTypeIndex=0;
++ }else if(ui32Flags & PVRSRV_HAP_CACHED){
++ ui32CacheTypeIndex=1;
++ }else if(ui32Flags & PVRSRV_HAP_WRITECOMBINE){
++ ui32CacheTypeIndex=2;
++ }else{
++ ui32CacheTypeIndex=3;
++ PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type (%u)",
++ __FUNCTION__, (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)));
++ }
++
++
++ if(ui32Flags & PVRSRV_HAP_KERNEL_ONLY){
++ ui32MapTypeIndex = 0;
++ }else if(ui32Flags & PVRSRV_HAP_SINGLE_PROCESS){
++ ui32MapTypeIndex = 1;
++ }else if(ui32Flags & PVRSRV_HAP_MULTI_PROCESS){
++ ui32MapTypeIndex = 2;
++ }else if(ui32Flags & PVRSRV_HAP_FROM_EXISTING_PROCESS){
++ ui32MapTypeIndex = 3;
++ }else if(ui32Flags & PVRSRV_HAP_NO_CPU_VIRTUAL){
++ ui32MapTypeIndex = 4;
++ }else{
++ ui32MapTypeIndex = 5;
++ PVR_DPF((PVR_DBG_ERROR, "%s: unknown map type (%u)",
++ __FUNCTION__, (ui32Flags & PVRSRV_HAP_MAPTYPE_MASK)));
++ }
++
++ i32Pos = sprintf(szFlags, "%s|", apszCacheTypes[ui32CacheTypeIndex]);
++ if (i32Pos <= 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: sprintf for cache type %u failed (%d)",
++ __FUNCTION__, ui32CacheTypeIndex, i32Pos));
++ szFlags[0] = 0;
++ }
++ else
++ {
++ sprintf(szFlags + i32Pos, "%s", apszMapType[ui32MapTypeIndex]);
++ }
++
++ return szFlags;
++}
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mm.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mm.h
+new file mode 100644
+index 0000000..7d2da4e
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mm.h
+@@ -0,0 +1,331 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __IMG_LINUX_MM_H__
++#define __IMG_LINUX_MM_H__
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/slab.h>
++#include <linux/mm.h>
++#include <linux/list.h>
++
++#include <asm/io.h>
++
++#define PHYS_TO_PFN(phys) ((phys) >> PAGE_SHIFT)
++#define PFN_TO_PHYS(pfn) ((pfn) << PAGE_SHIFT)
++
++#define RANGE_TO_PAGES(range) (((range) + (PAGE_SIZE - 1)) >> PAGE_SHIFT)
++
++#define ADDR_TO_PAGE_OFFSET(addr) (((unsigned long)(addr)) & (PAGE_SIZE - 1))
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10))
++#define REMAP_PFN_RANGE(vma, addr, pfn, size, prot) remap_pfn_range(vma, addr, pfn, size, prot)
++#else
++#define REMAP_PFN_RANGE(vma, addr, pfn, size, prot) remap_page_range(vma, addr, PFN_TO_PHYS(pfn), size, prot)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12))
++#define IO_REMAP_PFN_RANGE(vma, addr, pfn, size, prot) io_remap_pfn_range(vma, addr, pfn, size, prot)
++#else
++#define IO_REMAP_PFN_RANGE(vma, addr, pfn, size, prot) io_remap_page_range(vma, addr, PFN_TO_PHYS(pfn), size, prot)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++#define VM_INSERT_PAGE(vma, addr, page) vm_insert_page(vma, addr, page)
++#else
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10))
++#define VM_INSERT_PAGE(vma, addr, page) remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE, vma->vm_page_prot);
++#else
++#define VM_INSERT_PAGE(vma, addr, page) remap_page_range(vma, addr, page_to_phys(page), PAGE_SIZE, vma->vm_page_prot);
++#endif
++#endif
++
++static inline IMG_UINT32 VMallocToPhys(IMG_VOID *pCpuVAddr)
++{
++ return (page_to_phys(vmalloc_to_page(pCpuVAddr)) + ADDR_TO_PAGE_OFFSET(pCpuVAddr));
++
++}
++
++typedef enum {
++ LINUX_MEM_AREA_IOREMAP,
++ LINUX_MEM_AREA_EXTERNAL_KV,
++ LINUX_MEM_AREA_IO,
++ LINUX_MEM_AREA_VMALLOC,
++ LINUX_MEM_AREA_ALLOC_PAGES,
++ LINUX_MEM_AREA_SUB_ALLOC,
++ LINUX_MEM_AREA_TYPE_COUNT
++}LINUX_MEM_AREA_TYPE;
++
++typedef struct _LinuxMemArea LinuxMemArea;
++
++
++struct _LinuxMemArea {
++ LINUX_MEM_AREA_TYPE eAreaType;
++ union _uData
++ {
++ struct _sIORemap
++ {
++
++ IMG_CPU_PHYADDR CPUPhysAddr;
++ IMG_VOID *pvIORemapCookie;
++ }sIORemap;
++ struct _sExternalKV
++ {
++
++ IMG_BOOL bPhysContig;
++ union {
++
++ IMG_SYS_PHYADDR SysPhysAddr;
++ IMG_SYS_PHYADDR *pSysPhysAddr;
++ } uPhysAddr;
++ IMG_VOID *pvExternalKV;
++ }sExternalKV;
++ struct _sIO
++ {
++
++ IMG_CPU_PHYADDR CPUPhysAddr;
++ }sIO;
++ struct _sVmalloc
++ {
++
++ IMG_VOID *pvVmallocAddress;
++ }sVmalloc;
++ struct _sPageList
++ {
++
++ struct page **pvPageList;
++ IMG_HANDLE hBlockPageList;
++ }sPageList;
++ struct _sSubAlloc
++ {
++
++ LinuxMemArea *psParentLinuxMemArea;
++ IMG_UINT32 ui32ByteOffset;
++ }sSubAlloc;
++ }uData;
++
++ IMG_UINT32 ui32ByteSize;
++
++ IMG_UINT32 ui32AreaFlags;
++
++ IMG_BOOL bMMapRegistered;
++
++
++ struct list_head sMMapItem;
++
++
++ struct list_head sMMapOffsetStructList;
++};
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17))
++typedef kmem_cache_t LinuxKMemCache;
++#else
++typedef struct kmem_cache LinuxKMemCache;
++#endif
++
++
++PVRSRV_ERROR LinuxMMInit(IMG_VOID);
++
++
++IMG_VOID LinuxMMCleanup(IMG_VOID);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KMallocWrapper(ui32ByteSize) _KMallocWrapper(ui32ByteSize, __FILE__, __LINE__)
++#else
++#define KMallocWrapper(ui32ByteSize) _KMallocWrapper(ui32ByteSize, NULL, 0)
++#endif
++IMG_VOID *_KMallocWrapper(IMG_UINT32 ui32ByteSize, IMG_CHAR *szFileName, IMG_UINT32 ui32Line);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KFreeWrapper(pvCpuVAddr) _KFreeWrapper(pvCpuVAddr, __FILE__, __LINE__)
++#else
++#define KFreeWrapper(pvCpuVAddr) _KFreeWrapper(pvCpuVAddr, NULL, 0)
++#endif
++IMG_VOID _KFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define VMallocWrapper(ui32Bytes, ui32AllocFlags) _VMallocWrapper(ui32Bytes, ui32AllocFlags, __FILE__, __LINE__)
++#else
++#define VMallocWrapper(ui32Bytes, ui32AllocFlags) _VMallocWrapper(ui32Bytes, ui32AllocFlags, NULL, 0)
++#endif
++IMG_VOID *_VMallocWrapper(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AllocFlags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define VFreeWrapper(pvCpuVAddr) _VFreeWrapper(pvCpuVAddr, __FILE__, __LINE__)
++#else
++#define VFreeWrapper(pvCpuVAddr) _VFreeWrapper(pvCpuVAddr, NULL, 0)
++#endif
++IMG_VOID _VFreeWrapper(IMG_VOID *pvCpuVAddr, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++
++LinuxMemArea *NewVMallocLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeVMallocLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags) \
++ _IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags, __FILE__, __LINE__)
++#else
++#define IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags) \
++ _IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags, NULL, 0)
++#endif
++IMG_VOID *_IORemapWrapper(IMG_CPU_PHYADDR BasePAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32Line);
++
++
++LinuxMemArea *NewIORemapLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeIORemapLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++LinuxMemArea *NewExternalKVLinuxMemArea(IMG_SYS_PHYADDR *pBasePAddr, IMG_VOID *pvCPUVAddr, IMG_UINT32 ui32Bytes, IMG_BOOL bPhysContig, IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeExternalKVLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define IOUnmapWrapper(pvIORemapCookie) \
++ _IOUnmapWrapper(pvIORemapCookie, __FILE__, __LINE__)
++#else
++#define IOUnmapWrapper(pvIORemapCookie) \
++ _IOUnmapWrapper(pvIORemapCookie, NULL, 0)
++#endif
++IMG_VOID _IOUnmapWrapper(IMG_VOID *pvIORemapCookie, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++
++struct page *LinuxMemAreaOffsetToPage(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32ByteOffset);
++
++
++LinuxKMemCache *KMemCacheCreateWrapper(IMG_CHAR *pszName, size_t Size, size_t Align, IMG_UINT32 ui32Flags);
++
++
++IMG_VOID KMemCacheDestroyWrapper(LinuxKMemCache *psCache);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KMemCacheAllocWrapper(psCache, Flags) _KMemCacheAllocWrapper(psCache, Flags, __FILE__, __LINE__)
++#else
++#define KMemCacheAllocWrapper(psCache, Flags) _KMemCacheAllocWrapper(psCache, Flags, NULL, 0)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14))
++IMG_VOID *_KMemCacheAllocWrapper(LinuxKMemCache *psCache, gfp_t Flags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++#else
++IMG_VOID *_KMemCacheAllocWrapper(LinuxKMemCache *psCache, int Flags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KMemCacheFreeWrapper(psCache, pvObject) _KMemCacheFreeWrapper(psCache, pvObject, __FILE__, __LINE__)
++#else
++#define KMemCacheFreeWrapper(psCache, pvObject) _KMemCacheFreeWrapper(psCache, pvObject, NULL, 0)
++#endif
++IMG_VOID _KMemCacheFreeWrapper(LinuxKMemCache *psCache, IMG_VOID *pvObject, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++
++const IMG_CHAR *KMemCacheNameWrapper(LinuxKMemCache *psCache);
++
++
++LinuxMemArea *NewIOLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeIOLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++LinuxMemArea *NewAllocPagesLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeAllocPagesLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++LinuxMemArea *NewSubLinuxMemArea(LinuxMemArea *psParentLinuxMemArea,
++ IMG_UINT32 ui32ByteOffset,
++ IMG_UINT32 ui32Bytes);
++
++
++IMG_VOID LinuxMemAreaDeepFree(LinuxMemArea *psLinuxMemArea);
++
++
++#if defined(LINUX_MEM_AREAS_DEBUG)
++IMG_VOID LinuxMemAreaRegister(LinuxMemArea *psLinuxMemArea);
++#else
++#define LinuxMemAreaRegister(X)
++#endif
++
++
++IMG_VOID *LinuxMemAreaToCpuVAddr(LinuxMemArea *psLinuxMemArea);
++
++
++IMG_CPU_PHYADDR LinuxMemAreaToCpuPAddr(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32ByteOffset);
++
++
++#define LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32ByteOffset) PHYS_TO_PFN(LinuxMemAreaToCpuPAddr(psLinuxMemArea, ui32ByteOffset).uiAddr)
++
++IMG_BOOL LinuxMemAreaPhysIsContig(LinuxMemArea *psLinuxMemArea);
++
++static inline LinuxMemArea *
++LinuxMemAreaRoot(LinuxMemArea *psLinuxMemArea)
++{
++ if(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC)
++ {
++ return psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea;
++ }
++ else
++ {
++ return psLinuxMemArea;
++ }
++}
++
++
++static inline LINUX_MEM_AREA_TYPE
++LinuxMemAreaRootType(LinuxMemArea *psLinuxMemArea)
++{
++ return LinuxMemAreaRoot(psLinuxMemArea)->eAreaType;
++}
++
++
++const IMG_CHAR *LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType);
++
++
++#if defined(DEBUG) || defined(DEBUG_LINUX_MEM_AREAS)
++const IMG_CHAR *HAPFlagsToString(IMG_UINT32 ui32Flags);
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mmap.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mmap.c
+new file mode 100644
+index 0000000..1689bd4
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mmap.c
+@@ -0,0 +1,1148 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++#include <linux/wrapper.h>
++#endif
++#include <linux/slab.h>
++#include <asm/io.h>
++#include <asm/page.h>
++#include <asm/shmparam.h>
++#include <asm/pgtable.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#include <linux/sched.h>
++#include <asm/current.h>
++#endif
++#if defined(SUPPORT_DRI_DRM)
++#include <drm/drmP.h>
++#endif
++
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "pvrmmap.h"
++#include "mutils.h"
++#include "mmap.h"
++#include "mm.h"
++#include "pvr_debug.h"
++#include "osfunc.h"
++#include "proc.h"
++#include "mutex.h"
++#include "handle.h"
++#include "perproc.h"
++#include "env_perproc.h"
++#include "bridged_support.h"
++#if defined(SUPPORT_DRI_DRM)
++#include "pvr_drm.h"
++#endif
++
++#if !defined(PVR_SECURE_HANDLES)
++#error "The mmap code requires PVR_SECURE_HANDLES"
++#endif
++
++static PVRSRV_LINUX_MUTEX g_sMMapMutex;
++
++static LinuxKMemCache *g_psMemmapCache = NULL;
++static LIST_HEAD(g_sMMapAreaList);
++static LIST_HEAD(g_sMMapOffsetStructList);
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++static IMG_UINT32 g_ui32RegisteredAreas = 0;
++static IMG_UINT32 g_ui32TotalByteSize = 0;
++#endif
++
++
++#if defined(PVR_PROC_USE_SEQ_FILE) && defined(DEBUG_LINUX_MMAP_AREAS)
++static struct proc_dir_entry *g_ProcMMap;
++#endif
++
++#define FIRST_PHYSICAL_PFN 0
++#define LAST_PHYSICAL_PFN 0x7fffffffUL
++#define FIRST_SPECIAL_PFN (LAST_PHYSICAL_PFN + 1)
++#define LAST_SPECIAL_PFN 0xffffffffUL
++
++#define MAX_MMAP_HANDLE 0x7fffffffUL
++
++static inline IMG_BOOL
++PFNIsPhysical(IMG_UINT32 pfn)
++{
++
++ return ((pfn >= FIRST_PHYSICAL_PFN) && (pfn <= LAST_PHYSICAL_PFN)) ? IMG_TRUE : IMG_FALSE;
++}
++
++static inline IMG_BOOL
++PFNIsSpecial(IMG_UINT32 pfn)
++{
++
++ return ((pfn >= FIRST_SPECIAL_PFN) && (pfn <= LAST_SPECIAL_PFN)) ? IMG_TRUE : IMG_FALSE;
++}
++
++static inline IMG_HANDLE
++MMapOffsetToHandle(IMG_UINT32 pfn)
++{
++ if (PFNIsPhysical(pfn))
++ {
++ PVR_ASSERT(PFNIsPhysical(pfn));
++ return IMG_NULL;
++ }
++
++ return (IMG_HANDLE)(pfn - FIRST_SPECIAL_PFN);
++}
++
++static inline IMG_UINT32
++HandleToMMapOffset(IMG_HANDLE hHandle)
++{
++ IMG_UINT32 ulHandle = (IMG_UINT32)hHandle;
++
++ if (PFNIsSpecial(ulHandle))
++ {
++ PVR_ASSERT(PFNIsSpecial(ulHandle));
++ return 0;
++ }
++
++ return ulHandle + FIRST_SPECIAL_PFN;
++}
++
++static inline IMG_BOOL
++LinuxMemAreaUsesPhysicalMap(LinuxMemArea *psLinuxMemArea)
++{
++ return LinuxMemAreaPhysIsContig(psLinuxMemArea);
++}
++
++static inline IMG_UINT32
++GetCurrentThreadID(IMG_VOID)
++{
++
++ return (IMG_UINT32)current->pid;
++}
++
++static PKV_OFFSET_STRUCT
++CreateOffsetStruct(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Offset, IMG_UINT32 ui32RealByteSize)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct;
++#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
++ const IMG_CHAR *pszName = LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea));
++#endif
++
++#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s(%s, psLinuxMemArea: 0x%p, ui32AllocFlags: 0x%8lx)",
++ __FUNCTION__, pszName, psLinuxMemArea, psLinuxMemArea->ui32AreaFlags));
++#endif
++
++ PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType != LINUX_MEM_AREA_SUB_ALLOC);
++
++ PVR_ASSERT(psLinuxMemArea->bMMapRegistered);
++
++ psOffsetStruct = KMemCacheAllocWrapper(g_psMemmapCache, GFP_KERNEL);
++ if(psOffsetStruct == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PVRMMapRegisterArea: Couldn't alloc another mapping record from cache"));
++ return IMG_NULL;
++ }
++
++ psOffsetStruct->ui32MMapOffset = ui32Offset;
++
++ psOffsetStruct->psLinuxMemArea = psLinuxMemArea;
++
++ psOffsetStruct->ui32Mapped = 0;
++
++ psOffsetStruct->ui32RealByteSize = ui32RealByteSize;
++
++
++ psOffsetStruct->ui32TID = GetCurrentThreadID();
++
++ psOffsetStruct->ui32PID = OSGetCurrentProcessIDKM();
++
++ psOffsetStruct->bOnMMapList = IMG_FALSE;
++
++ psOffsetStruct->ui32RefCount = 0;
++
++ psOffsetStruct->ui32UserVAddr = 0;
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++
++ psOffsetStruct->pszName = pszName;
++#endif
++
++ list_add_tail(&psOffsetStruct->sAreaItem, &psLinuxMemArea->sMMapOffsetStructList);
++
++ return psOffsetStruct;
++}
++
++
++static IMG_VOID
++DestroyOffsetStruct(PKV_OFFSET_STRUCT psOffsetStruct)
++{
++ list_del(&psOffsetStruct->sAreaItem);
++
++ if (psOffsetStruct->bOnMMapList)
++ {
++ list_del(&psOffsetStruct->sMMapItem);
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Table entry: "
++ "psLinuxMemArea=0x%08lX, CpuPAddr=0x%08lX", __FUNCTION__,
++ psOffsetStruct->psLinuxMemArea,
++ LinuxMemAreaToCpuPAddr(psOffsetStruct->psLinuxMemArea, 0)));
++
++ KMemCacheFreeWrapper(g_psMemmapCache, psOffsetStruct);
++}
++
++
++static inline IMG_VOID
++DetermineUsersSizeAndByteOffset(LinuxMemArea *psLinuxMemArea,
++ IMG_UINT32 *pui32RealByteSize,
++ IMG_UINT32 *pui32ByteOffset)
++{
++ IMG_UINT32 ui32PageAlignmentOffset;
++ IMG_CPU_PHYADDR CpuPAddr;
++
++ CpuPAddr = LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0);
++ ui32PageAlignmentOffset = ADDR_TO_PAGE_OFFSET(CpuPAddr.uiAddr);
++
++ *pui32ByteOffset = ui32PageAlignmentOffset;
++
++ *pui32RealByteSize = PAGE_ALIGN(psLinuxMemArea->ui32ByteSize + ui32PageAlignmentOffset);
++}
++
++
++PVRSRV_ERROR
++PVRMMapOSMemHandleToMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hMHandle,
++ IMG_UINT32 *pui32MMapOffset,
++ IMG_UINT32 *pui32ByteOffset,
++ IMG_UINT32 *pui32RealByteSize,
++ IMG_UINT32 *pui32UserVAddr)
++{
++ LinuxMemArea *psLinuxMemArea;
++ PKV_OFFSET_STRUCT psOffsetStruct;
++ IMG_HANDLE hOSMemHandle;
++ PVRSRV_ERROR eError;
++
++ LinuxLockMutex(&g_sMMapMutex);
++
++ PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <= MAX_MMAP_HANDLE);
++
++ eError = PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle, hMHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle 0x%lx failed", __FUNCTION__, hMHandle));
++
++ goto exit_unlock;
++ }
++
++ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++ DetermineUsersSizeAndByteOffset(psLinuxMemArea,
++ pui32RealByteSize,
++ pui32ByteOffset);
++
++
++ list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
++ {
++ if (psPerProc->ui32PID == psOffsetStruct->ui32PID)
++ {
++
++ PVR_ASSERT(*pui32RealByteSize == psOffsetStruct->ui32RealByteSize);
++
++ *pui32MMapOffset = psOffsetStruct->ui32MMapOffset;
++ *pui32UserVAddr = psOffsetStruct->ui32UserVAddr;
++ psOffsetStruct->ui32RefCount++;
++
++ eError = PVRSRV_OK;
++ goto exit_unlock;
++ }
++ }
++
++
++ *pui32UserVAddr = 0;
++
++ if (LinuxMemAreaUsesPhysicalMap(psLinuxMemArea))
++ {
++ *pui32MMapOffset = LinuxMemAreaToCpuPFN(psLinuxMemArea, 0);
++ PVR_ASSERT(PFNIsPhysical(*pui32MMapOffset));
++ }
++ else
++ {
++ *pui32MMapOffset = HandleToMMapOffset(hMHandle);
++ PVR_ASSERT(PFNIsSpecial(*pui32MMapOffset));
++ }
++
++ psOffsetStruct = CreateOffsetStruct(psLinuxMemArea, *pui32MMapOffset, *pui32RealByteSize);
++ if (psOffsetStruct == IMG_NULL)
++ {
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto exit_unlock;
++ }
++
++
++ list_add_tail(&psOffsetStruct->sMMapItem, &g_sMMapOffsetStructList);
++
++ psOffsetStruct->bOnMMapList = IMG_TRUE;
++
++ psOffsetStruct->ui32RefCount++;
++
++ eError = PVRSRV_OK;
++
++exit_unlock:
++ LinuxUnLockMutex(&g_sMMapMutex);
++
++ return eError;
++}
++
++
++PVRSRV_ERROR
++PVRMMapReleaseMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hMHandle,
++ IMG_BOOL *pbMUnmap,
++ IMG_UINT32 *pui32RealByteSize,
++ IMG_UINT32 *pui32UserVAddr)
++{
++ LinuxMemArea *psLinuxMemArea;
++ PKV_OFFSET_STRUCT psOffsetStruct;
++ IMG_HANDLE hOSMemHandle;
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
++
++ LinuxLockMutex(&g_sMMapMutex);
++
++ PVR_ASSERT(PVRSRVGetMaxHandle(psPerProc->psHandleBase) <= MAX_MMAP_HANDLE);
++
++ eError = PVRSRVLookupOSMemHandle(psPerProc->psHandleBase, &hOSMemHandle, hMHandle);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Lookup of handle 0x%lx failed", __FUNCTION__, hMHandle));
++
++ goto exit_unlock;
++ }
++
++ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++
++ list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
++ {
++ if (psOffsetStruct->ui32PID == ui32PID)
++ {
++ if (psOffsetStruct->ui32RefCount == 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Attempt to release mmap data with zero reference count for offset struct 0x%p, memory area 0x%p", __FUNCTION__, psOffsetStruct, psLinuxMemArea));
++ eError = PVRSRV_ERROR_GENERIC;
++ goto exit_unlock;
++ }
++
++ psOffsetStruct->ui32RefCount--;
++
++ *pbMUnmap = (IMG_BOOL)((psOffsetStruct->ui32RefCount == 0) && (psOffsetStruct->ui32UserVAddr != 0));
++
++ *pui32UserVAddr = (*pbMUnmap) ? psOffsetStruct->ui32UserVAddr : 0;
++ *pui32RealByteSize = (*pbMUnmap) ? psOffsetStruct->ui32RealByteSize : 0;
++
++ eError = PVRSRV_OK;
++ goto exit_unlock;
++ }
++ }
++
++
++ PVR_DPF((PVR_DBG_ERROR, "%s: Mapping data not found for handle 0x%lx (memory area 0x%p)", __FUNCTION__, hMHandle, psLinuxMemArea));
++
++ eError = PVRSRV_ERROR_GENERIC;
++
++exit_unlock:
++ LinuxUnLockMutex(&g_sMMapMutex);
++
++ return eError;
++}
++
++static inline PKV_OFFSET_STRUCT
++FindOffsetStructByOffset(IMG_UINT32 ui32Offset, IMG_UINT32 ui32RealByteSize)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct;
++ IMG_UINT32 ui32TID = GetCurrentThreadID();
++ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
++
++ list_for_each_entry(psOffsetStruct, &g_sMMapOffsetStructList, sMMapItem)
++ {
++ if (ui32Offset == psOffsetStruct->ui32MMapOffset && ui32RealByteSize == psOffsetStruct->ui32RealByteSize && psOffsetStruct->ui32PID == ui32PID)
++ {
++
++ if (!PFNIsPhysical(ui32Offset) || psOffsetStruct->ui32TID == ui32TID)
++ {
++ return psOffsetStruct;
++ }
++ }
++ }
++
++ return IMG_NULL;
++}
++
++
++static IMG_BOOL
++DoMapToUser(LinuxMemArea *psLinuxMemArea,
++ struct vm_area_struct* ps_vma,
++ IMG_UINT32 ui32ByteOffset)
++{
++ IMG_UINT32 ui32ByteSize;
++
++ if (psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC)
++ {
++ return DoMapToUser(LinuxMemAreaRoot(psLinuxMemArea),
++ ps_vma,
++ psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset + ui32ByteOffset);
++ }
++
++
++ ui32ByteSize = ps_vma->vm_end - ps_vma->vm_start;
++ PVR_ASSERT(ADDR_TO_PAGE_OFFSET(ui32ByteSize) == 0);
++
++#if defined (__sparc__)
++
++#error "SPARC not supported"
++#endif
++
++ if (PFNIsPhysical(ps_vma->vm_pgoff))
++ {
++ IMG_INT result;
++
++ PVR_ASSERT(LinuxMemAreaPhysIsContig(psLinuxMemArea));
++ PVR_ASSERT(LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32ByteOffset) == ps_vma->vm_pgoff);
++
++
++ result = IO_REMAP_PFN_RANGE(ps_vma, ps_vma->vm_start, ps_vma->vm_pgoff, ui32ByteSize, ps_vma->vm_page_prot);
++
++ if(result == 0)
++ {
++ return IMG_TRUE;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Failed to map contiguous physical address range (%d), trying non-contiguous path", __FUNCTION__, result));
++ }
++
++ {
++
++ IMG_UINT32 ulVMAPos;
++ IMG_UINT32 ui32ByteEnd = ui32ByteOffset + ui32ByteSize;
++ IMG_UINT32 ui32PA;
++
++
++ for(ui32PA = ui32ByteOffset; ui32PA < ui32ByteEnd; ui32PA += PAGE_SIZE)
++ {
++ IMG_UINT32 pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32PA);
++
++ if (!pfn_valid(pfn))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"%s: Error - PFN invalid: 0x%lx", __FUNCTION__, pfn));
++ return IMG_FALSE;
++ }
++ }
++
++
++ ulVMAPos = ps_vma->vm_start;
++ for(ui32PA = ui32ByteOffset; ui32PA < ui32ByteEnd; ui32PA += PAGE_SIZE)
++ {
++ IMG_UINT32 pfn;
++ struct page *psPage;
++ IMG_INT result;
++
++ pfn = LinuxMemAreaToCpuPFN(psLinuxMemArea, ui32PA);
++ PVR_ASSERT(pfn_valid(pfn));
++
++ psPage = pfn_to_page(pfn);
++
++ result = VM_INSERT_PAGE(ps_vma, ulVMAPos, psPage);
++ if(result != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"%s: Error - VM_INSERT_PAGE failed (%d)", __FUNCTION__, result));
++ return IMG_FALSE;
++ }
++ ulVMAPos += PAGE_SIZE;
++ }
++ }
++
++ return IMG_TRUE;
++}
++
++
++static IMG_VOID
++MMapVOpenNoLock(struct vm_area_struct* ps_vma)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data;
++ PVR_ASSERT(psOffsetStruct != IMG_NULL)
++ psOffsetStruct->ui32Mapped++;
++ PVR_ASSERT(!psOffsetStruct->bOnMMapList);
++
++ if (psOffsetStruct->ui32Mapped > 1)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "%s: Offset structure 0x%p is being shared across processes (psOffsetStruct->ui32Mapped: %lu)", __FUNCTION__, psOffsetStruct, psOffsetStruct->ui32Mapped));
++ PVR_ASSERT((ps_vma->vm_flags & VM_DONTCOPY) == 0);
++ }
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s: psLinuxMemArea 0x%p, KVAddress 0x%p MMapOffset %ld, ui32Mapped %d",
++ __FUNCTION__,
++ psOffsetStruct->psLinuxMemArea,
++ LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
++ psOffsetStruct->ui32MMapOffset,
++ psOffsetStruct->ui32Mapped));
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++ MOD_INC_USE_COUNT;
++#endif
++}
++
++
++static void
++MMapVOpen(struct vm_area_struct* ps_vma)
++{
++ LinuxLockMutex(&g_sMMapMutex);
++
++ MMapVOpenNoLock(ps_vma);
++
++ LinuxUnLockMutex(&g_sMMapMutex);
++}
++
++
++static IMG_VOID
++MMapVCloseNoLock(struct vm_area_struct* ps_vma)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data;
++ PVR_ASSERT(psOffsetStruct != IMG_NULL)
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s: psLinuxMemArea 0x%p, CpuVAddr 0x%p ui32MMapOffset %ld, ui32Mapped %d",
++ __FUNCTION__,
++ psOffsetStruct->psLinuxMemArea,
++ LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
++ psOffsetStruct->ui32MMapOffset,
++ psOffsetStruct->ui32Mapped));
++#endif
++
++ PVR_ASSERT(!psOffsetStruct->bOnMMapList);
++ psOffsetStruct->ui32Mapped--;
++ if (psOffsetStruct->ui32Mapped == 0)
++ {
++ if (psOffsetStruct->ui32RefCount != 0)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: psOffsetStruct 0x%p has non-zero reference count (ui32RefCount = %lu). User mode address of start of mapping: 0x%lx", __FUNCTION__, psOffsetStruct, psOffsetStruct->ui32RefCount, psOffsetStruct->ui32UserVAddr));
++ }
++
++ DestroyOffsetStruct(psOffsetStruct);
++ }
++
++ ps_vma->vm_private_data = NULL;
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++ MOD_DEC_USE_COUNT;
++#endif
++}
++
++static void
++MMapVClose(struct vm_area_struct* ps_vma)
++{
++ LinuxLockMutex(&g_sMMapMutex);
++
++ MMapVCloseNoLock(ps_vma);
++
++ LinuxUnLockMutex(&g_sMMapMutex);
++}
++
++
++static struct vm_operations_struct MMapIOOps =
++{
++ .open=MMapVOpen,
++ .close=MMapVClose
++};
++
++
++int
++PVRMMap(struct file* pFile, struct vm_area_struct* ps_vma)
++{
++ IMG_UINT32 ui32ByteSize;
++ PKV_OFFSET_STRUCT psOffsetStruct;
++ int iRetVal = 0;
++
++ PVR_UNREFERENCED_PARAMETER(pFile);
++
++ LinuxLockMutex(&g_sMMapMutex);
++
++ ui32ByteSize = ps_vma->vm_end - ps_vma->vm_start;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Received mmap(2) request with ui32MMapOffset 0x%08lx,"
++ " and ui32ByteSize %ld(0x%08lx)",
++ __FUNCTION__,
++ ps_vma->vm_pgoff,
++ ui32ByteSize, ui32ByteSize));
++
++ psOffsetStruct = FindOffsetStructByOffset(ps_vma->vm_pgoff, ui32ByteSize);
++ if (psOffsetStruct == IMG_NULL)
++ {
++#if defined(SUPPORT_DRI_DRM)
++ LinuxUnLockMutex(&g_sMMapMutex);
++
++
++ return drm_mmap(pFile, ps_vma);
++#else
++ PVR_UNREFERENCED_PARAMETER(pFile);
++
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Attempted to mmap unregistered area at vm_pgoff %ld",
++ __FUNCTION__, ps_vma->vm_pgoff));
++ iRetVal = -EINVAL;
++#endif
++ goto unlock_and_return;
++ }
++ list_del(&psOffsetStruct->sMMapItem);
++ psOffsetStruct->bOnMMapList = IMG_FALSE;
++
++
++ if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
++ ((ps_vma->vm_flags & VM_SHARED) == 0))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Cannot mmap non-shareable writable areas", __FUNCTION__));
++ iRetVal = -EINVAL;
++ goto unlock_and_return;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapped psLinuxMemArea 0x%p\n",
++ __FUNCTION__, psOffsetStruct->psLinuxMemArea));
++
++ ps_vma->vm_flags |= VM_RESERVED;
++ ps_vma->vm_flags |= VM_IO;
++
++
++ ps_vma->vm_flags |= VM_DONTEXPAND;
++
++
++ ps_vma->vm_flags |= VM_DONTCOPY;
++
++ ps_vma->vm_private_data = (void *)psOffsetStruct;
++
++ switch(psOffsetStruct->psLinuxMemArea->ui32AreaFlags & PVRSRV_HAP_CACHETYPE_MASK)
++ {
++ case PVRSRV_HAP_CACHED:
++
++ break;
++ case PVRSRV_HAP_WRITECOMBINE:
++ ps_vma->vm_page_prot = PGPROT_WC(ps_vma->vm_page_prot);
++ break;
++ case PVRSRV_HAP_UNCACHED:
++ ps_vma->vm_page_prot = PGPROT_UC(ps_vma->vm_page_prot);
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type", __FUNCTION__));
++ iRetVal = -EINVAL;
++ goto unlock_and_return;
++ }
++
++
++ ps_vma->vm_ops = &MMapIOOps;
++
++ if(!DoMapToUser(psOffsetStruct->psLinuxMemArea, ps_vma, 0))
++ {
++ iRetVal = -EAGAIN;
++ goto unlock_and_return;
++ }
++
++ PVR_ASSERT(psOffsetStruct->ui32UserVAddr == 0)
++
++ psOffsetStruct->ui32UserVAddr = ps_vma->vm_start;
++
++
++ MMapVOpenNoLock(ps_vma);
++
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapped area at offset 0x%08lx\n",
++ __FUNCTION__, ps_vma->vm_pgoff));
++
++unlock_and_return:
++ if (iRetVal != 0 && psOffsetStruct != IMG_NULL)
++ {
++ DestroyOffsetStruct(psOffsetStruct);
++ }
++
++ LinuxUnLockMutex(&g_sMMapMutex);
++
++ return iRetVal;
++}
++
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void ProcSeqStartstopMMapRegistations(struct seq_file *sfile,IMG_BOOL start)
++{
++ if(start)
++ {
++ LinuxLockMutex(&g_sMMapMutex);
++ }
++ else
++ {
++ LinuxUnLockMutex(&g_sMMapMutex);
++ }
++}
++
++
++static void* ProcSeqOff2ElementMMapRegistrations(struct seq_file *sfile, loff_t off)
++{
++ LinuxMemArea *psLinuxMemArea;
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ list_for_each_entry(psLinuxMemArea, &g_sMMapAreaList, sMMapItem)
++ {
++ PKV_OFFSET_STRUCT psOffsetStruct;
++
++ list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
++ {
++ off--;
++ if (off == 0)
++ {
++ PVR_ASSERT(psOffsetStruct->psLinuxMemArea == psLinuxMemArea);
++ return (void*)psOffsetStruct;
++ }
++ }
++ }
++ return (void*)0;
++}
++
++static void* ProcSeqNextMMapRegistrations(struct seq_file *sfile,void* el,loff_t off)
++{
++ return ProcSeqOff2ElementMMapRegistrations(sfile,off);
++}
++
++
++static void ProcSeqShowMMapRegistrations(struct seq_file *sfile,void* el)
++{
++ KV_OFFSET_STRUCT *psOffsetStruct = (KV_OFFSET_STRUCT*)el;
++ LinuxMemArea *psLinuxMemArea;
++ IMG_UINT32 ui32RealByteSize;
++ IMG_UINT32 ui32ByteOffset;
++
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++ seq_printf( sfile,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "Allocations registered for mmap: %lu\n"
++ "In total these areas correspond to %lu bytes\n"
++ "psLinuxMemArea "
++ "UserVAddr "
++ "KernelVAddr "
++ "CpuPAddr "
++ "MMapOffset "
++ "ByteLength "
++ "LinuxMemType "
++ "Pid Name Flags\n",
++#else
++ "<mmap_header>\n"
++ "\t<count>%lu</count>\n"
++ "\t<bytes>%lu</bytes>\n"
++ "</mmap_header>\n",
++#endif
++ g_ui32RegisteredAreas,
++ g_ui32TotalByteSize
++ );
++ return;
++ }
++
++ psLinuxMemArea = psOffsetStruct->psLinuxMemArea;
++
++ DetermineUsersSizeAndByteOffset(psLinuxMemArea,
++ &ui32RealByteSize,
++ &ui32ByteOffset);
++
++ seq_printf( sfile,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-8p %08lx %-8p %08lx %08lx %-8ld %-24s %-5lu %-8s %08lx(%s)\n",
++#else
++ "<mmap_record>\n"
++ "\t<pointer>%-8p</pointer>\n"
++ "\t<user_virtual>%-8lx</user_virtual>\n"
++ "\t<kernel_virtual>%-8p</kernel_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<mmap_offset>%08lx</mmap_offset>\n"
++ "\t<bytes>%-8ld</bytes>\n"
++ "\t<linux_mem_area_type>%-24s</linux_mem_area_type>\n"
++ "\t<pid>%-5lu</pid>\n"
++ "\t<name>%-8s</name>\n"
++ "\t<flags>%08lx</flags>\n"
++ "\t<flags_string>%s</flags_string>\n"
++ "</mmap_record>\n",
++#endif
++ psLinuxMemArea,
++ psOffsetStruct->ui32UserVAddr + ui32ByteOffset,
++ LinuxMemAreaToCpuVAddr(psLinuxMemArea),
++ LinuxMemAreaToCpuPAddr(psLinuxMemArea,0).uiAddr,
++ psOffsetStruct->ui32MMapOffset,
++ psLinuxMemArea->ui32ByteSize,
++ LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType),
++ psOffsetStruct->ui32PID,
++ psOffsetStruct->pszName,
++ psLinuxMemArea->ui32AreaFlags,
++ HAPFlagsToString(psLinuxMemArea->ui32AreaFlags));
++}
++
++#else
++
++static off_t
++PrintMMapRegistrations(IMG_CHAR *buffer, size_t size, off_t off)
++{
++ LinuxMemArea *psLinuxMemArea;
++ off_t Ret;
++
++ LinuxLockMutex(&g_sMMapMutex);
++
++ if(!off)
++ {
++ Ret = printAppend(buffer, size, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "Allocations registered for mmap: %lu\n"
++ "In total these areas correspond to %lu bytes\n"
++ "psLinuxMemArea "
++ "UserVAddr "
++ "KernelVAddr "
++ "CpuPAddr "
++ "MMapOffset "
++ "ByteLength "
++ "LinuxMemType "
++ "Pid Name Flags\n",
++#else
++ "<mmap_header>\n"
++ "\t<count>%lu</count>\n"
++ "\t<bytes>%lu</bytes>\n"
++ "</mmap_header>\n",
++#endif
++ g_ui32RegisteredAreas,
++ g_ui32TotalByteSize
++ );
++
++ goto unlock_and_return;
++ }
++
++ if (size < 135)
++ {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++ PVR_ASSERT(off != 0);
++ list_for_each_entry(psLinuxMemArea, &g_sMMapAreaList, sMMapItem)
++ {
++ PKV_OFFSET_STRUCT psOffsetStruct;
++
++ list_for_each_entry(psOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
++ {
++ off--;
++ if (off == 0)
++ {
++ IMG_UINT32 ui32RealByteSize;
++ IMG_UINT32 ui32ByteOffset;
++
++ PVR_ASSERT(psOffsetStruct->psLinuxMemArea == psLinuxMemArea);
++
++ DetermineUsersSizeAndByteOffset(psLinuxMemArea,
++ &ui32RealByteSize,
++ &ui32ByteOffset);
++
++ Ret = printAppend (buffer, size, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++ "%-8p %08lx %-8p %08lx %08lx %-8ld %-24s %-5lu %-8s %08lx(%s)\n",
++#else
++ "<mmap_record>\n"
++ "\t<pointer>%-8p</pointer>\n"
++ "\t<user_virtual>%-8lx</user_virtual>\n"
++ "\t<kernel_virtual>%-8p</kernel_virtual>\n"
++ "\t<cpu_physical>%08lx</cpu_physical>\n"
++ "\t<mmap_offset>%08lx</mmap_offset>\n"
++ "\t<bytes>%-8ld</bytes>\n"
++ "\t<linux_mem_area_type>%-24s</linux_mem_area_type>\n"
++ "\t<pid>%-5lu</pid>\n"
++ "\t<name>%-8s</name>\n"
++ "\t<flags>%08lx</flags>\n"
++ "\t<flags_string>%s</flags_string>\n"
++ "</mmap_record>\n",
++#endif
++ psLinuxMemArea,
++ psOffsetStruct->ui32UserVAddr + ui32ByteOffset,
++ LinuxMemAreaToCpuVAddr(psLinuxMemArea),
++ LinuxMemAreaToCpuPAddr(psLinuxMemArea,0).uiAddr,
++ psOffsetStruct->ui32MMapOffset,
++ psLinuxMemArea->ui32ByteSize,
++ LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType),
++ psOffsetStruct->ui32PID,
++ psOffsetStruct->pszName,
++ psLinuxMemArea->ui32AreaFlags,
++ HAPFlagsToString(psLinuxMemArea->ui32AreaFlags));
++ goto unlock_and_return;
++ }
++ }
++ }
++ Ret = END_OF_FILE;
++
++unlock_and_return:
++ LinuxUnLockMutex(&g_sMMapMutex);
++ return Ret;
++}
++#endif
++#endif
++
++
++PVRSRV_ERROR
++PVRMMapRegisterArea(LinuxMemArea *psLinuxMemArea)
++{
++ PVRSRV_ERROR eError;
++#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
++ const IMG_CHAR *pszName = LinuxMemAreaTypeToString(LinuxMemAreaRootType(psLinuxMemArea));
++#endif
++
++ LinuxLockMutex(&g_sMMapMutex);
++
++#if defined(DEBUG) || defined(DEBUG_LINUX_MMAP_AREAS)
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s(%s, psLinuxMemArea 0x%p, ui32AllocFlags 0x%8lx)",
++ __FUNCTION__, pszName, psLinuxMemArea, psLinuxMemArea->ui32AreaFlags));
++#endif
++
++ PVR_ASSERT(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC || LinuxMemAreaRoot(psLinuxMemArea)->eAreaType != LINUX_MEM_AREA_SUB_ALLOC);
++
++
++ if(psLinuxMemArea->bMMapRegistered)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: psLinuxMemArea 0x%p is already registered",
++ __FUNCTION__, psLinuxMemArea));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ goto exit_unlock;
++ }
++
++ list_add_tail(&psLinuxMemArea->sMMapItem, &g_sMMapAreaList);
++
++ psLinuxMemArea->bMMapRegistered = IMG_TRUE;
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++ g_ui32RegisteredAreas++;
++
++ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
++ {
++ g_ui32TotalByteSize += psLinuxMemArea->ui32ByteSize;
++ }
++#endif
++
++ eError = PVRSRV_OK;
++
++exit_unlock:
++ LinuxUnLockMutex(&g_sMMapMutex);
++
++ return eError;
++}
++
++
++PVRSRV_ERROR
++PVRMMapRemoveRegisteredArea(LinuxMemArea *psLinuxMemArea)
++{
++ PVRSRV_ERROR eError;
++ PKV_OFFSET_STRUCT psOffsetStruct, psTmpOffsetStruct;
++
++ LinuxLockMutex(&g_sMMapMutex);
++
++ PVR_ASSERT(psLinuxMemArea->bMMapRegistered);
++
++ list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct, &psLinuxMemArea->sMMapOffsetStructList, sAreaItem)
++ {
++ if (psOffsetStruct->ui32Mapped != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: psOffsetStruct 0x%p for memory area 0x0x%p is still mapped; psOffsetStruct->ui32Mapped %lu", __FUNCTION__, psOffsetStruct, psLinuxMemArea, psOffsetStruct->ui32Mapped));
++ eError = PVRSRV_ERROR_GENERIC;
++ goto exit_unlock;
++ }
++ else
++ {
++
++ PVR_DPF((PVR_DBG_WARNING, "%s: psOffsetStruct 0x%p was never mapped", __FUNCTION__, psOffsetStruct));
++ }
++
++ PVR_ASSERT((psOffsetStruct->ui32Mapped == 0) && psOffsetStruct->bOnMMapList);
++
++ DestroyOffsetStruct(psOffsetStruct);
++ }
++
++ list_del(&psLinuxMemArea->sMMapItem);
++
++ psLinuxMemArea->bMMapRegistered = IMG_FALSE;
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++ g_ui32RegisteredAreas--;
++ if (psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
++ {
++ g_ui32TotalByteSize -= psLinuxMemArea->ui32ByteSize;
++ }
++#endif
++
++ eError = PVRSRV_OK;
++
++exit_unlock:
++ LinuxUnLockMutex(&g_sMMapMutex);
++ return eError;
++}
++
++
++PVRSRV_ERROR
++LinuxMMapPerProcessConnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc)
++{
++ PVR_UNREFERENCED_PARAMETER(psEnvPerProc);
++
++ return PVRSRV_OK;
++}
++
++IMG_VOID
++LinuxMMapPerProcessDisconnect(PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc)
++{
++ PKV_OFFSET_STRUCT psOffsetStruct, psTmpOffsetStruct;
++ IMG_BOOL bWarn = IMG_FALSE;
++ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
++
++ PVR_UNREFERENCED_PARAMETER(psEnvPerProc);
++
++ LinuxLockMutex(&g_sMMapMutex);
++
++ list_for_each_entry_safe(psOffsetStruct, psTmpOffsetStruct, &g_sMMapOffsetStructList, sMMapItem)
++ {
++ if (psOffsetStruct->ui32PID == ui32PID)
++ {
++ if (!bWarn)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "%s: process has unmapped offset structures. Removing them", __FUNCTION__));
++ bWarn = IMG_TRUE;
++ }
++ PVR_ASSERT(psOffsetStruct->ui32Mapped == 0);
++ PVR_ASSERT(psOffsetStruct->bOnMMapList);
++
++ DestroyOffsetStruct(psOffsetStruct);
++ }
++ }
++
++ LinuxUnLockMutex(&g_sMMapMutex);
++}
++
++
++PVRSRV_ERROR LinuxMMapPerProcessHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase)
++{
++ PVRSRV_ERROR eError;
++
++ eError = PVRSRVSetMaxHandle(psHandleBase, MAX_MMAP_HANDLE);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"%s: failed to set handle limit (%d)", __FUNCTION__, eError));
++ return eError;
++ }
++
++ return eError;
++}
++
++
++IMG_VOID
++PVRMMapInit(IMG_VOID)
++{
++ LinuxInitMutex(&g_sMMapMutex);
++
++ g_psMemmapCache = KMemCacheCreateWrapper("img-mmap", sizeof(KV_OFFSET_STRUCT), 0, 0);
++ if (!g_psMemmapCache)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate kmem_cache", __FUNCTION__));
++ goto error;
++ }
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_ProcMMap = CreateProcReadEntrySeq("mmap", NULL,
++ ProcSeqNextMMapRegistrations,
++ ProcSeqShowMMapRegistrations,
++ ProcSeqOff2ElementMMapRegistrations,
++ ProcSeqStartstopMMapRegistations
++ );
++#else
++ CreateProcReadEntry("mmap", PrintMMapRegistrations);
++#endif
++#endif
++ return;
++
++error:
++ PVRMMapCleanup();
++ return;
++}
++
++
++IMG_VOID
++PVRMMapCleanup(IMG_VOID)
++{
++ PVRSRV_ERROR eError;
++
++ if (!list_empty(&g_sMMapAreaList))
++ {
++ LinuxMemArea *psLinuxMemArea, *psTmpMemArea;
++
++ PVR_DPF((PVR_DBG_ERROR, "%s: Memory areas are still registered with MMap", __FUNCTION__));
++
++ PVR_TRACE(("%s: Unregistering memory areas", __FUNCTION__));
++ list_for_each_entry_safe(psLinuxMemArea, psTmpMemArea, &g_sMMapAreaList, sMMapItem)
++ {
++ eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRMMapRemoveRegisteredArea failed (%d)", __FUNCTION__, eError));
++ }
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++ }
++ }
++ PVR_ASSERT(list_empty((&g_sMMapAreaList)));
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq(g_ProcMMap);
++#else
++ RemoveProcEntry("mmap");
++#endif
++#endif
++
++ if(g_psMemmapCache)
++ {
++ KMemCacheDestroyWrapper(g_psMemmapCache);
++ g_psMemmapCache = NULL;
++ }
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mmap.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mmap.h
+new file mode 100644
+index 0000000..5c9f2b2
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mmap.h
+@@ -0,0 +1,107 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__MMAP_H__)
++#define __MMAP_H__
++
++#include <linux/mm.h>
++#include <linux/list.h>
++
++#include "perproc.h"
++#include "mm.h"
++
++typedef struct KV_OFFSET_STRUCT_TAG
++{
++
++ IMG_UINT32 ui32Mapped;
++
++
++ IMG_UINT32 ui32MMapOffset;
++
++ IMG_UINT32 ui32RealByteSize;
++
++
++ LinuxMemArea *psLinuxMemArea;
++
++
++ IMG_UINT32 ui32TID;
++
++
++ IMG_UINT32 ui32PID;
++
++
++ IMG_BOOL bOnMMapList;
++
++
++ IMG_UINT32 ui32RefCount;
++
++
++ IMG_UINT32 ui32UserVAddr;
++
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++ const IMG_CHAR *pszName;
++#endif
++
++
++ struct list_head sMMapItem;
++
++
++ struct list_head sAreaItem;
++}KV_OFFSET_STRUCT, *PKV_OFFSET_STRUCT;
++
++
++
++IMG_VOID PVRMMapInit(IMG_VOID);
++
++
++IMG_VOID PVRMMapCleanup(IMG_VOID);
++
++
++PVRSRV_ERROR PVRMMapRegisterArea(LinuxMemArea *psLinuxMemArea);
++
++
++PVRSRV_ERROR PVRMMapRemoveRegisteredArea(LinuxMemArea *psLinuxMemArea);
++
++
++PVRSRV_ERROR PVRMMapOSMemHandleToMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hMHandle,
++ IMG_UINT32 *pui32MMapOffset,
++ IMG_UINT32 *pui32ByteOffset,
++ IMG_UINT32 *pui32RealByteSize, IMG_UINT32 *pui32UserVAddr);
++
++PVRSRV_ERROR
++PVRMMapReleaseMMapData(PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_HANDLE hMHandle,
++ IMG_BOOL *pbMUnmap,
++ IMG_UINT32 *pui32RealByteSize,
++ IMG_UINT32 *pui32UserVAddr);
++
++int PVRMMap(struct file* pFile, struct vm_area_struct* ps_vma);
++
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/module.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/module.c
+new file mode 100644
+index 0000000..150fea5
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/module.c
+@@ -0,0 +1,765 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++
++ #if defined(LDM_PLATFORM)
++ #define PVR_LDM_PLATFORM_MODULE
++ #define PVR_LDM_MODULE
++ #else
++ #if defined(LDM_PCI)
++ #define PVR_LDM_PCI_MODULE
++ #define PVR_LDM_MODULE
++ #endif
++ #endif
++#endif
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++
++#if defined(SUPPORT_DRI_DRM)
++#include <drm/drmP.h>
++#if defined(PVR_SECURE_DRM_AUTH_EXPORT)
++#include "env_perproc.h"
++#endif
++#endif
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++#include <linux/platform_device.h>
++#endif
++
++#if defined(PVR_LDM_PCI_MODULE)
++#include <linux/pci.h>
++#endif
++
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++#include <asm/uaccess.h>
++#endif
++
++#include "img_defs.h"
++#include "services.h"
++#include "kerneldisplay.h"
++#include "kernelbuffer.h"
++#include "syscommon.h"
++#include "pvrmmap.h"
++#include "mutils.h"
++#include "mm.h"
++#include "mmap.h"
++#include "mutex.h"
++#include "pvr_debug.h"
++#include "srvkm.h"
++#include "perproc.h"
++#include "handle.h"
++#include "pvr_bridge_km.h"
++#include "proc.h"
++#include "pvrmodule.h"
++#include "private_data.h"
++#include "lock.h"
++#include "linkage.h"
++
++#if defined(SUPPORT_DRI_DRM)
++#include "pvr_drm.h"
++#endif
++#define DRVNAME "pvrsrvkm"
++#define DEVNAME "pvrsrvkm"
++
++#if defined(SUPPORT_DRI_DRM)
++#define PRIVATE_DATA(pFile) ((pFile)->driver_priv)
++#else
++#define PRIVATE_DATA(pFile) ((pFile)->private_data)
++#endif
++
++MODULE_SUPPORTED_DEVICE(DEVNAME);
++#ifdef DEBUG
++static IMG_INT debug = DBGPRIV_WARNING;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++#include <linux/moduleparam.h>
++module_param(debug, int, 0);
++#else
++MODULE_PARM(debug, "i");
++MODULE_PARM_DESC(debug, "Sets the level of debug output (default=0x4)");
++#endif
++#endif
++
++
++extern IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable);
++extern IMG_BOOL PVRGetBufferClassJTable(PVRSRV_BC_BUFFER2SRV_KMJTABLE *psJTable);
++
++EXPORT_SYMBOL(PVRGetDisplayClassJTable);
++EXPORT_SYMBOL(PVRGetBufferClassJTable);
++
++
++#if defined(PVR_LDM_MODULE)
++static struct class *psPvrClass;
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++static IMG_INT AssignedMajorNumber;
++
++static IMG_INT PVRSRVOpen(struct inode* pInode, struct file* pFile);
++static IMG_INT PVRSRVRelease(struct inode* pInode, struct file* pFile);
++
++static struct file_operations pvrsrv_fops = {
++ .owner=THIS_MODULE,
++ .unlocked_ioctl=PVRSRV_BridgeDispatchKM,
++ .open=PVRSRVOpen,
++ .release=PVRSRVRelease,
++ .mmap=PVRMMap,
++};
++#endif
++
++PVRSRV_LINUX_MUTEX gPVRSRVLock;
++
++IMG_UINT32 gui32ReleasePID;
++
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++static IMG_UINT32 gPVRPowerLevel;
++#endif
++
++#if defined(PVR_LDM_MODULE)
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++#define LDM_DEV struct platform_device
++#define LDM_DRV struct platform_driver
++#endif
++
++#if defined(PVR_LDM_PCI_MODULE)
++#define LDM_DEV struct pci_dev
++#define LDM_DRV struct pci_driver
++#endif
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++static IMG_INT PVRSRVDriverRemove(LDM_DEV *device);
++static IMG_INT PVRSRVDriverProbe(LDM_DEV *device);
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++static IMG_VOID PVRSRVDriverRemove(LDM_DEV *device);
++static IMG_INT PVRSRVDriverProbe(LDM_DEV *device, const struct pci_device_id *id);
++#endif
++static IMG_INT PVRSRVDriverSuspend(LDM_DEV *device, pm_message_t state);
++static IMG_VOID PVRSRVDriverShutdown(LDM_DEV *device);
++static IMG_INT PVRSRVDriverResume(LDM_DEV *device);
++
++#if defined(PVR_LDM_PCI_MODULE)
++struct pci_device_id powervr_id_table[] __devinitdata = {
++ { PCI_DEVICE(SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV_DEVICE_ID) },
++ { 0 }
++};
++
++MODULE_DEVICE_TABLE(pci, powervr_id_table);
++#endif
++
++static LDM_DRV powervr_driver = {
++#if defined(PVR_LDM_PLATFORM_MODULE)
++ .driver = {
++ .name = DRVNAME,
++ },
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++ .name = DRVNAME,
++ .id_table = powervr_id_table,
++#endif
++ .probe = PVRSRVDriverProbe,
++#if defined(PVR_LDM_PLATFORM_MODULE)
++ .remove = PVRSRVDriverRemove,
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++ .remove = __devexit_p(PVRSRVDriverRemove),
++#endif
++ .suspend = PVRSRVDriverSuspend,
++ .resume = PVRSRVDriverResume,
++ .shutdown = PVRSRVDriverShutdown,
++};
++
++LDM_DEV *gpsPVRLDMDev;
++
++#if defined(MODULE) && defined(PVR_LDM_PLATFORM_MODULE)
++
++static IMG_VOID PVRSRVDeviceRelease(struct device *pDevice)
++{
++ PVR_UNREFERENCED_PARAMETER(pDevice);
++}
++
++static struct platform_device powervr_device = {
++ .name = DEVNAME,
++ .id = -1,
++ .dev = {
++ .release = PVRSRVDeviceRelease
++ }
++};
++
++#endif
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++static IMG_INT PVRSRVDriverProbe(LDM_DEV *pDevice)
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++static IMG_INT __devinit PVRSRVDriverProbe(LDM_DEV *pDevice, const struct pci_device_id *id)
++#endif
++{
++ SYS_DATA *psSysData;
++
++ PVR_TRACE(("PVRSRVDriverProbe(pDevice=%p)", pDevice));
++
++#if 0
++
++ if (PerDeviceSysInitialise((IMG_PVOID)pDevice) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++#endif
++
++ if (SysAcquireData(&psSysData) != PVRSRV_OK)
++ {
++ gpsPVRLDMDev = pDevice;
++
++ if (SysInitialise() != PVRSRV_OK)
++ {
++ return -ENODEV;
++ }
++ }
++
++ return 0;
++}
++
++
++#if defined (PVR_LDM_PLATFORM_MODULE)
++static IMG_INT PVRSRVDriverRemove(LDM_DEV *pDevice)
++#endif
++#if defined(PVR_LDM_PCI_MODULE)
++static IMG_VOID __devexit PVRSRVDriverRemove(LDM_DEV *pDevice)
++#endif
++{
++ SYS_DATA *psSysData;
++
++ PVR_TRACE(("PVRSRVDriverRemove(pDevice=%p)", pDevice));
++
++ if (SysAcquireData(&psSysData) == PVRSRV_OK)
++ {
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++ if (gPVRPowerLevel != 0)
++ {
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) == PVRSRV_OK)
++ {
++ gPVRPowerLevel = 0;
++ }
++ }
++#endif
++ SysDeinitialise(psSysData);
++
++ gpsPVRLDMDev = IMG_NULL;
++ }
++
++#if 0
++ if (PerDeviceSysDeInitialise((IMG_PVOID)pDevice) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++#endif
++
++#if defined (PVR_LDM_PLATFORM_MODULE)
++ return 0;
++#endif
++#if defined (PVR_LDM_PCI_MODULE)
++ return;
++#endif
++}
++
++
++static IMG_VOID PVRSRVDriverShutdown(LDM_DEV *pDevice)
++{
++ PVR_TRACE(("PVRSRVDriverShutdown(pDevice=%p)", pDevice));
++
++ (IMG_VOID) PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3);
++}
++
++#endif
++
++
++#if defined(PVR_LDM_MODULE) || defined(SUPPORT_DRI_DRM)
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT PVRSRVDriverSuspend(struct drm_device *pDevice, pm_message_t state)
++#else
++static IMG_INT PVRSRVDriverSuspend(LDM_DEV *pDevice, pm_message_t state)
++#endif
++{
++#if !(defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM))
++ PVR_TRACE(( "PVRSRVDriverSuspend(pDevice=%p)", pDevice));
++
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++#endif
++ return 0;
++}
++
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT PVRSRVDriverResume(struct drm_device *pDevice)
++#else
++static IMG_INT PVRSRVDriverResume(LDM_DEV *pDevice)
++#endif
++{
++#if !(defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM))
++ PVR_TRACE(("PVRSRVDriverResume(pDevice=%p)", pDevice));
++
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++#endif
++ return 0;
++}
++#endif
++
++
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL) && !defined(SUPPORT_DRI_DRM)
++IMG_INT PVRProcSetPowerLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data)
++{
++ IMG_CHAR data_buffer[2];
++ IMG_UINT32 PVRPowerLevel;
++
++ if (count != sizeof(data_buffer))
++ {
++ return -EINVAL;
++ }
++ else
++ {
++ if (copy_from_user(data_buffer, buffer, count))
++ return -EINVAL;
++ if (data_buffer[count - 1] != '\n')
++ return -EINVAL;
++ PVRPowerLevel = data_buffer[0] - '0';
++ if (PVRPowerLevel != gPVRPowerLevel)
++ {
++ if (PVRPowerLevel != 0)
++ {
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++ }
++ else
++ {
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) != PVRSRV_OK)
++ {
++ return -EINVAL;
++ }
++ }
++
++ gPVRPowerLevel = PVRPowerLevel;
++ }
++ }
++ return (count);
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void ProcSeqShowPowerLevel(struct seq_file *sfile,void* el)
++{
++ seq_printf(sfile, "%lu\n", gPVRPowerLevel);
++}
++
++#else
++IMG_INT PVRProcGetPowerLevel(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data)
++{
++ if (off == 0) {
++ *start = (IMG_CHAR *)1;
++ return printAppend(page, count, 0, "%lu\n", gPVRPowerLevel);
++ }
++ *eof = 1;
++ return 0;
++}
++#endif
++
++#endif
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT PVRSRVOpen(struct drm_device unref__ *dev, struct drm_file *pFile)
++#else
++static IMG_INT PVRSRVOpen(struct inode unref__ * pInode, struct file *pFile)
++#endif
++{
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
++ IMG_HANDLE hBlockAlloc;
++ IMG_INT iRet = -ENOMEM;
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32PID;
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
++#endif
++
++#if defined(SUPPORT_DRI_DRM)
++ PVR_UNREFERENCED_PARAMETER(dev);
++#else
++ PVR_UNREFERENCED_PARAMETER(pInode);
++#endif
++
++ LinuxLockMutex(&gPVRSRVLock);
++
++ ui32PID = OSGetCurrentProcessIDKM();
++
++ if (PVRSRVProcessConnect(ui32PID) != PVRSRV_OK)
++ goto err_unlock;
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ psEnvPerProc = PVRSRVPerProcessPrivateData(ui32PID);
++ if (psEnvPerProc == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: No per-process private data", __FUNCTION__));
++ goto err_unlock;
++ }
++#endif
++
++ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_FILE_PRIVATE_DATA),
++ (IMG_PVOID *)&psPrivateData,
++ &hBlockAlloc,
++ "File Private Data");
++
++ if(eError != PVRSRV_OK)
++ goto err_unlock;
++
++#if defined(PVR_SECURE_FD_EXPORT)
++ psPrivateData->hKernelMemInfo = NULL;
++#endif
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ psPrivateData->psDRMFile = pFile;
++
++ list_add_tail(&psPrivateData->sDRMAuthListItem, &psEnvPerProc->sDRMAuthListHead);
++#endif
++ psPrivateData->ui32OpenPID = ui32PID;
++ psPrivateData->hBlockAlloc = hBlockAlloc;
++ PRIVATE_DATA(pFile) = psPrivateData;
++ iRet = 0;
++err_unlock:
++ LinuxUnLockMutex(&gPVRSRVLock);
++ return iRet;
++}
++
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT PVRSRVRelease(struct drm_device unref__ *dev, struct drm_file *pFile)
++#else
++static IMG_INT PVRSRVRelease(struct inode unref__ * pInode, struct file *pFile)
++#endif
++{
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
++
++#if defined(SUPPORT_DRI_DRM)
++ PVR_UNREFERENCED_PARAMETER(dev);
++#else
++ PVR_UNREFERENCED_PARAMETER(pInode);
++#endif
++
++ LinuxLockMutex(&gPVRSRVLock);
++
++ psPrivateData = PRIVATE_DATA(pFile);
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ list_del(&psPrivateData->sDRMAuthListItem);
++#endif
++
++
++ gui32ReleasePID = psPrivateData->ui32OpenPID;
++ PVRSRVProcessDisconnect(psPrivateData->ui32OpenPID);
++ gui32ReleasePID = 0;
++
++ OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_FILE_PRIVATE_DATA),
++ psPrivateData, psPrivateData->hBlockAlloc);
++ PRIVATE_DATA(pFile) = NULL;
++
++ LinuxUnLockMutex(&gPVRSRVLock);
++ return 0;
++}
++
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT PVRCore_Init(IMG_VOID)
++#else
++static IMG_INT __init PVRCore_Init(IMG_VOID)
++#endif
++{
++ IMG_INT error;
++#if !defined(PVR_LDM_MODULE)
++ PVRSRV_ERROR eError;
++#else
++ struct device *psDev;
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++
++ PVRDPFInit();
++#endif
++ PVR_TRACE(("PVRCore_Init"));
++
++ LinuxInitMutex(&gPVRSRVLock);
++
++#ifdef DEBUG
++ PVRDebugSetLevel(debug);
++#endif
++
++ if (CreateProcEntries ())
++ {
++ error = -ENOMEM;
++ return error;
++ }
++
++ if (PVROSFuncInit() != PVRSRV_OK)
++ {
++ error = -ENOMEM;
++ goto init_failed;
++ }
++
++ PVRLinuxMUtilsInit();
++
++ if(LinuxMMInit() != PVRSRV_OK)
++ {
++ error = -ENOMEM;
++ goto init_failed;
++ }
++
++ LinuxBridgeInit();
++
++ PVRMMapInit();
++
++#if defined(PVR_LDM_MODULE)
++
++#if defined(PVR_LDM_PLATFORM_MODULE)
++ if ((error = platform_driver_register(&powervr_driver)) != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform driver (%d)", error));
++
++ goto init_failed;
++ }
++
++#if defined(MODULE)
++ if ((error = platform_device_register(&powervr_device)) != 0)
++ {
++ platform_driver_unregister(&powervr_driver);
++
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform device (%d)", error));
++
++ goto init_failed;
++ }
++#endif
++#endif
++
++#if defined(PVR_LDM_PCI_MODULE)
++ if ((error = pci_register_driver(&powervr_driver)) != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register PCI driver (%d)", error));
++
++ goto init_failed;
++ }
++#endif
++
++#else
++
++ if ((eError = SysInitialise()) != PVRSRV_OK)
++ {
++ error = -ENODEV;
++#if defined(TCF_REV) && (TCF_REV == 110)
++ if(eError == PVRSRV_ERROR_NOT_SUPPORTED)
++ {
++ printk("\nAtlas wrapper (FPGA image) version mismatch");
++ error = -ENODEV;
++ }
++#endif
++ goto init_failed;
++ }
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++ AssignedMajorNumber = register_chrdev(0, DEVNAME, &pvrsrv_fops);
++
++ if (AssignedMajorNumber <= 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to get major number"));
++
++ error = -EBUSY;
++ goto sys_deinit;
++ }
++
++ PVR_TRACE(("PVRCore_Init: major device %d", AssignedMajorNumber));
++#endif
++
++#if defined(PVR_LDM_MODULE)
++
++ psPvrClass = class_create(THIS_MODULE, "pvr");
++
++ if (IS_ERR(psPvrClass))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to create class (%ld)", PTR_ERR(psPvrClass)));
++ error = -EBUSY;
++ goto unregister_device;
++ }
++
++ psDev = device_create(psPvrClass, NULL, MKDEV(AssignedMajorNumber, 0),
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
++ NULL,
++#endif
++ DEVNAME);
++ if (IS_ERR(psDev))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to create device (%ld)", PTR_ERR(psDev)));
++ error = -EBUSY;
++ goto destroy_class;
++ }
++#endif
++
++ return 0;
++
++#if defined(PVR_LDM_MODULE)
++destroy_class:
++ class_destroy(psPvrClass);
++unregister_device:
++ unregister_chrdev((IMG_UINT)AssignedMajorNumber, DRVNAME);
++#endif
++#if !defined(SUPPORT_DRI_DRM)
++sys_deinit:
++#endif
++#if defined(PVR_LDM_MODULE)
++#if defined(PVR_LDM_PCI_MODULE)
++ pci_unregister_driver(&powervr_driver);
++#endif
++
++#if defined (PVR_LDM_PLATFORM_MODULE)
++#if defined (MODULE)
++ platform_device_unregister(&powervr_device);
++#endif
++ platform_driver_unregister(&powervr_driver);
++#endif
++
++#else
++
++ {
++ SYS_DATA *psSysData;
++
++ SysAcquireData(&psSysData);
++ if (psSysData != IMG_NULL)
++ {
++ SysDeinitialise(psSysData);
++ }
++ }
++#endif
++init_failed:
++ PVRMMapCleanup();
++ LinuxMMCleanup();
++ LinuxBridgeDeInit();
++ PVROSFuncDeInit();
++ RemoveProcEntries();
++
++ return error;
++
++}
++
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_VOID PVRCore_Cleanup(IMG_VOID)
++#else
++static IMG_VOID __exit PVRCore_Cleanup(IMG_VOID)
++#endif
++{
++ SYS_DATA *psSysData;
++
++ PVR_TRACE(("PVRCore_Cleanup"));
++
++ SysAcquireData(&psSysData);
++
++#if defined(PVR_LDM_MODULE)
++ device_destroy(psPvrClass, MKDEV(AssignedMajorNumber, 0));
++ class_destroy(psPvrClass);
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
++ if (
++#endif
++ unregister_chrdev((IMG_UINT)AssignedMajorNumber, DRVNAME)
++#if !(LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
++ ;
++#else
++ )
++ {
++ PVR_DPF((PVR_DBG_ERROR," can't unregister device major %d", AssignedMajorNumber));
++ }
++#endif
++#endif
++
++#if defined(PVR_LDM_MODULE)
++
++#if defined(PVR_LDM_PCI_MODULE)
++ pci_unregister_driver(&powervr_driver);
++#endif
++
++#if defined (PVR_LDM_PLATFORM_MODULE)
++#if defined (MODULE)
++ platform_device_unregister(&powervr_device);
++#endif
++ platform_driver_unregister(&powervr_driver);
++#endif
++
++#else
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++ if (gPVRPowerLevel != 0)
++ {
++ if (PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0) == PVRSRV_OK)
++ {
++ gPVRPowerLevel = 0;
++ }
++ }
++#endif
++
++ SysDeinitialise(psSysData);
++#endif
++
++ PVRMMapCleanup();
++
++ LinuxMMCleanup();
++
++ LinuxBridgeDeInit();
++
++ PVROSFuncDeInit();
++
++ RemoveProcEntries();
++
++ PVR_TRACE(("PVRCore_Cleanup: unloading"));
++}
++
++#if !defined(SUPPORT_DRI_DRM)
++module_init(PVRCore_Init);
++module_exit(PVRCore_Cleanup);
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutex.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutex.c
+new file mode 100644
+index 0000000..d66e697
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutex.c
+@@ -0,0 +1,136 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/version.h>
++#include <linux/errno.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++#include <linux/mutex.h>
++#else
++#include <asm/semaphore.h>
++#endif
++#include <linux/module.h>
++
++#include <img_defs.h>
++#include <services.h>
++
++#include "mutex.h"
++
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++
++IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ mutex_init(psPVRSRVMutex);
++}
++
++IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ mutex_lock(psPVRSRVMutex);
++}
++
++PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ if(mutex_lock_interruptible(psPVRSRVMutex) == -EINTR)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ else
++ {
++ return PVRSRV_OK;
++ }
++}
++
++IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ return mutex_trylock(psPVRSRVMutex);
++}
++
++IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ mutex_unlock(psPVRSRVMutex);
++}
++
++IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ return (IMG_BOOL)mutex_is_locked(psPVRSRVMutex);
++}
++
++
++#else
++
++
++IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ init_MUTEX(&psPVRSRVMutex->sSemaphore);
++ atomic_set(&psPVRSRVMutex->Count, 0);
++}
++
++IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ down(&psPVRSRVMutex->sSemaphore);
++ atomic_dec(&psPVRSRVMutex->Count);
++}
++
++PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ if(down_interruptible(&psPVRSRVMutex->sSemaphore) == -EINTR)
++ {
++
++ return PVRSRV_ERROR_GENERIC;
++ }else{
++ atomic_dec(&psPVRSRVMutex->Count);
++ return PVRSRV_OK;
++ }
++}
++
++IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ IMG_INT32 Status = down_trylock(&psPVRSRVMutex->sSemaphore);
++ if(Status == 0)
++ {
++ atomic_dec(&psPVRSRVMutex->Count);
++ }
++
++ return Status;
++}
++
++IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ atomic_inc(&psPVRSRVMutex->Count);
++ up(&psPVRSRVMutex->sSemaphore);
++}
++
++IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++ IMG_INT32 iCount;
++
++ iCount = atomic_read(&psPVRSRVMutex->Count);
++
++ return (IMG_BOOL)iCount;
++}
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutex.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutex.h
+new file mode 100644
+index 0000000..b24a599
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutex.h
+@@ -0,0 +1,70 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __INCLUDED_LINUX_MUTEX_H_
++#define __INCLUDED_LINUX_MUTEX_H_
++
++#include <linux/version.h>
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++#include <linux/mutex.h>
++#else
++#include <asm/semaphore.h>
++#endif
++
++
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++
++typedef struct mutex PVRSRV_LINUX_MUTEX;
++
++#else
++
++
++typedef struct {
++ struct semaphore sSemaphore;
++
++ atomic_t Count;
++}PVRSRV_LINUX_MUTEX;
++
++#endif
++
++
++extern IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutils.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutils.c
+new file mode 100644
+index 0000000..83eab51
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutils.c
+@@ -0,0 +1,133 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++#include <linux/version.h>
++
++#include <linux/spinlock.h>
++#include <linux/mm.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++
++#include "img_defs.h"
++#include "pvr_debug.h"
++#include "mutils.h"
++
++#if defined(SUPPORT_LINUX_X86_PAT)
++#define PAT_LINUX_X86_WC 1
++
++#define PAT_X86_ENTRY_BITS 8
++
++#define PAT_X86_BIT_PWT 1U
++#define PAT_X86_BIT_PCD 2U
++#define PAT_X86_BIT_PAT 4U
++#define PAT_X86_BIT_MASK (PAT_X86_BIT_PAT | PAT_X86_BIT_PCD | PAT_X86_BIT_PWT)
++
++static IMG_BOOL g_write_combining_available = IMG_FALSE;
++
++#define PROT_TO_PAT_INDEX(v, B) ((v & _PAGE_ ## B) ? PAT_X86_BIT_ ## B : 0)
++
++static inline IMG_UINT
++pvr_pat_index(pgprotval_t prot_val)
++{
++ IMG_UINT ret = 0;
++ pgprotval_t val = prot_val & _PAGE_CACHE_MASK;
++
++ ret |= PROT_TO_PAT_INDEX(val, PAT);
++ ret |= PROT_TO_PAT_INDEX(val, PCD);
++ ret |= PROT_TO_PAT_INDEX(val, PWT);
++
++ return ret;
++}
++
++static inline IMG_UINT
++pvr_pat_entry(u64 pat, IMG_UINT index)
++{
++ return (IMG_UINT)(pat >> (index * PAT_X86_ENTRY_BITS)) & PAT_X86_BIT_MASK;
++}
++
++static IMG_VOID
++PVRLinuxX86PATProbe(IMG_VOID)
++{
++
++ if (cpu_has_pat)
++ {
++ u64 pat;
++ IMG_UINT pat_index;
++ IMG_UINT pat_entry;
++
++ PVR_TRACE(("%s: PAT available", __FUNCTION__));
++
++ rdmsrl(MSR_IA32_CR_PAT, pat);
++ PVR_TRACE(("%s: Top 32 bits of PAT: 0x%.8x", __FUNCTION__, (IMG_UINT)(pat >> 32)));
++ PVR_TRACE(("%s: Bottom 32 bits of PAT: 0x%.8x", __FUNCTION__, (IMG_UINT)(pat)));
++
++ pat_index = pvr_pat_index(_PAGE_CACHE_WC);
++ PVR_TRACE(("%s: PAT index for write combining: %u", __FUNCTION__, pat_index));
++
++ pat_entry = pvr_pat_entry(pat, pat_index);
++ PVR_TRACE(("%s: PAT entry for write combining: 0x%.2x (should be 0x%.2x)", __FUNCTION__, pat_entry, PAT_LINUX_X86_WC));
++
++#if defined(SUPPORT_LINUX_X86_WRITECOMBINE)
++ g_write_combining_available = (IMG_BOOL)(pat_entry == PAT_LINUX_X86_WC);
++#endif
++ }
++#if defined(DEBUG)
++#if defined(SUPPORT_LINUX_X86_WRITECOMBINE)
++ if (g_write_combining_available)
++ {
++ PVR_TRACE(("%s: Write combining available via PAT", __FUNCTION__));
++ }
++ else
++ {
++ PVR_TRACE(("%s: Write combining not available", __FUNCTION__));
++ }
++#else
++ PVR_TRACE(("%s: Write combining disabled in driver build", __FUNCTION__));
++#endif
++#endif
++}
++
++pgprot_t
++pvr_pgprot_writecombine(pgprot_t prot)
++{
++
++
++ return (g_write_combining_available) ?
++ __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_MASK) | _PAGE_CACHE_WC) : pgprot_noncached(prot);
++}
++#endif
++
++IMG_VOID
++PVRLinuxMUtilsInit(IMG_VOID)
++{
++#if defined(SUPPORT_LINUX_X86_PAT)
++ PVRLinuxX86PATProbe();
++#endif
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutils.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutils.h
+new file mode 100644
+index 0000000..943c2bd
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/mutils.h
+@@ -0,0 +1,101 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __IMG_LINUX_MUTILS_H__
++#define __IMG_LINUX_MUTILS_H__
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++
++#if !(defined(__i386__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)))
++#if defined(SUPPORT_LINUX_X86_PAT)
++#undef SUPPORT_LINUX_X86_PAT
++#endif
++#endif
++
++#if defined(SUPPORT_LINUX_X86_PAT)
++ pgprot_t pvr_pgprot_writecombine(pgprot_t prot);
++ #define PGPROT_WC(pv) pvr_pgprot_writecombine(pv)
++#else
++ #if defined(__arm__) || defined(__sh__)
++ #define PGPROT_WC(pv) pgprot_writecombine(pv)
++ #else
++ #if defined(__i386__)
++ #define PGPROT_WC(pv) pgprot_noncached(pv)
++ #else
++ #define PGPROT_WC(pv) pgprot_noncached(pv)
++ #error Unsupported architecture!
++ #endif
++ #endif
++#endif
++
++#define PGPROT_UC(pv) pgprot_noncached(pv)
++
++#if defined(__i386__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
++ #define IOREMAP(pa, bytes) ioremap_cache(pa, bytes)
++#else
++ #if defined(__arm__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++ #define IOREMAP(pa, bytes) ioremap_cached(pa, bytes)
++ #else
++ #define IOREMAP(pa, bytes) ioremap(pa, bytes)
++ #endif
++#endif
++
++#if defined(SUPPORT_LINUX_X86_PAT)
++ #if defined(SUPPORT_LINUX_X86_WRITECOMBINE)
++ #define IOREMAP_WC(pa, bytes) ioremap_wc(pa, bytes)
++ #else
++ #define IOREMAP_WC(pa, bytes) ioremap_nocache(pa, bytes)
++ #endif
++#else
++ #if defined(__arm__)
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
++ #define IOREMAP_WC(pa, bytes) ioremap_wc(pa, bytes)
++ #else
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++ #define IOREMAP_WC(pa, bytes) ioremap_nocache(pa, bytes)
++ #else
++ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17))
++ #define IOREMAP_WC(pa, bytes) __ioremap(pa, bytes, L_PTE_BUFFERABLE)
++ #else
++ #define IOREMAP_WC(pa, bytes) __ioremap(pa, bytes, , L_PTE_BUFFERABLE, 1)
++ #endif
++ #endif
++ #endif
++ #else
++ #define IOREMAP_WC(pa, bytes) ioremap_nocache(pa, bytes)
++ #endif
++#endif
++
++#define IOREMAP_UC(pa, bytes) ioremap_nocache(pa, bytes)
++
++IMG_VOID PVRLinuxMUtilsInit(IMG_VOID);
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/osfunc.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/osfunc.c
+new file mode 100644
+index 0000000..0e2b68c
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/osfunc.c
+@@ -0,0 +1,2564 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <asm/io.h>
++#include <asm/page.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#include <asm/system.h>
++#endif
++#if defined(SUPPORT_CPU_CACHED_BUFFERS)
++#include <asm/cacheflush.h>
++#endif
++#include <linux/mm.h>
++#include <linux/pagemap.h>
++#include <linux/hugetlb.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/delay.h>
++#include <linux/pci.h>
++
++#include <linux/string.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <asm/hardirq.h>
++#include <linux/timer.h>
++#include <linux/capability.h>
++#include <asm/uaccess.h>
++#include <linux/spinlock.h>
++#if defined(PVR_LINUX_MISR_USING_WORKQUEUE) || \
++ defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE) || \
++ defined(PVR_LINUX_TIMERS_USING_WORKQUEUES) || \
++ defined(PVR_LINUX_USING_WORKQUEUES)
++#include <linux/workqueue.h>
++#endif
++
++#include "img_types.h"
++#include "services_headers.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "env_data.h"
++#include "proc.h"
++#include "mutex.h"
++#include "event.h"
++#include "linkage.h"
++
++#define EVENT_OBJECT_TIMEOUT_MS (100)
++
++#if defined(SUPPORT_CPU_CACHED_BUFFERS) || \
++ defined(SUPPORT_CACHEFLUSH_ON_ALLOC)
++
++#if defined(__i386__)
++static void per_cpu_cache_flush(void *arg)
++{
++ PVR_UNREFERENCED_PARAMETER(arg);
++ wbinvd();
++}
++#endif
++
++#if !defined(SUPPORT_CPU_CACHED_BUFFERS)
++static
++#endif
++IMG_VOID OSFlushCPUCacheKM(IMG_VOID)
++{
++#if defined(__arm__)
++ flush_cache_all();
++#elif defined(__i386__)
++
++ on_each_cpu(per_cpu_cache_flush, NULL, 1);
++#else
++#error "Implement full CPU cache flush for this CPU!"
++#endif
++}
++
++#endif
++#if defined(SUPPORT_CPU_CACHED_BUFFERS)
++
++IMG_VOID OSFlushCPUCacheRangeKM(IMG_VOID *pvRangeAddrStart,
++ IMG_VOID *pvRangeAddrEnd)
++{
++ PVR_UNREFERENCED_PARAMETER(pvRangeAddrStart);
++ PVR_UNREFERENCED_PARAMETER(pvRangeAddrEnd);
++
++
++ OSFlushCPUCacheKM();
++}
++
++#endif
++
++#define HOST_ALLOC_MEM_USING_KMALLOC ((IMG_HANDLE)0)
++#define HOST_ALLOC_MEM_USING_VMALLOC ((IMG_HANDLE)1)
++
++#if !defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID *ppvCpuVAddr, IMG_HANDLE *phBlockAlloc)
++#else
++PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID *ppvCpuVAddr, IMG_HANDLE *phBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line)
++#endif
++{
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ *ppvCpuVAddr = _KMallocWrapper(ui32Size, pszFilename, ui32Line);
++#else
++ *ppvCpuVAddr = KMallocWrapper(ui32Size);
++#endif
++ if(*ppvCpuVAddr)
++ {
++ if (phBlockAlloc)
++ {
++
++ *phBlockAlloc = HOST_ALLOC_MEM_USING_KMALLOC;
++ }
++ }
++ else
++ {
++ if (!phBlockAlloc)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ *ppvCpuVAddr = _VMallocWrapper(ui32Size, PVRSRV_HAP_CACHED, pszFilename, ui32Line);
++#else
++ *ppvCpuVAddr = VMallocWrapper(ui32Size, PVRSRV_HAP_CACHED);
++#endif
++ if (!*ppvCpuVAddr)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++
++ *phBlockAlloc = HOST_ALLOC_MEM_USING_VMALLOC;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++#if !defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID pvCpuVAddr, IMG_HANDLE hBlockAlloc)
++#else
++PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID pvCpuVAddr, IMG_HANDLE hBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line)
++#endif
++{
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++
++ if (hBlockAlloc == HOST_ALLOC_MEM_USING_VMALLOC)
++ {
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ _VFreeWrapper(pvCpuVAddr, pszFilename, ui32Line);
++#else
++ VFreeWrapper(pvCpuVAddr);
++#endif
++ }
++ else
++ {
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ _KFreeWrapper(pvCpuVAddr, pszFilename, ui32Line);
++#else
++ KFreeWrapper(pvCpuVAddr);
++#endif
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSAllocPages_Impl(IMG_UINT32 ui32AllocFlags,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PageSize,
++ IMG_VOID **ppvCpuVAddr,
++ IMG_HANDLE *phOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ PVR_UNREFERENCED_PARAMETER(ui32PageSize);
++
++#if 0
++
++ if(ui32AllocFlags & PVRSRV_HAP_SINGLE_PROCESS)
++ {
++ ui32AllocFlags &= ~PVRSRV_HAP_SINGLE_PROCESS;
++ ui32AllocFlags |= PVRSRV_HAP_MULTI_PROCESS;
++ }
++#endif
++
++ switch(ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK)
++ {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ {
++ psLinuxMemArea = NewVMallocLinuxMemArea(ui32Size, ui32AllocFlags);
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ break;
++ }
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ {
++
++
++ psLinuxMemArea = NewAllocPagesLinuxMemArea(ui32Size, ui32AllocFlags);
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++
++#if defined(VIVT_CACHE) || defined(__sh__)
++
++ ui32AllocFlags &= ~PVRSRV_HAP_CACHED;
++#endif
++ psLinuxMemArea = NewVMallocLinuxMemArea(ui32Size, ui32AllocFlags);
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "OSAllocPages: invalid flags 0x%x\n", ui32AllocFlags));
++ *ppvCpuVAddr = NULL;
++ *phOSMemHandle = (IMG_HANDLE)0;
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++#if defined(SUPPORT_CACHEFLUSH_ON_ALLOC)
++
++ if(ui32AllocFlags & (PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_UNCACHED))
++ {
++ OSFlushCPUCacheKM();
++ }
++#endif
++
++ *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea);
++ *phOSMemHandle = psLinuxMemArea;
++
++ LinuxMemAreaRegister(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSFreePages(IMG_UINT32 ui32AllocFlags, IMG_UINT32 ui32Bytes, IMG_VOID *pvCpuVAddr, IMG_HANDLE hOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea;
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++
++ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++ switch(ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK)
++ {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ break;
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ case PVRSRV_HAP_MULTI_PROCESS:
++ if(PVRMMapRemoveRegisteredArea(psLinuxMemArea) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSFreePages(ui32AllocFlags=0x%08X, ui32Bytes=%ld, "
++ "pvCpuVAddr=%p, hOSMemHandle=%p) FAILED!",
++ ui32AllocFlags, ui32Bytes, pvCpuVAddr, hOSMemHandle));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,"%s: invalid flags 0x%x\n",
++ __FUNCTION__, ui32AllocFlags));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSGetSubMemHandle(IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32ByteOffset,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE *phOSMemHandleRet)
++{
++ LinuxMemArea *psParentLinuxMemArea, *psLinuxMemArea;
++ PVRSRV_ERROR eError;
++
++ psParentLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++ psLinuxMemArea = NewSubLinuxMemArea(psParentLinuxMemArea, ui32ByteOffset, ui32Bytes);
++ if(!psLinuxMemArea)
++ {
++ *phOSMemHandleRet = NULL;
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ *phOSMemHandleRet = psLinuxMemArea;
++
++
++ if(ui32Flags & PVRSRV_HAP_KERNEL_ONLY)
++ {
++ return PVRSRV_OK;
++ }
++
++ eError = PVRMMapRegisterArea(psLinuxMemArea);
++ if(eError != PVRSRV_OK)
++ {
++ goto failed_register_area;
++ }
++
++ return PVRSRV_OK;
++
++failed_register_area:
++ *phOSMemHandleRet = NULL;
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++ return eError;
++}
++
++PVRSRV_ERROR
++OSReleaseSubMemHandle(IMG_VOID *hOSMemHandle, IMG_UINT32 ui32Flags)
++{
++ LinuxMemArea *psLinuxMemArea;
++ PVRSRV_ERROR eError;
++
++ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++ PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC);
++
++ if((ui32Flags & PVRSRV_HAP_KERNEL_ONLY) == 0)
++ {
++ eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea);
++ if(eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++
++IMG_CPU_PHYADDR
++OSMemHandleToCpuPAddr(IMG_VOID *hOSMemHandle, IMG_UINT32 ui32ByteOffset)
++{
++ PVR_ASSERT(hOSMemHandle);
++
++ return LinuxMemAreaToCpuPAddr(hOSMemHandle, ui32ByteOffset);
++}
++
++
++
++IMG_VOID OSMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_UINT32 ui32Size)
++{
++#if defined(USE_UNOPTIMISED_MEMCPY)
++ IMG_UINT8 *Src,*Dst;
++ IMG_INT i;
++
++ Src=(IMG_UINT8 *)pvSrc;
++ Dst=(IMG_UINT8 *)pvDst;
++ for(i=0;i<ui32Size;i++)
++ {
++ Dst[i]=Src[i];
++ }
++#else
++ memcpy(pvDst, pvSrc, ui32Size);
++#endif
++}
++
++
++IMG_VOID OSMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size)
++{
++#if defined(USE_UNOPTIMISED_MEMSET)
++ IMG_UINT8 *Buff;
++ IMG_INT i;
++
++ Buff=(IMG_UINT8 *)pvDest;
++ for(i=0;i<ui32Size;i++)
++ {
++ Buff[i]=ui8Value;
++ }
++#else
++ memset(pvDest, (IMG_INT) ui8Value, (size_t) ui32Size);
++#endif
++}
++
++
++IMG_CHAR *OSStringCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc)
++{
++ return (strcpy(pszDest, pszSrc));
++}
++
++IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, IMG_UINT32 ui32Size, const IMG_CHAR *pszFormat, ...)
++{
++ va_list argList;
++ IMG_INT32 iCount;
++
++ va_start(argList, pszFormat);
++ iCount = vsnprintf(pStr, (size_t)ui32Size, pszFormat, argList);
++ va_end(argList);
++
++ return iCount;
++}
++
++IMG_VOID OSBreakResourceLock (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID)
++{
++ volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock;
++
++ if(*pui32Access)
++ {
++ if(psResource->ui32ID == ui32ID)
++ {
++ psResource->ui32ID = 0;
++ *pui32Access = 0;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"OSBreakResourceLock: Resource is not locked for this process."));
++ }
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"OSBreakResourceLock: Resource is not locked"));
++ }
++}
++
++
++PVRSRV_ERROR OSCreateResource(PVRSRV_RESOURCE *psResource)
++{
++ psResource->ui32ID = 0;
++ psResource->ui32Lock = 0;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSDestroyResource (PVRSRV_RESOURCE *psResource)
++{
++ OSBreakResourceLock (psResource, psResource->ui32ID);
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSInitEnvData(IMG_PVOID *ppvEnvSpecificData)
++{
++ ENV_DATA *psEnvData;
++
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), (IMG_VOID **)&psEnvData, IMG_NULL,
++ "Environment Data") != PVRSRV_OK)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE,
++ &psEnvData->pvBridgeData, IMG_NULL,
++ "Bridge Data") != PVRSRV_OK)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), psEnvData, IMG_NULL);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++
++ psEnvData->bMISRInstalled = IMG_FALSE;
++ psEnvData->bLISRInstalled = IMG_FALSE;
++
++
++ *ppvEnvSpecificData = psEnvData;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSDeInitEnvData(IMG_PVOID pvEnvSpecificData)
++{
++ ENV_DATA *psEnvData = (ENV_DATA*)pvEnvSpecificData;
++
++ PVR_ASSERT(!psEnvData->bMISRInstalled);
++ PVR_ASSERT(!psEnvData->bLISRInstalled);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE, psEnvData->pvBridgeData, IMG_NULL);
++ psEnvData->pvBridgeData = IMG_NULL;
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), pvEnvSpecificData, IMG_NULL);
++
++
++ return PVRSRV_OK;
++}
++
++
++
++IMG_VOID OSReleaseThreadQuanta(IMG_VOID)
++{
++ schedule();
++}
++
++
++
++IMG_UINT32 OSClockus(IMG_VOID)
++{
++ IMG_UINT32 time, j = jiffies;
++
++ time = j * (1000000 / HZ);
++
++ return time;
++}
++
++
++
++IMG_VOID OSWaitus(IMG_UINT32 ui32Timeus)
++{
++ udelay(ui32Timeus);
++}
++
++
++IMG_UINT32 OSGetCurrentProcessIDKM(IMG_VOID)
++{
++ if (in_interrupt())
++ {
++ return KERNEL_ID;
++ }
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++ return (IMG_UINT32)current->pgrp;
++#else
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
++ return (IMG_UINT32)task_tgid_nr(current);
++#else
++ return (IMG_UINT32)current->tgid;
++#endif
++#endif
++}
++
++
++IMG_UINT32 OSGetPageSize(IMG_VOID)
++{
++#if defined(__sh__)
++ IMG_UINT32 ui32ReturnValue = PAGE_SIZE;
++
++ return (ui32ReturnValue);
++#else
++ return PAGE_SIZE;
++#endif
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++static irqreturn_t DeviceISRWrapper(int irq, void *dev_id
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++ , struct pt_regs *regs
++#endif
++ )
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ IMG_BOOL bStatus = IMG_FALSE;
++
++ PVR_UNREFERENCED_PARAMETER(irq);
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++ PVR_UNREFERENCED_PARAMETER(regs);
++#endif
++ psDeviceNode = (PVRSRV_DEVICE_NODE*)dev_id;
++ if(!psDeviceNode)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "DeviceISRWrapper: invalid params\n"));
++ goto out;
++ }
++
++ bStatus = PVRSRVDeviceLISR(psDeviceNode);
++
++ if (bStatus)
++ {
++ OSScheduleMISR((IMG_VOID *)psDeviceNode->psSysData);
++ }
++
++out:
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++ return bStatus ? IRQ_HANDLED : IRQ_NONE;
++#endif
++}
++
++
++
++static irqreturn_t SystemISRWrapper(int irq, void *dev_id
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++ , struct pt_regs *regs
++#endif
++ )
++{
++ SYS_DATA *psSysData;
++ IMG_BOOL bStatus = IMG_FALSE;
++
++ PVR_UNREFERENCED_PARAMETER(irq);
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++ PVR_UNREFERENCED_PARAMETER(regs);
++#endif
++ psSysData = (SYS_DATA *)dev_id;
++ if(!psSysData)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "SystemISRWrapper: invalid params\n"));
++ goto out;
++ }
++
++ bStatus = PVRSRVSystemLISR(psSysData);
++
++ if (bStatus)
++ {
++ OSScheduleMISR((IMG_VOID *)psSysData);
++ }
++
++out:
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++ return bStatus ? IRQ_HANDLED : IRQ_NONE;
++#endif
++}
++PVRSRV_ERROR OSInstallDeviceLISR(IMG_VOID *pvSysData,
++ IMG_UINT32 ui32Irq,
++ IMG_CHAR *pszISRName,
++ IMG_VOID *pvDeviceNode)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bLISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSInstallDeviceLISR: An ISR has already been installed: IRQ %d cookie %x", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Installing device LISR %s on IRQ %d with cookie %x", pszISRName, ui32Irq, pvDeviceNode));
++
++ if(request_irq(ui32Irq, DeviceISRWrapper,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22))
++ SA_SHIRQ
++#else
++ IRQF_SHARED
++#endif
++ , pszISRName, pvDeviceNode))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"OSInstallDeviceLISR: Couldn't install device LISR on IRQ %d", ui32Irq));
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psEnvData->ui32IRQ = ui32Irq;
++ psEnvData->pvISRCookie = pvDeviceNode;
++ psEnvData->bLISRInstalled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSUninstallDeviceLISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bLISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUninstallDeviceLISR: No LISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Uninstalling device LISR on IRQ %d with cookie %x", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++
++ free_irq(psEnvData->ui32IRQ, psEnvData->pvISRCookie);
++
++ psEnvData->bLISRInstalled = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSInstallSystemLISR(IMG_VOID *pvSysData, IMG_UINT32 ui32Irq)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bLISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSInstallSystemLISR: An LISR has already been installed: IRQ %d cookie %x", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Installing system LISR on IRQ %d with cookie %x", ui32Irq, pvSysData));
++
++ if(request_irq(ui32Irq, SystemISRWrapper,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22))
++ SA_SHIRQ
++#else
++ IRQF_SHARED
++#endif
++ , "PowerVR", pvSysData))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"OSInstallSystemLISR: Couldn't install system LISR on IRQ %d", ui32Irq));
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psEnvData->ui32IRQ = ui32Irq;
++ psEnvData->pvISRCookie = pvSysData;
++ psEnvData->bLISRInstalled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSUninstallSystemLISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bLISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUninstallSystemLISR: No LISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Uninstalling system LISR on IRQ %d with cookie %x", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++
++ free_irq(psEnvData->ui32IRQ, psEnvData->pvISRCookie);
++
++ psEnvData->bLISRInstalled = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++#if defined(PVR_LINUX_MISR_USING_PRIVATE_WORKQUEUE)
++static void MISRWrapper(
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++ void *data
++#else
++ struct work_struct *data
++#endif
++)
++{
++ ENV_DATA *psEnvData = container_of(data, ENV_DATA, sMISRWork);
++ SYS_DATA *psSysData = (SYS_DATA *)psEnvData->pvMISRData;
++
++ PVRSRVMISR(psSysData);
++}
++
++
++PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: An MISR has already been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Installing MISR with cookie %p", pvSysData));
++
++ psEnvData->psWorkQueue = create_singlethread_workqueue("pvr_workqueue");
++
++ if (psEnvData->psWorkQueue == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: create_singlethreaded_workqueue failed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ INIT_WORK(&psEnvData->sMISRWork, MISRWrapper
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++ , (void *)&psEnvData->sMISRWork
++#endif
++ );
++
++ psEnvData->pvMISRData = pvSysData;
++ psEnvData->bMISRInstalled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUninstallMISR: No MISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Uninstalling MISR"));
++
++ destroy_workqueue(psEnvData->psWorkQueue);
++
++ psEnvData->bMISRInstalled = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA*)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ queue_work(psEnvData->psWorkQueue, &psEnvData->sMISRWork);
++ }
++
++ return PVRSRV_OK;
++}
++#else
++#if defined(PVR_LINUX_MISR_USING_WORKQUEUE)
++static void MISRWrapper(
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++ void *data
++#else
++ struct work_struct *data
++#endif
++)
++{
++ ENV_DATA *psEnvData = container_of(data, ENV_DATA, sMISRWork);
++ SYS_DATA *psSysData = (SYS_DATA *)psEnvData->pvMISRData;
++
++ PVRSRVMISR(psSysData);
++}
++
++
++PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: An MISR has already been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Installing MISR with cookie %x", pvSysData));
++
++ INIT_WORK(&psEnvData->sMISRWork, MISRWrapper
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++ , (void *)&psEnvData->sMISRWork
++#endif
++ );
++
++ psEnvData->pvMISRData = pvSysData;
++ psEnvData->bMISRInstalled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUninstallMISR: No MISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Uninstalling MISR"));
++
++ flush_scheduled_work();
++
++ psEnvData->bMISRInstalled = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA*)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ schedule_work(&psEnvData->sMISRWork);
++ }
++
++ return PVRSRV_OK;
++}
++
++#else
++
++
++static void MISRWrapper(unsigned long data)
++{
++ SYS_DATA *psSysData;
++
++ psSysData = (SYS_DATA *)data;
++
++ PVRSRVMISR(psSysData);
++}
++
++
++PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: An MISR has already been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Installing MISR with cookie %x", pvSysData));
++
++ tasklet_init(&psEnvData->sMISRTasklet, MISRWrapper, (unsigned long)pvSysData);
++
++ psEnvData->bMISRInstalled = IMG_TRUE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++ if (!psEnvData->bMISRInstalled)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUninstallMISR: No MISR has been installed"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ PVR_TRACE(("Uninstalling MISR"));
++
++ tasklet_kill(&psEnvData->sMISRTasklet);
++
++ psEnvData->bMISRInstalled = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData)
++{
++ SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++ ENV_DATA *psEnvData = (ENV_DATA*)psSysData->pvEnvSpecificData;
++
++ if (psEnvData->bMISRInstalled)
++ {
++ tasklet_schedule(&psEnvData->sMISRTasklet);
++ }
++
++ return PVRSRV_OK;
++}
++
++#endif
++#endif
++
++#endif
++
++IMG_VOID OSPanic(IMG_VOID)
++{
++ BUG();
++}
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#define OS_TAS(p) xchg((p), 1)
++#else
++#define OS_TAS(p) tas(p)
++#endif
++PVRSRV_ERROR OSLockResource ( PVRSRV_RESOURCE *psResource,
++ IMG_UINT32 ui32ID)
++
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(!OS_TAS(&psResource->ui32Lock))
++ psResource->ui32ID = ui32ID;
++ else
++ eError = PVRSRV_ERROR_GENERIC;
++
++ return eError;
++}
++
++
++PVRSRV_ERROR OSUnlockResource (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID)
++{
++ volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(*pui32Access)
++ {
++ if(psResource->ui32ID == ui32ID)
++ {
++ psResource->ui32ID = 0;
++ *pui32Access = 0;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"OSUnlockResource: Resource %p is not locked with expected value.", psResource));
++ PVR_DPF((PVR_DBG_MESSAGE,"Should be %x is actually %x", ui32ID, psResource->ui32ID));
++ eError = PVRSRV_ERROR_GENERIC;
++ }
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"OSUnlockResource: Resource %p is not locked", psResource));
++ eError = PVRSRV_ERROR_GENERIC;
++ }
++
++ return eError;
++}
++
++
++IMG_BOOL OSIsResourceLocked (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID)
++{
++ volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock;
++
++ return (*(volatile IMG_UINT32 *)pui32Access == 1) && (psResource->ui32ID == ui32ID)
++ ? IMG_TRUE
++ : IMG_FALSE;
++}
++
++
++IMG_CPU_PHYADDR OSMapLinToCPUPhys(IMG_VOID *pvLinAddr)
++{
++ IMG_CPU_PHYADDR CpuPAddr;
++
++ CpuPAddr.uiAddr = (IMG_UINTPTR_T)VMallocToPhys(pvLinAddr);
++
++ return CpuPAddr;
++}
++
++
++IMG_VOID *
++OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_HANDLE *phOSMemHandle)
++{
++ if(phOSMemHandle)
++ {
++ *phOSMemHandle = (IMG_HANDLE)0;
++ }
++
++ if(ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY)
++ {
++ IMG_VOID *pvIORemapCookie;
++ pvIORemapCookie = IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags);
++ if(pvIORemapCookie == IMG_NULL)
++ {
++ return NULL;
++ }
++ return pvIORemapCookie;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSMapPhysToLin should only be used with PVRSRV_HAP_KERNEL_ONLY "
++ " (Use OSReservePhys otherwise)"));
++ return NULL;
++ }
++}
++
++IMG_BOOL
++OSUnMapPhysToLin(IMG_VOID *pvLinAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32MappingFlags, IMG_HANDLE hPageAlloc)
++{
++ PVR_TRACE(("%s: unmapping %d bytes from 0x%08x", __FUNCTION__, ui32Bytes, pvLinAddr));
++
++ PVR_UNREFERENCED_PARAMETER(hPageAlloc);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++
++ if(ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY)
++ {
++ IOUnmapWrapper(pvLinAddr);
++ return IMG_TRUE;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSUnMapPhysToLin should only be used with PVRSRV_HAP_KERNEL_ONLY "
++ " (Use OSUnReservePhys otherwise)"));
++ return IMG_FALSE;
++ }
++}
++
++static PVRSRV_ERROR
++RegisterExternalMem(IMG_SYS_PHYADDR *pBasePAddr,
++ IMG_VOID *pvCPUVAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_BOOL bPhysContig,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_HANDLE *phOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
++ {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ {
++ psLinuxMemArea = NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, ui32Bytes, bPhysContig, ui32MappingFlags);
++
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ }
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ {
++ psLinuxMemArea = NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, ui32Bytes, bPhysContig, ui32MappingFlags);
++
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++
++#if defined(VIVT_CACHE) || defined(__sh__)
++
++ ui32MappingFlags &= ~PVRSRV_HAP_CACHED;
++#endif
++ psLinuxMemArea = NewExternalKVLinuxMemArea(pBasePAddr, pvCPUVAddr, ui32Bytes, bPhysContig, ui32MappingFlags);
++
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ default:
++ PVR_DPF((PVR_DBG_ERROR,"OSRegisterMem : invalid flags 0x%x\n", ui32MappingFlags));
++ *phOSMemHandle = (IMG_HANDLE)0;
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ *phOSMemHandle = (IMG_HANDLE)psLinuxMemArea;
++
++ LinuxMemAreaRegister(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSRegisterMem(IMG_CPU_PHYADDR BasePAddr,
++ IMG_VOID *pvCPUVAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_HANDLE *phOSMemHandle)
++{
++ IMG_SYS_PHYADDR SysPAddr = SysCpuPAddrToSysPAddr(BasePAddr);
++
++ return RegisterExternalMem(&SysPAddr, pvCPUVAddr, ui32Bytes, IMG_TRUE, ui32MappingFlags, phOSMemHandle);
++}
++
++
++PVRSRV_ERROR OSRegisterDiscontigMem(IMG_SYS_PHYADDR *pBasePAddr, IMG_VOID *pvCPUVAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32MappingFlags, IMG_HANDLE *phOSMemHandle)
++{
++ return RegisterExternalMem(pBasePAddr, pvCPUVAddr, ui32Bytes, IMG_FALSE, ui32MappingFlags, phOSMemHandle);
++}
++
++
++PVRSRV_ERROR
++OSUnRegisterMem (IMG_VOID *pvCpuVAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_HANDLE hOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++
++ switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
++ {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ break;
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++ if(PVRMMapRemoveRegisteredArea(psLinuxMemArea) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s(%p, %d, 0x%08X, %p) FAILED!",
++ __FUNCTION__, pvCpuVAddr, ui32Bytes,
++ ui32MappingFlags, hOSMemHandle));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUnRegisterMem : invalid flags 0x%x", ui32MappingFlags));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSUnRegisterDiscontigMem(IMG_VOID *pvCpuVAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle)
++{
++ return OSUnRegisterMem(pvCpuVAddr, ui32Bytes, ui32Flags, hOSMemHandle);
++}
++
++PVRSRV_ERROR
++OSReservePhys(IMG_CPU_PHYADDR BasePAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_VOID **ppvCpuVAddr,
++ IMG_HANDLE *phOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++#if 0
++
++ if(ui32MappingFlags & PVRSRV_HAP_SINGLE_PROCESS)
++ {
++ ui32MappingFlags &= ~PVRSRV_HAP_SINGLE_PROCESS;
++ ui32MappingFlags |= PVRSRV_HAP_MULTI_PROCESS;
++ }
++#endif
++
++ switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
++ {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ {
++
++ psLinuxMemArea = NewIORemapLinuxMemArea(BasePAddr, ui32Bytes, ui32MappingFlags);
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ }
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ {
++
++ psLinuxMemArea = NewIOLinuxMemArea(BasePAddr, ui32Bytes, ui32MappingFlags);
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++
++#if defined(VIVT_CACHE) || defined(__sh__)
++
++ ui32MappingFlags &= ~PVRSRV_HAP_CACHED;
++#endif
++ psLinuxMemArea = NewIORemapLinuxMemArea(BasePAddr, ui32Bytes, ui32MappingFlags);
++ if(!psLinuxMemArea)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ PVRMMapRegisterArea(psLinuxMemArea);
++ break;
++ }
++ default:
++ PVR_DPF((PVR_DBG_ERROR,"OSMapPhysToLin : invalid flags 0x%x\n", ui32MappingFlags));
++ *ppvCpuVAddr = NULL;
++ *phOSMemHandle = (IMG_HANDLE)0;
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ *phOSMemHandle = (IMG_HANDLE)psLinuxMemArea;
++ *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea);
++
++ LinuxMemAreaRegister(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++OSUnReservePhys(IMG_VOID *pvCpuVAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32MappingFlags,
++ IMG_HANDLE hOSMemHandle)
++{
++ LinuxMemArea *psLinuxMemArea;
++
++ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++
++ psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++ switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
++ {
++ case PVRSRV_HAP_KERNEL_ONLY:
++ break;
++ case PVRSRV_HAP_SINGLE_PROCESS:
++ case PVRSRV_HAP_MULTI_PROCESS:
++ {
++ if(PVRMMapRemoveRegisteredArea(psLinuxMemArea) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s(%p, %d, 0x%08X, %p) FAILED!",
++ __FUNCTION__, pvCpuVAddr, ui32Bytes,
++ ui32MappingFlags, hOSMemHandle));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSUnMapPhysToLin : invalid flags 0x%x", ui32MappingFlags));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++
++ LinuxMemAreaDeepFree(psLinuxMemArea);
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSBaseAllocContigMemory(IMG_UINT32 ui32Size, IMG_CPU_VIRTADDR *pvLinAddr, IMG_CPU_PHYADDR *psPhysAddr)
++{
++#if !defined(NO_HARDWARE)
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++ PVR_UNREFERENCED_PARAMETER(pvLinAddr);
++ PVR_UNREFERENCED_PARAMETER(psPhysAddr);
++ PVR_DPF((PVR_DBG_ERROR, "%s: Not available", __FUNCTION__));
++
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++#else
++ IMG_VOID *pvKernLinAddr;
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ pvKernLinAddr = _KMallocWrapper(ui32Size, __FILE__, __LINE__);
++#else
++ pvKernLinAddr = KMallocWrapper(ui32Size);
++#endif
++ if (!pvKernLinAddr)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ *pvLinAddr = pvKernLinAddr;
++
++ psPhysAddr->uiAddr = virt_to_phys(pvKernLinAddr);
++
++ return PVRSRV_OK;
++#endif
++}
++
++
++PVRSRV_ERROR OSBaseFreeContigMemory(IMG_UINT32 ui32Size, IMG_CPU_VIRTADDR pvLinAddr, IMG_CPU_PHYADDR psPhysAddr)
++{
++#if !defined(NO_HARDWARE)
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++ PVR_UNREFERENCED_PARAMETER(pvLinAddr);
++ PVR_UNREFERENCED_PARAMETER(psPhysAddr.uiAddr);
++
++ PVR_DPF((PVR_DBG_WARNING, "%s: Not available", __FUNCTION__));
++#else
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++ PVR_UNREFERENCED_PARAMETER(psPhysAddr.uiAddr);
++
++ KFreeWrapper(pvLinAddr);
++#endif
++ return PVRSRV_OK;
++}
++
++IMG_UINT32 OSReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset)
++{
++#if !defined(NO_HARDWARE)
++ return (IMG_UINT32) readl((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset);
++#else
++ return *(IMG_UINT32 *)((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset);
++#endif
++}
++
++IMG_VOID OSWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
++{
++#if !defined(NO_HARDWARE)
++ writel(ui32Value, (IMG_PBYTE)pvLinRegBaseAddr+ui32Offset);
++#else
++ *(IMG_UINT32 *)((IMG_PBYTE)pvLinRegBaseAddr+ui32Offset) = ui32Value;
++#endif
++}
++
++#if defined(CONFIG_PCI) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14))
++
++PVRSRV_PCI_DEV_HANDLE OSPCISetDev(IMG_VOID *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags)
++{
++ int err;
++ IMG_UINT32 i;
++ PVR_PCI_DEV *psPVRPCI;
++
++ PVR_TRACE(("OSPCISetDev"));
++
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psPVRPCI), (IMG_VOID **)&psPVRPCI, IMG_NULL,
++ "PCI Device") != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCISetDev: Couldn't allocate PVR PCI structure"));
++ return IMG_NULL;
++ }
++
++ psPVRPCI->psPCIDev = (struct pci_dev *)pvPCICookie;
++ psPVRPCI->ePCIFlags = eFlags;
++
++ err = pci_enable_device(psPVRPCI->psPCIDev);
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCISetDev: Couldn't enable device (%d)", err));
++ return IMG_NULL;
++ }
++
++ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
++ {
++ pci_set_master(psPVRPCI->psPCIDev);
++ }
++
++ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI)
++ {
++#if defined(CONFIG_PCI_MSI)
++ if (psPVRPCI->psPCIDev->device == PSB_SYS_SGX_DEV_DEVICE_ID_1 ||
++ psPVRPCI->psPCIDev->device == PSB_SYS_SGX_DEV_DEVICE_ID_2) // Disable MSI for Menlow
++ psPVRPCI->ePCIFlags &= ~HOST_PCI_INIT_FLAG_MSI;
++ else if(!psPVRPCI->psPCIDev->msi_enabled)
++ {
++ err = pci_enable_msi(psPVRPCI->psPCIDev);
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "OSPCISetDev: Couldn't enable MSI (%d)", err));
++ psPVRPCI->ePCIFlags &= ~HOST_PCI_INIT_FLAG_MSI;
++ }
++ }
++#else
++ PVR_DPF((PVR_DBG_WARNING, "OSPCISetDev: MSI support not enabled in the kernel"));
++#endif
++ }
++
++
++ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++ {
++ psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
++ }
++
++ return (PVRSRV_PCI_DEV_HANDLE)psPVRPCI;
++}
++
++PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags)
++{
++ struct pci_dev *psPCIDev;
++
++ psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, NULL);
++ if (psPCIDev == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIAcquireDev: Couldn't acquire device"));
++ return IMG_NULL;
++ }
++
++ return OSPCISetDev((IMG_VOID *)psPCIDev, eFlags);
++}
++
++PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ)
++{
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++
++ *pui32IRQ = psPVRPCI->psPCIDev->irq;
++
++ return PVRSRV_OK;
++}
++
++enum HOST_PCI_ADDR_RANGE_FUNC
++{
++ HOST_PCI_ADDR_RANGE_FUNC_LEN,
++ HOST_PCI_ADDR_RANGE_FUNC_START,
++ HOST_PCI_ADDR_RANGE_FUNC_END,
++ HOST_PCI_ADDR_RANGE_FUNC_REQUEST,
++ HOST_PCI_ADDR_RANGE_FUNC_RELEASE
++};
++
++static IMG_UINT32 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc,
++ PVRSRV_PCI_DEV_HANDLE hPVRPCI,
++ IMG_UINT32 ui32Index)
++{
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++
++ if (ui32Index >= DEVICE_COUNT_RESOURCE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: Index out of range"));
++ return 0;
++
++ }
++
++ switch (eFunc)
++ {
++ case HOST_PCI_ADDR_RANGE_FUNC_LEN:
++ return pci_resource_len(psPVRPCI->psPCIDev, ui32Index);
++ case HOST_PCI_ADDR_RANGE_FUNC_START:
++ return pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
++ case HOST_PCI_ADDR_RANGE_FUNC_END:
++ return pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
++ case HOST_PCI_ADDR_RANGE_FUNC_REQUEST:
++ {
++ int err;
++
++ err = pci_request_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index, "PowerVR");
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: pci_request_region_failed (%d)", err));
++ return 0;
++ }
++ psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_TRUE;
++ return 1;
++ }
++ case HOST_PCI_ADDR_RANGE_FUNC_RELEASE:
++ if (psPVRPCI->abPCIResourceInUse[ui32Index])
++ {
++ pci_release_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index);
++ psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_FALSE;
++ }
++ return 1;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: Unknown function"));
++ break;
++ }
++
++ return 0;
++}
++
++IMG_UINT32 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, hPVRPCI, ui32Index);
++}
++
++IMG_UINT32 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, hPVRPCI, ui32Index);
++}
++
++IMG_UINT32 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, hPVRPCI, ui32Index);
++}
++
++PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
++ IMG_UINT32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, hPVRPCI, ui32Index) == 0 ? PVRSRV_ERROR_GENERIC : PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
++{
++ return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, hPVRPCI, ui32Index) == 0 ? PVRSRV_ERROR_GENERIC : PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
++{
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++ int i;
++
++ PVR_TRACE(("OSPCIReleaseDev"));
++
++
++ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++ {
++ if (psPVRPCI->abPCIResourceInUse[i])
++ {
++ PVR_TRACE(("OSPCIReleaseDev: Releasing Address range %d", i));
++ pci_release_region(psPVRPCI->psPCIDev, i);
++ psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
++ }
++ }
++
++#if defined(CONFIG_PCI_MSI)
++ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI)
++ {
++ pci_disable_msi(psPVRPCI->psPCIDev);
++ }
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
++ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
++ {
++ pci_clear_master(psPVRPCI->psPCIDev);
++ }
++#endif
++ pci_disable_device(psPVRPCI->psPCIDev);
++
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psPVRPCI), (IMG_VOID *)psPVRPCI, IMG_NULL);
++
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
++{
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++ int i;
++ int err;
++
++ PVR_TRACE(("OSPCISuspendDev"));
++
++
++ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++ {
++ if (psPVRPCI->abPCIResourceInUse[i])
++ {
++ pci_release_region(psPVRPCI->psPCIDev, i);
++ }
++ }
++
++ err = pci_save_state(psPVRPCI->psPCIDev);
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: pci_save_state_failed (%d)", err));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ pci_disable_device(psPVRPCI->psPCIDev);
++
++ err = pci_set_power_state(psPVRPCI->psPCIDev, PCI_D3hot);//pci_choose_state(psPVRPCI->psPCIDev, PMSG_SUSPEND));
++ switch(err)
++ {
++ case 0:
++ break;
++ case -EIO:
++ PVR_DPF((PVR_DBG_WARNING, "OSPCISuspendDev: device doesn't support PCI PM"));
++ break;
++ case -EINVAL:
++ PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: can't enter requested power state"));
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: pci_set_power_state failed (%d)", err));
++ break;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
++{
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++ int err;
++ int i;
++
++ PVR_TRACE(("OSPCIResumeDev"));
++
++ err = pci_set_power_state(psPVRPCI->psPCIDev, PCI_D0);//pci_choose_state(psPVRPCI->psPCIDev, PMSG_ON));
++ switch(err)
++ {
++ case 0:
++ break;
++ case -EIO:
++ PVR_DPF((PVR_DBG_WARNING, "OSPCIResumeDev: device doesn't support PCI PM"));
++ break;
++ case -EINVAL:
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: can't enter requested power state"));
++ return PVRSRV_ERROR_GENERIC;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_set_power_state failed (%d)", err));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ err = pci_restore_state(psPVRPCI->psPCIDev);
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_restore_state failed (%d)", err));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ err = pci_enable_device(psPVRPCI->psPCIDev);
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: Couldn't enable device (%d)", err));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
++ pci_set_master(psPVRPCI->psPCIDev);
++
++
++ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++ {
++ if (psPVRPCI->abPCIResourceInUse[i])
++ {
++ err = pci_request_region(psPVRPCI->psPCIDev, i, "PowerVR");
++ if (err != 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_request_region_failed (region %d, error %d)", i, err));
++ }
++ }
++
++ }
++
++ return PVRSRV_OK;
++}
++
++#endif
++
++#define OS_MAX_TIMERS 8
++
++typedef struct TIMER_CALLBACK_DATA_TAG
++{
++ IMG_BOOL bInUse;
++ PFN_TIMER_FUNC pfnTimerFunc;
++ IMG_VOID *pvData;
++ struct timer_list sTimer;
++ IMG_UINT32 ui32Delay;
++ IMG_BOOL bActive;
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ struct work_struct sWork;
++#endif
++}TIMER_CALLBACK_DATA;
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++static struct workqueue_struct *psTimerWorkQueue;
++#endif
++
++static TIMER_CALLBACK_DATA sTimers[OS_MAX_TIMERS];
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++DEFINE_MUTEX(sTimerStructLock);
++#else
++static spinlock_t sTimerStructLock = SPIN_LOCK_UNLOCKED;
++#endif
++
++static void OSTimerCallbackBody(TIMER_CALLBACK_DATA *psTimerCBData)
++{
++ if (!psTimerCBData->bActive)
++ return;
++
++
++ psTimerCBData->pfnTimerFunc(psTimerCBData->pvData);
++
++
++ mod_timer(&psTimerCBData->sTimer, psTimerCBData->ui32Delay + jiffies);
++}
++
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++static void OSTimerWorkQueueCallBack(struct work_struct *psWork)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData = container_of(psWork, TIMER_CALLBACK_DATA, sWork);
++
++ OSTimerCallbackBody(psTimerCBData);
++}
++#endif
++
++static IMG_VOID OSTimerCallbackWrapper(IMG_UINT32 ui32Data)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData = (TIMER_CALLBACK_DATA*)ui32Data;
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ int res;
++
++ res = queue_work(psTimerWorkQueue, &psTimerCBData->sWork);
++ if (res == 0)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "OSTimerCallbackWrapper: work already queued"));
++ }
++#else
++ OSTimerCallbackBody(psTimerCBData);
++#endif
++}
++
++
++IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, IMG_VOID *pvData, IMG_UINT32 ui32MsTimeout)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData;
++ IMG_UINT32 ui32i;
++#if !defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ unsigned long ulLockFlags;
++#endif
++
++
++ if(!pfnTimerFunc)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: passed invalid callback"));
++ return IMG_NULL;
++ }
++
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ mutex_lock(&sTimerStructLock);
++#else
++ spin_lock_irqsave(&sTimerStructLock, ulLockFlags);
++#endif
++ for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++)
++ {
++ psTimerCBData = &sTimers[ui32i];
++ if (!psTimerCBData->bInUse)
++ {
++ psTimerCBData->bInUse = IMG_TRUE;
++ break;
++ }
++ }
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ mutex_unlock(&sTimerStructLock);
++#else
++ spin_unlock_irqrestore(&sTimerStructLock, ulLockFlags);
++#endif
++ if (ui32i >= OS_MAX_TIMERS)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: all timers are in use"));
++ return IMG_NULL;
++ }
++
++ psTimerCBData->pfnTimerFunc = pfnTimerFunc;
++ psTimerCBData->pvData = pvData;
++ psTimerCBData->bActive = IMG_FALSE;
++
++
++
++
++ psTimerCBData->ui32Delay = ((HZ * ui32MsTimeout) < 1000)
++ ? 1
++ : ((HZ * ui32MsTimeout) / 1000);
++
++ init_timer(&psTimerCBData->sTimer);
++
++
++ psTimerCBData->sTimer.function = (IMG_VOID *)OSTimerCallbackWrapper;
++ psTimerCBData->sTimer.data = (IMG_UINT32)psTimerCBData;
++ psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;
++
++ return (IMG_HANDLE)(ui32i + 1);
++}
++
++
++static inline TIMER_CALLBACK_DATA *GetTimerStructure(IMG_HANDLE hTimer)
++{
++ IMG_UINT32 ui32i = ((IMG_UINT32)hTimer) - 1;
++
++ PVR_ASSERT(ui32i < OS_MAX_TIMERS);
++
++ return &sTimers[ui32i];
++}
++
++PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
++
++ PVR_ASSERT(psTimerCBData->bInUse);
++ PVR_ASSERT(!psTimerCBData->bActive);
++
++
++ psTimerCBData->bInUse = IMG_FALSE;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
++
++ PVR_ASSERT(psTimerCBData->bInUse);
++ PVR_ASSERT(!psTimerCBData->bActive);
++
++
++ psTimerCBData->bActive = IMG_TRUE;
++
++
++ psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;
++
++
++ add_timer(&psTimerCBData->sTimer);
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer)
++{
++ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
++
++ PVR_ASSERT(psTimerCBData->bInUse);
++ PVR_ASSERT(psTimerCBData->bActive);
++
++
++ psTimerCBData->bActive = IMG_FALSE;
++ smp_mb();
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ flush_workqueue(psTimerWorkQueue);
++#endif
++
++
++ del_timer_sync(&psTimerCBData->sTimer);
++
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++
++ flush_workqueue(psTimerWorkQueue);
++#endif
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName, PVRSRV_EVENTOBJECT *psEventObject)
++{
++
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(psEventObject)
++ {
++ if(pszName)
++ {
++
++ strncpy(psEventObject->szName, pszName, EVENTOBJNAME_MAXLENGTH);
++ }
++ else
++ {
++
++ static IMG_UINT16 ui16NameIndex = 0;
++ snprintf(psEventObject->szName, EVENTOBJNAME_MAXLENGTH, "PVRSRV_EVENTOBJECT_%d", ui16NameIndex++);
++ }
++
++ if(LinuxEventObjectListCreate(&psEventObject->hOSEventKM) != PVRSRV_OK)
++ {
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreate: psEventObject is not a valid pointer"));
++ eError = PVRSRV_ERROR_GENERIC;
++ }
++
++ return eError;
++
++}
++
++
++PVRSRV_ERROR OSEventObjectDestroy(PVRSRV_EVENTOBJECT *psEventObject)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(psEventObject)
++ {
++ if(psEventObject->hOSEventKM)
++ {
++ LinuxEventObjectListDestroy(psEventObject->hOSEventKM);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: hOSEventKM is not a valid pointer"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: psEventObject is not a valid pointer"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM)
++{
++ PVRSRV_ERROR eError;
++
++ if(hOSEventKM)
++ {
++ eError = LinuxEventObjectWait(hOSEventKM, EVENT_OBJECT_TIMEOUT_MS);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectWait: hOSEventKM is not a valid handle"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR OSEventObjectOpen(PVRSRV_EVENTOBJECT *psEventObject,
++ IMG_HANDLE *phOSEvent)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(psEventObject)
++ {
++ if(LinuxEventObjectAdd(psEventObject->hOSEventKM, phOSEvent) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreate: psEventObject is not a valid pointer"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR OSEventObjectClose(PVRSRV_EVENTOBJECT *psEventObject,
++ IMG_HANDLE hOSEventKM)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ if(psEventObject)
++ {
++ if(LinuxEventObjectDelete(psEventObject->hOSEventKM, hOSEventKM) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectDelete: failed"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: psEventObject is not a valid pointer"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++
++}
++
++PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hOSEventKM)
++{
++ PVRSRV_ERROR eError;
++
++ if(hOSEventKM)
++ {
++ eError = LinuxEventObjectSignal(hOSEventKM);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectSignal: hOSEventKM is not a valid handle"));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ return eError;
++}
++
++IMG_BOOL OSProcHasPrivSrvInit(IMG_VOID)
++{
++ return (capable(CAP_SYS_MODULE) != 0) ? IMG_TRUE : IMG_FALSE;
++}
++
++PVRSRV_ERROR OSCopyToUser(IMG_PVOID pvProcess,
++ IMG_VOID *pvDest,
++ IMG_VOID *pvSrc,
++ IMG_UINT32 ui32Bytes)
++{
++ PVR_UNREFERENCED_PARAMETER(pvProcess);
++
++ if(copy_to_user(pvDest, pvSrc, ui32Bytes)==0)
++ return PVRSRV_OK;
++ else
++ return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR OSCopyFromUser( IMG_PVOID pvProcess,
++ IMG_VOID *pvDest,
++ IMG_VOID *pvSrc,
++ IMG_UINT32 ui32Bytes)
++{
++ PVR_UNREFERENCED_PARAMETER(pvProcess);
++
++ if(copy_from_user(pvDest, pvSrc, ui32Bytes)==0)
++ return PVRSRV_OK;
++ else
++ return PVRSRV_ERROR_GENERIC;
++}
++
++IMG_BOOL OSAccessOK(IMG_VERIFY_TEST eVerification, IMG_VOID *pvUserPtr, IMG_UINT32 ui32Bytes)
++{
++ IMG_INT linuxType;
++
++ if (eVerification == PVR_VERIFY_READ)
++ {
++ linuxType = VERIFY_READ;
++ }
++ else
++ {
++ PVR_ASSERT(eVerification == PVR_VERIFY_WRITE);
++ linuxType = VERIFY_WRITE;
++ }
++
++ return access_ok(linuxType, pvUserPtr, ui32Bytes);
++}
++
++typedef enum _eWrapMemType_
++{
++ WRAP_TYPE_CLEANUP,
++ WRAP_TYPE_GET_USER_PAGES,
++ WRAP_TYPE_FIND_VMA_PAGES,
++ WRAP_TYPE_FIND_VMA_PFN
++} eWrapMemType;
++
++typedef struct _sWrapMemInfo_
++{
++ eWrapMemType eType;
++ IMG_INT iNumPages;
++ struct page **ppsPages;
++ IMG_SYS_PHYADDR *psPhysAddr;
++ IMG_INT iPageOffset;
++ IMG_INT iContiguous;
++#if defined(DEBUG)
++ IMG_UINT32 ulStartAddr;
++ IMG_UINT32 ulBeyondEndAddr;
++ struct vm_area_struct *psVMArea;
++#endif
++ IMG_BOOL bWrapWorkaround;
++} sWrapMemInfo;
++
++static IMG_VOID CheckPagesContiguous(sWrapMemInfo *psInfo)
++{
++ IMG_INT i;
++ IMG_UINT32 ui32AddrChk;
++
++ BUG_ON(psInfo == IMG_NULL);
++
++ psInfo->iContiguous = 1;
++
++ for (i = 0, ui32AddrChk = psInfo->psPhysAddr[0].uiAddr;
++ i < psInfo->iNumPages;
++ i++, ui32AddrChk += PAGE_SIZE)
++ {
++ if (psInfo->psPhysAddr[i].uiAddr != ui32AddrChk)
++ {
++ psInfo->iContiguous = 0;
++ break;
++ }
++ }
++}
++
++static struct page *CPUVAddrToPage(struct vm_area_struct *psVMArea, IMG_UINT32 ulCPUVAddr)
++{
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10))
++ pgd_t *psPGD;
++ pud_t *psPUD;
++ pmd_t *psPMD;
++ pte_t *psPTE;
++ struct mm_struct *psMM = psVMArea->vm_mm;
++ IMG_UINT32 ulPFN;
++ spinlock_t *psPTLock;
++ struct page *psPage;
++
++ psPGD = pgd_offset(psMM, ulCPUVAddr);
++ if (pgd_none(*psPGD) || pgd_bad(*psPGD))
++ return NULL;
++
++ psPUD = pud_offset(psPGD, ulCPUVAddr);
++ if (pud_none(*psPUD) || pud_bad(*psPUD))
++ return NULL;
++
++ psPMD = pmd_offset(psPUD, ulCPUVAddr);
++ if (pmd_none(*psPMD) || pmd_bad(*psPMD))
++ return NULL;
++
++ psPage = NULL;
++
++ psPTE = (pte_t *)pte_offset_map_lock(psMM, psPMD, ulCPUVAddr, &psPTLock);
++ if ((pte_none(*psPTE) != 0) || (pte_present(*psPTE) == 0) || (pte_write(*psPTE) == 0))
++ goto exit_unlock;
++
++ ulPFN = pte_pfn(*psPTE);
++ if (!pfn_valid(ulPFN))
++ goto exit_unlock;
++
++ psPage = pfn_to_page(ulPFN);
++
++ get_page(psPage);
++
++exit_unlock:
++ pte_unmap_unlock(psPTE, psPTLock);
++
++ return psPage;
++#else
++ return NULL;
++#endif
++}
++PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem)
++{
++ sWrapMemInfo *psInfo = (sWrapMemInfo *)hOSWrapMem;
++ IMG_INT i;
++
++ BUG_ON(psInfo == IMG_NULL);
++
++ switch (psInfo->eType)
++ {
++ case WRAP_TYPE_CLEANUP:
++ break;
++ case WRAP_TYPE_FIND_VMA_PFN:
++ break;
++ case WRAP_TYPE_GET_USER_PAGES:
++ {
++ for (i = 0; i < psInfo->iNumPages; i++)
++ {
++ struct page *psPage = psInfo->ppsPages[i];
++
++
++ if (!PageReserved(psPage));
++ {
++ SetPageDirty(psPage);
++ }
++ page_cache_release(psPage);
++ }
++ break;
++ }
++ case WRAP_TYPE_FIND_VMA_PAGES:
++ {
++ for (i = 0; i < psInfo->iNumPages; i++)
++ {
++ if(psInfo->bWrapWorkaround)
++ put_page(psInfo->ppsPages[i]);
++ else
++ put_page_testzero(psInfo->ppsPages[i]);
++ }
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSReleasePhysPageAddr: Unknown wrap type (%d)", psInfo->eType));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ }
++
++ if (psInfo->ppsPages != IMG_NULL)
++ {
++ kfree(psInfo->ppsPages);
++ }
++
++ if (psInfo->psPhysAddr != IMG_NULL)
++ {
++ kfree(psInfo->psPhysAddr);
++ }
++
++ kfree(psInfo);
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID* pvCPUVAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_SYS_PHYADDR *psSysPAddr,
++ IMG_HANDLE *phOSWrapMem,
++ IMG_BOOL bWrapWorkaround)
++{
++ IMG_UINT32 ulStartAddrOrig = (IMG_UINT32) pvCPUVAddr;
++ IMG_UINT32 ulAddrRangeOrig = (IMG_UINT32) ui32Bytes;
++ IMG_UINT32 ulBeyondEndAddrOrig = ulStartAddrOrig + ulAddrRangeOrig;
++ IMG_UINT32 ulStartAddr;
++ IMG_UINT32 ulAddrRange;
++ IMG_UINT32 ulBeyondEndAddr;
++ IMG_UINT32 ulAddr;
++ IMG_INT iNumPagesMapped;
++ IMG_INT i;
++ struct vm_area_struct *psVMArea;
++ sWrapMemInfo *psInfo;
++
++
++ ulStartAddr = ulStartAddrOrig & PAGE_MASK;
++ ulBeyondEndAddr = PAGE_ALIGN(ulBeyondEndAddrOrig);
++ ulAddrRange = ulBeyondEndAddr - ulStartAddr;
++
++
++ psInfo = kmalloc(sizeof(*psInfo), GFP_KERNEL);
++ if (psInfo == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Couldn't allocate information structure"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ memset(psInfo, 0, sizeof(*psInfo));
++ psInfo->bWrapWorkaround = bWrapWorkaround;
++
++#if defined(DEBUG)
++ psInfo->ulStartAddr = ulStartAddrOrig;
++ psInfo->ulBeyondEndAddr = ulBeyondEndAddrOrig;
++#endif
++
++ psInfo->iNumPages = (IMG_INT)(ulAddrRange >> PAGE_SHIFT);
++ psInfo->iPageOffset = (IMG_INT)(ulStartAddrOrig & ~PAGE_MASK);
++
++
++ psInfo->psPhysAddr = kmalloc((size_t)psInfo->iNumPages * sizeof(*psInfo->psPhysAddr), GFP_KERNEL);
++ if (psInfo->psPhysAddr == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Couldn't allocate page array"));
++ goto error_free;
++ }
++
++
++ psInfo->ppsPages = kmalloc((size_t)psInfo->iNumPages * sizeof(*psInfo->ppsPages), GFP_KERNEL);
++ if (psInfo->ppsPages == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Couldn't allocate page array"));
++ goto error_free;
++ }
++
++
++ down_read(&current->mm->mmap_sem);
++ iNumPagesMapped = get_user_pages(current, current->mm, ulStartAddr, psInfo->iNumPages, 1, 0, psInfo->ppsPages, NULL);
++ up_read(&current->mm->mmap_sem);
++
++ if (iNumPagesMapped >= 0)
++ {
++
++ if (iNumPagesMapped != psInfo->iNumPages)
++ {
++ PVR_TRACE(("OSAcquirePhysPageAddr: Couldn't map all the pages needed (wanted: %d, got %d)", psInfo->iNumPages, iNumPagesMapped));
++
++
++ for (i = 0; i < iNumPagesMapped; i++)
++ {
++ page_cache_release(psInfo->ppsPages[i]);
++
++ }
++ goto error_free;
++ }
++
++
++ for (i = 0; i < psInfo->iNumPages; i++)
++ {
++ IMG_CPU_PHYADDR CPUPhysAddr;
++
++ CPUPhysAddr.uiAddr = page_to_pfn(psInfo->ppsPages[i]) << PAGE_SHIFT;
++ psInfo->psPhysAddr[i] = SysCpuPAddrToSysPAddr(CPUPhysAddr);
++ psSysPAddr[i] = psInfo->psPhysAddr[i];
++
++ }
++
++ psInfo->eType = WRAP_TYPE_GET_USER_PAGES;
++
++ goto exit_check;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE, "OSAcquirePhysPageAddr: get_user_pages failed (%d), trying something else", iNumPagesMapped));
++
++
++ down_read(&current->mm->mmap_sem);
++
++ psVMArea = find_vma(current->mm, ulStartAddrOrig);
++ if (psVMArea == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Couldn't find memory region containing start address %lx", ulStartAddrOrig));
++
++ goto error_release_mmap_sem;
++ }
++#if defined(DEBUG)
++ psInfo->psVMArea = psVMArea;
++#endif
++
++
++ if (ulStartAddrOrig < psVMArea->vm_start)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Start address %lx is outside of the region returned by find_vma", ulStartAddrOrig));
++ goto error_release_mmap_sem;
++ }
++
++
++ if (ulBeyondEndAddrOrig > psVMArea->vm_end)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: End address %lx is outside of the region returned by find_vma", ulBeyondEndAddrOrig));
++ goto error_release_mmap_sem;
++ }
++
++
++ if ((psVMArea->vm_flags & (VM_IO | VM_RESERVED)) != (VM_IO | VM_RESERVED))
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: Memory region does not represent memory mapped I/O (VMA flags: 0x%lx)", psVMArea->vm_flags));
++ goto error_release_mmap_sem;
++ }
++
++
++ if ((psVMArea->vm_flags & (VM_READ | VM_WRITE)) != (VM_READ | VM_WRITE))
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "OSAcquirePhysPageAddr: No read/write access to memory region (VMA flags: 0x%lx)", psVMArea->vm_flags));
++ goto error_release_mmap_sem;
++ }
++
++
++ for (ulAddr = ulStartAddrOrig, i = 0; ulAddr < ulBeyondEndAddrOrig; ulAddr += PAGE_SIZE, i++)
++ {
++ struct page *psPage;
++
++ BUG_ON(i >= psInfo->iNumPages);
++
++ psPage = CPUVAddrToPage(psVMArea, ulAddr);
++ if (psPage == NULL)
++ {
++ IMG_INT j;
++
++ PVR_TRACE(("OSAcquirePhysPageAddr: Couldn't lookup page structure for address 0x%lx, trying something else", ulAddr));
++
++
++ for (j = 0; j < i; j++)
++ {
++ if(psInfo->bWrapWorkaround)
++ put_page(psInfo->ppsPages[j]);
++ else
++ put_page_testzero(psInfo->ppsPages[j]);
++ }
++ break;
++ }
++
++ psInfo->ppsPages[i] = psPage;
++ }
++
++ BUG_ON(i > psInfo->iNumPages);
++ if (i == psInfo->iNumPages)
++ {
++
++ for (i = 0; i < psInfo->iNumPages; i++)
++ {
++ struct page *psPage = psInfo->ppsPages[i];
++ IMG_CPU_PHYADDR CPUPhysAddr;
++
++
++ CPUPhysAddr.uiAddr = page_to_pfn(psPage) << PAGE_SHIFT;
++
++ psInfo->psPhysAddr[i] = SysCpuPAddrToSysPAddr(CPUPhysAddr);
++ psSysPAddr[i] = psInfo->psPhysAddr[i];
++ }
++
++ psInfo->eType = WRAP_TYPE_FIND_VMA_PAGES;
++ }
++ else
++ {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)) && defined(PVR_SECURE_HANDLES)
++
++
++
++ if ((psVMArea->vm_flags & VM_PFNMAP) == 0)
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "OSAcquirePhysPageAddr: Region isn't a raw PFN mapping. Giving up."));
++ goto error_release_mmap_sem;
++ }
++
++ for (ulAddr = ulStartAddrOrig, i = 0; ulAddr < ulBeyondEndAddrOrig; ulAddr += PAGE_SIZE, i++)
++ {
++ IMG_CPU_PHYADDR CPUPhysAddr;
++
++ CPUPhysAddr.uiAddr = ((ulAddr - psVMArea->vm_start) + (psVMArea->vm_pgoff << PAGE_SHIFT)) & PAGE_MASK;
++
++ psInfo->psPhysAddr[i] = SysCpuPAddrToSysPAddr(CPUPhysAddr);
++ psSysPAddr[i] = psInfo->psPhysAddr[i];
++ }
++ BUG_ON(i != psInfo->iNumPages);
++
++ psInfo->eType = WRAP_TYPE_FIND_VMA_PFN;
++
++
++ PVR_DPF((PVR_DBG_WARNING,
++ "OSAcquirePhysPageAddr: Region can't be locked down"));
++#else
++ PVR_DPF((PVR_DBG_WARNING,
++ "OSAcquirePhysPageAddr: Raw PFN mappings not supported. Giving up."));
++ goto error_release_mmap_sem;
++#endif
++ }
++
++ up_read(&current->mm->mmap_sem);
++
++exit_check:
++ CheckPagesContiguous(psInfo);
++
++
++
++ *phOSWrapMem = (IMG_HANDLE)psInfo;
++
++ return PVRSRV_OK;
++
++error_release_mmap_sem:
++ up_read(&current->mm->mmap_sem);
++error_free:
++ psInfo->eType = WRAP_TYPE_CLEANUP;
++ OSReleasePhysPageAddr((IMG_HANDLE)psInfo);
++ return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR PVROSFuncInit(IMG_VOID)
++{
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ {
++ IMG_UINT32 ui32i;
++
++ psTimerWorkQueue = create_workqueue("pvr_timer");
++ if (psTimerWorkQueue == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: couldn't create timer workqueue", __FUNCTION__));
++ return PVRSRV_ERROR_GENERIC;
++
++ }
++
++ for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++)
++ {
++ TIMER_CALLBACK_DATA *psTimerCBData = &sTimers[ui32i];
++
++ INIT_WORK(&psTimerCBData->sWork, OSTimerWorkQueueCallBack);
++ }
++ }
++#endif
++ return PVRSRV_OK;
++}
++
++IMG_VOID PVROSFuncDeInit(IMG_VOID)
++{
++#if defined(PVR_LINUX_TIMERS_USING_WORKQUEUES)
++ if (psTimerWorkQueue != NULL)
++ {
++ destroy_workqueue(psTimerWorkQueue);
++ }
++#endif
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/osperproc.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/osperproc.c
+new file mode 100644
+index 0000000..011c8f3
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/osperproc.c
+@@ -0,0 +1,113 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "osperproc.h"
++
++#include "env_perproc.h"
++#include "proc.h"
++
++extern IMG_UINT32 gui32ReleasePID;
++
++PVRSRV_ERROR OSPerProcessPrivateDataInit(IMG_HANDLE *phOsPrivateData)
++{
++ PVRSRV_ERROR eError;
++ IMG_HANDLE hBlockAlloc;
++ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
++
++ eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_ENV_PER_PROCESS_DATA),
++ phOsPrivateData,
++ &hBlockAlloc,
++ "Environment per Process Data");
++
++ if (eError != PVRSRV_OK)
++ {
++ *phOsPrivateData = IMG_NULL;
++
++ PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem failed (%d)", __FUNCTION__, eError));
++ return eError;
++ }
++
++ psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *)*phOsPrivateData;
++ OSMemSet(psEnvPerProc, 0, sizeof(*psEnvPerProc));
++
++ psEnvPerProc->hBlockAlloc = hBlockAlloc;
++
++
++ LinuxMMapPerProcessConnect(psEnvPerProc);
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++
++ INIT_LIST_HEAD(&psEnvPerProc->sDRMAuthListHead);
++#endif
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPerProcessPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
++
++ if (hOsPrivateData == IMG_NULL)
++ {
++ return PVRSRV_OK;
++ }
++
++ psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *)hOsPrivateData;
++
++
++ LinuxMMapPerProcessDisconnect(psEnvPerProc);
++
++
++ RemovePerProcessProcDir(psEnvPerProc);
++
++ eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++ sizeof(PVRSRV_ENV_PER_PROCESS_DATA),
++ hOsPrivateData,
++ psEnvPerProc->hBlockAlloc);
++
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: OSFreeMem failed (%d)", __FUNCTION__, eError));
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPerProcessSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase)
++{
++ return LinuxMMapPerProcessHandleOptions(psHandleBase);
++}
++
++IMG_HANDLE LinuxTerminatingProcessPrivateData(IMG_VOID)
++{
++ if(!gui32ReleasePID)
++ return NULL;
++ return PVRSRVPerProcessPrivateData(gui32ReleasePID);
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pdump.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pdump.c
+new file mode 100644
+index 0000000..11d69d1
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pdump.c
+@@ -0,0 +1,662 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined (SUPPORT_SGX)
++#if defined (PDUMP)
++
++#include <asm/atomic.h>
++#include <stdarg.h>
++#include "sgxdefs.h"
++#include "services_headers.h"
++
++#include "pvrversion.h"
++#include "pvr_debug.h"
++
++#include "dbgdrvif.h"
++#include "sgxmmu.h"
++#include "mm.h"
++#include "pdump_km.h"
++
++#include <linux/tty.h>
++
++static IMG_BOOL PDumpWriteString2 (IMG_CHAR * pszString, IMG_UINT32 ui32Flags);
++static IMG_BOOL PDumpWriteILock (PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32Count, IMG_UINT32 ui32Flags);
++static IMG_VOID DbgSetFrame (PDBG_STREAM psStream, IMG_UINT32 ui32Frame);
++static IMG_UINT32 DbgGetFrame (PDBG_STREAM psStream);
++static IMG_VOID DbgSetMarker (PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
++static IMG_UINT32 DbgWrite (PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags);
++
++#define PDUMP_DATAMASTER_PIXEL (1)
++#define PDUMP_DATAMASTER_EDM (3)
++
++#define MIN(a,b) (a > b ? b : a)
++
++#define MAX_FILE_SIZE 0x40000000
++
++static atomic_t gsPDumpSuspended = ATOMIC_INIT(0);
++
++static PDBGKM_SERVICE_TABLE gpfnDbgDrv = IMG_NULL;
++
++
++
++IMG_CHAR *pszStreamName[PDUMP_NUM_STREAMS] = { "ParamStream2",
++ "ScriptStream2",
++ "DriverInfoStream"};
++typedef struct PDBG_PDUMP_STATE_TAG
++{
++ PDBG_STREAM psStream[PDUMP_NUM_STREAMS];
++ IMG_UINT32 ui32ParamFileNum;
++
++ IMG_CHAR *pszMsg;
++ IMG_CHAR *pszScript;
++ IMG_CHAR *pszFile;
++
++} PDBG_PDUMP_STATE;
++
++static PDBG_PDUMP_STATE gsDBGPdumpState = {{IMG_NULL}, 0, IMG_NULL, IMG_NULL, IMG_NULL};
++
++#define SZ_MSG_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
++#define SZ_SCRIPT_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
++#define SZ_FILENAME_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
++
++
++
++
++IMG_VOID DBGDrvGetServiceTable(IMG_VOID **fn_table);
++
++static inline IMG_BOOL PDumpSuspended(IMG_VOID)
++{
++ return atomic_read(&gsPDumpSuspended) != 0;
++}
++
++PVRSRV_ERROR PDumpOSGetScriptString(IMG_HANDLE *phScript,
++ IMG_UINT32 *pui32MaxLen)
++{
++ *phScript = (IMG_HANDLE)gsDBGPdumpState.pszScript;
++ *pui32MaxLen = SZ_SCRIPT_SIZE_MAX;
++ if ((!*phScript) || PDumpSuspended())
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpOSGetMessageString(IMG_HANDLE *phMsg,
++ IMG_UINT32 *pui32MaxLen)
++{
++ *phMsg = (IMG_HANDLE)gsDBGPdumpState.pszMsg;
++ *pui32MaxLen = SZ_MSG_SIZE_MAX;
++ if ((!*phMsg) || PDumpSuspended())
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpOSGetFilenameString(IMG_CHAR **ppszFile,
++ IMG_UINT32 *pui32MaxLen)
++{
++ *ppszFile = gsDBGPdumpState.pszFile;
++ *pui32MaxLen = SZ_FILENAME_SIZE_MAX;
++ if ((!*ppszFile) || PDumpSuspended())
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++ return PVRSRV_OK;
++}
++
++IMG_BOOL PDumpOSWriteString2(IMG_HANDLE hScript, IMG_UINT32 ui32Flags)
++{
++ return PDumpWriteString2(hScript, ui32Flags);
++}
++
++PVRSRV_ERROR PDumpOSBufprintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...)
++{
++ IMG_CHAR* pszBuf = hBuf;
++ IMG_UINT32 n;
++ va_list vaArgs;
++
++ va_start(vaArgs, pszFormat);
++
++ n = vsnprintf(pszBuf, ui32ScriptSizeMax, pszFormat, vaArgs);
++
++ va_end(vaArgs);
++
++ if (n>=ui32ScriptSizeMax || n==-1)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
++
++ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpOSVSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, PDUMP_va_list vaArgs)
++{
++ IMG_UINT32 n;
++
++ n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs);
++
++ if (n>=ui32ScriptSizeMax || n==-1)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
++
++ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
++ }
++
++ return PVRSRV_OK;
++}
++
++IMG_VOID PDumpOSDebugPrintf(IMG_CHAR* pszFormat, ...)
++{
++
++}
++
++PVRSRV_ERROR PDumpOSSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR *pszFormat, ...)
++{
++ IMG_UINT32 n;
++ va_list vaArgs;
++
++ va_start(vaArgs, pszFormat);
++
++ n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs);
++
++ va_end(vaArgs);
++
++ if (n>=ui32ScriptSizeMax || n==-1)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
++
++ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
++ }
++
++ return PVRSRV_OK;
++}
++
++IMG_UINT32 PDumpOSBuflen(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax)
++{
++ IMG_CHAR* pszBuf = hBuffer;
++ IMG_UINT32 ui32Count = 0;
++
++ while ((pszBuf[ui32Count]!=0) && (ui32Count<ui32BufferSizeMax) )
++ {
++ ui32Count++;
++ }
++ return(ui32Count);
++}
++
++IMG_VOID PDumpOSVerifyLineEnding(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax)
++{
++ IMG_UINT32 ui32Count = 0;
++ IMG_CHAR* pszBuf = hBuffer;
++
++
++ ui32Count = PDumpOSBuflen(hBuffer, ui32BufferSizeMax);
++
++
++ if ((ui32Count >= 1) && (pszBuf[ui32Count-1] != '\n') && (ui32Count<ui32BufferSizeMax))
++ {
++ pszBuf[ui32Count] = '\n';
++ ui32Count++;
++ pszBuf[ui32Count] = '\0';
++ }
++ if ((ui32Count >= 2) && (pszBuf[ui32Count-2] != '\r') && (ui32Count<ui32BufferSizeMax))
++ {
++ pszBuf[ui32Count-1] = '\r';
++ pszBuf[ui32Count] = '\n';
++ ui32Count++;
++ pszBuf[ui32Count] = '\0';
++ }
++}
++
++IMG_HANDLE PDumpOSGetStream(IMG_UINT32 ePDumpStream)
++{
++ return (IMG_HANDLE)gsDBGPdumpState.psStream[ePDumpStream];
++}
++
++IMG_UINT32 PDumpOSGetStreamOffset(IMG_UINT32 ePDumpStream)
++{
++ PDBG_STREAM psStream = gsDBGPdumpState.psStream[ePDumpStream];
++ return gpfnDbgDrv->pfnGetStreamOffset(psStream);
++}
++
++IMG_UINT32 PDumpOSGetParamFileNum(IMG_VOID)
++{
++ return gsDBGPdumpState.ui32ParamFileNum;
++}
++
++IMG_BOOL PDumpOSWriteString(IMG_HANDLE hStream,
++ IMG_UINT8 *psui8Data,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32Flags)
++{
++ PDBG_STREAM psStream = (PDBG_STREAM)hStream;
++ return PDumpWriteILock(psStream,
++ psui8Data,
++ ui32Size,
++ ui32Flags);
++}
++
++IMG_VOID PDumpOSCheckForSplitting(IMG_HANDLE hStream, IMG_UINT32 ui32Size, IMG_UINT32 ui32Flags)
++{
++
++ PVR_UNREFERENCED_PARAMETER(hStream);
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++}
++
++IMG_BOOL PDumpOSJTInitialised(IMG_VOID)
++{
++ if(gpfnDbgDrv)
++ {
++ return IMG_TRUE;
++ }
++ return IMG_FALSE;
++}
++
++inline IMG_BOOL PDumpOSIsSuspended(IMG_VOID)
++{
++ return atomic_read(&gsPDumpSuspended) != 0;
++}
++
++IMG_VOID PDumpOSCPUVAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT8 *pui8LinAddr,
++ IMG_UINT32 ui32PageSize,
++ IMG_DEV_PHYADDR *psDevPAddr)
++{
++ if(hOSMemHandle)
++ {
++
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(pui8LinAddr);
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, ui32Offset);
++ PVR_ASSERT((sCpuPAddr.uiAddr & (ui32PageSize - 1)) == 0);
++
++
++ *psDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++ }
++ else
++ {
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(ui32Offset);
++
++ sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr);
++ *psDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++ }
++}
++
++IMG_VOID PDumpOSCPUVAddrToPhysPages(IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32Offset,
++ IMG_PUINT8 pui8LinAddr,
++ IMG_UINT32 *pui32PageOffset)
++{
++ if(hOSMemHandle)
++ {
++
++ IMG_CPU_PHYADDR sCpuPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(pui8LinAddr);
++
++ sCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, ui32Offset);
++ *pui32PageOffset = sCpuPAddr.uiAddr & (HOST_PAGESIZE() -1);
++ }
++ else
++ {
++ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
++ PVR_UNREFERENCED_PARAMETER(ui32Offset);
++
++ *pui32PageOffset = (IMG_UINT32)pui8LinAddr & (HOST_PAGESIZE() - 1);
++ }
++}
++
++
++
++IMG_VOID PDumpInit(IMG_VOID)
++{
++ IMG_UINT32 i;
++
++
++ if (!gpfnDbgDrv)
++ {
++ DBGDrvGetServiceTable((IMG_VOID **)&gpfnDbgDrv);
++
++
++
++
++ if (gpfnDbgDrv == IMG_NULL)
++ {
++ return;
++ }
++
++ if(!gsDBGPdumpState.pszFile)
++ {
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszFile, 0,
++ "Filename string") != PVRSRV_OK)
++ {
++ goto init_failed;
++ }
++ }
++
++ if(!gsDBGPdumpState.pszMsg)
++ {
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszMsg, 0,
++ "Message string") != PVRSRV_OK)
++ {
++ goto init_failed;
++ }
++ }
++
++ if(!gsDBGPdumpState.pszScript)
++ {
++ if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszScript, 0,
++ "Script string") != PVRSRV_OK)
++ {
++ goto init_failed;
++ }
++ }
++
++ for(i=0; i < PDUMP_NUM_STREAMS; i++)
++ {
++ gsDBGPdumpState.psStream[i] = gpfnDbgDrv->pfnCreateStream(pszStreamName[i],
++ DEBUG_CAPMODE_FRAMED,
++ DEBUG_OUTMODE_STREAMENABLE,
++ 0,
++ 10);
++
++ gpfnDbgDrv->pfnSetCaptureMode(gsDBGPdumpState.psStream[i],DEBUG_CAPMODE_FRAMED,0xFFFFFFFF, 0xFFFFFFFF, 1);
++ gpfnDbgDrv->pfnSetFrame(gsDBGPdumpState.psStream[i],0);
++ }
++
++ PDUMPCOMMENT("Driver Product Name: %s", VS_PRODUCT_NAME);
++ PDUMPCOMMENT("Driver Product Version: %s (%s)", PVRVERSION_STRING, PVRVERSION_FILE);
++ PDUMPCOMMENT("Start of Init Phase");
++ }
++
++ return;
++
++init_failed:
++
++ if(gsDBGPdumpState.pszFile)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszFile, 0);
++ gsDBGPdumpState.pszFile = IMG_NULL;
++ }
++
++ if(gsDBGPdumpState.pszScript)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszScript, 0);
++ gsDBGPdumpState.pszScript = IMG_NULL;
++ }
++
++ if(gsDBGPdumpState.pszMsg)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszMsg, 0);
++ gsDBGPdumpState.pszMsg = IMG_NULL;
++ }
++
++ gpfnDbgDrv = IMG_NULL;
++}
++
++
++IMG_VOID PDumpDeInit(IMG_VOID)
++{
++ IMG_UINT32 i;
++
++ for(i=0; i < PDUMP_NUM_STREAMS; i++)
++ {
++ gpfnDbgDrv->pfnDestroyStream(gsDBGPdumpState.psStream[i]);
++ }
++
++ if(gsDBGPdumpState.pszFile)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszFile, 0);
++ gsDBGPdumpState.pszFile = IMG_NULL;
++ }
++
++ if(gsDBGPdumpState.pszScript)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszScript, 0);
++ gsDBGPdumpState.pszScript = IMG_NULL;
++ }
++
++ if(gsDBGPdumpState.pszMsg)
++ {
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszMsg, 0);
++ gsDBGPdumpState.pszMsg = IMG_NULL;
++ }
++
++ gpfnDbgDrv = IMG_NULL;
++}
++
++PVRSRV_ERROR PDumpStartInitPhaseKM(IMG_VOID)
++{
++ IMG_UINT32 i;
++
++ if (gpfnDbgDrv)
++ {
++ PDUMPCOMMENT("Start Init Phase");
++ for(i=0; i < PDUMP_NUM_STREAMS; i++)
++ {
++ gpfnDbgDrv->pfnStartInitPhase(gsDBGPdumpState.psStream[i]);
++ }
++ }
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpStopInitPhaseKM(IMG_VOID)
++{
++ IMG_UINT32 i;
++
++ if (gpfnDbgDrv)
++ {
++ PDUMPCOMMENT("Stop Init Phase");
++
++ for(i=0; i < PDUMP_NUM_STREAMS; i++)
++ {
++ gpfnDbgDrv->pfnStopInitPhase(gsDBGPdumpState.psStream[i]);
++ }
++ }
++ return PVRSRV_OK;
++}
++
++IMG_BOOL PDumpIsLastCaptureFrameKM(IMG_VOID)
++{
++ return gpfnDbgDrv->pfnIsLastCaptureFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2]);
++}
++
++
++IMG_BOOL PDumpIsCaptureFrameKM(IMG_VOID)
++{
++ if (PDumpSuspended())
++ {
++ return IMG_FALSE;
++ }
++ return gpfnDbgDrv->pfnIsCaptureFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2], IMG_FALSE);
++}
++
++PVRSRV_ERROR PDumpSetFrameKM(IMG_UINT32 ui32Frame)
++{
++ IMG_UINT32 ui32Stream;
++
++ for (ui32Stream = 0; ui32Stream < PDUMP_NUM_STREAMS; ui32Stream++)
++ {
++ if (gsDBGPdumpState.psStream[ui32Stream])
++ {
++ DbgSetFrame(gsDBGPdumpState.psStream[ui32Stream], ui32Frame);
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpGetFrameKM(IMG_PUINT32 pui32Frame)
++{
++ *pui32Frame = DbgGetFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2]);
++
++ return PVRSRV_OK;
++}
++
++
++
++static IMG_BOOL PDumpWriteString2(IMG_CHAR * pszString, IMG_UINT32 ui32Flags)
++{
++ return PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2], (IMG_UINT8 *) pszString, strlen(pszString), ui32Flags);
++}
++
++
++static IMG_BOOL PDumpWriteILock(PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32Count, IMG_UINT32 ui32Flags)
++{
++ IMG_UINT32 ui32Written = 0;
++ IMG_UINT32 ui32Off = 0;
++
++ if ((psStream == IMG_NULL) || PDumpSuspended() || ((ui32Flags & PDUMP_FLAGS_NEVER) != 0))
++ {
++ return IMG_TRUE;
++ }
++
++
++
++
++ if (psStream == gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2])
++ {
++ IMG_UINT32 ui32ParamOutPos = gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2]);
++
++ if (ui32ParamOutPos + ui32Count > MAX_FILE_SIZE)
++ {
++ if ((gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2] && PDumpWriteString2("\r\n-- Splitting pdump output file\r\n\r\n", ui32Flags)))
++ {
++ DbgSetMarker(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2], ui32ParamOutPos);
++ gsDBGPdumpState.ui32ParamFileNum++;
++ }
++ }
++ }
++
++
++ while (((IMG_UINT32) ui32Count > 0) && (ui32Written != 0xFFFFFFFF))
++ {
++ ui32Written = DbgWrite(psStream, &pui8Data[ui32Off], ui32Count, ui32Flags);
++
++
++
++
++ if (ui32Written == 0)
++ {
++ OSReleaseThreadQuanta();
++ }
++
++ if (ui32Written != 0xFFFFFFFF)
++ {
++ ui32Off += ui32Written;
++ ui32Count -= ui32Written;
++ }
++ }
++
++ if (ui32Written == 0xFFFFFFFF)
++ {
++ return IMG_FALSE;
++ }
++
++ return IMG_TRUE;
++}
++
++static IMG_VOID DbgSetFrame(PDBG_STREAM psStream, IMG_UINT32 ui32Frame)
++{
++ gpfnDbgDrv->pfnSetFrame(psStream, ui32Frame);
++}
++
++
++static IMG_UINT32 DbgGetFrame(PDBG_STREAM psStream)
++{
++ return gpfnDbgDrv->pfnGetFrame(psStream);
++}
++
++static IMG_VOID DbgSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
++{
++ gpfnDbgDrv->pfnSetMarker(psStream, ui32Marker);
++}
++
++static IMG_UINT32 DbgWrite(PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags)
++{
++ IMG_UINT32 ui32BytesWritten;
++
++ if ((ui32Flags & PDUMP_FLAGS_CONTINUOUS) != 0)
++ {
++
++
++ if (((psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) != 0) &&
++ (psStream->ui32Start == 0xFFFFFFFFUL) &&
++ (psStream->ui32End == 0xFFFFFFFFUL) &&
++ psStream->bInitPhaseComplete)
++ {
++ ui32BytesWritten = ui32BCount;
++ }
++ else
++ {
++ ui32BytesWritten = gpfnDbgDrv->pfnDBGDrivWrite2(psStream, pui8Data, ui32BCount, 1);
++ }
++ }
++ else
++ {
++ if (ui32Flags & PDUMP_FLAGS_LASTFRAME)
++ {
++ IMG_UINT32 ui32DbgFlags;
++
++ ui32DbgFlags = 0;
++ if (ui32Flags & PDUMP_FLAGS_RESETLFBUFFER)
++ {
++ ui32DbgFlags |= WRITELF_FLAGS_RESETBUF;
++ }
++
++ ui32BytesWritten = gpfnDbgDrv->pfnWriteLF(psStream, pui8Data, ui32BCount, 1, ui32DbgFlags);
++ }
++ else
++ {
++ ui32BytesWritten = gpfnDbgDrv->pfnWriteBINCM(psStream, pui8Data, ui32BCount, 1);
++ }
++ }
++
++ return ui32BytesWritten;
++}
++
++
++IMG_VOID PDumpSuspendKM(IMG_VOID)
++{
++ atomic_inc(&gsPDumpSuspended);
++}
++
++IMG_VOID PDumpResumeKM(IMG_VOID)
++{
++ atomic_dec(&gsPDumpSuspended);
++}
++
++#endif
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/private_data.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/private_data.h
+new file mode 100644
+index 0000000..0751765
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/private_data.h
+@@ -0,0 +1,67 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __INCLUDED_PRIVATE_DATA_H_
++#define __INCLUDED_PRIVATE_DATA_H_
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++#include <linux/list.h>
++#include <drm/drmP.h>
++#endif
++
++typedef struct
++{
++
++ IMG_UINT32 ui32OpenPID;
++
++#if defined(PVR_SECURE_FD_EXPORT)
++
++ IMG_HANDLE hKernelMemInfo;
++#endif
++
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++
++ struct list_head sDRMAuthListItem;
++
++ struct drm_file *psDRMFile;
++#endif
++
++#if defined(SUPPORT_MEMINFO_IDS)
++
++ IMG_UINT64 ui64Stamp;
++#endif
++
++
++ IMG_HANDLE hBlockAlloc;
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++ IMG_PVOID pPriv;
++#endif
++}
++PVRSRV_FILE_PRIVATE_DATA;
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/proc.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/proc.c
+new file mode 100644
+index 0000000..1ba2466
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/proc.c
+@@ -0,0 +1,970 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++
++#include "services_headers.h"
++
++#include "queue.h"
++#include "resman.h"
++#include "pvrmmap.h"
++#include "pvr_debug.h"
++#include "pvrversion.h"
++#include "proc.h"
++#include "perproc.h"
++#include "env_perproc.h"
++#include "linkage.h"
++
++#include "lists.h"
++DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE);
++
++
++static struct proc_dir_entry * dir;
++
++#ifndef PVR_PROC_USE_SEQ_FILE
++static off_t procDumpSysNodes(IMG_CHAR *buf, size_t size, off_t off);
++static off_t procDumpVersion(IMG_CHAR *buf, size_t size, off_t off);
++#endif
++
++
++static const IMG_CHAR PVRProcDirRoot[] = "pvr";
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++#define PVR_PROC_SEQ_START_TOKEN (void*)1
++static IMG_INT pvr_proc_open(struct inode *inode,struct file *file);
++static void *pvr_proc_seq_start (struct seq_file *m, loff_t *pos);
++static void pvr_proc_seq_stop (struct seq_file *m, void *v);
++static void *pvr_proc_seq_next (struct seq_file *m, void *v, loff_t *pos);
++static int pvr_proc_seq_show (struct seq_file *m, void *v);
++static ssize_t pvr_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos);
++
++static struct file_operations pvr_proc_operations =
++{
++ .open = pvr_proc_open,
++ .read = seq_read,
++ .write = pvr_proc_write,
++ .llseek = seq_lseek,
++ .release = seq_release,
++};
++
++static struct seq_operations pvr_proc_seq_operations =
++{
++ .start = pvr_proc_seq_start,
++ .next = pvr_proc_seq_next,
++ .stop = pvr_proc_seq_stop,
++ .show = pvr_proc_seq_show,
++};
++
++static struct proc_dir_entry* g_pProcQueue;
++static struct proc_dir_entry* g_pProcVersion;
++static struct proc_dir_entry* g_pProcSysNodes;
++
++#ifdef DEBUG
++static struct proc_dir_entry* g_pProcDebugLevel;
++#endif
++
++#ifdef PVR_MANUAL_POWER_CONTROL
++static struct proc_dir_entry* g_pProcPowerLevel;
++#endif
++
++
++static void ProcSeqShowVersion(struct seq_file *sfile,void* el);
++
++static void ProcSeqShowSysNodes(struct seq_file *sfile,void* el);
++static void* ProcSeqOff2ElementSysNodes(struct seq_file * sfile, loff_t off);
++
++#endif
++
++off_t printAppend(IMG_CHAR * buffer, size_t size, off_t off, const IMG_CHAR * format, ...)
++{
++ IMG_INT n;
++ size_t space = size - (size_t)off;
++ va_list ap;
++
++ PVR_ASSERT(space >= 0);
++
++ va_start (ap, format);
++
++ n = vsnprintf (buffer+off, space, format, ap);
++
++ va_end (ap);
++
++ if (n >= (IMG_INT)space || n < 0)
++ {
++
++ buffer[size - 1] = 0;
++ return (off_t)(size - 1);
++ }
++ else
++ {
++ return (off + (off_t)n);
++ }
++}
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++void* ProcSeq1ElementOff2Element(struct seq_file *sfile, loff_t off)
++{
++
++ if(!off)
++ return (void*)2;
++ return NULL;
++}
++
++
++void* ProcSeq1ElementHeaderOff2Element(struct seq_file *sfile, loff_t off)
++{
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++
++ if(off == 1)
++ return (void*)2;
++
++ return NULL;
++}
++
++
++static IMG_INT pvr_proc_open(struct inode *inode,struct file *file)
++{
++ IMG_INT ret = seq_open(file, &pvr_proc_seq_operations);
++
++ struct seq_file *seq = (struct seq_file*)file->private_data;
++ struct proc_dir_entry* pvr_proc_entry = PDE(inode);
++
++
++ seq->private = pvr_proc_entry->data;
++ return ret;
++}
++
++static ssize_t pvr_proc_write(struct file *file, const char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ struct inode *inode = file->f_path.dentry->d_inode;
++ struct proc_dir_entry * dp;
++
++ dp = PDE(inode);
++
++ if (!dp->write_proc)
++ return -EIO;
++
++ return dp->write_proc(file, buffer, count, dp->data);
++}
++
++
++static void *pvr_proc_seq_start (struct seq_file *proc_seq_file, loff_t *pos)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private;
++ if(handlers->startstop != NULL)
++ handlers->startstop(proc_seq_file, IMG_TRUE);
++ return handlers->off2element(proc_seq_file, *pos);
++}
++
++static void pvr_proc_seq_stop (struct seq_file *proc_seq_file, void *v)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private;
++ if(handlers->startstop != NULL)
++ handlers->startstop(proc_seq_file, IMG_FALSE);
++}
++
++static void *pvr_proc_seq_next (struct seq_file *proc_seq_file, void *v, loff_t *pos)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private;
++ (*pos)++;
++ if( handlers->next != NULL)
++ return handlers->next( proc_seq_file, v, *pos );
++ return handlers->off2element(proc_seq_file, *pos);
++}
++
++static int pvr_proc_seq_show (struct seq_file *proc_seq_file, void *v)
++{
++ PVR_PROC_SEQ_HANDLERS *handlers = (PVR_PROC_SEQ_HANDLERS*)proc_seq_file->private;
++ handlers->show( proc_seq_file,v );
++ return 0;
++}
++
++
++
++static struct proc_dir_entry* CreateProcEntryInDirSeq(
++ struct proc_dir_entry *pdir,
++ const IMG_CHAR * name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler,
++ write_proc_t whandler
++ )
++{
++
++ struct proc_dir_entry * file;
++ mode_t mode;
++
++ if (!dir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntryInDirSeq: cannot make proc entry /proc/%s/%s: no parent", PVRProcDirRoot, name));
++ return NULL;
++ }
++
++ mode = S_IFREG;
++
++ if (show_handler)
++ {
++ mode |= S_IRUGO;
++ }
++
++ if (whandler)
++ {
++ mode |= S_IWUSR;
++ }
++
++ file=create_proc_entry(name, mode, pdir);
++
++ if (file)
++ {
++ PVR_PROC_SEQ_HANDLERS *seq_handlers;
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
++ file->owner = THIS_MODULE;
++#endif
++
++ file->proc_fops = &pvr_proc_operations;
++ file->write_proc = whandler;
++
++
++ file->data = kmalloc(sizeof(PVR_PROC_SEQ_HANDLERS), GFP_KERNEL);
++ if(file->data)
++ {
++ seq_handlers = (PVR_PROC_SEQ_HANDLERS*)file->data;
++ seq_handlers->next = next_handler;
++ seq_handlers->show = show_handler;
++ seq_handlers->off2element = off2element_handler;
++ seq_handlers->startstop = startstop_handler;
++ seq_handlers->data = data;
++
++ return file;
++ }
++ }
++
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntryInDirSeq: cannot make proc entry /proc/%s/%s: no memory", PVRProcDirRoot, name));
++ return 0;
++}
++
++
++struct proc_dir_entry* CreateProcReadEntrySeq (
++ const IMG_CHAR * name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler
++ )
++{
++ return CreateProcEntrySeq(name,
++ data,
++ next_handler,
++ show_handler,
++ off2element_handler,
++ startstop_handler,
++ NULL);
++}
++
++struct proc_dir_entry* CreateProcEntrySeq (
++ const IMG_CHAR * name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler,
++ write_proc_t whandler
++ )
++{
++ return CreateProcEntryInDirSeq(
++ dir,
++ name,
++ data,
++ next_handler,
++ show_handler,
++ off2element_handler,
++ startstop_handler,
++ NULL
++ );
++}
++
++
++
++struct proc_dir_entry* CreatePerProcessProcEntrySeq (
++ const IMG_CHAR * name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler,
++ write_proc_t whandler
++ )
++{
++ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
++ IMG_UINT32 ui32PID;
++
++ if (!dir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntrySeq: /proc/%s doesn't exist", PVRProcDirRoot));
++ return NULL;
++ }
++
++ ui32PID = OSGetCurrentProcessIDKM();
++
++ psPerProc = PVRSRVPerProcessPrivateData(ui32PID);
++ if (!psPerProc)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntrySeq: no per process data"));
++
++ return NULL;
++ }
++
++ if (!psPerProc->psProcDir)
++ {
++ IMG_CHAR dirname[16];
++ IMG_INT ret;
++
++ ret = snprintf(dirname, sizeof(dirname), "%lu", ui32PID);
++
++ if (ret <=0 || ret >= (IMG_INT)sizeof(dirname))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't generate per process proc directory name \"%u\"", ui32PID));
++ return NULL;
++ }
++ else
++ {
++ psPerProc->psProcDir = proc_mkdir(dirname, dir);
++ if (!psPerProc->psProcDir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't create per process proc directory /proc/%s/%u",
++ PVRProcDirRoot, ui32PID));
++ return NULL;
++ }
++ }
++ }
++
++ return CreateProcEntryInDirSeq(psPerProc->psProcDir, name, data, next_handler,
++ show_handler,off2element_handler,startstop_handler,whandler);
++}
++
++
++IMG_VOID RemoveProcEntrySeq( struct proc_dir_entry* proc_entry )
++{
++ if (dir)
++ {
++ void* data = proc_entry->data ;
++ PVR_DPF((PVR_DBG_MESSAGE, "Removing /proc/%s/%s", PVRProcDirRoot, proc_entry->name));
++
++ remove_proc_entry(proc_entry->name, dir);
++ if( data)
++ kfree( data );
++
++ }
++}
++
++IMG_VOID RemovePerProcessProcEntrySeq(struct proc_dir_entry* proc_entry)
++{
++ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
++
++ psPerProc = LinuxTerminatingProcessPrivateData();
++ if (!psPerProc)
++ {
++ psPerProc = PVRSRVFindPerProcessPrivateData();
++ if (!psPerProc)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: can't "
++ "remove %s, no per process data", proc_entry->name));
++ return;
++ }
++ }
++
++ if (psPerProc->psProcDir)
++ {
++ void* data = proc_entry->data ;
++ PVR_DPF((PVR_DBG_MESSAGE, "Removing proc entry %s from %s", proc_entry->name, psPerProc->psProcDir->name));
++
++ remove_proc_entry(proc_entry->name, psPerProc->psProcDir);
++ if(data)
++ kfree( data );
++ }
++}
++
++#endif
++
++static IMG_INT pvr_read_proc(IMG_CHAR *page, IMG_CHAR **start, off_t off,
++ IMG_INT count, IMG_INT *eof, IMG_VOID *data)
++{
++ pvr_read_proc_t *pprn = (pvr_read_proc_t *)data;
++
++ off_t len = pprn (page, (size_t)count, off);
++
++ if (len == END_OF_FILE)
++ {
++ len = 0;
++ *eof = 1;
++ }
++ else if (!len)
++ {
++ *start = (IMG_CHAR *) 0;
++ }
++ else
++ {
++ *start = (IMG_CHAR *) 1;
++ }
++
++ return len;
++}
++
++
++static IMG_INT CreateProcEntryInDir(struct proc_dir_entry *pdir, const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data)
++{
++ struct proc_dir_entry * file;
++ mode_t mode;
++
++ if (!pdir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntryInDir: parent directory doesn't exist"));
++
++ return -ENOMEM;
++ }
++
++ mode = S_IFREG;
++
++ if (rhandler)
++ {
++ mode |= S_IRUGO;
++ }
++
++ if (whandler)
++ {
++ mode |= S_IWUSR;
++ }
++
++ file = create_proc_entry(name, mode, pdir);
++
++ if (file)
++ {
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
++ file->owner = THIS_MODULE;
++#endif
++ file->read_proc = rhandler;
++ file->write_proc = whandler;
++ file->data = data;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "Created proc entry %s in %s", name, pdir->name));
++
++ return 0;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntry: cannot create proc entry %s in %s", name, pdir->name));
++
++ return -ENOMEM;
++}
++
++
++IMG_INT CreateProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data)
++{
++ return CreateProcEntryInDir(dir, name, rhandler, whandler, data);
++}
++
++
++IMG_INT CreatePerProcessProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data)
++{
++ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
++ IMG_UINT32 ui32PID;
++
++ if (!dir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: /proc/%s doesn't exist", PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++
++ ui32PID = OSGetCurrentProcessIDKM();
++
++ psPerProc = PVRSRVPerProcessPrivateData(ui32PID);
++ if (!psPerProc)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: no per process data"));
++
++ return -ENOMEM;
++ }
++
++ if (!psPerProc->psProcDir)
++ {
++ IMG_CHAR dirname[16];
++ IMG_INT ret;
++
++ ret = snprintf(dirname, sizeof(dirname), "%lu", ui32PID);
++
++ if (ret <=0 || ret >= (IMG_INT)sizeof(dirname))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't generate per process proc directory name \"%u\"", ui32PID));
++
++ return -ENOMEM;
++ }
++ else
++ {
++ psPerProc->psProcDir = proc_mkdir(dirname, dir);
++ if (!psPerProc->psProcDir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: couldn't create per process proc directory /proc/%s/%u", PVRProcDirRoot, ui32PID));
++
++ return -ENOMEM;
++ }
++ }
++ }
++
++ return CreateProcEntryInDir(psPerProc->psProcDir, name, rhandler, whandler, data);
++}
++
++
++IMG_INT CreateProcReadEntry(const IMG_CHAR * name, pvr_read_proc_t handler)
++{
++ struct proc_dir_entry * file;
++
++ if (!dir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcReadEntry: cannot make proc entry /proc/%s/%s: no parent", PVRProcDirRoot, name));
++
++ return -ENOMEM;
++ }
++
++ file = create_proc_read_entry (name, S_IFREG | S_IRUGO, dir, pvr_read_proc, (IMG_VOID *)handler);
++
++ if (file)
++ {
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
++ file->owner = THIS_MODULE;
++#endif
++ return 0;
++ }
++
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcReadEntry: cannot make proc entry /proc/%s/%s: no memory", PVRProcDirRoot, name));
++
++ return -ENOMEM;
++}
++
++
++IMG_INT CreateProcEntries(IMG_VOID)
++{
++ dir = proc_mkdir (PVRProcDirRoot, NULL);
++
++ if (!dir)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: cannot make /proc/%s directory", PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_pProcQueue = CreateProcReadEntrySeq("queue", NULL, NULL, ProcSeqShowQueue, ProcSeqOff2ElementQueue, NULL);
++ g_pProcVersion = CreateProcReadEntrySeq("version", NULL, NULL, ProcSeqShowVersion, ProcSeq1ElementHeaderOff2Element, NULL);
++ g_pProcSysNodes = CreateProcReadEntrySeq("nodes", NULL, NULL, ProcSeqShowSysNodes, ProcSeqOff2ElementSysNodes, NULL);
++
++ if(!g_pProcQueue || !g_pProcVersion || !g_pProcSysNodes)
++#else
++ if (CreateProcReadEntry("queue", QueuePrintQueues) ||
++ CreateProcReadEntry("version", procDumpVersion) ||
++ CreateProcReadEntry("nodes", procDumpSysNodes))
++#endif
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/%s files", PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++
++
++#ifdef DEBUG
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_pProcDebugLevel = CreateProcEntrySeq("debug_level", NULL, NULL,
++ ProcSeqShowDebugLevel,
++ ProcSeq1ElementOff2Element, NULL,
++ PVRDebugProcSetLevel);
++ if(!g_pProcDebugLevel)
++#else
++ if (CreateProcEntry ("debug_level", PVRDebugProcGetLevel, PVRDebugProcSetLevel, 0))
++#endif
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/%s/debug_level", PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++
++#ifdef PVR_MANUAL_POWER_CONTROL
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_pProcPowerLevel = CreateProcEntrySeq("power_control", NULL, NULL,
++ ProcSeqShowPowerLevel,
++ ProcSeq1ElementOff2Element, NULL,
++ PVRProcSetPowerLevel);
++ if(!g_pProcPowerLevel)
++#else
++ if (CreateProcEntry("power_control", PVRProcGetPowerLevel, PVRProcSetPowerLevel, 0))
++#endif
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/%s/power_control", PVRProcDirRoot));
++
++ return -ENOMEM;
++ }
++#endif
++#endif
++
++ return 0;
++}
++
++
++IMG_VOID RemoveProcEntry(const IMG_CHAR * name)
++{
++ if (dir)
++ {
++ remove_proc_entry(name, dir);
++ PVR_DPF((PVR_DBG_MESSAGE, "Removing /proc/%s/%s", PVRProcDirRoot, name));
++ }
++}
++
++
++IMG_VOID RemovePerProcessProcEntry(const IMG_CHAR *name)
++{
++ PVRSRV_ENV_PER_PROCESS_DATA *psPerProc;
++
++ psPerProc = LinuxTerminatingProcessPrivateData();
++ if (!psPerProc)
++ {
++ psPerProc = PVRSRVFindPerProcessPrivateData();
++ if (!psPerProc)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "CreatePerProcessProcEntries: can't "
++ "remove %s, no per process data", name));
++ return;
++ }
++ }
++
++ if (psPerProc->psProcDir)
++ {
++ remove_proc_entry(name, psPerProc->psProcDir);
++
++ PVR_DPF((PVR_DBG_MESSAGE, "Removing proc entry %s from %s", name, psPerProc->psProcDir->name));
++ }
++}
++
++
++IMG_VOID RemovePerProcessProcDir(PVRSRV_ENV_PER_PROCESS_DATA *psPerProc)
++{
++ if (psPerProc->psProcDir)
++ {
++ while (psPerProc->psProcDir->subdir)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "Belatedly removing /proc/%s/%s/%s", PVRProcDirRoot, psPerProc->psProcDir->name, psPerProc->psProcDir->subdir->name));
++
++ RemoveProcEntry(psPerProc->psProcDir->subdir->name);
++ }
++ RemoveProcEntry(psPerProc->psProcDir->name);
++ }
++}
++
++IMG_VOID RemoveProcEntries(IMG_VOID)
++{
++#ifdef DEBUG
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq( g_pProcDebugLevel );
++#else
++ RemoveProcEntry("debug_level");
++#endif
++
++#ifdef PVR_MANUAL_POWER_CONTROL
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq( g_pProcPowerLevel );
++#else
++ RemoveProcEntry("power_control");
++#endif
++#endif
++
++#endif
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq(g_pProcQueue);
++ RemoveProcEntrySeq(g_pProcVersion);
++ RemoveProcEntrySeq(g_pProcSysNodes);
++#else
++ RemoveProcEntry("queue");
++ RemoveProcEntry("version");
++ RemoveProcEntry("nodes");
++#endif
++
++ while (dir->subdir)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "Belatedly removing /proc/%s/%s", PVRProcDirRoot, dir->subdir->name));
++
++ RemoveProcEntry(dir->subdir->name);
++ }
++
++ remove_proc_entry(PVRProcDirRoot, NULL);
++}
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void ProcSeqShowVersion(struct seq_file *sfile,void* el)
++{
++ SYS_DATA * psSysData;
++ IMG_CHAR *pszSystemVersionString = "None";
++
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++ seq_printf( sfile,
++ "Version %s (%s) %s\n",
++ PVRVERSION_STRING,
++ PVR_BUILD_TYPE, PVR_BUILD_DIR);
++ return;
++ }
++
++ SysAcquireData(&psSysData);
++
++ if(psSysData->pszVersionString)
++ {
++ pszSystemVersionString = psSysData->pszVersionString;
++ }
++
++ seq_printf( sfile, "System Version String: %s\n", pszSystemVersionString);
++}
++
++#else
++
++static off_t procDumpVersion(IMG_CHAR *buf, size_t size, off_t off)
++{
++ SYS_DATA *psSysData;
++
++ if (off == 0)
++ {
++ return printAppend(buf, size, 0,
++ "Version %s (%s) %s\n",
++ PVRVERSION_STRING,
++ PVR_BUILD_TYPE, PVR_BUILD_DIR);
++ }
++
++ SysAcquireData(&psSysData)
++
++ if (off == 1)
++ {
++ IMG_CHAR *pszSystemVersionString = "None";
++
++ if(psSysData->pszVersionString)
++ {
++ pszSystemVersionString = psSysData->pszVersionString;
++ }
++
++ if(strlen(pszSystemVersionString)
++ + strlen("System Version String: \n")
++ + 1 > size)
++ {
++ return 0;
++ }
++ return printAppend(buf, size, 0,
++ "System Version String: %s\n",
++ pszSystemVersionString);
++ }
++
++ return END_OF_FILE;
++}
++
++#endif
++
++
++static const IMG_CHAR *deviceTypeToString(PVRSRV_DEVICE_TYPE deviceType)
++{
++ switch (deviceType)
++ {
++ default:
++ {
++ static IMG_CHAR text[10];
++
++ sprintf(text, "?%x", (IMG_UINT)deviceType);
++
++ return text;
++ }
++ }
++}
++
++
++static const IMG_CHAR *deviceClassToString(PVRSRV_DEVICE_CLASS deviceClass)
++{
++ switch (deviceClass)
++ {
++ case PVRSRV_DEVICE_CLASS_3D:
++ {
++ return "3D";
++ }
++ case PVRSRV_DEVICE_CLASS_DISPLAY:
++ {
++ return "display";
++ }
++ case PVRSRV_DEVICE_CLASS_BUFFER:
++ {
++ return "buffer";
++ }
++ default:
++ {
++ static IMG_CHAR text[10];
++
++ sprintf(text, "?%x", (IMG_UINT)deviceClass);
++ return text;
++ }
++ }
++}
++
++IMG_VOID* DecOffPsDev_AnyVaCb(PVRSRV_DEVICE_NODE *psNode, va_list va)
++{
++ off_t *pOff = va_arg(va, off_t*);
++ if (--(*pOff))
++ {
++ return IMG_NULL;
++ }
++ else
++ {
++ return psNode;
++ }
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void ProcSeqShowSysNodes(struct seq_file *sfile,void* el)
++{
++ SYS_DATA * psSysData;
++ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE*)el;
++
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++ seq_printf( sfile,
++ "Registered nodes\n"
++ "Addr Type Class Index Ref pvDev Size Res\n");
++ return;
++ }
++
++ SysAcquireData(&psSysData);
++
++ seq_printf( sfile,
++ "%p %-8s %-8s %4d %2lu %p %3lu %p\n",
++ psDevNode,
++ deviceTypeToString(psDevNode->sDevId.eDeviceType),
++ deviceClassToString(psDevNode->sDevId.eDeviceClass),
++ psDevNode->sDevId.eDeviceClass,
++ psDevNode->ui32RefCount,
++ psDevNode->pvDevice,
++ psDevNode->ui32pvDeviceSize,
++ psDevNode->hResManContext);
++
++}
++
++static void* ProcSeqOff2ElementSysNodes(struct seq_file * sfile, loff_t off)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_DEVICE_NODE *psDevNode;
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ SysAcquireData(&psSysData);
++
++
++ psDevNode = (PVRSRV_DEVICE_NODE*)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ DecOffPsDev_AnyVaCb,
++ &off);
++
++
++ return (void*)psDevNode;
++}
++
++#else
++
++static
++off_t procDumpSysNodes(IMG_CHAR *buf, size_t size, off_t off)
++{
++ SYS_DATA *psSysData;
++ PVRSRV_DEVICE_NODE *psDevNode;
++ off_t len;
++
++
++ if (size < 80)
++ {
++ return 0;
++ }
++
++ if (off == 0)
++ {
++ return printAppend(buf, size, 0,
++ "Registered nodes\n"
++ "Addr Type Class Index Ref pvDev Size Res\n");
++ }
++
++ SysAcquireData(&psSysData);
++
++
++ psDevNode = (PVRSRV_DEVICE_NODE*)
++ List_PVRSRV_DEVICE_NODE_Any_va(psSysData->psDeviceNodeList,
++ DecOffPsDev_AnyVaCb,
++ &off);
++
++ if (!psDevNode)
++ {
++ return END_OF_FILE;
++ }
++
++ len = printAppend(buf, size, 0,
++ "%p %-8s %-8s %4d %2lu %p %3lu %p\n",
++ psDevNode,
++ deviceTypeToString(psDevNode->sDevId.eDeviceType),
++ deviceClassToString(psDevNode->sDevId.eDeviceClass),
++ psDevNode->sDevId.eDeviceClass,
++ psDevNode->ui32RefCount,
++ psDevNode->pvDevice,
++ psDevNode->ui32pvDeviceSize,
++ psDevNode->hResManContext);
++ return (len);
++}
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/proc.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/proc.h
+new file mode 100644
+index 0000000..3200961
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/proc.h
+@@ -0,0 +1,115 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __SERVICES_PROC_H__
++#define __SERVICES_PROC_H__
++
++#include <asm/system.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++
++#define END_OF_FILE (off_t) -1
++
++typedef off_t (pvr_read_proc_t)(IMG_CHAR *, size_t, off_t);
++
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++#define PVR_PROC_SEQ_START_TOKEN (void*)1
++typedef void* (pvr_next_proc_seq_t)(struct seq_file *,void*,loff_t);
++typedef void* (pvr_off2element_proc_seq_t)(struct seq_file *, loff_t);
++typedef void (pvr_show_proc_seq_t)(struct seq_file *,void*);
++typedef void (pvr_startstop_proc_seq_t)(struct seq_file *, IMG_BOOL start);
++
++typedef struct _PVR_PROC_SEQ_HANDLERS_ {
++ pvr_next_proc_seq_t *next;
++ pvr_show_proc_seq_t *show;
++ pvr_off2element_proc_seq_t *off2element;
++ pvr_startstop_proc_seq_t *startstop;
++ IMG_VOID *data;
++} PVR_PROC_SEQ_HANDLERS;
++
++
++void* ProcSeq1ElementOff2Element(struct seq_file *sfile, loff_t off);
++
++void* ProcSeq1ElementHeaderOff2Element(struct seq_file *sfile, loff_t off);
++
++
++#endif
++
++off_t printAppend(IMG_CHAR * buffer, size_t size, off_t off, const IMG_CHAR * format, ...)
++ __attribute__((format(printf, 4, 5)));
++
++IMG_INT CreateProcEntries(IMG_VOID);
++
++IMG_INT CreateProcReadEntry (const IMG_CHAR * name, pvr_read_proc_t handler);
++
++IMG_INT CreateProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data);
++
++IMG_INT CreatePerProcessProcEntry(const IMG_CHAR * name, read_proc_t rhandler, write_proc_t whandler, IMG_VOID *data);
++
++IMG_VOID RemoveProcEntry(const IMG_CHAR * name);
++
++IMG_VOID RemovePerProcessProcEntry(const IMG_CHAR * name);
++
++IMG_VOID RemoveProcEntries(IMG_VOID);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++struct proc_dir_entry* CreateProcReadEntrySeq (
++ const IMG_CHAR* name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler
++ );
++
++struct proc_dir_entry* CreateProcEntrySeq (
++ const IMG_CHAR* name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler,
++ write_proc_t whandler
++ );
++
++struct proc_dir_entry* CreatePerProcessProcEntrySeq (
++ const IMG_CHAR* name,
++ IMG_VOID* data,
++ pvr_next_proc_seq_t next_handler,
++ pvr_show_proc_seq_t show_handler,
++ pvr_off2element_proc_seq_t off2element_handler,
++ pvr_startstop_proc_seq_t startstop_handler,
++ write_proc_t whandler
++ );
++
++
++IMG_VOID RemoveProcEntrySeq(struct proc_dir_entry* proc_entry);
++IMG_VOID RemovePerProcessProcEntrySeq(struct proc_dir_entry* proc_entry);
++
++#endif
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_bridge_k.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_bridge_k.c
+new file mode 100644
+index 0000000..e4e4946
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_bridge_k.c
+@@ -0,0 +1,651 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "img_defs.h"
++#include "services.h"
++#include "pvr_bridge.h"
++#include "perproc.h"
++#include "mutex.h"
++#include "syscommon.h"
++#include "pvr_debug.h"
++#include "proc.h"
++#include "private_data.h"
++#include "linkage.h"
++#include "pvr_bridge_km.h"
++
++#if defined(SUPPORT_DRI_DRM)
++#include <drm/drmP.h>
++#include "pvr_drm.h"
++#if defined(PVR_SECURE_DRM_AUTH_EXPORT)
++#include "env_perproc.h"
++#endif
++#endif
++
++#if defined(SUPPORT_VGX)
++#include "vgx_bridge.h"
++#endif
++
++#if defined(SUPPORT_SGX)
++#include "sgx_bridge.h"
++#endif
++
++#include "bridged_pvr_bridge.h"
++
++#ifdef MODULE_TEST
++#include "pvr_test_bridge.h"
++#include "kern_test.h"
++#endif
++
++
++#if defined(SUPPORT_DRI_DRM)
++#define PRIVATE_DATA(pFile) ((pFile)->driver_priv)
++#else
++#define PRIVATE_DATA(pFile) ((pFile)->private_data)
++#endif
++
++#if defined(DEBUG_BRIDGE_KM)
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++static struct proc_dir_entry *g_ProcBridgeStats =0;
++static void* ProcSeqNextBridgeStats(struct seq_file *sfile,void* el,loff_t off);
++static void ProcSeqShowBridgeStats(struct seq_file *sfile,void* el);
++static void* ProcSeqOff2ElementBridgeStats(struct seq_file * sfile, loff_t off);
++static void ProcSeqStartstopBridgeStats(struct seq_file *sfile,IMG_BOOL start);
++
++#else
++static off_t printLinuxBridgeStats(IMG_CHAR * buffer, size_t size, off_t off);
++#endif
++
++#endif
++
++extern PVRSRV_LINUX_MUTEX gPVRSRVLock;
++
++#if defined(SUPPORT_MEMINFO_IDS)
++static IMG_UINT64 ui64Stamp;
++#endif
++
++PVRSRV_ERROR
++LinuxBridgeInit(IMG_VOID)
++{
++#if defined(DEBUG_BRIDGE_KM)
++ {
++ IMG_INT iStatus;
++#ifdef PVR_PROC_USE_SEQ_FILE
++ g_ProcBridgeStats = CreateProcReadEntrySeq(
++ "bridge_stats",
++ NULL,
++ ProcSeqNextBridgeStats,
++ ProcSeqShowBridgeStats,
++ ProcSeqOff2ElementBridgeStats,
++ ProcSeqStartstopBridgeStats
++ );
++ iStatus = !g_ProcBridgeStats ? -1 : 0;
++#else
++ iStatus = CreateProcReadEntry("bridge_stats", printLinuxBridgeStats);
++#endif
++
++ if(iStatus!=0)
++ {
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++ }
++#endif
++ return CommonBridgeInit();
++}
++
++IMG_VOID
++LinuxBridgeDeInit(IMG_VOID)
++{
++#if defined(DEBUG_BRIDGE_KM)
++#ifdef PVR_PROC_USE_SEQ_FILE
++ RemoveProcEntrySeq(g_ProcBridgeStats);
++#else
++ RemoveProcEntry("bridge_stats");
++#endif
++#endif
++}
++
++#if defined(DEBUG_BRIDGE_KM)
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++
++static void ProcSeqStartstopBridgeStats(struct seq_file *sfile,IMG_BOOL start)
++{
++ if(start)
++ {
++ LinuxLockMutex(&gPVRSRVLock);
++ }
++ else
++ {
++ LinuxUnLockMutex(&gPVRSRVLock);
++ }
++}
++
++
++static void* ProcSeqOff2ElementBridgeStats(struct seq_file *sfile, loff_t off)
++{
++ if(!off)
++ {
++ return PVR_PROC_SEQ_START_TOKEN;
++ }
++
++ if(off > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
++ {
++ return (void*)0;
++ }
++
++
++ return (void*)&g_BridgeDispatchTable[off-1];
++}
++
++static void* ProcSeqNextBridgeStats(struct seq_file *sfile,void* el,loff_t off)
++{
++ return ProcSeqOff2ElementBridgeStats(sfile,off);
++}
++
++
++static void ProcSeqShowBridgeStats(struct seq_file *sfile,void* el)
++{
++ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry = ( PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY*)el;
++
++ if(el == PVR_PROC_SEQ_START_TOKEN)
++ {
++ seq_printf(sfile,
++ "Total ioctl call count = %lu\n"
++ "Total number of bytes copied via copy_from_user = %lu\n"
++ "Total number of bytes copied via copy_to_user = %lu\n"
++ "Total number of bytes copied via copy_*_user = %lu\n\n"
++ "%-45s | %-40s | %10s | %20s | %10s\n",
++ g_BridgeGlobalStats.ui32IOCTLCount,
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes,
++ g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++ "Bridge Name",
++ "Wrapper Function",
++ "Call Count",
++ "copy_from_user Bytes",
++ "copy_to_user Bytes"
++ );
++ return;
++ }
++
++ seq_printf(sfile,
++ "%-45s %-40s %-10lu %-20lu %-10lu\n",
++ psEntry->pszIOCName,
++ psEntry->pszFunctionName,
++ psEntry->ui32CallCount,
++ psEntry->ui32CopyFromUserTotalBytes,
++ psEntry->ui32CopyToUserTotalBytes);
++}
++
++#else
++
++static off_t
++printLinuxBridgeStats(IMG_CHAR * buffer, size_t count, off_t off)
++{
++ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry;
++ off_t Ret;
++
++ LinuxLockMutex(&gPVRSRVLock);
++
++ if(!off)
++ {
++ if(count < 500)
++ {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++ Ret = printAppend(buffer, count, 0,
++ "Total ioctl call count = %lu\n"
++ "Total number of bytes copied via copy_from_user = %lu\n"
++ "Total number of bytes copied via copy_to_user = %lu\n"
++ "Total number of bytes copied via copy_*_user = %lu\n\n"
++ "%-45s | %-40s | %10s | %20s | %10s\n",
++ g_BridgeGlobalStats.ui32IOCTLCount,
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes,
++ g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++ "Bridge Name",
++ "Wrapper Function",
++ "Call Count",
++ "copy_from_user Bytes",
++ "copy_to_user Bytes"
++ );
++ goto unlock_and_return;
++ }
++
++ if(off > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
++ {
++ Ret = END_OF_FILE;
++ goto unlock_and_return;
++ }
++
++ if(count < 300)
++ {
++ Ret = 0;
++ goto unlock_and_return;
++ }
++
++ psEntry = &g_BridgeDispatchTable[off-1];
++ Ret = printAppend(buffer, count, 0,
++ "%-45s %-40s %-10lu %-20lu %-10lu\n",
++ psEntry->pszIOCName,
++ psEntry->pszFunctionName,
++ psEntry->ui32CallCount,
++ psEntry->ui32CopyFromUserTotalBytes,
++ psEntry->ui32CopyToUserTotalBytes);
++
++unlock_and_return:
++ LinuxUnLockMutex(&gPVRSRVLock);
++ return Ret;
++}
++#endif
++#endif
++
++
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT
++PVRSRV_BridgeDispatchKM(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++#else
++IMG_INT32
++PVRSRV_BridgeDispatchKM(struct file *pFile, IMG_UINT unref__ ioctlCmd, IMG_UINT32 arg)
++#endif
++{
++ IMG_UINT32 cmd;
++#if !defined(SUPPORT_DRI_DRM)
++ PVRSRV_BRIDGE_PACKAGE *psBridgePackageUM = (PVRSRV_BRIDGE_PACKAGE *)arg;
++ PVRSRV_BRIDGE_PACKAGE sBridgePackageKM;
++#endif
++ PVRSRV_BRIDGE_PACKAGE *psBridgePackageKM;
++ IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++ IMG_INT err = -EFAULT;
++
++ LinuxLockMutex(&gPVRSRVLock);
++
++#if defined(SUPPORT_DRI_DRM)
++ PVR_UNREFERENCED_PARAMETER(dev);
++
++ psBridgePackageKM = (PVRSRV_BRIDGE_PACKAGE *)arg;
++ PVR_ASSERT(psBridgePackageKM != IMG_NULL);
++#else
++ PVR_UNREFERENCED_PARAMETER(ioctlCmd);
++
++ psBridgePackageKM = &sBridgePackageKM;
++
++ if(!OSAccessOK(PVR_VERIFY_WRITE,
++ psBridgePackageUM,
++ sizeof(PVRSRV_BRIDGE_PACKAGE)))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Received invalid pointer to function arguments",
++ __FUNCTION__));
++
++ goto unlock_and_return;
++ }
++
++
++ if(OSCopyFromUser(IMG_NULL,
++ psBridgePackageKM,
++ psBridgePackageUM,
++ sizeof(PVRSRV_BRIDGE_PACKAGE))
++ != PVRSRV_OK)
++ {
++ goto unlock_and_return;
++ }
++#endif
++
++ cmd = psBridgePackageKM->ui32BridgeID;
++
++#if defined(MODULE_TEST)
++ switch (cmd)
++ {
++ case PVRSRV_BRIDGE_SERVICES_TEST_MEM1:
++ {
++ PVRSRV_ERROR eError = MemTest1();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++ case PVRSRV_BRIDGE_SERVICES_TEST_MEM2:
++ {
++ PVRSRV_ERROR eError = MemTest2();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_RESOURCE:
++ {
++ PVRSRV_ERROR eError = ResourceTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_EVENTOBJECT:
++ {
++ PVRSRV_ERROR eError = EventObjectTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_MEMMAPPING:
++ {
++ PVRSRV_ERROR eError = MemMappingTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_PROCESSID:
++ {
++ PVRSRV_ERROR eError = ProcessIDTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_CLOCKUSWAITUS:
++ {
++ PVRSRV_ERROR eError = ClockusWaitusTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_TIMER:
++ {
++ PVRSRV_ERROR eError = TimerTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_PRIVSRV:
++ {
++ PVRSRV_ERROR eError = PrivSrvTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++ case PVRSRV_BRIDGE_SERVICES_TEST_COPYDATA:
++ {
++ IMG_UINT32 ui32PID;
++ PVRSRV_PER_PROCESS_DATA *psPerProc;
++ PVRSRV_ERROR eError;
++
++ ui32PID = OSGetCurrentProcessIDKM();
++
++ PVRSRVTrace("PVRSRV_BRIDGE_SERVICES_TEST_COPYDATA %d", ui32PID);
++
++ psPerProc = PVRSRVPerProcessData(ui32PID);
++
++ eError = CopyDataTest(psBridgePackageKM->pvParamIn, psBridgePackageKM->pvParamOut, psPerProc);
++
++ *(PVRSRV_ERROR*)psBridgePackageKM->pvParamOut = eError;
++ err = 0;
++ goto unlock_and_return;
++ }
++
++
++ case PVRSRV_BRIDGE_SERVICES_TEST_POWERMGMT:
++ {
++ PVRSRV_ERROR eError = PowerMgmtTest();
++ if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
++ {
++ PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
++ pReturn->eError = eError;
++ }
++ }
++ err = 0;
++ goto unlock_and_return;
++
++ }
++#endif
++
++ if(cmd != PVRSRV_BRIDGE_CONNECT_SERVICES)
++ {
++ PVRSRV_ERROR eError;
++
++ eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
++ (IMG_PVOID *)&psPerProc,
++ psBridgePackageKM->hKernelServices,
++ PVRSRV_HANDLE_TYPE_PERPROC_DATA);
++ if(eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid kernel services handle (%d)",
++ __FUNCTION__, eError));
++ goto unlock_and_return;
++ }
++
++ if(psPerProc->ui32PID != ui32PID)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Process %d tried to access data "
++ "belonging to process %d", __FUNCTION__, ui32PID,
++ psPerProc->ui32PID));
++ goto unlock_and_return;
++ }
++ }
++ else
++ {
++
++ psPerProc = PVRSRVPerProcessData(ui32PID);
++ if(psPerProc == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BridgeDispatchKM: "
++ "Couldn't create per-process data area"));
++ goto unlock_and_return;
++ }
++ }
++
++ psBridgePackageKM->ui32BridgeID = PVRSRV_GET_BRIDGE_ID(psBridgePackageKM->ui32BridgeID);
++
++#if defined(PVR_SECURE_FD_EXPORT)
++ switch(cmd)
++ {
++ case PVRSRV_BRIDGE_EXPORT_DEVICEMEM:
++ {
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
++
++ if(psPrivateData->hKernelMemInfo)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Can only export one MemInfo "
++ "per file descriptor", __FUNCTION__));
++ err = -EINVAL;
++ goto unlock_and_return;
++ }
++ break;
++ }
++
++ case PVRSRV_BRIDGE_MAP_DEV_MEMORY:
++ {
++ PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *psMapDevMemIN =
++ (PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *)psBridgePackageKM->pvParamIn;
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
++
++ if(!psPrivateData->hKernelMemInfo)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: File descriptor has no "
++ "associated MemInfo handle", __FUNCTION__));
++ err = -EINVAL;
++ goto unlock_and_return;
++ }
++
++ psMapDevMemIN->hKernelMemInfo = psPrivateData->hKernelMemInfo;
++ break;
++ }
++
++ default:
++ {
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
++
++ if(psPrivateData->hKernelMemInfo)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Import/Export handle tried "
++ "to use privileged service", __FUNCTION__));
++ goto unlock_and_return;
++ }
++ break;
++ }
++ }
++#endif
++#if defined(SUPPORT_DRI_DRM) && defined(PVR_SECURE_DRM_AUTH_EXPORT)
++ switch(cmd)
++ {
++ case PVRSRV_BRIDGE_MAP_DEV_MEMORY:
++ case PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY:
++ {
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
++ int authenticated = pFile->authenticated;
++ PVRSRV_ENV_PER_PROCESS_DATA *psEnvPerProc;
++
++ if (authenticated)
++ {
++ break;
++ }
++
++
++ psEnvPerProc = (PVRSRV_ENV_PER_PROCESS_DATA *)PVRSRVProcessPrivateData(psPerProc);
++ if (psEnvPerProc == IMG_NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Process private data not allocated", __FUNCTION__));
++ err = -EFAULT;
++ goto unlock_and_return;
++ }
++
++ list_for_each_entry(psPrivateData, &psEnvPerProc->sDRMAuthListHead, sDRMAuthListItem)
++ {
++ struct drm_file *psDRMFile = psPrivateData->psDRMFile;
++
++ if (pFile->master == psDRMFile->master)
++ {
++ authenticated |= psDRMFile->authenticated;
++ if (authenticated)
++ {
++ break;
++ }
++ }
++ }
++
++ if (!authenticated)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Not authenticated for mapping device or device class memory", __FUNCTION__));
++ err = -EPERM;
++ goto unlock_and_return;
++ }
++ break;
++ }
++ default:
++ break;
++ }
++#endif
++
++ err = BridgedDispatchKM(psPerProc, psBridgePackageKM);
++ if(err != PVRSRV_OK)
++ goto unlock_and_return;
++
++ switch(cmd)
++ {
++#if defined(PVR_SECURE_FD_EXPORT)
++ case PVRSRV_BRIDGE_EXPORT_DEVICEMEM:
++ {
++ PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *psExportDeviceMemOUT =
++ (PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *)psBridgePackageKM->pvParamOut;
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
++
++ psPrivateData->hKernelMemInfo = psExportDeviceMemOUT->hMemInfo;
++#if defined(SUPPORT_MEMINFO_IDS)
++ psExportDeviceMemOUT->ui64Stamp = psPrivateData->ui64Stamp = ++ui64Stamp;
++#endif
++ break;
++ }
++#endif
++
++#if defined(SUPPORT_MEMINFO_IDS)
++ case PVRSRV_BRIDGE_MAP_DEV_MEMORY:
++ {
++ PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *psMapDeviceMemoryOUT =
++ (PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *)psBridgePackageKM->pvParamOut;
++ PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
++ psMapDeviceMemoryOUT->sDstClientMemInfo.ui64Stamp = psPrivateData->ui64Stamp;
++ break;
++ }
++
++ case PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY:
++ {
++ PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *psDeviceClassMemoryOUT =
++ (PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *)psBridgePackageKM->pvParamOut;
++ psDeviceClassMemoryOUT->sClientMemInfo.ui64Stamp = ++ui64Stamp;
++ break;
++ }
++#endif
++
++ default:
++ break;
++ }
++
++unlock_and_return:
++ LinuxUnLockMutex(&gPVRSRVLock);
++ return err;
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_debug.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_debug.c
+new file mode 100644
+index 0000000..dbd54b1
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_debug.c
+@@ -0,0 +1,426 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <asm/io.h>
++#include <asm/uaccess.h>
++#include <linux/kernel.h>
++#include <linux/hardirq.h>
++#include <linux/module.h>
++#include <linux/spinlock.h>
++#include <linux/tty.h>
++#include <stdarg.h>
++#include "img_types.h"
++#include "servicesext.h"
++#include "pvr_debug.h"
++#include "proc.h"
++#include "mutex.h"
++#include "linkage.h"
++
++#if defined(PVRSRV_NEED_PVR_DPF)
++
++#define PVR_MAX_FILEPATH_LEN 256
++
++static IMG_UINT32 gPVRDebugLevel = DBGPRIV_WARNING;
++
++#endif
++
++#define PVR_MAX_MSG_LEN PVR_MAX_DEBUG_MESSAGE_LEN
++
++static IMG_CHAR gszBufferNonIRQ[PVR_MAX_MSG_LEN + 1];
++
++static IMG_CHAR gszBufferIRQ[PVR_MAX_MSG_LEN + 1];
++
++static PVRSRV_LINUX_MUTEX gsDebugMutexNonIRQ;
++
++static spinlock_t gsDebugLockIRQ = SPIN_LOCK_UNLOCKED;
++
++#define USE_SPIN_LOCK (in_interrupt() || !preemptible())
++
++static inline void GetBufferLock(unsigned long *pulLockFlags)
++{
++ if (USE_SPIN_LOCK)
++ {
++ spin_lock_irqsave(&gsDebugLockIRQ, *pulLockFlags);
++ }
++ else
++ {
++ LinuxLockMutex(&gsDebugMutexNonIRQ);
++ }
++}
++
++static inline void ReleaseBufferLock(unsigned long ulLockFlags)
++{
++ if (USE_SPIN_LOCK)
++ {
++ spin_unlock_irqrestore(&gsDebugLockIRQ, ulLockFlags);
++ }
++ else
++ {
++ LinuxUnLockMutex(&gsDebugMutexNonIRQ);
++ }
++}
++
++static inline void SelectBuffer(IMG_CHAR **ppszBuf, IMG_UINT32 *pui32BufSiz)
++{
++ if (USE_SPIN_LOCK)
++ {
++ *ppszBuf = gszBufferIRQ;
++ *pui32BufSiz = sizeof(gszBufferIRQ);
++ }
++ else
++ {
++ *ppszBuf = gszBufferNonIRQ;
++ *pui32BufSiz = sizeof(gszBufferNonIRQ);
++ }
++}
++
++static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR* pszFormat, va_list VArgs)
++{
++ IMG_UINT32 ui32Used;
++ IMG_UINT32 ui32Space;
++ IMG_INT32 i32Len;
++
++ ui32Used = strlen(pszBuf);
++ BUG_ON(ui32Used >= ui32BufSiz);
++ ui32Space = ui32BufSiz - ui32Used;
++
++ i32Len = vsnprintf(&pszBuf[ui32Used], ui32Space, pszFormat, VArgs);
++ pszBuf[ui32BufSiz - 1] = 0;
++
++
++ return (i32Len < 0 || i32Len >= ui32Space);
++}
++
++IMG_VOID PVRDPFInit(IMG_VOID)
++{
++ LinuxInitMutex(&gsDebugMutexNonIRQ);
++}
++
++IMG_VOID PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...)
++{
++ va_list vaArgs;
++ unsigned long ulLockFlags = 0;
++ IMG_CHAR *pszBuf;
++ IMG_UINT32 ui32BufSiz;
++
++ SelectBuffer(&pszBuf, &ui32BufSiz);
++
++ va_start(vaArgs, pszFormat);
++
++ GetBufferLock(&ulLockFlags);
++ strncpy (pszBuf, "PVR_K: ", (ui32BufSiz -1));
++
++ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs))
++ {
++ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf);
++ }
++ else
++ {
++ printk(KERN_INFO "%s\n", pszBuf);
++ }
++
++ ReleaseBufferLock(ulLockFlags);
++ va_end(vaArgs);
++
++}
++
++#if defined(PVRSRV_NEED_PVR_ASSERT)
++
++IMG_VOID PVRSRVDebugAssertFail(const IMG_CHAR* pszFile, IMG_UINT32 uLine)
++{
++ PVRSRVDebugPrintf(DBGPRIV_FATAL, pszFile, uLine, "Debug assertion failed!");
++ BUG();
++}
++
++#endif
++
++#if defined(PVRSRV_NEED_PVR_TRACE)
++
++IMG_VOID PVRSRVTrace(const IMG_CHAR* pszFormat, ...)
++{
++ va_list VArgs;
++ unsigned long ulLockFlags = 0;
++ IMG_CHAR *pszBuf;
++ IMG_UINT32 ui32BufSiz;
++
++ SelectBuffer(&pszBuf, &ui32BufSiz);
++
++ va_start(VArgs, pszFormat);
++
++ GetBufferLock(&ulLockFlags);
++
++ strncpy(pszBuf, "PVR: ", (ui32BufSiz -1));
++
++ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs))
++ {
++ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf);
++ }
++ else
++ {
++ printk(KERN_INFO "%s\n", pszBuf);
++ }
++
++ ReleaseBufferLock(ulLockFlags);
++
++ va_end(VArgs);
++}
++
++#endif
++
++#if defined(PVRSRV_NEED_PVR_DPF)
++
++static IMG_BOOL BAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, ...)
++{
++ va_list VArgs;
++ IMG_BOOL bTrunc;
++
++ va_start (VArgs, pszFormat);
++
++ bTrunc = VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs);
++
++ va_end (VArgs);
++
++ return bTrunc;
++}
++
++IMG_VOID PVRSRVDebugPrintf (
++ IMG_UINT32 ui32DebugLevel,
++ const IMG_CHAR* pszFullFileName,
++ IMG_UINT32 ui32Line,
++ const IMG_CHAR* pszFormat,
++ ...
++ )
++{
++ IMG_BOOL bTrace, bDebug;
++ const IMG_CHAR *pszFileName = pszFullFileName;
++ IMG_CHAR *pszLeafName;
++
++ bTrace = gPVRDebugLevel & ui32DebugLevel & DBGPRIV_CALLTRACE;
++ bDebug = ((gPVRDebugLevel & DBGPRIV_ALLLEVELS) >= ui32DebugLevel);
++
++ if (bTrace || bDebug)
++ {
++ va_list vaArgs;
++ unsigned long ulLockFlags = 0;
++ IMG_CHAR *pszBuf;
++ IMG_UINT32 ui32BufSiz;
++
++ SelectBuffer(&pszBuf, &ui32BufSiz);
++
++ va_start(vaArgs, pszFormat);
++
++ GetBufferLock(&ulLockFlags);
++
++
++ if (bDebug)
++ {
++ switch(ui32DebugLevel)
++ {
++ case DBGPRIV_FATAL:
++ {
++ strncpy (pszBuf, "PVR_K:(Fatal): ", (ui32BufSiz -1));
++ break;
++ }
++ case DBGPRIV_ERROR:
++ {
++ strncpy (pszBuf, "PVR_K:(Error): ", (ui32BufSiz -1));
++ break;
++ }
++ case DBGPRIV_WARNING:
++ {
++ strncpy (pszBuf, "PVR_K:(Warning): ", (ui32BufSiz -1));
++ break;
++ }
++ case DBGPRIV_MESSAGE:
++ {
++ strncpy (pszBuf, "PVR_K:(Message): ", (ui32BufSiz -1));
++ break;
++ }
++ case DBGPRIV_VERBOSE:
++ {
++ strncpy (pszBuf, "PVR_K:(Verbose): ", (ui32BufSiz -1));
++ break;
++ }
++ default:
++ {
++ strncpy (pszBuf, "PVR_K:(Unknown message level)", (ui32BufSiz -1));
++ break;
++ }
++ }
++ }
++ else
++ {
++ strncpy (pszBuf, "PVR_K: ", (ui32BufSiz -1));
++ }
++
++ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs))
++ {
++ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf);
++ }
++ else
++ {
++
++ if (!bTrace)
++ {
++#ifdef DEBUG_LOG_PATH_TRUNCATE
++
++ static IMG_CHAR szFileNameRewrite[PVR_MAX_FILEPATH_LEN];
++
++ IMG_CHAR* pszTruncIter;
++ IMG_CHAR* pszTruncBackInter;
++
++
++ pszFileName = pszFullFileName + strlen(DEBUG_LOG_PATH_TRUNCATE)+1;
++
++
++ strncpy(szFileNameRewrite, pszFileName,PVR_MAX_FILEPATH_LEN);
++
++ if(strlen(szFileNameRewrite) == PVR_MAX_FILEPATH_LEN-1) {
++ IMG_CHAR szTruncateMassage[] = "FILENAME TRUNCATED";
++ strcpy(szFileNameRewrite + (PVR_MAX_FILEPATH_LEN - 1 - strlen(szTruncateMassage)), szTruncateMassage);
++ }
++
++ pszTruncIter = szFileNameRewrite;
++ while(*pszTruncIter++ != 0)
++ {
++ IMG_CHAR* pszNextStartPoint;
++
++ if(
++ !( ( *pszTruncIter == '/' && (pszTruncIter-4 >= szFileNameRewrite) ) &&
++ ( *(pszTruncIter-1) == '.') &&
++ ( *(pszTruncIter-2) == '.') &&
++ ( *(pszTruncIter-3) == '/') )
++ ) continue;
++
++
++ pszTruncBackInter = pszTruncIter - 3;
++ while(*(--pszTruncBackInter) != '/')
++ {
++ if(pszTruncBackInter <= szFileNameRewrite) break;
++ }
++ pszNextStartPoint = pszTruncBackInter;
++
++
++ while(*pszTruncIter != 0)
++ {
++ *pszTruncBackInter++ = *pszTruncIter++;
++ }
++ *pszTruncBackInter = 0;
++
++
++ pszTruncIter = pszNextStartPoint;
++ }
++
++ pszFileName = szFileNameRewrite;
++
++ if(*pszFileName == '/') pszFileName++;
++#endif
++
++#if !defined(__sh__)
++ pszLeafName = (IMG_CHAR *)strrchr (pszFileName, '\\');
++
++ if (pszLeafName)
++ {
++ pszFileName = pszLeafName;
++ }
++#endif
++
++ if (BAppend(pszBuf, ui32BufSiz, " [%lu, %s]", ui32Line, pszFileName))
++ {
++ printk(KERN_INFO "PVR_K:(Message Truncated): %s\n", pszBuf);
++ }
++ else
++ {
++ printk(KERN_INFO "%s\n", pszBuf);
++ }
++ }
++ else
++ {
++ printk(KERN_INFO "%s\n", pszBuf);
++ }
++ }
++
++ ReleaseBufferLock(ulLockFlags);
++
++ va_end (vaArgs);
++ }
++}
++
++#endif
++
++#if defined(DEBUG)
++
++IMG_VOID PVRDebugSetLevel(IMG_UINT32 uDebugLevel)
++{
++ printk(KERN_INFO "PVR: Setting Debug Level = 0x%x\n",(IMG_UINT)uDebugLevel);
++
++ gPVRDebugLevel = uDebugLevel;
++}
++
++IMG_INT PVRDebugProcSetLevel(struct file *file, const IMG_CHAR *buffer, IMG_UINT32 count, IMG_VOID *data)
++{
++#define _PROC_SET_BUFFER_SZ 2
++ IMG_CHAR data_buffer[_PROC_SET_BUFFER_SZ];
++
++ if (count != _PROC_SET_BUFFER_SZ)
++ {
++ return -EINVAL;
++ }
++ else
++ {
++ if (copy_from_user(data_buffer, buffer, count))
++ return -EINVAL;
++ if (data_buffer[count - 1] != '\n')
++ return -EINVAL;
++ PVRDebugSetLevel(data_buffer[0] - '0');
++ }
++ return (count);
++}
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void ProcSeqShowDebugLevel(struct seq_file *sfile,void* el)
++{
++ seq_printf(sfile, "%lu\n", gPVRDebugLevel);
++}
++
++#else
++IMG_INT PVRDebugProcGetLevel(IMG_CHAR *page, IMG_CHAR **start, off_t off, IMG_INT count, IMG_INT *eof, IMG_VOID *data)
++{
++ if (off == 0) {
++ *start = (IMG_CHAR *)1;
++ return printAppend(page, count, 0, "%lu\n", gPVRDebugLevel);
++ }
++ *eof = 1;
++ return 0;
++}
++#endif
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_drm.c b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_drm.c
+new file mode 100644
+index 0000000..9fa678d
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_drm.c
+@@ -0,0 +1,310 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(SUPPORT_DRI_DRM)
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#include <asm/ioctl.h>
++#include <drm/drmP.h>
++#include <drm/drm.h>
++
++#include "img_defs.h"
++#include "services.h"
++#include "kerneldisplay.h"
++#include "kernelbuffer.h"
++#include "syscommon.h"
++#include "pvrmmap.h"
++#include "mm.h"
++#include "mmap.h"
++#include "mutex.h"
++#include "pvr_debug.h"
++#include "srvkm.h"
++#include "perproc.h"
++#include "handle.h"
++#include "pvr_bridge_km.h"
++#include "pvr_bridge.h"
++#include "proc.h"
++#include "pvrmodule.h"
++#include "pvrversion.h"
++#include "lock.h"
++#include "linkage.h"
++#include "pvr_drm_shared.h"
++#include "pvr_drm.h"
++
++#define MAKENAME_HELPER(x, y) x ## y
++#define MAKENAME(x, y) MAKENAME_HELPER(x, y)
++
++#define PVR_DRM_NAME "pvrsrvkm"
++#define PVR_DRM_DESC "Imagination Technologies PVR DRM"
++
++#define PVR_PCI_IDS \
++ {SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++ {0, 0, 0}
++
++struct pci_dev *gpsPVRLDMDev;
++struct drm_device *gpsPVRDRMDev;
++
++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
++#error "Linux kernel version 2.6.25 or later required for PVR DRM support"
++#endif
++
++#define PVR_DRM_FILE struct drm_file *
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++static struct pci_device_id asPciIdList[] = {
++ PVR_PCI_IDS
++};
++#endif
++
++IMG_INT PVRSRVDrmLoad(struct drm_device *dev, unsigned long flags)
++{
++ IMG_INT iRes;
++
++ PVR_TRACE(("PVRSRVDrmLoad"));
++
++ gpsPVRDRMDev = dev;
++ gpsPVRLDMDev = dev->pdev;
++
++#if defined(PDUMP)
++ iRes = dbgdrv_init();
++ if (iRes != 0)
++ {
++ return iRes;
++ }
++#endif
++
++ iRes = PVRCore_Init();
++ if (iRes != 0)
++ {
++ goto exit_dbgdrv_cleanup;
++ }
++
++#if defined(DISPLAY_CONTROLLER)
++ iRes = PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Init)(dev);
++ if (iRes != 0)
++ {
++ goto exit_pvrcore_cleanup;
++ }
++#endif
++ return 0;
++
++#if defined(DISPLAY_CONTROLLER)
++exit_pvrcore_cleanup:
++ PVRCore_Cleanup();
++#endif
++exit_dbgdrv_cleanup:
++#if defined(PDUMP)
++ dbgdrv_cleanup();
++#endif
++ return iRes;
++}
++
++IMG_INT PVRSRVDrmUnload(struct drm_device *dev)
++{
++ PVR_TRACE(("PVRSRVDrmUnload"));
++
++#if defined(DISPLAY_CONTROLLER)
++ PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Cleanup)(dev);
++#endif
++
++ PVRCore_Cleanup();
++
++#if defined(PDUMP)
++ dbgdrv_cleanup();
++#endif
++
++ return 0;
++}
++
++IMG_INT PVRSRVDrmOpen(struct drm_device *dev, struct drm_file *file)
++{
++ return PVRSRVOpen(dev, file);
++}
++
++IMG_VOID PVRSRVDrmPostClose(struct drm_device *dev, struct drm_file *file)
++{
++ PVRSRVRelease(dev, file);
++}
++
++DRI_DRM_STATIC IMG_INT
++PVRDRMIsMaster(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return 0;
++}
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++IMG_INT
++PVRDRM_Dummy_ioctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return 0;
++}
++#endif
++
++static IMG_INT
++PVRDRMPCIBusIDField(struct drm_device *dev, IMG_UINT32 *pui32Field, IMG_UINT32 ui32FieldType)
++{
++ struct pci_dev *psPCIDev = (struct pci_dev *)dev->pdev;
++
++ switch (ui32FieldType)
++ {
++ case PVR_DRM_PCI_DOMAIN:
++ *pui32Field = pci_domain_nr(psPCIDev->bus);
++ break;
++
++ case PVR_DRM_PCI_BUS:
++ *pui32Field = psPCIDev->bus->number;
++ break;
++
++ case PVR_DRM_PCI_DEV:
++ *pui32Field = PCI_SLOT(psPCIDev->devfn);
++ break;
++
++ case PVR_DRM_PCI_FUNC:
++ *pui32Field = PCI_FUNC(psPCIDev->devfn);
++ break;
++
++ default:
++ return -EFAULT;
++ }
++
++ return 0;
++}
++
++DRI_DRM_STATIC IMG_INT
++PVRDRMUnprivCmd(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ IMG_UINT32 *pui32Args = (IMG_UINT32 *)arg;
++ IMG_UINT32 ui32Cmd = pui32Args[0];
++ IMG_UINT32 ui32Arg1 = pui32Args[1];
++ IMG_UINT32 *pui32OutArg = (IMG_UINT32 *)arg;
++ IMG_INT ret = 0;
++
++ LinuxLockMutex(&gPVRSRVLock);
++
++ switch (ui32Cmd)
++ {
++ case PVR_DRM_UNPRIV_INIT_SUCCESFUL:
++ *pui32OutArg = PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL) ? 1 : 0;
++ break;
++
++ case PVR_DRM_UNPRIV_BUSID_TYPE:
++ *pui32OutArg = PVR_DRM_BUS_TYPE_PCI;
++ break;
++
++ case PVR_DRM_UNPRIV_BUSID_FIELD:
++ ret = PVRDRMPCIBusIDField(dev, pui32OutArg, ui32Arg1);
++
++ default:
++ ret = -EFAULT;
++ }
++
++ LinuxUnLockMutex(&gPVRSRVLock);
++
++ return ret;
++}
++
++#if 0
++struct drm_ioctl_desc sPVRDrmIoctls[] = {
++ DRM_IOCTL_DEF(PVR_DRM_SRVKM_IOCTL, PVRSRV_BridgeDispatchKM, 0),
++ DRM_IOCTL_DEF(PVR_DRM_IS_MASTER_IOCTL, PVRDRMIsMaster, DRM_MASTER),
++ DRM_IOCTL_DEF(PVR_DRM_UNPRIV_IOCTL, PVRDRMUnprivCmd, 0),
++#if defined(PDUMP)
++ DRM_IOCTL_DEF(PVR_DRM_DBGDRV_IOCTL, dbgdrv_ioctl, 0),
++#endif
++};
++
++static IMG_INT pvr_max_ioctl = DRM_ARRAY_SIZE(sPVRDrmIoctls);
++
++static struct drm_driver sPVRDrmDriver =
++{
++ .driver_features = 0,
++ .dev_priv_size = sizeof(sPVRDrmBuffer),
++ .load = PVRSRVDrmLoad,
++ .unload = PVRSRVDrmUnload,
++ .open = PVRSRVDrmOpen,
++ .postclose = PVRSRVDrmPostClose,
++ .suspend = PVRSRVDriverSuspend,
++ .resume = PVRSRVDriverResume,
++ .get_map_ofs = drm_core_get_map_ofs,
++ .get_reg_ofs = drm_core_get_reg_ofs,
++ .ioctls = sPVRDrmIoctls,
++ .fops =
++ {
++ .owner = THIS_MODULE,
++ .open = drm_open,
++ .release = drm_release,
++ .ioctl = drm_ioctl,
++ .mmap = PVRMMap,
++ .poll = drm_poll,
++ .fasync = drm_fasync,
++ },
++ .pci_driver =
++ {
++ .name = PVR_DRM_NAME,
++ .id_table = asPciIdList,
++ },
++
++ .name = PVR_DRM_NAME,
++ .desc = PVR_DRM_DESC,
++ .date = PVR_BUILD_DATE,
++ .major = PVRVERSION_MAJ,
++ .minor = PVRVERSION_MIN,
++ .patchlevel = PVRVERSION_BUILD,
++};
++
++static IMG_INT __init PVRSRVDrmInit(IMG_VOID)
++{
++ IMG_INT iRes;
++ sPVRDrmDriver.num_ioctls = pvr_max_ioctl;
++
++
++ PVRDPFInit();
++
++ iRes = drm_init(&sPVRDrmDriver);
++
++ return iRes;
++}
++
++static IMG_VOID __exit PVRSRVDrmExit(IMG_VOID)
++{
++ drm_exit(&sPVRDrmDriver);
++}
++
++module_init(PVRSRVDrmInit);
++module_exit(PVRSRVDrmExit);
++#endif
++#endif
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_drm.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_drm.h
+new file mode 100644
+index 0000000..fd8c81d
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/env/linux/pvr_drm.h
+@@ -0,0 +1,80 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__PVR_DRM_H__)
++#define __PVR_DRM_H__
++
++#include "pvr_drm_shared.h"
++
++#if defined(SUPPORT_DRI_DRM)
++#define PVR_DRM_MAKENAME_HELPER(x, y) x ## y
++#define PVR_DRM_MAKENAME(x, y) PVR_DRM_MAKENAME_HELPER(x, y)
++
++IMG_INT PVRCore_Init(IMG_VOID);
++IMG_VOID PVRCore_Cleanup(IMG_VOID);
++IMG_INT PVRSRVOpen(struct drm_device *dev, struct drm_file *pFile);
++IMG_INT PVRSRVRelease(struct drm_device *dev, struct drm_file *pFile);
++IMG_INT PVRSRVDriverSuspend(struct drm_device *pDevice, pm_message_t state);
++IMG_INT PVRSRVDriverResume(struct drm_device *pDevice);
++
++IMG_INT PVRSRV_BridgeDispatchKM(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++#define DRI_DRM_STATIC
++IMG_INT PVRSRVDrmLoad(struct drm_device *dev, unsigned long flags);
++IMG_INT PVRSRVDrmUnload(struct drm_device *dev);
++IMG_INT PVRSRVDrmOpen(struct drm_device *dev, struct drm_file *file);
++IMG_VOID PVRSRVDrmPostClose(struct drm_device *dev, struct drm_file *file);
++IMG_INT PVRDRMIsMaster(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++IMG_INT PVRDRMUnprivCmd(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++IMG_INT PVRDRM_Dummy_ioctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++#else
++#define DRI_DRM_STATIC static
++#endif
++
++#if defined(DISPLAY_CONTROLLER)
++extern int PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Init)(struct drm_device *);
++extern void PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Cleanup)(struct drm_device *);
++#endif
++
++#if defined(PDUMP)
++int dbgdrv_init(void);
++void dbgdrv_cleanup(void);
++IMG_INT dbgdrv_ioctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++#endif
++
++#if !defined(SUPPORT_DRI_DRM_EXT)
++#define PVR_DRM_SRVKM_IOCTL _IO(0, PVR_DRM_SRVKM_CMD)
++#define PVR_DRM_IS_MASTER_IOCTL _IO(0, PVR_DRM_IS_MASTER_CMD)
++#define PVR_DRM_UNPRIV_IOCTL _IO(0, PVR_DRM_UNPRIV_CMD)
++#define PVR_DRM_DBGDRV_IOCTL _IO(0, PVR_DRM_DBGDRV_CMD)
++#endif
++
++#endif
++
++#endif
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgx535defs.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgx535defs.h
+new file mode 100644
+index 0000000..a683e9b
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgx535defs.h
+@@ -0,0 +1,637 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _SGX535DEFS_KM_H_
++#define _SGX535DEFS_KM_H_
++
++#define EUR_CR_CLKGATECTL 0x0000
++#define EUR_CR_CLKGATECTL_2D_CLKG_MASK 0x00000003UL
++#define EUR_CR_CLKGATECTL_2D_CLKG_SHIFT 0
++#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK 0x00000030UL
++#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT 4
++#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK 0x00000300UL
++#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT 8
++#define EUR_CR_CLKGATECTL_TA_CLKG_MASK 0x00003000UL
++#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT 12
++#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK 0x00030000UL
++#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT 16
++#define EUR_CR_CLKGATECTL_USE_CLKG_MASK 0x00300000UL
++#define EUR_CR_CLKGATECTL_USE_CLKG_SHIFT 20
++#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000UL
++#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24
++#define EUR_CR_CLKGATESTATUS 0x0004
++#define EUR_CR_CLKGATESTATUS_2D_CLKS_MASK 0x00000001UL
++#define EUR_CR_CLKGATESTATUS_2D_CLKS_SHIFT 0
++#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK 0x00000010UL
++#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 4
++#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK 0x00000100UL
++#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 8
++#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK 0x00001000UL
++#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT 12
++#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK 0x00010000UL
++#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 16
++#define EUR_CR_CLKGATESTATUS_USE_CLKS_MASK 0x00100000UL
++#define EUR_CR_CLKGATESTATUS_USE_CLKS_SHIFT 20
++#define EUR_CR_CLKGATECTLOVR 0x0008
++#define EUR_CR_CLKGATECTLOVR_2D_CLKO_MASK 0x00000003UL
++#define EUR_CR_CLKGATECTLOVR_2D_CLKO_SHIFT 0
++#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK 0x00000030UL
++#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 4
++#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK 0x00000300UL
++#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 8
++#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK 0x00003000UL
++#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT 12
++#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK 0x00030000UL
++#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 16
++#define EUR_CR_CLKGATECTLOVR_USE_CLKO_MASK 0x00300000UL
++#define EUR_CR_CLKGATECTLOVR_USE_CLKO_SHIFT 20
++#define EUR_CR_CORE_ID 0x0010
++#define EUR_CR_CORE_ID_CONFIG_MASK 0x0000FFFFUL
++#define EUR_CR_CORE_ID_CONFIG_SHIFT 0
++#define EUR_CR_CORE_ID_ID_MASK 0xFFFF0000UL
++#define EUR_CR_CORE_ID_ID_SHIFT 16
++#define EUR_CR_CORE_REVISION 0x0014
++#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FFUL
++#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0
++#define EUR_CR_CORE_REVISION_MINOR_MASK 0x0000FF00UL
++#define EUR_CR_CORE_REVISION_MINOR_SHIFT 8
++#define EUR_CR_CORE_REVISION_MAJOR_MASK 0x00FF0000UL
++#define EUR_CR_CORE_REVISION_MAJOR_SHIFT 16
++#define EUR_CR_CORE_REVISION_DESIGNER_MASK 0xFF000000UL
++#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24
++#define EUR_CR_DESIGNER_REV_FIELD1 0x0018
++#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFFUL
++#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0
++#define EUR_CR_DESIGNER_REV_FIELD2 0x001C
++#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFFUL
++#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0
++#define EUR_CR_SOFT_RESET 0x0080
++#define EUR_CR_SOFT_RESET_BIF_RESET_MASK 0x00000001UL
++#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT 0
++#define EUR_CR_SOFT_RESET_TWOD_RESET_MASK 0x00000002UL
++#define EUR_CR_SOFT_RESET_TWOD_RESET_SHIFT 1
++#define EUR_CR_SOFT_RESET_DPM_RESET_MASK 0x00000004UL
++#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT 2
++#define EUR_CR_SOFT_RESET_TA_RESET_MASK 0x00000008UL
++#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT 3
++#define EUR_CR_SOFT_RESET_USE_RESET_MASK 0x00000010UL
++#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT 4
++#define EUR_CR_SOFT_RESET_ISP_RESET_MASK 0x00000020UL
++#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT 5
++#define EUR_CR_SOFT_RESET_TSP_RESET_MASK 0x00000040UL
++#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT 6
++#define EUR_CR_EVENT_HOST_ENABLE2 0x0110
++#define EUR_CR_EVENT_HOST_ENABLE2_BIF_REQUESTER_FAULT_MASK 0x00000010UL
++#define EUR_CR_EVENT_HOST_ENABLE2_BIF_REQUESTER_FAULT_SHIFT 4
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_DHOST_FREE_LOAD_MASK 0x00000008UL
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_DHOST_FREE_LOAD_SHIFT 3
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_HOST_FREE_LOAD_MASK 0x00000004UL
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_HOST_FREE_LOAD_SHIFT 2
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002UL
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001UL
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0
++#define EUR_CR_EVENT_HOST_CLEAR2 0x0114
++#define EUR_CR_EVENT_HOST_CLEAR2_BIF_REQUESTER_FAULT_MASK 0x00000010UL
++#define EUR_CR_EVENT_HOST_CLEAR2_BIF_REQUESTER_FAULT_SHIFT 4
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_DHOST_FREE_LOAD_MASK 0x00000008UL
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_DHOST_FREE_LOAD_SHIFT 3
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_HOST_FREE_LOAD_MASK 0x00000004UL
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_HOST_FREE_LOAD_SHIFT 2
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002UL
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001UL
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0
++#define EUR_CR_EVENT_STATUS2 0x0118
++#define EUR_CR_EVENT_STATUS2_BIF_REQUESTER_FAULT_MASK 0x00000010UL
++#define EUR_CR_EVENT_STATUS2_BIF_REQUESTER_FAULT_SHIFT 4
++#define EUR_CR_EVENT_STATUS2_DPM_DHOST_FREE_LOAD_MASK 0x00000008UL
++#define EUR_CR_EVENT_STATUS2_DPM_DHOST_FREE_LOAD_SHIFT 3
++#define EUR_CR_EVENT_STATUS2_DPM_HOST_FREE_LOAD_MASK 0x00000004UL
++#define EUR_CR_EVENT_STATUS2_DPM_HOST_FREE_LOAD_SHIFT 2
++#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002UL
++#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001UL
++#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0
++#define EUR_CR_EVENT_STATUS 0x012CUL
++#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000UL
++#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_STATUS_TIMER_MASK 0x20000000UL
++#define EUR_CR_EVENT_STATUS_TIMER_SHIFT 29
++#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000UL
++#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK 0x08000000UL
++#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000UL
++#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000UL
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000UL
++#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000UL
++#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000UL
++#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000UL
++#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK 0x00100000UL
++#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT 20
++#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000UL
++#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000UL
++#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_STATUS_ISP_HALT_MASK 0x00020000UL
++#define EUR_CR_EVENT_STATUS_ISP_HALT_SHIFT 17
++#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_MASK 0x00010000UL
++#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000UL
++#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK 0x00004000UL
++#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT 14
++#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000UL
++#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000UL
++#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK 0x00000800UL
++#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK 0x00000400UL
++#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200UL
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100UL
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080UL
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040UL
++#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020UL
++#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010UL
++#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008UL
++#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004UL
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002UL
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001UL
++#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0
++#define EUR_CR_EVENT_HOST_ENABLE 0x0130
++#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29
++#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_MASK 0x08000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000UL
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000UL
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000UL
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000UL
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000UL
++#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_MASK 0x00020000UL
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_SHIFT 17
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_MASK 0x00010000UL
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000UL
++#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000UL
++#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14
++#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000UL
++#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000UL
++#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800UL
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400UL
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001UL
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0
++#define EUR_CR_EVENT_HOST_CLEAR 0x0134
++#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK 0x20000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29
++#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_MASK 0x08000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000UL
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000UL
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000UL
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000UL
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000UL
++#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_MASK 0x00020000UL
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_SHIFT 17
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_MASK 0x00010000UL
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000UL
++#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000UL
++#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14
++#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000UL
++#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000UL
++#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800UL
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400UL
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001UL
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0
++#define EUR_CR_PDS 0x0ABC
++#define EUR_CR_PDS_DOUT_TIMEOUT_DISABLE_MASK 0x00000040UL
++#define EUR_CR_PDS_DOUT_TIMEOUT_DISABLE_SHIFT 6
++#define EUR_CR_PDS_EXEC_BASE 0x0AB8
++#define EUR_CR_PDS_EXEC_BASE_ADDR_MASK 0xFFF00000UL
++#define EUR_CR_PDS_EXEC_BASE_ADDR_SHIFT 20
++#define EUR_CR_EVENT_KICKER 0x0AC4
++#define EUR_CR_EVENT_KICKER_ADDRESS_MASK 0xFFFFFFF0UL
++#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT 4
++#define EUR_CR_EVENT_KICK 0x0AC8
++#define EUR_CR_EVENT_KICK_NOW_MASK 0x00000001UL
++#define EUR_CR_EVENT_KICK_NOW_SHIFT 0
++#define EUR_CR_EVENT_TIMER 0x0ACC
++#define EUR_CR_EVENT_TIMER_ENABLE_MASK 0x01000000UL
++#define EUR_CR_EVENT_TIMER_ENABLE_SHIFT 24
++#define EUR_CR_EVENT_TIMER_VALUE_MASK 0x00FFFFFFUL
++#define EUR_CR_EVENT_TIMER_VALUE_SHIFT 0
++#define EUR_CR_PDS_INV0 0x0AD0
++#define EUR_CR_PDS_INV0_DSC_MASK 0x00000001UL
++#define EUR_CR_PDS_INV0_DSC_SHIFT 0
++#define EUR_CR_PDS_INV1 0x0AD4
++#define EUR_CR_PDS_INV1_DSC_MASK 0x00000001UL
++#define EUR_CR_PDS_INV1_DSC_SHIFT 0
++#define EUR_CR_PDS_INV2 0x0AD8
++#define EUR_CR_PDS_INV2_DSC_MASK 0x00000001UL
++#define EUR_CR_PDS_INV2_DSC_SHIFT 0
++#define EUR_CR_PDS_INV3 0x0ADC
++#define EUR_CR_PDS_INV3_DSC_MASK 0x00000001UL
++#define EUR_CR_PDS_INV3_DSC_SHIFT 0
++#define EUR_CR_PDS_INV_CSC 0x0AE0
++#define EUR_CR_PDS_INV_CSC_KICK_MASK 0x00000001UL
++#define EUR_CR_PDS_INV_CSC_KICK_SHIFT 0
++#define EUR_CR_PDS_PC_BASE 0x0B2C
++#define EUR_CR_PDS_PC_BASE_ADDRESS_MASK 0x3FFFFFFFUL
++#define EUR_CR_PDS_PC_BASE_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_CTRL 0x0C00
++#define EUR_CR_BIF_CTRL_NOREORDER_MASK 0x00000001UL
++#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT 0
++#define EUR_CR_BIF_CTRL_PAUSE_MASK 0x00000002UL
++#define EUR_CR_BIF_CTRL_PAUSE_SHIFT 1
++#define EUR_CR_BIF_CTRL_FLUSH_MASK 0x00000004UL
++#define EUR_CR_BIF_CTRL_FLUSH_SHIFT 2
++#define EUR_CR_BIF_CTRL_INVALDC_MASK 0x00000008UL
++#define EUR_CR_BIF_CTRL_INVALDC_SHIFT 3
++#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK 0x00000010UL
++#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT 4
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_MASK 0x00000100UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SHIFT 8
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_MASK 0x00000400UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SHIFT 10
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_MASK 0x00000800UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_SHIFT 11
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK 0x00010000UL
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_SHIFT 16
++#define EUR_CR_BIF_INT_STAT 0x0C04
++#define EUR_CR_BIF_INT_STAT_FAULT_MASK 0x00003FFFUL
++#define EUR_CR_BIF_INT_STAT_FAULT_SHIFT 0
++#define EUR_CR_BIF_INT_STAT_PF_N_RW_MASK 0x00004000UL
++#define EUR_CR_BIF_INT_STAT_PF_N_RW_SHIFT 14
++#define EUR_CR_BIF_FAULT 0x0C08
++#define EUR_CR_BIF_FAULT_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_FAULT_ADDR_SHIFT 12
++#define EUR_CR_BIF_TILE0 0x0C0C
++#define EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE0_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE0_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE1 0x0C10
++#define EUR_CR_BIF_TILE1_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE1_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE1_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE1_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE1_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE1_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE2 0x0C14
++#define EUR_CR_BIF_TILE2_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE2_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE2_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE2_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE2_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE2_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE3 0x0C18
++#define EUR_CR_BIF_TILE3_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE3_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE3_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE3_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE3_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE3_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE4 0x0C1C
++#define EUR_CR_BIF_TILE4_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE4_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE4_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE4_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE4_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE4_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE5 0x0C20
++#define EUR_CR_BIF_TILE5_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE5_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE5_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE5_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE5_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE5_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE6 0x0C24
++#define EUR_CR_BIF_TILE6_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE6_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE6_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE6_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE6_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE6_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE7 0x0C28
++#define EUR_CR_BIF_TILE7_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE7_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE7_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE7_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE7_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE7_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE8 0x0C2C
++#define EUR_CR_BIF_TILE8_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE8_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE8_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE8_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE8_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE8_CFG_SHIFT 24
++#define EUR_CR_BIF_TILE9 0x0C30
++#define EUR_CR_BIF_TILE9_MIN_ADDRESS_MASK 0x00000FFFUL
++#define EUR_CR_BIF_TILE9_MIN_ADDRESS_SHIFT 0
++#define EUR_CR_BIF_TILE9_MAX_ADDRESS_MASK 0x00FFF000UL
++#define EUR_CR_BIF_TILE9_MAX_ADDRESS_SHIFT 12
++#define EUR_CR_BIF_TILE9_CFG_MASK 0x0F000000UL
++#define EUR_CR_BIF_TILE9_CFG_SHIFT 24
++#define EUR_CR_BIF_DIR_LIST_BASE1 0x0C38
++#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE1_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE2 0x0C3C
++#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE2_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE3 0x0C40
++#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE3_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE4 0x0C44
++#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE4_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE5 0x0C48
++#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE5_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE6 0x0C4C
++#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE6_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE7 0x0C50
++#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE7_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE8 0x0C54
++#define EUR_CR_BIF_DIR_LIST_BASE8_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE8_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE9 0x0C58
++#define EUR_CR_BIF_DIR_LIST_BASE9_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE9_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE10 0x0C5C
++#define EUR_CR_BIF_DIR_LIST_BASE10_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE10_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE11 0x0C60
++#define EUR_CR_BIF_DIR_LIST_BASE11_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE11_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE12 0x0C64
++#define EUR_CR_BIF_DIR_LIST_BASE12_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE12_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE13 0x0C68
++#define EUR_CR_BIF_DIR_LIST_BASE13_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE13_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE14 0x0C6C
++#define EUR_CR_BIF_DIR_LIST_BASE14_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE14_ADDR_SHIFT 12
++#define EUR_CR_BIF_DIR_LIST_BASE15 0x0C70
++#define EUR_CR_BIF_DIR_LIST_BASE15_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE15_ADDR_SHIFT 12
++#define EUR_CR_BIF_BANK_SET 0x0C74
++#define EUR_CR_BIF_BANK_SET_SELECT_MASK 0x000003FFUL
++#define EUR_CR_BIF_BANK_SET_SELECT_SHIFT 0
++#define EUR_CR_BIF_BANK0 0x0C78
++#define EUR_CR_BIF_BANK0_INDEX_EDM_MASK 0x0000000FUL
++#define EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT 0
++#define EUR_CR_BIF_BANK0_INDEX_TA_MASK 0x000000F0UL
++#define EUR_CR_BIF_BANK0_INDEX_TA_SHIFT 4
++#define EUR_CR_BIF_BANK0_INDEX_HOST_MASK 0x00000F00UL
++#define EUR_CR_BIF_BANK0_INDEX_HOST_SHIFT 8
++#define EUR_CR_BIF_BANK0_INDEX_3D_MASK 0x0000F000UL
++#define EUR_CR_BIF_BANK0_INDEX_3D_SHIFT 12
++#define EUR_CR_BIF_BANK0_INDEX_2D_MASK 0x000F0000UL
++#define EUR_CR_BIF_BANK0_INDEX_2D_SHIFT 16
++#define EUR_CR_BIF_BANK1 0x0C7C
++#define EUR_CR_BIF_BANK1_INDEX_EDM_MASK 0x0000000FUL
++#define EUR_CR_BIF_BANK1_INDEX_EDM_SHIFT 0
++#define EUR_CR_BIF_BANK1_INDEX_TA_MASK 0x000000F0UL
++#define EUR_CR_BIF_BANK1_INDEX_TA_SHIFT 4
++#define EUR_CR_BIF_BANK1_INDEX_HOST_MASK 0x00000F00UL
++#define EUR_CR_BIF_BANK1_INDEX_HOST_SHIFT 8
++#define EUR_CR_BIF_BANK1_INDEX_3D_MASK 0x0000F000UL
++#define EUR_CR_BIF_BANK1_INDEX_3D_SHIFT 12
++#define EUR_CR_BIF_BANK1_INDEX_2D_MASK 0x000F0000UL
++#define EUR_CR_BIF_BANK1_INDEX_2D_SHIFT 16
++#define EUR_CR_BIF_ADT_TTE 0x0C80
++#define EUR_CR_BIF_ADT_TTE_VALUE_MASK 0x000000FFUL
++#define EUR_CR_BIF_ADT_TTE_VALUE_SHIFT 0
++#define EUR_CR_BIF_DIR_LIST_BASE0 0x0C84
++#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000UL
++#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12
++#define EUR_CR_BIF_TWOD_REQ_BASE 0x0C88
++#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK 0xFFF00000UL
++#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_SHIFT 20
++#define EUR_CR_BIF_TA_REQ_BASE 0x0C90
++#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK 0xFFF00000UL
++#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT 20
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1 0x0C94
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_MMU_MASK 0x00000007UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_MMU_SHIFT 0
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_CACHE_MASK 0x00000038UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_CACHE_SHIFT 3
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_VDM_MASK 0x000001C0UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_VDM_SHIFT 6
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_TE_MASK 0x00000E00UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_TE_SHIFT 9
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_TWOD_MASK 0x00007000UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_TWOD_SHIFT 12
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_PBE_MASK 0x00038000UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_1_PBE_SHIFT 15
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2 0x0C98
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_HOST_MASK 0x00000007UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_HOST_SHIFT 0
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_USE_MASK 0x00000038UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_USE_SHIFT 3
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_ISP_MASK 0x000001C0UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_ISP_SHIFT 6
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_TSPP_MASK 0x00000E00UL
++#define EUR_CR_BIF_MEM_ARB_FLOWRATES_2_TSPP_SHIFT 9
++#define EUR_CR_BIF_MEM_ARB_CONFIG 0x0CA0
++#define EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_MASK 0x0000000FUL
++#define EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_SHIFT 0
++#define EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_MASK 0x00000FF0UL
++#define EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_SHIFT 4
++#define EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_MASK 0x00FFF000UL
++#define EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_SHIFT 12
++#define EUR_CR_BIF_MEM_REQ_STAT 0x0CA8
++#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK 0x000000FFUL
++#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0
++#define EUR_CR_BIF_3D_REQ_BASE 0x0CAC
++#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK 0xFFF00000UL
++#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT 20
++#define EUR_CR_BIF_ZLS_REQ_BASE 0x0CB0
++#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK 0xFFF00000UL
++#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT 20
++#define EUR_CR_BIF_BANK_STATUS 0x0CB4
++#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_MASK 0x00000001UL
++#define EUR_CR_BIF_BANK_STATUS_3D_CURRENT_BANK_SHIFT 0
++#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_MASK 0x00000002UL
++#define EUR_CR_BIF_BANK_STATUS_TA_CURRENT_BANK_SHIFT 1
++#define EUR_CR_2D_BLIT_STATUS 0x0E04
++#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFFUL
++#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0
++#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK 0x01000000UL
++#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT 24
++#define EUR_CR_2D_VIRTUAL_FIFO_0 0x0E10
++#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 0x00000001UL
++#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000EUL
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0UL
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000UL
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12
++#define EUR_CR_2D_VIRTUAL_FIFO_1 0x0E14
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFFUL
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000UL
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000UL
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24
++#define EUR_CR_2D_SOCIF 0x0E18
++#define EUR_CR_2D_SOCIF_FREESPACE_MASK 0x000000FFUL
++#define EUR_CR_2D_SOCIF_FREESPACE_SHIFT 0
++#define EUR_CR_2D_ALPHA 0x0E1C
++#define EUR_CR_2D_ALPHA_COMPONENT_ONE_MASK 0x0000FF00UL
++#define EUR_CR_2D_ALPHA_COMPONENT_ONE_SHIFT 8
++#define EUR_CR_2D_ALPHA_COMPONENT_ZERO_MASK 0x000000FFUL
++#define EUR_CR_2D_ALPHA_COMPONENT_ZERO_SHIFT 0
++#define EUR_CR_USE_CODE_BASE(X) (0x0A0C + (4 * (X)))
++#define EUR_CR_USE_CODE_BASE_ADDR_MASK 0x01FFFFFFUL
++#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT 0
++#define EUR_CR_USE_CODE_BASE_DM_MASK 0x06000000UL
++#define EUR_CR_USE_CODE_BASE_DM_SHIFT 25
++#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16
++#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16
++
++#define EUR_CR_MNE_CR_CTRL 0x0D00
++#define EUR_CR_MNE_CR_CTRL_BYP_CC_MASK 0x00008000UL
++#define EUR_CR_MNE_CR_CTRL_INVAL 0x0D20
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxdefs.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxdefs.h
+new file mode 100644
+index 0000000..fbffbf0
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxdefs.h
+@@ -0,0 +1,82 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _SGXDEFS_H_
++#define _SGXDEFS_H_
++
++#include "sgxerrata.h"
++#include "sgxfeaturedefs.h"
++
++#if defined(SGX520)
++#include "sgx520defs.h"
++#else
++#if defined(SGX530)
++#include "sgx530defs.h"
++#else
++#if defined(SGX535)
++#include "sgx535defs.h"
++#else
++#if defined(SGX535_V1_1)
++#include "sgx535defs.h"
++#else
++#if defined(SGX540)
++#include "sgx540defs.h"
++#else
++#if defined(SGX541)
++#include "sgx541defs.h"
++#else
++#if defined(SGX543)
++#include "sgx543defs.h"
++#else
++#if defined(SGX545)
++#include "sgx545defs.h"
++#else
++#if defined(SGX531)
++#include "sgx531defs.h"
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++
++#if defined(SGX_FEATURE_MP)
++#if defined(SGX541)
++#if SGX_CORE_REV == 100
++#include "sgx541_100mpdefs.h"
++#else
++#include "sgx541mpdefs.h"
++#endif
++#else
++#include "sgxmpdefs.h"
++#endif
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxerrata.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxerrata.h
+new file mode 100644
+index 0000000..fe3e619
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxerrata.h
+@@ -0,0 +1,308 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _SGXERRATA_KM_H_
++#define _SGXERRATA_KM_H_
++
++
++#if defined(SGX520) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 100
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX520 Core Revision unspecified"
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if defined(SGX530) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 103
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == 110
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == 111
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == 120
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == 121
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == 125
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX530 Core Revision unspecified"
++ #endif
++ #endif
++ #endif
++ #endif
++ #endif
++#endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if defined(SGX531) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 101
++ #define FIX_HW_BRN_26620
++ #define FIX_HW_BRN_28011
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX531 Core Revision unspecified"
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if (defined(SGX535) || defined(SGX535_V1_1)) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 111
++ #define FIX_HW_BRN_23281
++ #define FIX_HW_BRN_23410
++ #define FIX_HW_BRN_22693
++ #define FIX_HW_BRN_22934
++ #define FIX_HW_BRN_22997
++ #define FIX_HW_BRN_23030
++ #else
++ #if SGX_CORE_REV == 1111
++ #define FIX_HW_BRN_23281
++ #define FIX_HW_BRN_23410
++ #define FIX_HW_BRN_22693
++ #define FIX_HW_BRN_22934
++ #define FIX_HW_BRN_22997
++ #define FIX_HW_BRN_23030
++ #else
++ #if SGX_CORE_REV == 112
++ #define FIX_HW_BRN_23281
++ #define FIX_HW_BRN_23410
++ #define FIX_HW_BRN_22693
++ #define FIX_HW_BRN_22934
++ #define FIX_HW_BRN_22997
++ #define FIX_HW_BRN_23030
++ #else
++ #if SGX_CORE_REV == 113
++ #define FIX_HW_BRN_22934
++ #define FIX_HW_BRN_23281
++ #define FIX_HW_BRN_23944
++ #define FIX_HW_BRN_23410
++ #else
++ #if SGX_CORE_REV == 121
++ #define FIX_HW_BRN_22934
++ #define FIX_HW_BRN_23944
++ #define FIX_HW_BRN_23410
++ #else
++ #if SGX_CORE_REV == 126
++ #define FIX_HW_BRN_22934
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX535 Core Revision unspecified"
++
++ #endif
++ #endif
++ #endif
++ #endif
++ #endif
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if defined(SGX540) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 101
++ #define FIX_HW_BRN_25499
++ #define FIX_HW_BRN_25503
++ #define FIX_HW_BRN_26620
++ #define FIX_HW_BRN_28011
++ #else
++ #if SGX_CORE_REV == 110
++ #define FIX_HW_BRN_25503
++ #define FIX_HW_BRN_26620
++ #define FIX_HW_BRN_28011
++ #else
++ #if SGX_CORE_REV == 120
++ #define FIX_HW_BRN_28011
++ #else
++ #if SGX_CORE_REV == 121
++ #define FIX_HW_BRN_28011
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX540 Core Revision unspecified"
++ #endif
++ #endif
++ #endif
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if defined(SGX541) && !defined(SGX_CORE_DEFINED)
++ #if defined(SGX_FEATURE_MP)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 100
++ #define FIX_HW_BRN_27270
++ #define FIX_HW_BRN_28011
++ #define FIX_HW_BRN_27510
++
++ #else
++ #if SGX_CORE_REV == 101
++
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX541 Core Revision unspecified"
++ #endif
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++ #else
++ #error "sgxerrata.h: SGX541 only supports MP configs (SGX_FEATURE_MP)"
++ #endif
++#endif
++
++#if defined(SGX543) && !defined(SGX_CORE_DEFINED)
++ #if defined(SGX_FEATURE_MP)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 100
++
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX543 Core Revision unspecified"
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++ #else
++ #error "sgxerrata.h: SGX543 only supports MP configs (SGX_FEATURE_MP)"
++ #endif
++#endif
++
++#if defined(SGX545) && !defined(SGX_CORE_DEFINED)
++
++ #define SGX_CORE_REV_HEAD 0
++ #if defined(USE_SGX_CORE_REV_HEAD)
++
++ #define SGX_CORE_REV SGX_CORE_REV_HEAD
++ #endif
++
++ #if SGX_CORE_REV == 100
++ #define FIX_HW_BRN_26620
++ #define FIX_HW_BRN_27266
++ #define FIX_HW_BRN_27456
++ #else
++ #if SGX_CORE_REV == 109
++
++ #else
++ #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++
++ #else
++ #error "sgxerrata.h: SGX545 Core Revision unspecified"
++ #endif
++ #endif
++ #endif
++
++ #define SGX_CORE_DEFINED
++#endif
++
++#if !defined(SGX_CORE_DEFINED)
++#if defined (__GNUC__)
++ #warning "sgxerrata.h: SGX Core Version unspecified"
++#else
++ #pragma message("sgxerrata.h: SGX Core Version unspecified")
++#endif
++#endif
++
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h
+new file mode 100644
+index 0000000..782f613
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h
+@@ -0,0 +1,163 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(SGX520)
++ #define SGX_CORE_FRIENDLY_NAME "SGX520"
++ #define SGX_CORE_ID SGX_CORE_ID_520
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++#else
++#if defined(SGX530)
++ #define SGX_CORE_FRIENDLY_NAME "SGX530"
++ #define SGX_CORE_ID SGX_CORE_ID_530
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++#else
++#if defined(SGX535)
++ #define SGX_CORE_FRIENDLY_NAME "SGX535"
++ #define SGX_CORE_ID SGX_CORE_ID_535
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
++ #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
++ #define SGX_FEATURE_BIF_NUM_DIRLISTS (16)
++ #define SGX_FEATURE_2D_HARDWARE
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SUPPORT_SGX_GENERAL_MAPPING_HEAP
++#else
++#if defined(SGX540)
++ #define SGX_CORE_FRIENDLY_NAME "SGX540"
++ #define SGX_CORE_ID SGX_CORE_ID_540
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SGX_FEATURE_MULTI_EVENT_KICK
++#else
++#if defined(SGX541)
++ #define SGX_CORE_FRIENDLY_NAME "SGX541"
++ #define SGX_CORE_ID SGX_CORE_ID_541
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
++ #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
++ #define SGX_FEATURE_BIF_NUM_DIRLISTS (8)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SGX_FEATURE_SPM_MODE_0
++ #define SGX_FEATURE_MULTI_EVENT_KICK
++#else
++#if defined(SGX543)
++ #define SGX_CORE_FRIENDLY_NAME "SGX543"
++ #define SGX_CORE_ID SGX_CORE_ID_543
++ #define SGX_FEATURE_USE_NO_INSTRUCTION_PAIRING
++ #define SGX_FEATURE_USE_UNLIMITED_PHASES
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
++ #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
++ #define SGX_FEATURE_BIF_NUM_DIRLISTS (8)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SGX_FEATURE_MONOLITHIC_UKERNEL
++ #define SGX_FEATURE_MULTI_EVENT_KICK
++ #define SGX_FEATURE_DATA_BREAKPOINTS
++#else
++#if defined(SGX531)
++ #define SGX_CORE_FRIENDLY_NAME "SGX531"
++ #define SGX_CORE_ID SGX_CORE_ID_531
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (28)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SGX_FEATURE_MULTI_EVENT_KICK
++#else
++#if defined(SGX545)
++ #define SGX_CORE_FRIENDLY_NAME "SGX545"
++ #define SGX_CORE_ID SGX_CORE_ID_545
++ #define SGX_FEATURE_ADDRESS_SPACE_SIZE (32)
++ #define SGX_FEATURE_AUTOCLOCKGATING
++ #define SGX_FEATURE_USE_NO_INSTRUCTION_PAIRING
++ #define SGX_FEATURE_USE_UNLIMITED_PHASES
++ #define SGX_FEATURE_DXT_TEXTURES
++ #define SGX_FEATURE_VOLUME_TEXTURES
++ #define SGX_FEATURE_HOST_ALLOC_FROM_DPM
++ #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
++ #define SGX_FEATURE_BIF_NUM_DIRLISTS (16)
++ #define SGX_FEATURE_NUM_USE_PIPES (4)
++ #define SGX_FEATURE_TEXTURESTRIDE_EXTENSION
++ #define SGX_FEATURE_PDS_DATA_INTERLEAVE_2DWORDS
++ #define SGX_FEATURE_MONOLITHIC_UKERNEL
++ #define SGX_FEATURE_ZLS_EXTERNALZ
++ #define SGX_FEATURE_VDM_CONTEXT_SWITCH_REV_2
++ #define SGX_FEATURE_ISP_CONTEXT_SWITCH_REV_2
++ #define SGX_FEATURE_NUM_PDS_PIPES (2)
++ #define SGX_FEATURE_NATIVE_BACKWARD_BLIT
++ #define SGX_FEATURE_MAX_TA_RENDER_TARGETS (512)
++ #define SGX_FEATURE_SPM_MODE_0
++ #define SGX_FEATURE_SECONDARY_REQUIRES_USE_KICK
++ #define SGX_FEATURE_DCU
++
++
++ #define SGX_FEATURE_BIF_WIDE_TILING_AND_4K_ADDRESS
++ #define SGX_FEATURE_MULTI_EVENT_KICK
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++#endif
++
++#if defined(FIX_HW_BRN_22693)
++#undef SGX_FEATURE_AUTOCLOCKGATING
++#endif
++
++#if defined(FIX_HW_BRN_27266)
++#undef SGX_FEATURE_36BIT_MMU
++#endif
++
++#if defined(FIX_HW_BRN_27456)
++#undef SGX_FEATURE_BIF_WIDE_TILING_AND_4K_ADDRESS
++#endif
++
++#if defined(FIX_HW_BRN_22934) \
++ || defined(FIX_HW_BRN_25499)
++#undef SGX_FEATURE_MULTI_EVENT_KICK
++#endif
++
++#if defined(SGX_FEATURE_SYSTEM_CACHE)
++ #if defined(SGX_FEATURE_36BIT_MMU)
++ #error SGX_FEATURE_SYSTEM_CACHE is incompatible with SGX_FEATURE_36BIT_MMU
++ #endif
++ #if defined(FIX_HW_BRN_26620) && !defined(SGX_FEATURE_MULTI_EVENT_KICK)
++ #define SGX_BYPASS_SYSTEM_CACHE
++ #endif
++#endif
++
++#if defined(SGX_FEATURE_MP)
++#if !defined(SGX_FEATURE_MP_CORE_COUNT)
++#error SGX_FEATURE_MP_CORE_COUNT must be defined when SGX_FEATURE_MP is defined
++#endif
++#else
++#define SGX_FEATURE_MP_CORE_COUNT (1)
++#endif
++
++#if defined(SUPPORT_SGX_LOW_LATENCY_SCHEDULING) && !defined(SUPPORT_SGX_PRIORITY_SCHEDULING)
++#define SUPPORT_SGX_PRIORITY_SCHEDULING
++#endif
++
++#include "img_types.h"
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxmmu.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxmmu.h
+new file mode 100644
+index 0000000..309de47
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/hwdefs/sgxmmu.h
+@@ -0,0 +1,79 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SGXMMU_KM_H__)
++#define __SGXMMU_KM_H__
++
++#define SGX_MMU_PAGE_SHIFT (12)
++#define SGX_MMU_PAGE_SIZE (1UL<<SGX_MMU_PAGE_SHIFT)
++#define SGX_MMU_PAGE_MASK (SGX_MMU_PAGE_SIZE - 1UL)
++
++#define SGX_MMU_PD_SHIFT (10)
++#define SGX_MMU_PD_SIZE (1UL<<SGX_MMU_PD_SHIFT)
++#define SGX_MMU_PD_MASK (0xFFC00000UL)
++
++#if defined(SGX_FEATURE_36BIT_MMU)
++ #define SGX_MMU_PDE_ADDR_MASK (0xFFFFFF00UL)
++ #define SGX_MMU_PDE_ADDR_ALIGNSHIFT (4)
++#else
++ #define SGX_MMU_PDE_ADDR_MASK (0xFFFFF000UL)
++ #define SGX_MMU_PDE_ADDR_ALIGNSHIFT (0)
++#endif
++#define SGX_MMU_PDE_VALID (0x00000001UL)
++#define SGX_MMU_PDE_PAGE_SIZE_4K (0x00000000UL)
++#if defined(SGX_FEATURE_VARIABLE_MMU_PAGE_SIZE)
++ #define SGX_MMU_PDE_PAGE_SIZE_16K (0x00000002UL)
++ #define SGX_MMU_PDE_PAGE_SIZE_64K (0x00000004UL)
++ #define SGX_MMU_PDE_PAGE_SIZE_256K (0x00000006UL)
++ #define SGX_MMU_PDE_PAGE_SIZE_1M (0x00000008UL)
++ #define SGX_MMU_PDE_PAGE_SIZE_4M (0x0000000AUL)
++ #define SGX_MMU_PDE_PAGE_SIZE_MASK (0x0000000EUL)
++#else
++ #define SGX_MMU_PDE_WRITEONLY (0x00000002UL)
++ #define SGX_MMU_PDE_READONLY (0x00000004UL)
++ #define SGX_MMU_PDE_CACHECONSISTENT (0x00000008UL)
++ #define SGX_MMU_PDE_EDMPROTECT (0x00000010UL)
++#endif
++
++#define SGX_MMU_PT_SHIFT (10)
++#define SGX_MMU_PT_SIZE (1UL<<SGX_MMU_PT_SHIFT)
++#define SGX_MMU_PT_MASK (0x003FF000UL)
++
++#if defined(SGX_FEATURE_36BIT_MMU)
++ #define SGX_MMU_PTE_ADDR_MASK (0xFFFFFF00UL)
++ #define SGX_MMU_PTE_ADDR_ALIGNSHIFT (4)
++#else
++ #define SGX_MMU_PTE_ADDR_MASK (0xFFFFF000UL)
++ #define SGX_MMU_PTE_ADDR_ALIGNSHIFT (0)
++#endif
++#define SGX_MMU_PTE_VALID (0x00000001UL)
++#define SGX_MMU_PTE_WRITEONLY (0x00000002UL)
++#define SGX_MMU_PTE_READONLY (0x00000004UL)
++#define SGX_MMU_PTE_CACHECONSISTENT (0x00000008UL)
++#define SGX_MMU_PTE_EDMPROTECT (0x00000010UL)
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/buffer_manager.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/buffer_manager.h
+new file mode 100644
+index 0000000..a47086d
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/buffer_manager.h
+@@ -0,0 +1,213 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _BUFFER_MANAGER_H_
++#define _BUFFER_MANAGER_H_
++
++#include "img_types.h"
++#include "ra.h"
++#include "perproc.h"
++
++#if defined(__cplusplus)
++extern "C"{
++#endif
++
++typedef struct _BM_HEAP_ BM_HEAP;
++
++struct _BM_MAPPING_
++{
++ enum
++ {
++ hm_wrapped = 1,
++ hm_wrapped_scatter,
++ hm_wrapped_virtaddr,
++ hm_wrapped_scatter_virtaddr,
++ hm_env,
++ hm_contiguous
++ } eCpuMemoryOrigin;
++
++ BM_HEAP *pBMHeap;
++ RA_ARENA *pArena;
++
++ IMG_CPU_VIRTADDR CpuVAddr;
++ IMG_CPU_PHYADDR CpuPAddr;
++ IMG_DEV_VIRTADDR DevVAddr;
++ IMG_SYS_PHYADDR *psSysAddr;
++ IMG_SIZE_T uSize;
++ IMG_HANDLE hOSMemHandle;
++ IMG_UINT32 ui32Flags;
++};
++
++typedef struct _BM_BUF_
++{
++ IMG_CPU_VIRTADDR *CpuVAddr;
++ IMG_VOID *hOSMemHandle;
++ IMG_CPU_PHYADDR CpuPAddr;
++ IMG_DEV_VIRTADDR DevVAddr;
++
++ BM_MAPPING *pMapping;
++ IMG_UINT32 ui32RefCount;
++} BM_BUF;
++
++struct _BM_HEAP_
++{
++ IMG_UINT32 ui32Attribs;
++ BM_CONTEXT *pBMContext;
++ RA_ARENA *pImportArena;
++ RA_ARENA *pLocalDevMemArena;
++ RA_ARENA *pVMArena;
++ DEV_ARENA_DESCRIPTOR sDevArena;
++ MMU_HEAP *pMMUHeap;
++
++ struct _BM_HEAP_ *psNext;
++ struct _BM_HEAP_ **ppsThis;
++};
++
++struct _BM_CONTEXT_
++{
++ MMU_CONTEXT *psMMUContext;
++
++
++ BM_HEAP *psBMHeap;
++
++
++ BM_HEAP *psBMSharedHeap;
++
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++
++
++ HASH_TABLE *pBufferHash;
++
++
++ IMG_HANDLE hResItem;
++
++ IMG_UINT32 ui32RefCount;
++
++
++
++ struct _BM_CONTEXT_ *psNext;
++ struct _BM_CONTEXT_ **ppsThis;
++};
++
++
++
++typedef IMG_VOID *BM_HANDLE;
++
++#define BP_POOL_MASK 0x7
++
++#define BP_CONTIGUOUS (1 << 3)
++#define BP_PARAMBUFFER (1 << 4)
++
++#define BM_MAX_DEVMEM_ARENAS 2
++
++IMG_HANDLE
++BM_CreateContext(PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_DEV_PHYADDR *psPDDevPAddr,
++ PVRSRV_PER_PROCESS_DATA *psPerProc,
++ IMG_BOOL *pbCreated);
++
++
++PVRSRV_ERROR
++BM_DestroyContext (IMG_HANDLE hBMContext,
++ IMG_BOOL *pbCreated);
++
++
++IMG_HANDLE
++BM_CreateHeap (IMG_HANDLE hBMContext,
++ DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo);
++
++IMG_VOID
++BM_DestroyHeap (IMG_HANDLE hDevMemHeap);
++
++
++IMG_BOOL
++BM_Reinitialise (PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_BOOL
++BM_Alloc (IMG_HANDLE hDevMemHeap,
++ IMG_DEV_VIRTADDR *psDevVAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 *pui32Flags,
++ IMG_UINT32 uDevVAddrAlignment,
++ BM_HANDLE *phBuf);
++
++IMG_BOOL
++BM_Wrap ( IMG_HANDLE hDevMemHeap,
++ IMG_SIZE_T ui32Size,
++ IMG_SIZE_T ui32Offset,
++ IMG_BOOL bPhysContig,
++ IMG_SYS_PHYADDR *psSysAddr,
++ IMG_VOID *pvCPUVAddr,
++ IMG_UINT32 *pui32Flags,
++ BM_HANDLE *phBuf);
++
++IMG_VOID
++BM_Free (BM_HANDLE hBuf,
++ IMG_UINT32 ui32Flags);
++
++
++IMG_CPU_VIRTADDR
++BM_HandleToCpuVaddr (BM_HANDLE hBuf);
++
++IMG_DEV_VIRTADDR
++BM_HandleToDevVaddr (BM_HANDLE hBuf);
++
++IMG_SYS_PHYADDR
++BM_HandleToSysPaddr (BM_HANDLE hBuf);
++
++IMG_HANDLE
++BM_HandleToOSMemHandle (BM_HANDLE hBuf);
++
++IMG_BOOL
++BM_ContiguousStatistics (IMG_UINT32 uFlags,
++ IMG_UINT32 *pTotalBytes,
++ IMG_UINT32 *pAvailableBytes);
++
++
++IMG_VOID BM_GetPhysPageAddr(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_DEV_VIRTADDR sDevVPageAddr,
++ IMG_DEV_PHYADDR *psDevPAddr);
++
++PVRSRV_ERROR BM_GetHeapInfo(IMG_HANDLE hDevMemHeap,
++ PVRSRV_HEAP_INFO *psHeapInfo);
++
++MMU_CONTEXT* BM_GetMMUContext(IMG_HANDLE hDevMemHeap);
++
++MMU_CONTEXT* BM_GetMMUContextFromMemContext(IMG_HANDLE hDevMemContext);
++
++IMG_HANDLE BM_GetMMUHeap(IMG_HANDLE hDevMemHeap);
++
++PVRSRV_DEVICE_NODE* BM_GetDeviceNode(IMG_HANDLE hDevMemContext);
++
++
++IMG_HANDLE BM_GetMappingHandle(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/device.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/device.h
+new file mode 100644
+index 0000000..90c8c7a
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/device.h
+@@ -0,0 +1,278 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __DEVICE_H__
++#define __DEVICE_H__
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#include "ra.h"
++#include "resman.h"
++
++typedef struct _BM_CONTEXT_ BM_CONTEXT;
++
++typedef struct _MMU_HEAP_ MMU_HEAP;
++typedef struct _MMU_CONTEXT_ MMU_CONTEXT;
++
++#define PVRSRV_BACKINGSTORE_SYSMEM_CONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+0))
++#define PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+1))
++#define PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+2))
++#define PVRSRV_BACKINGSTORE_LOCALMEM_NONCONTIG (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+3))
++
++typedef IMG_UINT32 DEVICE_MEMORY_HEAP_TYPE;
++#define DEVICE_MEMORY_HEAP_PERCONTEXT 0
++#define DEVICE_MEMORY_HEAP_KERNEL 1
++#define DEVICE_MEMORY_HEAP_SHARED 2
++#define DEVICE_MEMORY_HEAP_SHARED_EXPORTED 3
++
++#define PVRSRV_DEVICE_NODE_FLAGS_PORT80DISPLAY 1
++#define PVRSRV_DEVICE_NODE_FLAGS_MMU_OPT_INV 2
++
++typedef struct _DEVICE_MEMORY_HEAP_INFO_
++{
++
++ IMG_UINT32 ui32HeapID;
++
++
++ IMG_CHAR *pszName;
++
++
++ IMG_CHAR *pszBSName;
++
++
++ IMG_DEV_VIRTADDR sDevVAddrBase;
++
++
++ IMG_UINT32 ui32HeapSize;
++
++
++ IMG_UINT32 ui32Attribs;
++
++
++ DEVICE_MEMORY_HEAP_TYPE DevMemHeapType;
++
++
++ IMG_HANDLE hDevMemHeap;
++
++
++ RA_ARENA *psLocalDevMemArena;
++
++
++ IMG_UINT32 ui32DataPageSize;
++
++} DEVICE_MEMORY_HEAP_INFO;
++
++typedef struct _DEVICE_MEMORY_INFO_
++{
++
++ IMG_UINT32 ui32AddressSpaceSizeLog2;
++
++
++
++
++ IMG_UINT32 ui32Flags;
++
++
++ IMG_UINT32 ui32HeapCount;
++
++
++ IMG_UINT32 ui32SyncHeapID;
++
++
++ IMG_UINT32 ui32MappingHeapID;
++
++
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++
++
++ BM_CONTEXT *pBMKernelContext;
++
++
++ BM_CONTEXT *pBMContext;
++
++} DEVICE_MEMORY_INFO;
++
++
++typedef struct DEV_ARENA_DESCRIPTOR_TAG
++{
++ IMG_UINT32 ui32HeapID;
++
++ IMG_CHAR *pszName;
++
++ IMG_DEV_VIRTADDR BaseDevVAddr;
++
++ IMG_UINT32 ui32Size;
++
++ DEVICE_MEMORY_HEAP_TYPE DevMemHeapType;
++
++
++ IMG_UINT32 ui32DataPageSize;
++
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeapInfo;
++
++} DEV_ARENA_DESCRIPTOR;
++
++typedef struct _SYS_DATA_TAG_ *PSYS_DATA;
++
++typedef struct _PVRSRV_DEVICE_NODE_
++{
++ PVRSRV_DEVICE_IDENTIFIER sDevId;
++ IMG_UINT32 ui32RefCount;
++
++
++
++
++ PVRSRV_ERROR (*pfnInitDevice) (IMG_VOID*);
++
++ PVRSRV_ERROR (*pfnDeInitDevice) (IMG_VOID*);
++
++
++ PVRSRV_ERROR (*pfnInitDeviceCompatCheck) (struct _PVRSRV_DEVICE_NODE_*);
++
++
++ PVRSRV_ERROR (*pfnMMUInitialise)(struct _PVRSRV_DEVICE_NODE_*, MMU_CONTEXT**, IMG_DEV_PHYADDR*);
++ IMG_VOID (*pfnMMUFinalise)(MMU_CONTEXT*);
++ IMG_VOID (*pfnMMUInsertHeap)(MMU_CONTEXT*, MMU_HEAP*);
++ MMU_HEAP* (*pfnMMUCreate)(MMU_CONTEXT*,DEV_ARENA_DESCRIPTOR*,RA_ARENA**);
++ IMG_VOID (*pfnMMUDelete)(MMU_HEAP*);
++ IMG_BOOL (*pfnMMUAlloc)(MMU_HEAP*pMMU,
++ IMG_SIZE_T uSize,
++ IMG_SIZE_T *pActualSize,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 uDevVAddrAlignment,
++ IMG_DEV_VIRTADDR *pDevVAddr);
++ IMG_VOID (*pfnMMUFree)(MMU_HEAP*,IMG_DEV_VIRTADDR,IMG_UINT32);
++ IMG_VOID (*pfnMMUEnable)(MMU_HEAP*);
++ IMG_VOID (*pfnMMUDisable)(MMU_HEAP*);
++ IMG_VOID (*pfnMMUMapPages)(MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR devVAddr,
++ IMG_SYS_PHYADDR SysPAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag);
++ IMG_VOID (*pfnMMUMapShadow)(MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR MapBaseDevVAddr,
++ IMG_SIZE_T uSize,
++ IMG_CPU_VIRTADDR CpuVAddr,
++ IMG_HANDLE hOSMemHandle,
++ IMG_DEV_VIRTADDR *pDevVAddr,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag);
++ IMG_VOID (*pfnMMUUnmapPages)(MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR dev_vaddr,
++ IMG_UINT32 ui32PageCount,
++ IMG_HANDLE hUniqueTag);
++
++ IMG_VOID (*pfnMMUMapScatter)(MMU_HEAP *pMMU,
++ IMG_DEV_VIRTADDR DevVAddr,
++ IMG_SYS_PHYADDR *psSysAddr,
++ IMG_SIZE_T uSize,
++ IMG_UINT32 ui32MemFlags,
++ IMG_HANDLE hUniqueTag);
++
++ IMG_DEV_PHYADDR (*pfnMMUGetPhysPageAddr)(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr);
++ IMG_DEV_PHYADDR (*pfnMMUGetPDDevPAddr)(MMU_CONTEXT *pMMUContext);
++
++
++ IMG_BOOL (*pfnDeviceISR)(IMG_VOID*);
++
++ IMG_VOID *pvISRData;
++
++ IMG_UINT32 ui32SOCInterruptBit;
++
++ IMG_VOID (*pfnDeviceMISR)(IMG_VOID*);
++
++
++ IMG_VOID (*pfnDeviceCommandComplete)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
++
++ IMG_BOOL bReProcessDeviceCommandComplete;
++
++
++ DEVICE_MEMORY_INFO sDevMemoryInfo;
++
++
++ IMG_VOID *pvDevice;
++ IMG_UINT32 ui32pvDeviceSize;
++
++
++ PRESMAN_CONTEXT hResManContext;
++
++
++ PSYS_DATA psSysData;
++
++
++ RA_ARENA *psLocalDevMemArena;
++
++ IMG_UINT32 ui32Flags;
++
++ struct _PVRSRV_DEVICE_NODE_ *psNext;
++ struct _PVRSRV_DEVICE_NODE_ **ppsThis;
++} PVRSRV_DEVICE_NODE;
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVRegisterDevice(PSYS_DATA psSysData,
++ PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*),
++ IMG_UINT32 ui32SOCInterruptBit,
++ IMG_UINT32 *pui32DeviceIndex );
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInitialiseDevice(IMG_UINT32 ui32DevIndex);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFinaliseSystem(IMG_BOOL bInitSuccesful);
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex);
++
++#if !defined(USE_CODE)
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PollForValueKM(volatile IMG_UINT32* pui32LinMemAddr,
++ IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Waitus,
++ IMG_UINT32 ui32Tries);
++
++#endif
++
++
++#if defined (USING_ISR_INTERRUPTS)
++PVRSRV_ERROR IMG_CALLCONV PollForInterruptKM(IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Waitus,
++ IMG_UINT32 ui32Tries);
++#endif
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInit(PSYS_DATA psSysData);
++IMG_VOID IMG_CALLCONV PVRSRVDeInit(PSYS_DATA psSysData);
++IMG_BOOL IMG_CALLCONV PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode);
++IMG_BOOL IMG_CALLCONV PVRSRVSystemLISR(IMG_VOID *pvSysData);
++IMG_VOID IMG_CALLCONV PVRSRVMISR(IMG_VOID *pvSysData);
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/handle.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/handle.h
+new file mode 100644
+index 0000000..fda74f1
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/handle.h
+@@ -0,0 +1,382 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __HANDLE_H__
++#define __HANDLE_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "img_types.h"
++#include "hash.h"
++#include "resman.h"
++
++typedef enum
++{
++ PVRSRV_HANDLE_TYPE_NONE = 0,
++ PVRSRV_HANDLE_TYPE_PERPROC_DATA,
++ PVRSRV_HANDLE_TYPE_DEV_NODE,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT,
++ PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++ PVRSRV_HANDLE_TYPE_MEM_INFO,
++ PVRSRV_HANDLE_TYPE_SYNC_INFO,
++ PVRSRV_HANDLE_TYPE_DISP_INFO,
++ PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
++ PVRSRV_HANDLE_TYPE_BUF_INFO,
++ PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++ PVRSRV_HANDLE_TYPE_BUF_BUFFER,
++ PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT,
++ PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT,
++ PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT,
++ PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
++ PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++ PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO,
++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
++ PVRSRV_HANDLE_TYPE_MMAP_INFO,
++ PVRSRV_HANDLE_TYPE_SOC_TIMER
++} PVRSRV_HANDLE_TYPE;
++
++typedef enum
++{
++
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE = 0,
++
++ PVRSRV_HANDLE_ALLOC_FLAG_SHARED = 0x01,
++
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI = 0x02,
++
++ PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE = 0x04
++} PVRSRV_HANDLE_ALLOC_FLAG;
++
++struct _PVRSRV_HANDLE_BASE_;
++typedef struct _PVRSRV_HANDLE_BASE_ PVRSRV_HANDLE_BASE;
++
++#ifdef PVR_SECURE_HANDLES
++extern PVRSRV_HANDLE_BASE *gpsKernelHandleBase;
++
++#define KERNEL_HANDLE_BASE (gpsKernelHandleBase)
++
++PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag);
++
++PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent);
++
++PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle);
++
++PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor);
++
++PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32BatchSize);
++
++PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE *psBase);
++
++IMG_VOID PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE *psBase);
++
++PVRSRV_ERROR PVRSRVSetMaxHandle(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32MaxHandle);
++
++IMG_UINT32 PVRSRVGetMaxHandle(PVRSRV_HANDLE_BASE *psBase);
++
++PVRSRV_ERROR PVRSRVEnableHandlePurging(PVRSRV_HANDLE_BASE *psBase);
++
++PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase);
++
++PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase);
++
++PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase);
++
++PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID);
++
++PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID);
++
++#else
++
++#define KERNEL_HANDLE_BASE IMG_NULL
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVAllocHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag)
++{
++ PVR_UNREFERENCED_PARAMETER(eType);
++ PVR_UNREFERENCED_PARAMETER(eFlag);
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ *phHandle = pvData;
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVAllocSubHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent)
++{
++ PVR_UNREFERENCED_PARAMETER(eType);
++ PVR_UNREFERENCED_PARAMETER(eFlag);
++ PVR_UNREFERENCED_PARAMETER(hParent);
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ *phHandle = pvData;
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVFindHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType)
++{
++ PVR_UNREFERENCED_PARAMETER(eType);
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ *phHandle = pvData;
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVLookupHandleAnyType)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ *peType = PVRSRV_HANDLE_TYPE_NONE;
++
++ *ppvData = hHandle;
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVLookupHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++ PVR_UNREFERENCED_PARAMETER(eType);
++
++ *ppvData = hHandle;
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVLookupSubHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++ PVR_UNREFERENCED_PARAMETER(eType);
++ PVR_UNREFERENCED_PARAMETER(hAncestor);
++
++ *ppvData = hHandle;
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVGetParentHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++ PVR_UNREFERENCED_PARAMETER(eType);
++ PVR_UNREFERENCED_PARAMETER(hHandle);
++
++ *phParent = IMG_NULL;
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVLookupAndReleaseHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ PVR_UNREFERENCED_PARAMETER(eType);
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ *ppvData = hHandle;
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVReleaseHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++ PVR_UNREFERENCED_PARAMETER(hHandle);
++ PVR_UNREFERENCED_PARAMETER(eType);
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVNewHandleBatch)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVNewHandleBatch(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32BatchSize)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++ PVR_UNREFERENCED_PARAMETER(ui32BatchSize);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVCommitHandleBatch)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVCommitHandleBatch(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVReleaseHandleBatch)
++#endif
++static INLINE
++IMG_VOID PVRSRVReleaseHandleBatch(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVSetMaxHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVSetMaxHandle(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32MaxHandle)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++ PVR_UNREFERENCED_PARAMETER(ui32MaxHandle);
++
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVGetMaxHandle)
++#endif
++static INLINE
++IMG_UINT32 PVRSRVGetMaxHandle(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ return 0;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVEnableHandlePurging)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVEnableHandlePurging(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVPurgeHandles)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVAllocHandleBase)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase)
++{
++ *ppsBase = IMG_NULL;
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVFreeHandleBase)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVR_UNREFERENCED_PARAMETER(psBase);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVHandleInit)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID)
++{
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVHandleDeInit)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID)
++{
++ return PVRSRV_OK;
++}
++
++#endif
++
++#define PVRSRVAllocHandleNR(psBase, phHandle, pvData, eType, eFlag) \
++ (IMG_VOID)PVRSRVAllocHandle(psBase, phHandle, pvData, eType, eFlag)
++
++#define PVRSRVAllocSubHandleNR(psBase, phHandle, pvData, eType, eFlag, hParent) \
++ (IMG_VOID)PVRSRVAllocSubHandle(psBase, phHandle, pvData, eType, eFlag, hParent)
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/hash.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/hash.h
+new file mode 100644
+index 0000000..d45f4a9
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/hash.h
+@@ -0,0 +1,73 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _HASH_H_
++#define _HASH_H_
++
++#include "img_types.h"
++#include "osfunc.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++typedef IMG_UINT32 HASH_FUNC(IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen);
++typedef IMG_BOOL HASH_KEY_COMP(IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2);
++
++typedef struct _HASH_TABLE_ HASH_TABLE;
++
++IMG_UINT32 HASH_Func_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen);
++
++IMG_BOOL HASH_Key_Comp_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2);
++
++HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, IMG_SIZE_T uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp);
++
++HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen);
++
++IMG_VOID HASH_Delete (HASH_TABLE *pHash);
++
++IMG_BOOL HASH_Insert_Extended (HASH_TABLE *pHash, IMG_VOID *pKey, IMG_UINTPTR_T v);
++
++IMG_BOOL HASH_Insert (HASH_TABLE *pHash, IMG_UINTPTR_T k, IMG_UINTPTR_T v);
++
++IMG_UINTPTR_T HASH_Remove_Extended(HASH_TABLE *pHash, IMG_VOID *pKey);
++
++IMG_UINTPTR_T HASH_Remove (HASH_TABLE *pHash, IMG_UINTPTR_T k);
++
++IMG_UINTPTR_T HASH_Retrieve_Extended (HASH_TABLE *pHash, IMG_VOID *pKey);
++
++IMG_UINTPTR_T HASH_Retrieve (HASH_TABLE *pHash, IMG_UINTPTR_T k);
++
++#ifdef HASH_TRACE
++IMG_VOID HASH_Dump (HASH_TABLE *pHash);
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/lists.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/lists.h
+new file mode 100644
+index 0000000..76d5af2
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/lists.h
+@@ -0,0 +1,176 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __LISTS_UTILS__
++#define __LISTS_UTILS__
++
++#include <stdarg.h>
++#include "img_types.h"
++
++#define DECLARE_LIST_FOR_EACH(TYPE) \
++IMG_VOID List_##TYPE##_ForEach(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode))
++
++#define IMPLEMENT_LIST_FOR_EACH(TYPE) \
++IMG_VOID List_##TYPE##_ForEach(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode))\
++{\
++ while(psHead)\
++ {\
++ pfnCallBack(psHead);\
++ psHead = psHead->psNext;\
++ }\
++}
++
++
++#define DECLARE_LIST_FOR_EACH_VA(TYPE) \
++IMG_VOID List_##TYPE##_ForEach_va(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode, va_list va), ...)
++
++#define IMPLEMENT_LIST_FOR_EACH_VA(TYPE) \
++IMG_VOID List_##TYPE##_ForEach_va(TYPE *psHead, IMG_VOID(*pfnCallBack)(TYPE* psNode, va_list va), ...) \
++{\
++ va_list ap;\
++ while(psHead)\
++ {\
++ va_start(ap, pfnCallBack);\
++ pfnCallBack(psHead, ap);\
++ psHead = psHead->psNext;\
++ va_end(ap);\
++ }\
++}
++
++
++#define DECLARE_LIST_ANY(TYPE) \
++IMG_VOID* List_##TYPE##_Any(TYPE *psHead, IMG_VOID* (*pfnCallBack)(TYPE* psNode))
++
++#define IMPLEMENT_LIST_ANY(TYPE) \
++IMG_VOID* List_##TYPE##_Any(TYPE *psHead, IMG_VOID* (*pfnCallBack)(TYPE* psNode))\
++{ \
++ IMG_VOID *pResult;\
++ TYPE *psNextNode;\
++ pResult = IMG_NULL;\
++ psNextNode = psHead;\
++ while(psHead && !pResult)\
++ {\
++ psNextNode = psNextNode->psNext;\
++ pResult = pfnCallBack(psHead);\
++ psHead = psNextNode;\
++ }\
++ return pResult;\
++}
++
++
++#define DECLARE_LIST_ANY_VA(TYPE) \
++IMG_VOID* List_##TYPE##_Any_va(TYPE *psHead, IMG_VOID*(*pfnCallBack)(TYPE* psNode, va_list va), ...)
++
++#define IMPLEMENT_LIST_ANY_VA(TYPE) \
++IMG_VOID* List_##TYPE##_Any_va(TYPE *psHead, IMG_VOID*(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
++{\
++ va_list ap;\
++ TYPE *psNextNode;\
++ IMG_VOID* pResult = IMG_NULL;\
++ while(psHead && !pResult)\
++ {\
++ psNextNode = psHead->psNext;\
++ va_start(ap, pfnCallBack);\
++ pResult = pfnCallBack(psHead, ap);\
++ va_end(ap);\
++ psHead = psNextNode;\
++ }\
++ return pResult;\
++}
++
++#define DECLARE_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
++RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))
++
++#define IMPLEMENT_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
++RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))\
++{ \
++ RTYPE result;\
++ TYPE *psNextNode;\
++ result = CONTINUE;\
++ psNextNode = psHead;\
++ while(psHead && result == CONTINUE)\
++ {\
++ psNextNode = psNextNode->psNext;\
++ result = pfnCallBack(psHead);\
++ psHead = psNextNode;\
++ }\
++ return result;\
++}
++
++
++#define DECLARE_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
++RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)
++
++#define IMPLEMENT_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
++RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
++{\
++ va_list ap;\
++ TYPE *psNextNode;\
++ RTYPE result = CONTINUE;\
++ while(psHead && result == CONTINUE)\
++ {\
++ psNextNode = psHead->psNext;\
++ va_start(ap, pfnCallBack);\
++ result = pfnCallBack(psHead, ap);\
++ va_end(ap);\
++ psHead = psNextNode;\
++ }\
++ return result;\
++}
++
++
++#define DECLARE_LIST_REMOVE(TYPE) \
++IMG_VOID List_##TYPE##_Remove(TYPE *psNode)
++
++#define IMPLEMENT_LIST_REMOVE(TYPE) \
++IMG_VOID List_##TYPE##_Remove(TYPE *psNode)\
++{\
++ (*psNode->ppsThis)=psNode->psNext;\
++ if(psNode->psNext)\
++ {\
++ psNode->psNext->ppsThis = psNode->ppsThis;\
++ }\
++}
++
++#define DECLARE_LIST_INSERT(TYPE) \
++IMG_VOID List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)
++
++#define IMPLEMENT_LIST_INSERT(TYPE) \
++IMG_VOID List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)\
++{\
++ psNewNode->ppsThis = ppsHead;\
++ psNewNode->psNext = *ppsHead;\
++ *ppsHead = psNewNode;\
++ if(psNewNode->psNext)\
++ {\
++ psNewNode->psNext->ppsThis = &(psNewNode->psNext);\
++ }\
++}
++
++
++#define IS_LAST_ELEMENT(x) ((x)->psNext == IMG_NULL)
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/metrics.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/metrics.h
+new file mode 100644
+index 0000000..2632f8d
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/metrics.h
+@@ -0,0 +1,130 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _METRICS_
++#define _METRICS_
++
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++
++#if defined(DEBUG) || defined(TIMING)
++
++
++typedef struct
++{
++ IMG_UINT32 ui32Start;
++ IMG_UINT32 ui32Stop;
++ IMG_UINT32 ui32Total;
++ IMG_UINT32 ui32Count;
++} Temporal_Data;
++
++extern Temporal_Data asTimers[];
++
++extern IMG_UINT32 PVRSRVTimeNow(IMG_VOID);
++extern IMG_VOID PVRSRVSetupMetricTimers(IMG_VOID *pvDevInfo);
++extern IMG_VOID PVRSRVOutputMetricTotals(IMG_VOID);
++
++
++#define PVRSRV_TIMER_DUMMY 0
++
++#define PVRSRV_TIMER_EXAMPLE_1 1
++#define PVRSRV_TIMER_EXAMPLE_2 2
++
++
++#define PVRSRV_NUM_TIMERS (PVRSRV_TIMER_EXAMPLE_2 + 1)
++
++#define PVRSRV_TIME_START(X) { \
++ asTimers[X].ui32Count += 1; \
++ asTimers[X].ui32Count |= 0x80000000L; \
++ asTimers[X].ui32Start = PVRSRVTimeNow(); \
++ asTimers[X].ui32Stop = 0; \
++ }
++
++#define PVRSRV_TIME_SUSPEND(X) { \
++ asTimers[X].ui32Stop += PVRSRVTimeNow() - asTimers[X].ui32Start; \
++ }
++
++#define PVRSRV_TIME_RESUME(X) { \
++ asTimers[X].ui32Start = PVRSRVTimeNow(); \
++ }
++
++#define PVRSRV_TIME_STOP(X) { \
++ asTimers[X].ui32Stop += PVRSRVTimeNow() - asTimers[X].ui32Start; \
++ asTimers[X].ui32Total += asTimers[X].ui32Stop; \
++ asTimers[X].ui32Count &= 0x7FFFFFFFL; \
++ }
++
++#define PVRSRV_TIME_RESET(X) { \
++ asTimers[X].ui32Start = 0; \
++ asTimers[X].ui32Stop = 0; \
++ asTimers[X].ui32Total = 0; \
++ asTimers[X].ui32Count = 0; \
++ }
++
++
++#if defined(__sh__)
++
++#define TST_REG ((volatile IMG_UINT8 *) (psDevInfo->pvSOCRegsBaseKM))
++
++#define TCOR_2 ((volatile IMG_UINT *) (psDevInfo->pvSOCRegsBaseKM+28))
++#define TCNT_2 ((volatile IMG_UINT *) (psDevInfo->pvSOCRegsBaseKM+32))
++#define TCR_2 ((volatile IMG_UINT16 *)(psDevInfo->pvSOCRegsBaseKM+36))
++
++#define TIMER_DIVISOR 4
++
++#endif
++
++
++
++
++
++#else
++
++
++
++#define PVRSRV_TIME_START(X)
++#define PVRSRV_TIME_SUSPEND(X)
++#define PVRSRV_TIME_RESUME(X)
++#define PVRSRV_TIME_STOP(X)
++#define PVRSRV_TIME_RESET(X)
++
++#define PVRSRVSetupMetricTimers(X)
++#define PVRSRVOutputMetricTotals()
++
++
++
++#endif
++
++#if defined(__cplusplus)
++}
++#endif
++
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/osfunc.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/osfunc.h
+new file mode 100644
+index 0000000..7686c69
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/osfunc.h
+@@ -0,0 +1,487 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifdef DEBUG_RELEASE_BUILD
++#pragma optimize( "", off )
++#define DEBUG 1
++#endif
++
++#ifndef __OSFUNC_H__
++#define __OSFUNC_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#if defined(__linux__) && defined(__KERNEL__)
++#include <linux/hardirq.h>
++#include <linux/string.h>
++#endif
++
++
++
++ #define PVRSRV_PAGEABLE_SELECT PVRSRV_OS_PAGEABLE_HEAP
++
++#define KERNEL_ID 0xffffffffL
++#define POWER_MANAGER_ID 0xfffffffeL
++#define ISR_ID 0xfffffffdL
++#define TIMER_ID 0xfffffffcL
++
++
++#define HOST_PAGESIZE OSGetPageSize
++#define HOST_PAGEMASK (~(HOST_PAGESIZE()-1))
++#define HOST_PAGEALIGN(addr) (((addr)+HOST_PAGESIZE()-1)&HOST_PAGEMASK)
++
++#define PVRSRV_OS_HEAP_MASK 0xf
++#define PVRSRV_OS_PAGEABLE_HEAP 0x1
++#define PVRSRV_OS_NON_PAGEABLE_HEAP 0x2
++
++
++IMG_UINT32 OSClockus(IMG_VOID);
++IMG_SIZE_T OSGetPageSize(IMG_VOID);
++PVRSRV_ERROR OSInstallDeviceLISR(IMG_VOID *pvSysData,
++ IMG_UINT32 ui32Irq,
++ IMG_CHAR *pszISRName,
++ IMG_VOID *pvDeviceNode);
++PVRSRV_ERROR OSUninstallDeviceLISR(IMG_VOID *pvSysData);
++PVRSRV_ERROR OSInstallSystemLISR(IMG_VOID *pvSysData, IMG_UINT32 ui32Irq);
++PVRSRV_ERROR OSUninstallSystemLISR(IMG_VOID *pvSysData);
++PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData);
++PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData);
++IMG_CPU_PHYADDR OSMapLinToCPUPhys(IMG_VOID* pvLinAddr);
++IMG_VOID OSMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_SIZE_T ui32Size);
++IMG_VOID *OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE *phOSMemHandle);
++IMG_BOOL OSUnMapPhysToLin(IMG_VOID *pvLinAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle);
++
++PVRSRV_ERROR OSReservePhys(IMG_CPU_PHYADDR BasePAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_VOID **ppvCpuVAddr, IMG_HANDLE *phOSMemHandle);
++PVRSRV_ERROR OSUnReservePhys(IMG_VOID *pvCpuVAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle);
++
++#if defined(SUPPORT_CPU_CACHED_BUFFERS)
++IMG_VOID OSFlushCPUCacheKM(IMG_VOID);
++IMG_VOID OSFlushCPUCacheRangeKM(IMG_VOID *pvRangeAddrStart,
++ IMG_VOID *pvRangeAddrEnd);
++#endif
++
++#if defined(__linux__)
++PVRSRV_ERROR OSRegisterDiscontigMem(IMG_SYS_PHYADDR *pBasePAddr,
++ IMG_VOID *pvCpuVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE *phOSMemHandle);
++PVRSRV_ERROR OSUnRegisterDiscontigMem(IMG_VOID *pvCpuVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hOSMemHandle);
++#else
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSRegisterDiscontigMem)
++#endif
++static INLINE PVRSRV_ERROR OSRegisterDiscontigMem(IMG_SYS_PHYADDR *pBasePAddr,
++ IMG_VOID *pvCpuVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE *phOSMemHandle)
++{
++ PVR_UNREFERENCED_PARAMETER(pBasePAddr);
++ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++ PVR_UNREFERENCED_PARAMETER(phOSMemHandle);
++
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSUnRegisterDiscontigMem)
++#endif
++static INLINE PVRSRV_ERROR OSUnRegisterDiscontigMem(IMG_VOID *pvCpuVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hOSMemHandle)
++{
++ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
++
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++#endif
++
++
++#if defined(__linux__)
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSReserveDiscontigPhys)
++#endif
++static INLINE PVRSRV_ERROR OSReserveDiscontigPhys(IMG_SYS_PHYADDR *pBasePAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_VOID **ppvCpuVAddr, IMG_HANDLE *phOSMemHandle)
++{
++#if defined(__linux__)
++ *ppvCpuVAddr = IMG_NULL;
++ return OSRegisterDiscontigMem(pBasePAddr, *ppvCpuVAddr, ui32Bytes, ui32Flags, phOSMemHandle);
++#else
++ extern IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr(IMG_SYS_PHYADDR SysPAddr);
++
++
++ return OSReservePhys(SysSysPAddrToCpuPAddr(pBasePAddr[0]), ui32Bytes, ui32Flags, ppvCpuVAddr, phOSMemHandle);
++#endif
++}
++
++static INLINE PVRSRV_ERROR OSUnReserveDiscontigPhys(IMG_VOID *pvCpuVAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle)
++{
++#if defined(__linux__)
++ OSUnRegisterDiscontigMem(pvCpuVAddr, ui32Bytes, ui32Flags, hOSMemHandle);
++#endif
++
++ return PVRSRV_OK;
++}
++#else
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSReserveDiscontigPhys)
++#endif
++static INLINE PVRSRV_ERROR OSReserveDiscontigPhys(IMG_SYS_PHYADDR *pBasePAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_VOID **ppvCpuVAddr, IMG_HANDLE *phOSMemHandle)
++{
++ PVR_UNREFERENCED_PARAMETER(pBasePAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++ PVR_UNREFERENCED_PARAMETER(ppvCpuVAddr);
++ PVR_UNREFERENCED_PARAMETER(phOSMemHandle);
++
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSUnReserveDiscontigPhys)
++#endif
++static INLINE PVRSRV_ERROR OSUnReserveDiscontigPhys(IMG_VOID *pvCpuVAddr, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle)
++{
++ PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
++
++ return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++#endif
++
++PVRSRV_ERROR OSRegisterMem(IMG_CPU_PHYADDR BasePAddr,
++ IMG_VOID *pvCpuVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE *phOSMemHandle);
++PVRSRV_ERROR OSUnRegisterMem(IMG_VOID *pvCpuVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hOSMemHandle);
++
++
++
++#if defined(__linux__)
++PVRSRV_ERROR OSGetSubMemHandle(IMG_HANDLE hOSMemHandle,
++ IMG_UINTPTR_T ui32ByteOffset,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE *phOSMemHandleRet);
++PVRSRV_ERROR OSReleaseSubMemHandle(IMG_HANDLE hOSMemHandle, IMG_UINT32 ui32Flags);
++#else
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSGetSubMemHandle)
++#endif
++static INLINE PVRSRV_ERROR OSGetSubMemHandle(IMG_HANDLE hOSMemHandle,
++ IMG_UINTPTR_T ui32ByteOffset,
++ IMG_SIZE_T ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE *phOSMemHandleRet)
++{
++ PVR_UNREFERENCED_PARAMETER(ui32ByteOffset);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++
++ *phOSMemHandleRet = hOSMemHandle;
++ return PVRSRV_OK;
++}
++
++static INLINE PVRSRV_ERROR OSReleaseSubMemHandle(IMG_HANDLE hOSMemHandle, IMG_UINT32 ui32Flags)
++{
++ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
++ PVR_UNREFERENCED_PARAMETER(ui32Flags);
++ return PVRSRV_OK;
++}
++#endif
++
++IMG_UINT32 OSGetCurrentProcessIDKM(IMG_VOID);
++IMG_UINT32 OSGetCurrentThreadID( IMG_VOID );
++IMG_VOID OSMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_SIZE_T ui32Size);
++
++PVRSRV_ERROR OSAllocPages_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_UINT32 ui32PageSize, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phPageAlloc);
++PVRSRV_ERROR OSFreePages(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID pvLinAddr, IMG_HANDLE hPageAlloc);
++
++
++#ifdef PVRSRV_LOG_MEMORY_ALLOCS
++ #define OSAllocMem(flags, size, linAddr, blockAlloc, logStr) \
++ (PVR_TRACE(("OSAllocMem(" #flags ", " #size ", " #linAddr ", " #blockAlloc "): " logStr " (size = 0x%lx)", size)), \
++ OSAllocMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__))
++
++ #define OSAllocPages(flags, size, pageSize, linAddr, pageAlloc) \
++ (PVR_TRACE(("OSAllocPages(" #flags ", " #size ", " #pageSize ", " #linAddr ", " #pageAlloc "): (size = 0x%lx)", size)), \
++ OSAllocPages_Impl(flags, size, pageSize, linAddr, pageAlloc))
++
++ #define OSFreeMem(flags, size, linAddr, blockAlloc) \
++ (PVR_TRACE(("OSFreeMem(" #flags ", " #size ", " #linAddr ", " #blockAlloc "): (pointer = 0x%X)", linAddr)), \
++ OSFreeMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__))
++#else
++ #define OSAllocMem(flags, size, linAddr, blockAlloc, logString) \
++ OSAllocMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__)
++
++ #define OSAllocPages OSAllocPages_Impl
++
++ #define OSFreeMem(flags, size, linAddr, blockAlloc) \
++ OSFreeMem_Debug_Wrapper(flags, size, linAddr, blockAlloc, __FILE__, __LINE__)
++#endif
++
++#ifdef PVRSRV_DEBUG_OS_MEMORY
++
++ PVRSRV_ERROR OSAllocMem_Debug_Wrapper(IMG_UINT32 ui32Flags,
++ IMG_UINT32 ui32Size,
++ IMG_PVOID *ppvCpuVAddr,
++ IMG_HANDLE *phBlockAlloc,
++ IMG_CHAR *pszFilename,
++ IMG_UINT32 ui32Line);
++
++ PVRSRV_ERROR OSFreeMem_Debug_Wrapper(IMG_UINT32 ui32Flags,
++ IMG_UINT32 ui32Size,
++ IMG_PVOID pvCpuVAddr,
++ IMG_HANDLE hBlockAlloc,
++ IMG_CHAR *pszFilename,
++ IMG_UINT32 ui32Line);
++
++
++ typedef struct
++ {
++ IMG_UINT8 sGuardRegionBefore[8];
++ IMG_CHAR sFileName[128];
++ IMG_UINT32 uLineNo;
++ IMG_SIZE_T uSize;
++ IMG_SIZE_T uSizeParityCheck;
++ enum valid_tag
++ { isFree = 0x277260FF,
++ isAllocated = 0x260511AA
++ } eValid;
++ } OSMEM_DEBUG_INFO;
++
++ #define TEST_BUFFER_PADDING_STATUS (sizeof(OSMEM_DEBUG_INFO))
++ #define TEST_BUFFER_PADDING_AFTER (8)
++ #define TEST_BUFFER_PADDING (TEST_BUFFER_PADDING_STATUS + TEST_BUFFER_PADDING_AFTER)
++#else
++ #define OSAllocMem_Debug_Wrapper OSAllocMem_Debug_Linux_Memory_Allocations
++ #define OSFreeMem_Debug_Wrapper OSFreeMem_Debug_Linux_Memory_Allocations
++#endif
++
++#if defined(__linux__) && defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++ PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line);
++ PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID pvLinAddr, IMG_HANDLE hBlockAlloc, IMG_CHAR *pszFilename, IMG_UINT32 ui32Line);
++
++ #define OSAllocMem_Debug_Linux_Memory_Allocations OSAllocMem_Impl
++ #define OSFreeMem_Debug_Linux_Memory_Allocations OSFreeMem_Impl
++#else
++ PVRSRV_ERROR OSAllocMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phBlockAlloc);
++ PVRSRV_ERROR OSFreeMem_Impl(IMG_UINT32 ui32Flags, IMG_SIZE_T ui32Size, IMG_PVOID pvLinAddr, IMG_HANDLE hBlockAlloc);
++
++ #define OSAllocMem_Debug_Linux_Memory_Allocations(flags, size, addr, blockAlloc, file, line) \
++ OSAllocMem_Impl(flags, size, addr, blockAlloc)
++ #define OSFreeMem_Debug_Linux_Memory_Allocations(flags, size, addr, blockAlloc, file, line) \
++ OSFreeMem_Impl(flags, size, addr, blockAlloc)
++#endif
++
++
++
++#if defined(__linux__)
++IMG_CPU_PHYADDR OSMemHandleToCpuPAddr(IMG_VOID *hOSMemHandle, IMG_SIZE_T ui32ByteOffset);
++#else
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSMemHandleToCpuPAddr)
++#endif
++static INLINE IMG_CPU_PHYADDR OSMemHandleToCpuPAddr(IMG_HANDLE hOSMemHandle, IMG_SIZE_T ui32ByteOffset)
++{
++ IMG_CPU_PHYADDR sCpuPAddr;
++ PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
++ PVR_UNREFERENCED_PARAMETER(ui32ByteOffset);
++ sCpuPAddr.uiAddr = 0;
++ return sCpuPAddr;
++}
++#endif
++PVRSRV_ERROR OSInitEnvData(IMG_PVOID *ppvEnvSpecificData);
++PVRSRV_ERROR OSDeInitEnvData(IMG_PVOID pvEnvSpecificData);
++IMG_CHAR* OSStringCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc);
++IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, IMG_SIZE_T ui32Size, const IMG_CHAR *pszFormat, ...);
++#define OSStringLength(pszString) strlen(pszString)
++
++PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName,
++ PVRSRV_EVENTOBJECT *psEventObject);
++PVRSRV_ERROR OSEventObjectDestroy(PVRSRV_EVENTOBJECT *psEventObject);
++PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hOSEventKM);
++PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM);
++PVRSRV_ERROR OSEventObjectOpen(PVRSRV_EVENTOBJECT *psEventObject,
++ IMG_HANDLE *phOSEvent);
++PVRSRV_ERROR OSEventObjectClose(PVRSRV_EVENTOBJECT *psEventObject,
++ IMG_HANDLE hOSEventKM);
++
++
++PVRSRV_ERROR OSBaseAllocContigMemory(IMG_SIZE_T ui32Size, IMG_CPU_VIRTADDR *pLinAddr, IMG_CPU_PHYADDR *pPhysAddr);
++PVRSRV_ERROR OSBaseFreeContigMemory(IMG_SIZE_T ui32Size, IMG_CPU_VIRTADDR LinAddr, IMG_CPU_PHYADDR PhysAddr);
++
++IMG_PVOID MapUserFromKernel(IMG_PVOID pvLinAddrKM,IMG_SIZE_T ui32Size,IMG_HANDLE *phMemBlock);
++IMG_PVOID OSMapHWRegsIntoUserSpace(IMG_HANDLE hDevCookie, IMG_SYS_PHYADDR sRegAddr, IMG_UINT32 ulSize, IMG_PVOID *ppvProcess);
++IMG_VOID OSUnmapHWRegsFromUserSpace(IMG_HANDLE hDevCookie, IMG_PVOID pvUserAddr, IMG_PVOID pvProcess);
++
++IMG_VOID UnmapUserFromKernel(IMG_PVOID pvLinAddrUM, IMG_SIZE_T ui32Size, IMG_HANDLE hMemBlock);
++
++PVRSRV_ERROR OSMapPhysToUserSpace(IMG_HANDLE hDevCookie,
++ IMG_SYS_PHYADDR sCPUPhysAddr,
++ IMG_SIZE_T uiSizeInBytes,
++ IMG_UINT32 ui32CacheFlags,
++ IMG_PVOID *ppvUserAddr,
++ IMG_SIZE_T *puiActualSize,
++ IMG_HANDLE hMappingHandle);
++
++PVRSRV_ERROR OSUnmapPhysToUserSpace(IMG_HANDLE hDevCookie,
++ IMG_PVOID pvUserAddr,
++ IMG_PVOID pvProcess);
++
++PVRSRV_ERROR OSLockResource(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
++PVRSRV_ERROR OSUnlockResource(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
++IMG_BOOL OSIsResourceLocked(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
++PVRSRV_ERROR OSCreateResource(PVRSRV_RESOURCE *psResource);
++PVRSRV_ERROR OSDestroyResource(PVRSRV_RESOURCE *psResource);
++IMG_VOID OSBreakResourceLock(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
++IMG_VOID OSWaitus(IMG_UINT32 ui32Timeus);
++IMG_VOID OSReleaseThreadQuanta(IMG_VOID);
++IMG_UINT32 OSPCIReadDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg);
++IMG_VOID OSPCIWriteDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg, IMG_UINT32 ui32Value);
++
++#ifndef OSReadHWReg
++IMG_UINT32 OSReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
++#endif
++#ifndef OSWriteHWReg
++IMG_VOID OSWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
++#endif
++
++typedef IMG_VOID (*PFN_TIMER_FUNC)(IMG_VOID*);
++IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, IMG_VOID *pvData, IMG_UINT32 ui32MsTimeout);
++PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer);
++PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer);
++PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer);
++
++PVRSRV_ERROR OSGetSysMemSize(IMG_SIZE_T *pui32Bytes);
++
++typedef enum _HOST_PCI_INIT_FLAGS_
++{
++ HOST_PCI_INIT_FLAG_BUS_MASTER = 0x00000001,
++ HOST_PCI_INIT_FLAG_MSI = 0x00000002,
++ HOST_PCI_INIT_FLAG_FORCE_I32 = 0x7fffffff
++} HOST_PCI_INIT_FLAGS;
++
++struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_;
++typedef struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_ *PVRSRV_PCI_DEV_HANDLE;
++
++PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags);
++PVRSRV_PCI_DEV_HANDLE OSPCISetDev(IMG_VOID *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags);
++PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
++PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ);
++IMG_UINT32 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++IMG_UINT32 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++IMG_UINT32 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
++PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
++
++PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData);
++
++IMG_VOID OSPanic(IMG_VOID);
++
++IMG_BOOL OSProcHasPrivSrvInit(IMG_VOID);
++
++typedef enum _img_verify_test
++{
++ PVR_VERIFY_WRITE = 0,
++ PVR_VERIFY_READ
++} IMG_VERIFY_TEST;
++
++IMG_BOOL OSAccessOK(IMG_VERIFY_TEST eVerification, IMG_VOID *pvUserPtr, IMG_SIZE_T ui32Bytes);
++
++PVRSRV_ERROR OSCopyToUser(IMG_PVOID pvProcess, IMG_VOID *pvDest, IMG_VOID *pvSrc, IMG_SIZE_T ui32Bytes);
++PVRSRV_ERROR OSCopyFromUser(IMG_PVOID pvProcess, IMG_VOID *pvDest, IMG_VOID *pvSrc, IMG_SIZE_T ui32Bytes);
++
++#if defined(__linux__)
++PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID* pvCPUVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_SYS_PHYADDR *psSysPAddr,
++ IMG_HANDLE *phOSWrapMem,
++ IMG_BOOL bWrapWorkaround);
++PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem);
++#else
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSAcquirePhysPageAddr)
++#endif
++static INLINE PVRSRV_ERROR OSAcquirePhysPageAddr(IMG_VOID* pvCPUVAddr,
++ IMG_SIZE_T ui32Bytes,
++ IMG_SYS_PHYADDR *psSysPAddr,
++ IMG_HANDLE *phOSWrapMem,
++ IMG_BOOL bWrapWorkaround)
++{
++ PVR_UNREFERENCED_PARAMETER(pvCPUVAddr);
++ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++ PVR_UNREFERENCED_PARAMETER(psSysPAddr);
++ PVR_UNREFERENCED_PARAMETER(phOSWrapMem);
++ PVR_UNREFERENCED_PARAMETER(bWrapWorkaround);
++ return PVRSRV_OK;
++}
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSReleasePhysPageAddr)
++#endif
++static INLINE PVRSRV_ERROR OSReleasePhysPageAddr(IMG_HANDLE hOSWrapMem)
++{
++ PVR_UNREFERENCED_PARAMETER(hOSWrapMem);
++ return PVRSRV_OK;
++}
++#endif
++
++#if defined(__linux__) && defined(__KERNEL__)
++#define OS_SUPPORTS_IN_LISR
++static inline IMG_BOOL OSInLISR(IMG_VOID unref__ *pvSysData)
++{
++ return in_irq();
++}
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/osperproc.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/osperproc.h
+new file mode 100644
+index 0000000..80a912f
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/osperproc.h
+@@ -0,0 +1,76 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __OSPERPROC_H__
++#define __OSPERPROC_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#if defined(__linux__)
++PVRSRV_ERROR OSPerProcessPrivateDataInit(IMG_HANDLE *phOsPrivateData);
++PVRSRV_ERROR OSPerProcessPrivateDataDeInit(IMG_HANDLE hOsPrivateData);
++
++PVRSRV_ERROR OSPerProcessSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase);
++#else
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSPerProcessPrivateDataInit)
++#endif
++static INLINE PVRSRV_ERROR OSPerProcessPrivateDataInit(IMG_HANDLE *phOsPrivateData)
++{
++ PVR_UNREFERENCED_PARAMETER(phOsPrivateData);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSPerProcessPrivateDataDeInit)
++#endif
++static INLINE PVRSRV_ERROR OSPerProcessPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
++{
++ PVR_UNREFERENCED_PARAMETER(hOsPrivateData);
++
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSPerProcessSetHandleOptions)
++#endif
++static INLINE PVRSRV_ERROR OSPerProcessSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase)
++{
++ PVR_UNREFERENCED_PARAMETER(psHandleBase);
++
++ return PVRSRV_OK;
++}
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/pdump_km.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/pdump_km.h
+new file mode 100644
+index 0000000..c780e22
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/pdump_km.h
+@@ -0,0 +1,451 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _PDUMP_KM_H_
++#define _PDUMP_KM_H_
++
++#if (defined(LINUX) && (defined(SUPPORT_SGX) || defined(SUPPORT_MSVDX)))
++
++#define SGX_SUPPORT_COMMON_PDUMP
++
++#if defined(SGX_SUPPORT_COMMON_PDUMP)
++#include <pdump_osfunc.h>
++#endif
++#endif
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#define PDUMP_FLAGS_NEVER 0x08000000UL
++#define PDUMP_FLAGS_TOOUT2MEM 0x10000000UL
++#define PDUMP_FLAGS_LASTFRAME 0x20000000UL
++#define PDUMP_FLAGS_RESETLFBUFFER 0x40000000UL
++#define PDUMP_FLAGS_CONTINUOUS 0x80000000UL
++
++#define PDUMP_PD_UNIQUETAG (IMG_HANDLE)0
++#define PDUMP_PT_UNIQUETAG (IMG_HANDLE)0
++
++#define PDUMP_STREAM_PARAM2 0
++#define PDUMP_STREAM_SCRIPT2 1
++#define PDUMP_STREAM_DRIVERINFO 2
++#define PDUMP_NUM_STREAMS 3
++
++
++#ifndef PDUMP
++#define MAKEUNIQUETAG(hMemInfo) (0)
++#endif
++
++#ifdef PDUMP
++
++#define MAKEUNIQUETAG(hMemInfo) (((BM_BUF *)(((PVRSRV_KERNEL_MEM_INFO *)hMemInfo)->sMemBlk.hBuffer))->pMapping)
++
++ IMG_IMPORT PVRSRV_ERROR PDumpMemPolKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Value,
++ IMG_UINT32 ui32Mask,
++ PDUMP_POLL_OPERATOR eOperator,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++
++ IMG_IMPORT PVRSRV_ERROR PDumpMemUM(PVRSRV_PER_PROCESS_DATA *psProcData,
++ IMG_PVOID pvAltLinAddr,
++ IMG_PVOID pvLinAddr,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++
++ IMG_IMPORT PVRSRV_ERROR PDumpMemKM(IMG_PVOID pvAltLinAddr,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++ PVRSRV_ERROR PDumpMemPagesKM(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_DEV_PHYADDR *pPages,
++ IMG_UINT32 ui32NumPages,
++ IMG_DEV_VIRTADDR sDevAddr,
++ IMG_UINT32 ui32Start,
++ IMG_UINT32 ui32Length,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++
++ PVRSRV_ERROR PDumpMem2KM(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32Bytes,
++ IMG_UINT32 ui32Flags,
++ IMG_BOOL bInitialisePages,
++ IMG_HANDLE hUniqueTag1,
++ IMG_HANDLE hUniqueTag2);
++ IMG_VOID PDumpInitCommon(IMG_VOID);
++ IMG_VOID PDumpDeInitCommon(IMG_VOID);
++ IMG_VOID PDumpInit(IMG_VOID);
++ IMG_VOID PDumpDeInit(IMG_VOID);
++ PVRSRV_ERROR PDumpStartInitPhaseKM(IMG_VOID);
++ PVRSRV_ERROR PDumpStopInitPhaseKM(IMG_VOID);
++ IMG_IMPORT PVRSRV_ERROR PDumpSetFrameKM(IMG_UINT32 ui32Frame);
++ IMG_IMPORT PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags);
++ IMG_IMPORT PVRSRV_ERROR PDumpDriverInfoKM(IMG_CHAR *pszString, IMG_UINT32 ui32Flags);
++
++ PVRSRV_ERROR PDumpRegWithFlagsKM(IMG_UINT32 ui32RegAddr,
++ IMG_UINT32 ui32RegValue,
++ IMG_UINT32 ui32Flags);
++ PVRSRV_ERROR PDumpRegPolWithFlagsKM(IMG_UINT32 ui32RegAddr,
++ IMG_UINT32 ui32RegValue,
++ IMG_UINT32 ui32Mask,
++ IMG_UINT32 ui32Flags);
++ PVRSRV_ERROR PDumpRegPolKM(IMG_UINT32 ui32RegAddr,
++ IMG_UINT32 ui32RegValue,
++ IMG_UINT32 ui32Mask);
++
++ IMG_IMPORT PVRSRV_ERROR PDumpBitmapKM(IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_UINT32 ui32Width,
++ IMG_UINT32 ui32Height,
++ IMG_UINT32 ui32StrideInBytes,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ IMG_UINT32 ui32Size,
++ PDUMP_PIXEL_FORMAT ePixelFormat,
++ PDUMP_MEM_FORMAT eMemFormat,
++ IMG_UINT32 ui32PDumpFlags);
++ IMG_IMPORT PVRSRV_ERROR PDumpReadRegKM(IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_UINT32 ui32Address,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PDumpFlags);
++
++ IMG_BOOL PDumpIsSuspended(IMG_VOID);
++
++#if defined(SGX_SUPPORT_COMMON_PDUMP) || !defined(SUPPORT_VGX)
++
++ PVRSRV_ERROR PDumpRegKM(IMG_UINT32 dwReg,
++ IMG_UINT32 dwData);
++ PVRSRV_ERROR PDumpComment(IMG_CHAR* pszFormat, ...);
++ PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32 ui32Flags,
++ IMG_CHAR* pszFormat,
++ ...);
++
++ PVRSRV_ERROR PDumpPDReg(IMG_UINT32 ui32Reg,
++ IMG_UINT32 ui32dwData,
++ IMG_HANDLE hUniqueTag);
++ PVRSRV_ERROR PDumpPDRegWithFlags(IMG_UINT32 ui32Reg,
++ IMG_UINT32 ui32Data,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++#else
++ IMG_VOID PDumpRegKM(IMG_UINT32 dwReg,
++ IMG_UINT32 dwData);
++ IMG_VOID PDumpComment(IMG_CHAR* pszFormat, ...);
++ IMG_VOID PDumpCommentWithFlags(IMG_UINT32 ui32Flags,
++ IMG_CHAR* pszFormat,
++ ...);
++
++
++ IMG_VOID PDumpPDReg(IMG_UINT32 ui32Reg,
++ IMG_UINT32 ui32dwData,
++ IMG_HANDLE hUniqueTag);
++ IMG_VOID PDumpPDRegWithFlags(IMG_UINT32 ui32Reg,
++ IMG_UINT32 ui32Data,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++#endif
++
++ IMG_VOID PDumpMsvdxRegRead(const IMG_CHAR* const pRegRegion,
++ const IMG_UINT32 dwRegOffset);
++
++ IMG_VOID PDumpMsvdxRegWrite(const IMG_CHAR* const pRegRegion,
++ const IMG_UINT32 dwRegOffset,
++ const IMG_UINT32 dwData);
++
++ PVRSRV_ERROR PDumpMsvdxRegPol(const IMG_CHAR* const pRegRegion,
++ const IMG_UINT32 ui32Offset,
++ const IMG_UINT32 ui32CheckFuncIdExt,
++ const IMG_UINT32 ui32RequValue,
++ const IMG_UINT32 ui32Enable,
++ const IMG_UINT32 ui32PollCount,
++ const IMG_UINT32 ui32TimeOut);
++
++ PVRSRV_ERROR PDumpMsvdxWriteRef(const IMG_CHAR* const pRegRegion,
++ const IMG_UINT32 ui32VLROffset,
++ const IMG_UINT32 ui32Physical );
++
++ IMG_BOOL PDumpIsLastCaptureFrameKM(IMG_VOID);
++ IMG_IMPORT IMG_BOOL PDumpIsCaptureFrameKM(IMG_VOID);
++
++ IMG_VOID PDumpMallocPagesPhys(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_UINT32 ui32DevVAddr,
++ IMG_PUINT32 pui32PhysPages,
++ IMG_UINT32 ui32NumPages,
++ IMG_HANDLE hUniqueTag);
++ PVRSRV_ERROR PDumpSetMMUContext(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CHAR *pszMemSpace,
++ IMG_UINT32 *pui32MMUContextID,
++ IMG_UINT32 ui32MMUType,
++ IMG_HANDLE hUniqueTag1,
++ IMG_VOID *pvPDCPUAddr);
++ PVRSRV_ERROR PDumpClearMMUContext(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CHAR *pszMemSpace,
++ IMG_UINT32 ui32MMUContextID,
++ IMG_UINT32 ui32MMUType);
++
++ PVRSRV_ERROR PDumpPDDevPAddrKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 ui32Offset,
++ IMG_DEV_PHYADDR sPDDevPAddr,
++ IMG_HANDLE hUniqueTag1,
++ IMG_HANDLE hUniqueTag2);
++
++ IMG_BOOL PDumpTestNextFrame(IMG_UINT32 ui32CurrentFrame);
++
++
++#if defined(LINUX)
++#define COMMON_PDUMP_OS_SUPPORT
++#endif
++
++#if defined (COMMON_PDUMP_OS_SUPPORT) && !defined(SUPPORT_VGX)
++
++ PVRSRV_ERROR PDumpTASignatureRegisters(IMG_UINT32 ui32DumpFrameNum,
++ IMG_UINT32 ui32TAKickCount,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters);
++
++ PVRSRV_ERROR PDump3DSignatureRegisters(IMG_UINT32 ui32DumpFrameNum,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters);
++
++ PVRSRV_ERROR PDumpCounterRegisters(IMG_UINT32 ui32DumpFrameNum,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters);
++
++ PVRSRV_ERROR PDumpRegRead(const IMG_UINT32 dwRegOffset, IMG_UINT32 ui32Flags);
++
++ PVRSRV_ERROR PDumpCycleCountRegRead(const IMG_UINT32 dwRegOffset, IMG_BOOL bLastFrame);
++
++ PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags);
++ PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks);
++
++ PVRSRV_ERROR PDumpMallocPages(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_UINT32 ui32DevVAddr,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32NumBytes,
++ IMG_UINT32 ui32PageSize,
++ IMG_HANDLE hUniqueTag);
++ PVRSRV_ERROR PDumpMallocPageTable(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32NumBytes,
++ IMG_HANDLE hUniqueTag);
++ PVRSRV_ERROR PDumpFreePages(struct _BM_HEAP_ *psBMHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_UINT32 ui32NumBytes,
++ IMG_UINT32 ui32PageSize,
++ IMG_HANDLE hUniqueTag,
++ IMG_BOOL bInterleaved);
++ PVRSRV_ERROR PDumpFreePageTable(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32NumBytes,
++ IMG_HANDLE hUniqueTag);
++
++ IMG_IMPORT PVRSRV_ERROR PDumpHWPerfCBKM(IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PDumpFlags);
++
++ PVRSRV_ERROR PDumpCBP(PPVRSRV_KERNEL_MEM_INFO psROffMemInfo,
++ IMG_UINT32 ui32ROffOffset,
++ IMG_UINT32 ui32WPosVal,
++ IMG_UINT32 ui32PacketSize,
++ IMG_UINT32 ui32BufferSize,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++
++#else
++ IMG_VOID PDumpTASignatureRegisters(IMG_UINT32 ui32DumpFrameNum,
++ IMG_UINT32 ui32TAKickCount,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters);
++ IMG_VOID PDump3DSignatureRegisters(IMG_UINT32 ui32DumpFrameNum,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters);
++ IMG_VOID PDumpCounterRegisters(IMG_UINT32 ui32DumpFrameNum,
++ IMG_BOOL bLastFrame,
++ IMG_UINT32 *pui32Registers,
++ IMG_UINT32 ui32NumRegisters);
++
++ IMG_VOID PDumpRegRead(const IMG_UINT32 dwRegOffset, IMG_UINT32 ui32Flags);
++ IMG_VOID PDumpCycleCountRegRead(const IMG_UINT32 dwRegOffset, IMG_BOOL bLastFrame);
++
++ IMG_VOID PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags);
++ IMG_VOID PDumpIDL(IMG_UINT32 ui32Clocks);
++
++
++ IMG_VOID PDumpMallocPages(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_UINT32 ui32DevVAddr,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32NumBytes,
++ IMG_UINT32 ui32PageSize,
++ IMG_HANDLE hUniqueTag);
++ IMG_VOID PDumpMallocPageTable(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32NumBytes,
++ IMG_HANDLE hUniqueTag);
++ IMG_VOID PDumpFreePages(struct _BM_HEAP_ *psBMHeap,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_UINT32 ui32NumBytes,
++ IMG_UINT32 ui32PageSize,
++ IMG_HANDLE hUniqueTag,
++ IMG_BOOL bInterleaved);
++ IMG_VOID PDumpFreePageTable(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_VIRTADDR pvLinAddr,
++ IMG_UINT32 ui32NumBytes,
++ IMG_HANDLE hUniqueTag);
++
++ IMG_IMPORT IMG_VOID PDumpHWPerfCBKM(IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ IMG_DEV_VIRTADDR sDevBaseAddr,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PDumpFlags);
++
++ IMG_VOID PDumpCBP(PPVRSRV_KERNEL_MEM_INFO psROffMemInfo,
++ IMG_UINT32 ui32ROffOffset,
++ IMG_UINT32 ui32WPosVal,
++ IMG_UINT32 ui32PacketSize,
++ IMG_UINT32 ui32BufferSize,
++ IMG_UINT32 ui32Flags,
++ IMG_HANDLE hUniqueTag);
++
++#endif
++
++ IMG_VOID PDumpVGXMemToFile(IMG_CHAR *pszFileName,
++ IMG_UINT32 ui32FileOffset,
++ PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++ IMG_UINT32 uiAddr,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32PDumpFlags,
++ IMG_HANDLE hUniqueTag);
++
++ IMG_VOID PDumpSuspendKM(IMG_VOID);
++ IMG_VOID PDumpResumeKM(IMG_VOID);
++
++ #define PDUMPMEMPOL PDumpMemPolKM
++ #define PDUMPMEM PDumpMemKM
++ #define PDUMPMEM2 PDumpMem2KM
++ #define PDUMPMEMUM PDumpMemUM
++ #define PDUMPINIT PDumpInitCommon
++ #define PDUMPDEINIT PDumpDeInitCommon
++ #define PDUMPISLASTFRAME PDumpIsLastCaptureFrameKM
++ #define PDUMPTESTFRAME PDumpIsCaptureFrameKM
++ #define PDUMPTESTNEXTFRAME PDumpTestNextFrame
++ #define PDUMPREGWITHFLAGS PDumpRegWithFlagsKM
++ #define PDUMPREG PDumpRegKM
++ #define PDUMPCOMMENT PDumpComment
++ #define PDUMPCOMMENTWITHFLAGS PDumpCommentWithFlags
++ #define PDUMPREGPOL PDumpRegPolKM
++ #define PDUMPREGPOLWITHFLAGS PDumpRegPolWithFlagsKM
++ #define PDUMPMALLOCPAGES PDumpMallocPages
++ #define PDUMPMALLOCPAGETABLE PDumpMallocPageTable
++ #define PDUMPSETMMUCONTEXT PDumpSetMMUContext
++ #define PDUMPCLEARMMUCONTEXT PDumpClearMMUContext
++ #define PDUMPFREEPAGES PDumpFreePages
++ #define PDUMPFREEPAGETABLE PDumpFreePageTable
++ #define PDUMPPDREG PDumpPDReg
++ #define PDUMPPDREGWITHFLAGS PDumpPDRegWithFlags
++ #define PDUMPCBP PDumpCBP
++ #define PDUMPMALLOCPAGESPHYS PDumpMallocPagesPhys
++ #define PDUMPENDINITPHASE PDumpStopInitPhaseKM
++ #define PDUMPMSVDXREGWRITE PDumpMsvdxRegWrite
++ #define PDUMPMSVDXREGREAD PDumpMsvdxRegRead
++ #define PDUMPMSVDXPOL PDumpMsvdxRegPol
++ #define PDUMPMSVDXWRITEREF PDumpMsvdxWriteRef
++ #define PDUMPBITMAPKM PDumpBitmapKM
++ #define PDUMPDRIVERINFO PDumpDriverInfoKM
++ #define PDUMPIDLWITHFLAGS PDumpIDLWithFlags
++ #define PDUMPIDL PDumpIDL
++ #define PDUMPSUSPEND PDumpSuspendKM
++ #define PDUMPRESUME PDumpResumeKM
++
++#else
++ #if ((defined(LINUX) || defined(GCC_IA32)) || defined(GCC_ARM))
++ #define PDUMPMEMPOL(args...)
++ #define PDUMPMEM(args...)
++ #define PDUMPMEM2(args...)
++ #define PDUMPMEMUM(args...)
++ #define PDUMPINIT(args...)
++ #define PDUMPDEINIT(args...)
++ #define PDUMPISLASTFRAME(args...)
++ #define PDUMPTESTFRAME(args...)
++ #define PDUMPTESTNEXTFRAME(args...)
++ #define PDUMPREGWITHFLAGS(args...)
++ #define PDUMPREG(args...)
++ #define PDUMPCOMMENT(args...)
++ #define PDUMPREGPOL(args...)
++ #define PDUMPREGPOLWITHFLAGS(args...)
++ #define PDUMPMALLOCPAGES(args...)
++ #define PDUMPMALLOCPAGETABLE(args...)
++ #define PDUMPSETMMUCONTEXT(args...)
++ #define PDUMPCLEARMMUCONTEXT(args...)
++ #define PDUMPFREEPAGES(args...)
++ #define PDUMPFREEPAGETABLE(args...)
++ #define PDUMPPDREG(args...)
++ #define PDUMPPDREGWITHFLAGS(args...)
++ #define PDUMPSYNC(args...)
++ #define PDUMPCOPYTOMEM(args...)
++ #define PDUMPWRITE(args...)
++ #define PDUMPCBP(args...)
++ #define PDUMPCOMMENTWITHFLAGS(args...)
++ #define PDUMPMALLOCPAGESPHYS(args...)
++ #define PDUMPENDINITPHASE(args...)
++ #define PDUMPMSVDXREG(args...)
++ #define PDUMPMSVDXREGWRITE(args...)
++ #define PDUMPMSVDXREGREAD(args...)
++ #define PDUMPMSVDXPOLEQ(args...)
++ #define PDUMPMSVDXPOL(args...)
++ #define PDUMPBITMAPKM(args...)
++ #define PDUMPDRIVERINFO(args...)
++ #define PDUMPIDLWITHFLAGS(args...)
++ #define PDUMPIDL(args...)
++ #define PDUMPSUSPEND(args...)
++ #define PDUMPRESUME(args...)
++ #define PDUMPMSVDXWRITEREF(args...)
++ #else
++ #error Compiler not specified
++ #endif
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/pdump_osfunc.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/pdump_osfunc.h
+new file mode 100644
+index 0000000..7c6db05
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/pdump_osfunc.h
+@@ -0,0 +1,137 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PDUMP_OSFUNC_H__
++#define __PDUMP_OSFUNC_H__
++
++#include <stdarg.h>
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++
++#define MAX_PDUMP_STRING_LENGTH (256)
++#define PDUMP_GET_SCRIPT_STRING() \
++ IMG_HANDLE hScript; \
++ IMG_UINT32 ui32MaxLen; \
++ PVRSRV_ERROR eError; \
++ eError = PDumpOSGetScriptString(&hScript, &ui32MaxLen);\
++ if(eError != PVRSRV_OK) return eError;
++
++#define PDUMP_GET_MSG_STRING() \
++ IMG_HANDLE hMsg; \
++ IMG_UINT32 ui32MaxLen; \
++ PVRSRV_ERROR eError; \
++ eError = PDumpOSGetMessageString(&hMsg, &ui32MaxLen);\
++ if(eError != PVRSRV_OK) return eError;
++
++#define PDUMP_GET_FILE_STRING() \
++ IMG_CHAR *pszFileName; \
++ IMG_UINT32 ui32MaxLen; \
++ PVRSRV_ERROR eError; \
++ eError = PDumpOSGetFilenameString(&pszFileName, &ui32MaxLen);\
++ if(eError != PVRSRV_OK) return eError;
++
++#define PDUMP_GET_SCRIPT_AND_FILE_STRING() \
++ IMG_HANDLE hScript; \
++ IMG_CHAR *pszFileName; \
++ IMG_UINT32 ui32MaxLenScript; \
++ IMG_UINT32 ui32MaxLenFileName; \
++ PVRSRV_ERROR eError; \
++ eError = PDumpOSGetScriptString(&hScript, &ui32MaxLenScript);\
++ if(eError != PVRSRV_OK) return eError; \
++ eError = PDumpOSGetFilenameString(&pszFileName, &ui32MaxLenFileName);\
++ if(eError != PVRSRV_OK) return eError;
++
++
++
++ PVRSRV_ERROR PDumpOSGetScriptString(IMG_HANDLE *phScript, IMG_UINT32 *pui32MaxLen);
++
++
++ PVRSRV_ERROR PDumpOSGetMessageString(IMG_HANDLE *phMsg, IMG_UINT32 *pui32MaxLen);
++
++
++ PVRSRV_ERROR PDumpOSGetFilenameString(IMG_CHAR **ppszFile, IMG_UINT32 *pui32MaxLen);
++
++
++
++
++#define PDUMP_va_list va_list
++#define PDUMP_va_start va_start
++#define PDUMP_va_end va_end
++
++
++
++IMG_HANDLE PDumpOSGetStream(IMG_UINT32 ePDumpStream);
++
++IMG_UINT32 PDumpOSGetStreamOffset(IMG_UINT32 ePDumpStream);
++
++IMG_UINT32 PDumpOSGetParamFileNum(IMG_VOID);
++
++IMG_VOID PDumpOSCheckForSplitting(IMG_HANDLE hStream, IMG_UINT32 ui32Size, IMG_UINT32 ui32Flags);
++
++IMG_BOOL PDumpOSIsSuspended(IMG_VOID);
++
++IMG_BOOL PDumpOSJTInitialised(IMG_VOID);
++
++IMG_BOOL PDumpOSWriteString(IMG_HANDLE hDbgStream,
++ IMG_UINT8 *psui8Data,
++ IMG_UINT32 ui32Size,
++ IMG_UINT32 ui32Flags);
++
++IMG_BOOL PDumpOSWriteString2(IMG_HANDLE hScript, IMG_UINT32 ui32Flags);
++
++PVRSRV_ERROR PDumpOSBufprintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...);
++
++IMG_VOID PDumpOSDebugPrintf(IMG_CHAR* pszFormat, ...);
++
++PVRSRV_ERROR PDumpOSSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR *pszFormat, ...);
++
++PVRSRV_ERROR PDumpOSVSprintf(IMG_CHAR *pszMsg, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, PDUMP_va_list vaArgs);
++
++IMG_UINT32 PDumpOSBuflen(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax);
++
++IMG_VOID PDumpOSVerifyLineEnding(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax);
++
++IMG_VOID PDumpOSCPUVAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32Offset,
++ IMG_UINT8 *pui8LinAddr,
++ IMG_UINT32 ui32PageSize,
++ IMG_DEV_PHYADDR *psDevPAddr);
++
++IMG_VOID PDumpOSCPUVAddrToPhysPages(IMG_HANDLE hOSMemHandle,
++ IMG_UINT32 ui32Offset,
++ IMG_PUINT8 pui8LinAddr,
++ IMG_UINT32 *pui32PageOffset);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/perproc.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/perproc.h
+new file mode 100644
+index 0000000..233bb59
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/perproc.h
+@@ -0,0 +1,110 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __PERPROC_H__
++#define __PERPROC_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "img_types.h"
++#include "resman.h"
++
++#include "handle.h"
++
++typedef struct _PVRSRV_PER_PROCESS_DATA_
++{
++ IMG_UINT32 ui32PID;
++ IMG_HANDLE hBlockAlloc;
++ PRESMAN_CONTEXT hResManContext;
++ IMG_HANDLE hPerProcData;
++ PVRSRV_HANDLE_BASE *psHandleBase;
++#if defined (PVR_SECURE_HANDLES)
++
++ IMG_BOOL bHandlesBatched;
++#endif
++ IMG_UINT32 ui32RefCount;
++
++
++ IMG_BOOL bInitProcess;
++
++
++ IMG_HANDLE hOsPrivateData;
++} PVRSRV_PER_PROCESS_DATA;
++
++PVRSRV_PER_PROCESS_DATA *PVRSRVPerProcessData(IMG_UINT32 ui32PID);
++
++PVRSRV_ERROR PVRSRVPerProcessDataConnect(IMG_UINT32 ui32PID);
++IMG_VOID PVRSRVPerProcessDataDisconnect(IMG_UINT32 ui32PID);
++
++PVRSRV_ERROR PVRSRVPerProcessDataInit(IMG_VOID);
++PVRSRV_ERROR PVRSRVPerProcessDataDeInit(IMG_VOID);
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVFindPerProcessData)
++#endif
++static INLINE
++PVRSRV_PER_PROCESS_DATA *PVRSRVFindPerProcessData(IMG_VOID)
++{
++ return PVRSRVPerProcessData(OSGetCurrentProcessIDKM());
++}
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVProcessPrivateData)
++#endif
++static INLINE
++IMG_HANDLE PVRSRVProcessPrivateData(PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++ return (psPerProc != IMG_NULL) ? psPerProc->hOsPrivateData : IMG_NULL;
++}
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVPerProcessPrivateData)
++#endif
++static INLINE
++IMG_HANDLE PVRSRVPerProcessPrivateData(IMG_UINT32 ui32PID)
++{
++ return PVRSRVProcessPrivateData(PVRSRVPerProcessData(ui32PID));
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVFindPerProcessPrivateData)
++#endif
++static INLINE
++IMG_HANDLE PVRSRVFindPerProcessPrivateData(IMG_VOID)
++{
++ return PVRSRVProcessPrivateData(PVRSRVFindPerProcessData());
++}
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/power.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/power.h
+new file mode 100644
+index 0000000..cd8d737
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/power.h
+@@ -0,0 +1,133 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef POWER_H
++#define POWER_H
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++
++
++typedef struct _PVRSRV_POWER_DEV_TAG_
++{
++ PFN_PRE_POWER pfnPrePower;
++ PFN_POST_POWER pfnPostPower;
++ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange;
++ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange;
++ IMG_HANDLE hDevCookie;
++ IMG_UINT32 ui32DeviceIndex;
++ PVRSRV_DEV_POWER_STATE eDefaultPowerState;
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState;
++ struct _PVRSRV_POWER_DEV_TAG_ *psNext;
++ struct _PVRSRV_POWER_DEV_TAG_ **ppsThis;
++
++} PVRSRV_POWER_DEV;
++
++typedef enum _PVRSRV_INIT_SERVER_STATE_
++{
++ PVRSRV_INIT_SERVER_Unspecified = -1,
++ PVRSRV_INIT_SERVER_RUNNING = 0,
++ PVRSRV_INIT_SERVER_RAN = 1,
++ PVRSRV_INIT_SERVER_SUCCESSFUL = 2,
++ PVRSRV_INIT_SERVER_NUM = 3,
++ PVRSRV_INIT_SERVER_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_INIT_SERVER_STATE, *PPVRSRV_INIT_SERVER_STATE;
++
++IMG_IMPORT
++IMG_BOOL PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetInitServerState(PVRSRV_INIT_SERVER_STATE eInitServerState, IMG_BOOL bState);
++
++
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVPowerLock(IMG_UINT32 ui32CallerID,
++ IMG_BOOL bSystemPowerEvent);
++IMG_IMPORT
++IMG_VOID PVRSRVPowerUnlock(IMG_UINT32 ui32CallerID);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ IMG_UINT32 ui32CallerID,
++ IMG_BOOL bRetainMutex);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSystemPrePowerStateKM(PVRSRV_SYS_POWER_STATE eNewPowerState);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSystemPostPowerStateKM(PVRSRV_SYS_POWER_STATE eNewPowerState);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetPowerStateKM (PVRSRV_SYS_POWER_STATE ePVRState);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVRegisterPowerDevice(IMG_UINT32 ui32DeviceIndex,
++ PFN_PRE_POWER pfnPrePower,
++ PFN_POST_POWER pfnPostPower,
++ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange,
++ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange,
++ IMG_HANDLE hDevCookie,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
++ PVRSRV_DEV_POWER_STATE eDefaultPowerState);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVRemovePowerDevice (IMG_UINT32 ui32DeviceIndex);
++
++IMG_IMPORT
++IMG_BOOL PVRSRVIsDevicePowered(IMG_UINT32 ui32DeviceIndex);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(IMG_UINT32 ui32DeviceIndex,
++ IMG_BOOL bIdleDevice,
++ IMG_VOID *pvInfo);
++
++IMG_IMPORT
++IMG_VOID PVRSRVDevicePostClockSpeedChange(IMG_UINT32 ui32DeviceIndex,
++ IMG_BOOL bIdleDevice,
++ IMG_VOID *pvInfo);
++
++
++/*
++ * PVRSRVPowerOnSystemWithDevice
++ *
++ * Description: Power on the System if it is off, but instead of powering all
++ * of the devices to their "default" state, only turn on the specified
++ * device index.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVPowerOnSystemWithDevice(IMG_UINT32 ui32DeviceIndex,
++ IMG_UINT32 ui32CallerID,
++ IMG_BOOL bRetainMutex);
++
++#if defined (__cplusplus)
++}
++#endif
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/queue.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/queue.h
+new file mode 100644
+index 0000000..0646137
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/queue.h
+@@ -0,0 +1,119 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef QUEUE_H
++#define QUEUE_H
++
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#define UPDATE_QUEUE_ROFF(psQueue, ui32Size) \
++ psQueue->ui32ReadOffset = (psQueue->ui32ReadOffset + ui32Size) \
++ & (psQueue->ui32QueueSize - 1);
++
++ typedef struct _COMMAND_COMPLETE_DATA_
++ {
++ IMG_BOOL bInUse;
++
++ IMG_UINT32 ui32DstSyncCount;
++ IMG_UINT32 ui32SrcSyncCount;
++ PVRSRV_SYNC_OBJECT *psDstSync;
++ PVRSRV_SYNC_OBJECT *psSrcSync;
++ IMG_UINT32 ui32AllocSize;
++ }COMMAND_COMPLETE_DATA, *PCOMMAND_COMPLETE_DATA;
++
++#if !defined(USE_CODE)
++IMG_VOID QueueDumpDebugInfo(IMG_VOID);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVProcessQueues (IMG_UINT32 ui32CallerID,
++ IMG_BOOL bFlush);
++
++#if defined(__linux__) && defined(__KERNEL__)
++#include <linux/types.h>
++#include <linux/seq_file.h>
++off_t
++QueuePrintQueues (IMG_CHAR * buffer, size_t size, off_t off);
++
++#ifdef PVR_PROC_USE_SEQ_FILE
++void* ProcSeqOff2ElementQueue(struct seq_file * sfile, loff_t off);
++void ProcSeqShowQueue(struct seq_file *sfile,void* el);
++#endif
++
++#endif
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_SIZE_T ui32QueueSize,
++ PVRSRV_QUEUE_INFO **ppsQueueInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInsertCommandKM(PVRSRV_QUEUE_INFO *psQueue,
++ PVRSRV_COMMAND **ppsCommand,
++ IMG_UINT32 ui32DevIndex,
++ IMG_UINT16 CommandType,
++ IMG_UINT32 ui32DstSyncCount,
++ PVRSRV_KERNEL_SYNC_INFO *apsDstSync[],
++ IMG_UINT32 ui32SrcSyncCount,
++ PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[],
++ IMG_SIZE_T ui32DataByteSize );
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetQueueSpaceKM(PVRSRV_QUEUE_INFO *psQueue,
++ IMG_SIZE_T ui32ParamSize,
++ IMG_VOID **ppvSpace);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSubmitCommandKM(PVRSRV_QUEUE_INFO *psQueue,
++ PVRSRV_COMMAND *psCommand);
++
++IMG_IMPORT
++IMG_VOID PVRSRVCommandCompleteKM(IMG_HANDLE hCmdCookie, IMG_BOOL bScheduleMISR);
++
++IMG_VOID PVRSRVCommandCompleteCallbacks(IMG_VOID);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVRegisterCmdProcListKM(IMG_UINT32 ui32DevIndex,
++ PFN_CMD_PROC *ppfnCmdProcList,
++ IMG_UINT32 ui32MaxSyncsPerCmd[][2],
++ IMG_UINT32 ui32CmdCount);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVRemoveCmdProcListKM(IMG_UINT32 ui32DevIndex,
++ IMG_UINT32 ui32CmdCount);
++
++#endif
++
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/ra.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/ra.h
+new file mode 100644
+index 0000000..3cb7e78
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/ra.h
+@@ -0,0 +1,155 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _RA_H_
++#define _RA_H_
++
++#include "img_types.h"
++#include "hash.h"
++#include "osfunc.h"
++
++typedef struct _RA_ARENA_ RA_ARENA;
++typedef struct _BM_MAPPING_ BM_MAPPING;
++
++
++
++#define RA_STATS
++
++
++struct _RA_STATISTICS_
++{
++
++ IMG_SIZE_T uSpanCount;
++
++
++ IMG_SIZE_T uLiveSegmentCount;
++
++
++ IMG_SIZE_T uFreeSegmentCount;
++
++
++ IMG_SIZE_T uTotalResourceCount;
++
++
++ IMG_SIZE_T uFreeResourceCount;
++
++
++ IMG_SIZE_T uCumulativeAllocs;
++
++
++ IMG_SIZE_T uCumulativeFrees;
++
++
++ IMG_SIZE_T uImportCount;
++
++
++ IMG_SIZE_T uExportCount;
++};
++typedef struct _RA_STATISTICS_ RA_STATISTICS;
++
++struct _RA_SEGMENT_DETAILS_
++{
++ IMG_SIZE_T uiSize;
++ IMG_CPU_PHYADDR sCpuPhyAddr;
++ IMG_HANDLE hSegment;
++};
++typedef struct _RA_SEGMENT_DETAILS_ RA_SEGMENT_DETAILS;
++
++RA_ARENA *
++RA_Create (IMG_CHAR *name,
++ IMG_UINTPTR_T base,
++ IMG_SIZE_T uSize,
++ BM_MAPPING *psMapping,
++ IMG_SIZE_T uQuantum,
++ IMG_BOOL (*imp_alloc)(IMG_VOID *_h,
++ IMG_SIZE_T uSize,
++ IMG_SIZE_T *pActualSize,
++ BM_MAPPING **ppsMapping,
++ IMG_UINT32 uFlags,
++ IMG_UINTPTR_T *pBase),
++ IMG_VOID (*imp_free) (IMG_VOID *,
++ IMG_UINTPTR_T,
++ BM_MAPPING *),
++ IMG_VOID (*backingstore_free) (IMG_VOID *,
++ IMG_SIZE_T,
++ IMG_SIZE_T,
++ IMG_HANDLE),
++ IMG_VOID *import_handle);
++
++IMG_VOID
++RA_Delete (RA_ARENA *pArena);
++
++IMG_BOOL
++RA_TestDelete (RA_ARENA *pArena);
++
++IMG_BOOL
++RA_Add (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize);
++
++IMG_BOOL
++RA_Alloc (RA_ARENA *pArena,
++ IMG_SIZE_T uSize,
++ IMG_SIZE_T *pActualSize,
++ BM_MAPPING **ppsMapping,
++ IMG_UINT32 uFlags,
++ IMG_UINT32 uAlignment,
++ IMG_UINT32 uAlignmentOffset,
++ IMG_UINTPTR_T *pBase);
++
++IMG_VOID
++RA_Free (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_BOOL bFreeBackingStore);
++
++
++#ifdef RA_STATS
++
++#define CHECK_SPACE(total) \
++{ \
++ if(total<100) \
++ return PVRSRV_ERROR_INVALID_PARAMS; \
++}
++
++#define UPDATE_SPACE(str, count, total) \
++{ \
++ if(count == -1) \
++ return PVRSRV_ERROR_INVALID_PARAMS; \
++ else \
++ { \
++ str += count; \
++ total -= count; \
++ } \
++}
++
++
++IMG_BOOL RA_GetNextLiveSegment(IMG_HANDLE hArena, RA_SEGMENT_DETAILS *psSegDetails);
++
++
++PVRSRV_ERROR RA_GetStats(RA_ARENA *pArena,
++ IMG_CHAR **ppszStr,
++ IMG_UINT32 *pui32StrLen);
++
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/resman.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/resman.h
+new file mode 100644
+index 0000000..c5571f7
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/resman.h
+@@ -0,0 +1,113 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef __RESMAN_H__
++#define __RESMAN_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++enum {
++
++ RESMAN_TYPE_SHARED_PB_DESC = 1,
++ RESMAN_TYPE_SHARED_PB_DESC_CREATE_LOCK,
++ RESMAN_TYPE_HW_RENDER_CONTEXT,
++ RESMAN_TYPE_HW_TRANSFER_CONTEXT,
++ RESMAN_TYPE_HW_2D_CONTEXT,
++ RESMAN_TYPE_TRANSFER_CONTEXT,
++
++
++
++
++
++ RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN_REF,
++ RESMAN_TYPE_DISPLAYCLASS_DEVICE,
++
++
++ RESMAN_TYPE_BUFFERCLASS_DEVICE,
++
++
++ RESMAN_TYPE_OS_USERMODE_MAPPING,
++
++
++ RESMAN_TYPE_DEVICEMEM_CONTEXT,
++ RESMAN_TYPE_DEVICECLASSMEM_MAPPING,
++ RESMAN_TYPE_DEVICEMEM_MAPPING,
++ RESMAN_TYPE_DEVICEMEM_WRAP,
++ RESMAN_TYPE_DEVICEMEM_ALLOCATION,
++ RESMAN_TYPE_EVENT_OBJECT,
++ RESMAN_TYPE_SHARED_MEM_INFO,
++ RESMAN_TYPE_MODIFY_SYNC_OPS,
++
++
++ RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION
++};
++
++#define RESMAN_CRITERIA_ALL 0x00000000
++#define RESMAN_CRITERIA_RESTYPE 0x00000001
++#define RESMAN_CRITERIA_PVOID_PARAM 0x00000002
++#define RESMAN_CRITERIA_UI32_PARAM 0x00000004
++
++typedef PVRSRV_ERROR (*RESMAN_FREE_FN)(IMG_PVOID pvParam, IMG_UINT32 ui32Param);
++
++typedef struct _RESMAN_ITEM_ *PRESMAN_ITEM;
++typedef struct _RESMAN_CONTEXT_ *PRESMAN_CONTEXT;
++
++PVRSRV_ERROR ResManInit(IMG_VOID);
++IMG_VOID ResManDeInit(IMG_VOID);
++
++PRESMAN_ITEM ResManRegisterRes(PRESMAN_CONTEXT hResManContext,
++ IMG_UINT32 ui32ResType,
++ IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param,
++ RESMAN_FREE_FN pfnFreeResource);
++
++PVRSRV_ERROR ResManFreeResByPtr(PRESMAN_ITEM psResItem);
++
++PVRSRV_ERROR ResManFreeResByCriteria(PRESMAN_CONTEXT hResManContext,
++ IMG_UINT32 ui32SearchCriteria,
++ IMG_UINT32 ui32ResType,
++ IMG_PVOID pvParam,
++ IMG_UINT32 ui32Param);
++
++PVRSRV_ERROR ResManDissociateRes(PRESMAN_ITEM psResItem,
++ PRESMAN_CONTEXT psNewResManContext);
++
++PVRSRV_ERROR ResManFindResourceByPtr(PRESMAN_CONTEXT hResManContext,
++ PRESMAN_ITEM psItem);
++
++PVRSRV_ERROR PVRSRVResManConnect(IMG_HANDLE hPerProc,
++ PRESMAN_CONTEXT *phResManContext);
++IMG_VOID PVRSRVResManDisconnect(PRESMAN_CONTEXT hResManContext,
++ IMG_BOOL bKernelContext);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/services_headers.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/services_headers.h
+new file mode 100644
+index 0000000..eb00dbb
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/services_headers.h
+@@ -0,0 +1,49 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef SERVICES_HEADERS_H
++#define SERVICES_HEADERS_H
++
++#ifdef DEBUG_RELEASE_BUILD
++#pragma optimize( "", off )
++#define DEBUG 1
++#endif
++
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "power.h"
++#include "resman.h"
++#include "queue.h"
++#include "srvkm.h"
++#include "kerneldisplay.h"
++#include "syscommon.h"
++#include "pvr_debug.h"
++#include "metrics.h"
++#include "osfunc.h"
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/srvkm.h b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/srvkm.h
+new file mode 100644
+index 0000000..a344253
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/srvkm/include/srvkm.h
+@@ -0,0 +1,69 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef SRVKM_H
++#define SRVKM_H
++
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++
++ #ifdef PVR_DISABLE_LOGGING
++ #define PVR_LOG(X)
++ #else
++ #define PVR_LOG(X) PVRSRVReleasePrintf X
++ #endif
++
++ IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVReleasePrintf(const IMG_CHAR *pszFormat,
++ ...);
++
++ IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PVRSRVProcessConnect(IMG_UINT32 ui32PID);
++ IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVProcessDisconnect(IMG_UINT32 ui32PID);
++
++ IMG_VOID IMG_CALLCONV PVRSRVSetDCState(IMG_UINT32 ui32State);
++
++ PVRSRV_ERROR IMG_CALLCONV PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer, IMG_SIZE_T *puiBufSize, IMG_BOOL bSave);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#define LOOP_UNTIL_TIMEOUT(TIMEOUT) \
++{\
++ IMG_UINT32 uiOffset, uiStart, uiCurrent, uiNotLastLoop; \
++ for(uiOffset = 0, uiStart = OSClockus(), uiCurrent = uiStart + 1, uiNotLastLoop = 1;\
++ ((uiCurrent - uiStart + uiOffset) < TIMEOUT) || uiNotLastLoop--; \
++ uiCurrent = OSClockus(), \
++ uiOffset = uiCurrent < uiStart ? IMG_UINT32_MAX - uiStart : uiOffset, \
++ uiStart = uiCurrent < uiStart ? 0 : uiStart)
++
++#define END_LOOP_UNTIL_TIMEOUT() \
++}
++
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/include/syscommon.h b/drivers/gpu/drm/mrst/pvr/services4/system/include/syscommon.h
+new file mode 100644
+index 0000000..20b83c1
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/include/syscommon.h
+@@ -0,0 +1,217 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _SYSCOMMON_H
++#define _SYSCOMMON_H
++
++#include "sysconfig.h"
++#include "sysinfo.h"
++#include "servicesint.h"
++#include "queue.h"
++#include "power.h"
++#include "resman.h"
++#include "ra.h"
++#include "device.h"
++#include "buffer_manager.h"
++
++#if defined(NO_HARDWARE) && defined(__linux__) && defined(__KERNEL__)
++#include <asm/io.h>
++#endif
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++typedef struct _SYS_DEVICE_ID_TAG
++{
++ IMG_UINT32 uiID;
++ IMG_BOOL bInUse;
++
++} SYS_DEVICE_ID;
++
++
++#define SYS_MAX_LOCAL_DEVMEM_ARENAS 4
++
++typedef struct _SYS_DATA_TAG_
++{
++ IMG_UINT32 ui32NumDevices;
++ SYS_DEVICE_ID sDeviceID[SYS_DEVICE_COUNT];
++ PVRSRV_DEVICE_NODE *psDeviceNodeList;
++ PVRSRV_POWER_DEV *psPowerDeviceList;
++ PVRSRV_RESOURCE sPowerStateChangeResource;
++ PVRSRV_SYS_POWER_STATE eCurrentPowerState;
++ PVRSRV_SYS_POWER_STATE eFailedPowerState;
++ IMG_UINT32 ui32CurrentOSPowerState;
++ PVRSRV_QUEUE_INFO *psQueueList;
++ PVRSRV_KERNEL_SYNC_INFO *psSharedSyncInfoList;
++ IMG_PVOID pvEnvSpecificData;
++ IMG_PVOID pvSysSpecificData;
++ PVRSRV_RESOURCE sQProcessResource;
++ IMG_VOID *pvSOCRegsBase;
++ IMG_HANDLE hSOCTimerRegisterOSMemHandle;
++ IMG_UINT32 *pvSOCTimerRegisterKM;
++ IMG_VOID *pvSOCClockGateRegsBase;
++ IMG_UINT32 ui32SOCClockGateRegsSize;
++ PFN_CMD_PROC *ppfnCmdProcList[SYS_DEVICE_COUNT];
++
++
++
++ PCOMMAND_COMPLETE_DATA *ppsCmdCompleteData[SYS_DEVICE_COUNT];
++
++
++ IMG_BOOL bReProcessQueues;
++
++ RA_ARENA *apsLocalDevMemArena[SYS_MAX_LOCAL_DEVMEM_ARENAS];
++
++ IMG_CHAR *pszVersionString;
++ PVRSRV_EVENTOBJECT *psGlobalEventObject;
++
++ IMG_BOOL bFlushAll;
++
++} SYS_DATA;
++
++
++
++PVRSRV_ERROR SysInitialise(IMG_VOID);
++PVRSRV_ERROR SysFinalise(IMG_VOID);
++
++PVRSRV_ERROR SysDeinitialise(SYS_DATA *psSysData);
++PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_VOID **ppvDeviceMap);
++
++IMG_VOID SysRegisterExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
++IMG_VOID SysRemoveExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_UINT32 SysGetInterruptSource(SYS_DATA *psSysData,
++ PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits);
++
++PVRSRV_ERROR SysResetDevice(IMG_UINT32 ui32DeviceIndex);
++
++PVRSRV_ERROR SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState);
++PVRSRV_ERROR SysSystemPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState);
++PVRSRV_ERROR SysDevicePrePowerState(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR SysDevicePostPowerState(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
++
++#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
++PVRSRV_ERROR SysPowerLockWrap(SYS_DATA *psSysData);
++IMG_VOID SysPowerLockUnwrap(SYS_DATA *psSysData);
++#endif
++
++PVRSRV_ERROR SysOEMFunction ( IMG_UINT32 ui32ID,
++ IMG_VOID *pvIn,
++ IMG_UINT32 ulInSize,
++ IMG_VOID *pvOut,
++ IMG_UINT32 ulOutSize);
++
++
++IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_CPU_PHYADDR cpu_paddr);
++IMG_DEV_PHYADDR SysSysPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr);
++IMG_SYS_PHYADDR SysDevPAddrToSysPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_DEV_PHYADDR SysPAddr);
++IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr (IMG_SYS_PHYADDR SysPAddr);
++IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr (IMG_CPU_PHYADDR cpu_paddr);
++#if defined(PVR_LMA)
++IMG_BOOL SysVerifyCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_CPU_PHYADDR CpuPAddr);
++IMG_BOOL SysVerifySysPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr);
++#endif
++
++extern SYS_DATA* gpsSysData;
++
++#if !defined(USE_CODE)
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SysAcquireData)
++#endif
++static INLINE PVRSRV_ERROR SysAcquireData(SYS_DATA **ppsSysData)
++{
++
++ *ppsSysData = gpsSysData;
++
++
++
++
++
++ if (!gpsSysData)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SysInitialiseCommon)
++#endif
++static INLINE PVRSRV_ERROR SysInitialiseCommon(SYS_DATA *psSysData)
++{
++ PVRSRV_ERROR eError;
++
++
++ eError = PVRSRVInit(psSysData);
++
++ return eError;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SysDeinitialiseCommon)
++#endif
++static INLINE IMG_VOID SysDeinitialiseCommon(SYS_DATA *psSysData)
++{
++
++ PVRSRVDeInit(psSysData);
++
++ OSDestroyResource(&psSysData->sPowerStateChangeResource);
++}
++#endif
++
++
++#if !(defined(NO_HARDWARE) && defined(__linux__) && defined(__KERNEL__))
++#define SysReadHWReg(p, o) OSReadHWReg(p, o)
++#define SysWriteHWReg(p, o, v) OSWriteHWReg(p, o, v)
++#else
++static inline IMG_UINT32 SysReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset)
++{
++ return (IMG_UINT32) readl(pvLinRegBaseAddr + ui32Offset);
++}
++
++static inline IMG_VOID SysWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
++{
++ writel(ui32Value, pvLinRegBaseAddr + ui32Offset);
++}
++#endif
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/.gitignore b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/.gitignore
+new file mode 100644
+index 0000000..2f89523
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/.gitignore
+@@ -0,0 +1,5 @@
++bin_pc_i686*
++tmp_pc_i686*
++host_pc_i686*
++*.o
++*.o.cmd
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/oemfuncs.h b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/oemfuncs.h
+new file mode 100644
+index 0000000..0d3b6d7
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/oemfuncs.h
+@@ -0,0 +1,72 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__OEMFUNCS_H__)
++#define __OEMFUNCS_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#define OEM_EXCHANGE_POWER_STATE (1<<0)
++#define OEM_DEVICE_MEMORY_POWER (1<<1)
++#define OEM_DISPLAY_POWER (1<<2)
++#define OEM_GET_EXT_FUNCS (1<<3)
++
++typedef struct OEM_ACCESS_INFO_TAG
++{
++ IMG_UINT32 ui32Size;
++ IMG_UINT32 ui32FBPhysBaseAddress;
++ IMG_UINT32 ui32FBMemAvailable;
++ IMG_UINT32 ui32SysPhysBaseAddress;
++ IMG_UINT32 ui32SysSize;
++ IMG_UINT32 ui32DevIRQ;
++} OEM_ACCESS_INFO, *POEM_ACCESS_INFO;
++
++typedef IMG_UINT32 (*PFN_SRV_BRIDGEDISPATCH)( IMG_UINT32 Ioctl,
++ IMG_BYTE *pInBuf,
++ IMG_UINT32 InBufLen,
++ IMG_BYTE *pOutBuf,
++ IMG_UINT32 OutBufLen,
++ IMG_UINT32 *pdwBytesTransferred);
++
++
++typedef PVRSRV_ERROR (*PFN_SRV_READREGSTRING)(PPVRSRV_REGISTRY_INFO psRegInfo);
++
++
++typedef struct PVRSRV_DC_OEM_JTABLE_TAG
++{
++ PFN_SRV_BRIDGEDISPATCH pfnOEMBridgeDispatch;
++ PFN_SRV_READREGSTRING pfnOEMReadRegistryString;
++ PFN_SRV_READREGSTRING pfnOEMWriteRegistryString;
++
++} PVRSRV_DC_OEM_JTABLE;
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/ospm_power.c b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/ospm_power.c
+new file mode 100644
+index 0000000..b7fa0c4
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/ospm_power.c
+@@ -0,0 +1,479 @@
++/**************************************************************************
++ * Copyright (c) 2009, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Benjamin Defnet <benjamin.r.defnet@intel.com>
++ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
++ *
++ **************************************************************************/
++
++#include "ospm_power.h"
++#include "psb_drv.h"
++#include "psb_msvdx.h"
++#include "lnc_topaz.h"
++#include "servicesext.h"
++#include "power.h"
++#include "services.h"
++#include "osfunc.h"
++#include <linux/mutex.h>
++
++extern IMG_UINT32 gui32SGXDeviceID;
++extern IMG_UINT32 gui32MRSTDisplayDeviceID;
++extern IMG_UINT32 gui32MRSTMSVDXDeviceID;
++extern IMG_UINT32 gui32MRSTTOPAZDeviceID;
++
++struct drm_device *gpDrmDevice = NULL;
++static struct mutex g_ospm_mutex;
++static bool gbSuspendInProgress = false;
++static bool gbResumeInProgress = false;
++static int g_hw_power_status_mask;
++static atomic_t g_display_access_count;
++static atomic_t g_graphics_access_count;
++static atomic_t g_videoenc_access_count;
++static atomic_t g_videodec_access_count;
++
++/*
++ * ospm_power_init
++ *
++ * Description: Initialize this ospm power management module
++ */
++void ospm_power_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private;
++ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
++
++ pci_write_config_dword(pci_root, 0xD0, 0xd0047800);
++ pci_read_config_dword(pci_root, 0xD4, &dev_priv->ospm_base);
++ dev_priv->ospm_base &= 0xffff;
++
++ dev_priv->apm_reg = MSG_READ32(PSB_PUNIT_PORT, PSB_APMBA);
++ dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
++
++ gpDrmDevice = dev;
++ mutex_init(&g_ospm_mutex);
++ g_hw_power_status_mask = OSPM_ALL_ISLANDS;
++ atomic_set(&g_display_access_count, 0);
++ atomic_set(&g_graphics_access_count, 0);
++ atomic_set(&g_videoenc_access_count, 0);
++ atomic_set(&g_videodec_access_count, 0);
++
++
++#ifdef OSPM_STAT
++ dev_priv->graphics_state = PSB_PWR_STATE_ON;
++ dev_priv->gfx_last_mode_change = jiffies;
++ dev_priv->gfx_on_time = 0;
++ dev_priv->gfx_off_time = 0;
++#endif
++}
++
++/*
++ * ospm_power_uninit
++ *
++ * Description: Uninitialize this ospm power management module
++ */
++void ospm_power_uninit(void)
++{
++ mutex_destroy(&g_ospm_mutex);
++}
++
++/*
++ * ospm_power_suspend
++ *
++ * Description: OSPM is telling our driver to suspend so save state
++ * and power down all hardware.
++ */
++int ospm_power_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ struct drm_device *dev = pci_get_drvdata(pdev);
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) gpDrmDevice->dev_private;
++ struct drm_mode_config *mode_config = &dev->mode_config;
++ struct drm_connector *connector;
++ int ret = 0;
++ bool bDisplayOff = false;
++
++ mutex_lock(&g_ospm_mutex);
++
++ if (atomic_read(&g_graphics_access_count) ||
++ atomic_read(&g_videoenc_access_count) ||
++ atomic_read(&g_videodec_access_count) ||
++ atomic_read(&g_display_access_count))
++ ret = -EBUSY;
++ //SGX will be powered off when idle due to D0i3 support. If we don't wait
++ //for D0i3, then we hit cases where user mode driver gets stuck waiting
++ //for command completion when SGX is powered off.
++ else if (ospm_power_is_hw_on(OSPM_GRAPHICS_ISLAND))
++ ret = -EBUSY;
++ else if (psb_check_msvdx_idle(dev))
++ ret = -EBUSY;
++ else if (IS_MRST(dev) && !dev_priv->topaz_disabled && lnc_check_topaz_idle(dev))
++ ret = -EBUSY;
++
++ gbSuspendInProgress = true;
++
++ if (!ret) {
++ PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D3);
++ bDisplayOff = true;
++ } else if (!atomic_read(&g_display_access_count)) {
++ //At least power down the display
++ PVRSRVSetDevicePowerStateKM(gui32MRSTDisplayDeviceID,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ KERNEL_ID,
++ IMG_FALSE);
++ bDisplayOff = true;
++ }
++
++ if (bDisplayOff) {
++ //Set dpms status to off so that an "xset dpms force on" from the
++ //OSPM Framework (or elsewhere) actually executes
++ list_for_each_entry(connector, &mode_config->connector_list, head) {
++ connector->dpms = DRM_MODE_DPMS_OFF;
++ }
++ }
++
++ gbSuspendInProgress = false;
++
++ mutex_unlock(&g_ospm_mutex);
++ return ret;
++}
++
++/*
++ * ospm_power_resume
++ *
++ * Description: OSPM is telling our driver to resume so restore state
++ * and power up necessary hardware.
++ */
++int ospm_power_resume(struct pci_dev *pdev)
++{
++ struct drm_device *dev = pci_get_drvdata(pdev);
++ struct drm_mode_config *mode_config = &dev->mode_config;
++ struct drm_connector *connector;
++
++ mutex_lock(&g_ospm_mutex);
++ gbResumeInProgress = true;
++ PVRSRVSetPowerStateKM(PVRSRV_SYS_POWER_STATE_D0);
++
++ //Set dpms status to on. We should probably only do this for
++ //connectors that were on prior to the suspend, but for Moorestown
++ //we only have one connector so just brute force it.
++ list_for_each_entry(connector, &mode_config->connector_list, head) {
++ connector->dpms = DRM_MODE_DPMS_ON;
++ }
++
++ gbResumeInProgress = false;
++ mutex_unlock(&g_ospm_mutex);
++ return 0;
++}
++
++
++/*
++ * ospm_power_island_down
++ *
++ * Description: Cut power to the specified island(s) (powergating)
++ */
++void ospm_power_island_down(int hw_islands)
++{
++ u32 pwr_cnt = 0;
++ u32 pwr_mask = 0;
++ u32 pwr_sts;
++
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) gpDrmDevice->dev_private;
++
++ g_hw_power_status_mask &= ~hw_islands;
++
++ if (hw_islands & OSPM_GRAPHICS_ISLAND) {
++ pwr_cnt |= PSB_PWRGT_GFX_MASK;
++ pwr_mask |= PSB_PWRGT_GFX_MASK;
++ #ifdef OSPM_STAT
++ if (dev_priv->graphics_state == PSB_PWR_STATE_ON) {
++ dev_priv->gfx_on_time += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
++ dev_priv->gfx_last_mode_change = jiffies;
++ dev_priv->graphics_state = PSB_PWR_STATE_OFF;
++ dev_priv->gfx_off_cnt++;
++ }
++ #endif
++ }
++ if (hw_islands & OSPM_VIDEO_ENC_ISLAND) {
++ pwr_cnt |= PSB_PWRGT_VID_ENC_MASK;
++ pwr_mask |= PSB_PWRGT_VID_ENC_MASK;
++ }
++ if (hw_islands & OSPM_VIDEO_DEC_ISLAND) {
++ pwr_cnt |= PSB_PWRGT_VID_DEC_MASK;
++ pwr_mask |= PSB_PWRGT_VID_DEC_MASK;
++ }
++ if (pwr_cnt) {
++ pwr_cnt |= inl(dev_priv->apm_base);
++ outl(pwr_cnt, dev_priv->apm_base);
++ while (true) {
++ pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
++ if ((pwr_sts & pwr_mask) == pwr_mask)
++ break;
++ else
++ udelay(10);
++ }
++ }
++
++ if (hw_islands & OSPM_DISPLAY_ISLAND) {
++ pwr_mask = PSB_PWRGT_DISPLAY_MASK;
++ outl(PSB_PWRGT_DISPLAY_MASK, (dev_priv->ospm_base + PSB_PM_SSC));
++ while (true) {
++ pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
++ if ((pwr_sts & pwr_mask) == pwr_mask)
++ break;
++ else
++ udelay(10);
++ }
++ }
++}
++
++/*
++ * ospm_power_island_up
++ *
++ * Description: Restore power to the specified island(s) (powergating)
++ */
++void ospm_power_island_up(int hw_islands)
++{
++ u32 pwr_cnt;
++ u32 pwr_sts;
++ u32 pwr_mask;
++
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) gpDrmDevice->dev_private;
++
++ if (IS_MRST(gpDrmDevice) &&
++ (hw_islands & (OSPM_GRAPHICS_ISLAND | OSPM_VIDEO_ENC_ISLAND |
++ OSPM_VIDEO_DEC_ISLAND))) {
++ pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
++ pwr_mask = 0;
++ if (hw_islands & OSPM_GRAPHICS_ISLAND) {
++ pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
++ pwr_mask |= PSB_PWRGT_GFX_MASK;
++ #ifdef OSPM_STAT
++ if (dev_priv->graphics_state == PSB_PWR_STATE_OFF) {
++ dev_priv->gfx_off_time += (jiffies - dev_priv->gfx_last_mode_change) * 1000 / HZ;
++ dev_priv->gfx_last_mode_change = jiffies;
++ dev_priv->graphics_state = PSB_PWR_STATE_ON;
++ dev_priv->gfx_on_cnt++;
++ }
++ #endif
++ }
++ if (hw_islands & OSPM_VIDEO_ENC_ISLAND) {
++ pwr_cnt &= ~PSB_PWRGT_VID_ENC_MASK;
++ pwr_mask |= PSB_PWRGT_VID_ENC_MASK;
++ }
++ if (hw_islands & OSPM_VIDEO_DEC_ISLAND) {
++ pwr_cnt &= ~PSB_PWRGT_VID_DEC_MASK;
++ pwr_mask |= PSB_PWRGT_VID_DEC_MASK;
++ }
++
++ outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
++ while (true) {
++ pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
++ if ((pwr_sts & pwr_mask) == 0)
++ break;
++ else
++ udelay(10);
++ }
++ }
++
++ if (hw_islands & OSPM_DISPLAY_ISLAND) {
++ pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
++ pwr_cnt &= ~PSB_PWRGT_DISPLAY_MASK;
++ pwr_mask = PSB_PWRGT_DISPLAY_MASK;
++ outl(pwr_cnt, (dev_priv->ospm_base + PSB_PM_SSC));
++ while (true) {
++ pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
++ if ((pwr_sts & pwr_mask) == 0)
++ break;
++ else
++ udelay(10);
++ }
++ }
++
++ g_hw_power_status_mask |= hw_islands;
++}
++
++/*
++ * ospm_power_using_hw_begin
++ *
++ * Description: Notify PowerMgmt module that you will be accessing the
++ * specified island's hw so don't power it off. If the island is off,
++ * this function will behave differently depending on the type param.
++ *
++ * OSPM_UHB_FORCE_POWER_ON:
++ * Power on the specified island.
++ * OSPM_UHB_IGNORE_POWER_OFF:
++ * Increment the access counters. The caller is expected to power on
++ * the island if necessary.
++ * OSPM_UHB_ONLY_IF_ON:
++ * Return false and the caller is expected to not access the hw.
++ *
++ * NOTE *** If this is called from and interrupt handler or other atomic
++ * context, then it will return false if we are in the middle of a
++ * power state transition and the caller will be expected to handle that
++ * even if type is OSPM_UHB_FORCE_POWER_ON.
++ */
++bool ospm_power_using_hw_begin(int hw_island, UHBUsage usage)
++{
++ bool ret = false;
++ bool b_island_is_off = false;
++ bool b_atomic = (in_interrupt() || in_atomic());
++ bool b_force_on = (usage == OSPM_UHB_FORCE_POWER_ON);
++ bool b_ignore_off = (usage == OSPM_UHB_IGNORE_POWER_OFF);
++ IMG_UINT32 deviceID = 0;
++
++ if (!b_atomic)
++ mutex_lock(&g_ospm_mutex);
++ else if ((gbSuspendInProgress || gbResumeInProgress) && b_force_on)
++ goto FailExit;
++
++ b_island_is_off = hw_island & (OSPM_ALL_ISLANDS & ~g_hw_power_status_mask);
++
++ if (b_island_is_off && !b_force_on && !b_ignore_off)
++ goto FailExit;
++
++ if (b_island_is_off && b_force_on) {
++ switch(hw_island)
++ {
++ case OSPM_GRAPHICS_ISLAND:
++ deviceID = gui32SGXDeviceID;
++ break;
++ case OSPM_DISPLAY_ISLAND:
++ deviceID = gui32MRSTDisplayDeviceID;
++ break;
++ case OSPM_VIDEO_DEC_ISLAND:
++ deviceID = gui32MRSTMSVDXDeviceID;
++ break;
++ case OSPM_VIDEO_ENC_ISLAND:
++ deviceID = gui32MRSTTOPAZDeviceID;
++ break;
++ }
++
++ if (PVRSRVPowerOnSystemWithDevice(deviceID, b_atomic ? ISR_ID : KERNEL_ID, IMG_FALSE) != PVRSRV_OK)
++ goto FailExit;
++ }
++
++ switch(hw_island)
++ {
++ case OSPM_GRAPHICS_ISLAND:
++ atomic_inc(&g_graphics_access_count);
++ case OSPM_VIDEO_ENC_ISLAND:
++ atomic_inc(&g_videoenc_access_count);
++ case OSPM_VIDEO_DEC_ISLAND:
++ atomic_inc(&g_videodec_access_count);
++ case OSPM_DISPLAY_ISLAND:
++ atomic_inc(&g_display_access_count);
++ }
++
++ ret = true;
++FailExit:
++
++ if (!b_atomic)
++ mutex_unlock(&g_ospm_mutex);
++
++ return ret;
++}
++
++
++/*
++ * ospm_power_using_hw_end
++ *
++ * Description: Notify PowerMgmt module that you are done accessing the
++ * specified island's hw so feel free to power it off. Note that this
++ * function doesn't actually power off the islands.
++ */
++void ospm_power_using_hw_end(int hw_island)
++{
++ switch(hw_island)
++ {
++ case OSPM_GRAPHICS_ISLAND:
++ atomic_dec(&g_graphics_access_count);
++ case OSPM_VIDEO_ENC_ISLAND:
++ atomic_dec(&g_videoenc_access_count);
++ case OSPM_VIDEO_DEC_ISLAND:
++ atomic_dec(&g_videodec_access_count);
++ case OSPM_DISPLAY_ISLAND:
++ atomic_dec(&g_display_access_count);
++ }
++
++ WARN_ON(atomic_read(&g_graphics_access_count) < 0);
++ WARN_ON(atomic_read(&g_videoenc_access_count) < 0);
++ WARN_ON(atomic_read(&g_videodec_access_count) < 0);
++ WARN_ON(atomic_read(&g_display_access_count) < 0);
++}
++
++/*
++ * ospm_power_is_hw_on
++ *
++ * Description: do an instantaneous check for if the specified islands
++ * are on. Only use this in cases where you know the g_state_change_mutex
++ * is already held such as in irq install/uninstall. Otherwise, use
++ * ospm_power_using_hw_begin().
++ */
++bool ospm_power_is_hw_on(int hw_islands)
++{
++ return ((g_hw_power_status_mask & hw_islands) == hw_islands);
++}
++
++void ospm_apm_power_down_msvdx(struct drm_device *dev)
++{
++ uint32_t ui32_reg_value = 0;
++ mutex_lock(&g_ospm_mutex);
++
++ if (atomic_read(&g_videodec_access_count))
++ goto out;
++ if (psb_check_msvdx_idle(dev))
++ goto out;
++
++ /* FIXME: workaround for HSD3469585
++ * re-enable DRAM Self Refresh Mode
++ * by setting DUNIT.DPMC0
++ */
++ ui32_reg_value = MSG_READ32(0x1, 0x4);
++ MSG_WRITE32(0x1, 0x4, (ui32_reg_value | (0x1 << 7)));
++
++ gbSuspendInProgress = true;
++ PVRSRVSetDevicePowerStateKM(gui32MRSTMSVDXDeviceID,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ ISR_ID,
++ IMG_FALSE);
++ gbSuspendInProgress = false;
++out:
++ mutex_unlock(&g_ospm_mutex);
++ return;
++}
++
++void ospm_apm_power_down_topaz(struct drm_device *dev)
++{
++ mutex_lock(&g_ospm_mutex);
++
++ if (atomic_read(&g_videoenc_access_count))
++ goto out;
++ if (lnc_check_topaz_idle(dev))
++ goto out;
++
++ gbSuspendInProgress = true;
++ PVRSRVSetDevicePowerStateKM(gui32MRSTTOPAZDeviceID,
++ PVRSRV_DEV_POWER_STATE_OFF,
++ ISR_ID,
++ IMG_FALSE);
++ gbSuspendInProgress = false;
++out:
++ mutex_unlock(&g_ospm_mutex);
++ return;
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/ospm_power.h b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/ospm_power.h
+new file mode 100644
+index 0000000..835bfae
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/ospm_power.h
+@@ -0,0 +1,79 @@
++/**************************************************************************
++ * Copyright (c) 2009, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Benjamin Defnet <benjamin.r.defnet@intel.com>
++ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
++ *
++ **************************************************************************/
++
++#ifndef _OSPM_POWER_H_
++#define _OSPM_POWER_H_
++
++#include <linux/pci.h>
++#include <drm/drmP.h>
++
++#define OSPM_GRAPHICS_ISLAND 0x1
++#define OSPM_VIDEO_ENC_ISLAND 0x2
++#define OSPM_VIDEO_DEC_ISLAND 0x4
++#define OSPM_DISPLAY_ISLAND 0x8
++#define OSPM_ALL_ISLANDS 0xf
++
++
++typedef enum _UHBUsage
++{
++ OSPM_UHB_ONLY_IF_ON = 0,
++ OSPM_UHB_FORCE_POWER_ON,
++ OSPM_UHB_IGNORE_POWER_OFF,
++} UHBUsage;
++
++
++void ospm_power_init(struct drm_device *dev);
++void ospm_power_uninit(void);
++
++/*
++ * OSPM will call these functions
++ */
++int ospm_power_suspend(struct pci_dev *pdev, pm_message_t state);
++int ospm_power_resume(struct pci_dev *pdev);
++
++/*
++ * These are the functions the driver should use to wrap all hw access
++ * (i.e. register reads and writes)
++ */
++bool ospm_power_using_hw_begin(int hw_island, UHBUsage type);
++void ospm_power_using_hw_end(int hw_island);
++
++/*
++ * Power up/down different hw component rails/islands
++ */
++void ospm_power_island_down(int hw_islands);
++void ospm_power_island_up(int hw_islands);
++
++/*
++ * Use this function to do an instantaneous check for if the hw is on.
++ * Only use this in cases where you know the g_state_change_mutex
++ * is already held such as in irq install/uninstall and you need to
++ * prevent a deadlock situation. Otherwise use ospm_power_using_hw_begin().
++ */
++bool ospm_power_is_hw_on(int hw_islands);
++
++/* Use these functions to power down video HW for D0i3 purpose */
++void ospm_apm_power_down_msvdx(struct drm_device *dev);
++void ospm_apm_power_down_topaz(struct drm_device *dev);
++
++#endif /*_OSPM_POWER_H_*/
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_export.c b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_export.c
+new file mode 100644
+index 0000000..6c56df5
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_export.c
+@@ -0,0 +1,135 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++
++#include "pvr_drm_shared.h"
++
++#include "services_headers.h"
++#include "private_data.h"
++#include "pvr_drm.h"
++
++#include "pvr_bridge.h"
++#include "linkage.h"
++#include "mmap.h"
++
++#if defined(PDUMP)
++#include "client/linuxsrv.h"
++#endif
++
++#include "sys_pvr_drm_import.h"
++
++#include "sys_pvr_drm_export.h"
++
++int
++SYSPVRInit(void)
++{
++ PVRDPFInit();
++
++ return 0;
++}
++
++
++int
++SYSPVRLoad(struct drm_device *dev, unsigned long flags)
++{
++ return PVRSRVDrmLoad(dev, flags);
++}
++
++int
++SYSPVROpen(struct drm_device *dev, struct drm_file *pFile)
++{
++ return PVRSRVDrmOpen(dev, pFile);
++}
++
++int
++SYSPVRUnload(struct drm_device *dev)
++{
++ return PVRSRVDrmUnload(dev);
++}
++
++void
++SYSPVRPostClose(struct drm_device *dev, struct drm_file *file)
++{
++ return PVRSRVDrmPostClose(dev, file);
++}
++
++int
++SYSPVRBridgeDispatch(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRSRV_BridgeDispatchKM(dev, arg, pFile);
++}
++
++int
++SYSPVRDCDriverIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRDRM_Dummy_ioctl(dev, arg, pFile);
++
++}
++
++int
++SYSPVRBCDriverIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRDRM_Dummy_ioctl(dev, arg, pFile);
++
++}
++
++int
++SYSPVRIsMaster(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRDRMIsMaster(dev, arg, pFile);
++}
++
++int
++SYSPVRUnprivCmd(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++ return PVRDRMUnprivCmd(dev, arg, pFile);
++}
++
++int
++SYSPVRMMap(struct file* pFile, struct vm_area_struct* ps_vma)
++{
++ int ret;
++
++ ret = PVRMMap(pFile, ps_vma);
++ if (ret == -ENOENT)
++ {
++ ret = drm_mmap(pFile, ps_vma);
++ }
++
++ return ret;
++}
++
++int
++SYSPVRDBGDrivIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++{
++#if defined(PDUMP)
++ return dbgdrv_ioctl(dev, arg, pFile);
++#else
++ return -EINVAL;
++#endif
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_export.h b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_export.h
+new file mode 100644
+index 0000000..c73cea1
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_export.h
+@@ -0,0 +1,87 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SYS_PVR_DRM_EXPORT_H__)
++#define __SYS_PVR_DRM_EXPORT_H__
++
++#include "pvr_drm_shared.h"
++
++#if defined(__KERNEL__)
++
++#include "services_headers.h"
++#include "private_data.h"
++#include "pvr_drm.h"
++
++#include "pvr_bridge.h"
++
++#if defined(PDUMP)
++#include "client/linuxsrv.h"
++#endif
++
++#define PVR_DRM_SRVKM_IOCTL \
++ DRM_IOW(DRM_COMMAND_BASE + PVR_DRM_SRVKM_CMD, PVRSRV_BRIDGE_PACKAGE)
++
++#define PVR_DRM_DISP_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_DISP_CMD)
++
++#define PVR_DRM_BC_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_BC_CMD)
++
++#define PVR_DRM_IS_MASTER_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_IS_MASTER_CMD)
++
++#define PVR_DRM_UNPRIV_IOCTL \
++ DRM_IOWR(DRM_COMMAND_BASE + PVR_DRM_UNPRIV_CMD, IMG_UINT32)
++
++#if defined(PDUMP)
++#define PVR_DRM_DBGDRV_IOCTL \
++ DRM_IOW(DRM_COMMAND_BASE + PVR_DRM_DBGDRV_CMD, IOCTL_PACKAGE)
++#else
++#define PVR_DRM_DBGDRV_IOCTL \
++ DRM_IO(DRM_COMMAND_BASE + PVR_DRM_DBGDRV_CMD)
++#endif
++
++int SYSPVRInit(void);
++int SYSPVRLoad(struct drm_device *dev, unsigned long flags);
++int SYSPVROpen(struct drm_device *dev, struct drm_file *pFile);
++int SYSPVRUnload(struct drm_device *dev);
++void SYSPVRPostClose(struct drm_device *dev, struct drm_file *file);
++int SYSPVRBridgeDispatch(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++int SYSPVRDCDriverIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++int SYSPVRBCDriverIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++int SYSPVRIsMaster(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++int SYSPVRUnprivCmd(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++
++int SYSPVRMMap(struct file* pFile, struct vm_area_struct* ps_vma);
++
++int SYSPVRDBGDrivIoctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile);
++
++int SYSPVRServiceSGXInterrupt(struct drm_device *dev);
++
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_import.h b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_import.h
+new file mode 100644
+index 0000000..1efeb75
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sys_pvr_drm_import.h
+@@ -0,0 +1,45 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SYS_PVR_DRM_IMPORT_H__)
++#define __SYS_PVR_DRM_IMPORT_H__
++
++#if defined(__KERNEL__)
++#include "psb_drm.h"
++#endif
++
++#define DRM_PSB_PLACEMENT_OFFSET 0x13
++
++#if 0
++#define DRM_PVR_RESERVED1 0x0D
++#define DRM_PVR_RESERVED2 0x0E
++#define DRM_PVR_RESERVED3 0x0F
++#define DRM_PVR_RESERVED4 0x10
++#define DRM_PVR_RESERVED5 0x11
++#define DRM_PVR_RESERVED6 0x12
++#endif
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysconfig.c b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysconfig.c
+new file mode 100644
+index 0000000..955f793
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysconfig.c
+@@ -0,0 +1,1022 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if defined(LDM_PCI) || defined(SUPPORT_DRI_DRM)
++#include "linux/pci.h"
++#endif
++#if defined(SUPPORT_DRI_DRM)
++#include "drm/drmP.h"
++#endif
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "kerneldisplay.h"
++#include "oemfuncs.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "pdump_km.h"
++#include "syslocal.h"
++#include "env_data.h"
++#include "ospm_power.h"
++#include "psb_drv.h"
++#include "sysirq.h"
++#include "msvdx_power.h"
++#include "topaz_power.h"
++#include "sys_pvr_drm_export.h"
++
++/* Graphics MSI address and data region in PCIx */
++#define MRST_PCIx_MSI_ADDR_LOC 0x94
++#define MRST_PCIx_MSI_DATA_LOC 0x98
++
++#define SYS_SGX_CLOCK_SPEED (400000000)
++#define SYS_SGX_HWRECOVERY_TIMEOUT_FREQ (100)
++#define SYS_SGX_PDS_TIMER_FREQ (1000)
++#define SYS_SGX_ACTIVE_POWER_LATENCY_MS (50)
++
++#if defined(SUPPORT_DRI_DRM_EXT)
++#define DRI_DRM_STATIC
++#else
++#define DRI_DRM_STATIC static
++#endif
++
++SYS_DATA* gpsSysData = (SYS_DATA*)IMG_NULL;
++SYS_DATA gsSysData;
++
++static SYS_SPECIFIC_DATA gsSysSpecificData;
++
++IMG_UINT32 gui32SGXDeviceID;
++static SGX_DEVICE_MAP gsSGXDeviceMap;
++extern IMG_UINT32 gui32MRSTDisplayDeviceID;
++IMG_UINT32 gui32MRSTMSVDXDeviceID;
++IMG_UINT32 gui32MRSTTOPAZDeviceID;
++
++extern struct drm_device *gpDrmDevice;
++
++#if !defined(NO_HARDWARE)
++IMG_CPU_VIRTADDR gsPoulsboRegsCPUVaddr;
++
++IMG_CPU_VIRTADDR gsPoulsboDisplayRegsCPUVaddr;
++#endif
++
++#ifdef LDM_PCI
++extern struct pci_dev *gpsPVRLDMDev;
++#endif
++
++#define POULSBO_ADDR_RANGE_INDEX (MMADR_INDEX - 4)
++#define POULSBO_HP_ADDR_RANGE_INDEX (GMADR_INDEX - 4)
++static PVRSRV_ERROR PCIInitDev(SYS_DATA *psSysData)
++{
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++#ifdef LDM_PCI
++ psSysSpecData->hSGXPCI = OSPCISetDev((IMG_VOID *)psSysSpecData->psPCIDev, HOST_PCI_INIT_FLAG_BUS_MASTER | HOST_PCI_INIT_FLAG_MSI);
++#else
++ psSysSpecData->hSGXPCI = OSPCIAcquireDev(SYS_SGX_DEV_VENDOR_ID, gpDrmDevice->pci_device, HOST_PCI_INIT_FLAG_BUS_MASTER | HOST_PCI_INIT_FLAG_MSI);
++#endif
++ if (!psSysSpecData->hSGXPCI)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Failed to acquire PCI device"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_PCI_ACQUIRE_DEV);
++
++ PVR_TRACE(("PCI memory region: %x to %x", OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX), OSPCIAddrRangeEnd(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX)));
++ PVR_TRACE(("Host Port region: %x to %x", OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX), OSPCIAddrRangeEnd(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX)));
++
++
++ if (OSPCIAddrRangeLen(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX) < (IS_MRST(gpDrmDevice)? POULSBO_MAX_OFFSET:PSB_POULSBO_MAX_OFFSET))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Device memory region isn't big enough"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++
++ if (OSPCIRequestAddrRange(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Device memory region not available"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_SGX_ADDR_RANGE);
++
++
++ if (OSPCIRequestAddrRange(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"PCIInitDev: Host Port region not available"));
++ return PVRSRV_ERROR_GENERIC;
++ }
++ SYS_SPECIFIC_DATA_SET(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_HOST_PORT_RANGE);
++
++ return PVRSRV_OK;
++}
++
++static IMG_VOID PCIDeInitDev(SYS_DATA *psSysData)
++{
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_SGX_ADDR_RANGE))
++ {
++ OSPCIReleaseAddrRange(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX);
++ }
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PCI_REQUEST_HOST_PORT_RANGE))
++ {
++ OSPCIReleaseAddrRange(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX);
++ }
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PCI_ACQUIRE_DEV))
++ {
++ OSPCIReleaseDev(psSysSpecData->hSGXPCI);
++ }
++}
++static PVRSRV_ERROR SysLocateDevices(SYS_DATA *psSysData)
++{
++ IMG_UINT32 ui32BaseAddr = 0;
++ IMG_UINT32 ui32IRQ = 0;
++ IMG_UINT32 ui32HostPortAddr = 0;
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++ ui32BaseAddr = OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_ADDR_RANGE_INDEX);
++ ui32HostPortAddr = OSPCIAddrRangeStart(psSysSpecData->hSGXPCI, POULSBO_HP_ADDR_RANGE_INDEX);
++ if (OSPCIIRQ(psSysSpecData->hSGXPCI, &ui32IRQ) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysLocateDevices: Couldn't get IRQ"));
++ return PVRSRV_ERROR_INVALID_DEVICE;
++ }
++
++ PVR_TRACE(("ui32BaseAddr: %p", ui32BaseAddr));
++ PVR_TRACE(("ui32HostPortAddr: %p", ui32HostPortAddr));
++ PVR_TRACE(("IRQ: %d", ui32IRQ));
++
++
++ gsSGXDeviceMap.ui32Flags = 0x0;
++ gsSGXDeviceMap.ui32IRQ = ui32IRQ;
++
++ if (IS_MRST(gpDrmDevice))
++ gsSGXDeviceMap.sRegsSysPBase.uiAddr = ui32BaseAddr + SGX_REGS_OFFSET;
++ else
++ gsSGXDeviceMap.sRegsSysPBase.uiAddr = ui32BaseAddr + PSB_SGX_REGS_OFFSET;
++
++ gsSGXDeviceMap.sRegsCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
++ gsSGXDeviceMap.ui32RegsSize = SGX_REG_SIZE;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++
++ gsSGXDeviceMap.ui32Flags = SGX_HOSTPORT_PRESENT;
++ gsSGXDeviceMap.sHPSysPBase.uiAddr = ui32HostPortAddr;
++ gsSGXDeviceMap.sHPCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sHPSysPBase);
++
++ if (IS_MRST(gpDrmDevice))
++ gsSGXDeviceMap.ui32HPSize = SYS_SGX_HP_SIZE;
++ else
++ gsSGXDeviceMap.ui32HPSize = PSB_SYS_SGX_HP_SIZE;
++#endif
++
++#if defined(MRST_SLAVEPORT)
++
++ gsSGXDeviceMap.sSPSysPBase.uiAddr = ui32BaseAddr + MRST_SGX_SP_OFFSET;
++ gsSGXDeviceMap.sSPCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sSPSysPBase);
++ gsSGXDeviceMap.ui32SPSize = SGX_SP_SIZE;
++#endif
++
++
++
++
++ gsSGXDeviceMap.sLocalMemSysPBase.uiAddr = 0;
++ gsSGXDeviceMap.sLocalMemDevPBase.uiAddr = 0;
++ gsSGXDeviceMap.sLocalMemCpuPBase.uiAddr = 0;
++ gsSGXDeviceMap.ui32LocalMemSize = 0;
++
++
++ {
++ IMG_SYS_PHYADDR sPoulsboRegsCpuPBase;
++ sPoulsboRegsCpuPBase.uiAddr = ui32BaseAddr + POULSBO_REGS_OFFSET;
++ gsPoulsboRegsCPUVaddr = OSMapPhysToLin(SysSysPAddrToCpuPAddr(sPoulsboRegsCpuPBase),
++ POULSBO_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ sPoulsboRegsCpuPBase.uiAddr = ui32BaseAddr + POULSBO_DISPLAY_REGS_OFFSET;
++ gsPoulsboDisplayRegsCPUVaddr = OSMapPhysToLin(SysSysPAddrToCpuPAddr(sPoulsboRegsCpuPBase),
++ POULSBO_DISPLAY_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ }
++
++ return PVRSRV_OK;
++}
++
++
++#define VERSION_STR_MAX_LEN_TEMPLATE "SGX revision = 000.000.000"
++static PVRSRV_ERROR SysCreateVersionString(SYS_DATA *psSysData)
++{
++ IMG_UINT32 ui32MaxStrLen;
++ PVRSRV_ERROR eError;
++ IMG_INT32 i32Count;
++ IMG_CHAR *pszVersionString;
++ IMG_UINT32 ui32SGXRevision = 0;
++ IMG_VOID *pvSGXRegs;
++
++ pvSGXRegs = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ if (pvSGXRegs != IMG_NULL)
++ {
++ ui32SGXRevision = OSReadHWReg(pvSGXRegs, EUR_CR_CORE_REVISION);
++ OSUnMapPhysToLin(pvSGXRegs,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysCreateVersionString: Couldn't map SGX registers"));
++ }
++
++ ui32MaxStrLen = OSStringLength(VERSION_STR_MAX_LEN_TEMPLATE);
++ eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32MaxStrLen + 1,
++ (IMG_PVOID *)&pszVersionString,
++ IMG_NULL,
++ "Version String");
++ if(eError != PVRSRV_OK)
++ {
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ i32Count = OSSNPrintf(pszVersionString, ui32MaxStrLen + 1,
++ "SGX revision = %u.%u.%u",
++ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAJOR_MASK)
++ >> EUR_CR_CORE_REVISION_MAJOR_SHIFT),
++ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MINOR_MASK)
++ >> EUR_CR_CORE_REVISION_MINOR_SHIFT),
++ (IMG_UINT)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAINTENANCE_MASK)
++ >> EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT)
++ );
++ if(i32Count == -1)
++ {
++ ui32MaxStrLen = OSStringLength(VERSION_STR_MAX_LEN_TEMPLATE);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32MaxStrLen + 1,
++ pszVersionString,
++ IMG_NULL);
++
++ return PVRSRV_ERROR_GENERIC;
++ }
++
++ psSysData->pszVersionString = pszVersionString;
++
++ return PVRSRV_OK;
++}
++
++static IMG_VOID SysFreeVersionString(SYS_DATA *psSysData)
++{
++ if(psSysData->pszVersionString)
++ {
++ IMG_UINT32 ui32MaxStrLen;
++ ui32MaxStrLen = OSStringLength(VERSION_STR_MAX_LEN_TEMPLATE);
++ OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++ ui32MaxStrLen+1,
++ psSysData->pszVersionString,
++ IMG_NULL);
++ psSysData->pszVersionString = IMG_NULL;
++ }
++}
++
++extern int drm_psb_ospm;
++
++PVRSRV_ERROR SysInitialise(IMG_VOID)
++{
++ IMG_UINT32 i = 0;
++ PVRSRV_ERROR eError;
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ SGX_TIMING_INFORMATION* psTimingInfo;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) gpDrmDevice->dev_private;
++
++ gpsSysData = &gsSysData;
++ OSMemSet(gpsSysData, 0, sizeof(SYS_DATA));
++
++ gpsSysData->pvSysSpecificData = &gsSysSpecificData;
++ gsSysSpecificData.ui32SysSpecificData = 0;
++#ifdef LDM_PCI
++
++ PVR_ASSERT(gpsPVRLDMDev != IMG_NULL);
++ gsSysSpecificData.psPCIDev = gpsPVRLDMDev;
++#endif
++
++ eError = OSInitEnvData(&gpsSysData->pvEnvSpecificData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to setup env structure"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++
++ psTimingInfo = &gsSGXDeviceMap.sTimingInfo;
++ psTimingInfo->ui32CoreClockSpeed = SYS_SGX_CLOCK_SPEED;
++ psTimingInfo->ui32HWRecoveryFreq = SYS_SGX_HWRECOVERY_TIMEOUT_FREQ;
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++ psTimingInfo->bEnableActivePM = (drm_psb_ospm != 0);
++ printk(KERN_ERR "SGX APM is %s\n", (drm_psb_ospm != 0)? "enabled":"disabled");
++#else
++ psTimingInfo->bEnableActivePM = IMG_FALSE;
++#endif
++ psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS;
++ psTimingInfo->ui32uKernelFreq = SYS_SGX_PDS_TIMER_FREQ;
++
++ eError = PCIInitDev(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++ gpsSysData->ui32NumDevices = SYS_DEVICE_COUNT;
++
++
++ for(i=0; i<SYS_DEVICE_COUNT; i++)
++ {
++ gpsSysData->sDeviceID[i].uiID = i;
++ gpsSysData->sDeviceID[i].bInUse = IMG_FALSE;
++ }
++
++ gpsSysData->psDeviceNodeList = IMG_NULL;
++ gpsSysData->psQueueList = IMG_NULL;
++
++ eError = SysInitialiseCommon(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed in SysInitialiseCommon"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++
++
++
++
++ eError = SysLocateDevices(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to locate devices"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++
++
++
++ eError = PVRSRVRegisterDevice(gpsSysData, SGXRegisterDevice,
++ DEVICE_SGX_INTERRUPT, &gui32SGXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++
++ /* register MSVDX, with 0 interrupt bit, no interrupt will be served */
++ eError = PVRSRVRegisterDevice(gpsSysData, MSVDXRegisterDevice,
++ DEVICE_MSVDX_INTERRUPT, &gui32MRSTMSVDXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register MSVDXdevice!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++ if (IS_MRST(gpDrmDevice) && !dev_priv->topaz_disabled)
++ {
++ /* register TOPAZ, with 0 interrupt bit, no interrupt will be served */
++ eError = PVRSRVRegisterDevice(gpsSysData, TOPAZRegisterDevice,
++ DEVICE_TOPAZ_INTERRUPT, &gui32MRSTTOPAZDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register TOPAZdevice!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ }
++
++ psDeviceNode = gpsSysData->psDeviceNodeList;
++
++ while(psDeviceNode)
++ {
++
++ switch(psDeviceNode->sDevId.eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++ DEVICE_MEMORY_INFO *psDevMemoryInfo;
++ DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++
++
++ psDeviceNode->psLocalDevMemArena = IMG_NULL;
++
++
++ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++ psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++
++
++ for(i=0; i<psDevMemoryInfo->ui32HeapCount; i++)
++ {
++ psDeviceMemoryHeap[i].ui32Attribs |= PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG;
++#ifdef OEM_CUSTOMISE
++
++#endif
++ }
++
++ break;
++ }
++ case PVRSRV_DEVICE_TYPE_MSVDX:
++ /* nothing need to do here */
++ break;
++ case PVRSRV_DEVICE_TYPE_TOPAZ:
++ break;
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to find SGX device node!"));
++ return PVRSRV_ERROR_INIT_FAILURE;
++ }
++ }
++
++
++ psDeviceNode = psDeviceNode->psNext;
++ }
++
++ PDUMPINIT();
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PDUMP_INIT);
++
++
++ eError = PVRSRVInitialiseDevice (gui32SGXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_SGX_INITIALISED);
++
++ eError = PVRSRVInitialiseDevice (gui32MRSTMSVDXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++
++ if (IS_MRST(gpDrmDevice) && !dev_priv->topaz_disabled)
++ {
++ eError = PVRSRVInitialiseDevice (gui32MRSTTOPAZDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return eError;
++ }
++ }
++
++ if (!sysirq_init(gpDrmDevice))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
++ SysDeinitialise(gpsSysData);
++ gpsSysData = IMG_NULL;
++ return PVRSRV_ERROR_INIT_FAILURE;
++ }
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SysFinalise(IMG_VOID)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ eError = SysCreateVersionString(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to create a system version string"));
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_WARNING, "SysFinalise: Version string: %s", gpsSysData->pszVersionString));
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR SysDeinitialise (SYS_DATA *psSysData)
++{
++ PVRSRV_ERROR eError;
++
++ SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++ sysirq_uninit(gpDrmDevice);
++
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_SGX_INITIALISED))
++ {
++
++ eError = PVRSRVDeinitialiseDevice(gui32SGXDeviceID);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init the device"));
++ return eError;
++ }
++ }
++
++ SysFreeVersionString(psSysData);
++
++ PCIDeInitDev(psSysData);
++
++ eError = OSDeInitEnvData(psSysData->pvEnvSpecificData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init env structure"));
++ return eError;
++ }
++
++ SysDeinitialiseCommon(gpsSysData);
++
++
++#if !defined(NO_HARDWARE)
++
++ OSUnMapPhysToLin(gsPoulsboRegsCPUVaddr,
++ POULSBO_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ OSUnMapPhysToLin(gsPoulsboDisplayRegsCPUVaddr,
++ POULSBO_DISPLAY_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++#endif
++ if (SYS_SPECIFIC_DATA_TEST(psSysSpecData, SYS_SPECIFIC_DATA_PDUMP_INIT))
++ {
++ PDUMPDEINIT();
++ }
++
++ gpsSysData = IMG_NULL;
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_VOID **ppvDeviceMap)
++{
++ switch(eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++
++ *ppvDeviceMap = (IMG_VOID*)&gsSGXDeviceMap;
++ break;
++ }
++ default:
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysGetDeviceMemoryMap: unsupported device type"));
++ }
++ }
++ return PVRSRV_OK;
++}
++
++
++IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType,
++ IMG_CPU_PHYADDR CpuPAddr)
++{
++ IMG_DEV_PHYADDR DevPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++
++ DevPAddr.uiAddr = CpuPAddr.uiAddr;
++
++ return DevPAddr;
++}
++
++
++IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr (IMG_SYS_PHYADDR sys_paddr)
++{
++ IMG_CPU_PHYADDR cpu_paddr;
++
++
++ cpu_paddr.uiAddr = sys_paddr.uiAddr;
++ return cpu_paddr;
++}
++
++IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr (IMG_CPU_PHYADDR cpu_paddr)
++{
++ IMG_SYS_PHYADDR sys_paddr;
++
++
++ sys_paddr.uiAddr = cpu_paddr.uiAddr;
++ return sys_paddr;
++}
++
++
++IMG_DEV_PHYADDR SysSysPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr)
++{
++ IMG_DEV_PHYADDR DevPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++
++ DevPAddr.uiAddr = SysPAddr.uiAddr;
++
++ return DevPAddr;
++}
++
++
++IMG_SYS_PHYADDR SysDevPAddrToSysPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_DEV_PHYADDR DevPAddr)
++{
++ IMG_SYS_PHYADDR SysPAddr;
++
++ PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++
++ SysPAddr.uiAddr = DevPAddr.uiAddr;
++
++ return SysPAddr;
++}
++
++
++IMG_VOID SysRegisterExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++
++ psDeviceNode->ui32SOCInterruptBit = DEVICE_DISP_INTERRUPT;
++}
++
++
++IMG_VOID SysRemoveExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++}
++
++PVRSRV_ERROR SysOEMFunction ( IMG_UINT32 ui32ID,
++ IMG_VOID *pvIn,
++ IMG_UINT32 ulInSize,
++ IMG_VOID *pvOut,
++ IMG_UINT32 ulOutSize)
++{
++ if (ulInSize || pvIn);
++
++ if ((ui32ID == OEM_GET_EXT_FUNCS) &&
++ (ulOutSize == sizeof(PVRSRV_DC_OEM_JTABLE)))
++ {
++ PVRSRV_DC_OEM_JTABLE *psOEMJTable = (PVRSRV_DC_OEM_JTABLE*)pvOut;
++
++ psOEMJTable->pfnOEMReadRegistryString = IMG_NULL;
++ psOEMJTable->pfnOEMWriteRegistryString = IMG_NULL;
++
++ return PVRSRV_OK;
++ }
++
++ return PVRSRV_ERROR_INVALID_PARAMS;
++}
++
++
++PVRSRV_ERROR SysMapInRegisters(IMG_VOID)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNodeList;
++
++ psDeviceNodeList = gpsSysData->psDeviceNodeList;
++
++ while (psDeviceNodeList)
++ {
++ switch(psDeviceNodeList->sDevId.eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNodeList->pvDevice;
++
++ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS))
++ {
++ psDevInfo->pvRegsBaseKM = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ if (!psDevInfo->pvRegsBaseKM)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysMapInRegisters : Failed to map in SGX registers\n"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS);
++ }
++ psDevInfo->ui32RegSize = gsSGXDeviceMap.ui32RegsSize;
++ psDevInfo->sRegsPhysBase = gsSGXDeviceMap.sRegsSysPBase;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ if (gsSGXDeviceMap.ui32Flags & SGX_HOSTPORT_PRESENT)
++ {
++ if (SYS_SPECIFIC_DATA_TEST(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP))
++ {
++
++ psDevInfo->pvHostPortBaseKM = OSMapPhysToLin(gsSGXDeviceMap.sHPCpuPBase,
++ gsSGXDeviceMap.ui32HPSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++ if (!psDevInfo->pvHostPortBaseKM)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysMapInRegisters : Failed to map in host port\n"));
++ return PVRSRV_ERROR_BAD_MAPPING;
++ }
++ SYS_SPECIFIC_DATA_CLEAR(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP);
++ }
++ psDevInfo->ui32HPSize = gsSGXDeviceMap.ui32HPSize;
++ psDevInfo->sHPSysPAddr = gsSGXDeviceMap.sHPSysPBase;
++ }
++#endif
++ break;
++ }
++ default:
++ break;
++ }
++ psDeviceNodeList = psDeviceNodeList->psNext;
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SysUnmapRegisters(IMG_VOID)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNodeList;
++
++ psDeviceNodeList = gpsSysData->psDeviceNodeList;
++
++ while (psDeviceNodeList)
++ {
++ switch (psDeviceNodeList->sDevId.eDeviceType)
++ {
++ case PVRSRV_DEVICE_TYPE_SGX:
++ {
++ PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNodeList->pvDevice;
++#if !(defined(NO_HARDWARE) && defined(__linux__))
++
++ if (psDevInfo->pvRegsBaseKM)
++ {
++ OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM,
++ gsSGXDeviceMap.ui32RegsSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS);
++ }
++#endif
++
++ psDevInfo->pvRegsBaseKM = IMG_NULL;
++ psDevInfo->ui32RegSize = 0;
++ psDevInfo->sRegsPhysBase.uiAddr = 0;
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ if (gsSGXDeviceMap.ui32Flags & SGX_HOSTPORT_PRESENT)
++ {
++
++ if (psDevInfo->pvHostPortBaseKM)
++ {
++ OSUnMapPhysToLin(psDevInfo->pvHostPortBaseKM,
++ gsSGXDeviceMap.ui32HPSize,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++ SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP);
++
++ psDevInfo->pvHostPortBaseKM = IMG_NULL;
++ }
++
++ psDevInfo->ui32HPSize = 0;
++ psDevInfo->sHPSysPAddr.uiAddr = 0;
++ }
++#endif
++ break;
++ }
++ default:
++ break;
++ }
++ psDeviceNodeList = psDeviceNodeList->psNext;
++ }
++
++#if !(defined(NO_HARDWARE) && defined(__linux__))
++
++ OSUnMapPhysToLin(gsPoulsboRegsCPUVaddr,
++ POULSBO_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++
++ OSUnMapPhysToLin(gsPoulsboDisplayRegsCPUVaddr,
++ POULSBO_DISPLAY_REG_SIZE,
++ PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++ IMG_NULL);
++
++#endif
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SysSystemPrePowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError= PVRSRV_OK;
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)(gsSysSpecificData.hSGXPCI);
++
++ if (eNewPowerState != gpsSysData->eCurrentPowerState)
++ {
++ if ((eNewPowerState == PVRSRV_SYS_POWER_STATE_D3) &&
++ (gpsSysData->eCurrentPowerState < PVRSRV_SYS_POWER_STATE_D3))
++ {
++ drm_irq_uninstall(gpDrmDevice);
++
++ SysUnmapRegisters();
++
++ //Save some pci state that won't get saved properly by pci_save_state()
++ pci_read_config_dword(psPVRPCI->psPCIDev, 0x5C, &gsSysSpecificData.saveBSM);
++ pci_read_config_dword(psPVRPCI->psPCIDev, 0xFC, &gsSysSpecificData.saveVBT);
++ pci_read_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_ADDR_LOC, &gsSysSpecificData.msi_addr);
++ pci_read_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_DATA_LOC, &gsSysSpecificData.msi_data);
++
++ eError = OSPCISuspendDev(gsSysSpecificData.hSGXPCI);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPrePowerState: OSPCISuspendDev failed (%d)", eError));
++ }
++ }
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR SysSystemPostPowerState(PVRSRV_SYS_POWER_STATE eNewPowerState)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)(gsSysSpecificData.hSGXPCI);
++
++ if (eNewPowerState != gpsSysData->eCurrentPowerState)
++ {
++ if ((gpsSysData->eCurrentPowerState == PVRSRV_SYS_POWER_STATE_D3) &&
++ (eNewPowerState < PVRSRV_SYS_POWER_STATE_D3))
++ {
++ eError = OSPCIResumeDev(gsSysSpecificData.hSGXPCI);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: OSPCIResumeDev failed (%d)", eError));
++ return eError;
++ }
++
++ //Restore some pci state that will not have gotten restored properly by pci_restore_state()
++ pci_write_config_dword(psPVRPCI->psPCIDev, 0x5c, gsSysSpecificData.saveBSM);
++ pci_write_config_dword(psPVRPCI->psPCIDev, 0xFC, gsSysSpecificData.saveVBT);
++ pci_write_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_ADDR_LOC, gsSysSpecificData.msi_addr);
++ pci_write_config_dword(psPVRPCI->psPCIDev, MRST_PCIx_MSI_DATA_LOC, gsSysSpecificData.msi_data);
++
++ eError = SysLocateDevices(gpsSysData);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: Failed to locate devices"));
++ return eError;
++ }
++
++ eError = SysMapInRegisters();
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: Failed to map in registers"));
++ return eError;
++ }
++
++ drm_irq_install(gpDrmDevice);
++ }
++ }
++ return eError;
++}
++
++
++PVRSRV_ERROR SysDevicePrePowerState(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF))
++ {
++ if (ui32DeviceIndex == gui32SGXDeviceID)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"SysDevicePrePowerState: Remove SGX power"));
++ sysirq_uninstall_islands(gpDrmDevice, OSPM_GRAPHICS_ISLAND);
++ ospm_power_island_down(OSPM_GRAPHICS_ISLAND);
++ }
++ else if (ui32DeviceIndex == gui32MRSTDisplayDeviceID)
++ {
++ sysirq_uninstall_islands(gpDrmDevice, OSPM_DISPLAY_ISLAND);
++ ospm_power_island_down(OSPM_DISPLAY_ISLAND);
++ }
++ else if (ui32DeviceIndex == gui32MRSTMSVDXDeviceID)
++ {
++ sysirq_uninstall_islands(gpDrmDevice, OSPM_VIDEO_DEC_ISLAND);
++ if (ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ ospm_power_island_down(OSPM_VIDEO_DEC_ISLAND);
++ } else {
++ ospm_power_island_up(OSPM_DISPLAY_ISLAND);
++ ospm_power_island_down(OSPM_VIDEO_DEC_ISLAND);
++ ospm_power_island_down(OSPM_DISPLAY_ISLAND);
++ }
++ }
++ else if (ui32DeviceIndex == gui32MRSTTOPAZDeviceID)
++ {
++ if (ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ ospm_power_island_down(OSPM_VIDEO_ENC_ISLAND);
++ } else {
++ ospm_power_island_up(OSPM_DISPLAY_ISLAND);
++ ospm_power_island_down(OSPM_VIDEO_ENC_ISLAND);
++ ospm_power_island_down(OSPM_DISPLAY_ISLAND);
++ }
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SysDevicePostPowerState(IMG_UINT32 ui32DeviceIndex,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ if ((eNewPowerState != eCurrentPowerState) &&
++ (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF))
++ {
++ if (ui32DeviceIndex == gui32SGXDeviceID)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"SysDevicePostPowerState: Restore SGX power"));
++ ospm_power_island_up(OSPM_GRAPHICS_ISLAND);
++ sysirq_preinstall_islands(gpDrmDevice, OSPM_GRAPHICS_ISLAND);
++ sysirq_postinstall_islands(gpDrmDevice, OSPM_GRAPHICS_ISLAND);
++ }
++ else if (ui32DeviceIndex == gui32MRSTDisplayDeviceID)
++ {
++ ospm_power_island_up(OSPM_DISPLAY_ISLAND);
++ sysirq_preinstall_islands(gpDrmDevice, OSPM_DISPLAY_ISLAND);
++ sysirq_postinstall_islands(gpDrmDevice, OSPM_DISPLAY_ISLAND);
++ }
++ else if (ui32DeviceIndex == gui32MRSTMSVDXDeviceID)
++ {
++ if (ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ ospm_power_island_up(OSPM_VIDEO_DEC_ISLAND);
++ } else {
++ ospm_power_island_up(OSPM_DISPLAY_ISLAND);
++ ospm_power_island_up(OSPM_VIDEO_DEC_ISLAND);
++ ospm_power_island_down(OSPM_DISPLAY_ISLAND);
++ }
++ sysirq_preinstall_islands(gpDrmDevice, OSPM_VIDEO_DEC_ISLAND);
++ sysirq_postinstall_islands(gpDrmDevice, OSPM_VIDEO_DEC_ISLAND);
++ }
++ else if (ui32DeviceIndex == gui32MRSTTOPAZDeviceID)
++ {
++ if (ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ ospm_power_island_up(OSPM_VIDEO_ENC_ISLAND);
++ } else {
++ ospm_power_island_up(OSPM_DISPLAY_ISLAND);
++ ospm_power_island_up(OSPM_VIDEO_ENC_ISLAND);
++ ospm_power_island_down(OSPM_DISPLAY_ISLAND);
++ }
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysconfig.h b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysconfig.h
+new file mode 100644
+index 0000000..0476e2c
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysconfig.h
+@@ -0,0 +1,139 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SOCCONFIG_H__)
++#define __SOCCONFIG_H__
++#include "syscommon.h"
++
++#define VS_PRODUCT_NAME "SGX Moorestown"
++
++#define SYS_NO_POWER_LOCK_TIMEOUT
++
++#define SGX_FEATURE_HOST_PORT
++
++#define SYS_SGX_USSE_COUNT (2)
++
++#define POULSBO_REGS_OFFSET 0x00000
++#define POULSBO_REG_SIZE 0x2100
++
++#define SGX_REGS_OFFSET 0x80000
++#define PSB_SGX_REGS_OFFSET 0x40000
++#define SGX_REG_SIZE 0x4000
++#define MSVDX_REGS_OFFSET 0x50000
++
++#ifdef SUPPORT_MSVDX
++#define POULSBO_MAX_OFFSET (MSVDX_REGS_OFFSET + MSVDX_REG_SIZE)
++#else
++#define POULSBO_MAX_OFFSET (SGX_REGS_OFFSET + SGX_REG_SIZE)
++#define PSB_POULSBO_MAX_OFFSET (PSB_SGX_REGS_OFFSET + SGX_REG_SIZE)
++#endif
++
++#define SYS_SGX_DEV_VENDOR_ID 0x8086
++#define PSB_SYS_SGX_DEV_DEVICE_ID_1 0x8108
++#define PSB_SYS_SGX_DEV_DEVICE_ID_2 0x8109
++
++#define SYS_SGX_DEVICE_IDS \
++ {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
++ {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
++ {0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100}, \
++ {0, 0, 0}
++
++
++#define MMADR_INDEX 4
++#define IOPORT_INDEX 5
++#define GMADR_INDEX 6
++#define MMUADR_INDEX 7
++#define FBADR_INDEX 23
++#define FBSIZE_INDEX 24
++
++#define DISPLAY_SURFACE_SIZE (4 * 1024 * 1024)
++
++#define DEVICE_SGX_INTERRUPT (1<<0)
++#define DEVICE_MSVDX_INTERRUPT (1<<1)
++#define DEVICE_DISP_INTERRUPT (1<<2)
++#define DEVICE_TOPAZ_INTERRUPT (1<<3)
++
++#define POULSBO_DISP_MASK (1<<17)
++#define POULSBO_THALIA_MASK (1<<18)
++#define POULSBO_MSVDX_MASK (1<<19)
++#define POULSBO_VSYNC_PIPEA_VBLANK_MASK (1<<7)
++#define POULSBO_VSYNC_PIPEA_EVENT_MASK (1<<6)
++#define POULSBO_VSYNC_PIPEB_VBLANK_MASK (1<<5)
++#define POULSBO_VSYNC_PIPEB_EVENT_MASK (1<<4)
++
++#define POULSBO_DISPLAY_REGS_OFFSET 0x70000
++#define POULSBO_DISPLAY_REG_SIZE 0x2000
++
++#define POULSBO_DISPLAY_A_CONFIG 0x00008
++#define POULSBO_DISPLAY_A_STATUS_SELECT 0x00024
++#define POULSBO_DISPLAY_B_CONFIG 0x01008
++#define POULSBO_DISPLAY_B_STATUS_SELECT 0x01024
++
++#define POULSBO_DISPLAY_PIPE_ENABLE (1<<31)
++#define POULSBO_DISPLAY_VSYNC_STS_EN (1<<25)
++#define POULSBO_DISPLAY_VSYNC_STS (1<<9)
++
++#if defined(SGX_FEATURE_HOST_PORT)
++ #define SYS_SGX_HP_SIZE 0x8000000
++ #define PSB_SYS_SGX_HP_SIZE 0x4000000
++
++ #define SYS_SGX_HOSTPORT_BASE_DEVVADDR 0xD0000000
++ #if defined(FIX_HW_BRN_22997) && defined(FIX_HW_BRN_23030)
++
++
++
++ #define SYS_SGX_HOSTPORT_BRN23030_OFFSET 0x7C00000
++ #endif
++#endif
++
++
++typedef struct
++{
++ union
++ {
++#if !defined(VISTA)
++ IMG_UINT8 aui8PCISpace[256];
++ IMG_UINT16 aui16PCISpace[128];
++ IMG_UINT32 aui32PCISpace[64];
++#endif
++ struct
++ {
++ IMG_UINT16 ui16VenID;
++ IMG_UINT16 ui16DevID;
++ IMG_UINT16 ui16PCICmd;
++ IMG_UINT16 ui16PCIStatus;
++ }s;
++ }u;
++} PCICONFIG_SPACE, *PPCICONFIG_SPACE;
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysinfo.h b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysinfo.h
+new file mode 100644
+index 0000000..97d02dd
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysinfo.h
+@@ -0,0 +1,43 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SYSINFO_H__)
++#define __SYSINFO_H__
++
++#define MAX_HW_TIME_US (500000)
++#define WAIT_TRY_COUNT (10000)
++
++typedef enum _SYS_DEVICE_TYPE_
++{
++ SYS_DEVICE_SGX = 0,
++
++ SYS_DEVICE_FORCE_I16 = 0x7fff
++
++} SYS_DEVICE_TYPE;
++
++#define SYS_DEVICE_COUNT 4
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysirq.c b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysirq.c
+new file mode 100644
+index 0000000..d71196e
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysirq.c
+@@ -0,0 +1,565 @@
++/**************************************************************************
++ * Copyright (c) 2009, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Benjamin Defnet <benjamin.r.defnet@intel.com>
++ *
++ **************************************************************************/
++
++#include "sysirq.h"
++#include "sysconfig.h"
++#include "psb_drv.h"
++#include "ospm_power.h"
++#include "lnc_topaz.h"
++#include "psb_msvdx.h"
++#include "psb_intel_reg.h"
++
++extern SYS_DATA* gpsSysData;
++extern struct drm_device *gpDrmDevice;
++
++void sysirq_preinstall_islands(struct drm_device *dev, int hw_islands);
++int sysirq_postinstall_islands(struct drm_device *dev, int hw_islands);
++static void sysirq_enable_pipestat(struct drm_psb_private *dev_priv, u32 mask);
++static void sysirq_disable_pipestat(struct drm_psb_private *dev_priv, u32 mask);
++
++bool sysirq_init(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++
++ OSInstallMISR(gpsSysData);
++
++ PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
++ PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
++
++ dev_priv->vdc_irq_mask = 0;
++ dev_priv->pipestat[0] = 0;
++ dev_priv->pipestat[1] = 0;
++
++ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
++
++ if (drm_vblank_init(dev, PSB_NUM_PIPE) != 0)
++ return false;
++
++ if (drm_irq_install(dev) != 0)
++ return false;
++
++ dev->vblank_disable_allowed = 1;
++ dev_priv->vblanksEnabledForFlips = false;
++
++ return true;
++}
++
++void sysirq_uninit(struct drm_device *dev)
++{
++ drm_irq_uninstall(dev);
++ drm_vblank_cleanup(dev);
++ OSUninstallMISR(gpsSysData);
++}
++
++void sysirq_preinstall(struct drm_device *dev)
++{
++ sysirq_preinstall_islands(dev, OSPM_ALL_ISLANDS);
++}
++
++void sysirq_preinstall_islands(struct drm_device *dev, int hw_islands)
++{
++#if defined (SYS_USING_INTERRUPTS)
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++
++ if ((hw_islands & OSPM_DISPLAY_ISLAND) && ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ if (dev->vblank_enabled[0] || dev_priv->vblanksEnabledForFlips)
++ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
++ if (dev_priv->psb_dpst_state)
++ dev_priv->vdc_irq_mask |= _PSB_DPST_PIPEA_FLAG;
++ }
++ if ((hw_islands & OSPM_GRAPHICS_ISLAND) && ospm_power_is_hw_on(OSPM_GRAPHICS_ISLAND))
++ dev_priv->vdc_irq_mask |= _PSB_IRQ_SGX_FLAG;
++ if ((hw_islands & OSPM_VIDEO_DEC_ISLAND) && ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND))
++ dev_priv->vdc_irq_mask |= _PSB_IRQ_MSVDX_FLAG;
++ if (IS_MRST(dev) && (hw_islands & OSPM_VIDEO_ENC_ISLAND) && !dev_priv->topaz_disabled &&
++ ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND))
++ dev_priv->vdc_irq_mask |= _LNC_IRQ_TOPAZ_FLAG;
++
++ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++#endif
++}
++
++
++int sysirq_postinstall(struct drm_device *dev)
++{
++ return sysirq_postinstall_islands(dev, OSPM_ALL_ISLANDS);
++}
++
++int sysirq_postinstall_islands(struct drm_device *dev, int hw_islands)
++{
++#if defined (SYS_USING_INTERRUPTS)
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++
++ if ((hw_islands & OSPM_DISPLAY_ISLAND) && ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ if (IS_POULSBO(dev))
++ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
++
++ if (dev_priv->vdc_irq_mask & _PSB_VSYNC_PIPEA_FLAG) {
++ if (IS_MRST(dev))
++ sysirq_enable_pipestat(dev_priv,
++ PIPE_START_VBLANK_INTERRUPT_ENABLE |
++ PIPE_VBLANK_INTERRUPT_ENABLE);
++ else
++ sysirq_enable_pipestat(dev_priv,
++ PIPE_VBLANK_INTERRUPT_ENABLE);
++
++ } else {
++ sysirq_disable_pipestat(dev_priv,
++ PIPE_VBLANK_INTERRUPT_ENABLE |
++ PIPE_START_VBLANK_INTERRUPT_ENABLE);
++ }
++
++ if (dev_priv->vdc_irq_mask & _PSB_DPST_PIPEA_FLAG) {
++ printk(KERN_ALERT "TURNING ON DPST\n");
++ sysirq_turn_on_dpst(dev);
++ } else {
++ printk(KERN_ALERT "TURNING OFF DPST\n");
++ sysirq_turn_off_dpst(dev);
++ }
++ }
++
++ if (IS_MRST(dev) && (hw_islands & OSPM_VIDEO_ENC_ISLAND) && !dev_priv->topaz_disabled &&
++ ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND))
++ lnc_topaz_enableirq(dev);
++
++ if ((hw_islands & OSPM_VIDEO_DEC_ISLAND) && ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND))
++ psb_msvdx_enableirq(dev);
++
++ /*This register is safe even if display island is off*/
++ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++#endif
++ return 0;
++}
++
++void sysirq_uninstall(struct drm_device *dev)
++{
++ sysirq_uninstall_islands(dev, OSPM_ALL_ISLANDS);
++}
++
++void sysirq_uninstall_islands(struct drm_device *dev, int hw_islands)
++{
++#if defined (SYS_USING_INTERRUPTS)
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++
++ if ((hw_islands & OSPM_DISPLAY_ISLAND) && ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND)) {
++ if (dev_priv->vdc_irq_mask & _PSB_VSYNC_PIPEA_FLAG)
++ sysirq_disable_pipestat(dev_priv,
++ PIPE_VBLANK_INTERRUPT_ENABLE |
++ PIPE_START_VBLANK_INTERRUPT_ENABLE);
++ if (dev_priv->vdc_irq_mask & _PSB_DPST_PIPEA_FLAG)
++ sysirq_turn_off_dpst(dev);
++
++ dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
++ _PSB_IRQ_MSVDX_FLAG |
++ _LNC_IRQ_TOPAZ_FLAG;
++ }
++
++ if (hw_islands & OSPM_GRAPHICS_ISLAND)
++ dev_priv->vdc_irq_mask &= ~_PSB_IRQ_SGX_FLAG;
++
++ if (hw_islands & OSPM_VIDEO_DEC_ISLAND)
++ dev_priv->vdc_irq_mask &= ~_PSB_IRQ_MSVDX_FLAG;
++
++ if (hw_islands & OSPM_VIDEO_ENC_ISLAND)
++ dev_priv->vdc_irq_mask &= ~_LNC_IRQ_TOPAZ_FLAG;
++
++ /*These two registers are safe even if display island is off*/
++ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++
++ wmb();
++
++ /*This register is safe even if display island is off*/
++ PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
++
++ if (IS_MRST(dev) && (hw_islands & OSPM_VIDEO_ENC_ISLAND) && !dev_priv->topaz_disabled &&
++ ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND))
++ lnc_topaz_disableirq(dev);
++
++ if ((hw_islands & OSPM_VIDEO_DEC_ISLAND) && ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND))
++ psb_msvdx_disableirq(dev);
++
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++#endif
++}
++
++irqreturn_t sysirq_handler(DRM_IRQ_ARGS)
++{
++ bool bStatus = false;
++#if defined(SYS_USING_INTERRUPTS)
++ struct drm_device *dev = (struct drm_device *) arg;
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++
++ spin_lock(&dev_priv->irqmask_lock);
++
++ /* Now process all of the other interrupts */
++ bStatus = PVRSRVSystemLISR(gpsSysData);
++
++ if (bStatus)
++ {
++ OSScheduleMISR((IMG_VOID *)gpsSysData);
++ }
++
++ spin_unlock(&dev_priv->irqmask_lock);
++
++#endif
++ return bStatus ? IRQ_HANDLED : IRQ_NONE;
++}
++
++
++IMG_UINT32 SysGetInterruptSource(SYS_DATA* psSysData, PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) gpDrmDevice->dev_private;
++
++ IMG_UINT32 ui32Devices = 0;
++ IMG_UINT32 ui32Data, ui32DIMMask;
++
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++
++ ui32Data = PSB_RVDC32(PSB_INT_IDENTITY_R);
++
++ if ((ui32Data & _PSB_IRQ_SGX_FLAG) && ospm_power_is_hw_on(OSPM_GRAPHICS_ISLAND))
++ {
++ ui32Devices |= DEVICE_SGX_INTERRUPT;
++ }
++
++ if ((ui32Data & _PSB_IRQ_MSVDX_FLAG) && ospm_power_is_hw_on(OSPM_VIDEO_DEC_ISLAND)) {
++ ui32Devices |= DEVICE_MSVDX_INTERRUPT;
++ }
++
++ if ((ui32Data & _LNC_IRQ_TOPAZ_FLAG) && ospm_power_is_hw_on(OSPM_VIDEO_ENC_ISLAND)) {
++ ui32Devices |= DEVICE_TOPAZ_INTERRUPT;
++ }
++
++ ui32DIMMask = PSB_RVDC32(PSB_INT_ENABLE_R);
++ ui32DIMMask &= ~(_PSB_IRQ_SGX_FLAG | _PSB_IRQ_MSVDX_FLAG | _LNC_IRQ_TOPAZ_FLAG);
++
++ if ((ui32Data & ui32DIMMask) && ospm_power_is_hw_on(OSPM_DISPLAY_ISLAND))
++ {
++ ui32Devices |= DEVICE_DISP_INTERRUPT;
++ }
++
++ return (ui32Devices);
++}
++
++IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) gpDrmDevice->dev_private;
++ IMG_UINT32 ui32Data;
++
++ PVR_UNREFERENCED_PARAMETER(psSysData);
++ PVR_UNREFERENCED_PARAMETER(ui32ClearBits);
++
++ ui32Data = PSB_RVDC32(PSB_INT_IDENTITY_R);
++ ui32Data &= dev_priv->vdc_irq_mask;
++ PSB_WVDC32(ui32Data, PSB_INT_IDENTITY_R);
++ ui32Data = PSB_RVDC32(PSB_INT_IDENTITY_R);
++}
++
++void sysirq_turn_on_dpst(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ u32 hist_reg;
++ u32 pwm_reg;
++ u32 pipea_stat;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ PSB_WVDC32(BIT31, HISTOGRAM_LOGIC_CONTROL);
++ hist_reg = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
++ PSB_WVDC32(BIT31, HISTOGRAM_INT_CONTROL);
++ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++
++ PSB_WVDC32(0x80010100, PWM_CONTROL_LOGIC);
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++ PSB_WVDC32(pwm_reg | PWM_PHASEIN_ENABLE | PWM_PHASEIN_INT_ENABLE,
++ PWM_CONTROL_LOGIC);
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++
++ pipea_stat = PSB_RVDC32(PIPEASTAT);
++ PSB_WVDC32(pipea_stat | PIPE_DPST_EVENT_ENABLE, PIPEASTAT);
++ pipea_stat = PSB_RVDC32(PIPEASTAT);
++
++ PSB_WVDC32(pipea_stat | PIPE_DPST_EVENT_STATUS, PIPEASTAT);
++ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++ PSB_WVDC32(hist_reg | HISTOGRAM_INT_CTRL_CLEAR,HISTOGRAM_INT_CONTROL);
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++ PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE, PWM_CONTROL_LOGIC);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++}
++
++int sysirq_enable_dpst(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ /* enable DPST */
++ dev_priv->vdc_irq_mask |= _PSB_DPST_PIPEA_FLAG;
++ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++
++ sysirq_turn_on_dpst(dev);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++ return 0;
++}
++
++void sysirq_turn_off_dpst(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ u32 hist_reg;
++ u32 pwm_reg;
++ u32 pipea_stat;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
++ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++
++ pipea_stat = PSB_RVDC32(PIPEASTAT);
++ PSB_WVDC32(pipea_stat & ~PIPE_DPST_EVENT_ENABLE, PIPEASTAT);
++ pipea_stat = PSB_RVDC32(PIPEASTAT);
++
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++ PSB_WVDC32(pwm_reg & !(PWM_PHASEIN_INT_ENABLE), PWM_CONTROL_LOGIC);
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++}
++
++int sysirq_disable_dpst(struct drm_device *dev)
++{
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++ u32 hist_reg;
++ u32 pwm_reg;
++ u32 pipea_stat;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ dev_priv->vdc_irq_mask &= ~_PSB_DPST_PIPEA_FLAG;
++ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++
++ PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
++ hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++
++ pipea_stat = PSB_RVDC32(PIPEASTAT);
++ PSB_WVDC32(pipea_stat & ~PIPE_DPST_EVENT_ENABLE, PIPEASTAT);
++ pipea_stat = PSB_RVDC32(PIPEASTAT);
++
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++ PSB_WVDC32(pwm_reg & !(PWM_PHASEIN_INT_ENABLE), PWM_CONTROL_LOGIC);
++ pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++
++ return 0;
++}
++
++/* Called from drm generic code, passed 'crtc' which
++ * we use as a pipe index
++ */
++int sysirq_enable_vblank(struct drm_device *dev, int pipe)
++{
++#if defined(SYS_USING_INTERRUPTS)
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++ u32 pipeconf = 0;
++
++ if (pipe != 0)
++ return -EINVAL;
++
++ //Check if already enabled
++ if (dev_priv->vdc_irq_mask & _PSB_VSYNC_PIPEA_FLAG)
++ return 0;
++
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ pipeconf = REG_READ(pipeconf_reg);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ if (!(pipeconf & PIPEACONF_ENABLE))
++ return -EINVAL;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ if (pipe == 0)
++ dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
++ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++ if (IS_MRST(dev)) {
++ sysirq_enable_pipestat(dev_priv,
++ PIPE_START_VBLANK_INTERRUPT_ENABLE |
++ PIPE_VBLANK_INTERRUPT_ENABLE);
++ } else
++ sysirq_enable_pipestat(dev_priv,
++ PIPE_VBLANK_INTERRUPT_ENABLE);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++#endif
++ return 0;
++}
++
++
++/* Called from drm generic code, passed 'crtc' which
++ * we use as a pipe index
++ */
++void sysirq_disable_vblank(struct drm_device *dev, int pipe)
++{
++#if defined(SYS_USING_INTERRUPTS)
++ struct drm_psb_private *dev_priv =
++ (struct drm_psb_private *) dev->dev_private;
++ unsigned long irqflags;
++
++ //Don't disable if flips currently require vblanks to be enabled
++ if (dev_priv->vblanksEnabledForFlips)
++ return;
++
++ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ if (pipe == 0)
++ dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
++ PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++ PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++ sysirq_disable_pipestat(dev_priv,
++ PIPE_VBLANK_INTERRUPT_ENABLE |
++ PIPE_START_VBLANK_INTERRUPT_ENABLE);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++#endif
++}
++
++
++static void
++sysirq_enable_pipestat(struct drm_psb_private *dev_priv, u32 mask)
++{
++ if ((dev_priv->pipestat[0] & mask) != mask) {
++ dev_priv->pipestat[0] |= mask;
++ /* Enable the interrupt, clear any pending status */
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ u32 writeVal = PSB_RVDC32(PIPEASTAT);
++ writeVal |= (mask | (mask >> 16));
++ PSB_WVDC32(writeVal, PIPEASTAT);
++ (void) PSB_RVDC32(PIPEASTAT);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ }
++}
++
++static void
++sysirq_disable_pipestat(struct drm_psb_private *dev_priv, u32 mask)
++{
++ if ((dev_priv->pipestat[0] & mask) != 0) {
++ dev_priv->pipestat[0] &= ~mask;
++ if (ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON)) {
++ u32 writeVal = PSB_RVDC32(PIPEASTAT);
++ writeVal &= ~mask;
++ PSB_WVDC32(writeVal, PIPEASTAT);
++ (void) PSB_RVDC32(PIPEASTAT);
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++ }
++ }
++}
++
++
++/* Called from drm generic code, passed a 'crtc', which
++ * we use as a pipe index
++ */
++u32 sysirq_get_vblank_counter(struct drm_device *dev, int pipe)
++{
++ u32 count = 0;
++#if defined(SYS_USING_INTERRUPTS)
++ unsigned long high_frame;
++ unsigned long low_frame;
++ u32 high1, high2, low;
++
++ if (pipe != 0)
++ return 0;
++
++ if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, OSPM_UHB_ONLY_IF_ON))
++ return 0;
++
++ high_frame = PIPEAFRAMEHIGH;
++ low_frame = PIPEAFRAMEPIXEL;
++
++ if (!(REG_READ(PIPEACONF) & PIPEACONF_ENABLE)) {
++ DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
++ goto sysirq_get_vblank_counter_exit;
++ }
++
++ /*
++ * High & low register fields aren't synchronized, so make sure
++ * we get a low value that's stable across two reads of the high
++ * register.
++ */
++ do {
++ high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
++ PIPE_FRAME_HIGH_SHIFT);
++ low = ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
++ PIPE_FRAME_LOW_SHIFT);
++ high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
++ PIPE_FRAME_HIGH_SHIFT);
++ } while (high1 != high2);
++
++ count = (high1 << 8) | low;
++
++sysirq_get_vblank_counter_exit:
++
++ ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND);
++#endif
++ return count;
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysirq.h b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysirq.h
+new file mode 100644
+index 0000000..fef16be
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysirq.h
+@@ -0,0 +1,49 @@
++/**************************************************************************
++ * Copyright (c) 2009, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Benjamin Defnet <benjamin.r.defnet@intel.com>
++ * Rajesh Poornachandran <rajesh.poornachandran@intel.com>
++ *
++ **************************************************************************/
++
++#ifndef _SYSIRQ_H_
++#define _SYSIRQ_H_
++
++#include <drm/drmP.h>
++
++bool sysirq_init(struct drm_device *dev);
++void sysirq_uninit(struct drm_device *dev);
++
++void sysirq_preinstall(struct drm_device *dev);
++int sysirq_postinstall(struct drm_device *dev);
++void sysirq_uninstall(struct drm_device *dev);
++irqreturn_t sysirq_handler(DRM_IRQ_ARGS);
++
++void sysirq_preinstall_islands(struct drm_device *dev, int hw_islands);
++int sysirq_postinstall_islands(struct drm_device *dev, int hw_islands);
++void sysirq_uninstall_islands(struct drm_device *dev, int hw_islands);
++
++int sysirq_enable_dpst(struct drm_device *dev);
++int sysirq_disable_dpst(struct drm_device *dev);
++void sysirq_turn_on_dpst(struct drm_device *dev);
++void sysirq_turn_off_dpst(struct drm_device *dev);
++int sysirq_enable_vblank(struct drm_device *dev, int pipe);
++void sysirq_disable_vblank(struct drm_device *dev, int pipe);
++u32 sysirq_get_vblank_counter(struct drm_device *dev, int pipe);
++
++#endif //_SYSIRQ_H_
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/syslocal.h b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/syslocal.h
+new file mode 100644
+index 0000000..8e97cab
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/syslocal.h
+@@ -0,0 +1,82 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#if !defined(__SYSLOCAL_H__)
++#define __SYSLOCAL_H__
++
++#define SYS_SPECIFIC_DATA_PCI_ACQUIRE_DEV 0x00000001
++#define SYS_SPECIFIC_DATA_PCI_REQUEST_SGX_ADDR_RANGE 0x00000002
++#define SYS_SPECIFIC_DATA_PCI_REQUEST_HOST_PORT_RANGE 0x00000004
++#if defined(NO_HARDWARE)
++#define SYS_SPECIFIC_DATA_ALLOC_DUMMY_SGX_REGS 0x00000008
++#if defined(SUPPORT_MSVDX)
++#define SYS_SPECIFIC_DATA_ALLOC_DUMMY_MSVDX_REGS 0x00000020
++#endif
++#endif
++#define SYS_SPECIFIC_DATA_SGX_INITIALISED 0x00000040
++#if defined(SUPPORT_MSVDX)
++#define SYS_SPECIFIC_DATA_MSVDX_INITIALISED 0x00000080
++#endif
++#define SYS_SPECIFIC_DATA_MISR_INSTALLED 0x00000100
++#define SYS_SPECIFIC_DATA_LISR_INSTALLED 0x00000200
++#define SYS_SPECIFIC_DATA_PDUMP_INIT 0x00000400
++#define SYS_SPECIFIC_DATA_IRQ_ENABLED 0x00000800
++
++#define SYS_SPECIFIC_DATA_PM_UNMAP_SGX_REGS 0x00001000
++#define SYS_SPECIFIC_DATA_PM_UNMAP_SGX_HP 0x00004000
++#define SYS_SPECIFIC_DATA_PM_UNMAP_MSVDX_REGS 0x00008000
++#define SYS_SPECIFIC_DATA_PM_IRQ_DISABLE 0x00010000
++#define SYS_SPECIFIC_DATA_PM_UNINSTALL_LISR 0x00020000
++
++#define SYS_SPECIFIC_DATA_SET(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData |= (flag)))
++
++#define SYS_SPECIFIC_DATA_CLEAR(psSysSpecData, flag) ((IMG_VOID)((psSysSpecData)->ui32SysSpecificData &= ~(flag)))
++
++#define SYS_SPECIFIC_DATA_TEST(psSysSpecData, flag) (((psSysSpecData)->ui32SysSpecificData & (flag)) != 0)
++
++
++typedef struct _SYS_SPECIFIC_DATA_TAG_
++{
++
++ IMG_UINT32 ui32SysSpecificData;
++#ifdef __linux__
++ PVRSRV_PCI_DEV_HANDLE hSGXPCI;
++#endif
++#ifdef LDM_PCI
++ struct pci_dev *psPCIDev;
++#endif
++ /* MSI reg save */
++ uint32_t msi_addr;
++ uint32_t msi_data;
++
++ uint32_t saveBSM;
++ uint32_t saveVBT;
++} SYS_SPECIFIC_DATA;
++
++
++#endif
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysutils.c b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysutils.c
+new file mode 100644
+index 0000000..b89a1da
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/services4/system/moorestown/sysutils.c
+@@ -0,0 +1,30 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "sysinfo.h"
++#include "syslocal.h"
++
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/client/linuxsrv.h b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/client/linuxsrv.h
+new file mode 100644
+index 0000000..adfcd75
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/client/linuxsrv.h
+@@ -0,0 +1,48 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _LINUXSRV_H__
++#define _LINUXSRV_H__
++
++typedef struct tagIOCTL_PACKAGE
++{
++ IMG_UINT32 ui32Cmd;
++ IMG_UINT32 ui32Size;
++ IMG_VOID *pInBuffer;
++ IMG_UINT32 ui32InBufferSize;
++ IMG_VOID *pOutBuffer;
++ IMG_UINT32 ui32OutBufferSize;
++} IOCTL_PACKAGE;
++
++IMG_UINT32 DeviceIoControl(IMG_UINT32 hDevice,
++ IMG_UINT32 ui32ControlCode,
++ IMG_VOID *pInBuffer,
++ IMG_UINT32 ui32InBufferSize,
++ IMG_VOID *pOutBuffer,
++ IMG_UINT32 ui32OutBufferSize,
++ IMG_UINT32 *pui32BytesReturned);
++
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/dbgdriv.c b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/dbgdriv.c
+new file mode 100644
+index 0000000..b769273
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/dbgdriv.c
+@@ -0,0 +1,2075 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++#ifdef LINUX
++#include <linux/string.h>
++#endif
++
++#include "img_types.h"
++#include "pvr_debug.h"
++#include "dbgdrvif.h"
++#include "dbgdriv.h"
++#include "hotkey.h"
++#include "hostfunc.h"
++
++
++
++
++#define LAST_FRAME_BUF_SIZE 1024
++
++typedef struct _DBG_LASTFRAME_BUFFER_ {
++ PDBG_STREAM psStream;
++ IMG_UINT8 ui8Buffer[LAST_FRAME_BUF_SIZE];
++ IMG_UINT32 ui32BufLen;
++ struct _DBG_LASTFRAME_BUFFER_ *psNext;
++} *PDBG_LASTFRAME_BUFFER;
++
++
++static PDBG_STREAM g_psStreamList = 0;
++static PDBG_LASTFRAME_BUFFER g_psLFBufferList;
++
++static IMG_UINT32 g_ui32LOff = 0;
++static IMG_UINT32 g_ui32Line = 0;
++static IMG_UINT32 g_ui32MonoLines = 25;
++
++static IMG_BOOL g_bHotkeyMiddump = IMG_FALSE;
++static IMG_UINT32 g_ui32HotkeyMiddumpStart = 0xffffffff;
++static IMG_UINT32 g_ui32HotkeyMiddumpEnd = 0xffffffff;
++
++IMG_VOID * g_pvAPIMutex=IMG_NULL;
++
++extern IMG_UINT32 g_ui32HotKeyFrame;
++extern IMG_BOOL g_bHotKeyPressed;
++extern IMG_BOOL g_bHotKeyRegistered;
++
++IMG_BOOL gbDumpThisFrame = IMG_FALSE;
++
++
++IMG_UINT32 SpaceInStream(PDBG_STREAM psStream);
++IMG_BOOL ExpandStreamBuffer(PDBG_STREAM psStream, IMG_UINT32 ui32NewSize);
++PDBG_LASTFRAME_BUFFER FindLFBuf(PDBG_STREAM psStream);
++
++DBGKM_SERVICE_TABLE g_sDBGKMServices =
++{
++ sizeof (DBGKM_SERVICE_TABLE),
++ ExtDBGDrivCreateStream,
++ ExtDBGDrivDestroyStream,
++ ExtDBGDrivFindStream,
++ ExtDBGDrivWriteString,
++ ExtDBGDrivReadString,
++ ExtDBGDrivWrite,
++ ExtDBGDrivRead,
++ ExtDBGDrivSetCaptureMode,
++ ExtDBGDrivSetOutputMode,
++ ExtDBGDrivSetDebugLevel,
++ ExtDBGDrivSetFrame,
++ ExtDBGDrivGetFrame,
++ ExtDBGDrivOverrideMode,
++ ExtDBGDrivDefaultMode,
++ ExtDBGDrivWrite2,
++ ExtDBGDrivWriteStringCM,
++ ExtDBGDrivWriteCM,
++ ExtDBGDrivSetMarker,
++ ExtDBGDrivGetMarker,
++ ExtDBGDrivStartInitPhase,
++ ExtDBGDrivStopInitPhase,
++ ExtDBGDrivIsCaptureFrame,
++ ExtDBGDrivWriteLF,
++ ExtDBGDrivReadLF,
++ ExtDBGDrivGetStreamOffset,
++ ExtDBGDrivSetStreamOffset,
++ ExtDBGDrivIsLastCaptureFrame,
++ ExtDBGDrivWaitForEvent
++};
++
++
++
++
++
++IMG_VOID * IMG_CALLCONV ExtDBGDrivCreateStream(IMG_CHAR * pszName, IMG_UINT32 ui32CapMode, IMG_UINT32 ui32OutMode, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size)
++{
++ IMG_VOID * pvRet;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ pvRet=DBGDrivCreateStream(pszName, ui32CapMode, ui32OutMode, ui32Flags, ui32Size);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return pvRet;
++}
++
++void IMG_CALLCONV ExtDBGDrivDestroyStream(PDBG_STREAM psStream)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivDestroyStream(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++IMG_VOID * IMG_CALLCONV ExtDBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream)
++{
++ IMG_VOID * pvRet;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ pvRet=DBGDrivFindStream(pszName, bResetStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return pvRet;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivWriteString(psStream, pszString, ui32Level);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivReadString(psStream, pszString, ui32Limit);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivWrite(psStream, pui8InBuf, ui32InBuffSize, ui32Level);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivRead(PDBG_STREAM psStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 * pui8OutBuf)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivRead(psStream, bReadInitBuffer, ui32OutBuffSize, pui8OutBuf);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++void IMG_CALLCONV ExtDBGDrivSetCaptureMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode,IMG_UINT32 ui32Start,IMG_UINT32 ui32End,IMG_UINT32 ui32SampleRate)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetCaptureMode(psStream, ui32Mode, ui32Start, ui32End, ui32SampleRate);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++void IMG_CALLCONV ExtDBGDrivSetOutputMode(PDBG_STREAM psStream,IMG_UINT32 ui32OutMode)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetOutputMode(psStream, ui32OutMode);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++void IMG_CALLCONV ExtDBGDrivSetDebugLevel(PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetDebugLevel(psStream, ui32DebugLevel);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++void IMG_CALLCONV ExtDBGDrivSetFrame(PDBG_STREAM psStream,IMG_UINT32 ui32Frame)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetFrame(psStream, ui32Frame);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetFrame(PDBG_STREAM psStream)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivGetFrame(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_BOOL IMG_CALLCONV ExtDBGDrivIsLastCaptureFrame(PDBG_STREAM psStream)
++{
++ IMG_BOOL bRet;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ bRet = DBGDrivIsLastCaptureFrame(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return bRet;
++}
++
++IMG_BOOL IMG_CALLCONV ExtDBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame)
++{
++ IMG_BOOL bRet;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ bRet = DBGDrivIsCaptureFrame(psStream, bCheckPreviousFrame);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return bRet;
++}
++
++void IMG_CALLCONV ExtDBGDrivOverrideMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivOverrideMode(psStream, ui32Mode);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++void IMG_CALLCONV ExtDBGDrivDefaultMode(PDBG_STREAM psStream)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivDefaultMode(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivWrite2(psStream, pui8InBuf, ui32InBuffSize, ui32Level);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteStringCM(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivWriteStringCM(psStream, pszString, ui32Level);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret=DBGDrivWriteCM(psStream, pui8InBuf, ui32InBuffSize, ui32Level);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++void IMG_CALLCONV ExtDBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetMarker(psStream, ui32Marker);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetMarker(PDBG_STREAM psStream)
++{
++ IMG_UINT32 ui32Marker;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Marker = DBGDrivGetMarker(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Marker;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 * pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret = DBGDrivWriteLF(psStream, pui8InBuf, ui32InBuffSize, ui32Level, ui32Flags);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 * pui8OutBuf)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret = DBGDrivReadLF(psStream, ui32OutBuffSize, pui8OutBuf);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++
++IMG_VOID IMG_CALLCONV ExtDBGDrivStartInitPhase(PDBG_STREAM psStream)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivStartInitPhase(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++IMG_VOID IMG_CALLCONV ExtDBGDrivStopInitPhase(PDBG_STREAM psStream)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivStopInitPhase(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return;
++}
++
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetStreamOffset(PDBG_STREAM psStream)
++{
++ IMG_UINT32 ui32Ret;
++
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ ui32Ret = DBGDrivGetStreamOffset(psStream);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++
++ return ui32Ret;
++}
++
++IMG_VOID IMG_CALLCONV ExtDBGDrivSetStreamOffset(PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset)
++{
++
++ HostAquireMutex(g_pvAPIMutex);
++
++ DBGDrivSetStreamOffset(psStream, ui32StreamOffset);
++
++
++ HostReleaseMutex(g_pvAPIMutex);
++}
++
++IMG_VOID IMG_CALLCONV ExtDBGDrivWaitForEvent(DBG_EVENT eEvent)
++{
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++ DBGDrivWaitForEvent(eEvent);
++#else
++ PVR_UNREFERENCED_PARAMETER(eEvent);
++#endif
++}
++
++IMG_UINT32 AtoI(IMG_CHAR *szIn)
++{
++ IMG_INT iLen = 0;
++ IMG_UINT32 ui32Value = 0;
++ IMG_UINT32 ui32Digit=1;
++ IMG_UINT32 ui32Base=10;
++ IMG_INT iPos;
++ IMG_CHAR bc;
++
++
++ while (szIn[iLen] > 0)
++ {
++ iLen ++;
++ }
++
++
++ if (iLen == 0)
++ {
++ return (0);
++ }
++
++
++ iPos=0;
++ while (szIn[iPos] == '0')
++ {
++ iPos++;
++ }
++ if (szIn[iPos] == '\0')
++ {
++ return 0;
++ }
++ if (szIn[iPos] == 'x' || szIn[iPos] == 'X')
++ {
++ ui32Base=16;
++ szIn[iPos]='0';
++ }
++
++
++ for (iPos = iLen - 1; iPos >= 0; iPos --)
++ {
++ bc = szIn[iPos];
++
++ if ( (bc >= 'a') && (bc <= 'f') && ui32Base == 16)
++ {
++ bc -= 'a' - 0xa;
++ }
++ else
++ if ( (bc >= 'A') && (bc <= 'F') && ui32Base == 16)
++ {
++ bc -= 'A' - 0xa;
++ }
++ else
++ if ((bc >= '0') && (bc <= '9'))
++ {
++ bc -= '0';
++ }
++ else
++ return (0);
++
++ ui32Value += (IMG_UINT32)bc * ui32Digit;
++
++ ui32Digit = ui32Digit * ui32Base;
++ }
++ return (ui32Value);
++}
++
++
++IMG_BOOL StreamValid(PDBG_STREAM psStream)
++{
++ PDBG_STREAM psThis;
++
++ psThis = g_psStreamList;
++
++ while (psThis)
++ {
++ if (psStream && (psThis == psStream))
++ {
++ return(IMG_TRUE);
++ }
++ else
++ {
++ psThis = psThis->psNext;
++ }
++ }
++
++ return(IMG_FALSE);
++}
++
++
++void Write(PDBG_STREAM psStream,IMG_UINT8 * pui8Data,IMG_UINT32 ui32InBuffSize)
++{
++
++
++ if ((psStream->ui32WPtr + ui32InBuffSize) > psStream->ui32Size)
++ {
++ IMG_UINT32 ui32B1 = psStream->ui32Size - psStream->ui32WPtr;
++ IMG_UINT32 ui32B2 = ui32InBuffSize - ui32B1;
++
++
++ HostMemCopy((IMG_VOID *)(psStream->ui32Base + psStream->ui32WPtr),
++ (IMG_VOID *) pui8Data,
++ ui32B1);
++
++
++ HostMemCopy((IMG_VOID *)psStream->ui32Base,
++ (IMG_VOID *)((IMG_UINT32) pui8Data + ui32B1),
++ ui32B2);
++
++
++ psStream->ui32WPtr = ui32B2;
++ }
++ else
++ {
++ HostMemCopy((IMG_VOID *)(psStream->ui32Base + psStream->ui32WPtr),
++ (IMG_VOID *) pui8Data,
++ ui32InBuffSize);
++
++ psStream->ui32WPtr += ui32InBuffSize;
++
++ if (psStream->ui32WPtr == psStream->ui32Size)
++ {
++ psStream->ui32WPtr = 0;
++ }
++ }
++ psStream->ui32DataWritten += ui32InBuffSize;
++}
++
++
++void MonoOut(IMG_CHAR * pszString,IMG_BOOL bNewLine)
++{
++ IMG_UINT32 i;
++ IMG_CHAR * pScreen;
++
++ pScreen = (IMG_CHAR *) DBGDRIV_MONOBASE;
++
++ pScreen += g_ui32Line * 160;
++
++
++
++ i=0;
++ do
++ {
++ pScreen[g_ui32LOff + (i*2)] = pszString[i];
++ pScreen[g_ui32LOff + (i*2)+1] = 127;
++ i++;
++ }
++ while ((pszString[i] != 0) && (i < 4096));
++
++ g_ui32LOff += i * 2;
++
++ if (bNewLine)
++ {
++ g_ui32LOff = 0;
++ g_ui32Line++;
++ }
++
++
++
++ if (g_ui32Line == g_ui32MonoLines)
++ {
++ g_ui32Line = g_ui32MonoLines - 1;
++
++ HostMemCopy((IMG_VOID *)DBGDRIV_MONOBASE,(IMG_VOID *)(DBGDRIV_MONOBASE + 160),160 * (g_ui32MonoLines - 1));
++
++ HostMemSet((IMG_VOID *)(DBGDRIV_MONOBASE + (160 * (g_ui32MonoLines - 1))),0,160);
++ }
++}
++
++
++
++void AppendName(IMG_CHAR * pszOut,IMG_CHAR * pszBase,IMG_CHAR * pszName)
++{
++ IMG_UINT32 i;
++ IMG_UINT32 ui32Off;
++
++ i = 0;
++
++ while (pszBase[i] != 0)
++ {
++ pszOut[i] = pszBase[i];
++ i++;
++ }
++
++ ui32Off = i;
++ i = 0;
++
++ while (pszName[i] != 0)
++ {
++ pszOut[ui32Off+i] = pszName[i];
++ i++;
++ }
++
++ pszOut[ui32Off+i] = pszName[i];
++}
++
++
++IMG_VOID * IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR * pszName,
++ IMG_UINT32 ui32CapMode,
++ IMG_UINT32 ui32OutMode,
++ IMG_UINT32 ui32Flags,
++ IMG_UINT32 ui32Size)
++{
++ PDBG_STREAM psStream;
++ PDBG_STREAM psInitStream;
++ PDBG_LASTFRAME_BUFFER psLFBuffer;
++ IMG_UINT32 ui32Off;
++ IMG_VOID * pvBase;
++
++
++
++
++ psStream = (PDBG_STREAM) DBGDrivFindStream(pszName, IMG_FALSE);
++
++ if (psStream)
++ {
++ return ((IMG_VOID *) psStream);
++ }
++
++
++
++ psStream = HostNonPageablePageAlloc(1);
++ psInitStream = HostNonPageablePageAlloc(1);
++ psLFBuffer = HostNonPageablePageAlloc(1);
++ if (
++ (!psStream) ||
++ (!psInitStream) ||
++ (!psLFBuffer)
++ )
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc control structs\n\r"));
++ return((IMG_VOID *) 0);
++ }
++
++
++ if ((ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
++ {
++ pvBase = HostNonPageablePageAlloc(ui32Size);
++ }
++ else
++ {
++ pvBase = HostPageablePageAlloc(ui32Size);
++ }
++
++ if (!pvBase)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc Stream buffer\n\r"));
++ HostNonPageablePageFree(psStream);
++ return((IMG_VOID *) 0);
++ }
++
++
++
++ psStream->psNext = 0;
++ psStream->ui32Flags = ui32Flags;
++ psStream->ui32Base = (IMG_UINT32)pvBase;
++ psStream->ui32Size = ui32Size * 4096UL;
++ psStream->ui32RPtr = 0;
++ psStream->ui32WPtr = 0;
++ psStream->ui32DataWritten = 0;
++ psStream->ui32CapMode = ui32CapMode;
++ psStream->ui32OutMode = ui32OutMode;
++ psStream->ui32DebugLevel = DEBUG_LEVEL_0;
++ psStream->ui32DefaultMode = ui32CapMode;
++ psStream->ui32Start = 0;
++ psStream->ui32End = 0;
++ psStream->ui32Current = 0;
++ psStream->ui32SampleRate = 1;
++ psStream->ui32Access = 0;
++ psStream->ui32Timeout = 0;
++ psStream->ui32Marker = 0;
++ psStream->bInitPhaseComplete = IMG_FALSE;
++
++
++ if ((ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
++ {
++ pvBase = HostNonPageablePageAlloc(ui32Size);
++ }
++ else
++ {
++ pvBase = HostPageablePageAlloc(ui32Size);
++ }
++
++ if (!pvBase)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc InitStream buffer\n\r"));
++
++ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
++ {
++ HostNonPageablePageFree((IMG_VOID *)psStream->ui32Base);
++ }
++ else
++ {
++ HostPageablePageFree((IMG_VOID *)psStream->ui32Base);
++ }
++ HostNonPageablePageFree(psStream);
++ return((IMG_VOID *) 0);
++ }
++
++ psInitStream->psNext = 0;
++ psInitStream->ui32Flags = ui32Flags;
++ psInitStream->ui32Base = (IMG_UINT32)pvBase;
++ psInitStream->ui32Size = ui32Size * 4096UL;
++ psInitStream->ui32RPtr = 0;
++ psInitStream->ui32WPtr = 0;
++ psInitStream->ui32DataWritten = 0;
++ psInitStream->ui32CapMode = ui32CapMode;
++ psInitStream->ui32OutMode = ui32OutMode;
++ psInitStream->ui32DebugLevel = DEBUG_LEVEL_0;
++ psInitStream->ui32DefaultMode = ui32CapMode;
++ psInitStream->ui32Start = 0;
++ psInitStream->ui32End = 0;
++ psInitStream->ui32Current = 0;
++ psInitStream->ui32SampleRate = 1;
++ psInitStream->ui32Access = 0;
++ psInitStream->ui32Timeout = 0;
++ psInitStream->ui32Marker = 0;
++ psInitStream->bInitPhaseComplete = IMG_FALSE;
++
++ psStream->psInitStream = psInitStream;
++
++
++ psLFBuffer->psStream = psStream;
++ psLFBuffer->ui32BufLen = 0UL;
++
++ g_bHotkeyMiddump = IMG_FALSE;
++ g_ui32HotkeyMiddumpStart = 0xffffffffUL;
++ g_ui32HotkeyMiddumpEnd = 0xffffffffUL;
++
++
++
++ ui32Off = 0;
++
++ do
++ {
++ psStream->szName[ui32Off] = pszName[ui32Off];
++
++ ui32Off++;
++ }
++ while ((pszName[ui32Off] != 0) && (ui32Off < (4096UL - sizeof(DBG_STREAM))));
++
++ psStream->szName[ui32Off] = pszName[ui32Off];
++
++
++
++ psStream->psNext = g_psStreamList;
++ g_psStreamList = psStream;
++
++ psLFBuffer->psNext = g_psLFBufferList;
++ g_psLFBufferList = psLFBuffer;
++
++
++ return((IMG_VOID *) psStream);
++}
++
++void IMG_CALLCONV DBGDrivDestroyStream(PDBG_STREAM psStream)
++{
++ PDBG_STREAM psStreamThis;
++ PDBG_STREAM psStreamPrev;
++ PDBG_LASTFRAME_BUFFER psLFBuffer;
++ PDBG_LASTFRAME_BUFFER psLFThis;
++ PDBG_LASTFRAME_BUFFER psLFPrev;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "DBGDriv: Destroying stream %s\r\n", psStream->szName ));
++
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psLFBuffer = FindLFBuf(psStream);
++
++
++
++ psStreamThis = g_psStreamList;
++ psStreamPrev = 0;
++
++ while (psStreamThis)
++ {
++ if (psStreamThis == psStream)
++ {
++ if (psStreamPrev)
++ {
++ psStreamPrev->psNext = psStreamThis->psNext;
++ }
++ else
++ {
++ g_psStreamList = psStreamThis->psNext;
++ }
++
++ psStreamThis = 0;
++ }
++ else
++ {
++ psStreamPrev = psStreamThis;
++ psStreamThis = psStreamThis->psNext;
++ }
++ }
++
++ psLFThis = g_psLFBufferList;
++ psLFPrev = 0;
++
++ while (psLFThis)
++ {
++ if (psLFThis == psLFBuffer)
++ {
++ if (psLFPrev)
++ {
++ psLFPrev->psNext = psLFThis->psNext;
++ }
++ else
++ {
++ g_psLFBufferList = psLFThis->psNext;
++ }
++
++ psLFThis = 0;
++ }
++ else
++ {
++ psLFPrev = psLFThis;
++ psLFThis = psLFThis->psNext;
++ }
++ }
++
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_HOTKEY)
++ {
++ DeactivateHotKeys();
++ }
++
++
++
++ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
++ {
++ HostNonPageablePageFree((IMG_VOID *)psStream->ui32Base);
++ HostNonPageablePageFree((IMG_VOID *)psStream->psInitStream->ui32Base);
++ }
++ else
++ {
++ HostPageablePageFree((IMG_VOID *)psStream->ui32Base);
++ HostPageablePageFree((IMG_VOID *)psStream->psInitStream->ui32Base);
++ }
++
++ HostNonPageablePageFree(psStream->psInitStream);
++ HostNonPageablePageFree(psStream);
++ HostNonPageablePageFree(psLFBuffer);
++
++ if (g_psStreamList == 0)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"DBGDriv: Stream list now empty" ));
++ }
++
++ return;
++}
++
++IMG_VOID * IMG_CALLCONV DBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream)
++{
++ PDBG_STREAM psStream;
++ PDBG_STREAM psThis;
++ IMG_UINT32 ui32Off;
++ IMG_BOOL bAreSame;
++
++ psStream = 0;
++
++
++
++ for (psThis = g_psStreamList; psThis != IMG_NULL; psThis = psThis->psNext)
++ {
++ bAreSame = IMG_TRUE;
++ ui32Off = 0;
++
++ if (strlen(psThis->szName) == strlen(pszName))
++ {
++ while ((psThis->szName[ui32Off] != 0) && (pszName[ui32Off] != 0) && (ui32Off < 128) && bAreSame)
++ {
++ if (psThis->szName[ui32Off] != pszName[ui32Off])
++ {
++ bAreSame = IMG_FALSE;
++ }
++
++ ui32Off++;
++ }
++ }
++ else
++ {
++ bAreSame = IMG_FALSE;
++ }
++
++ if (bAreSame)
++ {
++ psStream = psThis;
++ break;
++ }
++ }
++
++ if(bResetStream && psStream)
++ {
++ static IMG_CHAR szComment[] = "-- Init phase terminated\r\n";
++ psStream->psInitStream->ui32RPtr = 0;
++ psStream->ui32RPtr = 0;
++ psStream->ui32WPtr = 0;
++ psStream->ui32DataWritten = psStream->psInitStream->ui32DataWritten;
++ if (psStream->bInitPhaseComplete == IMG_FALSE)
++ {
++ if (psStream->ui32Flags & DEBUG_FLAGS_TEXTSTREAM)
++ {
++ DBGDrivWrite2(psStream, (IMG_UINT8 *)szComment, sizeof(szComment) - 1, 0x01);
++ }
++ psStream->bInitPhaseComplete = IMG_TRUE;
++ }
++ }
++
++ return((IMG_VOID *) psStream);
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteStringCM(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED)
++ {
++ if ((psStream->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0)
++ {
++ return(0);
++ }
++ }
++ else
++ {
++ if (psStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
++ {
++ if ((psStream->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE))
++ {
++ return(0);
++ }
++ }
++ }
++
++ return(DBGDrivWriteString(psStream,pszString,ui32Level));
++
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Len;
++ IMG_UINT32 ui32Space;
++ IMG_UINT32 ui32WPtr;
++ IMG_UINT8 * pui8Buffer;
++
++
++
++ if (!StreamValid(psStream))
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if ((psStream->ui32DebugLevel & ui32Level) == 0)
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++
++ if ((psStream->ui32OutMode & DEBUG_OUTMODE_ASYNC) == 0)
++ {
++ if (psStream->ui32OutMode & DEBUG_OUTMODE_STANDARDDBG)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"%s: %s\r\n",psStream->szName, pszString));
++ }
++
++
++
++ if (psStream->ui32OutMode & DEBUG_OUTMODE_MONO)
++ {
++ MonoOut(psStream->szName,IMG_FALSE);
++ MonoOut(": ",IMG_FALSE);
++ MonoOut(pszString,IMG_TRUE);
++ }
++ }
++
++
++
++ if (
++ !(
++ ((psStream->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE) != 0) ||
++ ((psStream->ui32OutMode & DEBUG_OUTMODE_ASYNC) != 0)
++ )
++ )
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ ui32Space=SpaceInStream(psStream);
++
++ if(ui32Space > 0)
++ {
++ ui32Space--;
++ }
++
++ ui32Len = 0;
++ ui32WPtr = psStream->ui32WPtr;
++ pui8Buffer = (IMG_UINT8 *) psStream->ui32Base;
++
++ while((pszString[ui32Len] != 0) && (ui32Len < ui32Space))
++ {
++ pui8Buffer[ui32WPtr] = (IMG_UINT8)pszString[ui32Len];
++ ui32Len++;
++ ui32WPtr++;
++ if (ui32WPtr == psStream->ui32Size)
++ {
++ ui32WPtr = 0;
++ }
++ }
++
++ if (ui32Len < ui32Space)
++ {
++
++ pui8Buffer[ui32WPtr] = (IMG_UINT8)pszString[ui32Len];
++ ui32Len++;
++ ui32WPtr++;
++ if (ui32WPtr == psStream->ui32Size)
++ {
++ ui32WPtr = 0;
++ }
++
++
++ psStream->ui32WPtr = ui32WPtr;
++ psStream->ui32DataWritten+= ui32Len;
++ } else
++ {
++ ui32Len = 0;
++ }
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++ if (ui32Len)
++ {
++ HostSignalEvent(DBG_EVENT_STREAM_DATA);
++ }
++#endif
++
++ return(ui32Len);
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit)
++{
++ IMG_UINT32 ui32OutLen;
++ IMG_UINT32 ui32Len;
++ IMG_UINT32 ui32Offset;
++ IMG_UINT8 *pui8Buff;
++
++
++
++ if (!StreamValid(psStream))
++ {
++ return(0);
++ }
++
++
++
++ pui8Buff = (IMG_UINT8 *) psStream->ui32Base;
++ ui32Offset = psStream->ui32RPtr;
++
++ if (psStream->ui32RPtr == psStream->ui32WPtr)
++ {
++ return(0);
++ }
++
++
++
++ ui32Len = 0;
++ while((pui8Buff[ui32Offset] != 0) && (ui32Offset != psStream->ui32WPtr))
++ {
++ ui32Offset++;
++ ui32Len++;
++
++
++
++ if (ui32Offset == psStream->ui32Size)
++ {
++ ui32Offset = 0;
++ }
++ }
++
++ ui32OutLen = ui32Len + 1;
++
++
++
++ if (ui32Len > ui32Limit)
++ {
++ return(0);
++ }
++
++
++
++ ui32Offset = psStream->ui32RPtr;
++ ui32Len = 0;
++
++ while ((pui8Buff[ui32Offset] != 0) && (ui32Len < ui32Limit))
++ {
++ pszString[ui32Len] = (IMG_CHAR)pui8Buff[ui32Offset];
++ ui32Offset++;
++ ui32Len++;
++
++
++
++ if (ui32Offset == psStream->ui32Size)
++ {
++ ui32Offset = 0;
++ }
++ }
++
++ pszString[ui32Len] = (IMG_CHAR)pui8Buff[ui32Offset];
++
++ psStream->ui32RPtr = ui32Offset + 1;
++
++ if (psStream->ui32RPtr == psStream->ui32Size)
++ {
++ psStream->ui32RPtr = 0;
++ }
++
++ return(ui32OutLen);
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivWrite(PDBG_STREAM psMainStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Space;
++ DBG_STREAM *psStream;
++
++
++
++ if (!StreamValid(psMainStream))
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if ((psMainStream->ui32DebugLevel & ui32Level) == 0)
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if (psMainStream->ui32CapMode & DEBUG_CAPMODE_FRAMED)
++ {
++ if ((psMainStream->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0)
++ {
++ return(0xFFFFFFFFUL);
++ }
++ }
++ else if (psMainStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
++ {
++ if ((psMainStream->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE))
++ return(0xFFFFFFFFUL);
++ }
++
++ if(psMainStream->bInitPhaseComplete)
++ {
++ psStream = psMainStream;
++ }
++ else
++ {
++ psStream = psMainStream->psInitStream;
++ }
++
++
++
++ ui32Space=SpaceInStream(psStream);
++
++
++
++ if ((psStream->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE) == 0)
++ {
++ return(0);
++ }
++
++ if (ui32Space < 8)
++ {
++ return(0);
++ }
++
++
++
++ if (ui32Space <= (ui32InBuffSize + 4))
++ {
++ ui32InBuffSize = ui32Space - 8;
++ }
++
++
++
++ Write(psStream,(IMG_UINT8 *) &ui32InBuffSize,4);
++ Write(psStream,pui8InBuf,ui32InBuffSize);
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++ if (ui32InBuffSize)
++ {
++ HostSignalEvent(DBG_EVENT_STREAM_DATA);
++ }
++#endif
++ return(ui32InBuffSize);
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED)
++ {
++ if ((psStream->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0)
++ {
++ return(0xFFFFFFFFUL);
++ }
++ }
++ else
++ {
++ if (psStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
++ {
++ if ((psStream->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE))
++ {
++ return(0xFFFFFFFFUL);
++ }
++ }
++ }
++
++ return(DBGDrivWrite2(psStream,pui8InBuf,ui32InBuffSize,ui32Level));
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivWrite2(PDBG_STREAM psMainStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level)
++{
++ IMG_UINT32 ui32Space;
++ DBG_STREAM *psStream;
++
++
++
++ if (!StreamValid(psMainStream))
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if ((psMainStream->ui32DebugLevel & ui32Level) == 0)
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++ if(psMainStream->bInitPhaseComplete)
++ {
++ psStream = psMainStream;
++ }
++ else
++ {
++ psStream = psMainStream->psInitStream;
++ }
++
++
++
++ ui32Space=SpaceInStream(psStream);
++
++
++
++ if ((psStream->ui32OutMode & DEBUG_OUTMODE_STREAMENABLE) == 0)
++ {
++ return(0);
++ }
++
++
++
++ if (psStream->ui32Flags & DEBUG_FLAGS_NO_BUF_EXPANDSION)
++ {
++
++
++
++ if (ui32Space < 32)
++ {
++ return(0);
++ }
++ }
++ else
++ {
++ if ((ui32Space < 32) || (ui32Space <= (ui32InBuffSize + 4)))
++ {
++ IMG_UINT32 ui32NewBufSize;
++
++
++
++ ui32NewBufSize = 2 * psStream->ui32Size;
++
++ if (ui32InBuffSize > psStream->ui32Size)
++ {
++ ui32NewBufSize += ui32InBuffSize;
++ }
++
++
++
++ if (!ExpandStreamBuffer(psStream,ui32NewBufSize))
++ {
++ if (ui32Space < 32)
++ {
++ return(0);
++ }
++ }
++
++
++
++ ui32Space = SpaceInStream(psStream);
++ }
++ }
++
++
++
++ if (ui32Space <= (ui32InBuffSize + 4))
++ {
++ ui32InBuffSize = ui32Space - 4;
++ }
++
++
++
++ Write(psStream,pui8InBuf,ui32InBuffSize);
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++ if (ui32InBuffSize)
++ {
++ HostSignalEvent(DBG_EVENT_STREAM_DATA);
++ }
++#endif
++ return(ui32InBuffSize);
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psMainStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 * pui8OutBuf)
++{
++ IMG_UINT32 ui32Data;
++ DBG_STREAM *psStream;
++
++
++
++ if (!StreamValid(psMainStream))
++ {
++ return(0);
++ }
++
++ if(bReadInitBuffer)
++ {
++ psStream = psMainStream->psInitStream;
++ }
++ else
++ {
++ psStream = psMainStream;
++ }
++
++ if (psStream->ui32RPtr == psStream->ui32WPtr)
++ {
++ return(0);
++ }
++
++
++
++ if (psStream->ui32RPtr <= psStream->ui32WPtr)
++ {
++ ui32Data = psStream->ui32WPtr - psStream->ui32RPtr;
++ }
++ else
++ {
++ ui32Data = psStream->ui32WPtr + (psStream->ui32Size - psStream->ui32RPtr);
++ }
++
++
++
++ if (ui32Data > ui32OutBuffSize)
++ {
++ ui32Data = ui32OutBuffSize;
++ }
++
++
++
++ if ((psStream->ui32RPtr + ui32Data) > psStream->ui32Size)
++ {
++ IMG_UINT32 ui32B1 = psStream->ui32Size - psStream->ui32RPtr;
++ IMG_UINT32 ui32B2 = ui32Data - ui32B1;
++
++
++ HostMemCopy((IMG_VOID *) pui8OutBuf,
++ (IMG_VOID *)(psStream->ui32Base + psStream->ui32RPtr),
++ ui32B1);
++
++
++ HostMemCopy((IMG_VOID *)((IMG_UINT32) pui8OutBuf + ui32B1),
++ (IMG_VOID *)psStream->ui32Base,
++ ui32B2);
++
++
++ psStream->ui32RPtr = ui32B2;
++ }
++ else
++ {
++ HostMemCopy((IMG_VOID *) pui8OutBuf,
++ (IMG_VOID *)(psStream->ui32Base + psStream->ui32RPtr),
++ ui32Data);
++
++
++ psStream->ui32RPtr += ui32Data;
++
++
++ if (psStream->ui32RPtr == psStream->ui32Size)
++ {
++ psStream->ui32RPtr = 0;
++ }
++ }
++
++ return(ui32Data);
++}
++
++void IMG_CALLCONV DBGDrivSetCaptureMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode,IMG_UINT32 ui32Start,IMG_UINT32 ui32End,IMG_UINT32 ui32SampleRate)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psStream->ui32CapMode = ui32Mode;
++ psStream->ui32DefaultMode = ui32Mode;
++ psStream->ui32Start = ui32Start;
++ psStream->ui32End = ui32End;
++ psStream->ui32SampleRate = ui32SampleRate;
++
++
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_HOTKEY)
++ {
++ ActivateHotKeys(psStream);
++ }
++}
++
++void IMG_CALLCONV DBGDrivSetOutputMode(PDBG_STREAM psStream,IMG_UINT32 ui32OutMode)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psStream->ui32OutMode = ui32OutMode;
++}
++
++void IMG_CALLCONV DBGDrivSetDebugLevel(PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psStream->ui32DebugLevel = ui32DebugLevel;
++}
++
++void IMG_CALLCONV DBGDrivSetFrame(PDBG_STREAM psStream,IMG_UINT32 ui32Frame)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psStream->ui32Current = ui32Frame;
++
++ if ((ui32Frame >= psStream->ui32Start) &&
++ (ui32Frame <= psStream->ui32End) &&
++ (((ui32Frame - psStream->ui32Start) % psStream->ui32SampleRate) == 0))
++ {
++ psStream->ui32Flags |= DEBUG_FLAGS_ENABLESAMPLE;
++ }
++ else
++ {
++ psStream->ui32Flags &= ~DEBUG_FLAGS_ENABLESAMPLE;
++ }
++
++ if (g_bHotkeyMiddump)
++ {
++ if ((ui32Frame >= g_ui32HotkeyMiddumpStart) &&
++ (ui32Frame <= g_ui32HotkeyMiddumpEnd) &&
++ (((ui32Frame - g_ui32HotkeyMiddumpStart) % psStream->ui32SampleRate) == 0))
++ {
++ psStream->ui32Flags |= DEBUG_FLAGS_ENABLESAMPLE;
++ }
++ else
++ {
++ psStream->ui32Flags &= ~DEBUG_FLAGS_ENABLESAMPLE;
++ if (psStream->ui32Current > g_ui32HotkeyMiddumpEnd)
++ {
++ g_bHotkeyMiddump = IMG_FALSE;
++ }
++ }
++ }
++
++
++ if (g_bHotKeyRegistered)
++ {
++ g_bHotKeyRegistered = IMG_FALSE;
++
++ PVR_DPF((PVR_DBG_MESSAGE,"Hotkey pressed (%08x)!\n",psStream));
++
++ if (!g_bHotKeyPressed)
++ {
++
++
++ g_ui32HotKeyFrame = psStream->ui32Current + 2;
++
++
++
++ g_bHotKeyPressed = IMG_TRUE;
++ }
++
++
++
++ if (((psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) != 0) &&
++ ((psStream->ui32CapMode & DEBUG_CAPMODE_HOTKEY) != 0))
++ {
++ if (!g_bHotkeyMiddump)
++ {
++
++ g_ui32HotkeyMiddumpStart = g_ui32HotKeyFrame + 1;
++ g_ui32HotkeyMiddumpEnd = 0xffffffff;
++ g_bHotkeyMiddump = IMG_TRUE;
++ PVR_DPF((PVR_DBG_MESSAGE,"Sampling every %d frame(s)\n", psStream->ui32SampleRate));
++ }
++ else
++ {
++
++ g_ui32HotkeyMiddumpEnd = g_ui32HotKeyFrame;
++ PVR_DPF((PVR_DBG_MESSAGE,"Turning off sampling\n"));
++ }
++ }
++
++ }
++
++
++
++ if (psStream->ui32Current > g_ui32HotKeyFrame)
++ {
++ g_bHotKeyPressed = IMG_FALSE;
++ }
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivGetFrame(PDBG_STREAM psStream)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return(0);
++ }
++
++ return(psStream->ui32Current);
++}
++
++IMG_BOOL IMG_CALLCONV DBGDrivIsLastCaptureFrame(PDBG_STREAM psStream)
++{
++ IMG_UINT32 ui32NextFrame;
++
++
++
++ if (!StreamValid(psStream))
++ {
++ return IMG_FALSE;
++ }
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED)
++ {
++ ui32NextFrame = psStream->ui32Current + psStream->ui32SampleRate;
++ if (ui32NextFrame > psStream->ui32End)
++ {
++ return IMG_TRUE;
++ }
++ }
++ return IMG_FALSE;
++}
++
++IMG_BOOL IMG_CALLCONV DBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame)
++{
++ IMG_UINT32 ui32FrameShift = bCheckPreviousFrame ? 1UL : 0UL;
++
++
++
++ if (!StreamValid(psStream))
++ {
++ return IMG_FALSE;
++ }
++
++ if (psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED)
++ {
++
++ if (g_bHotkeyMiddump)
++ {
++ if ((psStream->ui32Current >= (g_ui32HotkeyMiddumpStart - ui32FrameShift)) &&
++ (psStream->ui32Current <= (g_ui32HotkeyMiddumpEnd - ui32FrameShift)) &&
++ ((((psStream->ui32Current + ui32FrameShift) - g_ui32HotkeyMiddumpStart) % psStream->ui32SampleRate) == 0))
++ {
++ return IMG_TRUE;
++ }
++ }
++ else
++ {
++ if ((psStream->ui32Current >= (psStream->ui32Start - ui32FrameShift)) &&
++ (psStream->ui32Current <= (psStream->ui32End - ui32FrameShift)) &&
++ ((((psStream->ui32Current + ui32FrameShift) - psStream->ui32Start) % psStream->ui32SampleRate) == 0))
++ {
++ return IMG_TRUE;
++ }
++ }
++ }
++ else if (psStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
++ {
++ if ((psStream->ui32Current == (g_ui32HotKeyFrame-ui32FrameShift)) && (g_bHotKeyPressed))
++ {
++ return IMG_TRUE;
++ }
++ }
++ return IMG_FALSE;
++}
++
++void IMG_CALLCONV DBGDrivOverrideMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psStream->ui32CapMode = ui32Mode;
++}
++
++void IMG_CALLCONV DBGDrivDefaultMode(PDBG_STREAM psStream)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psStream->ui32CapMode = psStream->ui32DefaultMode;
++}
++
++void IMG_CALLCONV DBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return;
++ }
++
++ psStream->ui32Marker = ui32Marker;
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream)
++{
++
++
++ if (!StreamValid(psStream))
++ {
++ return 0;
++ }
++
++ return psStream->ui32Marker;
++}
++
++
++IMG_UINT32 IMG_CALLCONV DBGDrivGetStreamOffset(PDBG_STREAM psMainStream)
++{
++ PDBG_STREAM psStream;
++
++
++
++ if (!StreamValid(psMainStream))
++ {
++ return 0;
++ }
++
++ if(psMainStream->bInitPhaseComplete)
++ {
++ psStream = psMainStream;
++ }
++ else
++ {
++ psStream = psMainStream->psInitStream;
++ }
++
++ return psStream->ui32DataWritten;
++}
++
++IMG_VOID IMG_CALLCONV DBGDrivSetStreamOffset(PDBG_STREAM psMainStream, IMG_UINT32 ui32StreamOffset)
++{
++ PDBG_STREAM psStream;
++
++
++
++ if (!StreamValid(psMainStream))
++ {
++ return;
++ }
++
++ if(psMainStream->bInitPhaseComplete)
++ {
++ psStream = psMainStream;
++ }
++ else
++ {
++ psStream = psMainStream->psInitStream;
++ }
++
++ psStream->ui32DataWritten = ui32StreamOffset;
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivGetServiceTable(void)
++{
++ return((IMG_UINT32) &g_sDBGKMServices);
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 * pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags)
++{
++ PDBG_LASTFRAME_BUFFER psLFBuffer;
++
++
++
++ if (!StreamValid(psStream))
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if ((psStream->ui32DebugLevel & ui32Level) == 0)
++ {
++ return(0xFFFFFFFFUL);
++ }
++
++
++
++ if ((psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) != 0)
++ {
++ if ((psStream->ui32Flags & DEBUG_FLAGS_ENABLESAMPLE) == 0)
++ {
++ return(0xFFFFFFFFUL);
++ }
++ }
++ else if (psStream->ui32CapMode == DEBUG_CAPMODE_HOTKEY)
++ {
++ if ((psStream->ui32Current != g_ui32HotKeyFrame) || (g_bHotKeyPressed == IMG_FALSE))
++ return(0xFFFFFFFFUL);
++ }
++
++ psLFBuffer = FindLFBuf(psStream);
++
++ if (ui32Flags & WRITELF_FLAGS_RESETBUF)
++ {
++
++
++ ui32InBuffSize = (ui32InBuffSize > LAST_FRAME_BUF_SIZE) ? LAST_FRAME_BUF_SIZE : ui32InBuffSize;
++ HostMemCopy((IMG_VOID *)psLFBuffer->ui8Buffer, (IMG_VOID *)pui8InBuf, ui32InBuffSize);
++ psLFBuffer->ui32BufLen = ui32InBuffSize;
++ }
++ else
++ {
++
++
++ ui32InBuffSize = ((psLFBuffer->ui32BufLen + ui32InBuffSize) > LAST_FRAME_BUF_SIZE) ? (LAST_FRAME_BUF_SIZE - psLFBuffer->ui32BufLen) : ui32InBuffSize;
++ HostMemCopy((IMG_VOID *)(&psLFBuffer->ui8Buffer[psLFBuffer->ui32BufLen]), (IMG_VOID *)pui8InBuf, ui32InBuffSize);
++ psLFBuffer->ui32BufLen += ui32InBuffSize;
++ }
++
++ return(ui32InBuffSize);
++}
++
++IMG_UINT32 IMG_CALLCONV DBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 * pui8OutBuf)
++{
++ PDBG_LASTFRAME_BUFFER psLFBuffer;
++ IMG_UINT32 ui32Data;
++
++
++
++ if (!StreamValid(psStream))
++ {
++ return(0);
++ }
++
++ psLFBuffer = FindLFBuf(psStream);
++
++
++
++ ui32Data = (ui32OutBuffSize < psLFBuffer->ui32BufLen) ? ui32OutBuffSize : psLFBuffer->ui32BufLen;
++
++
++
++ HostMemCopy((IMG_VOID *)pui8OutBuf, (IMG_VOID *)psLFBuffer->ui8Buffer, ui32Data);
++
++ return ui32Data;
++}
++
++IMG_VOID IMG_CALLCONV DBGDrivStartInitPhase(PDBG_STREAM psStream)
++{
++ psStream->bInitPhaseComplete = IMG_FALSE;
++}
++
++IMG_VOID IMG_CALLCONV DBGDrivStopInitPhase(PDBG_STREAM psStream)
++{
++ psStream->bInitPhaseComplete = IMG_TRUE;
++}
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++IMG_VOID IMG_CALLCONV DBGDrivWaitForEvent(DBG_EVENT eEvent)
++{
++ HostWaitForEvent(eEvent);
++}
++#endif
++
++IMG_BOOL ExpandStreamBuffer(PDBG_STREAM psStream, IMG_UINT32 ui32NewSize)
++{
++ IMG_VOID * pvNewBuf;
++ IMG_UINT32 ui32NewSizeInPages;
++ IMG_UINT32 ui32NewWOffset;
++ IMG_UINT32 ui32SpaceInOldBuf;
++
++
++
++ if (psStream->ui32Size >= ui32NewSize)
++ {
++ return IMG_FALSE;
++ }
++
++
++
++ ui32SpaceInOldBuf = SpaceInStream(psStream);
++
++
++
++ ui32NewSizeInPages = ((ui32NewSize + 0xfffUL) & ~0xfffUL) / 4096UL;
++
++ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
++ {
++ pvNewBuf = HostNonPageablePageAlloc(ui32NewSizeInPages);
++ }
++ else
++ {
++ pvNewBuf = HostPageablePageAlloc(ui32NewSizeInPages);
++ }
++
++ if (pvNewBuf == IMG_NULL)
++ {
++ return IMG_FALSE;
++ }
++
++
++
++
++ if (psStream->ui32RPtr <= psStream->ui32WPtr)
++ {
++
++
++ HostMemCopy((IMG_VOID *)pvNewBuf, (IMG_VOID *)(psStream->ui32Base + psStream->ui32RPtr), psStream->ui32WPtr - psStream->ui32RPtr);
++ }
++ else
++ {
++ IMG_UINT32 ui32FirstCopySize;
++
++
++
++ ui32FirstCopySize = psStream->ui32Size - psStream->ui32RPtr;
++
++ HostMemCopy((IMG_VOID *)pvNewBuf, (IMG_VOID *)(psStream->ui32Base + psStream->ui32RPtr), ui32FirstCopySize);
++
++
++
++ HostMemCopy((IMG_VOID *)((IMG_UINT32)pvNewBuf + ui32FirstCopySize), (IMG_VOID *)psStream->ui32Base, psStream->ui32WPtr);
++ }
++
++
++
++ ui32NewWOffset = psStream->ui32Size - ui32SpaceInOldBuf;
++
++
++
++ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
++ {
++ HostNonPageablePageFree((IMG_VOID *)psStream->ui32Base);
++ }
++ else
++ {
++ HostPageablePageFree((IMG_VOID *)psStream->ui32Base);
++ }
++
++
++
++ psStream->ui32Base = (IMG_UINT32)pvNewBuf;
++ psStream->ui32RPtr = 0;
++ psStream->ui32WPtr = ui32NewWOffset;
++ psStream->ui32Size = ui32NewSizeInPages * 4096;
++
++ return IMG_TRUE;
++}
++
++IMG_UINT32 SpaceInStream(PDBG_STREAM psStream)
++{
++ IMG_UINT32 ui32Space;
++
++ if (psStream->ui32RPtr > psStream->ui32WPtr)
++ {
++ ui32Space = psStream->ui32RPtr - psStream->ui32WPtr;
++ }
++ else
++ {
++ ui32Space = psStream->ui32RPtr + (psStream->ui32Size - psStream->ui32WPtr);
++ }
++
++ return ui32Space;
++}
++
++
++void DestroyAllStreams(void)
++{
++ while (g_psStreamList != IMG_NULL)
++ {
++ DBGDrivDestroyStream(g_psStreamList);
++ }
++ return;
++}
++
++PDBG_LASTFRAME_BUFFER FindLFBuf(PDBG_STREAM psStream)
++{
++ PDBG_LASTFRAME_BUFFER psLFBuffer;
++
++ psLFBuffer = g_psLFBufferList;
++
++ while (psLFBuffer)
++ {
++ if (psLFBuffer->psStream == psStream)
++ {
++ break;
++ }
++
++ psLFBuffer = psLFBuffer->psNext;
++ }
++
++ return psLFBuffer;
++}
++
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/dbgdriv.h b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/dbgdriv.h
+new file mode 100644
+index 0000000..1c9b1c5
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/dbgdriv.h
+@@ -0,0 +1,116 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _DBGDRIV_
++#define _DBGDRIV_
++
++#define BUFFER_SIZE 64*PAGESIZE
++
++#define DBGDRIV_VERSION 0x100
++#define MAX_PROCESSES 2
++#define BLOCK_USED 0x01
++#define BLOCK_LOCKED 0x02
++#define DBGDRIV_MONOBASE 0x000B0000
++
++
++extern IMG_VOID * g_pvAPIMutex;
++
++IMG_VOID * IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR * pszName,
++ IMG_UINT32 ui32CapMode,
++ IMG_UINT32 ui32OutMode,
++ IMG_UINT32 ui32Flags,
++ IMG_UINT32 ui32Pages);
++IMG_VOID IMG_CALLCONV DBGDrivDestroyStream(PDBG_STREAM psStream);
++IMG_VOID * IMG_CALLCONV DBGDrivFindStream(IMG_CHAR *pszName, IMG_BOOL bResetStream);
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteString(PDBG_STREAM psStream, IMG_CHAR *pszString, IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV DBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR *pszString, IMG_UINT32 ui32Limit);
++IMG_UINT32 IMG_CALLCONV DBGDrivWrite(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV DBGDrivWrite2(PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBufferSize, IMG_UINT8 *pui8OutBuf);
++IMG_VOID IMG_CALLCONV DBGDrivSetCaptureMode(PDBG_STREAM psStream, IMG_UINT32 ui32Mode, IMG_UINT32 ui32Start, IMG_UINT32 ui32Stop, IMG_UINT32 ui32SampleRate);
++IMG_VOID IMG_CALLCONV DBGDrivSetOutputMode(PDBG_STREAM psStream, IMG_UINT32 ui32OutMode);
++IMG_VOID IMG_CALLCONV DBGDrivSetDebugLevel(PDBG_STREAM psStream, IMG_UINT32 ui32DebugLevel);
++IMG_VOID IMG_CALLCONV DBGDrivSetFrame(PDBG_STREAM psStream, IMG_UINT32 ui32Frame);
++IMG_UINT32 IMG_CALLCONV DBGDrivGetFrame(PDBG_STREAM psStream);
++IMG_VOID IMG_CALLCONV DBGDrivOverrideMode(PDBG_STREAM psStream, IMG_UINT32 ui32Mode);
++IMG_VOID IMG_CALLCONV DBGDrivDefaultMode(PDBG_STREAM psStream);
++IMG_UINT32 IMG_CALLCONV DBGDrivGetServiceTable(IMG_VOID);
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteStringCM(PDBG_STREAM psStream, IMG_CHAR *pszString, IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
++IMG_VOID IMG_CALLCONV DBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
++IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream);
++IMG_BOOL IMG_CALLCONV DBGDrivIsLastCaptureFrame(PDBG_STREAM psStream);
++IMG_BOOL IMG_CALLCONV DBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame);
++IMG_UINT32 IMG_CALLCONV DBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags);
++IMG_UINT32 IMG_CALLCONV DBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 *pui8OutBuf);
++IMG_VOID IMG_CALLCONV DBGDrivStartInitPhase(PDBG_STREAM psStream);
++IMG_VOID IMG_CALLCONV DBGDrivStopInitPhase(PDBG_STREAM psStream);
++IMG_UINT32 IMG_CALLCONV DBGDrivGetStreamOffset(PDBG_STREAM psStream);
++IMG_VOID IMG_CALLCONV DBGDrivSetStreamOffset(PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset);
++IMG_VOID IMG_CALLCONV DBGDrivWaitForEvent(DBG_EVENT eEvent);
++
++IMG_VOID DestroyAllStreams(IMG_VOID);
++
++IMG_UINT32 AtoI(IMG_CHAR *szIn);
++
++IMG_VOID HostMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size);
++IMG_VOID HostMemCopy(IMG_VOID *pvDest, IMG_VOID *pvSrc, IMG_UINT32 ui32Size);
++IMG_BOOL StreamValid(PDBG_STREAM psStream);
++IMG_VOID Write(PDBG_STREAM psStream,IMG_UINT8 *pui8Data, IMG_UINT32 ui32InBuffSize);
++IMG_VOID MonoOut(IMG_CHAR *pszString, IMG_BOOL bNewLine);
++
++
++IMG_VOID * IMG_CALLCONV ExtDBGDrivCreateStream(IMG_CHAR *pszName, IMG_UINT32 ui32CapMode, IMG_UINT32 ui32OutMode, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size);
++IMG_VOID IMG_CALLCONV ExtDBGDrivDestroyStream(PDBG_STREAM psStream);
++IMG_VOID * IMG_CALLCONV ExtDBGDrivFindStream(IMG_CHAR *pszName, IMG_BOOL bResetStream);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteString(PDBG_STREAM psStream,IMG_CHAR *pszString, IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadString(PDBG_STREAM psStream,IMG_CHAR *pszString, IMG_UINT32 ui32Limit);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite(PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivRead(PDBG_STREAM psStream, IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 *pui8OutBuf);
++IMG_VOID IMG_CALLCONV ExtDBGDrivSetCaptureMode(PDBG_STREAM psStream, IMG_UINT32 ui32Mode, IMG_UINT32 ui32Start, IMG_UINT32 ui32End, IMG_UINT32 ui32SampleRate);
++IMG_VOID IMG_CALLCONV ExtDBGDrivSetOutputMode(PDBG_STREAM psStream, IMG_UINT32 ui32OutMode);
++IMG_VOID IMG_CALLCONV ExtDBGDrivSetDebugLevel(PDBG_STREAM psStream, IMG_UINT32 ui32DebugLevel);
++IMG_VOID IMG_CALLCONV ExtDBGDrivSetFrame(PDBG_STREAM psStream, IMG_UINT32 ui32Frame);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetFrame(PDBG_STREAM psStream);
++IMG_VOID IMG_CALLCONV ExtDBGDrivOverrideMode(PDBG_STREAM psStream, IMG_UINT32 ui32Mode);
++IMG_VOID IMG_CALLCONV ExtDBGDrivDefaultMode(PDBG_STREAM psStream);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteStringCM(PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteCM(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
++IMG_VOID IMG_CALLCONV ExtDBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetMarker(PDBG_STREAM psStream);
++IMG_VOID IMG_CALLCONV ExtDBGDrivStartInitPhase(PDBG_STREAM psStream);
++IMG_VOID IMG_CALLCONV ExtDBGDrivStopInitPhase(PDBG_STREAM psStream);
++IMG_BOOL IMG_CALLCONV ExtDBGDrivIsLastCaptureFrame(PDBG_STREAM psStream);
++IMG_BOOL IMG_CALLCONV ExtDBGDrivIsCaptureFrame(PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivWriteLF(PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivReadLF(PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 *pui8OutBuf);
++IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetStreamOffset(PDBG_STREAM psStream);
++IMG_VOID IMG_CALLCONV ExtDBGDrivSetStreamOffset(PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset);
++IMG_VOID IMG_CALLCONV ExtDBGDrivWaitForEvent(DBG_EVENT eEvent);
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hostfunc.h b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hostfunc.h
+new file mode 100644
+index 0000000..3a29db6
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hostfunc.h
+@@ -0,0 +1,58 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _HOSTFUNC_
++#define _HOSTFUNC_
++
++#define HOST_PAGESIZE (4096)
++#define DBG_MEMORY_INITIALIZER (0xe2)
++
++IMG_UINT32 HostReadRegistryDWORDFromString(IMG_CHAR *pcKey, IMG_CHAR *pcValueName, IMG_UINT32 *pui32Data);
++
++IMG_VOID * HostPageablePageAlloc(IMG_UINT32 ui32Pages);
++IMG_VOID HostPageablePageFree(IMG_VOID * pvBase);
++IMG_VOID * HostNonPageablePageAlloc(IMG_UINT32 ui32Pages);
++IMG_VOID HostNonPageablePageFree(IMG_VOID * pvBase);
++
++IMG_VOID * HostMapKrnBufIntoUser(IMG_VOID * pvKrnAddr, IMG_UINT32 ui32Size, IMG_VOID * *ppvMdl);
++IMG_VOID HostUnMapKrnBufFromUser(IMG_VOID * pvUserAddr, IMG_VOID * pvMdl, IMG_VOID * pvProcess);
++
++IMG_VOID HostCreateRegDeclStreams(IMG_VOID);
++
++IMG_VOID * HostCreateMutex(IMG_VOID);
++IMG_VOID HostAquireMutex(IMG_VOID * pvMutex);
++IMG_VOID HostReleaseMutex(IMG_VOID * pvMutex);
++IMG_VOID HostDestroyMutex(IMG_VOID * pvMutex);
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++IMG_INT32 HostCreateEventObjects(IMG_VOID);
++IMG_VOID HostWaitForEvent(DBG_EVENT eEvent);
++IMG_VOID HostSignalEvent(DBG_EVENT eEvent);
++IMG_VOID HostDestroyEventObjects(IMG_VOID);
++#endif
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hotkey.c b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hotkey.c
+new file mode 100644
+index 0000000..1997ad0
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hotkey.c
+@@ -0,0 +1,135 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++#if !defined(LINUX)
++#include <ntddk.h>
++#include <windef.h>
++#endif
++
++#include "img_types.h"
++#include "pvr_debug.h"
++#include "dbgdrvif.h"
++#include "dbgdriv.h"
++#include "hotkey.h"
++#include "hostfunc.h"
++
++
++
++
++
++IMG_UINT32 g_ui32HotKeyFrame = 0xFFFFFFFF;
++IMG_BOOL g_bHotKeyPressed = IMG_FALSE;
++IMG_BOOL g_bHotKeyRegistered = IMG_FALSE;
++
++PRIVATEHOTKEYDATA g_PrivateHotKeyData;
++
++
++IMG_VOID ReadInHotKeys(IMG_VOID)
++{
++ g_PrivateHotKeyData.ui32ScanCode = 0x58;
++ g_PrivateHotKeyData.ui32ShiftState = 0x0;
++
++
++
++#if 0
++ if (_RegOpenKey(HKEY_LOCAL_MACHINE,pszRegPath,&hKey) == ERROR_SUCCESS)
++ {
++
++
++ QueryReg(hKey,"ui32ScanCode",&g_PrivateHotKeyData.ui32ScanCode);
++ QueryReg(hKey,"ui32ShiftState",&g_PrivateHotKeyData.ui32ShiftState);
++ }
++#else
++ HostReadRegistryDWORDFromString("DEBUG\\Streams", "ui32ScanCode" , &g_PrivateHotKeyData.ui32ScanCode);
++ HostReadRegistryDWORDFromString("DEBUG\\Streams", "ui32ShiftState", &g_PrivateHotKeyData.ui32ShiftState);
++#endif
++}
++
++IMG_VOID RegisterKeyPressed(IMG_UINT32 dwui32ScanCode, PHOTKEYINFO pInfo)
++{
++ PDBG_STREAM psStream;
++
++ PVR_UNREFERENCED_PARAMETER(pInfo);
++
++ if (dwui32ScanCode == g_PrivateHotKeyData.ui32ScanCode)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"PDUMP Hotkey pressed !\n"));
++
++ psStream = (PDBG_STREAM) g_PrivateHotKeyData.sHotKeyInfo.pvStream;
++
++ if (!g_bHotKeyPressed)
++ {
++
++
++ g_ui32HotKeyFrame = psStream->ui32Current + 2;
++
++
++
++ g_bHotKeyPressed = IMG_TRUE;
++ }
++ }
++}
++
++IMG_VOID ActivateHotKeys(PDBG_STREAM psStream)
++{
++
++
++ ReadInHotKeys();
++
++
++
++ if (!g_PrivateHotKeyData.sHotKeyInfo.hHotKey)
++ {
++ if (g_PrivateHotKeyData.ui32ScanCode != 0)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"Activate HotKey for PDUMP.\n"));
++
++
++
++ g_PrivateHotKeyData.sHotKeyInfo.pvStream = psStream;
++
++ DefineHotKey(g_PrivateHotKeyData.ui32ScanCode, g_PrivateHotKeyData.ui32ShiftState, &g_PrivateHotKeyData.sHotKeyInfo);
++ }
++ else
++ {
++ g_PrivateHotKeyData.sHotKeyInfo.hHotKey = 0;
++ }
++ }
++}
++
++IMG_VOID DeactivateHotKeys(IMG_VOID)
++{
++ if (g_PrivateHotKeyData.sHotKeyInfo.hHotKey != 0)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,"Deactivate HotKey.\n"));
++
++ RemoveHotKey(g_PrivateHotKeyData.sHotKeyInfo.hHotKey);
++ g_PrivateHotKeyData.sHotKeyInfo.hHotKey = 0;
++ }
++}
++
++
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hotkey.h b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hotkey.h
+new file mode 100644
+index 0000000..d9c9458
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/hotkey.h
+@@ -0,0 +1,60 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _HOTKEY_
++#define _HOTKEY_
++
++
++typedef struct _hotkeyinfo
++{
++ IMG_UINT8 ui8ScanCode;
++ IMG_UINT8 ui8Type;
++ IMG_UINT8 ui8Flag;
++ IMG_UINT8 ui8Filler1;
++ IMG_UINT32 ui32ShiftState;
++ IMG_UINT32 ui32HotKeyProc;
++ IMG_VOID *pvStream;
++ IMG_UINT32 hHotKey;
++} HOTKEYINFO, *PHOTKEYINFO;
++
++typedef struct _privatehotkeydata
++{
++ IMG_UINT32 ui32ScanCode;
++ IMG_UINT32 ui32ShiftState;
++ HOTKEYINFO sHotKeyInfo;
++} PRIVATEHOTKEYDATA, *PPRIVATEHOTKEYDATA;
++
++
++IMG_VOID ReadInHotKeys (IMG_VOID);
++IMG_VOID ActivateHotKeys(PDBG_STREAM psStream);
++IMG_VOID DeactivateHotKeys(IMG_VOID);
++
++IMG_VOID RemoveHotKey (IMG_UINT32 hHotKey);
++IMG_VOID DefineHotKey (IMG_UINT32 ui32ScanCode, IMG_UINT32 ui32ShiftState, PHOTKEYINFO psInfo);
++IMG_VOID RegisterKeyPressed (IMG_UINT32 ui32ScanCode, PHOTKEYINFO psInfo);
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/ioctl.c b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/ioctl.c
+new file mode 100644
+index 0000000..a624635
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/ioctl.c
+@@ -0,0 +1,371 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++
++
++#ifdef LINUX
++#include <asm/uaccess.h>
++#endif
++
++#include "img_types.h"
++#include "dbgdrvif.h"
++#include "dbgdriv.h"
++#include "hotkey.h"
++
++
++IMG_UINT32 DBGDIOCDrivCreateStream(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_CREATESTREAM psIn;
++ IMG_VOID * *ppvOut;
++ #ifdef LINUX
++ static IMG_CHAR name[32];
++ #endif
++
++ psIn = (PDBG_IN_CREATESTREAM) pvInBuffer;
++ ppvOut = (IMG_VOID * *) pvOutBuffer;
++
++ #ifdef LINUX
++
++ if(copy_from_user(name, psIn->pszName, 32) != 0)
++ {
++ return IMG_FALSE;
++ }
++
++ *ppvOut = ExtDBGDrivCreateStream(name, psIn->ui32CapMode, psIn->ui32OutMode, 0, psIn->ui32Pages);
++
++ #else
++ *ppvOut = ExtDBGDrivCreateStream(psIn->pszName, psIn->ui32CapMode, psIn->ui32OutMode, DEBUG_FLAGS_NO_BUF_EXPANDSION, psIn->ui32Pages);
++ #endif
++
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivDestroyStream(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pStream;
++ PDBG_STREAM psStream;
++
++ pStream = (IMG_UINT32 *) pvInBuffer;
++ psStream = (PDBG_STREAM) *pStream;
++
++ PVR_UNREFERENCED_PARAMETER( pvOutBuffer);
++
++ ExtDBGDrivDestroyStream(psStream);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivGetStream(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_FINDSTREAM psParams;
++ IMG_UINT32 * pui32Stream;
++
++ psParams = (PDBG_IN_FINDSTREAM)pvInBuffer;
++ pui32Stream = (IMG_UINT32 *)pvOutBuffer;
++
++ *pui32Stream = (IMG_UINT32)ExtDBGDrivFindStream(psParams->pszName, psParams->bResetStream);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivWriteString(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_WRITESTRING psParams;
++ IMG_UINT32 * pui32OutLen;
++
++ psParams = (PDBG_IN_WRITESTRING) pvInBuffer;
++ pui32OutLen = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32OutLen = ExtDBGDrivWriteString((PDBG_STREAM) psParams->pvStream,psParams->pszString,psParams->ui32Level);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivWriteStringCM(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_WRITESTRING psParams;
++ IMG_UINT32 * pui32OutLen;
++
++ psParams = (PDBG_IN_WRITESTRING) pvInBuffer;
++ pui32OutLen = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32OutLen = ExtDBGDrivWriteStringCM((PDBG_STREAM) psParams->pvStream,psParams->pszString,psParams->ui32Level);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivReadString(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pui32OutLen;
++ PDBG_IN_READSTRING psParams;
++
++ psParams = (PDBG_IN_READSTRING) pvInBuffer;
++ pui32OutLen = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32OutLen = ExtDBGDrivReadString(psParams->pvStream,psParams->pszString,psParams->ui32StringLen);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivWrite(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pui32BytesCopied;
++ PDBG_IN_WRITE psInParams;
++
++ psInParams = (PDBG_IN_WRITE) pvInBuffer;
++ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32BytesCopied = ExtDBGDrivWrite((PDBG_STREAM) psInParams->pvStream,psInParams->pui8InBuffer,psInParams->ui32TransferSize,psInParams->ui32Level);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivWrite2(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pui32BytesCopied;
++ PDBG_IN_WRITE psInParams;
++
++ psInParams = (PDBG_IN_WRITE) pvInBuffer;
++ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32BytesCopied = ExtDBGDrivWrite2((PDBG_STREAM) psInParams->pvStream,psInParams->pui8InBuffer,psInParams->ui32TransferSize,psInParams->ui32Level);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivWriteCM(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pui32BytesCopied;
++ PDBG_IN_WRITE psInParams;
++
++ psInParams = (PDBG_IN_WRITE) pvInBuffer;
++ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32BytesCopied = ExtDBGDrivWriteCM((PDBG_STREAM) psInParams->pvStream,psInParams->pui8InBuffer,psInParams->ui32TransferSize,psInParams->ui32Level);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivRead(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pui32BytesCopied;
++ PDBG_IN_READ psInParams;
++
++ psInParams = (PDBG_IN_READ) pvInBuffer;
++ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32BytesCopied = ExtDBGDrivRead((PDBG_STREAM) psInParams->pvStream,psInParams->bReadInitBuffer, psInParams->ui32OutBufferSize,psInParams->pui8OutBuffer);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivSetCaptureMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_SETDEBUGMODE psParams;
++
++ psParams = (PDBG_IN_SETDEBUGMODE) pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivSetCaptureMode((PDBG_STREAM) psParams->pvStream,
++ psParams->ui32Mode,
++ psParams->ui32Start,
++ psParams->ui32End,
++ psParams->ui32SampleRate);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivSetOutMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_SETDEBUGOUTMODE psParams;
++
++ psParams = (PDBG_IN_SETDEBUGOUTMODE) pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivSetOutputMode((PDBG_STREAM) psParams->pvStream,psParams->ui32Mode);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivSetDebugLevel(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_SETDEBUGLEVEL psParams;
++
++ psParams = (PDBG_IN_SETDEBUGLEVEL) pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivSetDebugLevel((PDBG_STREAM) psParams->pvStream,psParams->ui32Level);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivSetFrame(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_SETFRAME psParams;
++
++ psParams = (PDBG_IN_SETFRAME) pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivSetFrame((PDBG_STREAM) psParams->pvStream,psParams->ui32Frame);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivGetFrame(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pStream;
++ PDBG_STREAM psStream;
++ IMG_UINT32 * pui32Current;
++
++ pStream = (IMG_UINT32 *) pvInBuffer;
++ psStream = (PDBG_STREAM) *pStream;
++ pui32Current = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32Current = ExtDBGDrivGetFrame(psStream);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivIsCaptureFrame(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_ISCAPTUREFRAME psParams;
++ IMG_UINT32 * pui32Current;
++
++ psParams = (PDBG_IN_ISCAPTUREFRAME) pvInBuffer;
++ pui32Current = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32Current = ExtDBGDrivIsCaptureFrame((PDBG_STREAM) psParams->pvStream, psParams->bCheckPreviousFrame);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivOverrideMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_OVERRIDEMODE psParams;
++
++ psParams = (PDBG_IN_OVERRIDEMODE) pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER( pvOutBuffer);
++
++ ExtDBGDrivOverrideMode((PDBG_STREAM) psParams->pvStream,psParams->ui32Mode);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivDefaultMode(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pStream;
++ PDBG_STREAM psStream;
++
++ pStream = (IMG_UINT32 *) pvInBuffer;
++ psStream = (PDBG_STREAM) *pStream;
++
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivDefaultMode(psStream);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivSetMarker(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_SETMARKER psParams;
++
++ psParams = (PDBG_IN_SETMARKER) pvInBuffer;
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivSetMarker((PDBG_STREAM) psParams->pvStream, psParams->ui32Marker);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivGetMarker(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pStream;
++ PDBG_STREAM psStream;
++ IMG_UINT32 * pui32Current;
++
++ pStream = (IMG_UINT32 *) pvInBuffer;
++ psStream = (PDBG_STREAM) *pStream;
++ pui32Current = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32Current = ExtDBGDrivGetMarker(psStream);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivGetServiceTable(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pui32Out;
++
++ PVR_UNREFERENCED_PARAMETER(pvInBuffer);
++ pui32Out = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32Out = DBGDrivGetServiceTable();
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivWriteLF(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ PDBG_IN_WRITE_LF psInParams;
++ IMG_UINT32 * pui32BytesCopied;
++
++ psInParams = (PDBG_IN_WRITE_LF) pvInBuffer;
++ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32BytesCopied = ExtDBGDrivWriteLF(psInParams->pvStream,
++ psInParams->pui8InBuffer,
++ psInParams->ui32BufferSize,
++ psInParams->ui32Level,
++ psInParams->ui32Flags);
++
++ return IMG_TRUE;
++}
++
++IMG_UINT32 DBGDIOCDrivReadLF(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ IMG_UINT32 * pui32BytesCopied;
++ PDBG_IN_READ psInParams;
++
++ psInParams = (PDBG_IN_READ) pvInBuffer;
++ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
++
++ *pui32BytesCopied = ExtDBGDrivReadLF((PDBG_STREAM) psInParams->pvStream,psInParams->ui32OutBufferSize,psInParams->pui8OutBuffer);
++
++ return(IMG_TRUE);
++}
++
++IMG_UINT32 DBGDIOCDrivWaitForEvent(IMG_VOID * pvInBuffer, IMG_VOID * pvOutBuffer)
++{
++ DBG_EVENT eEvent = (DBG_EVENT)(*(IMG_UINT32 *)pvInBuffer);
++
++ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
++
++ ExtDBGDrivWaitForEvent(eEvent);
++
++ return(IMG_TRUE);
++}
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/ioctl.h b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/ioctl.h
+new file mode 100644
+index 0000000..061be9a
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/common/ioctl.h
+@@ -0,0 +1,87 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#ifndef _IOCTL_
++#define _IOCTL_
++
++
++IMG_UINT32 DBGDIOCDrivCreateStream(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivDestroyStream(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivGetStream(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivWriteString(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivReadString(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivWrite(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivWrite2(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivRead(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivSetCaptureMode(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivSetOutMode(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivSetDebugLevel(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivSetFrame(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivGetFrame(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivOverrideMode(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivDefaultMode(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivGetServiceTable(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivWriteStringCM(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivWriteCM(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivSetMarker(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivGetMarker(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivIsCaptureFrame(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivWriteLF(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivReadLF(IMG_VOID *, IMG_VOID *);
++IMG_UINT32 DBGDIOCDrivWaitForEvent(IMG_VOID*, IMG_VOID *);
++
++IMG_UINT32 (*g_DBGDrivProc[])(IMG_VOID *, IMG_VOID *) =
++{
++ DBGDIOCDrivCreateStream,
++ DBGDIOCDrivDestroyStream,
++ DBGDIOCDrivGetStream,
++ DBGDIOCDrivWriteString,
++ DBGDIOCDrivReadString,
++ DBGDIOCDrivWrite,
++ DBGDIOCDrivRead,
++ DBGDIOCDrivSetCaptureMode,
++ DBGDIOCDrivSetOutMode,
++ DBGDIOCDrivSetDebugLevel,
++ DBGDIOCDrivSetFrame,
++ DBGDIOCDrivGetFrame,
++ DBGDIOCDrivOverrideMode,
++ DBGDIOCDrivDefaultMode,
++ DBGDIOCDrivGetServiceTable,
++ DBGDIOCDrivWrite2,
++ DBGDIOCDrivWriteStringCM,
++ DBGDIOCDrivWriteCM,
++ DBGDIOCDrivSetMarker,
++ DBGDIOCDrivGetMarker,
++ DBGDIOCDrivIsCaptureFrame,
++ DBGDIOCDrivWriteLF,
++ DBGDIOCDrivReadLF,
++ DBGDIOCDrivWaitForEvent
++};
++
++#define MAX_DBGVXD_W32_API (sizeof(g_DBGDrivProc)/sizeof(IMG_UINT32))
++
++#endif
++
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/hostfunc.c b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/hostfunc.c
+new file mode 100644
+index 0000000..3ccec84
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/hostfunc.c
+@@ -0,0 +1,302 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/version.h>
++#include <linux/errno.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <asm/page.h>
++#include <linux/vmalloc.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++#include <linux/mutex.h>
++#else
++#include <asm/semaphore.h>
++#endif
++#include <linux/hardirq.h>
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++#include <linux/sched.h>
++#include <linux/wait.h>
++#include <linux/jiffies.h>
++#include <linux/delay.h>
++#endif
++
++#include "img_types.h"
++#include "pvr_debug.h"
++
++#include "dbgdrvif.h"
++#include "dbgdriv/common/hostfunc.h"
++
++#if !defined(SUPPORT_DRI_DRM)
++IMG_UINT32 gPVRDebugLevel = DBGPRIV_WARNING;
++
++#define PVR_STRING_TERMINATOR '\0'
++#define PVR_IS_FILE_SEPARATOR(character) ( ((character) == '\\') || ((character) == '/') )
++
++void PVRSRVDebugPrintf (
++ IMG_UINT32 ui32DebugLevel,
++ const IMG_CHAR* pszFileName,
++ IMG_UINT32 ui32Line,
++ const IMG_CHAR* pszFormat,
++ ...
++ )
++{
++ IMG_BOOL bTrace, bDebug;
++#if !defined(__sh__)
++ IMG_CHAR *pszLeafName;
++
++ pszLeafName = (char *)strrchr (pszFileName, '\\');
++
++ if (pszLeafName)
++ {
++ pszFileName = pszLeafName;
++ }
++#endif
++
++ bTrace = gPVRDebugLevel & ui32DebugLevel & DBGPRIV_CALLTRACE;
++ bDebug = ((gPVRDebugLevel & DBGPRIV_ALLLEVELS) >= ui32DebugLevel);
++
++ if (bTrace || bDebug)
++ {
++ va_list vaArgs;
++ static char szBuffer[256];
++
++ va_start (vaArgs, pszFormat);
++
++
++ if (bDebug)
++ {
++ switch(ui32DebugLevel)
++ {
++ case DBGPRIV_FATAL:
++ {
++ strncpy (szBuffer, "PVR_K:(Fatal): ", sizeof(szBuffer));
++ break;
++ }
++ case DBGPRIV_ERROR:
++ {
++ strncpy (szBuffer, "PVR_K:(Error): ", sizeof(szBuffer));
++ break;
++ }
++ case DBGPRIV_WARNING:
++ {
++ strncpy (szBuffer, "PVR_K:(Warning): ", sizeof(szBuffer));
++ break;
++ }
++ case DBGPRIV_MESSAGE:
++ {
++ strncpy (szBuffer, "PVR_K:(Message): ", sizeof(szBuffer));
++ break;
++ }
++ case DBGPRIV_VERBOSE:
++ {
++ strncpy (szBuffer, "PVR_K:(Verbose): ", sizeof(szBuffer));
++ break;
++ }
++ default:
++ {
++ strncpy (szBuffer, "PVR_K:(Unknown message level)", sizeof(szBuffer));
++ break;
++ }
++ }
++ }
++ else
++ {
++ strncpy (szBuffer, "PVR_K: ", sizeof(szBuffer));
++ }
++
++ vsnprintf (&szBuffer[strlen(szBuffer)], sizeof(szBuffer), pszFormat, vaArgs);
++
++
++
++ if (!bTrace)
++ {
++ snprintf (&szBuffer[strlen(szBuffer)], sizeof(szBuffer), " [%d, %s]", (int)ui32Line, pszFileName);
++ }
++
++ printk(KERN_INFO "%s\r\n", szBuffer);
++
++ va_end (vaArgs);
++ }
++}
++#endif
++
++IMG_VOID HostMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size)
++{
++ memset(pvDest, (int) ui8Value, (size_t) ui32Size);
++}
++
++IMG_VOID HostMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_UINT32 ui32Size)
++{
++#if defined(USE_UNOPTIMISED_MEMCPY)
++ unsigned char *src,*dst;
++ int i;
++
++ src=(unsigned char *)pvSrc;
++ dst=(unsigned char *)pvDst;
++ for(i=0;i<ui32Size;i++)
++ {
++ dst[i]=src[i];
++ }
++#else
++ memcpy(pvDst, pvSrc, ui32Size);
++#endif
++}
++
++IMG_UINT32 HostReadRegistryDWORDFromString(char *pcKey, char *pcValueName, IMG_UINT32 *pui32Data)
++{
++
++ return 0;
++}
++
++IMG_VOID * HostPageablePageAlloc(IMG_UINT32 ui32Pages)
++{
++ return (void*)vmalloc(ui32Pages * PAGE_SIZE);
++}
++
++IMG_VOID HostPageablePageFree(IMG_VOID * pvBase)
++{
++ vfree(pvBase);
++}
++
++IMG_VOID * HostNonPageablePageAlloc(IMG_UINT32 ui32Pages)
++{
++ return (void*)vmalloc(ui32Pages * PAGE_SIZE);
++}
++
++IMG_VOID HostNonPageablePageFree(IMG_VOID * pvBase)
++{
++ vfree(pvBase);
++}
++
++IMG_VOID * HostMapKrnBufIntoUser(IMG_VOID * pvKrnAddr, IMG_UINT32 ui32Size, IMG_VOID **ppvMdl)
++{
++
++ return IMG_NULL;
++}
++
++IMG_VOID HostUnMapKrnBufFromUser(IMG_VOID * pvUserAddr, IMG_VOID * pvMdl, IMG_VOID * pvProcess)
++{
++
++}
++
++IMG_VOID HostCreateRegDeclStreams(IMG_VOID)
++{
++
++}
++
++IMG_VOID * HostCreateMutex(IMG_VOID)
++{
++ struct semaphore *psSem;
++
++ psSem = kmalloc(sizeof(*psSem), GFP_KERNEL);
++ if (psSem)
++ {
++ init_MUTEX(psSem);
++ }
++
++ return psSem;
++}
++
++IMG_VOID HostAquireMutex(IMG_VOID * pvMutex)
++{
++ BUG_ON(in_interrupt());
++
++#if defined(PVR_DEBUG_DBGDRV_DETECT_HOST_MUTEX_COLLISIONS)
++ if (down_trylock((struct semaphore *)pvMutex))
++ {
++ printk(KERN_INFO "HostAquireMutex: Waiting for mutex\n");
++ down((struct semaphore *)pvMutex);
++ }
++#else
++ down((struct semaphore *)pvMutex);
++#endif
++}
++
++IMG_VOID HostReleaseMutex(IMG_VOID * pvMutex)
++{
++ up((struct semaphore *)pvMutex);
++}
++
++IMG_VOID HostDestroyMutex(IMG_VOID * pvMutex)
++{
++ if (pvMutex)
++ {
++ kfree(pvMutex);
++ }
++}
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++
++#define EVENT_WAIT_TIMEOUT_MS 500
++#define EVENT_WAIT_TIMEOUT_JIFFIES (EVENT_WAIT_TIMEOUT_MS * HZ / 1000)
++
++static int iStreamData;
++static wait_queue_head_t sStreamDataEvent;
++
++IMG_INT32 HostCreateEventObjects(IMG_VOID)
++{
++ init_waitqueue_head(&sStreamDataEvent);
++
++ return 0;
++}
++
++IMG_VOID HostWaitForEvent(DBG_EVENT eEvent)
++{
++ switch(eEvent)
++ {
++ case DBG_EVENT_STREAM_DATA:
++
++ wait_event_interruptible_timeout(sStreamDataEvent, iStreamData != 0, EVENT_WAIT_TIMEOUT_JIFFIES);
++ iStreamData = 0;
++ break;
++ default:
++
++ msleep_interruptible(EVENT_WAIT_TIMEOUT_MS);
++ break;
++ }
++}
++
++IMG_VOID HostSignalEvent(DBG_EVENT eEvent)
++{
++ switch(eEvent)
++ {
++ case DBG_EVENT_STREAM_DATA:
++ iStreamData = 1;
++ wake_up_interruptible(&sStreamDataEvent);
++ break;
++ default:
++ break;
++ }
++}
++
++IMG_VOID HostDestroyEventObjects(IMG_VOID)
++{
++}
++#endif
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/kbuild/Makefile b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/kbuild/Makefile
+new file mode 100644
+index 0000000..5fb9b1e
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/kbuild/Makefile
+@@ -0,0 +1,35 @@
++#
++# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++#
++# This program is free software; you can redistribute it and/or modify it
++# under the terms and conditions of the GNU General Public License,
++# version 2, as published by the Free Software Foundation.
++#
++# This program is distributed in the hope it will be useful but, except
++# as otherwise stated in writing, without any warranty; without even the
++# implied warranty of merchantability or fitness for a particular purpose.
++# See the GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License along with
++# this program; if not, write to the Free Software Foundation, Inc.,
++# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++#
++# The full GNU General Public License is included in this distribution in
++# the file called "COPYING".
++#
++# Contact Information:
++# Imagination Technologies Ltd. <gpl-support@imgtec.com>
++# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++#
++#
++#
++
++include $(EURASIAROOT)/eurasiacon/build/linux/kbuild/Makefile.kbuild_subdir_common
++
++MODULE = dbgdrv
++
++INCLUDES =
++
++SOURCES =
++
++include $(EURASIAROOT)/tools/intern/debug/dbgdriv/linux/makefile.linux.common
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/main.c b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/main.c
+new file mode 100644
+index 0000000..b57cc43
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/main.c
+@@ -0,0 +1,298 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful but, except
++ * as otherwise stated in writing, without any warranty; without even the
++ * implied warranty of merchantability or fitness for a particular purpose.
++ * See the GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++ *
++ ******************************************************************************/
++
++#include <linux/errno.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/kernel.h>
++#include <linux/kdev_t.h>
++#include <linux/pci.h>
++#include <linux/list.h>
++#include <linux/init.h>
++#include <linux/vmalloc.h>
++#include <linux/version.h>
++
++#if defined(LDM_PLATFORM) && !defined(SUPPORT_DRI_DRM)
++#include <linux/platform_device.h>
++#endif
++
++#if defined(LDM_PCI) && !defined(SUPPORT_DRI_DRM)
++#include <linux/pci.h>
++#endif
++
++#include <asm/uaccess.h>
++
++#if defined(SUPPORT_DRI_DRM)
++#include "drmP.h"
++#include "drm.h"
++#endif
++
++#include "img_types.h"
++#include "client/linuxsrv.h"
++#include "dbgdriv/common/ioctl.h"
++#include "dbgdrvif.h"
++#include "dbgdriv/common/dbgdriv.h"
++#include "dbgdriv/common/hostfunc.h"
++#include "pvr_debug.h"
++#include "pvrmodule.h"
++
++#if defined(SUPPORT_DRI_DRM)
++
++#include "pvr_drm_shared.h"
++#include "pvr_drm.h"
++
++#else
++
++#define DRVNAME "dbgdrv"
++MODULE_SUPPORTED_DEVICE(DRVNAME);
++
++#if (defined(LDM_PLATFORM) || defined(LDM_PCI)) && !defined(SUPPORT_DRI_DRM)
++static struct class *psDbgDrvClass;
++#endif
++
++static int AssignedMajorNumber = 0;
++
++long dbgdrv_ioctl(struct file *, unsigned int, unsigned long);
++
++static int dbgdrv_open(struct inode unref__ * pInode, struct file unref__ * pFile)
++{
++ return 0;
++}
++
++static int dbgdrv_release(struct inode unref__ * pInode, struct file unref__ * pFile)
++{
++ return 0;
++}
++
++static int dbgdrv_mmap(struct file* pFile, struct vm_area_struct* ps_vma)
++{
++ return 0;
++}
++
++static struct file_operations dbgdrv_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = dbgdrv_ioctl,
++ .open = dbgdrv_open,
++ .release = dbgdrv_release,
++ .mmap = dbgdrv_mmap,
++};
++
++#endif
++
++void DBGDrvGetServiceTable(void **fn_table)
++{
++ extern DBGKM_SERVICE_TABLE g_sDBGKMServices;
++
++ *fn_table = &g_sDBGKMServices;
++}
++
++#if defined(SUPPORT_DRI_DRM)
++void dbgdrv_cleanup(void)
++#else
++void cleanup_module(void)
++#endif
++{
++#if !defined(SUPPORT_DRI_DRM)
++#if defined(LDM_PLATFORM) || defined(LDM_PCI)
++ device_destroy(psDbgDrvClass, MKDEV(AssignedMajorNumber, 0));
++ class_destroy(psDbgDrvClass);
++#endif
++ unregister_chrdev(AssignedMajorNumber, DRVNAME);
++#endif
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++ HostDestroyEventObjects();
++#endif
++ HostDestroyMutex(g_pvAPIMutex);
++ return;
++}
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT dbgdrv_init(void)
++#else
++int init_module(void)
++#endif
++{
++#if (defined(LDM_PLATFORM) || defined(LDM_PCI)) && !defined(SUPPORT_DRI_DRM)
++ struct device *psDev;
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++ int err = -EBUSY;
++#endif
++
++
++ if ((g_pvAPIMutex=HostCreateMutex()) == IMG_NULL)
++ {
++ return -ENOMEM;
++ }
++
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++
++ (void) HostCreateEventObjects();
++#endif
++
++#if !defined(SUPPORT_DRI_DRM)
++ AssignedMajorNumber =
++ register_chrdev(AssignedMajorNumber, DRVNAME, &dbgdrv_fops);
++
++ if (AssignedMajorNumber <= 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR," unable to get major\n"));
++ goto ErrDestroyEventObjects;
++ }
++
++#if defined(LDM_PLATFORM) || defined(LDM_PCI)
++
++ psDbgDrvClass = class_create(THIS_MODULE, DRVNAME);
++ if (IS_ERR(psDbgDrvClass))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: unable to create class (%ld)",
++ __func__, PTR_ERR(psDbgDrvClass)));
++ goto ErrUnregisterCharDev;
++ }
++
++ psDev = device_create(psDbgDrvClass, NULL, MKDEV(AssignedMajorNumber, 0),
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26))
++ NULL,
++#endif
++ DRVNAME);
++ if (IS_ERR(psDev))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: unable to create device (%ld)",
++ __func__, PTR_ERR(psDev)));
++ goto ErrDestroyClass;
++ }
++#endif
++#endif
++
++ return 0;
++
++#if !defined(SUPPORT_DRI_DRM)
++ErrDestroyEventObjects:
++#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
++ HostDestroyEventObjects();
++#endif
++#if defined(LDM_PLATFORM) || defined(LDM_PCI)
++ErrUnregisterCharDev:
++ unregister_chrdev(AssignedMajorNumber, DRVNAME);
++ErrDestroyClass:
++ class_destroy(psDbgDrvClass);
++#endif
++ return err;
++#endif
++}
++
++#if defined(SUPPORT_DRI_DRM)
++IMG_INT dbgdrv_ioctl(struct drm_device *dev, IMG_VOID *arg, struct drm_file *pFile)
++#else
++long dbgdrv_ioctl(struct file *file, unsigned int ioctlCmd, unsigned long arg)
++#endif
++{
++ IOCTL_PACKAGE *pIP = (IOCTL_PACKAGE *) arg;
++ char *buffer, *in, *out;
++ unsigned int cmd;
++
++ if((pIP->ui32InBufferSize > (PAGE_SIZE >> 1) ) || (pIP->ui32OutBufferSize > (PAGE_SIZE >> 1)))
++ {
++ PVR_DPF((PVR_DBG_ERROR,"Sizes of the buffers are too large, cannot do ioctl\n"));
++ return -1;
++ }
++
++ buffer = (char *) HostPageablePageAlloc(1);
++ if(!buffer)
++ {
++ PVR_DPF((PVR_DBG_ERROR,"Failed to allocate buffer, cannot do ioctl\n"));
++ return -EFAULT;
++ }
++
++ in = buffer;
++ out = buffer + (PAGE_SIZE >>1);
++
++ if(copy_from_user(in, pIP->pInBuffer, pIP->ui32InBufferSize) != 0)
++ {
++ goto init_failed;
++ }
++
++ cmd = ((pIP->ui32Cmd >> 2) & 0xFFF) - 0x801;
++
++ if(pIP->ui32Cmd == DEBUG_SERVICE_READ)
++ {
++ IMG_CHAR *ui8Tmp;
++ IMG_UINT32 *pui32BytesCopied = (IMG_UINT32 *)out;
++ DBG_IN_READ *psReadInParams = (DBG_IN_READ *)in;
++
++ ui8Tmp = vmalloc(psReadInParams->ui32OutBufferSize);
++
++ if(!ui8Tmp)
++ {
++ goto init_failed;
++ }
++
++ *pui32BytesCopied = ExtDBGDrivRead((DBG_STREAM *)psReadInParams->pvStream,
++ psReadInParams->bReadInitBuffer,
++ psReadInParams->ui32OutBufferSize,
++ ui8Tmp);
++
++ if(copy_to_user(psReadInParams->pui8OutBuffer,
++ ui8Tmp,
++ *pui32BytesCopied) != 0)
++ {
++ vfree(ui8Tmp);
++ goto init_failed;
++ }
++
++ vfree(ui8Tmp);
++ }
++ else
++ {
++ (g_DBGDrivProc[cmd])(in, out);
++ }
++
++ if(copy_to_user(pIP->pOutBuffer, out, pIP->ui32OutBufferSize) != 0)
++ {
++ goto init_failed;
++ }
++
++ HostPageablePageFree((IMG_VOID *)buffer);
++ return 0;
++
++init_failed:
++ HostPageablePageFree((IMG_VOID *)buffer);
++ return -EFAULT;
++}
++
++
++void RemoveHotKey(unsigned hHotKey)
++{
++
++}
++
++void DefineHotKey(unsigned ScanCode, unsigned ShiftState, void *pInfo)
++{
++
++}
++
++EXPORT_SYMBOL(DBGDrvGetServiceTable);
+diff --git a/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/makefile.linux.common b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/makefile.linux.common
+new file mode 100644
+index 0000000..105197f
+--- /dev/null
++++ b/drivers/gpu/drm/mrst/pvr/tools/intern/debug/dbgdriv/linux/makefile.linux.common
+@@ -0,0 +1,40 @@
++#
++# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++#
++# This program is free software; you can redistribute it and/or modify it
++# under the terms and conditions of the GNU General Public License,
++# version 2, as published by the Free Software Foundation.
++#
++# This program is distributed in the hope it will be useful but, except
++# as otherwise stated in writing, without any warranty; without even the
++# implied warranty of merchantability or fitness for a particular purpose.
++# See the GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License along with
++# this program; if not, write to the Free Software Foundation, Inc.,
++# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++#
++# The full GNU General Public License is included in this distribution in
++# the file called "COPYING".
++#
++# Contact Information:
++# Imagination Technologies Ltd. <gpl-support@imgtec.com>
++# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++#
++#
++#
++
++ifeq ($(SUPPORT_DRI_DRM),1)
++DBGDRV_SOURCES_ROOT = $(KBUILDROOT)/../tools/intern/debug/dbgdriv
++else
++DBGDRV_SOURCES_ROOT = ../..
++endif
++
++INCLUDES += -I$(EURASIAROOT)/include4 \
++ -I$(EURASIAROOT)/tools/intern/debug
++
++SOURCES += $(DBGDRV_SOURCES_ROOT)/linux/main.c \
++ $(DBGDRV_SOURCES_ROOT)/common/dbgdriv.c \
++ $(DBGDRV_SOURCES_ROOT)/common/ioctl.c \
++ $(DBGDRV_SOURCES_ROOT)/linux/hostfunc.c \
++ $(DBGDRV_SOURCES_ROOT)/common/hotkey.c
+diff --git a/include/drm/drmP.h b/include/drm/drmP.h
+index ffac157..e8673fd 100644
+--- a/include/drm/drmP.h
++++ b/include/drm/drmP.h
+@@ -1131,6 +1131,8 @@ extern int drm_init(struct drm_driver *driver);
+ extern void drm_exit(struct drm_driver *driver);
+ extern long drm_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg);
++extern long drm_unlocked_ioctl(struct file *filp,
++ unsigned int cmd, unsigned long arg);
+ extern long drm_compat_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg);
+ extern int drm_lastclose(struct drm_device *dev);
+@@ -1558,5 +1560,25 @@ static __inline void drm_free_large(void *ptr)
+ }
+ /*@}*/
+
++enum drm_global_types {
++ DRM_GLOBAL_TTM_MEM = 0,
++ DRM_GLOBAL_TTM_BO,
++ DRM_GLOBAL_TTM_OBJECT,
++ DRM_GLOBAL_NUM
++};
++
++struct drm_global_reference {
++ enum drm_global_types global_type;
++ size_t size;
++ void *object;
++ int (*init) (struct drm_global_reference *);
++ void (*release) (struct drm_global_reference *);
++};
++
++extern void drm_global_init(void);
++extern void drm_global_release(void);
++extern int drm_global_item_ref(struct drm_global_reference *ref);
++extern void drm_global_item_unref(struct drm_global_reference *ref);
++
+ #endif /* __KERNEL__ */
+ #endif
+diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
+index c5ba163..e107b17 100644
+--- a/include/drm/drm_mode.h
++++ b/include/drm/drm_mode.h
+@@ -124,6 +124,7 @@ struct drm_mode_crtc {
+ #define DRM_MODE_ENCODER_TMDS 2
+ #define DRM_MODE_ENCODER_LVDS 3
+ #define DRM_MODE_ENCODER_TVDAC 4
++#define DRM_MODE_ENCODER_MIPI 5
+
+ struct drm_mode_get_encoder {
+ __u32 encoder_id;
+@@ -161,6 +162,7 @@ struct drm_mode_get_encoder {
+ #define DRM_MODE_CONNECTOR_HDMIB 12
+ #define DRM_MODE_CONNECTOR_TV 13
+ #define DRM_MODE_CONNECTOR_eDP 14
++#define DRM_MODE_CONNECTOR_MIPI 15
+
+ struct drm_mode_get_connector {
+
+diff --git a/include/linux/backlight.h b/include/linux/backlight.h
+index 8c4f884..05ff433 100644
+--- a/include/linux/backlight.h
++++ b/include/linux/backlight.h
+@@ -92,6 +92,9 @@ struct backlight_device {
+ struct notifier_block fb_notif;
+
+ struct device dev;
++
++ /* Private Backlight Data */
++ void *priv;
+ };
+
+ static inline void backlight_update_status(struct backlight_device *bd)
+--
+1.6.2.5
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-ipc-host-driver.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-ipc-host-driver.patch
new file mode 100644
index 0000000..3eab49a
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-ipc-host-driver.patch
@@ -0,0 +1,268 @@
+From 60bb8e915a1e55c5a562e59e694d37559d62de92 Mon Sep 17 00:00:00 2001
+From: Sreenidhi Gurudatt <sreenidhi.b.gurudatt@intel.com>
+Date: Mon, 3 Aug 2009 14:46:37 +0530
+Subject: [PATCH 073/104] IPC Host driver for MRSTN. It is disabled by default. This driver implements basic ioctls to support testing of IPC driver from user space. It supports Device firmware upgrade feature to be invoked by user-space application.
+
+modified: drivers/misc/Kconfig
+ modified: drivers/misc/Makefile
+ new file: drivers/misc/mrst_test_ipc/Makefile
+ new file: drivers/misc/mrst_test_ipc/ipc_module.c
+
+Signed-off-by: Sreenidhi Gurudatt <sreenidhi.b.gurudatt@intel.com>
+---
+ drivers/misc/Kconfig | 9 ++
+ drivers/misc/Makefile | 1 +
+ drivers/misc/mrst_test_ipc/Makefile | 5 +
+ drivers/misc/mrst_test_ipc/ipc_module.c | 196 +++++++++++++++++++++++++++++++
+ 4 files changed, 211 insertions(+), 0 deletions(-)
+ create mode 100644 drivers/misc/mrst_test_ipc/Makefile
+ create mode 100644 drivers/misc/mrst_test_ipc/ipc_module.c
+
+diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
+index feb9cd9..5a7c342 100644
+--- a/drivers/misc/Kconfig
++++ b/drivers/misc/Kconfig
+@@ -260,6 +260,15 @@ config MRST_RAR_HANDLER
+
+ If unsure, say N.
+
++config MRST_IPC_TEST
++ tristate "IPC driver for testing IPC on Moorestown platform"
++ depends on X86
++ ---help---
++ IPC test driver for Intel Moorestown platform
++ Intel Moorestown platform.
++
++ If unsure, say N.
++
+ config MRST_VIB
+ tristate "vibrator driver for Intel Moorestown platform"
+ help
+diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
+index 0c24f0f..bce8396 100644
+--- a/drivers/misc/Makefile
++++ b/drivers/misc/Makefile
+@@ -14,6 +14,7 @@ obj-$(CONFIG_TIFM_CORE) += tifm_core.o
+ obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
+ obj-$(CONFIG_PHANTOM) += phantom.o
+ obj-$(CONFIG_SGI_IOC4) += ioc4.o
++obj-$(CONFIG_MRST_IPC_TEST) += mrst_test_ipc/
+ obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
+ obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
+ obj-$(CONFIG_SGI_XP) += sgi-xp/
+diff --git a/drivers/misc/mrst_test_ipc/Makefile b/drivers/misc/mrst_test_ipc/Makefile
+new file mode 100644
+index 0000000..047d7db
+--- /dev/null
++++ b/drivers/misc/mrst_test_ipc/Makefile
+@@ -0,0 +1,5 @@
++
++obj-$(CONFIG_MRST_IPC_TEST) := test_ipc_mrst.o
++
++test_ipc_mrst-y := ipc_module.o \
++
+diff --git a/drivers/misc/mrst_test_ipc/ipc_module.c b/drivers/misc/mrst_test_ipc/ipc_module.c
+new file mode 100644
+index 0000000..51ef8de
+--- /dev/null
++++ b/drivers/misc/mrst_test_ipc/ipc_module.c
+@@ -0,0 +1,196 @@
++/*
++ * ipc_module.c - Interface for IPC driver funtions exported by IPC driver
++ * interfaces for Moorsetown platform.
++ *
++ * Copyright (C) 2009 Intel Corp
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This driver provides IOCTL interfaces to call IPC driver module for
++ * Moorestown platform.
++ *
++ * Author: Sreenidhi Gurudatt <sreenidhi.b.gurudatt@intel.com>
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++
++#include <linux/slab.h>
++#include <linux/fs.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/proc_fs.h>
++#include <linux/fcntl.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/ioport.h>
++
++#include <linux/io.h>
++#include <linux/uaccess.h>
++#include <linux/time.h>
++
++#include <asm/ipc_defs.h>
++
++static u32 major;
++#define MAX_FW_SIZE 264192
++
++int init_ipc_driver(void);
++int ipc_ioctl(struct inode *inode, struct file *filp, u32 cmd,
++ unsigned long arg);
++const struct file_operations ipc_fops = {
++ioctl:ipc_ioctl,
++};
++
++
++int ipc_ioctl(struct inode *inode, struct file *filp, u32 cmd,
++ unsigned long arg)
++{
++ u32 upper = 0;
++ struct ipc_pmic_reg_data p_read_reg_data = { 0 };
++ struct ipc_pmic_mod_reg_data p_read_mod_reg_data = { 0 };
++ struct ipc_pmic_reg_data p_write_reg_data = { 0 };
++ u8 ipc_blocking_flag = TRUE;
++
++ int ret_val;
++ /*Device FW upgrade support*/
++ u8 *fw_buf = NULL ;
++
++ switch (cmd) {
++ case IPC_PMIC_REGISTER_READ:
++ {
++ printk(KERN_INFO
++ "ipc_driver IPC_PMIC_REGISTER_READ received\n");
++ ret_val =
++ copy_from_user(&p_read_reg_data,
++ (struct ipc_pmic_reg_data *)arg,
++ sizeof(struct ipc_pmic_reg_data));
++ if (ret_val < 0) {
++ printk(KERN_DEBUG
++ "copy_from_user FAILED!! <%s> function\
++ in <%s> file at <%d> line no\n",\
++ __func__, __FILE__, __LINE__);
++ return E_READ_USER_DATA;
++ }
++
++ ipc_pmic_register_read(&p_read_reg_data);
++ ret_val =
++ copy_to_user((struct ipc_pmic_reg_data *)arg,
++ &p_read_reg_data,
++ sizeof(struct ipc_pmic_reg_data));
++ break;
++ }
++ case IPC_PMIC_REGISTER_READ_MODIFY:
++ {
++ printk(KERN_INFO "ioctl\
++ IPC_PMIC_REGISTER_READ_MODIFY received\n");
++ ret_val =
++ copy_from_user(&p_read_mod_reg_data,
++ (struct ipc_pmic_mod_reg_data *)arg,
++ sizeof(struct ipc_pmic_mod_reg_data));
++ if (ret_val < 0) {
++ printk(KERN_DEBUG
++ "copy_from_user FAILED!! <%s> function\
++ in <%s> file at <%d> line no\n",\
++ __func__, __FILE__, __LINE__);
++ return E_READ_USER_DATA;
++ }
++ ipc_pmic_register_read_modify(&p_read_mod_reg_data);
++ ret_val =
++ copy_to_user((struct ipc_pmic_mod_reg_data *)arg,
++ &p_read_mod_reg_data,
++ sizeof(struct ipc_pmic_mod_reg_data));
++ break;
++ }
++ case IPC_PMIC_REGISTER_WRITE:
++ {
++ ret_val =
++ copy_from_user(&p_write_reg_data,
++ (struct ipc_pmic_reg_data *)arg,
++ sizeof(struct ipc_pmic_reg_data));
++ if (ret_val < 0) {
++ printk(KERN_DEBUG
++ "copy_from_user FAILED!! <%s> function\
++ in <%s> file at <%d> line no\n",\
++ __func__, __FILE__, __LINE__);
++ return E_WRITE_USER_DATA;
++ }
++ ipc_pmic_register_write(&p_write_reg_data,
++ ipc_blocking_flag);
++ ret_val =
++ copy_to_user((struct ipc_pmic_reg_data *)arg,
++ &p_write_reg_data,
++ sizeof(struct ipc_pmic_reg_data));
++ break;
++ }
++ case DEVICE_FW_UPGRADE:
++ {
++ printk(KERN_INFO "ioctl DEVICE_FW_UPGRADE received\n");
++ fw_buf = kmalloc(MAX_FW_SIZE, GFP_KERNEL);
++ if (fw_buf == NULL) {
++ printk(KERN_ERR "ipc_test: kmalloc failed! \n");
++ return -EBUSY;
++ }
++ ret_val = copy_from_user(fw_buf, (u8 *)arg,
++ MAX_FW_SIZE);
++ if (ret_val < 0) {
++ printk(KERN_DEBUG
++ "copy_from_user FAILED!! <%s> function\
++ in <%s> file at <%d> line no\n",\
++ __func__, __FILE__, __LINE__);
++ return -EINVAL;
++ }
++ ipc_device_fw_upgrade(fw_buf, MAX_FW_SIZE);
++ break;
++ }
++ default:
++ {
++ printk(KERN_INFO
++ "ioctl <UNRECOGNIZED> received\n");
++ break;
++ }
++ }
++ return upper;
++}
++
++static int __init ipc_module_init(void)
++{
++ printk(KERN_INFO "Init ipc_module\n");
++
++ major = register_chrdev(0, "mid_ipc", &ipc_fops);
++ if (major < 0) {
++ printk(KERN_ERR "ipc_test : failed to get major\n");
++ return major;
++ }
++
++ init_ipc_driver ( ) ;
++ return SUCCESS;
++
++}
++
++static void __exit ipc_module_exit(void)
++{
++ unregister_chrdev(major, "mid_ipc");
++}
++
++module_init(ipc_module_init);
++module_exit(ipc_module_exit);
++
++MODULE_LICENSE("GPL V2");
++MODULE_DESCRIPTION("Test Driver for MRST IPC driver");
++MODULE_AUTHOR("Sreenidhi Gurudatt")
++
+--
+1.6.2.5
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-keypad-driver.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-keypad-driver.patch
new file mode 100644
index 0000000..453acc3
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-keypad-driver.patch
@@ -0,0 +1,839 @@
+From 3e65e5210bc3c81a58c60e365ee044f2e6044efc Mon Sep 17 00:00:00 2001
+From: Zheng Ba <zheng.ba@intel.com>
+Date: Sun, 22 Nov 2009 16:22:31 +0800
+Subject: [PATCH 047/104] Full keypad controller driver patch for Beta
+
+This patch adds the keypad support for Moorestown platform.
+
+Changes from Alpha2: solved "CRITICAL" issues marked by Klocwork
+ HSD sighting 3469242
+
+Signed-off-by: Zheng Ba <zheng.ba@intel.com>
+---
+ drivers/input/keyboard/Kconfig | 7 +
+ drivers/input/keyboard/Makefile | 1 +
+ drivers/input/keyboard/mrst_keypad.c | 782 ++++++++++++++++++++++++++++++++++
+ 3 files changed, 790 insertions(+), 0 deletions(-)
+ create mode 100644 drivers/input/keyboard/mrst_keypad.c
+
+diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
+index f7a4497..4c2bdaf 100644
+--- a/drivers/input/keyboard/Kconfig
++++ b/drivers/input/keyboard/Kconfig
+@@ -292,6 +292,13 @@ config KEYBOARD_MAX7359
+ To compile this driver as a module, choose M here: the
+ module will be called max7359_keypad.
+
++config KEYBOARD_MRST
++ tristate "MRST keypad support"
++ depends on GPIO_LANGWELL
++ help
++ Say Y if you want to use the mrst keypad
++ depends on GPIO_LANGWELL
++
+ config KEYBOARD_NEWTON
+ tristate "Newton keyboard"
+ select SERIO
+diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
+index 78654ef..0337f76 100644
+--- a/drivers/input/keyboard/Makefile
++++ b/drivers/input/keyboard/Makefile
+@@ -14,6 +14,7 @@ obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
+ obj-$(CONFIG_KEYBOARD_CORGI) += corgikbd.o
+ obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
+ obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
++obj-$(CONFIG_KEYBOARD_MRST) += mrst_keypad.o
+ obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
+ obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
+ obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
+diff --git a/drivers/input/keyboard/mrst_keypad.c b/drivers/input/keyboard/mrst_keypad.c
+new file mode 100644
+index 0000000..faf3ab7
+--- /dev/null
++++ b/drivers/input/keyboard/mrst_keypad.c
+@@ -0,0 +1,782 @@
++/*
++ * linux/drivers/input/keyboard/mrst_keypad.c
++ *
++ * Driver for the matrix keypad controller on Moorestown platform.
++ *
++ * Copyright (c) 2009 Intel Corporation.
++ * Created: Sep 18, 2008
++ * Updated: Apr 24, 2009
++ *
++ * Based on pxa27x_keypad.c by Rodolfo Giometti <giometti@linux.it>
++ * pxa27x_keypad.c is based on a previous implementation by Kevin O'Connor
++ * <kevin_at_keconnor.net> and Alex Osborne <bobofdoom@gmail.com> and
++ * on some suggestions by Nicolas Pitre <nico@cam.org>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#define DRV_NAME "mrst_keypad"
++#define DRV_VERSION "0.0.1"
++#define MRST_KEYPAD_DRIVER_NAME DRV_NAME " " DRV_VERSION
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/input.h>
++#include <linux/device.h>
++#include <linux/err.h>
++#include <linux/gpio.h>
++
++/*
++ * Keypad Controller registers
++ */
++#define KPC 0x0000 /* Keypad Control register */
++#define KPDK 0x0004 /* Keypad Direct Key register */
++#define KPREC 0x0008 /* Keypad Rotary Encoder register */
++#define KPMK 0x000C /* Keypad Matrix Key register */
++#define KPAS 0x0010 /* Keypad Automatic Scan register */
++
++/* Keypad Automatic Scan Multiple Key Presser register 0-3 */
++#define KPASMKP0 0x0014
++#define KPASMKP1 0x0018
++#define KPASMKP2 0x001C
++#define KPASMKP3 0x0020
++#define KPKDI 0x0024
++
++/* bit definitions */
++#define KPC_MKRN(n) ((((n) - 1) & 0x7) << 26) /* matrix key row number */
++#define KPC_MKCN(n) ((((n) - 1) & 0x7) << 23) /* matrix key col number */
++#define KPC_DKN(n) ((((n) - 1) & 0x7) << 6) /* direct key number */
++
++#define KPC_AS (0x1 << 30) /* Automatic Scan bit */
++#define KPC_ASACT (0x1 << 29) /* Automatic Scan on Activity */
++#define KPC_MI (0x1 << 22) /* Matrix interrupt bit */
++#define KPC_IMKP (0x1 << 21) /* Ignore Multiple Key Press */
++
++#define KPC_MS(n) (0x1 << (13 + (n))) /* Matrix scan line 'n' */
++#define KPC_MS_ALL (0xff << 13)
++
++#define KPC_ME (0x1 << 12) /* Matrix Keypad Enable */
++#define KPC_MIE (0x1 << 11) /* Matrix Interrupt Enable */
++#define KPC_DK_DEB_SEL (0x1 << 9) /* Direct Keypad Debounce Select */
++#define KPC_DI (0x1 << 5) /* Direct key interrupt bit */
++#define KPC_RE_ZERO_DEB (0x1 << 4) /* Rotary Encoder Zero Debounce */
++#define KPC_REE1 (0x1 << 3) /* Rotary Encoder1 Enable */
++#define KPC_REE0 (0x1 << 2) /* Rotary Encoder0 Enable */
++#define KPC_DE (0x1 << 1) /* Direct Keypad Enable */
++#define KPC_DIE (0x1 << 0) /* Direct Keypad interrupt Enable */
++
++#define KPDK_DKP (0x1 << 31)
++#define KPDK_DK(n) ((n) & 0xff)
++
++#define KPREC_OF1 (0x1 << 31)
++#define kPREC_UF1 (0x1 << 30)
++#define KPREC_OF0 (0x1 << 15)
++#define KPREC_UF0 (0x1 << 14)
++
++#define KPREC_RECOUNT0(n) ((n) & 0xff)
++#define KPREC_RECOUNT1(n) (((n) >> 16) & 0xff)
++
++#define KPMK_MKP (0x1 << 31)
++#define KPAS_SO (0x1 << 31)
++#define KPASMKPx_SO (0x1 << 31)
++
++#define KPAS_MUKP(n) (((n) >> 26) & 0x1f)
++#define KPAS_RP(n) (((n) >> 4) & 0xf)
++#define KPAS_CP(n) ((n) & 0xf)
++
++#define KPASMKP_MKC_MASK (0xff)
++
++#define KEYPAD_MATRIX_GPIO_IN_PIN 24
++#define KEYPAD_MATRIX_GPIO_OUT_PIN 32
++#define KEYPAD_DIRECT_GPIO_IN_PIN 40
++
++
++static struct pci_device_id keypad_pci_tbl[] = {
++ {0x8086, 0x0805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
++ {0,}
++};
++MODULE_DEVICE_TABLE(pci, keypad_pci_tbl);
++
++#define keypad_readl(off) readl(keypad->mmio_base + (off))
++#define keypad_writel(off, v) writel((v), keypad->mmio_base + (off))
++
++#define MAX_MATRIX_KEY_NUM (8 * 8)
++#define MAX_DIRECT_KEY_NUM (4)
++
++#define MAX_MATRIX_KEY_ROWS (8)
++#define MAX_MATRIX_KEY_COLS (8)
++#define DEBOUNCE_INTERVAL 100
++
++#define KEY_HALFSHUTTER KEY_PROG1
++#define KEY_FULLSHUTTER KEY_CAMERA
++
++static unsigned int mrst_keycode[MAX_MATRIX_KEY_NUM] = {
++ KEY_F, KEY_D, KEY_E, KEY_GRAVE, KEY_C, KEY_R, KEY_4, KEY_V,
++ KEY_NUMLOCK, KEY_LEFTCTRL, KEY_Z, KEY_W, KEY_2, KEY_X, KEY_S, KEY_3,
++ KEY_EQUAL, KEY_N, KEY_H, KEY_U, KEY_7, KEY_M, KEY_J, KEY_8,
++ KEY_6, KEY_5, KEY_APOSTROPHE, KEY_G, KEY_T, KEY_SPACE, KEY_B, KEY_Y,
++ KEY_MINUS, KEY_0, KEY_LEFT, KEY_SEMICOLON, KEY_P, KEY_DOWN, KEY_UP,
++ KEY_BACKSPACE,
++ KEY_L, KEY_K, KEY_I, KEY_SLASH, KEY_COMMA, KEY_O, KEY_9, KEY_DOT,
++ KEY_Q, KEY_TAB, KEY_ESC, KEY_LEFTSHIFT, KEY_CAPSLOCK, KEY_1, KEY_FN,
++ KEY_A,
++ 0, KEY_RIGHTSHIFT, KEY_ENTER, 0, KEY_RIGHT, 0, 0, 0,
++};
++
++/* NumLk key mapping */
++static unsigned int mrst_keycode_numlck[MAX_MATRIX_KEY_NUM] = {
++ KEY_F, KEY_D, KEY_E, KEY_GRAVE, KEY_C, KEY_R, KEY_4, KEY_V,
++ KEY_NUMLOCK, KEY_LEFTCTRL, KEY_Z, KEY_W, KEY_2, KEY_X, KEY_S, KEY_3,
++ KEY_EQUAL, KEY_N, KEY_H, KEY_KP4, KEY_KP7, KEY_KP0, KEY_KP1, KEY_KP8,
++ KEY_6, KEY_5, KEY_APOSTROPHE, KEY_G, KEY_T, KEY_SPACE, KEY_B, KEY_Y,
++ KEY_MINUS, KEY_KPSLASH, KEY_LEFT, KEY_KPMINUS, KEY_KPASTERISK,
++ KEY_DOWN, KEY_UP, KEY_BACKSPACE,
++ KEY_KP3, KEY_KP2, KEY_KP5, KEY_SLASH, KEY_KPDOT, KEY_KP6, KEY_KP9,
++ KEY_KPPLUS,
++ KEY_Q, KEY_TAB, KEY_ESC, KEY_LEFTSHIFT, KEY_CAPSLOCK, KEY_1, KEY_FN,
++ KEY_A,
++ 0, KEY_RIGHTSHIFT, KEY_ENTER, 0, KEY_RIGHT, 0, 0, 0,
++};
++
++/* Fn key mapping */
++static unsigned int mrst_keycode_fn[MAX_MATRIX_KEY_NUM] = {
++ 0, 0, 0, 0, 0, 0, 0, 0,
++ 0, 0, 0, 0, 0, 0, 0, 0,
++ KEY_LEFTBRACE, 0, 0, 0, 0, 0, 0, 0,
++ 0, 0, 0, 0, 0, 0, 0, 0,
++ 0, 0, KEY_HOME, 0, 0, KEY_PAGEDOWN, KEY_PAGEUP, 0,
++ 0, 0, 0, KEY_RIGHTBRACE, KEY_LEFTBRACE, 0, 0, KEY_RIGHTBRACE,
++ 0, 0, 0, KEY_LEFTSHIFT, 0, 0, KEY_FN, 0,
++ 0, KEY_RIGHTSHIFT, 0, 0, KEY_END, 0, 0, 0,
++};
++
++/* direct key map */
++static unsigned int mrst_direct_keycode[MAX_DIRECT_KEY_NUM] = {
++ KEY_VOLUMEUP, KEY_VOLUMEDOWN, KEY_HALFSHUTTER, KEY_FULLSHUTTER,
++};
++
++struct mrst_keypad {
++
++ struct input_dev *input_dev;
++ void __iomem *mmio_base;
++
++ unsigned int matrix_key_rows;
++ unsigned int matrix_key_cols;
++ int matrix_key_map_size;
++
++ /* key debounce interval */
++ unsigned int debounce_interval;
++
++ /* matrix key code map */
++ unsigned int matrix_keycodes[MAX_MATRIX_KEY_NUM];
++
++ /* state row bits of each column scan */
++ uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS];
++ uint32_t direct_key_state;
++
++ unsigned int direct_key_mask;
++
++ int direct_key_num;
++
++ unsigned int direct_key_map[MAX_DIRECT_KEY_NUM];
++
++ /* rotary encoders 0 */
++ int enable_rotary0;
++ int rotary0_rel_code;
++ int rotary0_up_key;
++ int rotary0_down_key;
++
++ /* rotary encoders 1 */
++ int enable_rotary1;
++ int rotary1_rel_code;
++ int rotary1_up_key;
++ int rotary1_down_key;
++
++ int rotary_rel_code[2];
++ int rotary_up_key[2];
++ int rotary_down_key[2];
++
++ /* Fn key */
++ int fn;
++
++ /* Number Lock key */
++ int numlck;
++
++ /* FIXME:
++ * Keypad controller likely issues fake interrupts
++ * when direct key status registers were first initialized
++ * This value assures this interrupt will not be proceeded.
++ */
++ int count;
++};
++
++static void mrst_keypad_build_keycode(struct mrst_keypad *keypad)
++{
++ struct input_dev *input_dev = keypad->input_dev;
++ unsigned int *key;
++ int i, code;
++
++ keypad->matrix_key_rows = MAX_MATRIX_KEY_ROWS;
++ keypad->matrix_key_cols = MAX_MATRIX_KEY_COLS;
++ keypad->matrix_key_map_size = MAX_MATRIX_KEY_NUM;
++ keypad->debounce_interval = DEBOUNCE_INTERVAL;
++
++ /* three sets of keycode here */
++ if (keypad->fn)
++ memcpy(keypad->matrix_keycodes, mrst_keycode_fn,
++ sizeof(keypad->matrix_keycodes));
++ else if (keypad->numlck)
++ memcpy(keypad->matrix_keycodes, mrst_keycode_numlck,
++ sizeof(keypad->matrix_keycodes));
++ else
++ memcpy(keypad->matrix_keycodes, mrst_keycode,
++ sizeof(keypad->matrix_keycodes));
++
++ memcpy(keypad->direct_key_map, mrst_direct_keycode,
++ sizeof(keypad->direct_key_map));
++
++ key = &keypad->matrix_keycodes[0];
++ for (i = 0; i < MAX_MATRIX_KEY_NUM; i++, key++) {
++ code = (*key) & 0xffffff;
++ set_bit(code, input_dev->keybit);
++ }
++
++ key = &keypad->direct_key_map[0];
++ for (i = 0; i < MAX_DIRECT_KEY_NUM; i++, key++) {
++ code = (*key) & 0xffffff;
++ set_bit(code, input_dev->keybit);
++ }
++
++ keypad->direct_key_num = MAX_DIRECT_KEY_NUM;
++ keypad->enable_rotary0 = 0;
++ keypad->enable_rotary1 = 0;
++
++}
++
++static inline unsigned int lookup_matrix_keycode(
++ struct mrst_keypad *keypad, int row, int col)
++{
++ return keypad->matrix_keycodes[(row << 3) + col];
++}
++
++static void handle_constant_keypress(struct mrst_keypad *keypad,
++ int num, int col, int row,
++ int state)
++{
++ struct input_dev *dev = keypad->input_dev;
++
++ switch (num) {
++ case 0:
++ if (keypad->fn)
++ keypad->fn = 0;
++ /* Manually release special keys (Fn combinations) */
++ if (test_bit(KEY_LEFTBRACE, dev->key))
++ input_report_key(dev, KEY_LEFTBRACE, 0);
++ if (test_bit(KEY_RIGHTBRACE, dev->key))
++ input_report_key(dev, KEY_RIGHTBRACE, 0);
++ if (test_bit(KEY_HOME, dev->key))
++ input_report_key(dev, KEY_RIGHTBRACE, 0);
++ if (test_bit(KEY_END, dev->key))
++ input_report_key(dev, KEY_END, 0);
++ if (test_bit(KEY_PAGEUP, dev->key))
++ input_report_key(dev, KEY_RIGHTBRACE, 0);
++ if (test_bit(KEY_PAGEDOWN, dev->key))
++ input_report_key(dev, KEY_RIGHTBRACE, 0);
++
++ return;
++
++ case 1:
++ /* if Fn pressed */
++ if (col == 6 && row == 6)
++ keypad->fn = 1;
++ /* key '[' */
++ else if ((col == 0 && row == 2) && state) {
++ keypad->fn = 0;
++ set_bit(KEY_EQUAL, dev->key);
++ dev->repeat_key = KEY_EQUAL;
++ }
++ /* key ']' */
++ else if ((col == 3 && row == 5) && state) {
++ keypad->fn = 0;
++ set_bit(KEY_SLASH, dev->key);
++ dev->repeat_key = KEY_SLASH;
++ }
++ /* key '{' */
++ else if ((col == 4 && row == 5) && state) {
++ keypad->fn = 0;
++ set_bit(KEY_COMMA, dev->key);
++ dev->repeat_key = KEY_COMMA;
++ }
++ /* key '}' */
++ else if ((col == 7 && row == 5) && state) {
++ keypad->fn = 0;
++ set_bit(KEY_DOT, dev->key);
++ dev->repeat_key = KEY_DOT;
++ }
++
++ return;
++ default:
++ ;
++ }
++}
++
++static void mrst_keypad_scan_matrix(struct mrst_keypad *keypad)
++{
++ int row, col, num_keys_pressed = 0;
++ uint32_t new_state[MAX_MATRIX_KEY_COLS];
++ uint32_t kpas = keypad_readl(KPAS);
++ int status;
++
++ num_keys_pressed = KPAS_MUKP(kpas);
++
++ memset(new_state, 0, sizeof(new_state));
++
++ if (num_keys_pressed == 0) {
++ status = keypad->matrix_key_state[0] & (1 << 0);
++ handle_constant_keypress(keypad, num_keys_pressed, 0, 0,
++ status);
++
++ goto scan;
++ }
++
++ if (num_keys_pressed == 1) {
++ col = KPAS_CP(kpas);
++ row = KPAS_RP(kpas);
++
++ /* if invalid row/col, treat as no key pressed */
++ if (col < MAX_MATRIX_KEY_COLS &&
++ row < MAX_MATRIX_KEY_ROWS) {
++
++ /* if NumLk pressed */
++ if (col == 0 && row == 1)
++ keypad->numlck = !keypad->numlck;
++
++ status = keypad->matrix_key_state[col] & (1 << row);
++ handle_constant_keypress(keypad, num_keys_pressed, col,
++ row, status);
++
++ new_state[col] = (1 << row);
++ }
++
++ goto scan;
++ }
++
++ if (num_keys_pressed > 1) {
++ uint32_t kpasmkp0 = keypad_readl(KPASMKP0);
++ uint32_t kpasmkp1 = keypad_readl(KPASMKP1);
++ uint32_t kpasmkp2 = keypad_readl(KPASMKP2);
++ uint32_t kpasmkp3 = keypad_readl(KPASMKP3);
++
++ new_state[0] = kpasmkp0 & KPASMKP_MKC_MASK;
++ new_state[1] = (kpasmkp0 >> 16) & KPASMKP_MKC_MASK;
++ new_state[2] = kpasmkp1 & KPASMKP_MKC_MASK;
++ new_state[3] = (kpasmkp1 >> 16) & KPASMKP_MKC_MASK;
++ new_state[4] = kpasmkp2 & KPASMKP_MKC_MASK;
++ new_state[5] = (kpasmkp2 >> 16) & KPASMKP_MKC_MASK;
++ new_state[6] = kpasmkp3 & KPASMKP_MKC_MASK;
++ new_state[7] = (kpasmkp3 >> 16) & KPASMKP_MKC_MASK;
++
++ /* if Fn is pressed, all SHIFT is ignored, except when {
++ * or } is pressed */
++ if (new_state[6] & 0x40) {
++ keypad->fn = 1;
++ new_state[3] &= ~0x40;
++ new_state[1] &= ~0x80;
++ }
++
++ if (keypad->fn == 1) {
++ /* if { or } pressed */
++ if ((new_state[4] & 0x20) || (new_state[7] & 0x20)) {
++ /* as if LEFTSHIFT is pressed */
++ new_state[3] |= 0x40;
++ /* as if Fn not pressed */
++ new_state[6] &= ~0x40;
++ }
++ /* if [ or ] pressed */
++ if ((new_state[0] & 0x04) || (new_state[3] & 0x20))
++ /* as if Fn not pressed */
++ new_state[6] &= ~0x40;
++ }
++ }
++
++
++scan:
++ /* re-build keycode */
++ mrst_keypad_build_keycode(keypad);
++
++ for (col = 0; col < keypad->matrix_key_cols; col++) {
++ uint32_t bits_changed;
++
++ bits_changed = keypad->matrix_key_state[col] ^ new_state[col];
++ if (bits_changed == 0)
++ continue;
++
++ for (row = 0; row < keypad->matrix_key_rows; row++) {
++ if ((bits_changed & (1 << row)) == 0)
++ continue;
++
++ input_report_key(keypad->input_dev,
++ lookup_matrix_keycode(keypad, row, col),
++ new_state[col] & (1 << row));
++ }
++ }
++ input_sync(keypad->input_dev);
++ memcpy(keypad->matrix_key_state, new_state, sizeof(new_state));
++}
++
++#define DEFAULT_KPREC (0x007f007f)
++
++static inline int rotary_delta(uint32_t kprec)
++{
++ if (kprec & KPREC_OF0)
++ return (kprec & 0xff) + 0x7f;
++ else if (kprec & KPREC_UF0)
++ return (kprec & 0xff) - 0x7f - 0xff;
++ else
++ return (kprec & 0xff) - 0x7f;
++}
++
++static void report_rotary_event(struct mrst_keypad *keypad, int r, int delta)
++{
++ struct input_dev *dev = keypad->input_dev;
++
++ if (delta == 0)
++ return;
++
++ if (keypad->rotary_up_key[r] && keypad->rotary_down_key[r]) {
++ int keycode = (delta > 0) ? keypad->rotary_up_key[r] :
++ keypad->rotary_down_key[r];
++
++ /* simulate a press-n-release */
++ input_report_key(dev, keycode, 1);
++ input_sync(dev);
++ input_report_key(dev, keycode, 0);
++ input_sync(dev);
++ } else {
++ input_report_rel(dev, keypad->rotary_rel_code[r], delta);
++ input_sync(dev);
++ }
++}
++
++static void mrst_keypad_scan_rotary(struct mrst_keypad *keypad)
++{
++ unsigned int kprec;
++
++ /* read and reset to default count value */
++ kprec = keypad_readl(KPREC);
++ keypad_writel(KPREC, DEFAULT_KPREC);
++
++ if (keypad->enable_rotary0)
++ report_rotary_event(keypad, 0, rotary_delta(kprec));
++
++ if (keypad->enable_rotary1)
++ report_rotary_event(keypad, 1, rotary_delta(kprec >> 16));
++}
++
++static void mrst_keypad_scan_direct(struct mrst_keypad *keypad)
++{
++ unsigned int new_state;
++ uint32_t kpdk, bits_changed;
++ int i;
++
++ kpdk = keypad_readl(KPDK);
++
++ if (keypad->enable_rotary0 || keypad->enable_rotary1)
++ mrst_keypad_scan_rotary(keypad);
++
++ if ((keypad->direct_key_map == NULL) || (++keypad->count == 1)) {
++ keypad->direct_key_state = 0;
++ return;
++ }
++
++ new_state = KPDK_DK(kpdk) & keypad->direct_key_mask;
++ new_state = ~new_state;
++ bits_changed = keypad->direct_key_state ^ new_state;
++
++ if (bits_changed == 0)
++ return;
++
++ for (i = 0; i < keypad->direct_key_num; i++) {
++ if (bits_changed & (1 << i)) {
++ input_report_key(keypad->input_dev,
++ keypad->direct_key_map[i],
++ (new_state & (1 << i)));
++ }
++ }
++ input_sync(keypad->input_dev);
++ keypad->direct_key_state = new_state;
++
++}
++
++static irqreturn_t mrst_keypad_irq_handler(int irq, void *dev_id)
++{
++ struct mrst_keypad *keypad = dev_id;
++ unsigned long kpc = keypad_readl(KPC);
++
++ if (kpc & KPC_DI)
++ mrst_keypad_scan_direct(keypad);
++
++ if (kpc & KPC_MI)
++ mrst_keypad_scan_matrix(keypad);
++
++ return IRQ_HANDLED;
++}
++
++static int mrst_keypad_gpio_init(void)
++{
++ int i, err, cnt = 0;
++ int pins = KEYPAD_MATRIX_GPIO_IN_PIN + MAX_MATRIX_KEY_ROWS +
++ MAX_MATRIX_KEY_COLS + MAX_DIRECT_KEY_NUM;
++
++ /* explicitely tell which pins have been occupied... */
++ for (i = KEYPAD_MATRIX_GPIO_IN_PIN; i < pins; i++, cnt++) {
++ err = gpio_request(i, NULL);
++ if (err) {
++ printk(KERN_ERR "GPIO pin %d failed to request.\n", i);
++ goto err_request;
++ }
++ }
++
++ for (i = 0; i < MAX_MATRIX_KEY_ROWS; i++)
++ gpio_direction_input(KEYPAD_MATRIX_GPIO_IN_PIN + i);
++
++ for (i = 0; i < MAX_MATRIX_KEY_COLS; i++)
++ /* __gpio_set_value(KEYPAD_GPIO_OUT_PIN + i, 1); */
++ /* set action is executed in gpio_direction_output() */
++ gpio_direction_output(KEYPAD_MATRIX_GPIO_OUT_PIN + i, 1);
++
++ for (i = 0; i < MAX_DIRECT_KEY_NUM; i++)
++ gpio_direction_input(KEYPAD_DIRECT_GPIO_IN_PIN + i);
++
++ return 0;
++
++err_request:
++ /* free requested pins... */
++ for (i = KEYPAD_MATRIX_GPIO_IN_PIN + cnt - 1;
++ i >= KEYPAD_MATRIX_GPIO_IN_PIN; i--)
++ gpio_free(i);
++ return err;
++}
++
++static void mrst_keypad_config(struct mrst_keypad *keypad)
++{
++ unsigned int mask = 0, direct_key_num = 0;
++ unsigned long kpc = 0;
++
++ /* enable matrix keys with automatic scan */
++ if (keypad->matrix_key_rows && keypad->matrix_key_cols) {
++ kpc |= KPC_ASACT | KPC_MIE | KPC_ME | KPC_MS_ALL;
++ kpc |= KPC_MKRN(keypad->matrix_key_rows) |
++ KPC_MKCN(keypad->matrix_key_cols);
++ }
++
++ /* enable rotary key, debounce interval same as direct keys */
++ if (keypad->enable_rotary0) {
++ mask |= 0x03;
++ direct_key_num = 2;
++ kpc |= KPC_REE0;
++ }
++
++ if (keypad->enable_rotary1) {
++ mask |= 0x0c;
++ direct_key_num = 4;
++ kpc |= KPC_REE1;
++ }
++
++ if (keypad->direct_key_num > direct_key_num)
++ direct_key_num = keypad->direct_key_num;
++
++ keypad->direct_key_mask = ((2 << direct_key_num) - 1) & ~mask;
++
++ /* enable direct key */
++ if (direct_key_num)
++ kpc |= KPC_DE | KPC_DIE | KPC_DKN(direct_key_num);
++
++ keypad_writel(KPC, kpc);
++ keypad_writel(KPREC, DEFAULT_KPREC);
++ keypad_writel(KPKDI, keypad->debounce_interval);
++}
++
++static int mrst_keypad_open(struct input_dev *dev)
++{
++ struct mrst_keypad *keypad = input_get_drvdata(dev);
++ int err;
++
++ err = mrst_keypad_gpio_init();
++ if (err)
++ return err;
++ mrst_keypad_config(keypad);
++
++ return 0;
++}
++
++static void mrst_keypad_close(struct input_dev *dev)
++{
++ int pins = KEYPAD_MATRIX_GPIO_IN_PIN + MAX_MATRIX_KEY_ROWS +
++ MAX_MATRIX_KEY_COLS + MAX_DIRECT_KEY_NUM;
++
++ int i;
++ /* free occupied pins */
++ for (i = KEYPAD_MATRIX_GPIO_IN_PIN; i < pins; i++)
++ gpio_free(i);
++}
++
++static int __devinit mrst_keypad_probe(struct pci_dev *pdev,
++ const struct pci_device_id *ent)
++{
++ struct mrst_keypad *keypad;
++ struct input_dev *input_dev;
++ int error;
++
++#ifndef MODULE
++ printk(KERN_INFO MRST_KEYPAD_DRIVER_NAME "\n");
++#endif
++
++ keypad = kzalloc(sizeof(struct mrst_keypad), GFP_KERNEL);
++ if (keypad == NULL) {
++ dev_err(&pdev->dev, "failed to allocate driver data\n");
++ return -ENOMEM;
++ }
++
++ error = pci_enable_device(pdev);
++ if (error || (pdev->irq < 0)) {
++ dev_err(&pdev->dev, "failed to enable device/get irq\n");
++ error = -ENXIO;
++ goto failed_free;
++ }
++
++ error = pci_request_regions(pdev, DRV_NAME);
++ if (error) {
++ dev_err(&pdev->dev, "failed to request I/O memory\n");
++ goto failed_free;
++ }
++
++ keypad->mmio_base = ioremap(pci_resource_start(pdev, 0),
++ pci_resource_len(pdev, 0));
++ if (keypad->mmio_base == NULL) {
++ dev_err(&pdev->dev, "failed to remap I/O memory\n");
++ error = -ENXIO;
++ goto failed_free_mem;
++ }
++
++ /* Create and register the input driver. */
++ input_dev = input_allocate_device();
++ if (!input_dev) {
++ dev_err(&pdev->dev, "failed to allocate input device\n");
++ error = -ENOMEM;
++ goto failed_free_io;
++ }
++
++ input_dev->name = pci_name(pdev);
++ input_dev->id.bustype = BUS_PCI;
++ input_dev->open = mrst_keypad_open;
++ input_dev->close = mrst_keypad_close;
++ input_dev->dev.parent = &pdev->dev;
++
++ input_dev->keycode = keypad->matrix_keycodes;
++ input_dev->keycodesize = sizeof(unsigned int);
++ input_dev->keycodemax = ARRAY_SIZE(mrst_keycode);
++
++ keypad->input_dev = input_dev;
++ keypad->fn = 0;
++ keypad->numlck = 0;
++ /*FIXME*/keypad->count = 0;
++ input_set_drvdata(input_dev, keypad);
++
++ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) |
++ BIT_MASK(EV_REL);
++
++ mrst_keypad_build_keycode(keypad);
++ pci_set_drvdata(pdev, keypad);
++
++ error = request_irq(pdev->irq, mrst_keypad_irq_handler, IRQF_SHARED,
++ pci_name(pdev), keypad);
++ if (error) {
++ dev_err(&pdev->dev, "failed to request IRQ\n");
++ goto failed_free_dev;
++ }
++
++ /* Register the input device */
++ error = input_register_device(input_dev);
++ if (error) {
++ dev_err(&pdev->dev, "failed to register input device\n");
++ goto failed_free_irq;
++ }
++
++ printk(KERN_INFO "*** keypad driver load successfully ***\n");
++ return 0;
++
++failed_free_irq:
++ free_irq(pdev->irq, keypad);
++ pci_set_drvdata(pdev, NULL);
++failed_free_dev:
++ input_free_device(input_dev);
++failed_free_io:
++ iounmap(keypad->mmio_base);
++failed_free_mem:
++ pci_release_regions(pdev);
++failed_free:
++ kfree(keypad);
++ return error;
++}
++
++static void __devexit mrst_keypad_remove(struct pci_dev *pdev)
++{
++ struct mrst_keypad *keypad = pci_get_drvdata(pdev);
++ int i;
++ int pins = KEYPAD_MATRIX_GPIO_IN_PIN + MAX_MATRIX_KEY_ROWS +
++ MAX_MATRIX_KEY_COLS + MAX_DIRECT_KEY_NUM;
++
++ for (i = pins - 1; i > KEYPAD_MATRIX_GPIO_IN_PIN; i--)
++ gpio_free(i);
++
++ free_irq(pdev->irq, keypad);
++ input_unregister_device(keypad->input_dev);
++ iounmap(keypad->mmio_base);
++ pci_release_regions(pdev);
++ pci_set_drvdata(pdev, NULL);
++ kfree(keypad);
++}
++
++
++static struct pci_driver mrst_keypad_driver = {
++ .name = DRV_NAME,
++ .id_table = keypad_pci_tbl,
++ .probe = mrst_keypad_probe,
++ .remove = __devexit_p(mrst_keypad_remove),
++#ifdef CONFIG_PM
++ .suspend = NULL,
++ .resume = NULL,
++#endif /* CONFIG_PM */
++};
++
++static int __init mrst_keypad_init(void)
++{
++ return pci_register_driver(&mrst_keypad_driver);
++}
++
++static void __exit mrst_keypad_exit(void)
++{
++ pci_unregister_driver(&mrst_keypad_driver);
++}
++
++module_init(mrst_keypad_init);
++module_exit(mrst_keypad_exit);
++
++MODULE_DESCRIPTION("MRST Keypad Controller Driver");
++MODULE_LICENSE("GPL v2");
+--
+1.6.2.5
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-langwell-dma-driver-3.0.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-langwell-dma-driver-3.0.patch
new file mode 100644
index 0000000..d539b60
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-langwell-dma-driver-3.0.patch
@@ -0,0 +1,2469 @@
+Index: linux-2.6.33/drivers/dma/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/dma/Kconfig
++++ linux-2.6.33/drivers/dma/Kconfig
+@@ -20,6 +20,37 @@ comment "DMA Devices"
+ config ASYNC_TX_DISABLE_CHANNEL_SWITCH
+ bool
+
++config INTEL_LNW_DMAC1
++ bool "Intel MID DMA support for LPE DMA"
++ depends on PCI && X86 && (SND_INTEL_SST||SND_INTEL_LPE)
++ select DMA_ENGINE
++ help
++ Enable support for the Intel(R) MID DMA1 engine present
++ in Intel MID chipsets.
++
++ Say Y here if you have such a chipset.
++
++ If unsure, say N.
++
++config INTEL_LNW_DMAC2
++ bool "Intel MID DMA support for SC DMA"
++ depends on PCI && X86
++ select DMA_ENGINE
++ help
++ Enable support for the Intel(R) MID DMA2 engine present
++ in Intel MID chipsets.
++
++ Say Y here if you have such a chipset.
++
++ If unsure, say N.
++
++config LNW_DMA_DEBUG
++ bool "LNW DMA Debugging Enable"
++ depends on INTEL_LNW_DMAC1 || INTEL_LNW_DMAC2
++ default N
++ help
++ Enable logging in the LNW DMA drivers
++
+ config INTEL_IOATDMA
+ tristate "Intel I/OAT DMA support"
+ depends on PCI && X86
+Index: linux-2.6.33/drivers/dma/Makefile
+===================================================================
+--- linux-2.6.33.orig/drivers/dma/Makefile
++++ linux-2.6.33/drivers/dma/Makefile
+@@ -1,5 +1,7 @@
+ obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
+ obj-$(CONFIG_NET_DMA) += iovlock.o
++obj-$(CONFIG_INTEL_LNW_DMAC2) += lnw_dmac2.o
++obj-$(CONFIG_INTEL_LNW_DMAC1) += lnw_dmac1.o
+ obj-$(CONFIG_DMATEST) += dmatest.o
+ obj-$(CONFIG_INTEL_IOATDMA) += ioat/
+ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
+Index: linux-2.6.33/drivers/dma/lnw_dma_regs.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/dma/lnw_dma_regs.h
+@@ -0,0 +1,176 @@
++/*
++ * lnw_dma.c - Intel Langwell DMA Drivers
++ *
++ * Copyright (C) 2008-09 Intel Corp
++ * Author: Vinod Koul <vinod.koul@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ *
++ */
++#ifndef __LNW_DMA_REGS_H__
++#define __LNW_DMA_REGS_H__
++
++#include <linux/dmaengine.h>
++#include <linux/dmapool.h>
++#include <linux/pci_ids.h>
++
++#define LNW_DMA_DRIVER_VERSION "0.3.1"
++
++#define DMA_DEBUG
++
++#define REG_BIT0 0x00000001
++#define REG_BIT8 0x00000100
++
++#define UNMASK_INTR_REG(chan_num) \
++ ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
++#define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num)
++
++#define ENABLE_CHANNEL(chan_num) \
++ ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
++
++#define DESCS_PER_CHANNEL 16
++/*DMA Registers*/
++/*registers associated with channel programming*/
++#define DMA_REG_SIZE 0x400
++#define DMA_CH_SIZE 0x58
++
++/*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/
++#define SAR 0x00 /* Source Address Register*/
++#define DAR 0x08 /* Destination Address Register*/
++#define CTL_LOW 0x18 /* Control Register*/
++#define CTL_HIGH 0x1C /* Control Register*/
++#define CFG_LOW 0x40 /* Configuration Register Low*/
++#define CFG_HIGH 0x44 /* Configuration Register high*/
++
++#define STATUS_TFR 0x2E8
++#define STATUS_BLOCK 0x2F0
++#define STATUS_ERR 0x308
++
++#define RAW_TFR 0x2C0
++#define RAW_BLOCK 0x2C8
++#define RAW_ERR 0x2E0
++
++#define MASK_TFR 0x310
++#define MASK_BLOCK 0x318
++#define MASK_SRC_TRAN 0x320
++#define MASK_DST_TRAN 0x328
++#define MASK_ERR 0x330
++
++#define CLEAR_TFR 0x338
++#define CLEAR_BLOCK 0x340
++#define CLEAR_SRC_TRAN 0x348
++#define CLEAR_DST_TRAN 0x350
++#define CLEAR_ERR 0x358
++
++#define INTR_STATUS 0x360
++#define DMA_CFG 0x398
++#define DMA_CHAN_EN 0x3A0
++
++/**
++ * struct lnw_dma_chan - internal representation of a DMA channel
++ */
++struct lnw_dma_chan {
++ struct dma_chan chan;
++ void __iomem *ch_regs;
++ void __iomem *dma_base;
++ int ch_id;
++ spinlock_t lock;
++ dma_cookie_t completed;
++ struct list_head active_list;
++ struct list_head queue;
++ struct list_head free_list;
++ struct lnw_dma_slave *slave;
++ unsigned int descs_allocated;
++ struct lnwdma_device *dma;
++ bool in_use;
++};
++static inline struct lnw_dma_chan *to_lnw_dma_chan(struct dma_chan *chan)
++{
++ return container_of(chan, struct lnw_dma_chan, chan);
++}
++
++/**
++ * struct lnwdma_device - internal representation of a DMA device
++ * @pdev: PCI device
++ * @dma_base: MMIO register space base address of DMA
++ * @lpe_base: MMIO register space base address of LPE
++ * @dma_pool: for allocating DMA descriptors
++ * @common: embedded struct dma_device
++ * @idx: per channel data
++ */
++struct lnwdma_device {
++ struct pci_dev *pdev;
++ void __iomem *dma_base;
++ struct pci_pool *dma_pool;
++ struct dma_device common;
++ struct tasklet_struct tasklet;
++ struct lnw_dma_chan ch[MAX_CHAN];
++};
++
++static inline struct lnwdma_device *to_lnwdma_device(struct dma_device *common)
++{
++ return container_of(common, struct lnwdma_device, common);
++}
++
++struct lnw_dma_desc {
++ void __iomem *block; /*ch ptr*/
++ struct list_head desc_node;
++ struct dma_async_tx_descriptor txd;
++ size_t len;
++ dma_addr_t sar;
++ dma_addr_t dar;
++ u32 cfg_hi;
++ u32 cfg_lo;
++ u32 ctl_lo;
++ u32 ctl_hi;
++ dma_addr_t next;
++ enum dma_data_direction dirn;
++ enum dma_status status;
++ dma_async_tx_callback callback;
++ void *callback_param;
++ enum lnw_dma_width width; /*width of DMA txn*/
++ enum lnw_dma_mode cfg_mode; /*mode configuration*/
++
++};
++
++static inline int test_ch_en(void __iomem *dma, u32 ch_no)
++{
++ u32 en_reg = ioread32(dma + DMA_CHAN_EN);
++ return (en_reg >> ch_no) & 0x1;
++}
++
++static inline struct lnw_dma_desc *to_lnw_dma_desc
++ (struct dma_async_tx_descriptor *txd)
++{
++ return container_of(txd, struct lnw_dma_desc, txd);
++}
++
++#define _dma_printk(level, format, arg...) \
++ printk(level "LNW_DMA: %s %d " format, __func__, __LINE__, ## arg)
++
++#ifdef CONFIG_LNW_DMA_DEBUG
++#define dma_dbg(format, arg...) _dma_printk(KERN_DEBUG, "DBG " format , ## arg)
++#else
++#define dma_dbg(format, arg...) do {} while (0);
++#endif
++
++#define dma_err(format, arg...) _dma_printk(KERN_ERR, "ERR " format , ## arg)
++#define dma_info(format, arg...) \
++ _dma_printk(KERN_INFO , "INFO " format , ## arg)
++
++#endif /*__LNW_DMA_REGS_H__*/
+Index: linux-2.6.33/drivers/dma/lnw_dmac1.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/dma/lnw_dmac1.c
+@@ -0,0 +1,957 @@
++/*
++ * lnw_dmac1.c - Intel Langwell DMA Drivers
++ *
++ * Copyright (C) 2008-09 Intel Corp
++ * Authhor: Vinod Koul <vinod.koul@intel.com>
++ * The driver design is based on dw_dmac driver
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ *
++ */
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/interrupt.h>
++#include <sound/intel_lpe.h>
++#include <linux/lnw_dma.h>
++
++#define MAX_CHAN 2
++#include "lnw_dma_regs.h"
++
++MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
++MODULE_DESCRIPTION("Intel (R) Moorestown Langwell DMAC1 Driver");
++MODULE_LICENSE("GPL v2");
++MODULE_VERSION(LNW_DMA_DRIVER_VERSION);
++
++#define DMA_CH0 6
++#define DMA_CH1 7
++#define CH_BLOCK_SIZE 4095
++
++static int __devinit lnw_dma1_probe(struct pci_dev *pdev,
++ const struct pci_device_id *id);
++static void __devexit lnw_dma1_remove(struct pci_dev *pdev);
++static void enable_dma1_interrupt(struct lnw_dma_chan *lnwc);
++static void disable_dma1_interrupt(struct lnw_dma_chan *lnwc);
++
++struct lnw_device {
++ struct pci_dev *pdev;
++ void __iomem *dma_base;
++ struct lnwdma_device *dma;
++};
++
++/*CH dep code, if ch no's mapping changes only change here*/
++static int get_ch_id(int index)
++{
++ if (index == 0)
++ return DMA_CH0;
++ else if (index == 1)
++ return DMA_CH1;
++ else
++ return -1;
++}
++
++static int get_ch_index(int ch_id)
++{
++ if (ch_id == DMA_CH0)
++ return 0;
++ if (ch_id == DMA_CH1)
++ return 1;
++ else
++ return -1;
++}
++
++static int get_ch_num(int *status)
++{
++ if (*status & (1 << DMA_CH0)) {
++ *status = *status & (~(1 << DMA_CH0));
++ return DMA_CH0;
++ } else if (*status & (1 << DMA_CH1)) {
++ *status = *status & (~(1 << DMA_CH1));
++ return DMA_CH1;
++ } else
++ return -1;
++}
++
++static int get_block_ts(int len, int tx_width)
++{
++ int byte_width = 0, block_ts = 0;
++
++ switch (tx_width) {
++ case LNW_DMA_WIDTH_8BIT:
++ byte_width = 1;
++ break;
++ case LNW_DMA_WIDTH_16BIT:
++ byte_width = 2;
++ break;
++ case LNW_DMA_WIDTH_32BIT:
++ default:
++ byte_width = 4;
++ break;
++ }
++
++ block_ts = len/byte_width;
++ if (block_ts > CH_BLOCK_SIZE)
++ block_ts = 0xFFFF;
++ return block_ts;
++}
++
++static struct lnw_dma_desc *lnwc_desc_get1(struct lnw_dma_chan *lnwc)
++{
++ struct lnw_dma_desc *desc, *_desc;
++ struct lnw_dma_desc *ret = NULL;
++
++ dma_dbg("called \n");
++ spin_lock_bh(&lnwc->lock);
++ list_for_each_entry_safe(desc, _desc, &lnwc->free_list, desc_node) {
++ if (async_tx_test_ack(&desc->txd)) {
++ list_del(&desc->desc_node);
++ ret = desc;
++ dma_dbg("got free desc \n");
++ break;
++ }
++ }
++ spin_unlock_bh(&lnwc->lock);
++ return ret;
++}
++
++
++static void lnwc_desc_put1(struct lnw_dma_chan *lnwc, struct lnw_dma_desc *desc)
++{
++ if (desc) {
++ spin_lock_bh(&lnwc->lock);
++ list_add_tail(&desc->desc_node, &lnwc->free_list);
++ spin_unlock_bh(&lnwc->lock);
++ }
++}
++
++/* Called with dwc->lock held and bh disabled */
++static void lnwc_dostart1(struct lnw_dma_chan *lnwc, struct lnw_dma_desc *first)
++{
++ struct lnwdma_device *lnw = to_lnwdma_device(lnwc->chan.device);
++
++ dma_dbg("called \n");
++ /* ASSERT: channel is idle */
++ if (lnwc->in_use && test_ch_en(lnwc->dma_base, lnwc->ch_id)) {
++ /*error*/
++ dma_err("channel is busy \n");
++ /* The tasklet will hopefully advance the queue... */
++ return;
++ }
++
++ /*write registers and en*/
++ iowrite32(first->sar, lnwc->ch_regs + SAR);
++ iowrite32(first->dar, lnwc->ch_regs + DAR);
++ iowrite32(first->cfg_hi, lnwc->ch_regs + CFG_HIGH);
++ iowrite32(first->cfg_lo, lnwc->ch_regs + CFG_LOW);
++ iowrite32(first->ctl_lo, lnwc->ch_regs + CTL_LOW);
++ iowrite32(first->ctl_hi, lnwc->ch_regs + CTL_HIGH);
++ dma_dbg("TX SAR %lx, DAR %lx, CFGL %x, CFGH %x, CTLH %x, CTLL %x \n",
++ first->sar, first->dar, first->cfg_hi,
++ first->cfg_lo, first->ctl_hi, first->ctl_lo);
++
++ iowrite32(ENABLE_CHANNEL(lnwc->ch_id), lnw->dma_base + DMA_CHAN_EN);
++ first->status = DMA_IN_PROGRESS;
++}
++
++static void
++lnwc_descriptor_complete1(struct lnw_dma_chan *lnwc, struct lnw_dma_desc *desc)
++{
++ struct dma_async_tx_descriptor *txd = &desc->txd;
++ dma_async_tx_callback callback = NULL;
++ dma_async_tx_callback callback_txd = NULL;
++ void *param = NULL;
++ void *param_txd = NULL;
++ u32 sar, dar, len;
++ union lnw_dma_ctl_hi ctl_hi;
++
++ dma_dbg("called \n");
++
++ /*check if full tx is complete or not*/
++ sar = ioread32(lnwc->ch_regs + SAR);
++ dar = ioread32(lnwc->ch_regs + DAR);
++
++ if (desc->dirn == DMA_FROM_DEVICE)
++ len = dar - desc->dar;
++ else
++ len = sar - desc->sar;
++
++ dma_dbg("SAR %x DAR %x, DMA done: %x \n", sar, dar, len);
++ if (desc->len > len) {
++ dma_dbg("dirn = %d\n", desc->dirn);
++ dma_dbg("SAR %x DAR %x, len: %x \n", sar, dar, len);
++ /*we have to copy more bytes*/
++ desc->len -= len;
++ ctl_hi.ctl_hi = desc->ctl_hi;
++ ctl_hi.ctlx.block_ts = get_block_ts(desc->len, desc->width);
++ dma_dbg("setting for %x bytes \n", ctl_hi.ctlx.block_ts);
++ desc->ctl_hi = ctl_hi.ctl_hi;
++ if (desc->cfg_mode == LNW_DMA_MEM_TO_MEM) {
++ sar++;
++ dar++;
++ } else if (desc->dirn == DMA_TO_DEVICE)
++ sar++;
++ else if (desc->dirn == DMA_FROM_DEVICE)
++ dar++;
++ desc->sar = sar;
++ desc->dar = dar;
++ dma_dbg("New SAR %x DAR %x \n", sar, dar);
++ lnwc_dostart1(lnwc, desc);
++ return;
++ }
++
++ lnwc->completed = txd->cookie;
++ callback = desc->callback;
++ param = desc->callback_param;
++ callback_txd = txd->callback;
++ param_txd = txd->callback_param;
++
++ list_move(&desc->desc_node, &lnwc->free_list);
++
++ spin_unlock_bh(&lnwc->lock);
++ dma_dbg("Now we are calling callback \n");
++ if (callback_txd) {
++ dma_dbg("lnw TXD callback set ... calling \n");
++ callback_txd(param_txd);
++ spin_lock_bh(&lnwc->lock);
++ return;
++ }
++ if (callback) {
++ dma_dbg("lnw callback set ... calling \n");
++ callback(param);
++ }
++ spin_lock_bh(&lnwc->lock);
++}
++
++/*check desc, mark as complete when tx is complete*/
++static void
++lnwc_scan_descriptors1(struct lnwdma_device *lnw, struct lnw_dma_chan *lnwc)
++{
++ struct lnw_dma_desc *desc = NULL, *_desc = NULL;
++ u32 status_xfer;
++
++ dma_dbg("called \n");
++ status_xfer = ioread32(lnwc->dma_base + RAW_BLOCK);
++ status_xfer = (status_xfer >> lnwc->ch_id) & 0x1;
++ dma_dbg("ch[%d]: status_xfer %x \n", lnwc->ch_id, status_xfer);
++ if (!status_xfer)
++ return;
++
++ list_for_each_entry_safe(desc, _desc, &lnwc->active_list, desc_node) {
++ if (desc == NULL)
++ continue;
++ if (desc->status == DMA_IN_PROGRESS) {
++ desc->status = DMA_SUCCESS;
++ lnwc_descriptor_complete1(lnwc, desc);
++ }
++ }
++ return;
++}
++
++/*****************************************************************************
++DMA Functions*/
++static dma_cookie_t lnw_dma1_tx_submit(struct dma_async_tx_descriptor *tx)
++{
++ struct lnw_dma_desc *desc = to_lnw_dma_desc(tx);
++ struct lnw_dma_chan *lnwc = to_lnw_dma_chan(tx->chan);
++ dma_cookie_t cookie;
++
++ dma_dbg("called \n");
++ spin_lock_bh(&lnwc->lock);
++ cookie = lnwc->chan.cookie;
++
++ if (++cookie < 0)
++ cookie = 1;
++
++ lnwc->chan.cookie = cookie;
++ desc->txd.cookie = cookie;
++
++
++ if (list_empty(&lnwc->active_list)) {
++ lnwc_dostart1(lnwc, desc);
++ list_add_tail(&desc->desc_node, &lnwc->active_list);
++ } else {
++ list_add_tail(&desc->desc_node, &lnwc->queue);
++ }
++ spin_unlock_bh(&lnwc->lock);
++
++ return cookie;
++}
++
++static void lnw_dma1_issue_pending(struct dma_chan *chan)
++{
++ struct lnw_dma_chan *lnwc = to_lnw_dma_chan(chan);
++
++ spin_lock_bh(&lnwc->lock);
++ if (!list_empty(&lnwc->queue))
++ lnwc_scan_descriptors1(to_lnwdma_device(chan->device), lnwc);
++ spin_unlock_bh(&lnwc->lock);
++}
++
++static enum dma_status
++lnw_dma1_tx_is_complete(struct dma_chan *chan,
++ dma_cookie_t cookie,
++ dma_cookie_t *done,
++ dma_cookie_t *used)
++{
++ struct lnw_dma_chan *lnwc = to_lnw_dma_chan(chan);
++ dma_cookie_t last_used;
++ dma_cookie_t last_complete;
++ int ret;
++
++ last_complete = lnwc->completed;
++ last_used = chan->cookie;
++
++ ret = dma_async_is_complete(cookie, last_complete, last_used);
++ if (ret != DMA_SUCCESS) {
++ lnwc_scan_descriptors1(to_lnwdma_device(chan->device), lnwc);
++
++ last_complete = lnwc->completed;
++ last_used = chan->cookie;
++
++ ret = dma_async_is_complete(cookie, last_complete, last_used);
++ }
++
++ if (done)
++ *done = last_complete;
++ if (used)
++ *used = last_used;
++
++ return ret;
++}
++
++static void lnw_dma1_terminate_all(struct dma_chan *chan)
++{
++ struct lnw_dma_chan *lnwc = to_lnw_dma_chan(chan);
++ struct lnwdma_device *lnw = to_lnwdma_device(chan->device);
++ struct lnw_dma_desc *desc, *_desc;
++ LIST_HEAD(list);
++
++ /* ASSERT: channel is idle */
++ if (lnwc->in_use == false) {
++ /*ch is not in use, wrong call*/
++ return;
++ }
++ spin_lock_bh(&lnwc->lock);
++ list_splice_init(&lnwc->free_list, &list);
++ lnwc->descs_allocated = 0;
++ lnwc->slave = NULL;
++
++ /* Disable interrupts */
++ disable_dma1_interrupt(lnwc);
++
++ spin_unlock_bh(&lnwc->lock);
++ list_for_each_entry_safe(desc, _desc, &list, desc_node) {
++ dma_dbg("freeing descriptor %p\n", desc);
++ pci_pool_free(lnw->dma_pool, desc, desc->txd.phys);
++ }
++ return;
++}
++
++static struct dma_async_tx_descriptor *
++lnw_dma1_prep_slave_sg(struct dma_chan *chan,
++ struct scatterlist *sgl, unsigned int sg_len,
++ enum dma_data_direction direction,
++ unsigned long flags)
++{
++ /*not supported now*/
++ return NULL;
++}
++
++static struct dma_async_tx_descriptor *
++lnw_dma1_prep_memcpy(struct dma_chan *chan, dma_addr_t dest,
++ dma_addr_t src, size_t len, unsigned long flags)
++{
++ struct lnw_dma_chan *lnwc;
++ struct lnw_dma_desc *desc = NULL;
++ struct lnw_dma_slave *lnws;
++ union lnw_dma_ctl_lo ctl_lo;
++ union lnw_dma_ctl_hi ctl_hi;
++ union lnw_dma_cfg_lo cfg_lo;
++ union lnw_dma_cfg_hi cfg_hi;
++ enum lnw_dma_width width = 0;
++
++ dma_dbg("called \n");
++ WARN_ON(!chan);
++ if (!len)
++ return NULL;
++
++ lnws = chan->private;
++ WARN_ON(!lnws);
++
++ lnwc = to_lnw_dma_chan(chan);
++ WARN_ON(!lnwc);
++
++ dma_dbg("called for CH %d\n", lnwc->ch_id);
++ dma_dbg("Cfg passed Mode %x, Dirn %x, HS %x, Width %x \n",
++ lnws->cfg_mode, lnws->dirn, lnws->hs_mode, lnws->src_width);
++
++ /*calculate CFG_LO*/
++ if (lnws->hs_mode == LNW_DMA_SW_HS) {
++ cfg_lo.cfg_lo = 0;
++ cfg_lo.cfgx.hs_sel_dst = 1;
++ cfg_lo.cfgx.hs_sel_src = 1;
++ } else if (lnws->hs_mode == LNW_DMA_HW_HS)
++ cfg_lo.cfg_lo = 0x00000;
++
++ /*calculate CFG_HI*/
++ if (lnws->cfg_mode == LNW_DMA_MEM_TO_MEM) {
++ /*SW HS only*/
++ dma_dbg("CFG: Mem to mem dma \n");
++ cfg_hi.cfg_hi = 0;
++ } else {
++ dma_dbg("HW DMA \n");
++ cfg_hi.cfg_hi = 0;
++ cfg_hi.cfgx.protctl = 0x0; /*default value*/
++ cfg_hi.cfgx.fifo_mode = 1;
++ if (lnws->dirn == DMA_TO_DEVICE) {
++ cfg_hi.cfgx.src_per = 0;
++ cfg_hi.cfgx.dst_per = 3;
++ } else if (lnws->dirn == DMA_FROM_DEVICE) {
++ cfg_hi.cfgx.src_per = 2;
++ cfg_hi.cfgx.dst_per = 0;
++ }
++ }
++
++ /*calculate CTL_HI*/
++ ctl_hi.ctlx.reser = 0;
++ width = lnws->src_width;
++
++ ctl_hi.ctlx.block_ts = get_block_ts(len, width);
++
++ /*calculate CTL_LO*/
++ ctl_lo.ctl_lo = 0;
++ ctl_lo.ctlx.int_en = 1;
++ ctl_lo.ctlx.dst_tr_width = lnws->dst_width;
++ ctl_lo.ctlx.src_tr_width = lnws->src_width;
++ ctl_lo.ctlx.dst_msize = lnws->src_msize;
++ ctl_lo.ctlx.src_msize = lnws->dst_msize;
++
++ if (lnws->cfg_mode == LNW_DMA_MEM_TO_MEM) {
++ dma_dbg("CTL: Mem to mem dma \n");
++ ctl_lo.ctlx.tt_fc = 0;
++ ctl_lo.ctlx.sinc = 0;
++ ctl_lo.ctlx.dinc = 0;
++ } else {
++ if (lnws->dirn == DMA_TO_DEVICE) {
++ dma_dbg("CTL: DMA_TO_DEVICE \n");
++ ctl_lo.ctlx.sinc = 0;
++ ctl_lo.ctlx.dinc = 2;
++ ctl_lo.ctlx.tt_fc = 1;
++ } else if (lnws->dirn == DMA_FROM_DEVICE) {
++ dma_dbg("CTL: DMA_FROM_DEVICE \n");
++ ctl_lo.ctlx.sinc = 2;
++ ctl_lo.ctlx.dinc = 0;
++ ctl_lo.ctlx.tt_fc = 2;
++ }
++ }
++
++ dma_dbg("Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
++ ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);
++
++ enable_dma1_interrupt(lnwc);
++
++ desc = lnwc_desc_get1(lnwc);
++ if (desc == NULL)
++ goto err_desc_get;
++ desc->sar = src;
++ desc->dar = dest ;
++ desc->len = len;
++ desc->cfg_hi = cfg_hi.cfg_hi;
++ desc->cfg_lo = cfg_lo.cfg_lo;
++ desc->ctl_lo = ctl_lo.ctl_lo;
++ desc->ctl_hi = ctl_hi.ctl_hi;
++ desc->width = width;
++ desc->dirn = lnws->dirn;
++ if (lnws->callback) {
++ desc->callback = lnws->callback;
++ desc->callback_param = lnws->callback_param;
++ dma_dbg("Callback passed... setting\n");
++ } else
++ desc->callback = NULL;
++ return &desc->txd;
++
++err_desc_get:
++ dma_err("Failed to get desc \n");
++ lnwc_desc_put1(lnwc, desc);
++ return NULL;
++}
++
++static void lnw_dma1_free_chan_resources(struct dma_chan *chan)
++{
++ struct lnw_dma_chan *lnwc = to_lnw_dma_chan(chan);
++ struct lnwdma_device *lnw = to_lnwdma_device(chan->device);
++ struct lnw_dma_desc *desc, *_desc;
++
++ dma_dbg("..called for ch_id %d, lnwch_id %d\n",
++ chan->chan_id, lnwc->ch_id);
++ if (true == lnwc->in_use) {
++ /*trying to free ch in use!!!!!*/
++ dma_err("trying to free ch in use \n");
++ }
++
++ spin_lock_bh(&lnwc->lock);
++ lnwc->descs_allocated = 0;
++ list_for_each_entry_safe(desc, _desc, &lnwc->active_list, desc_node) {
++ dma_dbg("del active \n");
++ list_del(&desc->desc_node);
++ pci_pool_free(lnw->dma_pool, desc, desc->txd.phys);
++ }
++ list_for_each_entry_safe(desc, _desc, &lnwc->free_list, desc_node) {
++ list_del(&desc->desc_node);
++ pci_pool_free(lnw->dma_pool, desc, desc->txd.phys);
++ }
++ list_for_each_entry_safe(desc, _desc, &lnwc->queue, desc_node) {
++ dma_dbg("del queue \n");
++ list_del(&desc->desc_node);
++ pci_pool_free(lnw->dma_pool, desc, desc->txd.phys);
++ }
++ spin_unlock_bh(&lnwc->lock);
++ lnwc->in_use = false;
++ chan->client_count--;
++ /* Disable CH interrupts */
++ iowrite32(MASK_INTR_REG(lnwc->ch_id), lnw->dma_base + MASK_BLOCK);
++ iowrite32(MASK_INTR_REG(lnwc->ch_id), lnw->dma_base + MASK_ERR);
++ dma_dbg("done \n");
++}
++
++static int lnw_dma1_alloc_chan_resources(struct dma_chan *chan)
++{
++ struct lnw_dma_chan *lnwc = to_lnw_dma_chan(chan);
++ struct lnwdma_device *lnw = to_lnwdma_device(chan->device);
++ struct lnw_dma_desc *desc;
++ dma_addr_t phys;
++ int i = 0;
++
++ dma_dbg("called \n");
++
++ /* ASSERT: channel is idle */
++ if (test_ch_en(lnw->dma_base, lnwc->ch_id)) {
++ /*ch is not idle*/
++ dma_err(".ch not idle\n");
++ return -EIO;
++ }
++ dma_dbg("..called for ch_id %d, lnwch_id %d\n",
++ chan->chan_id, lnwc->ch_id);
++ lnwc->completed = chan->cookie = 1;
++
++ chan->client_count++;
++
++ spin_lock_bh(&lnwc->lock);
++ while (lnwc->descs_allocated < DESCS_PER_CHANNEL) {
++ spin_unlock_bh(&lnwc->lock);
++ desc = pci_pool_alloc(lnw->dma_pool, GFP_KERNEL, &phys);
++ if (!desc) {
++ dma_err("desc failed\n");
++ return -ENOMEM;
++ /*check*/
++ }
++ dma_async_tx_descriptor_init(&desc->txd, chan);
++ desc->txd.tx_submit = lnw_dma1_tx_submit;
++ desc->txd.flags = DMA_CTRL_ACK;
++ desc->txd.phys = phys;
++ spin_lock_bh(&lnwc->lock);
++ i = ++lnwc->descs_allocated;
++ list_add_tail(&desc->desc_node, &lnwc->free_list);
++ }
++ spin_unlock_bh(&lnwc->lock);
++ lnwc->in_use = false;
++ dma_dbg("Desc alloc done ret: %d desc\n", i);
++ return i;
++}
++
++static void lnwc_handle_error1(struct lnwdma_device *lnw,
++ struct lnw_dma_chan *lnwc)
++{
++ lnwc_scan_descriptors1(lnw, lnwc);
++}
++
++/******************************************************************************
++* PCI stuff
++*/
++static struct pci_device_id lnw_dma1_ids[] = {
++ { PCI_VENDOR_ID_INTEL, 0x0814, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
++ { 0, }
++};
++
++MODULE_DEVICE_TABLE(pci, lnw_dma1_ids);
++
++static struct pci_driver lnw_dma1_pci = {
++ .name = "Intel LNW DMA1",
++ .id_table = lnw_dma1_ids,
++ .probe = lnw_dma1_probe,
++ .remove = __devexit_p(lnw_dma1_remove),
++};
++
++static void dma_tasklet1(unsigned long data)
++{
++ struct lnwdma_device *lnw = NULL;
++ struct lnw_dma_chan *lnwc = NULL;
++ u32 status;
++ int i, ch_no;
++
++ dma_dbg("called \n");
++ lnw = (struct lnwdma_device *)data;
++ if (lnw == NULL) {
++ dma_err("Null param \n");
++ return;
++ }
++ status = ioread32(lnw->dma_base + RAW_BLOCK);
++ dma_dbg("RAW_TFR %x \n", status);
++ status &= 0xC0;
++ while (status) {
++ /*txn interrupt*/
++ ch_no = get_ch_num(&status);
++ if (ch_no < 0) {
++ dma_err("Ch no is invalid %x, abort!\n", ch_no);
++ return;
++ }
++ dma_dbg("Got Ch %x, new Status %x \n", ch_no, status);
++ i = get_ch_index(ch_no);
++ if (i < 0) {
++ dma_err("Invalid ch index %x\n", i);
++ return;
++ }
++ dma_dbg("Tx complete interrupt %x, Ch No %d Index %d \n",
++ status, ch_no, i);
++ lnwc = &lnw->ch[i];
++ if (lnwc == NULL) {
++ dma_err("Null param lnwc\n");
++ return;
++ }
++ dma_dbg("CH %x \n", lnwc->ch_id);
++ spin_lock_bh(&lnwc->lock);
++ lnwc_scan_descriptors1(lnw, lnwc);
++ dma_dbg("Scan of desc... complete, unmasking\n");
++ iowrite32((1 << lnwc->ch_id),
++ lnw->dma_base + CLEAR_TFR);
++ dma_dbg("Wrote to clear %x\n", (1 << lnwc->ch_id));
++ iowrite32((1 << lnwc->ch_id),
++ lnw->dma_base + CLEAR_BLOCK);
++ iowrite32(UNMASK_INTR_REG(lnwc->ch_id),
++ lnw->dma_base + MASK_TFR);
++ spin_unlock_bh(&lnwc->lock);
++ }
++
++ dma_dbg("Trf interrupt done... \n");
++ status = ioread32(lnw->dma_base + RAW_ERR);
++ status &= 0xC0;
++ while (status) {
++ /*err interrupt*/
++ ch_no = get_ch_num(&status);
++ if (ch_no < 0) {
++ dma_err("Ch no is invalid %x, abort!\n", ch_no);
++ return;
++ }
++ dma_dbg("Got Ch %x, new Status %x \n", ch_no, status);
++ i = get_ch_index(ch_no);
++ if (i < 0) {
++ dma_err("Invalid CH lnwc\n");
++ return;
++ }
++ dma_dbg("Tx error interrupt %x, No %d Index %d \n",
++ status, ch_no, i);
++ lnwc = &lnw->ch[i];
++ if (lnwc == NULL) {
++ dma_err("Null param lnwc\n");
++ return;
++ }
++ spin_lock_bh(&lnwc->lock);
++ lnwc_handle_error1(lnw, lnwc);
++ iowrite32((1 << lnwc->ch_id),
++ lnw->dma_base + CLEAR_ERR);
++ iowrite32(UNMASK_INTR_REG(lnwc->ch_id),
++ lnw->dma_base + MASK_ERR);
++ spin_unlock_bh(&lnwc->lock);
++ }
++ dma_dbg("Exiting takslet... \n");
++ return;
++}
++
++static irqreturn_t lnw_dma1_interrupt(int irq, void *data)
++{
++ struct lnw_device *lnw = data;
++ u32 status;
++ int call_tasklet = 0;
++
++ /*check interrupt src*/
++ lpe_periphral_intr_status(LPE_DMA, &status);
++ if (!status) {
++ /*not our interrupt*/
++ return IRQ_NONE;
++ }
++
++ /*DMA Interrupt*/
++ status = ioread32(lnw->dma_base + RAW_TFR);
++ status &= 0xC0;
++ if (status) {
++ iowrite32((status << 8), lnw->dma_base + MASK_TFR);
++ call_tasklet = 1;
++ }
++ status = ioread32(lnw->dma_base + RAW_ERR);
++ status &= 0xC0;
++ if (status) {
++ iowrite32(MASK_INTR_REG(status), lnw->dma_base + MASK_ERR);
++ call_tasklet = 1;
++ }
++
++ if (call_tasklet)
++ tasklet_schedule(&lnw->dma->tasklet);
++
++ return IRQ_HANDLED;
++}
++
++static void enable_dma1_interrupt(struct lnw_dma_chan *lnwc)
++{
++ dma_dbg("Called for ch_id %d\n", lnwc->ch_id);
++
++ lpe_unmask_periphral_intr(LPE_DMA);
++
++ /*en ch interrupts*/
++ iowrite32(UNMASK_INTR_REG(lnwc->ch_id), lnwc->dma_base + MASK_TFR);
++ iowrite32(UNMASK_INTR_REG(lnwc->ch_id), lnwc->dma_base + MASK_ERR);
++ return;
++}
++
++static void disable_dma1_interrupt(struct lnw_dma_chan *lnwc)
++{
++ /*Check LPE PISR, make sure fwd is disabled*/
++ lpe_mask_periphral_intr(LPE_DMA);
++ iowrite32(MASK_INTR_REG(lnwc->ch_id), lnwc->dma_base + MASK_BLOCK);
++ iowrite32(MASK_INTR_REG(lnwc->ch_id), lnwc->dma_base + MASK_TFR);
++ iowrite32(MASK_INTR_REG(lnwc->ch_id), lnwc->dma_base + MASK_ERR);
++ dma_dbg(" called \n");
++ return;
++}
++
++static int lnw_setup_dma1(struct pci_dev *pdev)
++{
++ struct lnw_device *device = pci_get_drvdata(pdev);
++ struct lnwdma_device *dma = NULL;
++ int err, i;
++
++ dma_dbg("setup_dma called \n");
++ dma = kzalloc(sizeof(*dma), GFP_KERNEL);
++ if (NULL == dma) {
++ dma_err("kzalloc failed \n");
++ err = -ENOMEM;
++ goto err_kzalloc;
++ }
++ device->dma = dma;
++ dma->pdev = pdev;
++ dma->dma_base = device->dma_base;
++
++ /* DMA coherent memory pool for DMA descriptor allocations */
++ dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
++ sizeof(struct lnw_dma_desc),
++ 32, 0);
++ if (NULL == dma->dma_pool) {
++ dma_err("pci_pool_create failed \n");
++ err = -ENOMEM;
++ kfree(dma);
++ goto err_dma_pool;
++ }
++
++ INIT_LIST_HEAD(&dma->common.channels);
++
++
++ /*init CH structures*/
++ for (i = 0; i < MAX_CHAN; i++) {
++ struct lnw_dma_chan *lnwch = &dma->ch[i];
++
++ lnwch->chan.device = &dma->common;
++ lnwch->chan.cookie = 1;
++ lnwch->chan.chan_id = i;
++ lnwch->ch_id = get_ch_id(i);
++ dma_dbg("Init CH %d, ID %d \n", i, lnwch->ch_id);
++
++ lnwch->dma_base = dma->dma_base;
++ lnwch->ch_regs = dma->dma_base + DMA_CH_SIZE * lnwch->ch_id;
++ lnwch->dma = dma;
++ spin_lock_init(&lnwch->lock);
++
++ INIT_LIST_HEAD(&lnwch->active_list);
++ INIT_LIST_HEAD(&lnwch->queue);
++ INIT_LIST_HEAD(&lnwch->free_list);
++ /*mask interrupts*/
++ iowrite32(MASK_INTR_REG(lnwch->ch_id),
++ dma->dma_base + MASK_BLOCK);
++ iowrite32(MASK_INTR_REG(lnwch->ch_id),
++ dma->dma_base + MASK_SRC_TRAN);
++ iowrite32(MASK_INTR_REG(lnwch->ch_id),
++ dma->dma_base + MASK_DST_TRAN);
++ iowrite32(MASK_INTR_REG(lnwch->ch_id),
++ dma->dma_base + MASK_ERR);
++ iowrite32(MASK_INTR_REG(lnwch->ch_id),
++ dma->dma_base + MASK_TFR);
++
++ disable_dma1_interrupt(lnwch);
++ list_add_tail(&lnwch->chan.device_node, &dma->common.channels);
++ }
++
++ /*init dma structure*/
++ dma_cap_zero(dma->common.cap_mask);
++ dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
++ dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
++ dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
++ dma->common.dev = &pdev->dev;
++ dma->common.chancnt = MAX_CHAN;
++
++ dma->common.device_alloc_chan_resources =
++ lnw_dma1_alloc_chan_resources;
++ dma->common.device_free_chan_resources =
++ lnw_dma1_free_chan_resources;
++
++ dma->common.device_is_tx_complete = lnw_dma1_tx_is_complete;
++ dma->common.device_prep_dma_memcpy = lnw_dma1_prep_memcpy;
++ dma->common.device_issue_pending = lnw_dma1_issue_pending;
++ dma->common.device_prep_slave_sg = lnw_dma1_prep_slave_sg;
++ dma->common.device_terminate_all = lnw_dma1_terminate_all;
++
++ /*enable dma cntrl*/
++ iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);
++
++ /*register irq*/
++ err = request_irq(pdev->irq, lnw_dma1_interrupt,
++ IRQF_SHARED, lnw_dma1_pci.name, device);
++ if (0 != err)
++ goto err_irq;
++
++ /*register device w/ engine*/
++ err = dma_async_device_register(&dma->common);
++ if (0 != err) {
++ dma_err("device_register failed: %d \n", err);
++ goto err_engine;
++ }
++ tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma);
++ dma_dbg("...done \n");
++ return 0;
++
++err_engine:
++ free_irq(pdev->irq, device);
++err_irq:
++ pci_pool_destroy(dma->dma_pool);
++ kfree(dma);
++err_dma_pool:
++err_kzalloc:
++ dma_err("setup_dma failed: %d \n", err);
++ return err;
++
++}
++
++static void lnwdma_shutdown1(struct pci_dev *pdev)
++{
++ struct lnw_device *device = pci_get_drvdata(pdev);
++
++ dma_dbg("shutdown called \n");
++ dma_async_device_unregister(&device->dma->common);
++ pci_pool_destroy(device->dma->dma_pool);
++ if (device->dma_base)
++ iounmap(device->dma_base);
++ free_irq(pdev->irq, device);
++ return;
++}
++
++static int __devinit
++lnw_dma1_probe(struct pci_dev *pdev, const struct pci_device_id *id)
++{
++ struct lnw_device *device = NULL;
++ u32 base_addr = 0, bar_size = 0;
++ int err = 0;
++
++ dma_info("probe called for %x \n", pdev->device);
++ err = pci_enable_device(pdev);
++ if (err)
++ goto err_enable_device;
++
++ err = pci_request_regions(pdev, lnw_dma1_pci.name);
++ if (err)
++ goto err_request_regions;
++
++ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
++ if (err)
++ goto err_set_dma_mask;
++
++ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
++ if (err)
++ goto err_set_dma_mask;
++
++ device = kzalloc(sizeof(*device), GFP_KERNEL);
++ if (!device) {
++ dma_err("kzalloc failed \n");
++ err = -ENOMEM;
++ goto err_kzalloc;
++ }
++ device->pdev = pci_dev_get(pdev);
++
++ base_addr = pci_resource_start(pdev, 0);
++ bar_size = pci_resource_len(pdev, 0);
++ dma_dbg("BAR0 %x Size %x \n", base_addr, bar_size);
++ device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
++ if (!device->dma_base) {
++ dma_err("ioremap failed \n");
++ err = -ENOMEM;
++ goto err_ioremap1;
++ }
++ pci_set_drvdata(pdev, device);
++ pci_set_master(pdev);
++
++ err = lnw_setup_dma1(pdev);
++ if (err)
++ goto err_dma;
++
++ return 0;
++
++err_dma:
++ iounmap(device->dma_base);
++err_ioremap1:
++ pci_dev_put(pdev);
++ kfree(device);
++err_kzalloc:
++err_set_dma_mask:
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
++err_request_regions:
++err_enable_device:
++ dma_err("Probe failed %d\n", err);
++ return err;
++}
++
++static void __devexit lnw_dma1_remove(struct pci_dev *pdev)
++{
++ struct lnw_device *device = pci_get_drvdata(pdev);
++
++ lnwdma_shutdown1(pdev);
++ pci_dev_put(pdev);
++ kfree(device);
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
++}
++
++static int __init lnw_dma1_init(void)
++{
++ dma_info("LNW DMA Driver\n Version %s \n", LNW_DMA_DRIVER_VERSION);
++ return pci_register_driver(&lnw_dma1_pci);
++}
++late_initcall(lnw_dma1_init);
++
++static void __exit lnw_dma1_exit(void)
++{
++ pci_unregister_driver(&lnw_dma1_pci);
++}
++module_exit(lnw_dma1_exit);
++
+Index: linux-2.6.33/drivers/dma/lnw_dmac2.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/dma/lnw_dmac2.c
+@@ -0,0 +1,947 @@
++/*
++ * lnw_dmac2.c - Intel Langwell DMA Drivers
++ *
++ * Copyright (C) 2008-09 Intel Corp
++ * Author: Vinod Koul <vinod.koul@intel.com>
++ * The driver design is based on dw_dmac driver
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ *
++ */
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/interrupt.h>
++#include <linux/lnw_dma.h>
++
++#define MAX_CHAN 2
++#include "lnw_dma_regs.h"
++
++MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
++MODULE_DESCRIPTION("Intel (R) Moorestown Langwell DMAC2 Driver");
++MODULE_LICENSE("GPL v2");
++MODULE_VERSION(LNW_DMA_DRIVER_VERSION);
++
++#define DMA_CH0 0
++#define DMA_CH1 1
++#define CH_BLOCK_SIZE 2047
++
++static int __devinit lnw_dma2_probe(struct pci_dev *pdev,
++ const struct pci_device_id *id);
++static void __devexit lnw_dma2_remove(struct pci_dev *pdev);
++static void enable_dma2_interrupt(struct lnw_dma_chan *lnwc);
++
++struct lnw_device {
++ struct pci_dev *pdev;
++ void __iomem *dma_base;
++ struct lnwdma_device *dma;
++};
++
++/*CH dep code, if ch no's mapping changes only change here*/
++static int get_ch_id(int index)
++{
++ if (index == 0)
++ return DMA_CH0;
++ else if (index == 1)
++ return DMA_CH1;
++ else
++ return -1;
++}
++
++static int get_ch_index(int ch_id)
++{
++ if (ch_id == DMA_CH0)
++ return 0;
++ if (ch_id == DMA_CH1)
++ return 1;
++ else
++ return -1;
++}
++
++static int get_ch_num(int *status)
++{
++ if (*status & (1 << DMA_CH0)) {
++ *status = *status & (~(1 << DMA_CH0));
++ return DMA_CH0;
++ } else if (*status & (1 << DMA_CH1)) {
++ *status = *status & (~(1 << DMA_CH1));
++ return DMA_CH1;
++ } else
++ return -1;
++}
++
++static int get_block_ts(int len, int tx_width)
++{
++ int byte_width = 0, block_ts = 0;
++
++ switch (tx_width) {
++ case LNW_DMA_WIDTH_8BIT:
++ byte_width = 1;
++ break;
++ case LNW_DMA_WIDTH_16BIT:
++ byte_width = 2;
++ break;
++ case LNW_DMA_WIDTH_32BIT:
++ default:
++ byte_width = 4;
++ break;
++ }
++
++ block_ts = len/byte_width;
++ if (block_ts > CH_BLOCK_SIZE)
++ block_ts = 0xFFFF;
++ return block_ts;
++}
++
++static struct lnw_dma_desc *lnwc_desc_get(struct lnw_dma_chan *lnwc)
++{
++ struct lnw_dma_desc *desc, *_desc;
++ struct lnw_dma_desc *ret = NULL;
++
++ dma_dbg("called \n");
++ spin_lock_bh(&lnwc->lock);
++ list_for_each_entry_safe(desc, _desc, &lnwc->free_list, desc_node) {
++ if (async_tx_test_ack(&desc->txd)) {
++ list_del(&desc->desc_node);
++ ret = desc;
++ dma_dbg("got free desc \n");
++ break;
++ }
++ }
++ spin_unlock_bh(&lnwc->lock);
++ return ret;
++}
++
++static void lnwc_desc_put(struct lnw_dma_chan *lnwc, struct lnw_dma_desc *desc)
++{
++ if (desc) {
++ spin_lock_bh(&lnwc->lock);
++ list_add_tail(&desc->desc_node, &lnwc->free_list);
++ spin_unlock_bh(&lnwc->lock);
++ }
++}
++
++/* Called with lock held and bh disabled */
++static void lnwc_dostart(struct lnw_dma_chan *lnwc, struct lnw_dma_desc *first)
++{
++ struct lnwdma_device *lnw = to_lnwdma_device(lnwc->chan.device);
++
++ dma_dbg("called \n");
++ /* channel is idle */
++ if (lnwc->in_use && test_ch_en(lnwc->dma_base, lnwc->ch_id)) {
++ /*error*/
++ dma_err("channel is busy \n");
++ /* The tasklet will hopefully advance the queue... */
++ return;
++ }
++
++ /*write registers and en*/
++ iowrite32(first->sar, lnwc->ch_regs + SAR);
++ iowrite32(first->dar, lnwc->ch_regs + DAR);
++ iowrite32(first->cfg_hi, lnwc->ch_regs + CFG_HIGH);
++ iowrite32(first->cfg_lo, lnwc->ch_regs + CFG_LOW);
++ iowrite32(first->ctl_lo, lnwc->ch_regs + CTL_LOW);
++ iowrite32(first->ctl_hi, lnwc->ch_regs + CTL_HIGH);
++ dma_dbg("TX SAR %lx, DAR %lx, CFGL %x, CFGH %x, CTLH %x, CTLL %x \n",
++ first->sar, first->dar, first->cfg_hi,
++ first->cfg_lo, first->ctl_hi, first->ctl_lo);
++
++ iowrite32(ENABLE_CHANNEL(lnwc->ch_id), lnw->dma_base + DMA_CHAN_EN);
++ first->status = DMA_IN_PROGRESS;
++}
++
++static void
++lnwc_descriptor_complete(struct lnw_dma_chan *lnwc, struct lnw_dma_desc *desc)
++{
++ struct dma_async_tx_descriptor *txd = &desc->txd;
++ dma_async_tx_callback callback = NULL;
++ dma_async_tx_callback callback_txd = NULL;
++ void *param = NULL;
++ void *param_txd = NULL;
++ u32 sar, dar, len;
++ union lnw_dma_ctl_hi ctl_hi;
++
++ dma_dbg("called \n");
++
++ /*check if full tx is complete or not*/
++ sar = ioread32(lnwc->ch_regs + SAR);
++ dar = ioread32(lnwc->ch_regs + DAR);
++
++ if (desc->dirn == DMA_FROM_DEVICE)
++ len = dar - desc->dar;
++ else
++ len = sar - desc->sar;
++
++ dma_dbg("SAR %x DAR %x, DMA done: %x \n", sar, dar, len);
++ if (desc->len > len) {
++ dma_dbg("dirn = %d\n", desc->dirn);
++ dma_dbg("SAR %x DAR %x, len: %x \n", sar, dar, len);
++ /*we have to copy more bytes*/
++ desc->len -= len;
++ ctl_hi.ctl_hi = desc->ctl_hi;
++ ctl_hi.ctlx.block_ts = get_block_ts(desc->len, desc->width);
++ dma_dbg("setting for %x bytes \n", ctl_hi.ctlx.block_ts);
++ desc->ctl_hi = ctl_hi.ctl_hi;
++ if (desc->cfg_mode == LNW_DMA_MEM_TO_MEM) {
++ sar++;
++ dar++;
++ } else if (desc->dirn == DMA_TO_DEVICE)
++ sar++;
++ else if (desc->dirn == DMA_FROM_DEVICE)
++ dar++;
++ desc->sar = sar;
++ desc->dar = dar;
++ dma_dbg("New SAR %x DAR %x \n", sar, dar);
++ lnwc_dostart(lnwc, desc);
++ return;
++ }
++
++ lnwc->completed = txd->cookie;
++ callback = desc->callback;
++ param = desc->callback_param;
++ callback_txd = txd->callback;
++ param_txd = txd->callback_param;
++
++ list_move(&desc->desc_node, &lnwc->free_list);
++
++ spin_unlock_bh(&lnwc->lock);
++ dma_dbg("Now we are calling callback \n");
++ if (callback_txd) {
++ dma_dbg("lnw TXD callback set ... calling \n");
++ callback_txd(param_txd);
++ spin_lock_bh(&lnwc->lock);
++ return;
++ }
++ if (callback) {
++ dma_dbg("lnw callback set ... calling \n");
++ callback(param);
++ }
++ spin_lock_bh(&lnwc->lock);
++
++}
++
++/*check desc, mark as complete when tx is complete*/
++static void
++lnwc_scan_descriptors(struct lnwdma_device *lnw, struct lnw_dma_chan *lnwc)
++{
++ struct lnw_dma_desc *desc = NULL, *_desc = NULL;
++ u32 status_xfer;
++
++ dma_dbg("called \n");
++ status_xfer = ioread32(lnwc->dma_base + RAW_TFR);
++ status_xfer = (status_xfer >> lnwc->ch_id) & 0x1;
++ dma_dbg("ch[%d]: status_xfer %x \n", lnwc->ch_id, status_xfer);
++ if (!status_xfer)
++ return;
++
++ /*tx is complete*/
++ list_for_each_entry_safe(desc, _desc, &lnwc->active_list, desc_node) {
++ if (desc == NULL)
++ continue;
++ if (desc->status == DMA_IN_PROGRESS) {
++ desc->status = DMA_SUCCESS;
++ lnwc_descriptor_complete(lnwc, desc);
++ }
++ }
++ return;
++}
++
++/*****************************************************************************
++DMA Functions*/
++static dma_cookie_t lnw_dma2_tx_submit(struct dma_async_tx_descriptor *tx)
++{
++ struct lnw_dma_desc *desc = to_lnw_dma_desc(tx);
++ struct lnw_dma_chan *lnwc = to_lnw_dma_chan(tx->chan);
++ dma_cookie_t cookie;
++
++ dma_dbg("called \n");
++
++ spin_lock_bh(&lnwc->lock);
++ cookie = lnwc->chan.cookie;
++
++ if (++cookie < 0)
++ cookie = 1;
++
++ lnwc->chan.cookie = cookie;
++ desc->txd.cookie = cookie;
++
++ if (list_empty(&lnwc->active_list)) {
++ lnwc_dostart(lnwc, desc);
++ list_add_tail(&desc->desc_node, &lnwc->active_list);
++ } else {
++ list_add_tail(&desc->desc_node, &lnwc->queue);
++ }
++ spin_unlock_bh(&lnwc->lock);
++
++ return cookie;
++}
++
++static void lnw_dma2_issue_pending(struct dma_chan *chan)
++{
++ struct lnw_dma_chan *lnwc = to_lnw_dma_chan(chan);
++
++ spin_lock_bh(&lnwc->lock);
++ if (!list_empty(&lnwc->queue))
++ lnwc_scan_descriptors(to_lnwdma_device(chan->device), lnwc);
++ spin_unlock_bh(&lnwc->lock);
++}
++
++static enum dma_status
++lnw_dma2_tx_is_complete(struct dma_chan *chan,
++ dma_cookie_t cookie,
++ dma_cookie_t *done,
++ dma_cookie_t *used)
++{
++ struct lnw_dma_chan *lnwc = to_lnw_dma_chan(chan);
++ dma_cookie_t last_used;
++ dma_cookie_t last_complete;
++ int ret;
++
++ last_complete = lnwc->completed;
++ last_used = chan->cookie;
++
++ ret = dma_async_is_complete(cookie, last_complete, last_used);
++ if (ret != DMA_SUCCESS) {
++ lnwc_scan_descriptors(to_lnwdma_device(chan->device), lnwc);
++
++ last_complete = lnwc->completed;
++ last_used = chan->cookie;
++
++ ret = dma_async_is_complete(cookie, last_complete, last_used);
++ }
++
++ if (done)
++ *done = last_complete;
++ if (used)
++ *used = last_used;
++
++ return ret;
++}
++
++static void lnw_dma2_terminate_all(struct dma_chan *chan)
++{
++ struct lnw_dma_chan *lnwc = to_lnw_dma_chan(chan);
++ struct lnwdma_device *lnw = to_lnwdma_device(chan->device);
++ struct lnw_dma_desc *desc, *_desc;
++ LIST_HEAD(list);
++
++ /* ASSERT: channel is idle */
++ if (lnwc->in_use == false) {
++ /*ch is not in use, wrong call*/
++ return;
++ }
++ spin_lock_bh(&lnwc->lock);
++ list_splice_init(&lnwc->free_list, &list);
++ lnwc->descs_allocated = 0;
++ lnwc->slave = NULL;
++
++ /* Disable interrupts*/
++ iowrite32(MASK_INTR_REG(lnwc->ch_id), lnw->dma_base + MASK_BLOCK);
++ iowrite32(MASK_INTR_REG(lnwc->ch_id), lnw->dma_base + MASK_ERR);
++
++ spin_unlock_bh(&lnwc->lock);
++ list_for_each_entry_safe(desc, _desc, &list, desc_node) {
++ dma_dbg("freeing descriptor %p\n", desc);
++ pci_pool_free(lnw->dma_pool, desc, desc->txd.phys);
++ }
++
++ return;
++}
++
++static struct dma_async_tx_descriptor *
++lnw_dma2_prep_slave_sg(struct dma_chan *chan,
++ struct scatterlist *sgl, unsigned int sg_len,
++ enum dma_data_direction direction,
++ unsigned long flags)
++{
++ /*not supported now*/
++ return NULL;
++}
++
++static struct dma_async_tx_descriptor *
++lnw_dma2_prep_memcpy(struct dma_chan *chan, dma_addr_t dest,
++ dma_addr_t src, size_t len, unsigned long flags)
++{
++ struct lnw_dma_chan *lnwc;
++ struct lnw_dma_desc *desc = NULL;
++ struct lnw_dma_slave *lnws;
++ union lnw_dma_ctl_lo ctl_lo;
++ union lnw_dma_ctl_hi ctl_hi;
++ union lnw_dma_cfg_lo cfg_lo;
++ union lnw_dma_cfg_hi cfg_hi;
++ enum lnw_dma_width width = 0;
++
++ dma_dbg("called \n");
++ WARN_ON(!chan);
++ if (!len)
++ return NULL;
++
++ lnws = chan->private;
++ WARN_ON(!lnws);
++
++ lnwc = to_lnw_dma_chan(chan);
++ WARN_ON(!lnwc);
++
++ dma_dbg("called for CH %d\n", lnwc->ch_id);
++ dma_dbg("Cfg passed Mode %x, Dirn %x, HS %x, Width %x \n",
++ lnws->cfg_mode, lnws->dirn, lnws->hs_mode, lnws->src_width);
++
++ /*calculate CFG_LO*/
++ if (lnws->hs_mode == LNW_DMA_SW_HS) {
++ cfg_lo.cfg_lo = 0;
++ cfg_lo.cfgx.hs_sel_dst = 1;
++ cfg_lo.cfgx.hs_sel_src = 1;
++ } else if (lnws->hs_mode == LNW_DMA_HW_HS)
++ cfg_lo.cfg_lo = 0x00000;
++
++ /*calculate CFG_HI*/
++ if (lnws->cfg_mode == LNW_DMA_MEM_TO_MEM) {
++ /*SW HS only*/
++ dma_dbg("CFG: Mem to mem dma \n");
++ cfg_hi.cfg_hi = 0;
++ } else {
++ dma_dbg("HW DMA \n");
++ cfg_hi.cfg_hi = 0;
++ cfg_hi.cfgx.protctl = 0x1; /*default value*/
++ cfg_hi.cfgx.src_per = get_ch_index(lnwc->ch_id);
++ cfg_hi.cfgx.dst_per = get_ch_index(lnwc->ch_id);
++ }
++
++ /*calculate CTL_HI*/
++ ctl_hi.ctlx.reser = 0;
++ width = lnws->src_width;
++ ctl_hi.ctlx.block_ts = get_block_ts(len, width);
++
++ /*calculate CTL_LO*/
++ ctl_lo.ctl_lo = 0;
++ ctl_lo.ctlx.int_en = 1;
++ ctl_lo.ctlx.dst_tr_width = lnws->dst_width;
++ ctl_lo.ctlx.src_tr_width = lnws->src_width;
++ ctl_lo.ctlx.dst_msize = lnws->src_msize;
++ ctl_lo.ctlx.src_msize = lnws->dst_msize;
++
++ if (lnws->cfg_mode == LNW_DMA_MEM_TO_MEM) {
++ dma_dbg("CTL: Mem to mem dma \n");
++ ctl_lo.ctlx.tt_fc = 0;
++ ctl_lo.ctlx.sinc = 0;
++ ctl_lo.ctlx.dinc = 0;
++ } else {
++ if (lnws->dirn == DMA_TO_DEVICE) {
++ dma_dbg("CTL: DMA_TO_DEVICE \n");
++ ctl_lo.ctlx.sinc = 0;
++ ctl_lo.ctlx.dinc = 2;
++ ctl_lo.ctlx.tt_fc = 1;
++ } else if (lnws->dirn == DMA_FROM_DEVICE) {
++ dma_dbg("CTL: DMA_FROM_DEVICE \n");
++ ctl_lo.ctlx.sinc = 2;
++ ctl_lo.ctlx.dinc = 0;
++ ctl_lo.ctlx.tt_fc = 2;
++ }
++ }
++
++ dma_dbg("Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
++ ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);
++
++ enable_dma2_interrupt(lnwc);
++
++ desc = lnwc_desc_get(lnwc);
++ if (desc == NULL)
++ goto err_desc_get;
++ desc->sar = src;
++ desc->dar = dest ;
++ desc->len = len;
++ desc->cfg_hi = cfg_hi.cfg_hi;
++ desc->cfg_lo = cfg_lo.cfg_lo;
++ desc->ctl_lo = ctl_lo.ctl_lo;
++ desc->ctl_hi = ctl_hi.ctl_hi;
++ desc->width = width;
++ desc->dirn = lnws->dirn;
++ if (lnws->callback) {
++ desc->callback = lnws->callback;
++ desc->callback_param = lnws->callback_param;
++ dma_dbg("Callback passed... setting\n");
++ } else
++ desc->callback = NULL;
++ return &desc->txd;
++
++err_desc_get:
++ dma_err("Failed to get desc \n");
++ lnwc_desc_put(lnwc, desc);
++ return NULL;
++}
++
++
++static void lnw_dma2_free_chan_resources(struct dma_chan *chan)
++{
++ struct lnw_dma_chan *lnwc = to_lnw_dma_chan(chan);
++ struct lnwdma_device *lnw = to_lnwdma_device(chan->device);
++ struct lnw_dma_desc *desc, *_desc;
++
++ dma_dbg("..called for ch_id %d, lnwch_id %d\n",
++ chan->chan_id, lnwc->ch_id);
++ if (true == lnwc->in_use) {
++ /*trying to free ch in use!!!!!*/
++ dma_err("trying to free ch in use \n");
++ }
++
++ spin_lock_bh(&lnwc->lock);
++ lnwc->descs_allocated = 0;
++ list_for_each_entry_safe(desc, _desc, &lnwc->active_list, desc_node) {
++ dma_dbg("del active \n");
++ list_del(&desc->desc_node);
++ pci_pool_free(lnw->dma_pool, desc, desc->txd.phys);
++ }
++ list_for_each_entry_safe(desc, _desc, &lnwc->free_list, desc_node) {
++ list_del(&desc->desc_node);
++ pci_pool_free(lnw->dma_pool, desc, desc->txd.phys);
++ }
++ list_for_each_entry_safe(desc, _desc, &lnwc->queue, desc_node) {
++ dma_dbg("del queue \n");
++ list_del(&desc->desc_node);
++ pci_pool_free(lnw->dma_pool, desc, desc->txd.phys);
++ }
++ spin_unlock_bh(&lnwc->lock);
++ lnwc->in_use = false;
++ chan->client_count--;
++ /* Disable CH interrupts*/
++ iowrite32(MASK_INTR_REG(lnwc->ch_id), lnw->dma_base + MASK_BLOCK);
++ iowrite32(MASK_INTR_REG(lnwc->ch_id), lnw->dma_base + MASK_ERR);
++ dma_dbg("done \n");
++}
++
++static int lnw_dma2_alloc_chan_resources(struct dma_chan *chan)
++{
++ struct lnw_dma_chan *lnwc = to_lnw_dma_chan(chan);
++ struct lnwdma_device *lnw = to_lnwdma_device(chan->device);
++ struct lnw_dma_desc *desc;
++ dma_addr_t phys;
++ int i = 0;
++
++ dma_dbg("called \n");
++
++ /* ASSERT: channel is idle */
++ if (test_ch_en(lnw->dma_base, lnwc->ch_id)) {
++ /*ch is not idle*/
++ dma_err(".ch not idle\n");
++ return -EIO;
++ }
++ dma_dbg("..called for ch_id %d, lnwch_id %d\n",
++ chan->chan_id, lnwc->ch_id);
++ lnwc->completed = chan->cookie = 1;
++
++ chan->client_count++;
++
++ spin_lock_bh(&lnwc->lock);
++ while (lnwc->descs_allocated < DESCS_PER_CHANNEL) {
++ spin_unlock_bh(&lnwc->lock);
++ desc = pci_pool_alloc(lnw->dma_pool, GFP_KERNEL, &phys);
++ if (!desc) {
++ dma_err("desc failed\n");
++ return -ENOMEM;
++ /*check*/
++ }
++ dma_async_tx_descriptor_init(&desc->txd, chan);
++ desc->txd.tx_submit = lnw_dma2_tx_submit;
++ desc->txd.flags = DMA_CTRL_ACK;
++ desc->txd.phys = phys;
++ spin_lock_bh(&lnwc->lock);
++ i = ++lnwc->descs_allocated;
++ list_add_tail(&desc->desc_node, &lnwc->free_list);
++ }
++ spin_unlock_bh(&lnwc->lock);
++ lnwc->in_use = false;
++ dma_dbg("Desc alloc done ret: %d desc\n", i);
++ return i;
++}
++
++static void lnwc_handle_error(struct lnwdma_device *lnw,
++ struct lnw_dma_chan *lnwc)
++{
++ lnwc_scan_descriptors(lnw, lnwc);
++}
++
++/******************************************************************************
++* PCI stuff
++*/
++static struct pci_device_id lnw_dma2_ids[] = {
++ { PCI_VENDOR_ID_INTEL, 0x0813, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
++ { 0, }
++};
++
++MODULE_DEVICE_TABLE(pci, lnw_dma2_ids);
++
++static struct pci_driver lnw_dma2_pci = {
++ .name = "Intel LNW DMA2",
++ .id_table = lnw_dma2_ids,
++ .probe = lnw_dma2_probe,
++ .remove = __devexit_p(lnw_dma2_remove),
++};
++
++static void dma_tasklet(unsigned long data)
++{
++ struct lnwdma_device *lnw = NULL;
++ struct lnw_dma_chan *lnwc = NULL;
++ u32 status;
++ int i, ch_no;
++
++ dma_dbg("called \n");
++ lnw = (struct lnwdma_device *)data;
++ if (lnw == NULL) {
++ dma_err("Null param \n");
++ return;
++ }
++
++ status = ioread32(lnw->dma_base + RAW_TFR);
++ dma_dbg("RAW_TFR %x \n", status);
++ while (status) {
++ /*txn interrupt*/
++ ch_no = get_ch_num(&status);
++ if (ch_no < 0) {
++ dma_err("Ch no is invalid %x, abort!\n", ch_no);
++ return;
++ }
++ dma_dbg("Got Ch %x, new Status %x \n", ch_no, status);
++ i = get_ch_index(ch_no);
++ if (i < 0) {
++ dma_err("Invalid ch index %x\n", i);
++ return;
++ }
++ dma_dbg("Tx complete interrupt %x, Ch No %d Index %d \n",
++ status, ch_no, i);
++ lnwc = &lnw->ch[i];
++ if (lnwc == NULL) {
++ dma_err("Null param lnwc\n");
++ return;
++ }
++ dma_dbg("CH %x \n", lnwc->ch_id);
++ spin_lock_bh(&lnwc->lock);
++ lnwc_scan_descriptors(lnw, lnwc);
++ dma_dbg("Scan of desc... complete, unmasking\n");
++ iowrite32((1 << lnwc->ch_id),
++ lnw->dma_base + CLEAR_TFR);
++ dma_dbg("Wrote to clear %x\n", (1 << lnwc->ch_id));
++ iowrite32((1 << lnwc->ch_id),
++ lnw->dma_base + CLEAR_BLOCK);
++ iowrite32(UNMASK_INTR_REG(lnwc->ch_id),
++ lnw->dma_base + MASK_TFR);
++ spin_unlock_bh(&lnwc->lock);
++ }
++
++ dma_dbg("Trf interrupt done... \n");
++ status = ioread32(lnw->dma_base + RAW_ERR);
++ while (status) {
++ /*err interrupt*/
++ ch_no = get_ch_num(&status);
++ if (ch_no < 0) {
++ dma_err("Ch no is invalid %x, abort!\n", ch_no);
++ return;
++ }
++ dma_dbg("Got Ch %x, new Status %x \n", ch_no, status);
++ i = get_ch_index(ch_no);
++ if (i < 0) {
++ dma_err("Invalid CH lnwc\n");
++ return;
++ }
++ dma_dbg("Tx error interrupt %x, No %d Index %d \n",
++ status, ch_no, i);
++ lnwc = &lnw->ch[i];
++ if (lnwc == NULL) {
++ dma_err("Null param lnwc\n");
++ return;
++ }
++ spin_lock_bh(&lnwc->lock);
++ lnwc_handle_error(lnw, lnwc);
++ iowrite32((1 << lnwc->ch_id),
++ lnw->dma_base + CLEAR_ERR);
++ iowrite32(UNMASK_INTR_REG(lnwc->ch_id),
++ lnw->dma_base + MASK_ERR);
++ spin_unlock_bh(&lnwc->lock);
++ }
++	dma_dbg("Exiting tasklet... \n");
++ return;
++}
++
++static irqreturn_t lnw_dma2_interrupt(int irq, void *data)
++{
++ struct lnw_device *lnw = data;
++ u32 status;
++ int call_tasklet = 0;
++
++ /*will mask interrupt for now and schedule tasklet
++ tasklet shud unmask and clear*/
++ status = ioread32(lnw->dma_base + STATUS_TFR);
++ status &= 0x03;
++ if (status) {
++ iowrite32((status << 8), lnw->dma_base + MASK_TFR);
++ call_tasklet = 1;
++ }
++ status = ioread32(lnw->dma_base + STATUS_ERR);
++ status &= 0x03;
++ if (status) {
++ iowrite32(MASK_INTR_REG(status), lnw->dma_base + MASK_ERR);
++ call_tasklet = 1;
++ }
++
++ if (call_tasklet)
++ tasklet_schedule(&lnw->dma->tasklet);
++
++ return IRQ_HANDLED;
++}
++
++static void enable_dma2_interrupt(struct lnw_dma_chan *lnwc)
++{
++ dma_dbg("Called for ch_id %d\n", lnwc->ch_id);
++
++ iowrite32(REG_BIT0, lnwc->dma->dma_base + DMA_CFG);
++ /*en ch interrupts */
++ iowrite32(UNMASK_INTR_REG(lnwc->ch_id), lnwc->dma_base + MASK_TFR);
++ iowrite32(UNMASK_INTR_REG(lnwc->ch_id), lnwc->dma_base + MASK_ERR);
++ return;
++}
++
++static void disable_dma2_interrupt(struct lnw_device *device)
++{
++ u32 status = 0;
++
++ /*todo*/
++ dma_dbg(" called \n");
++ status = 1;
++ return;
++
++}
++
++static int lnw_setup_dma2(struct pci_dev *pdev)
++{
++ struct lnw_device *device = pci_get_drvdata(pdev);
++ struct lnwdma_device *dma = NULL;
++ int err, i;
++
++ dma_dbg("setup_dma called \n");
++ dma = kzalloc(sizeof(*dma), GFP_KERNEL);
++ if (NULL == dma) {
++ dma_err("kzalloc failed \n");
++ err = -ENOMEM;
++ goto err_kzalloc;
++ }
++ device->dma = dma;
++ dma->pdev = pdev;
++ dma->dma_base = device->dma_base;
++
++ /* DMA coherent memory pool for DMA descriptor allocations */
++ dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
++ sizeof(struct lnw_dma_desc),
++ 32, 0);
++ if (NULL == dma->dma_pool) {
++ dma_err("pci_pool_create failed \n");
++ err = -ENOMEM;
++ kfree(dma);
++ goto err_dma_pool;
++ }
++
++ INIT_LIST_HEAD(&dma->common.channels);
++
++
++ /*init CH structures*/
++ for (i = 0; i < MAX_CHAN; i++) {
++ struct lnw_dma_chan *lnwch = &dma->ch[i];
++
++ lnwch->chan.device = &dma->common;
++ lnwch->chan.cookie = 1;
++ lnwch->chan.chan_id = i;
++ lnwch->ch_id = get_ch_id(i);
++ dma_dbg("Init CH %d, ID %d \n", i, lnwch->ch_id);
++
++ lnwch->dma_base = dma->dma_base;
++ lnwch->ch_regs = dma->dma_base + DMA_CH_SIZE * lnwch->ch_id;
++ lnwch->dma = dma;
++ spin_lock_init(&lnwch->lock);
++
++ INIT_LIST_HEAD(&lnwch->active_list);
++ INIT_LIST_HEAD(&lnwch->queue);
++ INIT_LIST_HEAD(&lnwch->free_list);
++
++ /*mask interrupts*/
++ iowrite32(MASK_INTR_REG(lnwch->ch_id),
++ dma->dma_base + MASK_BLOCK);
++ iowrite32(MASK_INTR_REG(lnwch->ch_id),
++ dma->dma_base + MASK_SRC_TRAN);
++ iowrite32(MASK_INTR_REG(lnwch->ch_id),
++ dma->dma_base + MASK_DST_TRAN);
++ iowrite32(MASK_INTR_REG(lnwch->ch_id),
++ dma->dma_base + MASK_ERR);
++ iowrite32(MASK_INTR_REG(lnwch->ch_id),
++ dma->dma_base + MASK_TFR);
++
++ dma_dbg("Init CH %d, ID %d \n", i, lnwch->ch_id);
++ list_add_tail(&lnwch->chan.device_node, &dma->common.channels);
++ }
++
++ /*init dma structure*/
++ dma_cap_zero(dma->common.cap_mask);
++ dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
++ dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
++ dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
++ dma->common.dev = &pdev->dev;
++ dma->common.chancnt = MAX_CHAN;
++
++ dma->common.device_alloc_chan_resources =
++ lnw_dma2_alloc_chan_resources;
++ dma->common.device_free_chan_resources =
++ lnw_dma2_free_chan_resources;
++
++ dma->common.device_is_tx_complete = lnw_dma2_tx_is_complete;
++ dma->common.device_prep_dma_memcpy = lnw_dma2_prep_memcpy;
++ dma->common.device_issue_pending = lnw_dma2_issue_pending;
++ dma->common.device_prep_slave_sg = lnw_dma2_prep_slave_sg;
++ dma->common.device_terminate_all = lnw_dma2_terminate_all;
++
++ /*enable dma cntrl*/
++ iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);
++
++ disable_dma2_interrupt(device);
++
++ /*register irq*/
++ err = request_irq(pdev->irq, lnw_dma2_interrupt,
++ 0, lnw_dma2_pci.name, device);
++ if (0 != err)
++ goto err_irq;
++
++ /*register device w/ engine*/
++ err = dma_async_device_register(&dma->common);
++ if (0 != err) {
++ dma_err("device_register failed: %d \n", err);
++ goto err_engine;
++ }
++ tasklet_init(&dma->tasklet, dma_tasklet, (unsigned long)dma);
++ dma_dbg("...done\n");
++ return 0;
++
++err_engine:
++ free_irq(pdev->irq, device);
++err_irq:
++ pci_pool_destroy(dma->dma_pool);
++ kfree(dma);
++err_dma_pool:
++err_kzalloc:
++ dma_err("setup_dma failed: %d \n", err);
++ return err;
++
++}
++
++static void lnwdma_shutdown(struct pci_dev *pdev)
++{
++ struct lnw_device *device = pci_get_drvdata(pdev);
++
++ dma_dbg("shutdown called \n");
++ dma_async_device_unregister(&device->dma->common);
++ pci_pool_destroy(device->dma->dma_pool);
++ if (device->dma_base)
++ iounmap(device->dma_base);
++ free_irq(pdev->irq, device);
++ return;
++}
++static int __devinit
++lnw_dma2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
++{
++ struct lnw_device *device = NULL;
++ u32 base_addr = 0, bar_size = 0;
++ int err = 0;
++
++ dma_info("probe called for %x \n", pdev->device);
++ err = pci_enable_device(pdev);
++ if (err)
++ goto err_enable_device;
++
++ err = pci_request_regions(pdev, lnw_dma2_pci.name);
++ if (err)
++ goto err_request_regions;
++
++ err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
++ if (err)
++ goto err_set_dma_mask;
++
++ err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
++ if (err)
++ goto err_set_dma_mask;
++
++ device = kzalloc(sizeof(*device), GFP_KERNEL);
++ if (!device) {
++ dma_err("kzalloc failed \n");
++ err = -ENOMEM;
++ goto err_kzalloc;
++ }
++ device->pdev = pci_dev_get(pdev);
++
++ base_addr = pci_resource_start(pdev, 0);
++ bar_size = pci_resource_len(pdev, 0);
++ dma_dbg("BAR0 %x Size %x \n", base_addr, bar_size);
++ device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
++ if (!device->dma_base) {
++ dma_err("ioremap failed \n");
++ err = -ENOMEM;
++ goto err_ioremap;
++ }
++
++ pci_set_drvdata(pdev, device);
++ pci_set_master(pdev);
++
++ err = lnw_setup_dma2(pdev);
++ if (err)
++ goto err_dma;
++
++ return 0;
++
++err_dma:
++ iounmap(device->dma_base);
++err_ioremap:
++ pci_dev_put(pdev);
++ kfree(device);
++err_kzalloc:
++err_set_dma_mask:
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
++err_request_regions:
++err_enable_device:
++ dma_err("Probe failed %d\n", err);
++ return err;
++}
++
++static void __devexit lnw_dma2_remove(struct pci_dev *pdev)
++{
++ struct lnw_device *device = pci_get_drvdata(pdev);
++
++ lnwdma_shutdown(pdev);
++ pci_dev_put(pdev);
++ kfree(device);
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
++}
++
++static int __init lnw_dma2_init(void)
++{
++ dma_info("LNW DMA Driver\n Version %s \n", LNW_DMA_DRIVER_VERSION);
++ return pci_register_driver(&lnw_dma2_pci);
++}
++fs_initcall(lnw_dma2_init);
++
++static void __exit lnw_dma2_exit(void)
++{
++ pci_unregister_driver(&lnw_dma2_pci);
++}
++module_exit(lnw_dma2_exit);
++
+Index: linux-2.6.33/include/linux/lnw_dma.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/include/linux/lnw_dma.h
+@@ -0,0 +1,166 @@
++/*
++ * lnw_dma.c - Intel Langwell DMA Drivers
++ *
++ * Copyright (C) 2008i-09 Intel Corp
++ * Author: Vinod Koul <vinod.koul@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ *
++ */
++#ifndef __LNW_DMA_H__
++#define __LNW_DMA_H__
++
++#include <linux/dmaengine.h>
++
++/*DMA transaction width, src and dstn width would be same
++The DMA length must be width aligned,
++for 32 bit width the length must be 32 bit (4bytes) aligned only*/
++enum lnw_dma_width {
++ LNW_DMA_WIDTH_8BIT = 0x0,
++ LNW_DMA_WIDTH_16BIT = 0x1,
++ LNW_DMA_WIDTH_32BIT = 0x2,
++};
++
++/*DMA mode configurations*/
++enum lnw_dma_mode {
++ LNW_DMA_PER_TO_MEM = 0, /*periphral to memory configuration*/
++ LNW_DMA_MEM_TO_PER, /*memory to periphral configuration*/
++ LNW_DMA_MEM_TO_MEM, /*mem to mem confg (testing only)*/
++};
++
++/*DMA handshaking*/
++enum lnw_dma_hs_mode {
++ LNW_DMA_HW_HS = 0, /*HW Handshaking only*/
++ LNW_DMA_SW_HS = 1, /*SW Handshaking not recommended*/
++};
++
++/*Burst size configuration*/
++enum lnw_dma_msize {
++ LNW_DMA_MSIZE_1 = 0x0,
++ LNW_DMA_MSIZE_4 = 0x1,
++ LNW_DMA_MSIZE_8 = 0x2,
++ LNW_DMA_MSIZE_16 = 0x3,
++ LNW_DMA_MSIZE_32 = 0x4,
++ LNW_DMA_MSIZE_64 = 0x5,
++};
++
++/**
++ * struct lnw_dma_slave - DMA slave structure
++ *
++ * @dma_dev: DMA master client
++ * @tx_reg: physical address of data register used for
++ * memory-to-peripheral transfers
++ * @rx_reg: physical address of data register used for
++ * peripheral-to-memory transfers
++ * @tx_width: tx register width
++ * @rx_width: rx register width
++ * @dirn: DMA trf direction
++
++ * @cfg_hi: Platform-specific initializer for the CFG_HI register
++ * @cfg_lo: Platform-specific initializer for the CFG_LO register
++
++ * @ tx_width: width of src and dstn
++ * @ hs_mode: SW or HW handskaking mode
++ * @ cfg_mode: Mode configuration, DMA mem to mem to dev & mem
++ */
++struct lnw_dma_slave {
++ enum dma_data_direction dirn;
++ enum lnw_dma_width src_width; /*width of DMA src txn*/
++ enum lnw_dma_width dst_width; /*width of DMA dst txn*/
++ enum lnw_dma_hs_mode hs_mode; /*handshaking*/
++ enum lnw_dma_mode cfg_mode; /*mode configuration*/
++ enum lnw_dma_msize src_msize; /*size if src burst*/
++ enum lnw_dma_msize dst_msize; /*size of dst burst*/
++ dma_async_tx_callback callback; /*callback function*/
++ void *callback_param; /*param for callback*/
++};
++
++/*DMA channel control registers*/
++union lnw_dma_ctl_lo {
++ struct {
++ u32 int_en:1; /*enable or disable interrupts*/
++ /*should be 0*/
++ u32 dst_tr_width:3; /*destination transfer width*/
++ /*usually 32 bits = 010*/
++ u32 src_tr_width:3; /*source transfer width*/
++ /*usually 32 bits = 010*/
++ u32 dinc:2; /*destination address inc/dec*/
++ /*For mem:INC=00, Periphral NoINC=11*/
++ u32 sinc:2; /*source address inc or dec, as above*/
++ u32 dst_msize:3; /*destination burst transaction length*/
++ /*always = 16 ie 011*/
++ u32 src_msize:3; /*source burst transaction length*/
++ /*always = 16 ie 011*/
++ u32 reser1:3;
++ u32 tt_fc:3; /*transfer type and flow controller*/
++ /*M-M = 000
++ P-M = 010
++ M-P = 001*/
++ u32 dms:2; /*destination master select = 0*/
++ u32 sms:2; /*source master select = 0*/
++ u32 llp_dst_en:1; /*enable/disable destination LLP = 0*/
++ u32 llp_src_en:1; /*enable/disable source LLP = 0*/
++ u32 reser2:3;
++ } ctlx;
++ u32 ctl_lo;
++};
++
++union lnw_dma_ctl_hi {
++ struct {
++ u32 block_ts:12; /*block transfer size*/
++ /*configured by DMAC*/
++ u32 reser:20;
++ } ctlx;
++ u32 ctl_hi;
++
++};
++
++/*DMA channel configuration registers*/
++union lnw_dma_cfg_lo {
++ struct {
++ u32 reser1:5;
++ u32 ch_prior:3; /*channel priority = 0*/
++ u32 ch_susp:1; /*channel suspend = 0*/
++ u32 fifo_empty:1; /*FIFO empty or not R bit = 0*/
++ u32 hs_sel_dst:1; /*select HW/SW destn handshaking*/
++ /*HW = 0, SW = 1*/
++ u32 hs_sel_src:1; /*select HW/SW src handshaking*/
++ u32 reser2:6;
++ u32 dst_hs_pol:1; /*dest HS interface polarity*/
++ u32 src_hs_pol:1; /*src HS interface polarity*/
++ u32 max_abrst:10; /*max AMBA burst len = 0 (no sw limit*/
++ u32 reload_src:1; /*auto reload src addr =1 if src is P*/
++ u32 reload_dst:1; /*AR destn addr =1 if dstn is P*/
++ } cfgx;
++ u32 cfg_lo;
++};
++
++union lnw_dma_cfg_hi {
++ struct {
++ u32 fcmode:1; /*flow control mode = 1*/
++ u32 fifo_mode:1; /*FIFO mode select = 1*/
++ u32 protctl:3; /*protection control = 0*/
++ u32 rsvd:2;
++ u32 src_per:4; /*src hw HS interface*/
++ u32 dst_per:4; /*dstn hw HS interface*/
++ u32 reser2:17;
++ } cfgx;
++ u32 cfg_hi;
++};
++
++#endif /*__LNW_DMA_H__*/
+Index: linux-2.6.33/include/linux/intel_mid.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/include/linux/intel_mid.h
+@@ -0,0 +1,144 @@
++/*
++ * intel_mid.h - Netlink multicast interface definition for OSPM.
++ *
++ * Copyright (C) 2009 Intel Corp
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * Authors: Sujith Thomas
++ * Rajeev D Muralidhar
++ * Vishwesh M Rudramuni
++ * Nithish Mahalingam
++ * Contact information:
++ * Sujith Thomas <sujith.thomas@intel.com>
++ * Rajeev D Muralidhar <rajeev.d.muralidhar@intel.com>
++ * Vishwesh M Rudramuni <vishwesh.m.rudramuni@intel.com>
++ * Nithish Mahalingam <nithish.mahalingam@intel.com>
++ *
++ */
++
++#ifndef INTEL_MID_H
++#define INTEL_MID_H
++
++#define PMU1_MAX_DEVS 2
++#define PMU2_MAX_DEVS 12
++#define PERIPH_MAX_DEVS 3
++#define MAX_DEVICES (PMU1_MAX_DEVS + PMU2_MAX_DEVS + PERIPH_MAX_DEVS)
++#define WAKE_CAPABLE 0x80000000
++
++struct pci_dev_info {
++ u16 vendor_id;
++ u16 device_id;
++ u16 log_subsysid;
++ u16 phy_susbsysid;
++ u32 capability;
++ struct pci_dev *dev_driver;
++ char *dev_name;
++};
++
++struct mid_ospm {
++ u32 *pmu1_base;
++ u32 *pmu1_pm_base;
++ void __iomem *pmu2_base;
++ u32 *pm_table_base;
++ u32 pmu1_sub_systems;
++ u32 pmu2_sub_systems;
++ u32 pmu_wake_cfg;
++ u32 pmu_wake_ss_states;
++ u32 perepheral_sub_systems;
++ int pmu2_states;
++ int platform_sx_state;
++ int s0ix_retry_enb;
++ int fast_retry_exit;
++ u32 pmode;
++};
++
++extern struct pci_dev_info platform_pci_devices[MAX_DEVICES];
++extern unsigned long g_intel_mid_wakeup_address;
++
++enum pmu_ss_state {
++ SS_STATE_D0I0 = 0,
++ SS_STATE_D0I1 = 1,
++ SS_STATE_D0I2 = 2,
++ SS_STATE_D0I3 = 3
++};
++
++enum eospm_events {
++ OSPM_EVENT_SUBSYS_INACTIVITY,
++ OSPM_EVENT_SUBSYS_WAKE,
++ OSPM_EVENT_SUBSYS_START_PLAY,
++ OSPM_EVENT_SUBSYS_STOP_PLAY,
++ OSPM_EVENT_CMD_SUCCESS,
++ OSPM_EVENT_CMD_ERROR,
++ OSPM_EVENT_CMD_NO_C6_ERROR,
++ OSPM_EVENT_AUDIO_BUF_EMPTY,
++ OSPM_EVENT_AUDIO_BUF_FULL,
++ OSPM_EVENT_THERMAL_AUX0,
++ OSPM_EVENT_THERMAL_AUX1,
++ OSPM_EVENT_THERMAL_CRITICAL,
++ OSPM_EVENT_THERMAL_DEV_FAULT,
++ __OSPM_EVENT_COUNT,
++};
++
++#define AUDIO_SUBSYTEM_ID 25
++#define MID_S0I1_STATE 1
++#define MID_S0I3_STATE 3
++/* Thermal device Id */
++#define TEMP_DEV_ID1 40
++#define TEMP_DEV_ID2 41
++#define TEMP_DEV_ID3 42
++
++/* First 32 (0-31) originators are subsystems
++ Next 8 (0-7) are cmd IDs */
++#define OSPM_CMDID_OFFSET 32
++#define OSPM_MAX_CMD_ID 8
++
++struct ospm_genl_event {
++ u32 orig;
++ enum eospm_events event;
++};
++
++/* attributes of ospm_genl_family */
++enum {
++ OSPM_GENL_ATTR_UNSPEC,
++ OSPM_GENL_ATTR_EVENT, /* OSPM event info needed by user space */
++ __OSPM_GENL_ATTR_MAX,
++};
++#define OSPM_GENL_ATTR_MAX (__OSPM_GENL_ATTR_MAX - 1)
++
++/* commands supported by the ospm_genl_family */
++
++enum {
++ OSPM_GENL_CMD_UNSPEC,
++ OSPM_GENL_CMD_EVENT, /* kernel->user notifications for OSPM events */
++ __OSPM_GENL_CMD_MAX,
++};
++#define OSPM_GENL_CMD_MAX (__OSPM_GENL_CMD_MAX - 1)
++
++#define OSPM_GENL_FAMILY_NAME "ospm_event"
++#define OSPM_GENL_VERSION 0x01
++#define OSPM_GENL_MCAST_GROUP_NAME "ospm_mc_group"
++
++int ospm_generate_netlink_event(u32 orig, enum eospm_events event);
++int ospm_event_genetlink_init(void);
++void ospm_event_genetlink_exit(void);
++
++extern void intel_mid_reserve_bootmem(void);
++extern unsigned long g_intel_mid_wakeup_address;
++extern void find_pci_info(u32 device_id, u32 vendor_id, u32 *index);
++extern int s0ix_non_bsp_init(void);
++
++#endif
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-mmc-driver-1.0.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-mmc-driver-1.0.patch
new file mode 100644
index 0000000..b93f76a
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-mmc-driver-1.0.patch
@@ -0,0 +1,1367 @@
+From bac1959b76f55a7845b0bb20eafd32e40c49dd74 Mon Sep 17 00:00:00 2001
+From: JiebingLi <jiebing.li@intel.com>
+Date: Wed, 25 Nov 2009 19:43:03 +0800
+Subject: [PATCH 077/104] MMC driver Beta 1.0 It is a consolidated patch against upstream Linux 2.6.31.6
+
+Beta features:
+1. removed CEATA function from MMC bus driver
+2. Added 25MHz support at High-Speed mode for SDIO Comms devices
+3. Added eMMC support for Moorestown platform
+4. Fixed sighting 3469229 and 3452438
+
+Kernel configuration:
+CONFIG_MMC=y
+CONFIG_MRST_LNW_A2_WR=y
+CONFIG_SDIO_SUSPEND=y
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PCI=y
+
+Note: Please DO NOT enable CONFIG_MMC_SDHCI_MRST_EMMC unless you
+ are sure that you want to enable eMMC device for Moorestown.
+
+Signed-off-by: JiebingLi <jiebing.li@intel.com>
+---
+ drivers/mmc/Kconfig | 14 ++
+ drivers/mmc/card/block.c | 3 +-
+ drivers/mmc/core/Kconfig | 11 +
+ drivers/mmc/core/core.c | 6 +
+ drivers/mmc/core/mmc.c | 41 ++++-
+ drivers/mmc/core/sd.c | 8 +
+ drivers/mmc/core/sdio.c | 464 +++++++++++++++++++++++++++++++++++++++++
+ drivers/mmc/core/sdio_bus.c | 38 ++++
+ drivers/mmc/core/sdio_bus.h | 4 +
+ drivers/mmc/host/Kconfig | 10 +
+ drivers/mmc/host/sdhci-pci.c | 81 ++++++--
+ drivers/mmc/host/sdhci.c | 125 ++++++++++-
+ drivers/mmc/host/sdhci.h | 3 +
+ include/linux/mmc/card.h | 12 +
+ include/linux/mmc/host.h | 2 +
+ include/linux/mmc/sdio_func.h | 14 ++
+ include/linux/pci_ids.h | 2 +
+ 17 files changed, 810 insertions(+), 28 deletions(-)
+
+diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
+index f2eeb38..2c19682 100644
+--- a/drivers/mmc/Kconfig
++++ b/drivers/mmc/Kconfig
+@@ -19,6 +19,20 @@ config MMC_DEBUG
+ This is an option for use by developers; most people should
+ say N here. This enables MMC core and driver debugging.
+
++config MRST_LNW_A1_WR
++ bool "software workaround for Moorestown LNW A-1"
++ depends on MMC
++ help
++ This is an option for Moorestown developers to add workaround
++ in the code due to LNW A-1 Silicon restrictions.
++
++config MRST_LNW_A2_WR
++ bool "software workaround for Moorestown LNW A-2"
++ depends on MMC
++ help
++ This is an option for Moorestown developers to add workaround
++ in the code due to LNW A-2 Silicon restrictions.
++
+ if MMC
+
+ source "drivers/mmc/core/Kconfig"
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index 1f552c6..f4ebc68 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -534,7 +534,8 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
+ * messages to tell when the card is present.
+ */
+
+- sprintf(md->disk->disk_name, "mmcblk%d", devidx);
++ snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
++ "mmcblk%d", devidx);
+
+ blk_queue_logical_block_size(md->queue.queue, 512);
+
+diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
+index bb22ffd..205186c 100644
+--- a/drivers/mmc/core/Kconfig
++++ b/drivers/mmc/core/Kconfig
+@@ -16,3 +16,14 @@ config MMC_UNSAFE_RESUME
+
+ This option sets a default which can be overridden by the
+ module parameter "removable=0" or "removable=1".
++
++config SDIO_SUSPEND
++ bool "SDIO selective suspend/resume"
++ depends on MMC && PM
++ help
++ If you say Y here, you can use driver calls or the sysfs
++ "power/level" file to suspend or resume the SDIO
++ peripherals.
++
++ If you are unsure about this, say N here.
++
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index 30acd52..951561d 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -213,9 +213,15 @@ void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
+ mrq->done_data = &complete;
+ mrq->done = mmc_wait_done;
+
++ if (host->port_mutex)
++ mutex_lock(host->port_mutex);
++
+ mmc_start_request(host, mrq);
+
+ wait_for_completion(&complete);
++
++ if (host->port_mutex)
++ mutex_unlock(host->port_mutex);
+ }
+
+ EXPORT_SYMBOL(mmc_wait_for_req);
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 0eac6c8..15aaa66 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -179,8 +179,10 @@ static int mmc_read_ext_csd(struct mmc_card *card)
+
+ err = mmc_send_ext_csd(card, ext_csd);
+ if (err) {
+- /* If the host or the card can't do the switch,
+- * fail more gracefully. */
++ /*
++ * If the host or the card can't do the switch,
++ * fail more gracefully.
++ */
+ if ((err != -EINVAL)
+ && (err != -ENOSYS)
+ && (err != -EFAULT))
+@@ -294,6 +296,28 @@ static struct device_type mmc_type = {
+ };
+
+ /*
++ * Distinguish the fake MMCA4 MMC card.
++ *
++ * Transcend 2GB MMC card is a kind of MMCA3.31 MMC card.
++ * However, it makes up itself as a MMCA4 one via SPEC_VERS
++ * field of its CSD register. Once it's treated as MMCA4 by
++ * driver, 4 bit bus is activated which leads to data error.
++ */
++static bool fake_mmca4_card(struct mmc_card *card)
++{
++ if (card->cid.manfid == 0x1e &&
++ card->cid.oemid == 0xffff &&
++ card->cid.prod_name[0] == 'M' &&
++ card->cid.prod_name[1] == 'M' &&
++ card->cid.prod_name[2] == 'C' &&
++ card->cid.month == 9 &&
++ card->cid.year == 2008)
++ return true;
++ else
++ return false;
++}
++
++/*
+ * Handle the detection and initialisation of a card.
+ *
+ * In the case of a resume, "oldcard" will contain the card
+@@ -389,6 +413,12 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
+ err = mmc_decode_cid(card);
+ if (err)
+ goto free_card;
++
++ /*
++ * Get card's true specification version
++ */
++ if (fake_mmca4_card(card))
++ card->csd.mmca_vsn = CSD_SPEC_VER_3;
+ }
+
+ /*
+@@ -409,6 +439,11 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
+ goto free_card;
+ }
+
++/*
++ * avoid MMC cards to switch to HS timing
++ * which doesn't work yet due to LNW A-1 Silicon bug
++ */
++#if !defined CONFIG_MRST_LNW_A1_WR && !defined CONFIG_MRST_LNW_A2_WR
+ /*
+ * Activate high speed (if supported)
+ */
+@@ -428,7 +463,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
+ mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+ }
+ }
+-
++#endif
+ /*
+ * Compute bus speed.
+ */
+diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
+index fdd414e..5aae661 100644
+--- a/drivers/mmc/core/sd.c
++++ b/drivers/mmc/core/sd.c
+@@ -234,6 +234,7 @@ out:
+ return err;
+ }
+
++#if !defined CONFIG_MRST_LNW_A1_WR && !defined CONFIG_MRST_LNW_A2_WR
+ /*
+ * Test if the card supports high-speed mode and, if so, switch to it.
+ */
+@@ -281,6 +282,7 @@ out:
+
+ return err;
+ }
++#endif
+
+ MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
+ card->raw_cid[2], card->raw_cid[3]);
+@@ -460,12 +462,18 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
+ goto free_card;
+ }
+
++/*
++ * avoid SD cards to switch to HS timing
++ * which doesn't work yet due to LNW A-1 Silicon bug
++ */
++#if !defined CONFIG_MRST_LNW_A1_WR && !defined CONFIG_MRST_LNW_A2_WR
+ /*
+ * Attempt to change to high-speed (if supported)
+ */
+ err = mmc_switch_hs(card);
+ if (err)
+ goto free_card;
++#endif
+
+ /*
+ * Compute bus speed.
+diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
+index 06b6408..42977e1 100644
+--- a/drivers/mmc/core/sdio.c
++++ b/drivers/mmc/core/sdio.c
+@@ -24,6 +24,262 @@
+ #include "sdio_ops.h"
+ #include "sdio_cis.h"
+
++#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv)
++
++#ifdef CONFIG_SDIO_SUSPEND
++
++static int sdio_suspend_func(struct mmc_card *card,
++ struct sdio_func *func, pm_message_t msg)
++{
++ struct device *dev;
++ int error = 0;
++
++ dev = &func->dev;
++ BUG_ON(!dev);
++
++ if (dev->bus)
++ if (dev->bus->suspend)
++ error = dev->bus->suspend(dev, msg);
++
++ return error;
++}
++
++static int sdio_resume_func(struct mmc_card *card, struct sdio_func *func)
++{
++ struct device *dev;
++ int error = 0;
++
++ dev = &func->dev;
++ BUG_ON(!dev);
++
++ if (dev->bus)
++ if (dev->bus->resume)
++ error = dev->bus->resume(dev);
++
++ return error;
++}
++
++int sdio_suspend_host(struct mmc_card *card, pm_message_t msg)
++{
++ int ret = 0;
++ int i = 0;
++ struct device *dev;
++ struct sdio_driver *drv;
++
++ mutex_lock(&card->pm_mutex);
++
++ if (!mmc_card_present(card) ||
++ mmc_card_suspended(card))
++ goto done;
++
++ for (i = 0; i < card->sdio_funcs; i++)
++ if (!sdio_func_suspended(card->sdio_func[i])) {
++ dev = &(card->sdio_func[i])->dev;
++ BUG_ON(!dev);
++
++ drv = to_sdio_driver(dev->driver);
++
++ if (dev->driver && drv->suspend)
++ goto done;
++ }
++
++ ret = mmc_suspend_host(card->host, msg);
++
++ if (ret == 0)
++ mmc_card_set_suspended(card);
++
++done:
++ mutex_unlock(&card->pm_mutex);
++
++ return ret;
++}
++
++int sdio_resume_host(struct mmc_card *card)
++{
++ int ret = 0;
++
++ mutex_lock(&card->pm_mutex);
++
++ if (!mmc_card_present(card)) {
++ ret = -ENODEV;
++ goto done;
++ }
++
++ if (mmc_card_suspended(card)) {
++ ret = mmc_resume_host(card->host);
++
++ if (ret == 0)
++ mmc_card_clear_suspended(card);
++ else
++ goto done;
++ }
++
++done:
++ mutex_unlock(&card->pm_mutex);
++
++ return ret;
++}
++
++/*
++ * This routine handles external suspend request coming from sysfs
++ */
++int sdio_external_suspend_device(struct sdio_func *func, pm_message_t msg)
++{
++ int ret = 0;
++ struct mmc_card *card = func->card;
++
++ BUG_ON(!card);
++ BUG_ON(!card->host);
++
++ mutex_lock(&card->pm_mutex);
++
++ if (!sdio_func_present(func) ||
++ sdio_func_suspended(func)) {
++ mutex_unlock(&card->pm_mutex);
++ goto done;
++ }
++
++ /* suspend the function of the SDIO device */
++ ret = sdio_suspend_func(card, func, msg);
++
++ if (ret != 0) {
++ mutex_unlock(&card->pm_mutex);
++ goto done;
++ }
++
++ sdio_func_set_suspended(func);
++
++ mutex_unlock(&card->pm_mutex);
++
++ ret = sdio_suspend_host(card, msg);
++
++done:
++ return ret;
++}
++
++/*
++ * This routine handles external resume request coming from sysfs
++ */
++int sdio_external_resume_device(struct sdio_func *func)
++{
++ int ret = 0;
++ struct mmc_card *card = func->card;
++
++ BUG_ON(!card);
++ BUG_ON(!card->host);
++
++ ret = sdio_resume_host(card);
++ if (ret)
++ goto done;
++
++ mutex_lock(&card->pm_mutex);
++
++ if (sdio_func_suspended(func)) {
++ ret = sdio_resume_func(card, func);
++
++ if (ret != 0) {
++ mutex_unlock(&card->pm_mutex);
++ goto done;
++ } else
++ sdio_func_clear_suspended(func);
++ }
++
++ mutex_unlock(&card->pm_mutex);
++done:
++
++ return ret;
++}
++
++static const char power_group[] = "power";
++
++static const char resume_string[] = "resume";
++static const char suspend_string[] = "suspend";
++
++static ssize_t
++show_level(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ struct sdio_func *func = container_of(dev, struct sdio_func, dev);
++ const char *p = suspend_string;
++
++ BUG_ON(!func);
++
++ if (sdio_func_suspended(func))
++ p = suspend_string;
++ else
++ p = resume_string;
++
++ return sprintf(buf, "%s\n", p);
++}
++
++static ssize_t
++set_level(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct sdio_func *func = container_of(dev, struct sdio_func, dev);
++ int len = count;
++ char *cp;
++ int ret = 0;
++
++ BUG_ON(!func);
++
++ cp = memchr(buf, '\n', count);
++ if (cp)
++ len = cp - buf;
++
++ down(&dev->sem);
++
++ if (len == sizeof resume_string - 1 &&
++ strncmp(buf, resume_string, len) == 0) {
++ ret = sdio_external_resume_device(func);
++ } else if (len == sizeof suspend_string - 1 &&
++ strncmp(buf, suspend_string, len) == 0) {
++ ret = sdio_external_suspend_device(func, PMSG_SUSPEND);
++ } else {
++ ret = -EINVAL;
++ }
++
++ up(&dev->sem);
++
++ return (ret < 0 ? ret : count);
++}
++
++static DEVICE_ATTR(level, S_IRUGO | S_IWUSR, show_level, set_level);
++
++void sdio_remove_sysfs_file(struct sdio_func *func)
++{
++ struct device *dev = &func->dev;
++ struct sdio_driver *drv = to_sdio_driver(dev->driver);
++
++ if (dev->driver && drv->suspend)
++ sysfs_remove_file_from_group(&dev->kobj,
++ &dev_attr_level.attr,
++ power_group);
++}
++
++int sdio_create_sysfs_file(struct sdio_func *func)
++{
++ int ret;
++ struct device *dev = &func->dev;
++ struct sdio_driver *drv = to_sdio_driver(dev->driver);
++
++ if (dev->driver && drv->suspend) {
++ ret = sysfs_add_file_to_group(&dev->kobj,
++ &dev_attr_level.attr,
++ power_group);
++
++ if (ret)
++ goto error;
++ }
++
++ return 0;
++
++error:
++ sdio_remove_sysfs_file(func);
++ return ret;
++}
++
++#endif /* CONFIG_SDIO_SUSPEND */
++
+ static int sdio_read_fbr(struct sdio_func *func)
+ {
+ int ret;
+@@ -187,6 +443,7 @@ static int sdio_disable_cd(struct mmc_card *card)
+ return mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
+ }
+
++#if !defined(CONFIG_MRST_LNW_A1_WR) && !defined(CONFIG_MRST_LNW_A2_WR)
+ /*
+ * Test if the card supports high-speed mode and, if so, switch to it.
+ */
+@@ -216,6 +473,128 @@ static int sdio_enable_hs(struct mmc_card *card)
+
+ return 0;
+ }
++#else
++static int sdio_enable_hs(struct mmc_card *card)
++{
++ return 0;
++}
++#endif
++
++/*
++ * Handle the re-initialization of a SDIO card.
++ */
++static int mmc_sdio_reinit_card(struct mmc_host *host,
++ struct mmc_card *oldcard)
++{
++ int err = 0;
++ u16 funcs;
++ u32 ocr;
++ struct mmc_card *card;
++
++#if !defined(CONFIG_MRST_LNW_A1_WR) && !defined(CONFIG_MRST_LNW_A2_WR)
++ unsigned int max_dtr;
++#endif
++
++ BUG_ON(!host);
++ WARN_ON(!host->claimed);
++
++ if (!oldcard)
++ goto err;
++
++ card = oldcard;
++
++ err = mmc_send_io_op_cond(host, 0, &ocr);
++ if (err)
++ goto remove;
++
++ /*
++ * Inform the card of the voltage
++ */
++ err = mmc_send_io_op_cond(host, host->ocr, &ocr);
++ if (err)
++ goto remove;
++
++ /*
++ * For SPI, enable CRC as appropriate.
++ */
++ if (mmc_host_is_spi(host)) {
++ err = mmc_spi_set_crc(host, use_spi_crc);
++ if (err)
++ goto remove;
++ }
++
++ funcs = (ocr & 0x70000000) >> 28;
++
++ if (funcs != card->sdio_funcs)
++ printk(KERN_INFO "funcs number is changed from OCR register after suspend!\n");
++
++ if (!mmc_host_is_spi(host)) {
++ err = mmc_send_relative_addr(host, &card->rca);
++ if (err)
++ goto remove;
++
++ mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
++ }
++
++ /*
++ * Select card, as all following commands rely on that.
++ */
++ if (!mmc_host_is_spi(host)) {
++ err = mmc_select_card(card);
++ if (err)
++ goto remove;
++ }
++
++ /*
++ * Read the common CIS tuples.
++ */
++ err = sdio_read_cccr(card);
++ if (err)
++ goto remove;
++
++#if defined(CONFIG_MRST_LNW_A1_WR) || defined(CONFIG_MRST_LNW_A2_WR)
++ /* restricting to 24MHz for Langwell A0 */
++ if (card->cis.max_dtr > 24000000)
++ card->cis.max_dtr = 24000000;
++
++ mmc_set_clock(host, card->cis.max_dtr);
++#else
++ /*
++ * Switch to high-speed (if supported).
++ */
++ err = sdio_enable_hs(card);
++ if (err)
++ goto remove;
++
++ max_dtr = card->cis.max_dtr;
++
++ /*
++ * Change to the card's maximum speed.
++ */
++ if (mmc_card_highspeed(card)) {
++ if (max_dtr > 50000000)
++ max_dtr = 50000000;
++ } else if (max_dtr > 25000000)
++ max_dtr = 25000000;
++
++ mmc_set_clock(host, max_dtr);
++#endif
++
++ /*
++ * Switch to wider bus (if supported).
++ */
++ err = sdio_enable_wide(card);
++ if (err)
++ goto remove;
++
++ host->card = card;
++
++ return 0;
++
++remove:
++err:
++ return err;
++}
+
+ /*
+ * Handle the detection and initialisation of a card.
+@@ -478,6 +857,10 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
+ int i, funcs;
+ struct mmc_card *card;
+
++#if !defined(CONFIG_MRST_LNW_A1_WR) && !defined(CONFIG_MRST_LNW_A2_WR)
++ unsigned int max_dtr;
++#endif
++
+ BUG_ON(!host);
+ WARN_ON(!host->claimed);
+
+@@ -574,3 +957,84 @@ err:
+ return err;
+ }
+
++/*
++ * warn device driver and perform a SDIO device reset.
++ * Assume that device driver knows hot to handle resets.
++ */
++int sdio_reset_device(struct mmc_card *card)
++{
++ int ret = 0;
++ int i = 0;
++ u8 reg = 0;
++
++ BUG_ON(!card);
++ BUG_ON(!card->host);
++ BUG_ON(!card->sdio_func);
++
++ if (!mmc_card_present(card) ||
++ mmc_card_suspended(card)) {
++ dev_dbg(&card->dev, "device reset not allowed\n");
++ return -EINVAL;
++ }
++
++ for (; i < card->sdio_funcs; i++) {
++ struct sdio_func *func = card->sdio_func[i];
++ struct sdio_driver *drv;
++
++ if (func && func->dev.driver) {
++ drv = to_sdio_driver(func->dev.driver);
++ if (drv->pre_reset) {
++ ret = (drv->pre_reset)(func);
++ if (ret)
++ break;
++ }
++ }
++ }
++
++ if (ret)
++ goto err;
++
++ /* reset SDIO card via CMD52 */
++ mmc_claim_host(card->host);
++
++ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_ABORT, 0, &reg);
++
++ if (ret)
++ reg = 0x08;
++ else
++ reg |= 0x08;
++
++ mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_ABORT, reg, NULL);
++
++ /* re-enumerate the device */
++ ret = mmc_sdio_reinit_card(card->host, card);
++
++ mmc_release_host(card->host);
++
++ if (ret)
++ goto err;
++
++ for (i = card->sdio_funcs - 1; i >= 0; i--) {
++ struct sdio_func *func = card->sdio_func[i];
++ struct sdio_driver *drv;
++
++ if (func && func->dev.driver) {
++ drv = to_sdio_driver(func->dev.driver);
++ if (drv->post_reset) {
++ ret = (drv->post_reset)(func);
++ if (ret)
++ break;
++ }
++ }
++ }
++
++ if (ret)
++ goto err;
++
++ return 0;
++
++err:
++ return -EINVAL;
++
++}
++EXPORT_SYMBOL_GPL(sdio_reset_device);
+diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
+index 9e060c8..84414e8 100644
+--- a/drivers/mmc/core/sdio_bus.c
++++ b/drivers/mmc/core/sdio_bus.c
+@@ -124,6 +124,14 @@ static int sdio_bus_probe(struct device *dev)
+ if (!id)
+ return -ENODEV;
+
++ /*
++ * create the user interface to call suspend/resume
++ * from susfs
++ */
++#ifdef CONFIG_SDIO_SUSPEND
++ sdio_create_sysfs_file(func);
++#endif
++
+ /* Set the default block size so the driver is sure it's something
+ * sensible. */
+ sdio_claim_host(func);
+@@ -140,6 +148,10 @@ static int sdio_bus_remove(struct device *dev)
+ struct sdio_driver *drv = to_sdio_driver(dev->driver);
+ struct sdio_func *func = dev_to_sdio_func(dev);
+
++#ifdef CONFIG_SDIO_SUSPEND
++ sdio_remove_sysfs_file(func);
++#endif
++
+ drv->remove(func);
+
+ if (func->irq_handler) {
+@@ -153,6 +165,30 @@ static int sdio_bus_remove(struct device *dev)
+ return 0;
+ }
+
++static int sdio_bus_suspend(struct device *dev, pm_message_t state)
++{
++ struct sdio_driver *drv = to_sdio_driver(dev->driver);
++ struct sdio_func *func = dev_to_sdio_func(dev);
++ int ret = 0;
++
++ if (dev->driver && drv->suspend)
++ ret = drv->suspend(func, state);
++
++ return ret;
++}
++
++static int sdio_bus_resume(struct device *dev)
++{
++ struct sdio_driver *drv = to_sdio_driver(dev->driver);
++ struct sdio_func *func = dev_to_sdio_func(dev);
++ int ret = 0;
++
++ if (dev->driver && drv->resume)
++ ret = drv->resume(func);
++
++ return ret;
++}
++
+ static struct bus_type sdio_bus_type = {
+ .name = "sdio",
+ .dev_attrs = sdio_dev_attrs,
+@@ -160,6 +196,8 @@ static struct bus_type sdio_bus_type = {
+ .uevent = sdio_bus_uevent,
+ .probe = sdio_bus_probe,
+ .remove = sdio_bus_remove,
++ .suspend = sdio_bus_suspend,
++ .resume = sdio_bus_resume,
+ };
+
+ int sdio_register_bus(void)
+diff --git a/drivers/mmc/core/sdio_bus.h b/drivers/mmc/core/sdio_bus.h
+index 567a768..18616b2 100644
+--- a/drivers/mmc/core/sdio_bus.h
++++ b/drivers/mmc/core/sdio_bus.h
+@@ -18,5 +18,9 @@ void sdio_remove_func(struct sdio_func *func);
+ int sdio_register_bus(void);
+ void sdio_unregister_bus(void);
+
++#ifdef CONFIG_SDIO_SUSPEND
++int sdio_create_sysfs_file(struct sdio_func *func);
++void sdio_remove_sysfs_file(struct sdio_func *func);
++#endif
+ #endif
+
+diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
+index ce1d288..521cf2e 100644
+--- a/drivers/mmc/host/Kconfig
++++ b/drivers/mmc/host/Kconfig
+@@ -68,6 +68,16 @@ config MMC_SDHCI_PCI
+
+ If unsure, say N.
+
++config MMC_SDHCI_MRST_EMMC
++ tristate "Enable eMMC device on MRST"
++ depends on MMC_SDHCI && PCI
++ help
++ This enables eMMC device for MRST platform.
++
++ If you're using eMMC device on Moorestown, say Y or M here.
++
++ If unsure, say N.
++
+ config MMC_RICOH_MMC
+ tristate "Ricoh MMC Controller Disabler (EXPERIMENTAL)"
+ depends on MMC_SDHCI_PCI
+diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
+index 5c3a176..53f3719 100644
+--- a/drivers/mmc/host/sdhci-pci.c
++++ b/drivers/mmc/host/sdhci-pci.c
+@@ -38,6 +38,8 @@
+
+ #define MAX_SLOTS 8
+
++static struct mutex port_mutex;
++
+ struct sdhci_pci_chip;
+ struct sdhci_pci_slot;
+
+@@ -364,6 +366,17 @@ static const struct sdhci_pci_fixes sdhci_via = {
+ .probe = via_probe,
+ };
+
++/*
++ * ADMA operation is disabled for Moorestown platform.
++ */
++static const struct sdhci_pci_fixes sdhci_intel_mrst = {
++ .quirks = SDHCI_QUIRK_BROKEN_ADMA |
++#ifdef CONFIG_MMC_SDHCI_MRST_EMMC
++ SDHCI_QUIRK_BROKEN_CARD_DETECTION |
++#endif
++ SDHCI_QUIRK_MRST_RESTRICTION,
++};
++
+ static const struct pci_device_id pci_ids[] __devinitdata = {
+ {
+ .vendor = PCI_VENDOR_ID_RICOH,
+@@ -445,6 +458,22 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
+ .driver_data = (kernel_ulong_t)&sdhci_via,
+ },
+
++ {
++ .vendor = PCI_VENDOR_ID_INTEL,
++ .device = PCI_DEVICE_ID_INTEL_MRST_SD0,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .driver_data = (kernel_ulong_t)&sdhci_intel_mrst,
++ },
++
++ {
++ .vendor = PCI_VENDOR_ID_INTEL,
++ .device = PCI_DEVICE_ID_INTEL_MRST_SD1,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .driver_data = (kernel_ulong_t)&sdhci_intel_mrst,
++ },
++
+ { /* Generic SD host controller */
+ PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
+ },
+@@ -469,11 +498,14 @@ static int sdhci_pci_enable_dma(struct sdhci_host *host)
+ slot = sdhci_priv(host);
+ pdev = slot->chip->pdev;
+
+- if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) &&
+- ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
+- (host->flags & SDHCI_USE_SDMA)) {
+- dev_warn(&pdev->dev, "Will use DMA mode even though HW "
+- "doesn't fully claim to support it.\n");
++ if (!(host->quirks & SDHCI_QUIRK_MRST_RESTRICTION)) {
++ if (((pdev->class & 0xFFFF00) ==
++ (PCI_CLASS_SYSTEM_SDHCI << 8)) &&
++ ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
++ (host->flags & SDHCI_USE_SDMA)) {
++ dev_warn(&pdev->dev, "Will use DMA mode even though HW "
++ "doesn't fully claim to support it.\n");
++ }
+ }
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+@@ -622,6 +654,9 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
+ return ERR_PTR(PTR_ERR(host));
+ }
+
++ if (pdev->device == PCI_DEVICE_ID_INTEL_MRST_SD0)
++ host->mmc->port_mutex = &port_mutex;
++
+ slot = sdhci_priv(host);
+
+ slot->chip = chip;
+@@ -712,22 +747,42 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
+ dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n",
+ (int)pdev->vendor, (int)pdev->device, (int)rev);
+
+- ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
+- if (ret)
+- return ret;
++ /*
++ * slots number is fixed to 2 by Moorestown architecture
++ */
++ if (pdev->device == PCI_DEVICE_ID_INTEL_MRST_SD0) {
++ slots = 2;
++ mutex_init(&port_mutex);
++ } else if (pdev->device == PCI_DEVICE_ID_INTEL_MRST_SD1)
++ slots = 1;
++ else {
++ ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
++
++ if (ret)
++ return ret;
++
++ slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
++ }
+
+- slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
+ dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);
+ if (slots == 0)
+ return -ENODEV;
+
+ BUG_ON(slots > MAX_SLOTS);
+
+- ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
+- if (ret)
+- return ret;
++ /*
++ * first BAR is fixed to 0 by Moorestown architecture
++ */
++ if (pdev->device == PCI_DEVICE_ID_INTEL_MRST_SD0 ||
++ pdev->device == PCI_DEVICE_ID_INTEL_MRST_SD1) {
++ first_bar = 0;
++ } else {
++ ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
++ if (ret)
++ return ret;
+
+- first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
++ first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
++ }
+
+ if (first_bar > 5) {
+ dev_err(&pdev->dev, "Invalid first BAR. Aborting.\n");
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index c279fbc..ff26db0 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -161,9 +161,11 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask)
+ /* hw clears the bit when it's done */
+ while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
+ if (timeout == 0) {
++#ifndef CONFIG_MRST_LNW_A2_WR
+ printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
+ mmc_hostname(host->mmc), (int)mask);
+ sdhci_dumpregs(host);
++#endif
+ return;
+ }
+ timeout--;
+@@ -176,13 +178,25 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask)
+
+ static void sdhci_init(struct sdhci_host *host)
+ {
++ u32 intmask;
++
++ intmask = sdhci_readl(host, SDHCI_INT_STATUS);
++ sdhci_writel(host,
++ intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE),
++ SDHCI_INT_STATUS);
++
++#ifndef CONFIG_MRST_LNW_A2_WR
+ sdhci_reset(host, SDHCI_RESET_ALL);
++#endif
+
+ sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK,
+ SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
+ SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
+ SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
+ SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE);
++
++ /* disable wakeup signal during initialization */
++ sdhci_writeb(host, 0x0, SDHCI_WAKE_UP_CONTROL);
+ }
+
+ static void sdhci_reinit(struct sdhci_host *host)
+@@ -465,6 +479,54 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
+ len -= offset;
+ }
+
++#if defined(CONFIG_MRST_LNW_A1_WR) || defined(CONFIG_MRST_LNW_A2_WR)
++ if (len != 65536) {
++ desc[7] = (addr >> 24) & 0xff;
++ desc[6] = (addr >> 16) & 0xff;
++ desc[5] = (addr >> 8) & 0xff;
++ desc[4] = (addr >> 0) & 0xff;
++
++ BUG_ON(len > 65536);
++
++ desc[3] = (len >> 8) & 0xff;
++ desc[2] = (len >> 0) & 0xff;
++
++ desc[1] = 0x00;
++ desc[0] = 0x21; /* tran, valid */
++
++ desc += 8;
++ } else {
++ desc[7] = (addr >> 24) & 0xff;
++ desc[6] = (addr >> 16) & 0xff;
++ desc[5] = (addr >> 8) & 0xff;
++ desc[4] = (addr >> 0) & 0xff;
++
++ desc[3] = (32768 >> 8) & 0xff;
++ desc[2] = (32768 >> 0) & 0xff;
++
++ desc[1] = 0x00;
++ desc[0] = 0x21; /* tran, valid */
++
++ desc += 8;
++
++ /* 2nd */
++ addr += 32768;
++
++ desc[7] = (addr >> 24) & 0xff;
++ desc[6] = (addr >> 16) & 0xff;
++ desc[5] = (addr >> 8) & 0xff;
++ desc[4] = (addr >> 0) & 0xff;
++
++ desc[3] = (32768 >> 8) & 0xff;
++ desc[2] = (32768 >> 0) & 0xff;
++
++ desc[1] = 0x00;
++ desc[0] = 0x21; /* tran, valid */
++
++ desc += 8;
++ }
++#else
++
+ desc[7] = (addr >> 24) & 0xff;
+ desc[6] = (addr >> 16) & 0xff;
+ desc[5] = (addr >> 8) & 0xff;
+@@ -479,7 +541,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
+ desc[0] = 0x21; /* tran, valid */
+
+ desc += 8;
+-
++#endif
+ /*
+ * If this triggers then we have a calculation bug
+ * somewhere. :/
+@@ -487,6 +549,11 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
+ WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
+ }
+
++
++#if defined(CONFIG_MRST_LNW_A1_WR) || defined(CONFIG_MRST_LNW_A2_WR)
++ desc -= 8;
++ desc[0] = 0x23;
++#else
+ /*
+ * Add a terminating entry.
+ */
+@@ -500,7 +567,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
+
+ desc[1] = 0x00;
+ desc[0] = 0x03; /* nop, end, valid */
+-
++#endif
+ /*
+ * Resync align buffer as we might have changed it.
+ */
+@@ -613,11 +680,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
+ break;
+ }
+
+- if (count >= 0xF) {
+- printk(KERN_WARNING "%s: Too large timeout requested!\n",
+- mmc_hostname(host->mmc));
++ if (count >= 0xF)
+ count = 0xE;
+- }
+
+ return count;
+ }
+@@ -928,6 +992,30 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
+ if (cmd->data)
+ flags |= SDHCI_CMD_DATA;
+
++ if (host->quirks & SDHCI_QUIRK_MRST_RESTRICTION) {
++ u16 clk;
++
++ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
++
++ clk |= SDHCI_CLOCK_CARD_EN;
++ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
++
++ /* Wait max 10 ms */
++ timeout = 10;
++ while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
++ & SDHCI_CLOCK_INT_STABLE)) {
++ if (timeout == 0) {
++ printk(KERN_ERR "%s: Internal clock never "
++ "stabilised.\n",
++ mmc_hostname(host->mmc));
++ sdhci_dumpregs(host);
++ return;
++ }
++ timeout--;
++ mdelay(1);
++ }
++ }
++
+ sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
+ }
+
+@@ -1147,14 +1235,22 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+
+- if (ios->bus_width == MMC_BUS_WIDTH_4)
++ if (ios->bus_width == MMC_BUS_WIDTH_8) {
++ ctrl |= SDHCI_CTRL_8BITBUS;
+ ctrl |= SDHCI_CTRL_4BITBUS;
+- else
++ } else if (ios->bus_width == MMC_BUS_WIDTH_4) {
++ ctrl &= ~SDHCI_CTRL_8BITBUS;
++ ctrl |= SDHCI_CTRL_4BITBUS;
++ } else {
++ ctrl &= ~SDHCI_CTRL_8BITBUS;
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
++ }
+
+- if (ios->timing == MMC_TIMING_SD_HS)
++#ifndef CONFIG_MRST_LNW_A2_WR
++ if (ios->timing == MMC_TIMING_SD_HS || ios->timing == MMC_TIMING_MMC_HS)
+ ctrl |= SDHCI_CTRL_HISPD;
+ else
++#endif
+ ctrl &= ~SDHCI_CTRL_HISPD;
+
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+@@ -1354,6 +1450,10 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
+ BUG_ON(intmask == 0);
+
+ if (!host->cmd) {
++ if (host->quirks & SDHCI_QUIRK_MRST_RESTRICTION &&
++ !(strncmp(mmc_hostname(host->mmc), "mmc1", 4)))
++ return;
++
+ printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
+ "though no command operation was in progress.\n",
+ mmc_hostname(host->mmc), (unsigned)intmask);
+@@ -1667,7 +1767,9 @@ int sdhci_add_host(struct sdhci_host *host)
+ if (debug_quirks)
+ host->quirks = debug_quirks;
+
++#ifndef CONFIG_MRST_LNW_A2_WR
+ sdhci_reset(host, SDHCI_RESET_ALL);
++#endif
+
+ host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
+ host->version = (host->version & SDHCI_SPEC_VER_MASK)
+@@ -1787,7 +1889,7 @@ int sdhci_add_host(struct sdhci_host *host)
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+ if (caps & SDHCI_CAN_DO_HISPD)
+- mmc->caps |= MMC_CAP_SD_HIGHSPEED;
++ mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
+
+ if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+ mmc->caps |= MMC_CAP_NEEDS_POLL;
+@@ -1845,7 +1947,8 @@ int sdhci_add_host(struct sdhci_host *host)
+ } else {
+ mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >>
+ SDHCI_MAX_BLOCK_SHIFT;
+- if (mmc->max_blk_size >= 3) {
++ if ((mmc->max_blk_size >= 3) &&
++ !(host->quirks & SDHCI_QUIRK_MRST_RESTRICTION)) {
+ printk(KERN_WARNING "%s: Invalid maximum block size, "
+ "assuming 512 bytes\n", mmc_hostname(mmc));
+ mmc->max_blk_size = 0;
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 842f46f..f7ba4a2 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -67,6 +67,7 @@
+ #define SDHCI_CTRL_LED 0x01
+ #define SDHCI_CTRL_4BITBUS 0x02
+ #define SDHCI_CTRL_HISPD 0x04
++#define SDHCI_CTRL_8BITBUS 0x20
+ #define SDHCI_CTRL_DMA_MASK 0x18
+ #define SDHCI_CTRL_SDMA 0x00
+ #define SDHCI_CTRL_ADMA1 0x08
+@@ -236,6 +237,8 @@ struct sdhci_host {
+ #define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23)
+ /* Controller uses SDCLK instead of TMCLK for data timeouts */
+ #define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24)
++/* Controller of Moorestown specific restriction */
++#define SDHCI_QUIRK_MRST_RESTRICTION (1<<25)
+
+ int irq; /* Device IRQ */
+ void __iomem * ioaddr; /* Mapped address */
+diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
+index 2ee22e8..731f984 100644
+--- a/include/linux/mmc/card.h
++++ b/include/linux/mmc/card.h
+@@ -97,6 +97,7 @@ struct mmc_card {
+ #define MMC_STATE_READONLY (1<<1) /* card is read-only */
+ #define MMC_STATE_HIGHSPEED (1<<2) /* card is in high speed mode */
+ #define MMC_STATE_BLOCKADDR (1<<3) /* card uses block-addressing */
++#define MMC_STATE_SUSPENDED (1<<4) /* card uses block-addressing */
+ unsigned int quirks; /* card quirks */
+ #define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */
+
+@@ -109,6 +110,7 @@ struct mmc_card {
+ struct sd_scr scr; /* extra SD information */
+ struct sd_switch_caps sw_caps; /* switch (CMD6) caps */
+
++ /* sdio related info */
+ unsigned int sdio_funcs; /* number of SDIO functions */
+ struct sdio_cccr cccr; /* common card info */
+ struct sdio_cis cis; /* common tuple info */
+@@ -118,6 +120,10 @@ struct mmc_card {
+ struct sdio_func_tuple *tuples; /* unknown common tuples */
+
+ struct dentry *debugfs_root;
++
++#ifdef CONFIG_SDIO_SUSPEND
++ struct mutex pm_mutex;
++#endif
+ };
+
+ #define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC)
+@@ -128,6 +134,7 @@ struct mmc_card {
+ #define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY)
+ #define mmc_card_highspeed(c) ((c)->state & MMC_STATE_HIGHSPEED)
+ #define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR)
++#define mmc_card_suspended(c) ((c)->state & MMC_STATE_SUSPENDED)
+
+ #define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
+ #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
+@@ -139,6 +146,11 @@ static inline int mmc_card_lenient_fn0(const struct mmc_card *c)
+ return c->quirks & MMC_QUIRK_LENIENT_FN0;
+ }
+
++#ifdef CONFIG_SDIO_SUSPEND
++#define mmc_card_set_suspended(c) ((c)->state |= MMC_STATE_SUSPENDED)
++#define mmc_card_clear_suspended(c) ((c)->state &= ~MMC_STATE_SUSPENDED)
++#endif
++
+ #define mmc_card_name(c) ((c)->cid.prod_name)
+ #define mmc_card_id(c) (dev_name(&(c)->dev))
+
+diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
+index eaf3636..814450a 100644
+--- a/include/linux/mmc/host.h
++++ b/include/linux/mmc/host.h
+@@ -203,6 +203,8 @@ struct mmc_host {
+
+ struct dentry *debugfs_root;
+
++ struct mutex *port_mutex;
++
+ unsigned long private[0] ____cacheline_aligned;
+ };
+
+diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
+index ac3ab68..a863a9a 100644
+--- a/include/linux/mmc/sdio_func.h
++++ b/include/linux/mmc/sdio_func.h
+@@ -50,6 +50,7 @@ struct sdio_func {
+
+ unsigned int state; /* function state */
+ #define SDIO_STATE_PRESENT (1<<0) /* present in sysfs */
++#define SDIO_STATE_SUSPENDED (1<<1) /* present in sysfs */
+
+ u8 tmpbuf[4]; /* DMA:able scratch buffer */
+
+@@ -60,9 +61,13 @@ struct sdio_func {
+ };
+
+ #define sdio_func_present(f) ((f)->state & SDIO_STATE_PRESENT)
++#define sdio_func_suspended(f) ((f)->state & SDIO_STATE_SUSPENDED)
+
+ #define sdio_func_set_present(f) ((f)->state |= SDIO_STATE_PRESENT)
+
++#define sdio_func_set_suspended(f) ((f)->state |= SDIO_STATE_SUSPENDED)
++#define sdio_func_clear_suspended(f) ((f)->state &= ~SDIO_STATE_SUSPENDED)
++
+ #define sdio_func_id(f) (dev_name(&(f)->dev))
+
+ #define sdio_get_drvdata(f) dev_get_drvdata(&(f)->dev)
+@@ -78,6 +83,11 @@ struct sdio_driver {
+
+ int (*probe)(struct sdio_func *, const struct sdio_device_id *);
+ void (*remove)(struct sdio_func *);
++ int (*suspend)(struct sdio_func *, pm_message_t);
++ int (*resume)(struct sdio_func *);
++
++ int (*pre_reset)(struct sdio_func *);
++ int (*post_reset)(struct sdio_func *);
+
+ struct device_driver drv;
+ };
+@@ -153,5 +163,9 @@ extern unsigned char sdio_f0_readb(struct sdio_func *func,
+ extern void sdio_f0_writeb(struct sdio_func *func, unsigned char b,
+ unsigned int addr, int *err_ret);
+
++extern int sdio_reset_device(struct mmc_card *card);
++
++extern int sdio_suspend_host(struct mmc_card *card, pm_message_t msg);
++extern int sdio_resume_host(struct mmc_card *card);
+ #endif
+
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index cca8a04..2ec7f6c 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2396,6 +2396,8 @@
+ #define PCI_DEVICE_ID_INTEL_82375 0x0482
+ #define PCI_DEVICE_ID_INTEL_82424 0x0483
+ #define PCI_DEVICE_ID_INTEL_82378 0x0484
++#define PCI_DEVICE_ID_INTEL_MRST_SD0 0x0807
++#define PCI_DEVICE_ID_INTEL_MRST_SD1 0x0808
+ #define PCI_DEVICE_ID_INTEL_I960 0x0960
+ #define PCI_DEVICE_ID_INTEL_I960RM 0x0962
+ #define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062
+--
+1.6.2.5
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-nand-driver-2.0.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-nand-driver-2.0.patch
new file mode 100644
index 0000000..418a38d
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-nand-driver-2.0.patch
@@ -0,0 +1,11841 @@
+From ccdae3998beb883307f0ea6aedcfa856f8714137 Mon Sep 17 00:00:00 2001
+From: Alan Olsen <alan.r.olsen@intel.com>
+Date: Mon, 26 Apr 2010 10:50:19 -0700
+Subject: [PATCH] Post Beta nand driver.
+
+Contains the fixes for nand corruption with the watchdog driver.
+
+New features compares to MRST NAND driver Post Alpah2 2.0:
+1. Enable CDMA feature of NAND controller
+
+How to use this driver:
+The same with before. That is, to enable this driver,
+you can set
+CONFIG_MRST_NAND=y
+CONFIG_MRST_NAND_HW=y
+
+Signed-off-by: Gao Yunpeng <yunpeng.gao@intel.com>
+
+Signed-off-by: Alan Olsen <alan.r.olsen@intel.com>
+
+Patch-mainline: 2.6.34?
+---
+ drivers/block/Kconfig | 2 +
+ drivers/block/Makefile | 2 +
+ drivers/block/spectra/Kconfig | 27 +
+ drivers/block/spectra/Makefile | 7 +
+ drivers/block/spectra/README | 29 +
+ drivers/block/spectra/ffsdefs.h | 58 +
+ drivers/block/spectra/ffsport.c | 847 ++++++
+ drivers/block/spectra/ffsport.h | 84 +
+ drivers/block/spectra/flash.c | 4731 +++++++++++++++++++++++++++++++
+ drivers/block/spectra/flash.h | 198 ++
+ drivers/block/spectra/lld.c | 258 ++
+ drivers/block/spectra/lld.h | 111 +
+ drivers/block/spectra/lld_cdma.c | 910 ++++++
+ drivers/block/spectra/lld_cdma.h | 123 +
+ drivers/block/spectra/lld_emu.c | 780 +++++
+ drivers/block/spectra/lld_emu.h | 51 +
+ drivers/block/spectra/lld_nand.c | 2601 +++++++++++++++++
+ drivers/block/spectra/lld_nand.h | 131 +
+ drivers/block/spectra/nand_regs.h | 619 ++++
+ drivers/block/spectra/spectraswconfig.h | 81 +
+ 20 files changed, 11650 insertions(+), 0 deletions(-)
+ create mode 100644 drivers/block/spectra/Kconfig
+ create mode 100644 drivers/block/spectra/Makefile
+ create mode 100644 drivers/block/spectra/README
+ create mode 100644 drivers/block/spectra/ffsdefs.h
+ create mode 100644 drivers/block/spectra/ffsport.c
+ create mode 100644 drivers/block/spectra/ffsport.h
+ create mode 100644 drivers/block/spectra/flash.c
+ create mode 100644 drivers/block/spectra/flash.h
+ create mode 100644 drivers/block/spectra/lld.c
+ create mode 100644 drivers/block/spectra/lld.h
+ create mode 100644 drivers/block/spectra/lld_cdma.c
+ create mode 100644 drivers/block/spectra/lld_cdma.h
+ create mode 100644 drivers/block/spectra/lld_emu.c
+ create mode 100644 drivers/block/spectra/lld_emu.h
+ create mode 100644 drivers/block/spectra/lld_nand.c
+ create mode 100644 drivers/block/spectra/lld_nand.h
+ create mode 100644 drivers/block/spectra/nand_regs.h
+ create mode 100644 drivers/block/spectra/spectraswconfig.h
+
+diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
+index 77bfce5..d62b95d 100644
+--- a/drivers/block/Kconfig
++++ b/drivers/block/Kconfig
+@@ -488,4 +488,6 @@ config BLK_DEV_HD
+
+ If unsure, say N.
+
++source "drivers/block/spectra/Kconfig"
++
+ endif # BLK_DEV
+diff --git a/drivers/block/Makefile b/drivers/block/Makefile
+index aff5ac9..568ba65 100644
+--- a/drivers/block/Makefile
++++ b/drivers/block/Makefile
+@@ -38,4 +38,6 @@ obj-$(CONFIG_BLK_DEV_HD) += hd.o
+ obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
+ obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
+
++obj-$(CONFIG_MRST_NAND) += spectra/
++
+ swim_mod-objs := swim.o swim_asm.o
+diff --git a/drivers/block/spectra/Kconfig b/drivers/block/spectra/Kconfig
+new file mode 100644
+index 0000000..fbece10
+--- /dev/null
++++ b/drivers/block/spectra/Kconfig
+@@ -0,0 +1,27 @@
++
++menuconfig MRST_NAND
++ tristate "Moorestown NAND Flash controller"
++ depends on BLOCK
++ default n
++ ---help---
++ Enable the driver for the NAND Flash controller in Intel Moorestown
++ Platform
++
++choice
++ prompt "Compile for"
++ depends on MRST_NAND
++ default MRST_NAND_HW
++
++config MRST_NAND_HW
++ bool "Actual hardware mode"
++ help
++ Driver communicates with the actual hardware's register interface.
++ in DMA mode.
++
++config MRST_NAND_EMU
++ bool "RAM emulator testing"
++ help
++ Driver emulates Flash on a RAM buffer and / or disk file. Useful to test the behavior of FTL layer.
++
++endchoice
++
+diff --git a/drivers/block/spectra/Makefile b/drivers/block/spectra/Makefile
+new file mode 100644
+index 0000000..261891c
+--- /dev/null
++++ b/drivers/block/spectra/Makefile
+@@ -0,0 +1,7 @@
++#
++# Makefile of Intel Moorestown NAND controller driver
++#
++
++obj-$(CONFIG_MRST_NAND) += spectra.o
++spectra-objs := ffsport.o flash.o lld.o lld_emu.o lld_nand.o lld_cdma.o
++
+diff --git a/drivers/block/spectra/README b/drivers/block/spectra/README
+new file mode 100644
+index 0000000..ecba559
+--- /dev/null
++++ b/drivers/block/spectra/README
+@@ -0,0 +1,29 @@
++This is a driver for NAND controller of Intel Moorestown platform.
++
++This driver is a standalone linux block device driver, it acts as if it's a normal hard disk.
++It includes three layer:
++ block layer interface - file ffsport.c
++ Flash Translation Layer (FTL) - file flash.c (implement the NAND flash Translation Layer, includs address mapping, garbage collection, wear-leveling and so on)
++ Low level layer - file lld_nand.c/lld_cdma.c/lld_emu.c (which implements actual controller hardware registers access)
++
++This driver can be build as modules or build-in.
++
++Dependency:
++This driver has dependency on IA Firmware of Intel Moorestown platform.
++It need the IA Firmware to create the block table for the first time.
++And to validate this driver code without IA Firmware, you can change the
++macro AUTO_FORMAT_FLASH from 0 to 1 in file spectraswconfig.h. Thus the
++driver will erase the whole nand flash and create a new block table.
++
++TODO:
++ - Enable Command DMA feature support
++ - lower the memory footprint
++ - Remove most of the unnecessary global variables
++ - Change all the upcase variable / functions name to lowercase
++ - Some other misc bugs
++
++Please send patches to:
++ Greg Kroah-Hartman <gregkh@suse.de>
++
++And Cc to: Gao Yunpeng <yunpeng.gao@intel.com>
++
+diff --git a/drivers/block/spectra/ffsdefs.h b/drivers/block/spectra/ffsdefs.h
+new file mode 100644
+index 0000000..a9e9cd2
+--- /dev/null
++++ b/drivers/block/spectra/ffsdefs.h
+@@ -0,0 +1,58 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef _FFSDEFS_
++#define _FFSDEFS_
++
++#define CLEAR 0 /*use this to clear a field instead of "fail"*/
++#define SET 1 /*use this to set a field instead of "pass"*/
++#define FAIL 1 /*failed flag*/
++#define PASS 0 /*success flag*/
++#define ERR -1 /*error flag*/
++
++#define ERASE_CMD 10
++#define WRITE_MAIN_CMD 11
++#define READ_MAIN_CMD 12
++#define WRITE_SPARE_CMD 13
++#define READ_SPARE_CMD 14
++#define WRITE_MAIN_SPARE_CMD 15
++#define READ_MAIN_SPARE_CMD 16
++#define MEMCOPY_CMD 17
++#define DUMMY_CMD 99
++
++#define EVENT_PASS 0x00
++#define EVENT_CORRECTABLE_DATA_ERROR_FIXED 0x01
++#define EVENT_UNCORRECTABLE_DATA_ERROR 0x02
++#define EVENT_TIME_OUT 0x03
++#define EVENT_PROGRAM_FAILURE 0x04
++#define EVENT_ERASE_FAILURE 0x05
++#define EVENT_MEMCOPY_FAILURE 0x06
++#define EVENT_FAIL 0x07
++
++#define EVENT_NONE 0x22
++#define EVENT_DMA_CMD_COMP 0x77
++#define EVENT_ECC_TRANSACTION_DONE 0x88
++#define EVENT_DMA_CMD_FAIL 0x99
++
++#define CMD_PASS 0
++#define CMD_FAIL 1
++#define CMD_ABORT 2
++#define CMD_NOT_DONE 3
++
++#endif /* _FFSDEFS_ */
+diff --git a/drivers/block/spectra/ffsport.c b/drivers/block/spectra/ffsport.c
+new file mode 100644
+index 0000000..0b3d49d
+--- /dev/null
++++ b/drivers/block/spectra/ffsport.c
+@@ -0,0 +1,847 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include "ffsport.h"
++#include "flash.h"
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/blkdev.h>
++#include <linux/wait.h>
++#include <linux/mutex.h>
++#include <linux/kthread.h>
++#include <linux/log2.h>
++#include <linux/init.h>
++
++/**** Helper functions used for Div, Remainder operation on u64 ****/
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_Calc_Used_Bits
++* Inputs: Power of 2 number
++* Outputs: Number of Used Bits
++* 0, if the argument is 0
++* Description: Calculate the number of bits used by a given power of 2 number
++* Number can be upto 32 bit
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_Calc_Used_Bits(u32 n)
++{
++ int tot_bits = 0;
++
++ if (n >= 1 << 16) {
++ n >>= 16;
++ tot_bits += 16;
++ }
++
++ if (n >= 1 << 8) {
++ n >>= 8;
++ tot_bits += 8;
++ }
++
++ if (n >= 1 << 4) {
++ n >>= 4;
++ tot_bits += 4;
++ }
++
++ if (n >= 1 << 2) {
++ n >>= 2;
++ tot_bits += 2;
++ }
++
++ if (n >= 1 << 1)
++ tot_bits += 1;
++
++ return ((n == 0) ? (0) : tot_bits);
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_u64_Div
++* Inputs: Number of u64
++* A power of 2 number as Division
++* Outputs: Quotient of the Divisor operation
++* Description: It divides the address by divisor by using bit shift operation
++* (essentially without explicitely using "/").
++* Divisor is a power of 2 number and Divided is of u64
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u64 GLOB_u64_Div(u64 addr, u32 divisor)
++{
++ return (u64)(addr >> GLOB_Calc_Used_Bits(divisor));
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_u64_Remainder
++* Inputs: Number of u64
++* Divisor Type (1 -PageAddress, 2- BlockAddress)
++* Outputs: Remainder of the Division operation
++* Description: It calculates the remainder of a number (of u64) by
++* divisor(power of 2 number ) by using bit shifting and multiply
++* operation(essentially without explicitely using "/").
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)
++{
++ u64 result = 0;
++
++ if (divisor_type == 1) { /* Remainder -- Page */
++ result = (addr >> DeviceInfo.nBitsInPageDataSize);
++ result = result * DeviceInfo.wPageDataSize;
++ } else if (divisor_type == 2) { /* Remainder -- Block */
++ result = (addr >> DeviceInfo.nBitsInBlockDataSize);
++ result = result * DeviceInfo.wBlockDataSize;
++ }
++
++ result = addr - result;
++
++ return result;
++}
++
++#define NUM_DEVICES 1
++#define PARTITIONS 8
++
++#define GLOB_SBD_NAME "nd"
++#define GLOB_SBD_IRQ_NUM (29)
++#define GLOB_VERSION "driver version 20091110"
++
++#define GLOB_SBD_IOCTL_GC (0x7701)
++#define GLOB_SBD_IOCTL_WL (0x7702)
++#define GLOB_SBD_IOCTL_FORMAT (0x7703)
++#define GLOB_SBD_IOCTL_ERASE_FLASH (0x7704)
++#define GLOB_SBD_IOCTL_FLUSH_CACHE (0x7705)
++#define GLOB_SBD_IOCTL_COPY_BLK_TABLE (0x7706)
++#define GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE (0x7707)
++#define GLOB_SBD_IOCTL_GET_NAND_INFO (0x7708)
++#define GLOB_SBD_IOCTL_WRITE_DATA (0x7709)
++#define GLOB_SBD_IOCTL_READ_DATA (0x770A)
++
++static u32 reserved_mb_for_os_image = 0;
++
++int nand_debug_level;
++module_param(nand_debug_level, int, 0644);
++MODULE_PARM_DESC(nand_debug_level, "debug level value: 1-3");
++
++MODULE_LICENSE("GPL");
++
++struct spectra_nand_dev {
++ struct pci_dev *dev;
++ u64 size;
++ u16 users;
++ spinlock_t qlock;
++ void __iomem *ioaddr; /* Mapped address */
++ struct request_queue *queue;
++ struct task_struct *thread;
++ struct gendisk *gd;
++ u8 *tmp_buf;
++};
++
++
++static int GLOB_SBD_majornum;
++
++static char *GLOB_version = GLOB_VERSION;
++
++static struct spectra_nand_dev nand_device[NUM_DEVICES];
++
++static struct mutex spectra_lock;
++
++static int res_blks_os = 1;
++
++struct spectra_indentfy_dev_tag IdentifyDeviceData;
++
++static int force_flush_cache(void)
++{
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (ERR == GLOB_FTL_Flush_Cache()) {
++ printk(KERN_ERR "Fail to Flush FTL Cache!\n");
++ return -EFAULT;
++ }
++#if CMD_DMA
++ if (glob_ftl_execute_cmds())
++ return -EIO;
++ else
++ return 0;
++#endif
++ return 0;
++}
++
++struct ioctl_rw_page_info {
++ u8 *data;
++ unsigned int page;
++};
++
++static int ioctl_read_page_data(unsigned long arg)
++{
++ u8 *buf;
++ struct ioctl_rw_page_info info;
++ int result = PASS;
++
++ if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
++ return -EFAULT;
++
++ buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
++ if (!buf) {
++ printk(KERN_ERR "ioctl_read_page_data: "
++ "failed to allocate memory\n");
++ return -ENOMEM;
++ }
++
++ mutex_lock(&spectra_lock);
++ result = GLOB_FTL_Page_Read(buf,
++ (u64)info.page * IdentifyDeviceData.PageDataSize);
++ mutex_unlock(&spectra_lock);
++
++ if (copy_to_user((void __user *)info.data, buf,
++ IdentifyDeviceData.PageDataSize)) {
++ printk(KERN_ERR "ioctl_read_page_data: "
++ "failed to copy user data\n");
++ kfree(buf);
++ return -EFAULT;
++ }
++
++ kfree(buf);
++ return result;
++}
++
++static int ioctl_write_page_data(unsigned long arg)
++{
++ u8 *buf;
++ struct ioctl_rw_page_info info;
++ int result = PASS;
++
++ if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
++ return -EFAULT;
++
++ buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
++ if (!buf) {
++ printk(KERN_ERR "ioctl_write_page_data: "
++ "failed to allocate memory\n");
++ return -ENOMEM;
++ }
++
++ if (copy_from_user(buf, (void __user *)info.data,
++ IdentifyDeviceData.PageDataSize)) {
++ printk(KERN_ERR "ioctl_write_page_data: "
++ "failed to copy user data\n");
++ kfree(buf);
++ return -EFAULT;
++ }
++
++ mutex_lock(&spectra_lock);
++ result = GLOB_FTL_Page_Write(buf,
++ (u64)info.page * IdentifyDeviceData.PageDataSize);
++ mutex_unlock(&spectra_lock);
++
++ kfree(buf);
++ return result;
++}
++
++/* Return how many blocks should be reserved for bad block replacement */
++static int get_res_blk_num_bad_blk(void)
++{
++ return IdentifyDeviceData.wDataBlockNum / 10;
++}
++
++/* Return how many blocks should be reserved for OS image */
++static int get_res_blk_num_os(void)
++{
++ u32 res_blks, blk_size;
++
++ blk_size = IdentifyDeviceData.PageDataSize *
++ IdentifyDeviceData.PagesPerBlock;
++
++ res_blks = (reserved_mb_for_os_image * 1024 * 1024) / blk_size;
++
++ if ((res_blks < 1) || (res_blks >= IdentifyDeviceData.wDataBlockNum))
++ res_blks = 1; /* Reserved 1 block for block table */
++
++ return res_blks;
++}
++
++static void SBD_prepare_flush(struct request_queue *q, struct request *rq)
++{
++ rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
++ /* rq->timeout = 5 * HZ; */
++ rq->cmd[0] = REQ_LB_OP_FLUSH;
++}
++
++/* Transfer a full request. */
++static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
++{
++ u64 start_addr, addr;
++ u32 logical_start_sect, hd_start_sect;
++ u32 nsect, hd_sects;
++ u32 rsect, tsect = 0;
++ char *buf;
++ u32 ratio = IdentifyDeviceData.PageDataSize >> 9;
++
++ start_addr = (u64)(blk_rq_pos(req)) << 9;
++ /* Add a big enough offset to prevent the OS Image from
++ * being accessed or damaged by file system */
++ start_addr += IdentifyDeviceData.PageDataSize *
++ IdentifyDeviceData.PagesPerBlock *
++ res_blks_os;
++
++ if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
++ req->cmd[0] == REQ_LB_OP_FLUSH) {
++ if (force_flush_cache()) /* Fail to flush cache */
++ return -EIO;
++ else
++ return 0;
++ }
++
++ if (!blk_fs_request(req))
++ return -EIO;
++
++ if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(tr->gd)) {
++ printk(KERN_ERR "Spectra error: request over the NAND "
++ "capacity!sector %d, current_nr_sectors %d, "
++ "while capacity is %d\n",
++ (int)blk_rq_pos(req),
++ blk_rq_cur_sectors(req),
++ (int)get_capacity(tr->gd));
++ return -EIO;
++ }
++
++ logical_start_sect = start_addr >> 9;
++ hd_start_sect = logical_start_sect / ratio;
++ rsect = logical_start_sect - hd_start_sect * ratio;
++
++ addr = (u64)hd_start_sect * ratio * 512;
++ buf = req->buffer;
++ nsect = blk_rq_cur_sectors(req);
++
++ if (rsect)
++ tsect = (ratio - rsect) < nsect ? (ratio - rsect) : nsect;
++
++ switch (rq_data_dir(req)) {
++ case READ:
++ /* Read the first NAND page */
++ if (rsect) {
++ if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
++ printk(KERN_ERR "Error in %s, Line %d\n",
++ __FILE__, __LINE__);
++ return -EIO;
++ }
++ memcpy(buf, tr->tmp_buf + (rsect << 9), tsect << 9);
++ addr += IdentifyDeviceData.PageDataSize;
++ buf += tsect << 9;
++ nsect -= tsect;
++ }
++
++ /* Read the other NAND pages */
++ for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
++ if (GLOB_FTL_Page_Read(buf, addr)) {
++ printk(KERN_ERR "Error in %s, Line %d\n",
++ __FILE__, __LINE__);
++ return -EIO;
++ }
++ addr += IdentifyDeviceData.PageDataSize;
++ buf += IdentifyDeviceData.PageDataSize;
++ }
++
++ /* Read the last NAND pages */
++ if (nsect % ratio) {
++ if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
++ printk(KERN_ERR "Error in %s, Line %d\n",
++ __FILE__, __LINE__);
++ return -EIO;
++ }
++ memcpy(buf, tr->tmp_buf, (nsect % ratio) << 9);
++ }
++#if CMD_DMA
++ if (glob_ftl_execute_cmds())
++ return -EIO;
++ else
++ return 0;
++#endif
++ return 0;
++
++ case WRITE:
++ /* Write the first NAND page */
++ if (rsect) {
++ if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
++ printk(KERN_ERR "Error in %s, Line %d\n",
++ __FILE__, __LINE__);
++ return -EIO;
++ }
++ memcpy(tr->tmp_buf + (rsect << 9), buf, tsect << 9);
++ if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
++ printk(KERN_ERR "Error in %s, Line %d\n",
++ __FILE__, __LINE__);
++ return -EIO;
++ }
++ addr += IdentifyDeviceData.PageDataSize;
++ buf += tsect << 9;
++ nsect -= tsect;
++ }
++
++ /* Write the other NAND pages */
++ for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
++ if (GLOB_FTL_Page_Write(buf, addr)) {
++ printk(KERN_ERR "Error in %s, Line %d\n",
++ __FILE__, __LINE__);
++ return -EIO;
++ }
++ addr += IdentifyDeviceData.PageDataSize;
++ buf += IdentifyDeviceData.PageDataSize;
++ }
++
++ /* Write the last NAND pages */
++ if (nsect % ratio) {
++ if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
++ printk(KERN_ERR "Error in %s, Line %d\n",
++ __FILE__, __LINE__);
++ return -EIO;
++ }
++ memcpy(tr->tmp_buf, buf, (nsect % ratio) << 9);
++ if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
++ printk(KERN_ERR "Error in %s, Line %d\n",
++ __FILE__, __LINE__);
++ return -EIO;
++ }
++ }
++#if CMD_DMA
++ if (glob_ftl_execute_cmds())
++ return -EIO;
++ else
++ return 0;
++#endif
++ return 0;
++
++ default:
++ printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
++ return -EIO;
++ }
++}
++
++/* This function is copied from drivers/mtd/mtd_blkdevs.c */
++static int spectra_trans_thread(void *arg)
++{
++ struct spectra_nand_dev *tr = arg;
++ struct request_queue *rq = tr->queue;
++ struct request *req = NULL;
++
++ /* we might get involved when memory gets low, so use PF_MEMALLOC */
++ current->flags |= PF_MEMALLOC;
++
++ spin_lock_irq(rq->queue_lock);
++ while (!kthread_should_stop()) {
++ int res;
++
++ if (!req) {
++ req = blk_fetch_request(rq);
++ if (!req) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ spin_unlock_irq(rq->queue_lock);
++ schedule();
++ spin_lock_irq(rq->queue_lock);
++ continue;
++ }
++ }
++
++ spin_unlock_irq(rq->queue_lock);
++
++ mutex_lock(&spectra_lock);
++ res = do_transfer(tr, req);
++ mutex_unlock(&spectra_lock);
++
++ spin_lock_irq(rq->queue_lock);
++
++ if (!__blk_end_request_cur(req, res))
++ req = NULL;
++ }
++
++ if (req)
++ __blk_end_request_all(req, -EIO);
++
++ spin_unlock_irq(rq->queue_lock);
++
++ return 0;
++}
++
++
++/* Request function that "handles clustering". */
++static void GLOB_SBD_request(struct request_queue *rq)
++{
++ struct spectra_nand_dev *pdev = rq->queuedata;
++ wake_up_process(pdev->thread);
++}
++
++static int GLOB_SBD_open(struct block_device *bdev, fmode_t mode)
++
++{
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++ return 0;
++}
++
++static int GLOB_SBD_release(struct gendisk *disk, fmode_t mode)
++{
++ int ret;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ mutex_lock(&spectra_lock);
++ ret = force_flush_cache();
++ mutex_unlock(&spectra_lock);
++
++ return 0;
++}
++
++static int GLOB_SBD_getgeo(struct block_device *bdev, struct hd_geometry *geo)
++{
++ geo->heads = 4;
++ geo->sectors = 16;
++ geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "heads: %d, sectors: %d, cylinders: %d\n",
++ geo->heads, geo->sectors, geo->cylinders);
++
++ return 0;
++}
++
++int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
++ unsigned int cmd, unsigned long arg)
++{
++ int ret;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ switch (cmd) {
++ case GLOB_SBD_IOCTL_GC:
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Spectra IOCTL: Garbage Collection "
++ "being performed\n");
++ if (PASS != GLOB_FTL_Garbage_Collection())
++ return -EFAULT;
++ return 0;
++
++ case GLOB_SBD_IOCTL_WL:
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Spectra IOCTL: Static Wear Leveling "
++ "being performed\n");
++ if (PASS != GLOB_FTL_Wear_Leveling())
++ return -EFAULT;
++ return 0;
++
++ case GLOB_SBD_IOCTL_FORMAT:
++ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Flash format "
++ "being performed\n");
++ if (PASS != GLOB_FTL_Flash_Format())
++ return -EFAULT;
++ return 0;
++
++ case GLOB_SBD_IOCTL_FLUSH_CACHE:
++ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Cache flush "
++ "being performed\n");
++ mutex_lock(&spectra_lock);
++ ret = force_flush_cache();
++ mutex_unlock(&spectra_lock);
++ return ret;
++
++ case GLOB_SBD_IOCTL_COPY_BLK_TABLE:
++ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
++ "Copy block table\n");
++ if (copy_to_user((void __user *)arg,
++ get_blk_table_start_addr(),
++ get_blk_table_len()))
++ return -EFAULT;
++ return 0;
++
++ case GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE:
++ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
++ "Copy wear leveling table\n");
++ if (copy_to_user((void __user *)arg,
++ get_wear_leveling_table_start_addr(),
++ get_wear_leveling_table_len()))
++ return -EFAULT;
++ return 0;
++
++ case GLOB_SBD_IOCTL_GET_NAND_INFO:
++ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
++ "Get NAND info\n");
++ if (copy_to_user((void __user *)arg, &IdentifyDeviceData,
++ sizeof(IdentifyDeviceData)))
++ return -EFAULT;
++ return 0;
++
++ case GLOB_SBD_IOCTL_WRITE_DATA:
++ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
++ "Write one page data\n");
++ return ioctl_write_page_data(arg);
++
++ case GLOB_SBD_IOCTL_READ_DATA:
++ nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
++ "Read one page data\n");
++ return ioctl_read_page_data(arg);
++ }
++
++ return -ENOTTY;
++}
++
++static struct block_device_operations GLOB_SBD_ops = {
++ .owner = THIS_MODULE,
++ .open = GLOB_SBD_open,
++ .release = GLOB_SBD_release,
++ .locked_ioctl = GLOB_SBD_ioctl,
++ .getgeo = GLOB_SBD_getgeo,
++};
++
++static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
++{
++ int res_blks;
++ u32 sects;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ memset(dev, 0, sizeof(struct spectra_nand_dev));
++
++ nand_dbg_print(NAND_DBG_WARN, "Reserved %d blocks "
++ "for OS image, %d blocks for bad block replacement.\n",
++ get_res_blk_num_os(),
++ get_res_blk_num_bad_blk());
++
++ res_blks = get_res_blk_num_bad_blk() + get_res_blk_num_os();
++
++ dev->size = (u64)IdentifyDeviceData.PageDataSize *
++ IdentifyDeviceData.PagesPerBlock *
++ (IdentifyDeviceData.wDataBlockNum - res_blks);
++
++ res_blks_os = get_res_blk_num_os();
++
++ spin_lock_init(&dev->qlock);
++
++ dev->tmp_buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
++ if (!dev->tmp_buf) {
++ printk(KERN_ERR "Failed to kmalloc memory in %s Line %d, exit.\n",
++ __FILE__, __LINE__);
++ goto out_vfree;
++ }
++
++ dev->queue = blk_init_queue(GLOB_SBD_request, &dev->qlock);
++ if (dev->queue == NULL) {
++ printk(KERN_ERR
++ "Spectra: Request queue could not be initialized."
++ " Aborting\n ");
++ goto out_vfree;
++ }
++ dev->queue->queuedata = dev;
++
++	/* As Linux block layer doesn't support >4KB hardware sector,  */
++ /* Here we force report 512 byte hardware sector size to Kernel */
++ blk_queue_logical_block_size(dev->queue, 512);
++
++ blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH,
++ SBD_prepare_flush);
++
++ dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
++ if (IS_ERR(dev->thread)) {
++ blk_cleanup_queue(dev->queue);
++ unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
++ return PTR_ERR(dev->thread);
++ }
++
++ dev->gd = alloc_disk(PARTITIONS);
++ if (!dev->gd) {
++ printk(KERN_ERR
++ "Spectra: Could not allocate disk. Aborting \n ");
++ goto out_vfree;
++ }
++ dev->gd->major = GLOB_SBD_majornum;
++ dev->gd->first_minor = which * PARTITIONS;
++ dev->gd->fops = &GLOB_SBD_ops;
++ dev->gd->queue = dev->queue;
++ dev->gd->private_data = dev;
++ snprintf(dev->gd->disk_name, 32, "%s%c", GLOB_SBD_NAME, which + 'a');
++
++ sects = dev->size >> 9;
++ nand_dbg_print(NAND_DBG_WARN, "Capacity sects: %d\n", sects);
++ set_capacity(dev->gd, sects);
++
++ add_disk(dev->gd);
++
++ return 0;
++out_vfree:
++ return -ENOMEM;
++}
++
++/*
++static ssize_t show_nand_block_num(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return snprintf(buf, PAGE_SIZE, "%d\n",
++ (int)IdentifyDeviceData.wDataBlockNum);
++}
++
++static ssize_t show_nand_pages_per_block(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return snprintf(buf, PAGE_SIZE, "%d\n",
++ (int)IdentifyDeviceData.PagesPerBlock);
++}
++
++static ssize_t show_nand_page_size(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return snprintf(buf, PAGE_SIZE, "%d\n",
++ (int)IdentifyDeviceData.PageDataSize);
++}
++
++static DEVICE_ATTR(nand_block_num, 0444, show_nand_block_num, NULL);
++static DEVICE_ATTR(nand_pages_per_block, 0444, show_nand_pages_per_block, NULL);
++static DEVICE_ATTR(nand_page_size, 0444, show_nand_page_size, NULL);
++
++static void create_sysfs_entry(struct device *dev)
++{
++ if (device_create_file(dev, &dev_attr_nand_block_num))
++ printk(KERN_ERR "Spectra: "
++ "failed to create sysfs entry nand_block_num.\n");
++ if (device_create_file(dev, &dev_attr_nand_pages_per_block))
++ printk(KERN_ERR "Spectra: "
++ "failed to create sysfs entry nand_pages_per_block.\n");
++ if (device_create_file(dev, &dev_attr_nand_page_size))
++ printk(KERN_ERR "Spectra: "
++ "failed to create sysfs entry nand_page_size.\n");
++}
++*/
++
++static int GLOB_SBD_init(void)
++{
++ int i;
++
++ /* Set debug output level (0~3) here. 3 is most verbose */
++ nand_debug_level = 0;
++
++ printk(KERN_ALERT "Spectra: %s\n", GLOB_version);
++
++ mutex_init(&spectra_lock);
++
++ GLOB_SBD_majornum = register_blkdev(0, GLOB_SBD_NAME);
++ if (GLOB_SBD_majornum <= 0) {
++ printk(KERN_ERR "Unable to get the major %d for Spectra",
++ GLOB_SBD_majornum);
++ return -EBUSY;
++ }
++
++ if (PASS != GLOB_FTL_Flash_Init()) {
++ printk(KERN_ERR "Spectra: Unable to Initialize Flash Device. "
++ "Aborting\n");
++ goto out_flash_register;
++ }
++
++ /* create_sysfs_entry(&dev->dev); */
++
++ if (PASS != GLOB_FTL_IdentifyDevice(&IdentifyDeviceData)) {
++ printk(KERN_ERR "Spectra: Unable to Read Flash Device. "
++ "Aborting\n");
++ goto out_flash_register;
++ } else {
++ nand_dbg_print(NAND_DBG_WARN, "In GLOB_SBD_init: "
++ "Num blocks=%d, pagesperblock=%d, "
++ "pagedatasize=%d, ECCBytesPerSector=%d\n",
++ (int)IdentifyDeviceData.NumBlocks,
++ (int)IdentifyDeviceData.PagesPerBlock,
++ (int)IdentifyDeviceData.PageDataSize,
++ (int)IdentifyDeviceData.wECCBytesPerSector);
++ }
++
++ printk(KERN_ALERT "Spectra: searching block table, please wait ...\n");
++ if (GLOB_FTL_Init() != PASS) {
++ printk(KERN_ERR "Spectra: Unable to Initialize FTL Layer. "
++ "Aborting\n");
++ goto out_ftl_flash_register;
++ }
++ printk(KERN_ALERT "Spectra: block table has been found.\n");
++
++ for (i = 0; i < NUM_DEVICES; i++)
++ if (SBD_setup_device(&nand_device[i], i) == -ENOMEM)
++ goto out_ftl_flash_register;
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Spectra: module loaded with major number %d\n",
++ GLOB_SBD_majornum);
++
++ return 0;
++
++out_ftl_flash_register:
++ GLOB_FTL_Cache_Release();
++out_flash_register:
++ GLOB_FTL_Flash_Release();
++ unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
++ printk(KERN_ERR "Spectra: Module load failed.\n");
++
++ return -ENOMEM;
++}
++
++static void __exit GLOB_SBD_exit(void)
++{
++ int i;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ for (i = 0; i < NUM_DEVICES; i++) {
++ struct spectra_nand_dev *dev = &nand_device[i];
++ if (dev->gd) {
++ del_gendisk(dev->gd);
++ put_disk(dev->gd);
++ }
++ if (dev->queue)
++ blk_cleanup_queue(dev->queue);
++ kfree(dev->tmp_buf);
++ }
++
++ unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
++
++ mutex_lock(&spectra_lock);
++ force_flush_cache();
++ mutex_unlock(&spectra_lock);
++
++ GLOB_FTL_Cache_Release();
++
++ GLOB_FTL_Flash_Release();
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Spectra FTL module (major number %d) unloaded.\n",
++ GLOB_SBD_majornum);
++}
++
++static int __init setup_reserve_space_for_os_image(char *cmdline)
++{
++ unsigned long value;
++ int error;
++
++ printk(KERN_ALERT "Spectra - cmdline: %s\n", cmdline);
++ if (!cmdline)
++ return -EINVAL;
++
++ error = strict_strtoul((const char *)cmdline, 10, &value);
++ if (error)
++ return -EINVAL;
++
++ reserved_mb_for_os_image = value;
++
++ return 0;
++}
++
++early_param("res_nand", setup_reserve_space_for_os_image);
++
++module_init(GLOB_SBD_init);
++module_exit(GLOB_SBD_exit);
+diff --git a/drivers/block/spectra/ffsport.h b/drivers/block/spectra/ffsport.h
+new file mode 100644
+index 0000000..6c5d90c
+--- /dev/null
++++ b/drivers/block/spectra/ffsport.h
+@@ -0,0 +1,84 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef _FFSPORT_
++#define _FFSPORT_
++
++#include "ffsdefs.h"
++
++#if defined __GNUC__
++#define PACKED
++#define PACKED_GNU __attribute__ ((packed))
++#define UNALIGNED
++#endif
++
++#include <linux/semaphore.h>
++#include <linux/string.h> /* for strcpy(), stricmp(), etc */
++#include <linux/mm.h> /* for kmalloc(), kfree() */
++#include <linux/vmalloc.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/init.h>
++
++#include <linux/kernel.h> /* printk() */
++#include <linux/fs.h> /* everything... */
++#include <linux/errno.h> /* error codes */
++#include <linux/types.h> /* size_t */
++#include <linux/genhd.h>
++#include <linux/blkdev.h>
++#include <linux/hdreg.h>
++#include <linux/pci.h>
++#include "flash.h"
++
++#define VERBOSE 1
++
++#define NAND_DBG_WARN 1
++#define NAND_DBG_DEBUG 2
++#define NAND_DBG_TRACE 3
++
++extern int nand_debug_level;
++
++#ifdef VERBOSE
++#define nand_dbg_print(level, args...) \
++ do { \
++ if (level <= nand_debug_level) \
++ printk(KERN_ALERT args); \
++ } while (0)
++#else
++#define nand_dbg_print(level, args...)
++#endif
++
++#ifdef SUPPORT_BIG_ENDIAN
++#define INVERTUINT16(w) ((u16)(((u16)(w)) << 8) | \
++ (u16)((u16)(w) >> 8))
++
++#define INVERTUINT32(dw) (((u32)(dw) << 24) | \
++ (((u32)(dw) << 8) & 0x00ff0000) | \
++ (((u32)(dw) >> 8) & 0x0000ff00) | \
++ ((u32)(dw) >> 24))
++#else
++#define INVERTUINT16(w) w
++#define INVERTUINT32(dw) dw
++#endif
++
++extern int GLOB_Calc_Used_Bits(u32 n);
++extern u64 GLOB_u64_Div(u64 addr, u32 divisor);
++extern u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type);
++
++#endif /* _FFSPORT_ */
+diff --git a/drivers/block/spectra/flash.c b/drivers/block/spectra/flash.c
+new file mode 100644
+index 0000000..134aa51
+--- /dev/null
++++ b/drivers/block/spectra/flash.c
+@@ -0,0 +1,4731 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include <linux/fs.h>
++#include <linux/slab.h>
++
++#include "flash.h"
++#include "ffsdefs.h"
++#include "lld.h"
++#include "lld_nand.h"
++#if CMD_DMA
++#include "lld_cdma.h"
++#endif
++
++#define BLK_FROM_ADDR(addr) ((u32)(addr >> DeviceInfo.nBitsInBlockDataSize))
++#define PAGE_FROM_ADDR(addr, Block) ((u16)((addr - (u64)Block * \
++ DeviceInfo.wBlockDataSize) >> DeviceInfo.nBitsInPageDataSize))
++
++#define IS_SPARE_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
++ BAD_BLOCK) && SPARE_BLOCK == (pbt[blk] & SPARE_BLOCK))
++
++#define IS_DATA_BLOCK(blk) (0 == (pbt[blk] & BAD_BLOCK))
++
++#define IS_DISCARDED_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
++ BAD_BLOCK) && DISCARD_BLOCK == (pbt[blk] & DISCARD_BLOCK))
++
++#define IS_BAD_BLOCK(blk) (BAD_BLOCK == (pbt[blk] & BAD_BLOCK))
++
++#if DEBUG_BNDRY
++void debug_boundary_lineno_error(int chnl, int limit, int no,
++ int lineno, char *filename)
++{
++ if (chnl >= limit)
++ printk(KERN_ERR "Boundary Check Fail value %d >= limit %d, "
++ "at %s:%d. Other info:%d. Aborting...\n",
++ chnl, limit, filename, lineno, no);
++}
++/* static int globalmemsize; */
++#endif
++
++static u16 FTL_Cache_If_Hit(u64 dwPageAddr);
++static int FTL_Cache_Read(u64 dwPageAddr);
++static void FTL_Cache_Read_Page(u8 *pData, u64 dwPageAddr,
++ u16 cache_blk);
++static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr,
++ u8 cache_blk, u16 flag);
++static int FTL_Cache_Write(void);
++static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr);
++static void FTL_Calculate_LRU(void);
++static u32 FTL_Get_Block_Index(u32 wBlockNum);
++
++static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
++ u8 BT_Tag, u16 *Page);
++static int FTL_Read_Block_Table(void);
++static int FTL_Write_Block_Table(int wForce);
++static int FTL_Write_Block_Table_Data(void);
++static int FTL_Check_Block_Table(int wOldTable);
++static int FTL_Static_Wear_Leveling(void);
++static u32 FTL_Replace_Block_Table(void);
++static int FTL_Write_IN_Progress_Block_Table_Page(void);
++
++static u32 FTL_Get_Page_Num(u64 length);
++static u64 FTL_Get_Physical_Block_Addr(u64 blk_addr);
++
++static u32 FTL_Replace_OneBlock(u32 wBlockNum,
++ u32 wReplaceNum);
++static u32 FTL_Replace_LWBlock(u32 wBlockNum,
++ int *pGarbageCollect);
++static u32 FTL_Replace_MWBlock(void);
++static int FTL_Replace_Block(u64 blk_addr);
++static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX);
++
++static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr, u64 blk_addr);
++
++struct device_info_tag DeviceInfo;
++struct flash_cache_tag Cache;
++static struct spectra_l2_cache_info cache_l2;
++
++static u8 *cache_l2_page_buf;
++static u8 *cache_l2_blk_buf;
++
++u8 *g_pBlockTable;
++u8 *g_pWearCounter;
++u16 *g_pReadCounter;
++u32 *g_pBTBlocks;
++static u16 g_wBlockTableOffset;
++static u32 g_wBlockTableIndex;
++static u8 g_cBlockTableStatus;
++
++static u8 *g_pTempBuf;
++static u8 *flag_check_blk_table;
++static u8 *tmp_buf_search_bt_in_block;
++static u8 *spare_buf_search_bt_in_block;
++static u8 *spare_buf_bt_search_bt_in_block;
++static u8 *tmp_buf1_read_blk_table;
++static u8 *tmp_buf2_read_blk_table;
++static u8 *flags_static_wear_leveling;
++static u8 *tmp_buf_write_blk_table_data;
++static u8 *tmp_buf_read_disturbance;
++
++u8 *buf_read_page_main_spare;
++u8 *buf_write_page_main_spare;
++u8 *buf_read_page_spare;
++u8 *buf_get_bad_block;
++
++#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
++struct flash_cache_delta_list_tag int_cache[MAX_CHANS + MAX_DESCS];
++struct flash_cache_tag cache_start_copy;
++#endif
++
++int g_wNumFreeBlocks;
++u8 g_SBDCmdIndex;
++
++static u8 *g_pIPF;
++static u8 bt_flag = FIRST_BT_ID;
++static u8 bt_block_changed;
++
++static u16 cache_block_to_write;
++static u8 last_erased = FIRST_BT_ID;
++
++static u8 GC_Called;
++static u8 BT_GC_Called;
++
++#if CMD_DMA
++#define COPY_BACK_BUF_NUM 10
++
++static u8 ftl_cmd_cnt; /* Init value is 0 */
++u8 *g_pBTDelta;
++u8 *g_pBTDelta_Free;
++u8 *g_pBTStartingCopy;
++u8 *g_pWearCounterCopy;
++u16 *g_pReadCounterCopy;
++u8 *g_pBlockTableCopies;
++u8 *g_pNextBlockTable;
++static u8 *cp_back_buf_copies[COPY_BACK_BUF_NUM];
++static int cp_back_buf_idx;
++
++static u8 *g_temp_buf;
++
++#pragma pack(push, 1)
++#pragma pack(1)
++struct BTableChangesDelta {
++ u8 ftl_cmd_cnt;
++ u8 ValidFields;
++ u16 g_wBlockTableOffset;
++ u32 g_wBlockTableIndex;
++ u32 BT_Index;
++ u32 BT_Entry_Value;
++ u32 WC_Index;
++ u8 WC_Entry_Value;
++ u32 RC_Index;
++ u16 RC_Entry_Value;
++};
++
++#pragma pack(pop)
++
++struct BTableChangesDelta *p_BTableChangesDelta;
++#endif
++
++
++#define MARK_BLOCK_AS_BAD(blocknode) (blocknode |= BAD_BLOCK)
++#define MARK_BLK_AS_DISCARD(blk) (blk = (blk & ~SPARE_BLOCK) | DISCARD_BLOCK)
++
++#define FTL_Get_LBAPBA_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
++ sizeof(u32))
++#define FTL_Get_WearCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
++ sizeof(u8))
++#define FTL_Get_ReadCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
++ sizeof(u16))
++#if SUPPORT_LARGE_BLOCKNUM
++#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
++ sizeof(u8) * 3)
++#else
++#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
++ sizeof(u16))
++#endif
++#define FTL_Get_WearCounter_Table_Flash_Size_Bytes \
++ FTL_Get_WearCounter_Table_Mem_Size_Bytes
++#define FTL_Get_ReadCounter_Table_Flash_Size_Bytes \
++ FTL_Get_ReadCounter_Table_Mem_Size_Bytes
++
++static u32 FTL_Get_Block_Table_Flash_Size_Bytes(void)
++{
++ u32 byte_num;
++
++ if (DeviceInfo.MLCDevice) {
++ byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
++ DeviceInfo.wDataBlockNum * sizeof(u8) +
++ DeviceInfo.wDataBlockNum * sizeof(u16);
++ } else {
++ byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
++ DeviceInfo.wDataBlockNum * sizeof(u8);
++ }
++
++ byte_num += 4 * sizeof(u8);
++
++ return byte_num;
++}
++
++static u16 FTL_Get_Block_Table_Flash_Size_Pages(void)
++{
++ return (u16)FTL_Get_Page_Num(FTL_Get_Block_Table_Flash_Size_Bytes());
++}
++
++static int FTL_Copy_Block_Table_To_Flash(u8 *flashBuf, u32 sizeToTx,
++ u32 sizeTxed)
++{
++ u32 wBytesCopied, blk_tbl_size, wBytes;
++ u32 *pbt = (u32 *)g_pBlockTable;
++
++ blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
++ for (wBytes = 0;
++ (wBytes < sizeToTx) && ((wBytes + sizeTxed) < blk_tbl_size);
++ wBytes++) {
++#if SUPPORT_LARGE_BLOCKNUM
++ flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 3]
++ >> (((wBytes + sizeTxed) % 3) ?
++ ((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16)) & 0xFF;
++#else
++ flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 2]
++ >> (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
++#endif
++ }
++
++ sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
++ blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
++ wBytesCopied = wBytes;
++ wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
++ (sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
++ memcpy(flashBuf + wBytesCopied, g_pWearCounter + sizeTxed, wBytes);
++
++ sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
++
++ if (DeviceInfo.MLCDevice) {
++ blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
++ wBytesCopied += wBytes;
++ for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
++ ((wBytes + sizeTxed) < blk_tbl_size); wBytes++)
++ flashBuf[wBytes + wBytesCopied] =
++ (g_pReadCounter[(wBytes + sizeTxed) / 2] >>
++ (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
++ }
++
++ return wBytesCopied + wBytes;
++}
++
++static int FTL_Copy_Block_Table_From_Flash(u8 *flashBuf,
++ u32 sizeToTx, u32 sizeTxed)
++{
++ u32 wBytesCopied, blk_tbl_size, wBytes;
++ u32 *pbt = (u32 *)g_pBlockTable;
++
++ blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
++ for (wBytes = 0; (wBytes < sizeToTx) &&
++ ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
++#if SUPPORT_LARGE_BLOCKNUM
++ if (!((wBytes + sizeTxed) % 3))
++ pbt[(wBytes + sizeTxed) / 3] = 0;
++ pbt[(wBytes + sizeTxed) / 3] |=
++ (flashBuf[wBytes] << (((wBytes + sizeTxed) % 3) ?
++ ((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16));
++#else
++ if (!((wBytes + sizeTxed) % 2))
++ pbt[(wBytes + sizeTxed) / 2] = 0;
++ pbt[(wBytes + sizeTxed) / 2] |=
++ (flashBuf[wBytes] << (((wBytes + sizeTxed) % 2) ?
++ 0 : 8));
++#endif
++ }
++
++ sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
++ blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
++ wBytesCopied = wBytes;
++ wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
++ (sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
++ memcpy(g_pWearCounter + sizeTxed, flashBuf + wBytesCopied, wBytes);
++ sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
++
++ if (DeviceInfo.MLCDevice) {
++ wBytesCopied += wBytes;
++ blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
++ for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
++ ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
++ if (((wBytes + sizeTxed) % 2))
++ g_pReadCounter[(wBytes + sizeTxed) / 2] = 0;
++ g_pReadCounter[(wBytes + sizeTxed) / 2] |=
++ (flashBuf[wBytes] <<
++ (((wBytes + sizeTxed) % 2) ? 0 : 8));
++ }
++ }
++
++ return wBytesCopied+wBytes;
++}
++
++static int FTL_Insert_Block_Table_Signature(u8 *buf, u8 tag)
++{
++ int i;
++
++ for (i = 0; i < BTSIG_BYTES; i++)
++ buf[BTSIG_OFFSET + i] =
++ ((tag + (i * BTSIG_DELTA) - FIRST_BT_ID) %
++ (1 + LAST_BT_ID-FIRST_BT_ID)) + FIRST_BT_ID;
++
++ return PASS;
++}
++
++static int FTL_Extract_Block_Table_Tag(u8 *buf, u8 **tagarray)
++{
++ static u8 tag[BTSIG_BYTES >> 1];
++ int i, j, k, tagi, tagtemp, status;
++
++ *tagarray = (u8 *)tag;
++ tagi = 0;
++
++ for (i = 0; i < (BTSIG_BYTES - 1); i++) {
++ for (j = i + 1; (j < BTSIG_BYTES) &&
++ (tagi < (BTSIG_BYTES >> 1)); j++) {
++ tagtemp = buf[BTSIG_OFFSET + j] -
++ buf[BTSIG_OFFSET + i];
++ if (tagtemp && !(tagtemp % BTSIG_DELTA)) {
++ tagtemp = (buf[BTSIG_OFFSET + i] +
++ (1 + LAST_BT_ID - FIRST_BT_ID) -
++ (i * BTSIG_DELTA)) %
++ (1 + LAST_BT_ID - FIRST_BT_ID);
++ status = FAIL;
++ for (k = 0; k < tagi; k++) {
++ if (tagtemp == tag[k])
++ status = PASS;
++ }
++
++ if (status == FAIL) {
++ tag[tagi++] = tagtemp;
++ i = (j == (i + 1)) ? i + 1 : i;
++ j = (j == (i + 1)) ? i + 1 : i;
++ }
++ }
++ }
++ }
++
++ return tagi;
++}
++
++
++static int FTL_Execute_SPL_Recovery(void)
++{
++ u32 j, block, blks;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ int ret;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ blks = DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock;
++ for (j = 0; j <= blks; j++) {
++ block = (pbt[j]);
++ if (((block & BAD_BLOCK) != BAD_BLOCK) &&
++ ((block & SPARE_BLOCK) == SPARE_BLOCK)) {
++ ret = GLOB_LLD_Erase_Block(block & ~BAD_BLOCK);
++ if (FAIL == ret) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, "
++ "Function: %s, new Bad Block %d "
++ "generated!\n",
++ __FILE__, __LINE__, __func__,
++ (int)(block & ~BAD_BLOCK));
++ MARK_BLOCK_AS_BAD(pbt[j]);
++ }
++ }
++ }
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_IdentifyDevice
++* Inputs: pointer to identify data structure
++* Outputs: PASS / FAIL
++* Description: the identify data structure is filled in with
++* information for the block driver.
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ dev_data->NumBlocks = DeviceInfo.wTotalBlocks;
++ dev_data->PagesPerBlock = DeviceInfo.wPagesPerBlock;
++ dev_data->PageDataSize = DeviceInfo.wPageDataSize;
++ dev_data->wECCBytesPerSector = DeviceInfo.wECCBytesPerSector;
++ dev_data->wDataBlockNum = DeviceInfo.wDataBlockNum;
++
++ return PASS;
++}
++
++/* ..... */
++static int allocate_memory(void)
++{
++ u32 block_table_size, page_size, block_size, mem_size;
++ u32 total_bytes = 0;
++ int i;
++#if CMD_DMA
++ int j;
++#endif
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ page_size = DeviceInfo.wPageSize;
++ block_size = DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;
++
++ block_table_size = DeviceInfo.wDataBlockNum *
++ (sizeof(u32) + sizeof(u8) + sizeof(u16));
++ block_table_size += (DeviceInfo.wPageDataSize -
++ (block_table_size % DeviceInfo.wPageDataSize)) %
++ DeviceInfo.wPageDataSize;
++
++ /* Malloc memory for block tables */
++ g_pBlockTable = kmalloc(block_table_size, GFP_ATOMIC);
++ if (!g_pBlockTable)
++ goto block_table_fail;
++ memset(g_pBlockTable, 0, block_table_size);
++ total_bytes += block_table_size;
++
++ g_pWearCounter = (u8 *)(g_pBlockTable +
++ DeviceInfo.wDataBlockNum * sizeof(u32));
++
++ if (DeviceInfo.MLCDevice)
++ g_pReadCounter = (u16 *)(g_pBlockTable +
++ DeviceInfo.wDataBlockNum *
++ (sizeof(u32) + sizeof(u8)));
++
++ /* Malloc memory and init for cache items */
++ for (i = 0; i < CACHE_ITEM_NUM; i++) {
++ Cache.array[i].address = NAND_CACHE_INIT_ADDR;
++ Cache.array[i].use_cnt = 0;
++ Cache.array[i].changed = CLEAR;
++ Cache.array[i].buf = kmalloc(Cache.cache_item_size,
++ GFP_ATOMIC);
++ if (!Cache.array[i].buf)
++ goto cache_item_fail;
++ memset(Cache.array[i].buf, 0, Cache.cache_item_size);
++ total_bytes += Cache.cache_item_size;
++ }
++
++ /* Malloc memory for IPF */
++ g_pIPF = kmalloc(page_size, GFP_ATOMIC);
++ if (!g_pIPF)
++ goto ipf_fail;
++ memset(g_pIPF, 0, page_size);
++ total_bytes += page_size;
++
++ /* Malloc memory for data merging during Level2 Cache flush */
++ cache_l2_page_buf = kmalloc(page_size, GFP_ATOMIC);
++ if (!cache_l2_page_buf)
++ goto cache_l2_page_buf_fail;
++ memset(cache_l2_page_buf, 0xff, page_size);
++ total_bytes += page_size;
++
++ cache_l2_blk_buf = kmalloc(block_size, GFP_ATOMIC);
++ if (!cache_l2_blk_buf)
++ goto cache_l2_blk_buf_fail;
++ memset(cache_l2_blk_buf, 0xff, block_size);
++ total_bytes += block_size;
++
++ /* Malloc memory for temp buffer */
++ g_pTempBuf = kmalloc(Cache.cache_item_size, GFP_ATOMIC);
++ if (!g_pTempBuf)
++ goto Temp_buf_fail;
++ memset(g_pTempBuf, 0, Cache.cache_item_size);
++ total_bytes += Cache.cache_item_size;
++
++ /* Malloc memory for block table blocks */
++ mem_size = (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32);
++ g_pBTBlocks = kmalloc(mem_size, GFP_ATOMIC);
++ if (!g_pBTBlocks)
++ goto bt_blocks_fail;
++ memset(g_pBTBlocks, 0xff, mem_size);
++ total_bytes += mem_size;
++
++ /* Malloc memory for function FTL_Check_Block_Table */
++ flag_check_blk_table = kmalloc(DeviceInfo.wDataBlockNum, GFP_ATOMIC);
++ if (!flag_check_blk_table)
++ goto flag_check_blk_table_fail;
++ total_bytes += DeviceInfo.wDataBlockNum;
++
++ /* Malloc memory for function FTL_Search_Block_Table_IN_Block */
++ tmp_buf_search_bt_in_block = kmalloc(page_size, GFP_ATOMIC);
++ if (!tmp_buf_search_bt_in_block)
++ goto tmp_buf_search_bt_in_block_fail;
++ memset(tmp_buf_search_bt_in_block, 0xff, page_size);
++ total_bytes += page_size;
++
++ mem_size = DeviceInfo.wPageSize - DeviceInfo.wPageDataSize;
++ spare_buf_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
++ if (!spare_buf_search_bt_in_block)
++ goto spare_buf_search_bt_in_block_fail;
++ memset(spare_buf_search_bt_in_block, 0xff, mem_size);
++ total_bytes += mem_size;
++
++ spare_buf_bt_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
++ if (!spare_buf_bt_search_bt_in_block)
++ goto spare_buf_bt_search_bt_in_block_fail;
++ memset(spare_buf_bt_search_bt_in_block, 0xff, mem_size);
++ total_bytes += mem_size;
++
++ /* Malloc memory for function FTL_Read_Block_Table */
++ tmp_buf1_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
++ if (!tmp_buf1_read_blk_table)
++ goto tmp_buf1_read_blk_table_fail;
++ memset(tmp_buf1_read_blk_table, 0xff, page_size);
++ total_bytes += page_size;
++
++ tmp_buf2_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
++ if (!tmp_buf2_read_blk_table)
++ goto tmp_buf2_read_blk_table_fail;
++ memset(tmp_buf2_read_blk_table, 0xff, page_size);
++ total_bytes += page_size;
++
++ /* Malloc memory for function FTL_Static_Wear_Leveling */
++ flags_static_wear_leveling = kmalloc(DeviceInfo.wDataBlockNum,
++ GFP_ATOMIC);
++ if (!flags_static_wear_leveling)
++ goto flags_static_wear_leveling_fail;
++ total_bytes += DeviceInfo.wDataBlockNum;
++
++ /* Malloc memory for function FTL_Write_Block_Table_Data */
++ if (FTL_Get_Block_Table_Flash_Size_Pages() > 3)
++ mem_size = FTL_Get_Block_Table_Flash_Size_Bytes() -
++ 2 * DeviceInfo.wPageSize;
++ else
++ mem_size = DeviceInfo.wPageSize;
++ tmp_buf_write_blk_table_data = kmalloc(mem_size, GFP_ATOMIC);
++ if (!tmp_buf_write_blk_table_data)
++ goto tmp_buf_write_blk_table_data_fail;
++ memset(tmp_buf_write_blk_table_data, 0xff, mem_size);
++ total_bytes += mem_size;
++
++ /* Malloc memory for function FTL_Read_Disturbance */
++ tmp_buf_read_disturbance = kmalloc(block_size, GFP_ATOMIC);
++ if (!tmp_buf_read_disturbance)
++ goto tmp_buf_read_disturbance_fail;
++ memset(tmp_buf_read_disturbance, 0xff, block_size);
++ total_bytes += block_size;
++
++ /* Alloc mem for function NAND_Read_Page_Main_Spare of lld_nand.c */
++ buf_read_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
++ if (!buf_read_page_main_spare)
++ goto buf_read_page_main_spare_fail;
++ total_bytes += DeviceInfo.wPageSize;
++
++ /* Alloc mem for function NAND_Write_Page_Main_Spare of lld_nand.c */
++ buf_write_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
++ if (!buf_write_page_main_spare)
++ goto buf_write_page_main_spare_fail;
++ total_bytes += DeviceInfo.wPageSize;
++
++ /* Alloc mem for function NAND_Read_Page_Spare of lld_nand.c */
++ buf_read_page_spare = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
++ if (!buf_read_page_spare)
++ goto buf_read_page_spare_fail;
++ memset(buf_read_page_spare, 0xff, DeviceInfo.wPageSpareSize);
++ total_bytes += DeviceInfo.wPageSpareSize;
++
++ /* Alloc mem for function NAND_Get_Bad_Block of lld_nand.c */
++ buf_get_bad_block = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
++ if (!buf_get_bad_block)
++ goto buf_get_bad_block_fail;
++ memset(buf_get_bad_block, 0xff, DeviceInfo.wPageSpareSize);
++ total_bytes += DeviceInfo.wPageSpareSize;
++
++#if CMD_DMA
++ g_temp_buf = kmalloc(block_size, GFP_ATOMIC);
++ if (!g_temp_buf)
++ goto temp_buf_fail;
++ memset(g_temp_buf, 0xff, block_size);
++ total_bytes += block_size;
++
++ /* Malloc memory for copy of block table used in CDMA mode */
++ g_pBTStartingCopy = kmalloc(block_table_size, GFP_ATOMIC);
++ if (!g_pBTStartingCopy)
++ goto bt_starting_copy;
++ memset(g_pBTStartingCopy, 0, block_table_size);
++ total_bytes += block_table_size;
++
++ g_pWearCounterCopy = (u8 *)(g_pBTStartingCopy +
++ DeviceInfo.wDataBlockNum * sizeof(u32));
++
++ if (DeviceInfo.MLCDevice)
++ g_pReadCounterCopy = (u16 *)(g_pBTStartingCopy +
++ DeviceInfo.wDataBlockNum *
++ (sizeof(u32) + sizeof(u8)));
++
++ /* Malloc memory for block table copies */
++ mem_size = 5 * DeviceInfo.wDataBlockNum * sizeof(u32) +
++ 5 * DeviceInfo.wDataBlockNum * sizeof(u8);
++ if (DeviceInfo.MLCDevice)
++ mem_size += 5 * DeviceInfo.wDataBlockNum * sizeof(u16);
++ g_pBlockTableCopies = kmalloc(mem_size, GFP_ATOMIC);
++ if (!g_pBlockTableCopies)
++ goto blk_table_copies_fail;
++ memset(g_pBlockTableCopies, 0, mem_size);
++ total_bytes += mem_size;
++ g_pNextBlockTable = g_pBlockTableCopies;
++
++ /* Malloc memory for Block Table Delta */
++ mem_size = MAX_DESCS * sizeof(struct BTableChangesDelta);
++ g_pBTDelta = kmalloc(mem_size, GFP_ATOMIC);
++ if (!g_pBTDelta)
++ goto bt_delta_fail;
++ memset(g_pBTDelta, 0, mem_size);
++ total_bytes += mem_size;
++ g_pBTDelta_Free = g_pBTDelta;
++
++ /* Malloc memory for Copy Back Buffers */
++ for (j = 0; j < COPY_BACK_BUF_NUM; j++) {
++ cp_back_buf_copies[j] = kmalloc(block_size, GFP_ATOMIC);
++ if (!cp_back_buf_copies[j])
++ goto cp_back_buf_copies_fail;
++ memset(cp_back_buf_copies[j], 0, block_size);
++ total_bytes += block_size;
++ }
++ cp_back_buf_idx = 0;
++
++ /* Malloc memory for pending commands list */
++ mem_size = sizeof(struct pending_cmd) * MAX_DESCS;
++ info.pcmds = kzalloc(mem_size, GFP_KERNEL);
++ if (!info.pcmds)
++ goto pending_cmds_buf_fail;
++ total_bytes += mem_size;
++
++ /* Malloc memory for CDMA descripter table */
++ mem_size = sizeof(struct cdma_descriptor) * MAX_DESCS;
++ info.cdma_desc_buf = kzalloc(mem_size, GFP_KERNEL);
++ if (!info.cdma_desc_buf)
++ goto cdma_desc_buf_fail;
++ total_bytes += mem_size;
++
++ /* Malloc memory for Memcpy descripter table */
++ mem_size = sizeof(struct memcpy_descriptor) * MAX_DESCS;
++ info.memcp_desc_buf = kzalloc(mem_size, GFP_KERNEL);
++ if (!info.memcp_desc_buf)
++ goto memcp_desc_buf_fail;
++ total_bytes += mem_size;
++#endif
++
++ nand_dbg_print(NAND_DBG_WARN,
++ "Total memory allocated in FTL layer: %d\n", total_bytes);
++
++ return PASS;
++
++#if CMD_DMA
++memcp_desc_buf_fail:
++ kfree(info.cdma_desc_buf);
++cdma_desc_buf_fail:
++ kfree(info.pcmds);
++pending_cmds_buf_fail:
++cp_back_buf_copies_fail:
++ j--;
++ for (; j >= 0; j--)
++ kfree(cp_back_buf_copies[j]);
++ kfree(g_pBTDelta);
++bt_delta_fail:
++ kfree(g_pBlockTableCopies);
++blk_table_copies_fail:
++ kfree(g_pBTStartingCopy);
++bt_starting_copy:
++ kfree(g_temp_buf);
++temp_buf_fail:
++ kfree(buf_get_bad_block);
++#endif
++
++buf_get_bad_block_fail:
++ kfree(buf_read_page_spare);
++buf_read_page_spare_fail:
++ kfree(buf_write_page_main_spare);
++buf_write_page_main_spare_fail:
++ kfree(buf_read_page_main_spare);
++buf_read_page_main_spare_fail:
++ kfree(tmp_buf_read_disturbance);
++tmp_buf_read_disturbance_fail:
++ kfree(tmp_buf_write_blk_table_data);
++tmp_buf_write_blk_table_data_fail:
++ kfree(flags_static_wear_leveling);
++flags_static_wear_leveling_fail:
++ kfree(tmp_buf2_read_blk_table);
++tmp_buf2_read_blk_table_fail:
++ kfree(tmp_buf1_read_blk_table);
++tmp_buf1_read_blk_table_fail:
++ kfree(spare_buf_bt_search_bt_in_block);
++spare_buf_bt_search_bt_in_block_fail:
++ kfree(spare_buf_search_bt_in_block);
++spare_buf_search_bt_in_block_fail:
++ kfree(tmp_buf_search_bt_in_block);
++tmp_buf_search_bt_in_block_fail:
++ kfree(flag_check_blk_table);
++flag_check_blk_table_fail:
++ kfree(g_pBTBlocks);
++bt_blocks_fail:
++ kfree(g_pTempBuf);
++Temp_buf_fail:
++ kfree(cache_l2_blk_buf);
++cache_l2_blk_buf_fail:
++ kfree(cache_l2_page_buf);
++cache_l2_page_buf_fail:
++ kfree(g_pIPF);
++ipf_fail:
++cache_item_fail:
++ i--;
++ for (; i >= 0; i--)
++ kfree(Cache.array[i].buf);
++ kfree(g_pBlockTable);
++block_table_fail:
++ printk(KERN_ERR "Failed to kmalloc memory in %s Line %d.\n",
++ __FILE__, __LINE__);
++
++ return -ENOMEM;
++}
++
++/* .... */
++static int free_memory(void)
++{
++ int i;
++
++#if CMD_DMA
++ kfree(info.memcp_desc_buf);
++ kfree(info.cdma_desc_buf);
++ kfree(info.pcmds);
++ for (i = COPY_BACK_BUF_NUM - 1; i >= 0; i--)
++ kfree(cp_back_buf_copies[i]);
++ kfree(g_pBTDelta);
++ kfree(g_pBlockTableCopies);
++ kfree(g_pBTStartingCopy);
++ kfree(g_temp_buf);
++ kfree(buf_get_bad_block);
++#endif
++ kfree(buf_read_page_spare);
++ kfree(buf_write_page_main_spare);
++ kfree(buf_read_page_main_spare);
++ kfree(tmp_buf_read_disturbance);
++ kfree(tmp_buf_write_blk_table_data);
++ kfree(flags_static_wear_leveling);
++ kfree(tmp_buf2_read_blk_table);
++ kfree(tmp_buf1_read_blk_table);
++ kfree(spare_buf_bt_search_bt_in_block);
++ kfree(spare_buf_search_bt_in_block);
++ kfree(tmp_buf_search_bt_in_block);
++ kfree(flag_check_blk_table);
++ kfree(g_pBTBlocks);
++ kfree(g_pTempBuf);
++ kfree(g_pIPF);
++ for (i = CACHE_ITEM_NUM - 1; i >= 0; i--)
++ kfree(Cache.array[i].buf);
++ kfree(g_pBlockTable);
++
++ return 0;
++}
++
++static void dump_cache_l2_table(void)
++{
++ struct list_head *p;
++ struct spectra_l2_cache_list *pnd;
++ int n, i;
++
++ n = 0;
++ list_for_each(p, &cache_l2.table.list) {
++ pnd = list_entry(p, struct spectra_l2_cache_list, list);
++ nand_dbg_print(NAND_DBG_WARN, "dump_cache_l2_table node: %d, logical_blk_num: %d\n", n, pnd->logical_blk_num);
++/*
++ for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
++ if (pnd->pages_array[i] != MAX_U32_VALUE)
++ nand_dbg_print(NAND_DBG_WARN, " pages_array[%d]: 0x%x\n", i, pnd->pages_array[i]);
++ }
++*/
++ n++;
++ }
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Init
++* Inputs: none
++* Outputs: PASS=0 / FAIL=1
++* Description: allocates the memory for cache array,
++* important data structures
++* clears the cache array
++* reads the block table from flash into array
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_Init(void)
++{
++ int i;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ Cache.pages_per_item = 1;
++ Cache.cache_item_size = 1 * DeviceInfo.wPageDataSize;
++
++ if (allocate_memory() != PASS)
++ return FAIL;
++
++#if CMD_DMA
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ memcpy((void *)&cache_start_copy, (void *)&Cache,
++ sizeof(struct flash_cache_tag));
++ memset((void *)&int_cache, -1,
++ sizeof(struct flash_cache_delta_list_tag) *
++ (MAX_CHANS + MAX_DESCS));
++#endif
++ ftl_cmd_cnt = 0;
++#endif
++
++ if (FTL_Read_Block_Table() != PASS)
++ return FAIL;
++
++ /* Init the Level2 Cache data structure */
++ for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
++ cache_l2.blk_array[i] = MAX_U32_VALUE;
++ cache_l2.cur_blk_idx = 0;
++ cache_l2.cur_page_num = 0;
++ INIT_LIST_HEAD(&cache_l2.table.list);
++ cache_l2.table.logical_blk_num = MAX_U32_VALUE;
++
++ dump_cache_l2_table();
++
++ return 0;
++}
++
++
++#if CMD_DMA
++#if 0
++static void save_blk_table_changes(u16 idx)
++{
++ u8 ftl_cmd;
++ u32 *pbt = (u32 *)g_pBTStartingCopy;
++
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ u16 id;
++ u8 cache_blks;
++
++ id = idx - MAX_CHANS;
++ if (int_cache[id].item != -1) {
++ cache_blks = int_cache[id].item;
++ cache_start_copy.array[cache_blks].address =
++ int_cache[id].cache.address;
++ cache_start_copy.array[cache_blks].changed =
++ int_cache[id].cache.changed;
++ }
++#endif
++
++ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
++
++ while (ftl_cmd <= PendingCMD[idx].Tag) {
++ if (p_BTableChangesDelta->ValidFields == 0x01) {
++ g_wBlockTableOffset =
++ p_BTableChangesDelta->g_wBlockTableOffset;
++ } else if (p_BTableChangesDelta->ValidFields == 0x0C) {
++ pbt[p_BTableChangesDelta->BT_Index] =
++ p_BTableChangesDelta->BT_Entry_Value;
++ debug_boundary_error(((
++ p_BTableChangesDelta->BT_Index)),
++ DeviceInfo.wDataBlockNum, 0);
++ } else if (p_BTableChangesDelta->ValidFields == 0x03) {
++ g_wBlockTableOffset =
++ p_BTableChangesDelta->g_wBlockTableOffset;
++ g_wBlockTableIndex =
++ p_BTableChangesDelta->g_wBlockTableIndex;
++ } else if (p_BTableChangesDelta->ValidFields == 0x30) {
++ g_pWearCounterCopy[p_BTableChangesDelta->WC_Index] =
++ p_BTableChangesDelta->WC_Entry_Value;
++ } else if ((DeviceInfo.MLCDevice) &&
++ (p_BTableChangesDelta->ValidFields == 0xC0)) {
++ g_pReadCounterCopy[p_BTableChangesDelta->RC_Index] =
++ p_BTableChangesDelta->RC_Entry_Value;
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "In event status setting read counter "
++ "GLOB_ftl_cmd_cnt %u Count %u Index %u\n",
++ ftl_cmd,
++ p_BTableChangesDelta->RC_Entry_Value,
++ (unsigned int)p_BTableChangesDelta->RC_Index);
++ } else {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "This should never occur \n");
++ }
++ p_BTableChangesDelta += 1;
++ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
++ }
++}
++
++static void discard_cmds(u16 n)
++{
++ u32 *pbt = (u32 *)g_pBTStartingCopy;
++ u8 ftl_cmd;
++ unsigned long k;
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ u8 cache_blks;
++ u16 id;
++#endif
++
++ if ((PendingCMD[n].CMD == WRITE_MAIN_CMD) ||
++ (PendingCMD[n].CMD == WRITE_MAIN_SPARE_CMD)) {
++ for (k = 0; k < DeviceInfo.wDataBlockNum; k++) {
++ if (PendingCMD[n].Block == (pbt[k] & (~BAD_BLOCK)))
++ MARK_BLK_AS_DISCARD(pbt[k]);
++ }
++ }
++
++ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
++ while (ftl_cmd <= PendingCMD[n].Tag) {
++ p_BTableChangesDelta += 1;
++ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
++ }
++
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ id = n - MAX_CHANS;
++
++ if (int_cache[id].item != -1) {
++ cache_blks = int_cache[id].item;
++ if (PendingCMD[n].CMD == MEMCOPY_CMD) {
++ if ((cache_start_copy.array[cache_blks].buf <=
++ PendingCMD[n].DataDestAddr) &&
++ ((cache_start_copy.array[cache_blks].buf +
++ Cache.cache_item_size) >
++ PendingCMD[n].DataDestAddr)) {
++ cache_start_copy.array[cache_blks].address =
++ NAND_CACHE_INIT_ADDR;
++ cache_start_copy.array[cache_blks].use_cnt =
++ 0;
++ cache_start_copy.array[cache_blks].changed =
++ CLEAR;
++ }
++ } else {
++ cache_start_copy.array[cache_blks].address =
++ int_cache[id].cache.address;
++ cache_start_copy.array[cache_blks].changed =
++ int_cache[id].cache.changed;
++ }
++ }
++#endif
++}
++
++static void process_cmd_pass(int *first_failed_cmd, u16 idx)
++{
++ if (0 == *first_failed_cmd)
++ save_blk_table_changes(idx);
++ else
++ discard_cmds(idx);
++}
++
++static void process_cmd_fail_abort(int *first_failed_cmd,
++ u16 idx, int event)
++{
++ u32 *pbt = (u32 *)g_pBTStartingCopy;
++ u8 ftl_cmd;
++ unsigned long i;
++ int erase_fail, program_fail;
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ u8 cache_blks;
++ u16 id;
++#endif
++
++ if (0 == *first_failed_cmd)
++ *first_failed_cmd = PendingCMD[idx].SBDCmdIndex;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "Uncorrectable error has occured "
++ "while executing %u Command %u accesing Block %u\n",
++ (unsigned int)p_BTableChangesDelta->ftl_cmd_cnt,
++ PendingCMD[idx].CMD,
++ (unsigned int)PendingCMD[idx].Block);
++
++ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
++ while (ftl_cmd <= PendingCMD[idx].Tag) {
++ p_BTableChangesDelta += 1;
++ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
++ }
++
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ id = idx - MAX_CHANS;
++
++ if (int_cache[id].item != -1) {
++ cache_blks = int_cache[id].item;
++ if ((PendingCMD[idx].CMD == WRITE_MAIN_CMD)) {
++ cache_start_copy.array[cache_blks].address =
++ int_cache[id].cache.address;
++ cache_start_copy.array[cache_blks].changed = SET;
++ } else if ((PendingCMD[idx].CMD == READ_MAIN_CMD)) {
++ cache_start_copy.array[cache_blks].address =
++ NAND_CACHE_INIT_ADDR;
++ cache_start_copy.array[cache_blks].use_cnt = 0;
++ cache_start_copy.array[cache_blks].changed =
++ CLEAR;
++ } else if (PendingCMD[idx].CMD == ERASE_CMD) {
++ /* ? */
++ } else if (PendingCMD[idx].CMD == MEMCOPY_CMD) {
++ /* ? */
++ }
++ }
++#endif
++
++ erase_fail = (event == EVENT_ERASE_FAILURE) &&
++ (PendingCMD[idx].CMD == ERASE_CMD);
++
++ program_fail = (event == EVENT_PROGRAM_FAILURE) &&
++ ((PendingCMD[idx].CMD == WRITE_MAIN_CMD) ||
++ (PendingCMD[idx].CMD == WRITE_MAIN_SPARE_CMD));
++
++ if (erase_fail || program_fail) {
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
++ if (PendingCMD[idx].Block ==
++ (pbt[i] & (~BAD_BLOCK)))
++ MARK_BLOCK_AS_BAD(pbt[i]);
++ }
++ }
++}
++
++static void process_cmd(int *first_failed_cmd, u16 idx, int event)
++{
++ u8 ftl_cmd;
++ int cmd_match = 0;
++
++ if (p_BTableChangesDelta->ftl_cmd_cnt == PendingCMD[idx].Tag)
++ cmd_match = 1;
++
++ if (PendingCMD[idx].Status == CMD_PASS) {
++ process_cmd_pass(first_failed_cmd, idx);
++ } else if ((PendingCMD[idx].Status == CMD_FAIL) ||
++ (PendingCMD[idx].Status == CMD_ABORT)) {
++ process_cmd_fail_abort(first_failed_cmd, idx, event);
++ } else if ((PendingCMD[idx].Status == CMD_NOT_DONE) &&
++ PendingCMD[idx].Tag) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ " Command no. %hu is not executed\n",
++ (unsigned int)PendingCMD[idx].Tag);
++ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
++ while (ftl_cmd <= PendingCMD[idx].Tag) {
++ p_BTableChangesDelta += 1;
++ ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
++ }
++ }
++}
++#endif
++
++static void process_cmd(int *first_failed_cmd, u16 idx, int event)
++{
++ printk(KERN_ERR "temporary workaround function. "
++ "Should not be called! \n");
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Event_Status
++* Inputs: none
++* Outputs: Event Code
++* Description: It is called by SBD after hardware interrupt signalling
++* completion of commands chain
++* It does following things
++* get event status from LLD
++* analyze command chain status
++* determine last command executed
++* analyze results
++* rebuild the block table in case of uncorrectable error
++* return event code
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_Event_Status(int *first_failed_cmd)
++{
++ int event_code = PASS;
++ u16 i_P;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ *first_failed_cmd = 0;
++
++ event_code = GLOB_LLD_Event_Status();
++
++ switch (event_code) {
++ case EVENT_PASS:
++ nand_dbg_print(NAND_DBG_DEBUG, "Handling EVENT_PASS\n");
++ break;
++ case EVENT_UNCORRECTABLE_DATA_ERROR:
++ nand_dbg_print(NAND_DBG_DEBUG, "Handling Uncorrectable ECC!\n");
++ break;
++ case EVENT_PROGRAM_FAILURE:
++ case EVENT_ERASE_FAILURE:
++ nand_dbg_print(NAND_DBG_WARN, "Handling Ugly case. "
++ "Event code: 0x%x\n", event_code);
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta;
++ for (i_P = MAX_CHANS; i_P < (ftl_cmd_cnt + MAX_CHANS);
++ i_P++)
++ process_cmd(first_failed_cmd, i_P, event_code);
++ memcpy(g_pBlockTable, g_pBTStartingCopy,
++ DeviceInfo.wDataBlockNum * sizeof(u32));
++ memcpy(g_pWearCounter, g_pWearCounterCopy,
++ DeviceInfo.wDataBlockNum * sizeof(u8));
++ if (DeviceInfo.MLCDevice)
++ memcpy(g_pReadCounter, g_pReadCounterCopy,
++ DeviceInfo.wDataBlockNum * sizeof(u16));
++
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ memcpy((void *)&Cache, (void *)&cache_start_copy,
++ sizeof(struct flash_cache_tag));
++ memset((void *)&int_cache, -1,
++ sizeof(struct flash_cache_delta_list_tag) *
++ (MAX_DESCS + MAX_CHANS));
++#endif
++ break;
++ default:
++ nand_dbg_print(NAND_DBG_WARN,
++ "Handling unexpected event code - 0x%x\n",
++ event_code);
++ event_code = ERR;
++ break;
++ }
++
++ memcpy(g_pBTStartingCopy, g_pBlockTable,
++ DeviceInfo.wDataBlockNum * sizeof(u32));
++ memcpy(g_pWearCounterCopy, g_pWearCounter,
++ DeviceInfo.wDataBlockNum * sizeof(u8));
++ if (DeviceInfo.MLCDevice)
++ memcpy(g_pReadCounterCopy, g_pReadCounter,
++ DeviceInfo.wDataBlockNum * sizeof(u16));
++
++ g_pBTDelta_Free = g_pBTDelta;
++ ftl_cmd_cnt = 0;
++ g_pNextBlockTable = g_pBlockTableCopies;
++ cp_back_buf_idx = 0;
++
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ memcpy((void *)&cache_start_copy, (void *)&Cache,
++ sizeof(struct flash_cache_tag));
++ memset((void *)&int_cache, -1,
++ sizeof(struct flash_cache_delta_list_tag) *
++ (MAX_DESCS + MAX_CHANS));
++#endif
++
++ return event_code;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: glob_ftl_execute_cmds
++* Inputs: none
++* Outputs: none
++* Description: pass thru to LLD
++***************************************************************/
++u16 glob_ftl_execute_cmds(void)
++{
++ nand_dbg_print(NAND_DBG_TRACE,
++ "glob_ftl_execute_cmds: ftl_cmd_cnt %u\n",
++ (unsigned int)ftl_cmd_cnt);
++ g_SBDCmdIndex = 0;
++ return glob_lld_execute_cmds();
++}
++
++#endif
++
++#if !CMD_DMA
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Read Immediate
++* Inputs: pointer to data
++* address of data
++* Outputs: PASS / FAIL
++* Description: Reads one page of data into RAM directly from flash without
++* using or disturbing cache.It is assumed this function is called
++* with CMD-DMA disabled.
++*****************************************************************/
++int GLOB_FTL_Read_Immediate(u8 *read_data, u64 addr)
++{
++ int wResult = FAIL;
++ u32 Block;
++ u16 Page;
++ u32 phy_blk;
++ u32 *pbt = (u32 *)g_pBlockTable;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ Block = BLK_FROM_ADDR(addr);
++ Page = PAGE_FROM_ADDR(addr, Block);
++
++ if (!IS_SPARE_BLOCK(Block))
++ return FAIL;
++
++ phy_blk = pbt[Block];
++ wResult = GLOB_LLD_Read_Page_Main(read_data, phy_blk, Page, 1);
++
++ if (DeviceInfo.MLCDevice) {
++ g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]++;
++ if (g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]
++ >= MAX_READ_COUNTER)
++ FTL_Read_Disturbance(phy_blk);
++ if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ FTL_Write_IN_Progress_Block_Table_Page();
++ }
++ }
++
++ return wResult;
++}
++#endif
++
++#ifdef SUPPORT_BIG_ENDIAN
++/*********************************************************************
++* Function: FTL_Invert_Block_Table
++* Inputs: none
++* Outputs: none
++* Description: Re-format the block table in ram based on BIG_ENDIAN and
++* LARGE_BLOCKNUM if necessary
++**********************************************************************/
++static void FTL_Invert_Block_Table(void)
++{
++ u32 i;
++ u32 *pbt = (u32 *)g_pBlockTable;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++#ifdef SUPPORT_LARGE_BLOCKNUM
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
++ pbt[i] = INVERTUINT32(pbt[i]);
++ g_pWearCounter[i] = INVERTUINT32(g_pWearCounter[i]);
++ }
++#else
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
++ pbt[i] = INVERTUINT16(pbt[i]);
++ g_pWearCounter[i] = INVERTUINT16(g_pWearCounter[i]);
++ }
++#endif
++}
++#endif
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Flash_Init
++* Inputs: none
++* Outputs: PASS=0 / FAIL=0x01 (based on read ID)
++* Description: The flash controller is initialized
++* The flash device is reset
++* Perform a flash READ ID command to confirm that a
++* valid device is attached and active.
++* The DeviceInfo structure gets filled in
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_Flash_Init(void)
++{
++ int status = FAIL;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ g_SBDCmdIndex = 0;
++
++ GLOB_LLD_Flash_Init();
++
++ status = GLOB_LLD_Read_Device_ID();
++
++ return status;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Inputs: none
++* Outputs: PASS=0 / FAIL=0x01 (based on read ID)
++* Description: The flash controller is released
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_Flash_Release(void)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ return GLOB_LLD_Flash_Release();
++}
++
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Cache_Release
++* Inputs: none
++* Outputs: none
++* Description: release all allocated memory in GLOB_FTL_Init
++* (allocated in GLOB_FTL_Init)
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++void GLOB_FTL_Cache_Release(void)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ free_memory();
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Cache_If_Hit
++* Inputs: Page Address
++* Outputs: Block number/UNHIT BLOCK
++* Description: Determines if the addressed page is in cache
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static u16 FTL_Cache_If_Hit(u64 page_addr)
++{
++ u16 item;
++ u64 addr;
++ int i;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ item = UNHIT_CACHE_ITEM;
++ for (i = 0; i < CACHE_ITEM_NUM; i++) {
++ addr = Cache.array[i].address;
++ if ((page_addr >= addr) &&
++ (page_addr < (addr + Cache.cache_item_size))) {
++ item = i;
++ break;
++ }
++ }
++
++ return item;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Calculate_LRU
++* Inputs: None
++* Outputs: None
++* Description: Calculate the least recently block in a cache and record its
++* index in LRU field.
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static void FTL_Calculate_LRU(void)
++{
++ u16 i, bCurrentLRU, bTempCount;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ bCurrentLRU = 0;
++ bTempCount = MAX_WORD_VALUE;
++
++ for (i = 0; i < CACHE_ITEM_NUM; i++) {
++ if (Cache.array[i].use_cnt < bTempCount) {
++ bCurrentLRU = i;
++ bTempCount = Cache.array[i].use_cnt;
++ }
++ }
++
++ Cache.LRU = bCurrentLRU;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Cache_Read_Page
++* Inputs: pointer to read buffer, logical address and cache item number
++* Outputs: None
++* Description: Read the page from the cached block addressed by blocknumber
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static void FTL_Cache_Read_Page(u8 *data_buf, u64 logic_addr, u16 cache_item)
++{
++ u8 *start_addr;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ start_addr = Cache.array[cache_item].buf;
++ start_addr += (u32)(((logic_addr - Cache.array[cache_item].address) >>
++ DeviceInfo.nBitsInPageDataSize) * DeviceInfo.wPageDataSize);
++
++#if CMD_DMA
++ GLOB_LLD_MemCopy_CMD(data_buf, start_addr,
++ DeviceInfo.wPageDataSize, 0);
++ ftl_cmd_cnt++;
++#else
++ memcpy(data_buf, start_addr, DeviceInfo.wPageDataSize);
++#endif
++
++ if (Cache.array[cache_item].use_cnt < MAX_WORD_VALUE)
++ Cache.array[cache_item].use_cnt++;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Cache_Read_All
++* Inputs: pointer to read buffer,block address
++* Outputs: PASS=0 / FAIL =1
++* Description: It reads pages in cache
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Cache_Read_All(u8 *pData, u64 phy_addr)
++{
++ int wResult = PASS;
++ u32 Block;
++ u32 lba;
++ u16 Page;
++ u16 PageCount;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 i;
++
++ Block = BLK_FROM_ADDR(phy_addr);
++ Page = PAGE_FROM_ADDR(phy_addr, Block);
++ PageCount = Cache.pages_per_item;
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "%s, Line %d, Function: %s, Block: 0x%x\n",
++ __FILE__, __LINE__, __func__, Block);
++
++ lba = 0xffffffff;
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
++ if ((pbt[i] & (~BAD_BLOCK)) == Block) {
++ lba = i;
++ if (IS_SPARE_BLOCK(i) || IS_BAD_BLOCK(i) ||
++ IS_DISCARDED_BLOCK(i)) {
++ /* Add by yunpeng -2008.12.3 */
++#if CMD_DMA
++ GLOB_LLD_MemCopy_CMD(pData, g_temp_buf,
++ PageCount * DeviceInfo.wPageDataSize, 0);
++ ftl_cmd_cnt++;
++#else
++ memset(pData, 0xFF,
++ PageCount * DeviceInfo.wPageDataSize);
++#endif
++ return wResult;
++ } else {
++ continue; /* break ?? */
++ }
++ }
++ }
++
++ if (0xffffffff == lba)
++ printk(KERN_ERR "FTL_Cache_Read_All: Block is not found in BT\n");
++
++#if CMD_DMA
++ wResult = GLOB_LLD_Read_Page_Main_cdma(pData, Block, Page,
++ PageCount, LLD_CMD_FLAG_MODE_CDMA);
++ if (DeviceInfo.MLCDevice) {
++ g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Read Counter modified in ftl_cmd_cnt %u"
++ " Block %u Counter%u\n",
++ ftl_cmd_cnt, (unsigned int)Block,
++ g_pReadCounter[Block -
++ DeviceInfo.wSpectraStartBlock]);
++
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
++ p_BTableChangesDelta->RC_Index =
++ Block - DeviceInfo.wSpectraStartBlock;
++ p_BTableChangesDelta->RC_Entry_Value =
++ g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock];
++ p_BTableChangesDelta->ValidFields = 0xC0;
++
++ ftl_cmd_cnt++;
++
++ if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
++ MAX_READ_COUNTER)
++ FTL_Read_Disturbance(Block);
++ if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ FTL_Write_IN_Progress_Block_Table_Page();
++ }
++ } else {
++ ftl_cmd_cnt++;
++ }
++#else
++ wResult = GLOB_LLD_Read_Page_Main(pData, Block, Page, PageCount);
++ if (wResult == FAIL)
++ return wResult;
++
++ if (DeviceInfo.MLCDevice) {
++ g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
++ if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
++ MAX_READ_COUNTER)
++ FTL_Read_Disturbance(Block);
++ if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ FTL_Write_IN_Progress_Block_Table_Page();
++ }
++ }
++#endif
++ return wResult;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Cache_Write_All
++* Inputs: pointer to cache in sys memory
++* address of free block in flash
++* Outputs: PASS=0 / FAIL=1
++* Description: writes all the pages of the block in cache to flash
++*
++* NOTE:need to make sure this works ok when cache is limited
++* to a partial block. This is where copy-back would be
++* activated. This would require knowing which pages in the
++* cached block are clean/dirty.Right now we only know if
++* the whole block is clean/dirty.
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Cache_Write_All(u8 *pData, u64 blk_addr)
++{
++ u16 wResult = PASS;
++ u32 Block;
++ u16 Page;
++ u16 PageCount;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ nand_dbg_print(NAND_DBG_DEBUG, "This block %d going to be written "
++ "on %d\n", cache_block_to_write,
++ (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize));
++
++ Block = BLK_FROM_ADDR(blk_addr);
++ Page = PAGE_FROM_ADDR(blk_addr, Block);
++ PageCount = Cache.pages_per_item;
++
++#if CMD_DMA
++ if (FAIL == GLOB_LLD_Write_Page_Main_cdma(pData,
++ Block, Page, PageCount)) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, "
++ "Function: %s, new Bad Block %d generated! "
++ "Need Bad Block replacing.\n",
++ __FILE__, __LINE__, __func__, Block);
++ wResult = FAIL;
++ }
++ ftl_cmd_cnt++;
++#else
++ if (FAIL == GLOB_LLD_Write_Page_Main(pData, Block, Page, PageCount)) {
++ nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s,"
++ " Line %d, Function %s, new Bad Block %d generated!"
++ "Need Bad Block replacing.\n",
++ __FILE__, __LINE__, __func__, Block);
++ wResult = FAIL;
++ }
++#endif
++ return wResult;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Cache_Update_Block
++* Inputs: pointer to buffer,page address,block address
++* Outputs: PASS=0 / FAIL=1
++* Description: It updates the cache
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Cache_Update_Block(u8 *pData,
++ u64 old_page_addr, u64 blk_addr)
++{
++ int i, j;
++ u8 *buf = pData;
++ int wResult = PASS;
++ int wFoundInCache;
++ u64 page_addr;
++ u64 addr;
++ u64 old_blk_addr;
++ u16 page_offset;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ old_blk_addr = (u64)(old_page_addr >>
++ DeviceInfo.nBitsInBlockDataSize) * DeviceInfo.wBlockDataSize;
++ page_offset = (u16)(GLOB_u64_Remainder(old_page_addr, 2) >>
++ DeviceInfo.nBitsInPageDataSize);
++
++ for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
++ page_addr = old_blk_addr + i * DeviceInfo.wPageDataSize;
++ if (i != page_offset) {
++ wFoundInCache = FAIL;
++ for (j = 0; j < CACHE_ITEM_NUM; j++) {
++ addr = Cache.array[j].address;
++ addr = FTL_Get_Physical_Block_Addr(addr) +
++ GLOB_u64_Remainder(addr, 2);
++ if ((addr >= page_addr) && addr <
++ (page_addr + Cache.cache_item_size)) {
++ wFoundInCache = PASS;
++ buf = Cache.array[j].buf;
++ Cache.array[j].changed = SET;
++#if CMD_DMA
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ int_cache[ftl_cmd_cnt].item = j;
++ int_cache[ftl_cmd_cnt].cache.address =
++ Cache.array[j].address;
++ int_cache[ftl_cmd_cnt].cache.changed =
++ Cache.array[j].changed;
++#endif
++#endif
++ break;
++ }
++ }
++ if (FAIL == wFoundInCache) {
++ if (ERR == FTL_Cache_Read_All(g_pTempBuf,
++ page_addr)) {
++ wResult = FAIL;
++ break;
++ }
++ buf = g_pTempBuf;
++ }
++ } else {
++ buf = pData;
++ }
++
++ if (FAIL == FTL_Cache_Write_All(buf,
++ blk_addr + (page_addr - old_blk_addr))) {
++ wResult = FAIL;
++ break;
++ }
++ }
++
++ return wResult;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Copy_Block
++* Inputs: source block address
++* Destination block address
++* Outputs: PASS=0 / FAIL=1
++* Description: used only for static wear leveling to move the block
++* containing static data to new blocks(more worn)
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int FTL_Copy_Block(u64 old_blk_addr, u64 blk_addr)
++{
++ int i, r1, r2, wResult = PASS;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
++ r1 = FTL_Cache_Read_All(g_pTempBuf, old_blk_addr +
++ i * DeviceInfo.wPageDataSize);
++ r2 = FTL_Cache_Write_All(g_pTempBuf, blk_addr +
++ i * DeviceInfo.wPageDataSize);
++ if ((ERR == r1) || (FAIL == r2)) {
++ wResult = FAIL;
++ break;
++ }
++ }
++
++ return wResult;
++}
++
++/* Search the block table to find out the least wear block and then return it */
++static u32 find_least_worn_blk_for_l2_cache(void)
++{
++ int i;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u8 least_wear_cnt = MAX_BYTE_VALUE;
++ u32 least_wear_blk_idx = MAX_U32_VALUE;
++ u32 phy_idx;
++
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
++ if (IS_SPARE_BLOCK(i)) {
++ phy_idx = (u32)((~BAD_BLOCK) & pbt[i]);
++ if (phy_idx > DeviceInfo.wSpectraEndBlock)
++ printk(KERN_ERR "find_least_worn_blk_for_l2_cache: "
++ "Too big phy block num (%d)\n", phy_idx);
++ if (g_pWearCounter[phy_idx -DeviceInfo.wSpectraStartBlock] < least_wear_cnt) {
++ least_wear_cnt = g_pWearCounter[phy_idx - DeviceInfo.wSpectraStartBlock];
++ least_wear_blk_idx = i;
++ }
++ }
++ }
++
++ nand_dbg_print(NAND_DBG_WARN,
++ "find_least_worn_blk_for_l2_cache: "
++ "find block %d with least worn counter (%d)\n",
++ least_wear_blk_idx, least_wear_cnt);
++
++ return least_wear_blk_idx;
++}
++
++
++
++/* Get blocks for Level2 Cache */
++static int get_l2_cache_blks(void)
++{
++ int n;
++ u32 blk;
++ u32 *pbt = (u32 *)g_pBlockTable;
++
++ for (n = 0; n < BLK_NUM_FOR_L2_CACHE; n++) {
++ blk = find_least_worn_blk_for_l2_cache();
++ if (blk > DeviceInfo.wDataBlockNum) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "find_least_worn_blk_for_l2_cache: "
++ "No enough free NAND blocks (n: %d) for L2 Cache!\n", n);
++ return FAIL;
++ }
++ /* Tag the free block as discard in block table */
++ pbt[blk] = (pbt[blk] & (~BAD_BLOCK)) | DISCARD_BLOCK;
++ /* Add the free block to the L2 Cache block array */
++ cache_l2.blk_array[n] = pbt[blk] & (~BAD_BLOCK);
++ }
++
++ return PASS;
++}
++
++static int erase_l2_cache_blocks(void)
++{
++ int i, ret = PASS;
++ u32 pblk, lblk;
++ u64 addr;
++ u32 *pbt = (u32 *)g_pBlockTable;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++) {
++ pblk = cache_l2.blk_array[i];
++
++ /* If the L2 cache block is invalid, then just skip it */
++ if (MAX_U32_VALUE == pblk)
++ continue;
++
++ BUG_ON(pblk > DeviceInfo.wSpectraEndBlock);
++
++ addr = (u64)pblk << DeviceInfo.nBitsInBlockDataSize;
++ if (PASS == GLOB_FTL_Block_Erase(addr)) {
++ /* Get logical block number of the erased block */
++ lblk = FTL_Get_Block_Index(pblk);
++ BUG_ON(BAD_BLOCK == lblk);
++ /* Tag it as free in the block table */
++ pbt[lblk] &= (u32)(~DISCARD_BLOCK);
++ pbt[lblk] |= (u32)(SPARE_BLOCK);
++ } else {
++ MARK_BLOCK_AS_BAD(pbt[lblk]);
++ ret = ERR;
++ }
++ }
++
++ return ret;
++}
++
++/*
++ * Merge the valid data page in the L2 cache blocks into NAND.
++*/
++static int flush_l2_cache(void)
++{
++ struct list_head *p;
++ struct spectra_l2_cache_list *pnd, *tmp_pnd;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 phy_blk, l2_blk;
++ u64 addr;
++ u16 l2_page;
++ int i, ret = PASS;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (list_empty(&cache_l2.table.list)) /* No data to flush */
++ return ret;
++
++ //dump_cache_l2_table();
++
++ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ FTL_Write_IN_Progress_Block_Table_Page();
++ }
++
++ list_for_each(p, &cache_l2.table.list) {
++ pnd = list_entry(p, struct spectra_l2_cache_list, list);
++ if (IS_SPARE_BLOCK(pnd->logical_blk_num) ||
++ IS_BAD_BLOCK(pnd->logical_blk_num) ||
++ IS_DISCARDED_BLOCK(pnd->logical_blk_num)) {
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
++ memset(cache_l2_blk_buf, 0xff, DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize);
++ } else {
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
++ phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
++ ret = GLOB_LLD_Read_Page_Main(cache_l2_blk_buf,
++ phy_blk, 0, DeviceInfo.wPagesPerBlock);
++ if (ret == FAIL) {
++ printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
++ }
++ }
++
++ for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
++ if (pnd->pages_array[i] != MAX_U32_VALUE) {
++ l2_blk = cache_l2.blk_array[(pnd->pages_array[i] >> 16) & 0xffff];
++ l2_page = pnd->pages_array[i] & 0xffff;
++ ret = GLOB_LLD_Read_Page_Main(cache_l2_page_buf, l2_blk, l2_page, 1);
++ if (ret == FAIL) {
++ printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
++ }
++ memcpy(cache_l2_blk_buf + i * DeviceInfo.wPageDataSize, cache_l2_page_buf, DeviceInfo.wPageDataSize);
++ }
++ }
++
++ /* Find a free block and tag the original block as discarded */
++ addr = (u64)pnd->logical_blk_num << DeviceInfo.nBitsInBlockDataSize;
++ ret = FTL_Replace_Block(addr);
++ if (ret == FAIL) {
++ printk(KERN_ERR "FTL_Replace_Block fail in %s, Line %d\n", __FILE__, __LINE__);
++ }
++
++ /* Write back the updated data into NAND */
++ phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
++ if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "Program NAND block %d fail in %s, Line %d\n",
++ phy_blk, __FILE__, __LINE__);
++ /* This may not be really a bad block. So just tag it as discarded. */
++ /* Then it has a chance to be erased when garbage collection. */
++ /* If it is really bad, then the erase will fail and it will be marked */
++ /* as bad then. Otherwise it will be marked as free and can be used again */
++ MARK_BLK_AS_DISCARD(pbt[pnd->logical_blk_num]);
++ /* Find another free block and write it again */
++ FTL_Replace_Block(addr);
++ phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
++ if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
++ printk(KERN_ERR "Failed to write back block %d when flush L2 cache."
++ "Some data will be lost!\n", phy_blk);
++ MARK_BLOCK_AS_BAD(pbt[pnd->logical_blk_num]);
++ }
++ } else {
++ /* tag the new free block as used block */
++ pbt[pnd->logical_blk_num] &= (~SPARE_BLOCK);
++ }
++ }
++
++ /* Destroy the L2 Cache table and free the memory of all nodes */
++ list_for_each_entry_safe(pnd, tmp_pnd, &cache_l2.table.list, list) {
++ list_del(&pnd->list);
++ kfree(pnd);
++ }
++
++ /* Erase discard L2 cache blocks */
++ if (erase_l2_cache_blocks() != PASS)
++ nand_dbg_print(NAND_DBG_WARN,
++ " Erase L2 cache blocks error in %s, Line %d\n",
++ __FILE__, __LINE__);
++
++ /* Init the Level2 Cache data structure */
++ for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
++ cache_l2.blk_array[i] = MAX_U32_VALUE;
++ cache_l2.cur_blk_idx = 0;
++ cache_l2.cur_page_num = 0;
++ INIT_LIST_HEAD(&cache_l2.table.list);
++ cache_l2.table.logical_blk_num = MAX_U32_VALUE;
++
++ return ret;
++}
++
++/*
++ * Write back a changed victim cache item to the Level2 Cache
++ * and update the L2 Cache table to map the change.
++ * If the L2 Cache is full, then start to do the L2 Cache flush.
++*/
++static int write_back_to_l2_cache(u8 *buf, u64 logical_addr)
++{
++ u32 logical_blk_num;
++ u16 logical_page_num;
++ struct list_head *p;
++ struct spectra_l2_cache_list *pnd, *pnd_new;
++ u32 node_size;
++ int i, found;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ /*
++ * If Level2 Cache table is empty, then it means either:
++ * 1. This is the first time that the function called after FTL_init
++ * or
++ * 2. The Level2 Cache has just been flushed
++ *
++ * So, 'steal' some free blocks from NAND for L2 Cache using
++ * by just mask them as discard in the block table
++ */
++ if (list_empty(&cache_l2.table.list)) {
++ BUG_ON(cache_l2.cur_blk_idx != 0);
++ BUG_ON(cache_l2.cur_page_num!= 0);
++ BUG_ON(cache_l2.table.logical_blk_num != MAX_U32_VALUE);
++ if (FAIL == get_l2_cache_blks()) {
++ GLOB_FTL_Garbage_Collection();
++ if (FAIL == get_l2_cache_blks()) {
++ printk(KERN_ALERT "Fail to get L2 cache blks!\n");
++ return FAIL;
++ }
++ }
++ }
++
++ logical_blk_num = BLK_FROM_ADDR(logical_addr);
++ logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
++ BUG_ON(logical_blk_num == MAX_U32_VALUE);
++
++ /* Write the cache item data into the current position of L2 Cache */
++#if CMD_DMA
++ /*
++ * TODO
++ */
++#else
++ if (FAIL == GLOB_LLD_Write_Page_Main(buf,
++ cache_l2.blk_array[cache_l2.cur_blk_idx],
++ cache_l2.cur_page_num, 1)) {
++ nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
++ "%s, Line %d, new Bad Block %d generated!\n",
++ __FILE__, __LINE__,
++ cache_l2.blk_array[cache_l2.cur_blk_idx]);
++
++ /* TODO: tag the current block as bad and try again */
++
++ return FAIL;
++ }
++#endif
++
++ /*
++ * Update the L2 Cache table.
++ *
++ * First seaching in the table to see whether the logical block
++ * has been mapped. If not, then kmalloc a new node for the
++ * logical block, fill data, and then insert it to the list.
++ * Otherwise, just update the mapped node directly.
++ */
++ found = 0;
++ list_for_each(p, &cache_l2.table.list) {
++ pnd = list_entry(p, struct spectra_l2_cache_list, list);
++ if (pnd->logical_blk_num == logical_blk_num) {
++ pnd->pages_array[logical_page_num] =
++ (cache_l2.cur_blk_idx << 16) |
++ cache_l2.cur_page_num;
++ found = 1;
++ break;
++ }
++ }
++ if (!found) { /* Create new node for the logical block here */
++
++ /* The logical pages to physical pages map array is
++ * located at the end of struct spectra_l2_cache_list.
++ */
++ node_size = sizeof(struct spectra_l2_cache_list) +
++ sizeof(u32) * DeviceInfo.wPagesPerBlock;
++ pnd_new = kmalloc(node_size, GFP_ATOMIC);
++ if (!pnd_new) {
++ printk(KERN_ERR "Failed to kmalloc in %s Line %d\n",
++ __FILE__, __LINE__);
++ /*
++ * TODO: Need to flush all the L2 cache into NAND ASAP
++ * since no memory available here
++ */
++ }
++ pnd_new->logical_blk_num = logical_blk_num;
++ for (i = 0; i < DeviceInfo.wPagesPerBlock; i++)
++ pnd_new->pages_array[i] = MAX_U32_VALUE;
++ pnd_new->pages_array[logical_page_num] =
++ (cache_l2.cur_blk_idx << 16) | cache_l2.cur_page_num;
++ list_add(&pnd_new->list, &cache_l2.table.list);
++ }
++
++ /* Increasing the current position pointer of the L2 Cache */
++ cache_l2.cur_page_num++;
++ if (cache_l2.cur_page_num >= DeviceInfo.wPagesPerBlock) {
++ cache_l2.cur_blk_idx++;
++ if (cache_l2.cur_blk_idx >= BLK_NUM_FOR_L2_CACHE) {
++ /* The L2 Cache is full. Need to flush it now */
++ nand_dbg_print(NAND_DBG_WARN,
++ "L2 Cache is full, will start to flush it\n");
++ flush_l2_cache();
++ } else {
++ cache_l2.cur_page_num = 0;
++ }
++ }
++
++ return PASS;
++}
++
++/*
++ * Seach in the Level2 Cache table to find the cache item.
++ * If find, read the data from the NAND page of L2 Cache,
++ * Otherwise, return FAIL.
++ */
++static int search_l2_cache(u8 *buf, u64 logical_addr)
++{
++ u32 logical_blk_num;
++ u16 logical_page_num;
++ struct list_head *p;
++ struct spectra_l2_cache_list *pnd;
++ u32 tmp = MAX_U32_VALUE;
++ u32 phy_blk;
++ u16 phy_page;
++ int ret = FAIL;
++
++ logical_blk_num = BLK_FROM_ADDR(logical_addr);
++ logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
++
++ list_for_each(p, &cache_l2.table.list) {
++ pnd = list_entry(p, struct spectra_l2_cache_list, list);
++ if (pnd->logical_blk_num == logical_blk_num) {
++ tmp = pnd->pages_array[logical_page_num];
++ break;
++ }
++ }
++
++ if (tmp != MAX_U32_VALUE) { /* Found valid map */
++ phy_blk = cache_l2.blk_array[(tmp >> 16) & 0xFFFF];
++ phy_page = tmp & 0xFFFF;
++#if CMD_DMA
++ /* TODO */
++#else
++ ret = GLOB_LLD_Read_Page_Main(buf, phy_blk, phy_page, 1);
++#endif
++ }
++
++ return ret;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Cache_Write_Back
++* Inputs: pointer to data cached in sys memory
++* address of free block in flash
++* Outputs: PASS=0 / FAIL=1
++* Description: writes all the pages of Cache Block to flash
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr)
++{
++ int i, j, iErase;
++ u64 old_page_addr, addr, phy_addr;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 lba;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ old_page_addr = FTL_Get_Physical_Block_Addr(blk_addr) +
++ GLOB_u64_Remainder(blk_addr, 2);
++
++ iErase = (FAIL == FTL_Replace_Block(blk_addr)) ? PASS : FAIL;
++
++ pbt[BLK_FROM_ADDR(blk_addr)] &= (~SPARE_BLOCK);
++
++#if CMD_DMA
++ p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index = (u32)(blk_addr >>
++ DeviceInfo.nBitsInBlockDataSize);
++ p_BTableChangesDelta->BT_Entry_Value =
++ pbt[(u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize)];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++#endif
++
++ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ FTL_Write_IN_Progress_Block_Table_Page();
++ }
++
++ for (i = 0; i < RETRY_TIMES; i++) {
++ if (PASS == iErase) {
++ phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
++ if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
++ lba = BLK_FROM_ADDR(blk_addr);
++ MARK_BLOCK_AS_BAD(pbt[lba]);
++ i = RETRY_TIMES;
++ break;
++ }
++ }
++
++ for (j = 0; j < CACHE_ITEM_NUM; j++) {
++ addr = Cache.array[j].address;
++ if ((addr <= blk_addr) &&
++ ((addr + Cache.cache_item_size) > blk_addr))
++ cache_block_to_write = j;
++ }
++
++ phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
++ if (PASS == FTL_Cache_Update_Block(pData,
++ old_page_addr, phy_addr)) {
++ cache_block_to_write = UNHIT_CACHE_ITEM;
++ break;
++ } else {
++ iErase = PASS;
++ }
++ }
++
++ if (i >= RETRY_TIMES) {
++ if (ERR == FTL_Flash_Error_Handle(pData,
++ old_page_addr, blk_addr))
++ return ERR;
++ else
++ return FAIL;
++ }
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Cache_Write_Page
++* Inputs: Pointer to buffer, page address, cache block number
++* Outputs: PASS=0 / FAIL=1
++* Description: It writes the data in Cache Block
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static void FTL_Cache_Write_Page(u8 *pData, u64 page_addr,
++ u8 cache_blk, u16 flag)
++{
++ u8 *pDest;
++ u64 addr;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ addr = Cache.array[cache_blk].address;
++ pDest = Cache.array[cache_blk].buf;
++
++ pDest += (unsigned long)(page_addr - addr);
++ Cache.array[cache_blk].changed = SET;
++#if CMD_DMA
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ int_cache[ftl_cmd_cnt].item = cache_blk;
++ int_cache[ftl_cmd_cnt].cache.address =
++ Cache.array[cache_blk].address;
++ int_cache[ftl_cmd_cnt].cache.changed =
++ Cache.array[cache_blk].changed;
++#endif
++ GLOB_LLD_MemCopy_CMD(pDest, pData, DeviceInfo.wPageDataSize, flag);
++ ftl_cmd_cnt++;
++#else
++ memcpy(pDest, pData, DeviceInfo.wPageDataSize);
++#endif
++ if (Cache.array[cache_blk].use_cnt < MAX_WORD_VALUE)
++ Cache.array[cache_blk].use_cnt++;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Cache_Write
++* Inputs: none
++* Outputs: PASS=0 / FAIL=1
++* Description: It writes least frequently used Cache block to flash if it
++* has been changed
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Cache_Write(void)
++{
++ int i, bResult = PASS;
++ u16 bNO, least_count = 0xFFFF;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ FTL_Calculate_LRU();
++
++ bNO = Cache.LRU;
++ nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: "
++ "Least used cache block is %d\n", bNO);
++
++ if (Cache.array[bNO].changed != SET)
++ return bResult;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: Cache"
++ " Block %d containing logical block %d is dirty\n",
++ bNO,
++ (u32)(Cache.array[bNO].address >>
++ DeviceInfo.nBitsInBlockDataSize));
++#if CMD_DMA
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ int_cache[ftl_cmd_cnt].item = bNO;
++ int_cache[ftl_cmd_cnt].cache.address =
++ Cache.array[bNO].address;
++ int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
++#endif
++#endif
++ bResult = write_back_to_l2_cache(Cache.array[bNO].buf,
++ Cache.array[bNO].address);
++ if (bResult != ERR)
++ Cache.array[bNO].changed = CLEAR;
++
++ least_count = Cache.array[bNO].use_cnt;
++
++ for (i = 0; i < CACHE_ITEM_NUM; i++) {
++ if (i == bNO)
++ continue;
++ if (Cache.array[i].use_cnt > 0)
++ Cache.array[i].use_cnt -= least_count;
++ }
++
++ return bResult;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Cache_Read
++* Inputs: Page address
++* Outputs: PASS=0 / FAIL=1
++* Description: It reads the block from device in Cache Block
++* Set the LRU count to 1
++* Mark the Cache Block as clean
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Cache_Read(u64 logical_addr)
++{
++ u64 item_addr, phy_addr;
++ u16 num;
++ int ret;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ num = Cache.LRU; /* The LRU cache item will be overwritten */
++
++ item_addr = (u64)GLOB_u64_Div(logical_addr, Cache.cache_item_size) *
++ Cache.cache_item_size;
++ Cache.array[num].address = item_addr;
++ Cache.array[num].use_cnt = 1;
++ Cache.array[num].changed = CLEAR;
++
++#if CMD_DMA
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ int_cache[ftl_cmd_cnt].item = num;
++ int_cache[ftl_cmd_cnt].cache.address =
++ Cache.array[num].address;
++ int_cache[ftl_cmd_cnt].cache.changed =
++ Cache.array[num].changed;
++#endif
++#endif
++ /*
++ * Search in L2 Cache. If hit, fill data into L1 Cache item buffer,
++ * Otherwise, read it from NAND
++ */
++ ret = search_l2_cache(Cache.array[num].buf, logical_addr);
++ if (PASS == ret) /* Hit in L2 Cache */
++ return ret;
++
++ /* Compute the physical start address of NAND device according to */
++ /* the logical start address of the cache item (LRU cache item) */
++ phy_addr = FTL_Get_Physical_Block_Addr(item_addr) +
++ GLOB_u64_Remainder(item_addr, 2);
++
++ return FTL_Cache_Read_All(Cache.array[num].buf, phy_addr);
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Check_Block_Table
++* Inputs: ?
++* Outputs: PASS=0 / FAIL=1
++* Description: It checks the correctness of each block table entry
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Check_Block_Table(int wOldTable)
++{
++ u32 i;
++ int wResult = PASS;
++ u32 blk_idx;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u8 *pFlag = flag_check_blk_table;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (NULL != pFlag) {
++ memset(pFlag, FAIL, DeviceInfo.wDataBlockNum);
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
++ blk_idx = (u32)(pbt[i] & (~BAD_BLOCK));
++
++ /*
++ * 20081006/KBV - Changed to pFlag[i] reference
++ * to avoid buffer overflow
++ */
++
++ /*
++ * 2008-10-20 Yunpeng Note: This change avoid
++ * buffer overflow, but changed function of
++ * the code, so it should be re-write later
++ */
++ if ((blk_idx > DeviceInfo.wSpectraEndBlock) ||
++ PASS == pFlag[i]) {
++ wResult = FAIL;
++ break;
++ } else {
++ pFlag[i] = PASS;
++ }
++ }
++ }
++
++ return wResult;
++}
++
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Write_Block_Table
++* Inputs: flasg
++* Outputs: 0=Block Table was updated. No write done. 1=Block write needs to
++* happen. -1 Error
++* Description: It writes the block table
++* Block table always mapped to LBA 0 which inturn mapped
++* to any physical block
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Write_Block_Table(int wForce)
++{
++ u32 *pbt = (u32 *)g_pBlockTable;
++ int wSuccess = PASS;
++ u32 wTempBlockTableIndex;
++ u16 bt_pages, new_bt_offset;
++ u8 blockchangeoccured = 0;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
++
++ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus)
++ return 0;
++
++ if (PASS == wForce) {
++ g_wBlockTableOffset =
++ (u16)(DeviceInfo.wPagesPerBlock - bt_pages);
++#if CMD_DMA
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
++ p_BTableChangesDelta->g_wBlockTableOffset =
++ g_wBlockTableOffset;
++ p_BTableChangesDelta->ValidFields = 0x01;
++#endif
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Inside FTL_Write_Block_Table: block %d Page:%d\n",
++ g_wBlockTableIndex, g_wBlockTableOffset);
++
++ do {
++ new_bt_offset = g_wBlockTableOffset + bt_pages + 1;
++ if ((0 == (new_bt_offset % DeviceInfo.wPagesPerBlock)) ||
++ (new_bt_offset > DeviceInfo.wPagesPerBlock) ||
++ (FAIL == wSuccess)) {
++ wTempBlockTableIndex = FTL_Replace_Block_Table();
++ if (BAD_BLOCK == wTempBlockTableIndex)
++ return ERR;
++ if (!blockchangeoccured) {
++ bt_block_changed = 1;
++ blockchangeoccured = 1;
++ }
++
++ g_wBlockTableIndex = wTempBlockTableIndex;
++ g_wBlockTableOffset = 0;
++ pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
++#if CMD_DMA
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->g_wBlockTableOffset =
++ g_wBlockTableOffset;
++ p_BTableChangesDelta->g_wBlockTableIndex =
++ g_wBlockTableIndex;
++ p_BTableChangesDelta->ValidFields = 0x03;
++
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free +=
++ sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index =
++ BLOCK_TABLE_INDEX;
++ p_BTableChangesDelta->BT_Entry_Value =
++ pbt[BLOCK_TABLE_INDEX];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++#endif
++ }
++
++ wSuccess = FTL_Write_Block_Table_Data();
++ if (FAIL == wSuccess)
++ MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
++ } while (FAIL == wSuccess);
++
++ g_cBlockTableStatus = CURRENT_BLOCK_TABLE;
++
++ return 1;
++}
++
++/******************************************************************
++* Function: GLOB_FTL_Flash_Format
++* Inputs: none
++* Outputs: PASS
++* Description: The block table stores bad block info, including MDF+
++* blocks gone bad over the ages. Therefore, if we have a
++* block table in place, then use it to scan for bad blocks
++* If not, then scan for MDF.
++* Now, a block table will only be found if spectra was already
++* being used. For a fresh flash, we'll go thru scanning for
++* MDF. If spectra was being used, then there is a chance that
++* the MDF has been corrupted. Spectra avoids writing to the
++* first 2 bytes of the spare area to all pages in a block. This
++* covers all known flash devices. However, since flash
++* manufacturers have no standard of where the MDF is stored,
++* this cannot guarantee that the MDF is protected for future
++* devices too. The initial scanning for the block table assures
++* this. It is ok even if the block table is outdated, as all
++* we're looking for are bad block markers.
++* Use this when mounting a file system or starting a
++* new flash.
++*
++*********************************************************************/
++static int FTL_Format_Flash(u8 valid_block_table)
++{
++ u32 i, j;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 tempNode;
++ int ret;
++
++#if CMD_DMA
++ u32 *pbtStartingCopy = (u32 *)g_pBTStartingCopy;
++ if (ftl_cmd_cnt)
++ return FAIL;
++#endif
++
++ if (FAIL == FTL_Check_Block_Table(FAIL))
++ valid_block_table = 0;
++
++ if (valid_block_table) {
++ u8 switched = 1;
++ u32 block, k;
++
++ k = DeviceInfo.wSpectraStartBlock;
++ while (switched && (k < DeviceInfo.wSpectraEndBlock)) {
++ switched = 0;
++ k++;
++ for (j = DeviceInfo.wSpectraStartBlock, i = 0;
++ j <= DeviceInfo.wSpectraEndBlock;
++ j++, i++) {
++ block = (pbt[i] & ~BAD_BLOCK) -
++ DeviceInfo.wSpectraStartBlock;
++ if (block != i) {
++ switched = 1;
++ tempNode = pbt[i];
++ pbt[i] = pbt[block];
++ pbt[block] = tempNode;
++ }
++ }
++ }
++ if ((k == DeviceInfo.wSpectraEndBlock) && switched)
++ valid_block_table = 0;
++ }
++
++ if (!valid_block_table) {
++ memset(g_pBlockTable, 0,
++ DeviceInfo.wDataBlockNum * sizeof(u32));
++ memset(g_pWearCounter, 0,
++ DeviceInfo.wDataBlockNum * sizeof(u8));
++ if (DeviceInfo.MLCDevice)
++ memset(g_pReadCounter, 0,
++ DeviceInfo.wDataBlockNum * sizeof(u16));
++#if CMD_DMA
++ memset(g_pBTStartingCopy, 0,
++ DeviceInfo.wDataBlockNum * sizeof(u32));
++ memset(g_pWearCounterCopy, 0,
++ DeviceInfo.wDataBlockNum * sizeof(u8));
++ if (DeviceInfo.MLCDevice)
++ memset(g_pReadCounterCopy, 0,
++ DeviceInfo.wDataBlockNum * sizeof(u16));
++#endif
++ for (j = DeviceInfo.wSpectraStartBlock, i = 0;
++ j <= DeviceInfo.wSpectraEndBlock;
++ j++, i++) {
++ if (GLOB_LLD_Get_Bad_Block((u32)j))
++ pbt[i] = (u32)(BAD_BLOCK | j);
++ }
++ }
++
++ nand_dbg_print(NAND_DBG_WARN, "Erasing all blocks in the NAND\n");
++
++ for (j = DeviceInfo.wSpectraStartBlock, i = 0;
++ j <= DeviceInfo.wSpectraEndBlock;
++ j++, i++) {
++ if ((pbt[i] & BAD_BLOCK) != BAD_BLOCK) {
++ ret = GLOB_LLD_Erase_Block(j);
++ if (FAIL == ret) {
++ pbt[i] = (u32)(j);
++ MARK_BLOCK_AS_BAD(pbt[i]);
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, "
++ "Function: %s, new Bad Block %d generated!\n",
++ __FILE__, __LINE__, __func__, (int)j);
++ } else {
++ pbt[i] = (u32)(SPARE_BLOCK | j);
++ }
++ }
++#if CMD_DMA
++ pbtStartingCopy[i] = pbt[i];
++#endif
++ }
++
++ g_wBlockTableOffset = 0;
++ for (i = 0; (i <= (DeviceInfo.wSpectraEndBlock -
++ DeviceInfo.wSpectraStartBlock))
++ && ((pbt[i] & BAD_BLOCK) == BAD_BLOCK); i++)
++ ;
++ if (i > (DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock)) {
++ printk(KERN_ERR "All blocks bad!\n");
++ return FAIL;
++ } else {
++ g_wBlockTableIndex = pbt[i] & ~BAD_BLOCK;
++ if (i != BLOCK_TABLE_INDEX) {
++ tempNode = pbt[i];
++ pbt[i] = pbt[BLOCK_TABLE_INDEX];
++ pbt[BLOCK_TABLE_INDEX] = tempNode;
++ }
++ }
++ pbt[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);
++
++#if CMD_DMA
++ pbtStartingCopy[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);
++#endif
++
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ memset(g_pBTBlocks, 0xFF,
++ (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32));
++ g_pBTBlocks[FIRST_BT_ID-FIRST_BT_ID] = g_wBlockTableIndex;
++ FTL_Write_Block_Table(FAIL);
++
++ for (i = 0; i < CACHE_ITEM_NUM; i++) {
++ Cache.array[i].address = NAND_CACHE_INIT_ADDR;
++ Cache.array[i].use_cnt = 0;
++ Cache.array[i].changed = CLEAR;
++ }
++
++#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
++ memcpy((void *)&cache_start_copy, (void *)&Cache,
++ sizeof(struct flash_cache_tag));
++#endif
++ return PASS;
++}
++
++static int force_format_nand(void)
++{
++ u32 i;
++
++ /* Force erase the whole unprotected physical partiton of NAND */
++ printk(KERN_ALERT "Start to force erase whole NAND device ...\n");
++ printk(KERN_ALERT "From phyical block %d to %d\n",
++ DeviceInfo.wSpectraStartBlock, DeviceInfo.wSpectraEndBlock);
++ for (i = DeviceInfo.wSpectraStartBlock; i <= DeviceInfo.wSpectraEndBlock; i++) {
++ if (GLOB_LLD_Erase_Block(i))
++ printk(KERN_ERR "Failed to force erase NAND block %d\n", i);
++ }
++ printk(KERN_ALERT "Force Erase ends. Please reboot the system ...\n");
++ while(1);
++
++ return PASS;
++}
++
++int GLOB_FTL_Flash_Format(void)
++{
++ //return FTL_Format_Flash(1);
++ return force_format_nand();
++
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Search_Block_Table_IN_Block
++* Inputs: Block Number
++* Pointer to page
++* Outputs: PASS / FAIL
++* Page contatining the block table
++* Description: It searches the block table in the block
++* passed as an argument.
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
++ u8 BT_Tag, u16 *Page)
++{
++ u16 i, j, k;
++ u16 Result = PASS;
++ u16 Last_IPF = 0;
++ u8 BT_Found = 0;
++ u8 *tagarray;
++ u8 *tempbuf = tmp_buf_search_bt_in_block;
++ u8 *pSpareBuf = spare_buf_search_bt_in_block;
++ u8 *pSpareBufBTLastPage = spare_buf_bt_search_bt_in_block;
++ u8 bt_flag_last_page = 0xFF;
++ u8 search_in_previous_pages = 0;
++ u16 bt_pages;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Searching block table in %u block\n",
++ (unsigned int)BT_Block);
++
++ bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
++
++ for (i = bt_pages; i < DeviceInfo.wPagesPerBlock;
++ i += (bt_pages + 1)) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Searching last IPF: %d\n", i);
++ Result = GLOB_LLD_Read_Page_Main_Polling(tempbuf,
++ BT_Block, i, 1);
++
++ if (0 == memcmp(tempbuf, g_pIPF, DeviceInfo.wPageDataSize)) {
++ if ((i + bt_pages + 1) < DeviceInfo.wPagesPerBlock) {
++ continue;
++ } else {
++ search_in_previous_pages = 1;
++ Last_IPF = i;
++ }
++ }
++
++ if (!search_in_previous_pages) {
++ if (i != bt_pages) {
++ i -= (bt_pages + 1);
++ Last_IPF = i;
++ }
++ }
++
++ if (0 == Last_IPF)
++ break;
++
++ if (!search_in_previous_pages) {
++ i = i + 1;
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Reading the spare area of Block %u Page %u",
++ (unsigned int)BT_Block, i);
++ Result = GLOB_LLD_Read_Page_Spare(pSpareBuf,
++ BT_Block, i, 1);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Reading the spare area of Block %u Page %u",
++ (unsigned int)BT_Block, i + bt_pages - 1);
++ Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
++ BT_Block, i + bt_pages - 1, 1);
++
++ k = 0;
++ j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
++ if (j) {
++ for (; k < j; k++) {
++ if (tagarray[k] == BT_Tag)
++ break;
++ }
++ }
++
++ if (k < j)
++ bt_flag = tagarray[k];
++ else
++ Result = FAIL;
++
++ if (Result == PASS) {
++ k = 0;
++ j = FTL_Extract_Block_Table_Tag(
++ pSpareBufBTLastPage, &tagarray);
++ if (j) {
++ for (; k < j; k++) {
++ if (tagarray[k] == BT_Tag)
++ break;
++ }
++ }
++
++ if (k < j)
++ bt_flag_last_page = tagarray[k];
++ else
++ Result = FAIL;
++
++ if (Result == PASS) {
++ if (bt_flag == bt_flag_last_page) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Block table is found"
++ " in page after IPF "
++ "at block %d "
++ "page %d\n",
++ (int)BT_Block, i);
++ BT_Found = 1;
++ *Page = i;
++ g_cBlockTableStatus =
++ CURRENT_BLOCK_TABLE;
++ break;
++ } else {
++ Result = FAIL;
++ }
++ }
++ }
++ }
++
++ if (search_in_previous_pages)
++ i = i - bt_pages;
++ else
++ i = i - (bt_pages + 1);
++
++ Result = PASS;
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Reading the spare area of Block %d Page %d",
++ (int)BT_Block, i);
++
++ Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Reading the spare area of Block %u Page %u",
++ (unsigned int)BT_Block, i + bt_pages - 1);
++
++ Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
++ BT_Block, i + bt_pages - 1, 1);
++
++ k = 0;
++ j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
++ if (j) {
++ for (; k < j; k++) {
++ if (tagarray[k] == BT_Tag)
++ break;
++ }
++ }
++
++ if (k < j)
++ bt_flag = tagarray[k];
++ else
++ Result = FAIL;
++
++ if (Result == PASS) {
++ k = 0;
++ j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
++ &tagarray);
++ if (j) {
++ for (; k < j; k++) {
++ if (tagarray[k] == BT_Tag)
++ break;
++ }
++ }
++
++ if (k < j) {
++ bt_flag_last_page = tagarray[k];
++ } else {
++ Result = FAIL;
++ break;
++ }
++
++ if (Result == PASS) {
++ if (bt_flag == bt_flag_last_page) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Block table is found "
++ "in page prior to IPF "
++ "at block %u page %d\n",
++ (unsigned int)BT_Block, i);
++ BT_Found = 1;
++ *Page = i;
++ g_cBlockTableStatus =
++ IN_PROGRESS_BLOCK_TABLE;
++ break;
++ } else {
++ Result = FAIL;
++ break;
++ }
++ }
++ }
++ }
++
++ if (Result == FAIL) {
++ if ((Last_IPF > bt_pages) && (i < Last_IPF) && (!BT_Found)) {
++ BT_Found = 1;
++ *Page = i - (bt_pages + 1);
++ }
++ if ((Last_IPF == bt_pages) && (i < Last_IPF) && (!BT_Found))
++ goto func_return;
++ }
++
++ if (Last_IPF == 0) {
++ i = 0;
++ Result = PASS;
++ nand_dbg_print(NAND_DBG_DEBUG, "Reading the spare area of "
++ "Block %u Page %u", (unsigned int)BT_Block, i);
++
++ Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Reading the spare area of Block %u Page %u",
++ (unsigned int)BT_Block, i + bt_pages - 1);
++ Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
++ BT_Block, i + bt_pages - 1, 1);
++
++ k = 0;
++ j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
++ if (j) {
++ for (; k < j; k++) {
++ if (tagarray[k] == BT_Tag)
++ break;
++ }
++ }
++
++ if (k < j)
++ bt_flag = tagarray[k];
++ else
++ Result = FAIL;
++
++ if (Result == PASS) {
++ k = 0;
++ j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
++ &tagarray);
++ if (j) {
++ for (; k < j; k++) {
++ if (tagarray[k] == BT_Tag)
++ break;
++ }
++ }
++
++ if (k < j)
++ bt_flag_last_page = tagarray[k];
++ else
++ Result = FAIL;
++
++ if (Result == PASS) {
++ if (bt_flag == bt_flag_last_page) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Block table is found "
++ "in page after IPF at "
++ "block %u page %u\n",
++ (unsigned int)BT_Block,
++ (unsigned int)i);
++ BT_Found = 1;
++ *Page = i;
++ g_cBlockTableStatus =
++ CURRENT_BLOCK_TABLE;
++ goto func_return;
++ } else {
++ Result = FAIL;
++ }
++ }
++ }
++
++ if (Result == FAIL)
++ goto func_return;
++ }
++func_return:
++ return Result;
++}
++
++u8 *get_blk_table_start_addr(void)
++{
++ return g_pBlockTable;
++}
++
++unsigned long get_blk_table_len(void)
++{
++ return DeviceInfo.wDataBlockNum * sizeof(u32);
++}
++
++u8 *get_wear_leveling_table_start_addr(void)
++{
++ return g_pWearCounter;
++}
++
++unsigned long get_wear_leveling_table_len(void)
++{
++ return DeviceInfo.wDataBlockNum * sizeof(u8);
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Read_Block_Table
++* Inputs: none
++* Outputs: PASS / FAIL
++* Description: read the flash spare area and find a block containing the
++* most recent block table(having largest block_table_counter).
++* Find the last written Block table in this block.
++* Check the correctness of Block Table
++* If CDMA is enabled, this function is called in
++* polling mode.
++* We don't need to store changes in Block table in this
++* function as it is called only at initialization
++*
++* Note: Currently this function is called at initialization
++* before any read/erase/write command issued to flash so,
++* there is no need to wait for CDMA list to complete as of now
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Read_Block_Table(void)
++{
++ u16 i = 0;
++ int k, j;
++ u8 *tempBuf, *tagarray;
++ int wResult = FAIL;
++ int status = FAIL;
++ u8 block_table_found = 0;
++ int search_result;
++ u32 Block;
++ u16 Page = 0;
++ u16 PageCount;
++ u16 bt_pages;
++ int wBytesCopied = 0, tempvar;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ tempBuf = tmp_buf1_read_blk_table;
++ bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
++
++ for (j = DeviceInfo.wSpectraStartBlock;
++ j <= (int)DeviceInfo.wSpectraEndBlock;
++ j++) {
++ status = GLOB_LLD_Read_Page_Spare(tempBuf, j, 0, 1);
++ k = 0;
++ i = FTL_Extract_Block_Table_Tag(tempBuf, &tagarray);
++ if (i) {
++ status = GLOB_LLD_Read_Page_Main_Polling(tempBuf,
++ j, 0, 1);
++ for (; k < i; k++) {
++ if (tagarray[k] == tempBuf[3])
++ break;
++ }
++ }
++
++ if (k < i)
++ k = tagarray[k];
++ else
++ continue;
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Block table is contained in Block %d %d\n",
++ (unsigned int)j, (unsigned int)k);
++
++ if (g_pBTBlocks[k-FIRST_BT_ID] == BTBLOCK_INVAL) {
++ g_pBTBlocks[k-FIRST_BT_ID] = j;
++ block_table_found = 1;
++ } else {
++ printk(KERN_ERR "FTL_Read_Block_Table -"
++ "This should never happens. "
++ "Two block table have same counter %u!\n", k);
++ }
++ }
++
++ if (block_table_found) {
++ if (g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL &&
++ g_pBTBlocks[LAST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) {
++ j = LAST_BT_ID;
++ while ((j > FIRST_BT_ID) &&
++ (g_pBTBlocks[j - FIRST_BT_ID] != BTBLOCK_INVAL))
++ j--;
++ if (j == FIRST_BT_ID) {
++ j = LAST_BT_ID;
++ last_erased = LAST_BT_ID;
++ } else {
++ last_erased = (u8)j + 1;
++ while ((j > FIRST_BT_ID) && (BTBLOCK_INVAL ==
++ g_pBTBlocks[j - FIRST_BT_ID]))
++ j--;
++ }
++ } else {
++ j = FIRST_BT_ID;
++ while (g_pBTBlocks[j - FIRST_BT_ID] == BTBLOCK_INVAL)
++ j++;
++ last_erased = (u8)j;
++ while ((j < LAST_BT_ID) && (BTBLOCK_INVAL !=
++ g_pBTBlocks[j - FIRST_BT_ID]))
++ j++;
++ if (g_pBTBlocks[j-FIRST_BT_ID] == BTBLOCK_INVAL)
++ j--;
++ }
++
++ if (last_erased > j)
++ j += (1 + LAST_BT_ID - FIRST_BT_ID);
++
++ for (; (j >= last_erased) && (FAIL == wResult); j--) {
++ i = (j - FIRST_BT_ID) %
++ (1 + LAST_BT_ID - FIRST_BT_ID);
++ search_result =
++ FTL_Search_Block_Table_IN_Block(g_pBTBlocks[i],
++ i + FIRST_BT_ID, &Page);
++ if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
++ block_table_found = 0;
++
++ while ((search_result == PASS) && (FAIL == wResult)) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "FTL_Read_Block_Table:"
++ "Block: %u Page: %u "
++ "contains block table\n",
++ (unsigned int)g_pBTBlocks[i],
++ (unsigned int)Page);
++
++ tempBuf = tmp_buf2_read_blk_table;
++
++ for (k = 0; k < bt_pages; k++) {
++ Block = g_pBTBlocks[i];
++ PageCount = 1;
++
++ status =
++ GLOB_LLD_Read_Page_Main_Polling(
++ tempBuf, Block, Page, PageCount);
++
++ tempvar = k ? 0 : 4;
++
++ wBytesCopied +=
++ FTL_Copy_Block_Table_From_Flash(
++ tempBuf + tempvar,
++ DeviceInfo.wPageDataSize - tempvar,
++ wBytesCopied);
++
++ Page++;
++ }
++
++ wResult = FTL_Check_Block_Table(FAIL);
++ if (FAIL == wResult) {
++ block_table_found = 0;
++ if (Page > bt_pages)
++ Page -= ((bt_pages<<1) + 1);
++ else
++ search_result = FAIL;
++ }
++ }
++ }
++ }
++
++ if (PASS == wResult) {
++ if (!block_table_found)
++ FTL_Execute_SPL_Recovery();
++
++ if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
++ g_wBlockTableOffset = (u16)Page + 1;
++ else
++ g_wBlockTableOffset = (u16)Page - bt_pages;
++
++ g_wBlockTableIndex = (u32)g_pBTBlocks[i];
++
++#if CMD_DMA
++ if (DeviceInfo.MLCDevice)
++ memcpy(g_pBTStartingCopy, g_pBlockTable,
++ DeviceInfo.wDataBlockNum * sizeof(u32)
++ + DeviceInfo.wDataBlockNum * sizeof(u8)
++ + DeviceInfo.wDataBlockNum * sizeof(u16));
++ else
++ memcpy(g_pBTStartingCopy, g_pBlockTable,
++ DeviceInfo.wDataBlockNum * sizeof(u32)
++ + DeviceInfo.wDataBlockNum * sizeof(u8));
++#endif
++ }
++
++ if (FAIL == wResult)
++ printk(KERN_ERR "Yunpeng - "
++ "Can not find valid spectra block table!\n");
++
++#if AUTO_FORMAT_FLASH
++ if (FAIL == wResult) {
++ nand_dbg_print(NAND_DBG_DEBUG, "doing auto-format\n");
++ wResult = FTL_Format_Flash(0);
++ }
++#endif
++
++ return wResult;
++}
++
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Flash_Error_Handle
++* Inputs: Pointer to data
++* Page address
++* Block address
++* Outputs: PASS=0 / FAIL=1
++* Description: It handles any error occured during Spectra operation
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr,
++ u64 blk_addr)
++{
++ u32 i;
++ int j;
++ u32 tmp_node, blk_node = BLK_FROM_ADDR(blk_addr);
++ u64 phy_addr;
++ int wErase = FAIL;
++ int wResult = FAIL;
++ u32 *pbt = (u32 *)g_pBlockTable;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (ERR == GLOB_FTL_Garbage_Collection())
++ return ERR;
++
++ do {
++ for (i = DeviceInfo.wSpectraEndBlock -
++ DeviceInfo.wSpectraStartBlock;
++ i > 0; i--) {
++ if (IS_SPARE_BLOCK(i)) {
++ tmp_node = (u32)(BAD_BLOCK |
++ pbt[blk_node]);
++ pbt[blk_node] = (u32)(pbt[i] &
++ (~SPARE_BLOCK));
++ pbt[i] = tmp_node;
++#if CMD_DMA
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)
++ g_pBTDelta_Free;
++ g_pBTDelta_Free +=
++ sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index =
++ blk_node;
++ p_BTableChangesDelta->BT_Entry_Value =
++ pbt[blk_node];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)
++ g_pBTDelta_Free;
++ g_pBTDelta_Free +=
++ sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index = i;
++ p_BTableChangesDelta->BT_Entry_Value = pbt[i];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++#endif
++ wResult = PASS;
++ break;
++ }
++ }
++
++ if (FAIL == wResult) {
++ if (FAIL == GLOB_FTL_Garbage_Collection())
++ break;
++ else
++ continue;
++ }
++
++ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ FTL_Write_IN_Progress_Block_Table_Page();
++ }
++
++ phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
++
++ for (j = 0; j < RETRY_TIMES; j++) {
++ if (PASS == wErase) {
++ if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
++ MARK_BLOCK_AS_BAD(pbt[blk_node]);
++ break;
++ }
++ }
++ if (PASS == FTL_Cache_Update_Block(pData,
++ old_page_addr,
++ phy_addr)) {
++ wResult = PASS;
++ break;
++ } else {
++ wResult = FAIL;
++ wErase = PASS;
++ }
++ }
++ } while (FAIL == wResult);
++
++ FTL_Write_Block_Table(FAIL);
++
++ return wResult;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Get_Page_Num
++* Inputs: Size in bytes
++* Outputs: Size in pages
++* Description: It calculates the pages required for the length passed
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static u32 FTL_Get_Page_Num(u64 length)
++{
++ return (u32)((length >> DeviceInfo.nBitsInPageDataSize) +
++ (GLOB_u64_Remainder(length , 1) > 0 ? 1 : 0));
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Get_Physical_Block_Addr
++* Inputs: Block Address (byte format)
++* Outputs: Physical address of the block.
++* Description: It translates LBA to PBA by returning address stored
++* at the LBA location in the block table
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static u64 FTL_Get_Physical_Block_Addr(u64 logical_addr)
++{
++ u32 *pbt;
++ u64 physical_addr;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ pbt = (u32 *)g_pBlockTable;
++ physical_addr = (u64) DeviceInfo.wBlockDataSize *
++ (pbt[BLK_FROM_ADDR(logical_addr)] & (~BAD_BLOCK));
++
++ return physical_addr;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Get_Block_Index
++* Inputs: Physical Block no.
++* Outputs: Logical block no. /BAD_BLOCK
++* Description: It returns the logical block no. for the PBA passed
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static u32 FTL_Get_Block_Index(u32 wBlockNum)
++{
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 i;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
++ if (wBlockNum == (pbt[i] & (~BAD_BLOCK)))
++ return i;
++
++ return BAD_BLOCK;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Wear_Leveling
++* Inputs: none
++* Outputs: PASS=0
++* Description: This is static wear leveling (done by explicit call)
++* do complete static wear leveling
++* do complete garbage collection
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_Wear_Leveling(void)
++{
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ FTL_Static_Wear_Leveling();
++ GLOB_FTL_Garbage_Collection();
++
++ return PASS;
++}
++
++static void find_least_most_worn(u8 *chg,
++ u32 *least_idx, u8 *least_cnt,
++ u32 *most_idx, u8 *most_cnt)
++{
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 idx;
++ u8 cnt;
++ int i;
++
++ for (i = BLOCK_TABLE_INDEX + 1; i < DeviceInfo.wDataBlockNum; i++) {
++ if (IS_BAD_BLOCK(i) || PASS == chg[i])
++ continue;
++
++ idx = (u32) ((~BAD_BLOCK) & pbt[i]);
++ cnt = g_pWearCounter[idx - DeviceInfo.wSpectraStartBlock];
++
++ if (IS_SPARE_BLOCK(i)) {
++ if (cnt > *most_cnt) {
++ *most_cnt = cnt;
++ *most_idx = idx;
++ }
++ }
++
++ if (IS_DATA_BLOCK(i)) {
++ if (cnt < *least_cnt) {
++ *least_cnt = cnt;
++ *least_idx = idx;
++ }
++ }
++
++ if (PASS == chg[*most_idx] || PASS == chg[*least_idx]) {
++ debug_boundary_error(*most_idx,
++ DeviceInfo.wDataBlockNum, 0);
++ debug_boundary_error(*least_idx,
++ DeviceInfo.wDataBlockNum, 0);
++ continue;
++ }
++ }
++}
++
++static int move_blks_for_wear_leveling(u8 *chg,
++ u32 *least_idx, u32 *rep_blk_num, int *result)
++{
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 rep_blk;
++ int j, ret_cp_blk, ret_erase;
++ int ret = PASS;
++
++ chg[*least_idx] = PASS;
++ debug_boundary_error(*least_idx, DeviceInfo.wDataBlockNum, 0);
++
++ rep_blk = FTL_Replace_MWBlock();
++ if (rep_blk != BAD_BLOCK) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "More than two spare blocks exist so do it\n");
++ nand_dbg_print(NAND_DBG_DEBUG, "Block Replaced is %d\n",
++ rep_blk);
++
++ chg[rep_blk] = PASS;
++
++ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ FTL_Write_IN_Progress_Block_Table_Page();
++ }
++
++ for (j = 0; j < RETRY_TIMES; j++) {
++ ret_cp_blk = FTL_Copy_Block((u64)(*least_idx) *
++ DeviceInfo.wBlockDataSize,
++ (u64)rep_blk * DeviceInfo.wBlockDataSize);
++ if (FAIL == ret_cp_blk) {
++ ret_erase = GLOB_FTL_Block_Erase((u64)rep_blk
++ * DeviceInfo.wBlockDataSize);
++ if (FAIL == ret_erase)
++ MARK_BLOCK_AS_BAD(pbt[rep_blk]);
++ } else {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "FTL_Copy_Block == OK\n");
++ break;
++ }
++ }
++
++ if (j < RETRY_TIMES) {
++ u32 tmp;
++ u32 old_idx = FTL_Get_Block_Index(*least_idx);
++ u32 rep_idx = FTL_Get_Block_Index(rep_blk);
++ tmp = (u32)(DISCARD_BLOCK | pbt[old_idx]);
++ pbt[old_idx] = (u32)((~SPARE_BLOCK) &
++ pbt[rep_idx]);
++ pbt[rep_idx] = tmp;
++#if CMD_DMA
++ p_BTableChangesDelta = (struct BTableChangesDelta *)
++ g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index = old_idx;
++ p_BTableChangesDelta->BT_Entry_Value = pbt[old_idx];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++
++ p_BTableChangesDelta = (struct BTableChangesDelta *)
++ g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index = rep_idx;
++ p_BTableChangesDelta->BT_Entry_Value = pbt[rep_idx];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++#endif
++ } else {
++ pbt[FTL_Get_Block_Index(rep_blk)] |= BAD_BLOCK;
++#if CMD_DMA
++ p_BTableChangesDelta = (struct BTableChangesDelta *)
++ g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index =
++ FTL_Get_Block_Index(rep_blk);
++ p_BTableChangesDelta->BT_Entry_Value =
++ pbt[FTL_Get_Block_Index(rep_blk)];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++#endif
++ *result = FAIL;
++ ret = FAIL;
++ }
++
++ if (((*rep_blk_num)++) > WEAR_LEVELING_BLOCK_NUM)
++ ret = FAIL;
++ } else {
++ printk(KERN_ERR "Less than 3 spare blocks exist so quit\n");
++ ret = FAIL;
++ }
++
++ return ret;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Static_Wear_Leveling
++* Inputs: none
++* Outputs: PASS=0 / FAIL=1
++* Description: This is static wear leveling (done by explicit call)
++* search for most&least used
++* if difference < GATE:
++* update the block table with exhange
++* mark block table in flash as IN_PROGRESS
++* copy flash block
++* the caller should handle GC clean up after calling this function
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int FTL_Static_Wear_Leveling(void)
++{
++ u8 most_worn_cnt;
++ u8 least_worn_cnt;
++ u32 most_worn_idx;
++ u32 least_worn_idx;
++ int result = PASS;
++ int go_on = PASS;
++ u32 replaced_blks = 0;
++ u8 *chang_flag = flags_static_wear_leveling;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (!chang_flag)
++ return FAIL;
++
++ memset(chang_flag, FAIL, DeviceInfo.wDataBlockNum);
++ while (go_on == PASS) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "starting static wear leveling\n");
++ most_worn_cnt = 0;
++ least_worn_cnt = 0xFF;
++ least_worn_idx = BLOCK_TABLE_INDEX;
++ most_worn_idx = BLOCK_TABLE_INDEX;
++
++ find_least_most_worn(chang_flag, &least_worn_idx,
++ &least_worn_cnt, &most_worn_idx, &most_worn_cnt);
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Used and least worn is block %u, whos count is %u\n",
++ (unsigned int)least_worn_idx,
++ (unsigned int)least_worn_cnt);
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Free and most worn is block %u, whos count is %u\n",
++ (unsigned int)most_worn_idx,
++ (unsigned int)most_worn_cnt);
++
++ if ((most_worn_cnt > least_worn_cnt) &&
++ (most_worn_cnt - least_worn_cnt > WEAR_LEVELING_GATE))
++ go_on = move_blks_for_wear_leveling(chang_flag,
++ &least_worn_idx, &replaced_blks, &result);
++ else
++ go_on = FAIL;
++ }
++
++ return result;
++}
++
++#if CMD_DMA
++static int do_garbage_collection(u32 discard_cnt)
++{
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 pba;
++ u8 bt_block_erased = 0;
++ int i, cnt, ret = FAIL;
++ u64 addr;
++
++ i = 0;
++ while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0) &&
++ ((ftl_cmd_cnt + 28) < 256)) {
++ if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
++ (pbt[i] & DISCARD_BLOCK)) {
++ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ FTL_Write_IN_Progress_Block_Table_Page();
++ }
++
++ addr = FTL_Get_Physical_Block_Addr((u64)i *
++ DeviceInfo.wBlockDataSize);
++ pba = BLK_FROM_ADDR(addr);
++
++ for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
++ if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "GC will erase BT block %u\n",
++ (unsigned int)pba);
++ discard_cnt--;
++ i++;
++ bt_block_erased = 1;
++ break;
++ }
++ }
++
++ if (bt_block_erased) {
++ bt_block_erased = 0;
++ continue;
++ }
++
++ addr = FTL_Get_Physical_Block_Addr((u64)i *
++ DeviceInfo.wBlockDataSize);
++
++ if (PASS == GLOB_FTL_Block_Erase(addr)) {
++ pbt[i] &= (u32)(~DISCARD_BLOCK);
++ pbt[i] |= (u32)(SPARE_BLOCK);
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)
++ g_pBTDelta_Free;
++ g_pBTDelta_Free +=
++ sizeof(struct BTableChangesDelta);
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt - 1;
++ p_BTableChangesDelta->BT_Index = i;
++ p_BTableChangesDelta->BT_Entry_Value = pbt[i];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++ discard_cnt--;
++ ret = PASS;
++ } else {
++ MARK_BLOCK_AS_BAD(pbt[i]);
++ }
++ }
++
++ i++;
++ }
++
++ return ret;
++}
++
++#else
++static int do_garbage_collection(u32 discard_cnt)
++{
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 pba;
++ u8 bt_block_erased = 0;
++ int i, cnt, ret = FAIL;
++ u64 addr;
++
++ i = 0;
++ while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0)) {
++ if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
++ (pbt[i] & DISCARD_BLOCK)) {
++ if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ FTL_Write_IN_Progress_Block_Table_Page();
++ }
++
++ addr = FTL_Get_Physical_Block_Addr((u64)i *
++ DeviceInfo.wBlockDataSize);
++ pba = BLK_FROM_ADDR(addr);
++
++ for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
++ if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "GC will erase BT block %d\n",
++ pba);
++ discard_cnt--;
++ i++;
++ bt_block_erased = 1;
++ break;
++ }
++ }
++
++ if (bt_block_erased) {
++ bt_block_erased = 0;
++ continue;
++ }
++
++ /* If the discard block is L2 cache block, then just skip it */
++ for (cnt = 0; cnt < BLK_NUM_FOR_L2_CACHE; cnt++) {
++ if (cache_l2.blk_array[cnt] == pba) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "GC will erase L2 cache blk %d\n",
++ pba);
++ break;
++ }
++ }
++ if (cnt < BLK_NUM_FOR_L2_CACHE) { /* Skip it */
++ discard_cnt--;
++ i++;
++ continue;
++ }
++
++ addr = FTL_Get_Physical_Block_Addr((u64)i *
++ DeviceInfo.wBlockDataSize);
++
++ if (PASS == GLOB_FTL_Block_Erase(addr)) {
++ pbt[i] &= (u32)(~DISCARD_BLOCK);
++ pbt[i] |= (u32)(SPARE_BLOCK);
++ discard_cnt--;
++ ret = PASS;
++ } else {
++ MARK_BLOCK_AS_BAD(pbt[i]);
++ }
++ }
++
++ i++;
++ }
++
++ return ret;
++}
++#endif
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Garbage_Collection
++* Inputs: none
++* Outputs: PASS / FAIL (returns the number of un-erased blocks
++* Description: search the block table for all discarded blocks to erase
++* for each discarded block:
++* set the flash block to IN_PROGRESS
++* erase the block
++* update the block table
++* write the block table to flash
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_Garbage_Collection(void)
++{
++ u32 i;
++ u32 wDiscard = 0;
++ int wResult = FAIL;
++ u32 *pbt = (u32 *)g_pBlockTable;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (GC_Called) {
++ printk(KERN_ALERT "GLOB_FTL_Garbage_Collection() "
++ "has been re-entered! Exit.\n");
++ return PASS;
++ }
++
++ GC_Called = 1;
++
++ GLOB_FTL_BT_Garbage_Collection();
++
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
++ if (IS_DISCARDED_BLOCK(i))
++ wDiscard++;
++ }
++
++ if (wDiscard <= 0) {
++ GC_Called = 0;
++ return wResult;
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Found %d discarded blocks\n", wDiscard);
++
++ FTL_Write_Block_Table(FAIL);
++
++ wResult = do_garbage_collection(wDiscard);
++
++ FTL_Write_Block_Table(FAIL);
++
++ GC_Called = 0;
++
++ return wResult;
++}
++
++
++#if CMD_DMA
++static int do_bt_garbage_collection(void)
++{
++ u32 pba, lba;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
++ u64 addr;
++ int i, ret = FAIL;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (BT_GC_Called)
++ return PASS;
++
++ BT_GC_Called = 1;
++
++ for (i = last_erased; (i <= LAST_BT_ID) &&
++ (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
++ FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) &&
++ ((ftl_cmd_cnt + 28)) < 256; i++) {
++ pba = pBTBlocksNode[i - FIRST_BT_ID];
++ lba = FTL_Get_Block_Index(pba);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "do_bt_garbage_collection: pba %d, lba %d\n",
++ pba, lba);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Block Table Entry: %d", pbt[lba]);
++
++ if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
++ (pbt[lba] & DISCARD_BLOCK)) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "do_bt_garbage_collection_cdma: "
++ "Erasing Block tables present in block %d\n",
++ pba);
++ addr = FTL_Get_Physical_Block_Addr((u64)lba *
++ DeviceInfo.wBlockDataSize);
++ if (PASS == GLOB_FTL_Block_Erase(addr)) {
++ pbt[lba] &= (u32)(~DISCARD_BLOCK);
++ pbt[lba] |= (u32)(SPARE_BLOCK);
++
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)
++ g_pBTDelta_Free;
++ g_pBTDelta_Free +=
++ sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt - 1;
++ p_BTableChangesDelta->BT_Index = lba;
++ p_BTableChangesDelta->BT_Entry_Value =
++ pbt[lba];
++
++ p_BTableChangesDelta->ValidFields = 0x0C;
++
++ ret = PASS;
++ pBTBlocksNode[last_erased - FIRST_BT_ID] =
++ BTBLOCK_INVAL;
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "resetting bt entry at index %d "
++ "value %d\n", i,
++ pBTBlocksNode[i - FIRST_BT_ID]);
++ if (last_erased == LAST_BT_ID)
++ last_erased = FIRST_BT_ID;
++ else
++ last_erased++;
++ } else {
++ MARK_BLOCK_AS_BAD(pbt[lba]);
++ }
++ }
++ }
++
++ BT_GC_Called = 0;
++
++ return ret;
++}
++
++#else
++static int do_bt_garbage_collection(void)
++{
++ u32 pba, lba;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
++ u64 addr;
++ int i, ret = FAIL;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (BT_GC_Called)
++ return PASS;
++
++ BT_GC_Called = 1;
++
++ for (i = last_erased; (i <= LAST_BT_ID) &&
++ (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
++ FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL); i++) {
++ pba = pBTBlocksNode[i - FIRST_BT_ID];
++ lba = FTL_Get_Block_Index(pba);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "do_bt_garbage_collection_cdma: pba %d, lba %d\n",
++ pba, lba);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Block Table Entry: %d", pbt[lba]);
++
++ if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
++ (pbt[lba] & DISCARD_BLOCK)) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "do_bt_garbage_collection: "
++ "Erasing Block tables present in block %d\n",
++ pba);
++ addr = FTL_Get_Physical_Block_Addr((u64)lba *
++ DeviceInfo.wBlockDataSize);
++ if (PASS == GLOB_FTL_Block_Erase(addr)) {
++ pbt[lba] &= (u32)(~DISCARD_BLOCK);
++ pbt[lba] |= (u32)(SPARE_BLOCK);
++ ret = PASS;
++ pBTBlocksNode[last_erased - FIRST_BT_ID] =
++ BTBLOCK_INVAL;
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "resetting bt entry at index %d "
++ "value %d\n", i,
++ pBTBlocksNode[i - FIRST_BT_ID]);
++ if (last_erased == LAST_BT_ID)
++ last_erased = FIRST_BT_ID;
++ else
++ last_erased++;
++ } else {
++ MARK_BLOCK_AS_BAD(pbt[lba]);
++ }
++ }
++ }
++
++ BT_GC_Called = 0;
++
++ return ret;
++}
++
++#endif
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_BT_Garbage_Collection
++* Inputs: none
++* Outputs: PASS / FAIL (returns the number of un-erased blocks
++* Description: Erases discarded blocks containing Block table
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_BT_Garbage_Collection(void)
++{
++ return do_bt_garbage_collection();
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Replace_OneBlock
++* Inputs: Block number 1
++* Block number 2
++* Outputs: Replaced Block Number
++* Description: Interchange block table entries at wBlockNum and wReplaceNum
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static u32 FTL_Replace_OneBlock(u32 blk, u32 rep_blk)
++{
++ u32 tmp_blk;
++ u32 replace_node = BAD_BLOCK;
++ u32 *pbt = (u32 *)g_pBlockTable;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (rep_blk != BAD_BLOCK) {
++ if (IS_BAD_BLOCK(blk))
++ tmp_blk = pbt[blk];
++ else
++ tmp_blk = DISCARD_BLOCK | (~SPARE_BLOCK & pbt[blk]);
++
++ replace_node = (u32) ((~SPARE_BLOCK) & pbt[rep_blk]);
++ pbt[blk] = replace_node;
++ pbt[rep_blk] = tmp_blk;
++
++#if CMD_DMA
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index = blk;
++ p_BTableChangesDelta->BT_Entry_Value = pbt[blk];
++
++ p_BTableChangesDelta->ValidFields = 0x0C;
++
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index = rep_blk;
++ p_BTableChangesDelta->BT_Entry_Value = pbt[rep_blk];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++#endif
++ }
++
++ return replace_node;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Write_Block_Table_Data
++* Inputs: Block table size in pages
++* Outputs: PASS=0 / FAIL=1
++* Description: Write block table data in flash
++* If first page and last page
++* Write data+BT flag
++* else
++* Write data
++* BT flag is a counter. Its value is incremented for block table
++* write in a new Block
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Write_Block_Table_Data(void)
++{
++ u64 dwBlockTableAddr, pTempAddr;
++ u32 Block;
++ u16 Page, PageCount;
++ u8 *tempBuf = tmp_buf_write_blk_table_data;
++ int wBytesCopied;
++ u16 bt_pages;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ dwBlockTableAddr =
++ (u64)((u64)g_wBlockTableIndex * DeviceInfo.wBlockDataSize +
++ (u64)g_wBlockTableOffset * DeviceInfo.wPageDataSize);
++ pTempAddr = dwBlockTableAddr;
++
++ bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
++
++ nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: "
++ "page= %d BlockTableIndex= %d "
++ "BlockTableOffset=%d\n", bt_pages,
++ g_wBlockTableIndex, g_wBlockTableOffset);
++
++ Block = BLK_FROM_ADDR(pTempAddr);
++ Page = PAGE_FROM_ADDR(pTempAddr, Block);
++ PageCount = 1;
++
++ if (bt_block_changed) {
++ if (bt_flag == LAST_BT_ID) {
++ bt_flag = FIRST_BT_ID;
++ g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
++ } else if (bt_flag < LAST_BT_ID) {
++ bt_flag++;
++ g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
++ }
++
++ if ((bt_flag > (LAST_BT_ID-4)) &&
++ g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] !=
++ BTBLOCK_INVAL) {
++ bt_block_changed = 0;
++ GLOB_FTL_BT_Garbage_Collection();
++ }
++
++ bt_block_changed = 0;
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Block Table Counter is %u Block %u\n",
++ bt_flag, (unsigned int)Block);
++ }
++
++ memset(tempBuf, 0, 3);
++ tempBuf[3] = bt_flag;
++ wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf + 4,
++ DeviceInfo.wPageDataSize - 4, 0);
++ memset(&tempBuf[wBytesCopied + 4], 0xff,
++ DeviceInfo.wPageSize - (wBytesCopied + 4));
++ FTL_Insert_Block_Table_Signature(&tempBuf[DeviceInfo.wPageDataSize],
++ bt_flag);
++
++#if CMD_DMA
++ memcpy(g_pNextBlockTable, tempBuf,
++ DeviceInfo.wPageSize * sizeof(u8));
++ nand_dbg_print(NAND_DBG_DEBUG, "Writing First Page of Block Table "
++ "Block %u Page %u\n", (unsigned int)Block, Page);
++ if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(g_pNextBlockTable,
++ Block, Page, 1,
++ LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
++ nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
++ "%s, Line %d, Function: %s, "
++ "new Bad Block %d generated!\n",
++ __FILE__, __LINE__, __func__, Block);
++ goto func_return;
++ }
++
++ ftl_cmd_cnt++;
++ g_pNextBlockTable += ((DeviceInfo.wPageSize * sizeof(u8)));
++#else
++ if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf, Block, Page, 1)) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, Function: %s, "
++ "new Bad Block %d generated!\n",
++ __FILE__, __LINE__, __func__, Block);
++ goto func_return;
++ }
++#endif
++
++ if (bt_pages > 1) {
++ PageCount = bt_pages - 1;
++ if (PageCount > 1) {
++ wBytesCopied += FTL_Copy_Block_Table_To_Flash(tempBuf,
++ DeviceInfo.wPageDataSize * (PageCount - 1),
++ wBytesCopied);
++
++#if CMD_DMA
++ memcpy(g_pNextBlockTable, tempBuf,
++ (PageCount - 1) * DeviceInfo.wPageDataSize);
++ if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
++ g_pNextBlockTable, Block, Page + 1,
++ PageCount - 1)) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, "
++ "Function: %s, "
++ "new Bad Block %d generated!\n",
++ __FILE__, __LINE__, __func__,
++ (int)Block);
++ goto func_return;
++ }
++
++ ftl_cmd_cnt++;
++ g_pNextBlockTable += (PageCount - 1) *
++ DeviceInfo.wPageDataSize * sizeof(u8);
++#else
++ if (FAIL == GLOB_LLD_Write_Page_Main(tempBuf,
++ Block, Page + 1, PageCount - 1)) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, "
++ "Function: %s, "
++ "new Bad Block %d generated!\n",
++ __FILE__, __LINE__, __func__,
++ (int)Block);
++ goto func_return;
++ }
++#endif
++ }
++
++ wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf,
++ DeviceInfo.wPageDataSize, wBytesCopied);
++ memset(&tempBuf[wBytesCopied], 0xff,
++ DeviceInfo.wPageSize-wBytesCopied);
++ FTL_Insert_Block_Table_Signature(
++ &tempBuf[DeviceInfo.wPageDataSize], bt_flag);
++#if CMD_DMA
++ memcpy(g_pNextBlockTable, tempBuf,
++ DeviceInfo.wPageSize * sizeof(u8));
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Writing the last Page of Block Table "
++ "Block %u Page %u\n",
++ (unsigned int)Block, Page + bt_pages - 1);
++ if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(
++ g_pNextBlockTable, Block, Page + bt_pages - 1, 1,
++ LLD_CMD_FLAG_MODE_CDMA |
++ LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, "
++ "Function: %s, new Bad Block %d generated!\n",
++ __FILE__, __LINE__, __func__, Block);
++ goto func_return;
++ }
++ ftl_cmd_cnt++;
++#else
++ if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf,
++ Block, Page+bt_pages - 1, 1)) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, "
++ "Function: %s, "
++ "new Bad Block %d generated!\n",
++ __FILE__, __LINE__, __func__, Block);
++ goto func_return;
++ }
++#endif
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: done\n");
++
++func_return:
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Replace_Block_Table
++* Inputs: None
++* Outputs: PASS=0 / FAIL=1
++* Description: Get a new block to write block table
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static u32 FTL_Replace_Block_Table(void)
++{
++ u32 blk;
++ int gc;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
++
++ if ((BAD_BLOCK == blk) && (PASS == gc)) {
++ GLOB_FTL_Garbage_Collection();
++ blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
++ }
++ if (BAD_BLOCK == blk)
++ printk(KERN_ERR "%s, %s: There is no spare block. "
++ "It should never happen\n",
++ __FILE__, __func__);
++
++ nand_dbg_print(NAND_DBG_DEBUG, "New Block table Block is %d\n", blk);
++
++ return blk;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Replace_LWBlock
++* Inputs: Block number
++* Pointer to Garbage Collect flag
++* Outputs:
++* Description: Determine the least weared block by traversing
++* block table
++* Set Garbage collection to be called if number of spare
++* block is less than Free Block Gate count
++* Change Block table entry to map least worn block for current
++* operation
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static u32 FTL_Replace_LWBlock(u32 wBlockNum, int *pGarbageCollect)
++{
++ u32 i;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u8 wLeastWornCounter = 0xFF;
++ u32 wLeastWornIndex = BAD_BLOCK;
++ u32 wSpareBlockNum = 0;
++ u32 wDiscardBlockNum = 0;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (IS_SPARE_BLOCK(wBlockNum)) {
++ *pGarbageCollect = FAIL;
++ pbt[wBlockNum] = (u32)(pbt[wBlockNum] & (~SPARE_BLOCK));
++#if CMD_DMA
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index = (u32)(wBlockNum);
++ p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++#endif
++ return pbt[wBlockNum];
++ }
++
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
++ if (IS_DISCARDED_BLOCK(i))
++ wDiscardBlockNum++;
++
++ if (IS_SPARE_BLOCK(i)) {
++ u32 wPhysicalIndex = (u32)((~BAD_BLOCK) & pbt[i]);
++ if (wPhysicalIndex > DeviceInfo.wSpectraEndBlock)
++ printk(KERN_ERR "FTL_Replace_LWBlock: "
++ "This should never occur!\n");
++ if (g_pWearCounter[wPhysicalIndex -
++ DeviceInfo.wSpectraStartBlock] <
++ wLeastWornCounter) {
++ wLeastWornCounter =
++ g_pWearCounter[wPhysicalIndex -
++ DeviceInfo.wSpectraStartBlock];
++ wLeastWornIndex = i;
++ }
++ wSpareBlockNum++;
++ }
++ }
++
++ nand_dbg_print(NAND_DBG_WARN,
++ "FTL_Replace_LWBlock: Least Worn Counter %d\n",
++ (int)wLeastWornCounter);
++
++ if ((wDiscardBlockNum >= NUM_FREE_BLOCKS_GATE) ||
++ (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE))
++ *pGarbageCollect = PASS;
++ else
++ *pGarbageCollect = FAIL;
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "FTL_Replace_LWBlock: Discarded Blocks %u Spare"
++ " Blocks %u\n",
++ (unsigned int)wDiscardBlockNum,
++ (unsigned int)wSpareBlockNum);
++
++ return FTL_Replace_OneBlock(wBlockNum, wLeastWornIndex);
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Replace_MWBlock
++* Inputs: None
++* Outputs: most worn spare block no./BAD_BLOCK
++* Description: It finds most worn spare block.
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static u32 FTL_Replace_MWBlock(void)
++{
++ u32 i;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u8 wMostWornCounter = 0;
++ u32 wMostWornIndex = BAD_BLOCK;
++ u32 wSpareBlockNum = 0;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
++ if (IS_SPARE_BLOCK(i)) {
++ u32 wPhysicalIndex = (u32)((~SPARE_BLOCK) & pbt[i]);
++ if (g_pWearCounter[wPhysicalIndex -
++ DeviceInfo.wSpectraStartBlock] >
++ wMostWornCounter) {
++ wMostWornCounter =
++ g_pWearCounter[wPhysicalIndex -
++ DeviceInfo.wSpectraStartBlock];
++ wMostWornIndex = wPhysicalIndex;
++ }
++ wSpareBlockNum++;
++ }
++ }
++
++ if (wSpareBlockNum <= 2)
++ return BAD_BLOCK;
++
++ return wMostWornIndex;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Replace_Block
++* Inputs: Block Address
++* Outputs: PASS=0 / FAIL=1
++* Description: If block specified by blk_addr parameter is not free,
++* replace it with the least worn block.
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Replace_Block(u64 blk_addr)
++{
++ u32 current_blk = BLK_FROM_ADDR(blk_addr);
++ u32 *pbt = (u32 *)g_pBlockTable;
++ int wResult = PASS;
++ int GarbageCollect = FAIL;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (IS_SPARE_BLOCK(current_blk)) {
++ pbt[current_blk] = (~SPARE_BLOCK) & pbt[current_blk];
++#if CMD_DMA
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index = current_blk;
++ p_BTableChangesDelta->BT_Entry_Value = pbt[current_blk];
++ p_BTableChangesDelta->ValidFields = 0x0C ;
++#endif
++ return wResult;
++ }
++
++ FTL_Replace_LWBlock(current_blk, &GarbageCollect);
++
++ if (PASS == GarbageCollect)
++ wResult = GLOB_FTL_Garbage_Collection();
++
++ return wResult;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Is_BadBlock
++* Inputs: block number to test
++* Outputs: PASS (block is BAD) / FAIL (block is not bad)
++* Description: test if this block number is flagged as bad
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_Is_BadBlock(u32 wBlockNum)
++{
++ u32 *pbt = (u32 *)g_pBlockTable;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (wBlockNum >= DeviceInfo.wSpectraStartBlock
++ && BAD_BLOCK == (pbt[wBlockNum] & BAD_BLOCK))
++ return PASS;
++ else
++ return FAIL;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Flush_Cache
++* Inputs: none
++* Outputs: PASS=0 / FAIL=1
++* Description: flush all the cache blocks to flash
++* if a cache block is not dirty, don't do anything with it
++* else, write the block and update the block table
++* Note: This function should be called at shutdown/power down.
++* to write important data into device
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_Flush_Cache(void)
++{
++ int i, ret;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ for (i = 0; i < CACHE_ITEM_NUM; i++) {
++ if (SET == Cache.array[i].changed) {
++#if CMD_DMA
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++ int_cache[ftl_cmd_cnt].item = i;
++ int_cache[ftl_cmd_cnt].cache.address =
++ Cache.array[i].address;
++ int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
++#endif
++#endif
++ ret = write_back_to_l2_cache(Cache.array[i].buf, Cache.array[i].address);
++ if (PASS == ret) {
++ Cache.array[i].changed = CLEAR;
++ } else {
++ printk(KERN_ALERT "Failed when write back to L2 cache!\n");
++ /* TODO - How to handle this? */
++ }
++ }
++ }
++
++ flush_l2_cache();
++
++ return FTL_Write_Block_Table(FAIL);
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Page_Read
++* Inputs: pointer to data
++* logical address of data (u64 is LBA * Bytes/Page)
++* Outputs: PASS=0 / FAIL=1
++* Description: reads a page of data into RAM from the cache
++* if the data is not already in cache, read from flash to cache
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_Page_Read(u8 *data, u64 logical_addr)
++{
++ u16 cache_item;
++ int res = PASS;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "GLOB_FTL_Page_Read - "
++ "page_addr: %llu\n", logical_addr);
++
++ cache_item = FTL_Cache_If_Hit(logical_addr);
++
++ if (UNHIT_CACHE_ITEM == cache_item) {
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "GLOB_FTL_Page_Read: Cache not hit\n");
++ res = FTL_Cache_Write();
++ if (ERR == FTL_Cache_Read(logical_addr))
++ res = ERR;
++ cache_item = Cache.LRU;
++ }
++
++ FTL_Cache_Read_Page(data, logical_addr, cache_item);
++
++ return res;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Page_Write
++* Inputs: pointer to data
++* address of data (ADDRESSTYPE is LBA * Bytes/Page)
++* Outputs: PASS=0 / FAIL=1
++* Description: writes a page of data from RAM to the cache
++* if the data is not already in cache, write back the
++* least recently used block and read the addressed block
++* from flash to cache
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_Page_Write(u8 *pData, u64 dwPageAddr)
++{
++ u16 cache_blk;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ int wResult = PASS;
++
++ nand_dbg_print(NAND_DBG_TRACE, "GLOB_FTL_Page_Write - "
++ "dwPageAddr: %llu\n", dwPageAddr);
++
++ cache_blk = FTL_Cache_If_Hit(dwPageAddr);
++
++ if (UNHIT_CACHE_ITEM == cache_blk) {
++ wResult = FTL_Cache_Write();
++ if (IS_BAD_BLOCK(BLK_FROM_ADDR(dwPageAddr))) {
++ wResult = FTL_Replace_Block(dwPageAddr);
++ pbt[BLK_FROM_ADDR(dwPageAddr)] |= SPARE_BLOCK;
++ if (wResult == FAIL)
++ return FAIL;
++ }
++ if (ERR == FTL_Cache_Read(dwPageAddr))
++ wResult = ERR;
++ cache_blk = Cache.LRU;
++ FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
++ } else {
++#if CMD_DMA
++ FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk,
++ LLD_CMD_FLAG_ORDER_BEFORE_REST);
++#else
++ FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
++#endif
++ }
++
++ return wResult;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: GLOB_FTL_Block_Erase
++* Inputs: address of block to erase (now in byte format, should change to
++* block format)
++* Outputs: PASS=0 / FAIL=1
++* Description: erases the specified block
++* increments the erase count
++* If erase count reaches its upper limit,call function to
++* do the ajustment as per the relative erase count values
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int GLOB_FTL_Block_Erase(u64 blk_addr)
++{
++ int status;
++ u32 BlkIdx;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ BlkIdx = (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize);
++
++ if (BlkIdx < DeviceInfo.wSpectraStartBlock) {
++ printk(KERN_ERR "GLOB_FTL_Block_Erase: "
++ "This should never occur\n");
++ return FAIL;
++ }
++
++#if CMD_DMA
++ status = GLOB_LLD_Erase_Block_cdma(BlkIdx, LLD_CMD_FLAG_MODE_CDMA);
++ if (status == FAIL)
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, "
++ "Function: %s, new Bad Block %d generated!\n",
++ __FILE__, __LINE__, __func__, BlkIdx);
++#else
++ status = GLOB_LLD_Erase_Block(BlkIdx);
++ if (status == FAIL) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, "
++ "Function: %s, new Bad Block %d generated!\n",
++ __FILE__, __LINE__, __func__, BlkIdx);
++ return status;
++ }
++#endif
++
++ if (DeviceInfo.MLCDevice) {
++ g_pReadCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] = 0;
++ if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ FTL_Write_IN_Progress_Block_Table_Page();
++ }
++ }
++
++ g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock]++;
++
++#if CMD_DMA
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
++ p_BTableChangesDelta->WC_Index =
++ BlkIdx - DeviceInfo.wSpectraStartBlock;
++ p_BTableChangesDelta->WC_Entry_Value =
++ g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock];
++ p_BTableChangesDelta->ValidFields = 0x30;
++
++ if (DeviceInfo.MLCDevice) {
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->RC_Index =
++ BlkIdx - DeviceInfo.wSpectraStartBlock;
++ p_BTableChangesDelta->RC_Entry_Value =
++ g_pReadCounter[BlkIdx -
++ DeviceInfo.wSpectraStartBlock];
++ p_BTableChangesDelta->ValidFields = 0xC0;
++ }
++
++ ftl_cmd_cnt++;
++#endif
++
++ if (g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] == 0xFE)
++ FTL_Adjust_Relative_Erase_Count(BlkIdx);
++
++ return status;
++}
++
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Adjust_Relative_Erase_Count
++* Inputs: index to block that was just incremented and is at the max
++* Outputs: PASS=0 / FAIL=1
++* Description: If any erase counts at MAX, adjusts erase count of every
++* block by substracting least worn
++* counter from counter value of every entry in wear table
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX)
++{
++ u8 wLeastWornCounter = MAX_BYTE_VALUE;
++ u8 wWearCounter;
++ u32 i, wWearIndex;
++ u32 *pbt = (u32 *)g_pBlockTable;
++ int wResult = PASS;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
++ if (IS_BAD_BLOCK(i))
++ continue;
++ wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
++
++ if ((wWearIndex - DeviceInfo.wSpectraStartBlock) < 0)
++ printk(KERN_ERR "FTL_Adjust_Relative_Erase_Count:"
++ "This should never occur\n");
++ wWearCounter = g_pWearCounter[wWearIndex -
++ DeviceInfo.wSpectraStartBlock];
++ if (wWearCounter < wLeastWornCounter)
++ wLeastWornCounter = wWearCounter;
++ }
++
++ if (wLeastWornCounter == 0) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "Adjusting Wear Levelling Counters: Special Case\n");
++ g_pWearCounter[Index_of_MAX -
++ DeviceInfo.wSpectraStartBlock]--;
++#if CMD_DMA
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
++ p_BTableChangesDelta->WC_Index =
++ Index_of_MAX - DeviceInfo.wSpectraStartBlock;
++ p_BTableChangesDelta->WC_Entry_Value =
++ g_pWearCounter[Index_of_MAX -
++ DeviceInfo.wSpectraStartBlock];
++ p_BTableChangesDelta->ValidFields = 0x30;
++#endif
++ FTL_Static_Wear_Leveling();
++ } else {
++ for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
++ if (!IS_BAD_BLOCK(i)) {
++ wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
++ g_pWearCounter[wWearIndex -
++ DeviceInfo.wSpectraStartBlock] =
++ (u8)(g_pWearCounter
++ [wWearIndex -
++ DeviceInfo.wSpectraStartBlock] -
++ wLeastWornCounter);
++#if CMD_DMA
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free +=
++ sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->WC_Index = wWearIndex -
++ DeviceInfo.wSpectraStartBlock;
++ p_BTableChangesDelta->WC_Entry_Value =
++ g_pWearCounter[wWearIndex -
++ DeviceInfo.wSpectraStartBlock];
++ p_BTableChangesDelta->ValidFields = 0x30;
++#endif
++ }
++ }
++
++ return wResult;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Write_IN_Progress_Block_Table_Page
++* Inputs: None
++* Outputs: None
++* Description: It writes in-progress flag page to the page next to
++* block table
++***********************************************************************/
++static int FTL_Write_IN_Progress_Block_Table_Page(void)
++{
++ int wResult = PASS;
++ u16 bt_pages;
++ u16 dwIPFPageAddr;
++#if CMD_DMA
++#else
++ u32 *pbt = (u32 *)g_pBlockTable;
++ u32 wTempBlockTableIndex;
++#endif
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
++
++ dwIPFPageAddr = g_wBlockTableOffset + bt_pages;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "Writing IPF at "
++ "Block %d Page %d\n",
++ g_wBlockTableIndex, dwIPFPageAddr);
++
++#if CMD_DMA
++ wResult = GLOB_LLD_Write_Page_Main_Spare_cdma(g_pIPF,
++ g_wBlockTableIndex, dwIPFPageAddr, 1,
++ LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST);
++ if (wResult == FAIL) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, "
++ "Function: %s, new Bad Block %d generated!\n",
++ __FILE__, __LINE__, __func__,
++ g_wBlockTableIndex);
++ }
++ g_wBlockTableOffset = dwIPFPageAddr + 1;
++ p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++ p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
++ p_BTableChangesDelta->g_wBlockTableOffset = g_wBlockTableOffset;
++ p_BTableChangesDelta->ValidFields = 0x01;
++ ftl_cmd_cnt++;
++#else
++ wResult = GLOB_LLD_Write_Page_Main_Spare(g_pIPF,
++ g_wBlockTableIndex, dwIPFPageAddr, 1);
++ if (wResult == FAIL) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in %s, Line %d, "
++ "Function: %s, new Bad Block %d generated!\n",
++ __FILE__, __LINE__, __func__,
++ (int)g_wBlockTableIndex);
++ MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
++ wTempBlockTableIndex = FTL_Replace_Block_Table();
++ bt_block_changed = 1;
++ if (BAD_BLOCK == wTempBlockTableIndex)
++ return ERR;
++ g_wBlockTableIndex = wTempBlockTableIndex;
++ g_wBlockTableOffset = 0;
++ /* Block table tag is '00'. Means it's used one */
++ pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
++ return FAIL;
++ }
++ g_wBlockTableOffset = dwIPFPageAddr + 1;
++#endif
++ return wResult;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: FTL_Read_Disturbance
++* Inputs: block address
++* Outputs: PASS=0 / FAIL=1
++* Description: used to handle read disturbance. Data in block that
++* reaches its read limit is moved to new block
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int FTL_Read_Disturbance(u32 blk_addr)
++{
++ int wResult = FAIL;
++ u32 *pbt = (u32 *) g_pBlockTable;
++ u32 dwOldBlockAddr = blk_addr;
++ u32 wBlockNum;
++ u32 i;
++ u32 wLeastReadCounter = 0xFFFF;
++ u32 wLeastReadIndex = BAD_BLOCK;
++ u32 wSpareBlockNum = 0;
++ u32 wTempNode;
++ u32 wReplacedNode;
++ u8 *g_pTempBuf;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++#if CMD_DMA
++ g_pTempBuf = cp_back_buf_copies[cp_back_buf_idx];
++ cp_back_buf_idx++;
++ if (cp_back_buf_idx > COPY_BACK_BUF_NUM) {
++ printk(KERN_ERR "cp_back_buf_copies overflow! Exit."
++ "Maybe too many pending commands in your CDMA chain.\n");
++ return FAIL;
++ }
++#else
++ g_pTempBuf = tmp_buf_read_disturbance;
++#endif
++
++ wBlockNum = FTL_Get_Block_Index(blk_addr);
++
++ do {
++ /* This is a bug.Here 'i' should be logical block number
++ * and start from 1 (0 is reserved for block table).
++ * Have fixed it. - Yunpeng 2008. 12. 19
++ */
++ for (i = 1; i < DeviceInfo.wDataBlockNum; i++) {
++ if (IS_SPARE_BLOCK(i)) {
++ u32 wPhysicalIndex =
++ (u32)((~SPARE_BLOCK) & pbt[i]);
++ if (g_pReadCounter[wPhysicalIndex -
++ DeviceInfo.wSpectraStartBlock] <
++ wLeastReadCounter) {
++ wLeastReadCounter =
++ g_pReadCounter[wPhysicalIndex -
++ DeviceInfo.wSpectraStartBlock];
++ wLeastReadIndex = i;
++ }
++ wSpareBlockNum++;
++ }
++ }
++
++ if (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE) {
++ wResult = GLOB_FTL_Garbage_Collection();
++ if (PASS == wResult)
++ continue;
++ else
++ break;
++ } else {
++ wTempNode = (u32)(DISCARD_BLOCK | pbt[wBlockNum]);
++ wReplacedNode = (u32)((~SPARE_BLOCK) &
++ pbt[wLeastReadIndex]);
++#if CMD_DMA
++ pbt[wBlockNum] = wReplacedNode;
++ pbt[wLeastReadIndex] = wTempNode;
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index = wBlockNum;
++ p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++
++ p_BTableChangesDelta =
++ (struct BTableChangesDelta *)g_pBTDelta_Free;
++ g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
++
++ p_BTableChangesDelta->ftl_cmd_cnt =
++ ftl_cmd_cnt;
++ p_BTableChangesDelta->BT_Index = wLeastReadIndex;
++ p_BTableChangesDelta->BT_Entry_Value =
++ pbt[wLeastReadIndex];
++ p_BTableChangesDelta->ValidFields = 0x0C;
++
++ wResult = GLOB_LLD_Read_Page_Main_cdma(g_pTempBuf,
++ dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock,
++ LLD_CMD_FLAG_MODE_CDMA);
++ if (wResult == FAIL)
++ return wResult;
++
++ ftl_cmd_cnt++;
++
++ if (wResult != FAIL) {
++ if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
++ g_pTempBuf, pbt[wBlockNum], 0,
++ DeviceInfo.wPagesPerBlock)) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in "
++ "%s, Line %d, Function: %s, "
++ "new Bad Block %d "
++ "generated!\n",
++ __FILE__, __LINE__, __func__,
++ (int)pbt[wBlockNum]);
++ wResult = FAIL;
++ MARK_BLOCK_AS_BAD(pbt[wBlockNum]);
++ }
++ ftl_cmd_cnt++;
++ }
++#else
++ wResult = GLOB_LLD_Read_Page_Main(g_pTempBuf,
++ dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock);
++ if (wResult == FAIL)
++ return wResult;
++
++ if (wResult != FAIL) {
++ /* This is a bug. At this time, pbt[wBlockNum]
++ is still the physical address of
++ discard block, and should not be write.
++ Have fixed it as below.
++ -- Yunpeng 2008.12.19
++ */
++ wResult = GLOB_LLD_Write_Page_Main(g_pTempBuf,
++ wReplacedNode, 0,
++ DeviceInfo.wPagesPerBlock);
++ if (wResult == FAIL) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Program fail in "
++ "%s, Line %d, Function: %s, "
++ "new Bad Block %d "
++ "generated!\n",
++ __FILE__, __LINE__, __func__,
++ (int)wReplacedNode);
++ MARK_BLOCK_AS_BAD(wReplacedNode);
++ } else {
++ pbt[wBlockNum] = wReplacedNode;
++ pbt[wLeastReadIndex] = wTempNode;
++ }
++ }
++
++ if ((wResult == PASS) && (g_cBlockTableStatus !=
++ IN_PROGRESS_BLOCK_TABLE)) {
++ g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
++ FTL_Write_IN_Progress_Block_Table_Page();
++ }
++#endif
++ }
++ } while (wResult != PASS)
++ ;
++
++#if CMD_DMA
++ /* ... */
++#endif
++
++ return wResult;
++}
++
+diff --git a/drivers/block/spectra/flash.h b/drivers/block/spectra/flash.h
+new file mode 100644
+index 0000000..5ed0580
+--- /dev/null
++++ b/drivers/block/spectra/flash.h
+@@ -0,0 +1,198 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef _FLASH_INTERFACE_
++#define _FLASH_INTERFACE_
++
++#include "ffsport.h"
++#include "spectraswconfig.h"
++
++#define MAX_BYTE_VALUE 0xFF
++#define MAX_WORD_VALUE 0xFFFF
++#define MAX_U32_VALUE 0xFFFFFFFF
++
++#define MAX_BLOCKNODE_VALUE 0xFFFFFF
++#define DISCARD_BLOCK 0x800000
++#define SPARE_BLOCK 0x400000
++#define BAD_BLOCK 0xC00000
++
++#define UNHIT_CACHE_ITEM 0xFFFF
++
++#define NAND_CACHE_INIT_ADDR 0xffffffffffffffffULL
++
++#define IN_PROGRESS_BLOCK_TABLE 0x00
++#define CURRENT_BLOCK_TABLE 0x01
++
++#define BTSIG_OFFSET (0)
++#define BTSIG_BYTES (5)
++#define BTSIG_DELTA (3)
++
++#define MAX_READ_COUNTER 0x2710
++
++#define FIRST_BT_ID (1)
++#define LAST_BT_ID (254)
++#define BTBLOCK_INVAL (u32)(0xFFFFFFFF)
++
++struct device_info_tag {
++ u16 wDeviceMaker;
++ u16 wDeviceID;
++ u32 wDeviceType;
++ u32 wSpectraStartBlock;
++ u32 wSpectraEndBlock;
++ u32 wTotalBlocks;
++ u16 wPagesPerBlock;
++ u16 wPageSize;
++ u16 wPageDataSize;
++ u16 wPageSpareSize;
++ u16 wNumPageSpareFlag;
++ u16 wECCBytesPerSector;
++ u32 wBlockSize;
++ u32 wBlockDataSize;
++ u32 wDataBlockNum;
++ u8 bPlaneNum;
++ u16 wDeviceMainAreaSize;
++ u16 wDeviceSpareAreaSize;
++ u16 wDevicesConnected;
++ u16 wDeviceWidth;
++ u16 wHWRevision;
++ u16 wHWFeatures;
++
++ u16 wONFIDevFeatures;
++ u16 wONFIOptCommands;
++ u16 wONFITimingMode;
++ u16 wONFIPgmCacheTimingMode;
++
++ u16 MLCDevice;
++ u16 wSpareSkipBytes;
++
++ u8 nBitsInPageNumber;
++ u8 nBitsInPageDataSize;
++ u8 nBitsInBlockDataSize;
++};
++
++extern struct device_info_tag DeviceInfo;
++
++/* Cache item format */
++struct flash_cache_item_tag {
++ u64 address;
++ u16 use_cnt;
++ u16 changed;
++ u8 *buf;
++};
++
++struct flash_cache_tag {
++ u32 cache_item_size; /* Size in bytes of each cache item */
++ u16 pages_per_item; /* How many NAND pages in each cache item */
++ u16 LRU; /* No. of the least recently used cache item */
++ struct flash_cache_item_tag array[CACHE_ITEM_NUM];
++};
++
++/*
++ *Data structure for each list node of the managment table
++ * used for the Level 2 Cache. Each node maps one logical NAND block.
++ */
++struct spectra_l2_cache_list {
++ struct list_head list;
++ u32 logical_blk_num; /* Logical block number */
++ u32 pages_array[]; /* Page map array of this logical block.
++ * Array index is the logical block number,
++ * and for every item of this arry:
++ * high 16 bit is index of the L2 cache block num,
++ * low 16 bit is the phy page num
++ * of the above L2 cache block.
++ * This array will be kmalloc during run time.
++ */
++};
++
++struct spectra_l2_cache_info {
++ u32 blk_array[BLK_NUM_FOR_L2_CACHE];
++ u16 cur_blk_idx; /* idx to the phy block number of current using */
++ u16 cur_page_num; /* pages number of current using */
++ struct spectra_l2_cache_list table; /* First node of the table */
++};
++
++#define RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE 1
++
++#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
++struct flash_cache_mod_item_tag {
++ u64 address;
++ u8 changed;
++};
++
++struct flash_cache_delta_list_tag {
++ u8 item; /* used cache item */
++ struct flash_cache_mod_item_tag cache;
++};
++#endif
++
++extern struct flash_cache_tag Cache;
++
++extern u8 *buf_read_page_main_spare;
++extern u8 *buf_write_page_main_spare;
++extern u8 *buf_read_page_spare;
++extern u8 *buf_get_bad_block;
++extern u8 *cdma_desc_buf;
++extern u8 *memcp_desc_buf;
++
++/* struture used for IndentfyDevice function */
++struct spectra_indentfy_dev_tag {
++ u32 NumBlocks;
++ u16 PagesPerBlock;
++ u16 PageDataSize;
++ u16 wECCBytesPerSector;
++ u32 wDataBlockNum;
++};
++
++int GLOB_FTL_Flash_Init(void);
++int GLOB_FTL_Flash_Release(void);
++/*void GLOB_FTL_Erase_Flash(void);*/
++int GLOB_FTL_Block_Erase(u64 block_addr);
++int GLOB_FTL_Is_BadBlock(u32 block_num);
++int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data);
++int GLOB_FTL_Event_Status(int *);
++u16 glob_ftl_execute_cmds(void);
++
++/*int FTL_Read_Disturbance(ADDRESSTYPE dwBlockAddr);*/
++int FTL_Read_Disturbance(u32 dwBlockAddr);
++
++/*Flash r/w based on cache*/
++int GLOB_FTL_Page_Read(u8 *read_data, u64 page_addr);
++int GLOB_FTL_Page_Write(u8 *write_data, u64 page_addr);
++int GLOB_FTL_Wear_Leveling(void);
++int GLOB_FTL_Flash_Format(void);
++int GLOB_FTL_Init(void);
++int GLOB_FTL_Flush_Cache(void);
++int GLOB_FTL_Garbage_Collection(void);
++int GLOB_FTL_BT_Garbage_Collection(void);
++void GLOB_FTL_Cache_Release(void);
++u8 *get_blk_table_start_addr(void);
++u8 *get_wear_leveling_table_start_addr(void);
++unsigned long get_blk_table_len(void);
++unsigned long get_wear_leveling_table_len(void);
++
++#if DEBUG_BNDRY
++void debug_boundary_lineno_error(int chnl, int limit, int no, int lineno,
++ char *filename);
++#define debug_boundary_error(chnl, limit, no) debug_boundary_lineno_error(chnl,\
++ limit, no, __LINE__, __FILE__)
++#else
++#define debug_boundary_error(chnl, limit, no) ;
++#endif
++
++#endif /*_FLASH_INTERFACE_*/
+diff --git a/drivers/block/spectra/lld.c b/drivers/block/spectra/lld.c
+new file mode 100644
+index 0000000..3f411af
+--- /dev/null
++++ b/drivers/block/spectra/lld.c
+@@ -0,0 +1,258 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include "spectraswconfig.h"
++#include "ffsport.h"
++#include "ffsdefs.h"
++#include "lld.h"
++#include "lld_nand.h"
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++#if FLASH_EMU /* vector all the LLD calls to the LLD_EMU code */
++#include "lld_emu.h"
++#include "lld_cdma.h"
++
++/* common functions: */
++u16 GLOB_LLD_Flash_Reset(void)
++{
++ return emu_Flash_Reset();
++}
++
++u16 GLOB_LLD_Read_Device_ID(void)
++{
++ return emu_Read_Device_ID();
++}
++
++int GLOB_LLD_Flash_Release(void)
++{
++ return emu_Flash_Release();
++}
++
++u16 GLOB_LLD_Flash_Init(void)
++{
++ return emu_Flash_Init();
++}
++
++u16 GLOB_LLD_Erase_Block(u32 block_add)
++{
++ return emu_Erase_Block(block_add);
++}
++
++u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block, u16 Page,
++ u16 PageCount)
++{
++ return emu_Write_Page_Main(write_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block, u16 Page,
++ u16 PageCount)
++{
++ return emu_Read_Page_Main(read_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
++ u32 block, u16 page, u16 page_count)
++{
++ return emu_Read_Page_Main(read_data, block, page, page_count);
++}
++
++u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
++ u16 Page, u16 PageCount)
++{
++ return emu_Write_Page_Main_Spare(write_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
++ u16 Page, u16 PageCount)
++{
++ return emu_Read_Page_Main_Spare(read_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block, u16 Page,
++ u16 PageCount)
++{
++ return emu_Write_Page_Spare(write_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block, u16 Page,
++ u16 PageCount)
++{
++ return emu_Read_Page_Spare(read_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Get_Bad_Block(u32 block)
++{
++ return emu_Get_Bad_Block(block);
++}
++
++#endif /* FLASH_EMU */
++
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++#if FLASH_NAND /* vector all the LLD calls to the NAND controller code */
++#include "lld_nand.h"
++#include "lld_cdma.h"
++#include "flash.h"
++
++/* common functions for LLD_NAND */
++void GLOB_LLD_ECC_Control(int enable)
++{
++ NAND_ECC_Ctrl(enable);
++}
++
++/* common functions for LLD_NAND */
++u16 GLOB_LLD_Flash_Reset(void)
++{
++ return NAND_Flash_Reset();
++}
++
++u16 GLOB_LLD_Read_Device_ID(void)
++{
++ return NAND_Read_Device_ID();
++}
++
++u16 GLOB_LLD_UnlockArrayAll(void)
++{
++ return NAND_UnlockArrayAll();
++}
++
++u16 GLOB_LLD_Flash_Init(void)
++{
++ return NAND_Flash_Init();
++}
++
++int GLOB_LLD_Flash_Release(void)
++{
++ return nand_release();
++}
++
++u16 GLOB_LLD_Erase_Block(u32 block_add)
++{
++ return NAND_Erase_Block(block_add);
++}
++
++
++u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block, u16 Page,
++ u16 PageCount)
++{
++ return NAND_Write_Page_Main(write_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block, u16 page,
++ u16 page_count)
++{
++ if (page_count == 1) /* Using polling to improve read speed */
++ return NAND_Read_Page_Main_Polling(read_data, block, page, 1);
++ else
++ return NAND_Read_Page_Main(read_data, block, page, page_count);
++}
++
++u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
++ u32 block, u16 page, u16 page_count)
++{
++ return NAND_Read_Page_Main_Polling(read_data,
++ block, page, page_count);
++}
++
++u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
++ u16 Page, u16 PageCount)
++{
++ return NAND_Write_Page_Main_Spare(write_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block, u16 Page,
++ u16 PageCount)
++{
++ return NAND_Write_Page_Spare(write_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
++ u16 page, u16 page_count)
++{
++ return NAND_Read_Page_Main_Spare(read_data, block, page, page_count);
++}
++
++u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block, u16 Page,
++ u16 PageCount)
++{
++ return NAND_Read_Page_Spare(read_data, block, Page, PageCount);
++}
++
++u16 GLOB_LLD_Get_Bad_Block(u32 block)
++{
++ return NAND_Get_Bad_Block(block);
++}
++
++u16 GLOB_LLD_Event_Status(void)
++{
++ return CDMA_Event_Status();
++}
++
++u16 glob_lld_execute_cmds(void)
++{
++ return CDMA_Execute_CMDs();
++}
++
++u16 GLOB_LLD_MemCopy_CMD(u8 *dest, u8 *src,
++ u32 ByteCount, u16 flag)
++{
++ /* Replace the hardware memcopy with software memcpy function */
++ if (CDMA_Execute_CMDs())
++ return FAIL;
++ memcpy(dest, src, ByteCount);
++ return PASS;
++
++ /* return CDMA_MemCopy_CMD(dest, src, ByteCount, flag); */
++}
++
++u16 GLOB_LLD_Erase_Block_cdma(u32 block, u16 flags)
++{
++ return CDMA_Data_CMD(ERASE_CMD, 0, block, 0, 0, flags);
++}
++
++u16 GLOB_LLD_Write_Page_Main_cdma(u8 *data, u32 block, u16 page, u16 count)
++{
++ return CDMA_Data_CMD(WRITE_MAIN_CMD, data, block, page, count, 0);
++}
++
++u16 GLOB_LLD_Read_Page_Main_cdma(u8 *data, u32 block, u16 page,
++ u16 count, u16 flags)
++{
++ return CDMA_Data_CMD(READ_MAIN_CMD, data, block, page, count, flags);
++}
++
++u16 GLOB_LLD_Write_Page_Main_Spare_cdma(u8 *data, u32 block, u16 page,
++ u16 count, u16 flags)
++{
++ return CDMA_Data_CMD(WRITE_MAIN_SPARE_CMD,
++ data, block, page, count, flags);
++}
++
++u16 GLOB_LLD_Read_Page_Main_Spare_cdma(u8 *data,
++ u32 block, u16 page, u16 count)
++{
++ return CDMA_Data_CMD(READ_MAIN_SPARE_CMD, data, block, page, count,
++ LLD_CMD_FLAG_MODE_CDMA);
++}
++
++#endif /* FLASH_NAND */
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++
++/* end of LLD.c */
+diff --git a/drivers/block/spectra/lld.h b/drivers/block/spectra/lld.h
+new file mode 100644
+index 0000000..d3738e0
+--- /dev/null
++++ b/drivers/block/spectra/lld.h
+@@ -0,0 +1,111 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++
++
++#ifndef _LLD_
++#define _LLD_
++
++#include "ffsport.h"
++#include "spectraswconfig.h"
++#include "flash.h"
++
++#define GOOD_BLOCK 0
++#define DEFECTIVE_BLOCK 1
++#define READ_ERROR 2
++
++#define CLK_X 5
++#define CLK_MULTI 4
++
++/* Typedefs */
++
++/* prototypes: API for LLD */
++/* Currently, Write_Page_Main
++ * MemCopy
++ * Read_Page_Main_Spare
++ * do not have flag because they were not implemented prior to this
++ * They are not being added to keep changes to a minimum for now.
++ * Currently, they are not required (only reqd for Wr_P_M_S.)
++ * Later on, these NEED to be changed.
++ */
++
++extern void GLOB_LLD_ECC_Control(int enable);
++
++extern u16 GLOB_LLD_Flash_Reset(void);
++
++extern u16 GLOB_LLD_Read_Device_ID(void);
++
++extern u16 GLOB_LLD_UnlockArrayAll(void);
++
++extern u16 GLOB_LLD_Flash_Init(void);
++
++extern int GLOB_LLD_Flash_Release(void);
++
++extern u16 GLOB_LLD_Erase_Block(u32 block_add);
++
++extern u16 GLOB_LLD_Write_Page_Main(u8 *write_data,
++ u32 block, u16 Page, u16 PageCount);
++
++extern u16 GLOB_LLD_Read_Page_Main(u8 *read_data,
++ u32 block, u16 page, u16 page_count);
++
++extern u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
++ u32 block, u16 page, u16 page_count);
++
++extern u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data,
++ u32 block, u16 Page, u16 PageCount);
++
++extern u16 GLOB_LLD_Write_Page_Spare(u8 *write_data,
++ u32 block, u16 Page, u16 PageCount);
++
++extern u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data,
++ u32 block, u16 page, u16 page_count);
++
++extern u16 GLOB_LLD_Read_Page_Spare(u8 *read_data,
++ u32 block, u16 Page, u16 PageCount);
++
++extern u16 GLOB_LLD_Get_Bad_Block(u32 block);
++
++extern u16 GLOB_LLD_Event_Status(void);
++
++extern u16 GLOB_LLD_MemCopy_CMD(u8 *dest, u8 *src, u32 ByteCount, u16 flag);
++
++extern u16 glob_lld_execute_cmds(void);
++
++extern u16 GLOB_LLD_Erase_Block_cdma(u32 block, u16 flags);
++
++extern u16 GLOB_LLD_Write_Page_Main_cdma(u8 *data,
++ u32 block, u16 page, u16 count);
++
++extern u16 GLOB_LLD_Read_Page_Main_cdma(u8 *data,
++ u32 block, u16 page, u16 count, u16 flags);
++
++extern u16 GLOB_LLD_Write_Page_Main_Spare_cdma(u8 *data,
++ u32 block, u16 page, u16 count, u16 flags);
++
++extern u16 GLOB_LLD_Read_Page_Main_Spare_cdma(u8 *data,
++ u32 block, u16 page, u16 count);
++
++#define LLD_CMD_FLAG_ORDER_BEFORE_REST (0x1)
++#define LLD_CMD_FLAG_MODE_CDMA (0x8)
++
++
++#endif /*_LLD_ */
++
++
+diff --git a/drivers/block/spectra/lld_cdma.c b/drivers/block/spectra/lld_cdma.c
+new file mode 100644
+index 0000000..c6e7610
+--- /dev/null
++++ b/drivers/block/spectra/lld_cdma.c
+@@ -0,0 +1,910 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include <linux/fs.h>
++#include <linux/slab.h>
++
++#include "spectraswconfig.h"
++#include "lld.h"
++#include "lld_nand.h"
++#include "lld_cdma.h"
++#include "lld_emu.h"
++#include "flash.h"
++#include "nand_regs.h"
++
++#define MAX_PENDING_CMDS 4
++#define MODE_02 (0x2 << 26)
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: CDMA_Data_Cmd
++* Inputs: cmd code (aligned for hw)
++* data: pointer to source or destination
++* block: block address
++* page: page address
++* num: num pages to transfer
++* Outputs: PASS
++* Description: This function takes the parameters and puts them
++* into the "pending commands" array.
++* It does not parse or validate the parameters.
++* The array index is same as the tag.
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 CDMA_Data_CMD(u8 cmd, u8 *data, u32 block, u16 page, u16 num, u16 flags)
++{
++ u8 bank;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (0 == cmd)
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "%s, Line %d, Illegal cmd (0)\n", __FILE__, __LINE__);
++
++ /* If a command of another bank comes, then first execute */
++ /* pending commands of the current bank, then set the new */
++ /* bank as current bank */
++ bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++ if (bank != info.flash_bank) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "Will access new bank. old bank: %d, new bank: %d\n",
++ info.flash_bank, bank);
++ if (CDMA_Execute_CMDs()) {
++ printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
++ return FAIL;
++ }
++ info.flash_bank = bank;
++ }
++
++ info.pcmds[info.pcmds_num].CMD = cmd;
++ info.pcmds[info.pcmds_num].DataAddr = data;
++ info.pcmds[info.pcmds_num].Block = block;
++ info.pcmds[info.pcmds_num].Page = page;
++ info.pcmds[info.pcmds_num].PageCount = num;
++ info.pcmds[info.pcmds_num].DataDestAddr = 0;
++ info.pcmds[info.pcmds_num].DataSrcAddr = 0;
++ info.pcmds[info.pcmds_num].MemCopyByteCnt = 0;
++ info.pcmds[info.pcmds_num].Flags = flags;
++ info.pcmds[info.pcmds_num].Status = 0xB0B;
++
++ switch (cmd) {
++ case WRITE_MAIN_SPARE_CMD:
++ Conv_Main_Spare_Data_Log2Phy_Format(data, num);
++ break;
++ case WRITE_SPARE_CMD:
++ Conv_Spare_Data_Log2Phy_Format(data);
++ break;
++ default:
++ break;
++ }
++
++ info.pcmds_num++;
++
++ if (info.pcmds_num >= MAX_PENDING_CMDS) {
++ if (CDMA_Execute_CMDs()) {
++ printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
++ return FAIL;
++ }
++ }
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: CDMA_MemCopy_CMD
++* Inputs: dest: pointer to destination
++* src: pointer to source
++* count: num bytes to transfer
++* Outputs: PASS
++* Description: This function takes the parameters and puts them
++* into the "pending commands" array.
++* It does not parse or validate the parameters.
++* The array index is same as the tag.
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 CDMA_MemCopy_CMD(u8 *dest, u8 *src, u32 byte_cnt, u16 flags)
++{
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ info.pcmds[info.pcmds_num].CMD = MEMCOPY_CMD;
++ info.pcmds[info.pcmds_num].DataAddr = 0;
++ info.pcmds[info.pcmds_num].Block = 0;
++ info.pcmds[info.pcmds_num].Page = 0;
++ info.pcmds[info.pcmds_num].PageCount = 0;
++ info.pcmds[info.pcmds_num].DataDestAddr = dest;
++ info.pcmds[info.pcmds_num].DataSrcAddr = src;
++ info.pcmds[info.pcmds_num].MemCopyByteCnt = byte_cnt;
++ info.pcmds[info.pcmds_num].Flags = flags;
++ info.pcmds[info.pcmds_num].Status = 0xB0B;
++
++ info.pcmds_num++;
++
++ if (info.pcmds_num >= MAX_PENDING_CMDS) {
++ if (CDMA_Execute_CMDs()) {
++ printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
++ return FAIL;
++ }
++ }
++
++ return PASS;
++}
++
++#if 0
++/* Prints the PendingCMDs array */
++void print_pending_cmds(void)
++{
++ u16 i;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ for (i = 0; i < info.pcmds_num; i++) {
++ nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
++ switch (info.pcmds[i].CMD) {
++ case ERASE_CMD:
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Erase Command (0x%x)\n",
++ info.pcmds[i].CMD);
++ break;
++ case WRITE_MAIN_CMD:
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Write Main Command (0x%x)\n",
++ info.pcmds[i].CMD);
++ break;
++ case WRITE_MAIN_SPARE_CMD:
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Write Main Spare Command (0x%x)\n",
++ info.pcmds[i].CMD);
++ break;
++ case READ_MAIN_SPARE_CMD:
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Read Main Spare Command (0x%x)\n",
++ info.pcmds[i].CMD);
++ break;
++ case READ_MAIN_CMD:
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Read Main Command (0x%x)\n",
++ info.pcmds[i].CMD);
++ break;
++ case MEMCOPY_CMD:
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Memcopy Command (0x%x)\n",
++ info.pcmds[i].CMD);
++ break;
++ case DUMMY_CMD:
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Dummy Command (0x%x)\n",
++ info.pcmds[i].CMD);
++ break;
++ default:
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Illegal Command (0x%x)\n",
++ info.pcmds[i].CMD);
++ break;
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG, "DataAddr: 0x%x\n",
++ (u32)info.pcmds[i].DataAddr);
++ nand_dbg_print(NAND_DBG_DEBUG, "Block: %d\n",
++ info.pcmds[i].Block);
++ nand_dbg_print(NAND_DBG_DEBUG, "Page: %d\n",
++ info.pcmds[i].Page);
++ nand_dbg_print(NAND_DBG_DEBUG, "PageCount: %d\n",
++ info.pcmds[i].PageCount);
++ nand_dbg_print(NAND_DBG_DEBUG, "DataDestAddr: 0x%x\n",
++ (u32)info.pcmds[i].DataDestAddr);
++ nand_dbg_print(NAND_DBG_DEBUG, "DataSrcAddr: 0x%x\n",
++ (u32)info.pcmds[i].DataSrcAddr);
++ nand_dbg_print(NAND_DBG_DEBUG, "MemCopyByteCnt: %d\n",
++ info.pcmds[i].MemCopyByteCnt);
++ nand_dbg_print(NAND_DBG_DEBUG, "Flags: 0x%x\n",
++ info.pcmds[i].Flags);
++ nand_dbg_print(NAND_DBG_DEBUG, "Status: 0x%x\n",
++ info.pcmds[i].Status);
++ }
++}
++
++/* Print the CDMA descriptors */
++void print_cdma_descriptors(void)
++{
++ struct cdma_descriptor *pc;
++ int i;
++
++ pc = (struct cdma_descriptor *)info.cdma_desc_buf;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "\nWill dump cdma descriptors:\n");
++
++ for (i = 0; i < info.cdma_num; i++) {
++ nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "NxtPointerHi: 0x%x, NxtPointerLo: 0x%x\n",
++ pc[i].NxtPointerHi, pc[i].NxtPointerLo);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "FlashPointerHi: 0x%x, FlashPointerLo: 0x%x\n",
++ pc[i].FlashPointerHi, pc[i].FlashPointerLo);
++ nand_dbg_print(NAND_DBG_DEBUG, "CommandType: 0x%x\n",
++ pc[i].CommandType);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "MemAddrHi: 0x%x, MemAddrLo: 0x%x\n",
++ pc[i].MemAddrHi, pc[i].MemAddrLo);
++ nand_dbg_print(NAND_DBG_DEBUG, "CommandFlags: 0x%x\n",
++ pc[i].CommandFlags);
++ nand_dbg_print(NAND_DBG_DEBUG, "Channel: %d, Status: 0x%x\n",
++ pc[i].Channel, pc[i].Status);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "MemCopyPointerHi: 0x%x, MemCopyPointerLo: 0x%x\n",
++ pc[i].MemCopyPointerHi, pc[i].MemCopyPointerLo);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Reserved12: 0x%x, Reserved13: 0x%x, "
++ "Reserved14: 0x%x, pcmd: %d\n",
++ pc[i].Reserved12, pc[i].Reserved13,
++ pc[i].Reserved14, pc[i].pcmd);
++ }
++}
++
++/* Print the Memory copy descriptors */
++static void print_memcp_descriptors(void)
++{
++ struct memcpy_descriptor *pm;
++ int i;
++
++ pm = (struct memcpy_descriptor *)info.memcp_desc_buf;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "\nWill dump mem_cpy descriptors:\n");
++
++ for (i = 0; i < info.cdma_num; i++) {
++ nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "NxtPointerHi: 0x%x, NxtPointerLo: 0x%x\n",
++ pm[i].NxtPointerHi, pm[i].NxtPointerLo);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "SrcAddrHi: 0x%x, SrcAddrLo: 0x%x\n",
++ pm[i].SrcAddrHi, pm[i].SrcAddrLo);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "DestAddrHi: 0x%x, DestAddrLo: 0x%x\n",
++ pm[i].DestAddrHi, pm[i].DestAddrLo);
++ nand_dbg_print(NAND_DBG_DEBUG, "XferSize: %d\n",
++ pm[i].XferSize);
++ nand_dbg_print(NAND_DBG_DEBUG, "MemCopyFlags: 0x%x\n",
++ pm[i].MemCopyFlags);
++ nand_dbg_print(NAND_DBG_DEBUG, "MemCopyStatus: %d\n",
++ pm[i].MemCopyStatus);
++ nand_dbg_print(NAND_DBG_DEBUG, "reserved9: 0x%x\n",
++ pm[i].reserved9);
++ nand_dbg_print(NAND_DBG_DEBUG, "reserved10: 0x%x\n",
++ pm[i].reserved10);
++ nand_dbg_print(NAND_DBG_DEBUG, "reserved11: 0x%x\n",
++ pm[i].reserved11);
++ nand_dbg_print(NAND_DBG_DEBUG, "reserved12: 0x%x\n",
++ pm[i].reserved12);
++ nand_dbg_print(NAND_DBG_DEBUG, "reserved13: 0x%x\n",
++ pm[i].reserved13);
++ nand_dbg_print(NAND_DBG_DEBUG, "reserved14: 0x%x\n",
++ pm[i].reserved14);
++ nand_dbg_print(NAND_DBG_DEBUG, "reserved15: 0x%x\n",
++ pm[i].reserved15);
++ }
++}
++#endif
++
++/* Reset cdma_descriptor chain to 0 */
++static void reset_cdma_desc(int i)
++{
++ struct cdma_descriptor *ptr;
++
++ BUG_ON(i >= MAX_DESCS);
++
++ ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
++
++ ptr[i].NxtPointerHi = 0;
++ ptr[i].NxtPointerLo = 0;
++ ptr[i].FlashPointerHi = 0;
++ ptr[i].FlashPointerLo = 0;
++ ptr[i].CommandType = 0;
++ ptr[i].MemAddrHi = 0;
++ ptr[i].MemAddrLo = 0;
++ ptr[i].CommandFlags = 0;
++ ptr[i].Channel = 0;
++ ptr[i].Status = 0;
++ ptr[i].MemCopyPointerHi = 0;
++ ptr[i].MemCopyPointerLo = 0;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: CDMA_UpdateEventStatus
++* Inputs: none
++* Outputs: none
++* Description: This function update the event status of all the channels
++* when an error condition is reported.
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++void CDMA_UpdateEventStatus(void)
++{
++ int i, j, active_chan;
++ struct cdma_descriptor *ptr;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
++
++ for (j = 0; j < info.cdma_num; j++) {
++ /* Check for the descriptor with failure */
++ if ((ptr[j].Status & CMD_DMA_DESC_FAIL))
++ break;
++
++ }
++
++ /* All the previous cmd's status for this channel must be good */
++ for (i = 0; i < j; i++) {
++ if (ptr[i].pcmd != 0xff)
++ info.pcmds[ptr[i].pcmd].Status = CMD_PASS;
++ }
++
++ /* Abort the channel with type 0 reset command. It resets the */
++ /* selected channel after the descriptor completes the flash */
++ /* operation and status has been updated for the descriptor. */
++ /* Memory Copy and Sync associated with this descriptor will */
++ /* not be executed */
++ active_chan = ioread32(FlashReg + CHNL_ACTIVE);
++ if ((active_chan & (1 << info.flash_bank)) == (1 << info.flash_bank)) {
++ iowrite32(MODE_02 | (0 << 4), FlashMem); /* Type 0 reset */
++ iowrite32((0xF << 4) | info.flash_bank, FlashMem + 0x10);
++ } else { /* Should not reached here */
++ printk(KERN_ERR "Error! Used bank is not set in"
++ " reg CHNL_ACTIVE\n");
++ }
++}
++
++static void cdma_trans(u16 chan)
++{
++ u32 addr;
++
++ addr = info.cdma_desc;
++
++ iowrite32(MODE_10 | (chan << 24), FlashMem);
++ iowrite32((1 << 7) | chan, FlashMem + 0x10);
++
++ iowrite32(MODE_10 | (chan << 24) | ((0x0FFFF & (addr >> 16)) << 8),
++ FlashMem);
++ iowrite32((1 << 7) | (1 << 4) | 0, FlashMem + 0x10);
++
++ iowrite32(MODE_10 | (chan << 24) | ((0x0FFFF & addr) << 8), FlashMem);
++ iowrite32((1 << 7) | (1 << 5) | 0, FlashMem + 0x10);
++
++ iowrite32(MODE_10 | (chan << 24), FlashMem);
++ iowrite32((1 << 7) | (1 << 5) | (1 << 4) | 0, FlashMem + 0x10);
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: CDMA_Execute_CMDs (for use with CMD_DMA)
++* Inputs: tag_count: the number of pending cmds to do
++* Outputs: PASS/FAIL
++* Description: Build the SDMA chain(s) by making one CMD-DMA descriptor
++* for each pending command, start the CDMA engine, and return.
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 CDMA_Execute_CMDs(void)
++{
++ int i, ret;
++ u64 flash_add;
++ u32 ptr;
++ dma_addr_t map_addr, next_ptr;
++ u16 status = PASS;
++ u16 tmp_c;
++ struct cdma_descriptor *pc;
++ struct memcpy_descriptor *pm;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ /* No pending cmds to execute, just exit */
++ if (0 == info.pcmds_num) {
++ nand_dbg_print(NAND_DBG_TRACE,
++ "No pending cmds to execute. Just exit.\n");
++ return PASS;
++ }
++
++ for (i = 0; i < MAX_DESCS; i++)
++ reset_cdma_desc(i);
++
++ pc = (struct cdma_descriptor *)info.cdma_desc_buf;
++ pm = (struct memcpy_descriptor *)info.memcp_desc_buf;
++
++ info.cdma_desc = virt_to_bus(info.cdma_desc_buf);
++ info.memcp_desc = virt_to_bus(info.memcp_desc_buf);
++ next_ptr = info.cdma_desc;
++ info.cdma_num = 0;
++
++ for (i = 0; i < info.pcmds_num; i++) {
++ if (info.pcmds[i].Block >= DeviceInfo.wTotalBlocks) {
++ info.pcmds[i].Status = CMD_NOT_DONE;
++ continue;
++ }
++
++ next_ptr += sizeof(struct cdma_descriptor);
++ pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
++ pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;
++
++ /* Use the Block offset within a bank */
++ tmp_c = info.pcmds[i].Block /
++ (DeviceInfo.wTotalBlocks / totalUsedBanks);
++ flash_add = (u64)(info.pcmds[i].Block - tmp_c *
++ (DeviceInfo.wTotalBlocks / totalUsedBanks)) *
++ DeviceInfo.wBlockDataSize +
++ (u64)(info.pcmds[i].Page) *
++ DeviceInfo.wPageDataSize;
++
++ ptr = MODE_10 | (info.flash_bank << 24) |
++ (u32)GLOB_u64_Div(flash_add,
++ DeviceInfo.wPageDataSize);
++ pc[info.cdma_num].FlashPointerHi = ptr >> 16;
++ pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
++
++ if ((info.pcmds[i].CMD == WRITE_MAIN_SPARE_CMD) ||
++ (info.pcmds[i].CMD == READ_MAIN_SPARE_CMD)) {
++ /* Descriptor to set Main+Spare Access Mode */
++ pc[info.cdma_num].CommandType = 0x43;
++ pc[info.cdma_num].CommandFlags =
++ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
++ pc[info.cdma_num].MemAddrHi = 0;
++ pc[info.cdma_num].MemAddrLo = 0;
++ pc[info.cdma_num].Channel = 0;
++ pc[info.cdma_num].Status = 0;
++ pc[info.cdma_num].pcmd = i;
++
++ info.cdma_num++;
++ BUG_ON(info.cdma_num >= MAX_DESCS);
++
++ reset_cdma_desc(info.cdma_num);
++ next_ptr += sizeof(struct cdma_descriptor);
++ pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
++ pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;
++ pc[info.cdma_num].FlashPointerHi = ptr >> 16;
++ pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
++ }
++
++ switch (info.pcmds[i].CMD) {
++ case ERASE_CMD:
++ pc[info.cdma_num].CommandType = 1;
++ pc[info.cdma_num].CommandFlags =
++ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
++ pc[info.cdma_num].MemAddrHi = 0;
++ pc[info.cdma_num].MemAddrLo = 0;
++ break;
++
++ case WRITE_MAIN_CMD:
++ pc[info.cdma_num].CommandType =
++ 0x2100 | info.pcmds[i].PageCount;
++ pc[info.cdma_num].CommandFlags =
++ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
++ map_addr = virt_to_bus(info.pcmds[i].DataAddr);
++ pc[info.cdma_num].MemAddrHi = map_addr >> 16;
++ pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
++ break;
++
++ case READ_MAIN_CMD:
++ pc[info.cdma_num].CommandType =
++ 0x2000 | info.pcmds[i].PageCount;
++ pc[info.cdma_num].CommandFlags =
++ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
++ map_addr = virt_to_bus(info.pcmds[i].DataAddr);
++ pc[info.cdma_num].MemAddrHi = map_addr >> 16;
++ pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
++ break;
++
++ case WRITE_MAIN_SPARE_CMD:
++ pc[info.cdma_num].CommandType =
++ 0x2100 | info.pcmds[i].PageCount;
++ pc[info.cdma_num].CommandFlags =
++ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
++ map_addr = virt_to_bus(info.pcmds[i].DataAddr);
++ pc[info.cdma_num].MemAddrHi = map_addr >> 16;
++ pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
++ break;
++
++ case READ_MAIN_SPARE_CMD:
++ pc[info.cdma_num].CommandType =
++ 0x2000 | info.pcmds[i].PageCount;
++ pc[info.cdma_num].CommandFlags =
++ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
++ map_addr = virt_to_bus(info.pcmds[i].DataAddr);
++ pc[info.cdma_num].MemAddrHi = map_addr >> 16;
++ pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
++ break;
++
++ case MEMCOPY_CMD:
++ pc[info.cdma_num].CommandType = 0xFFFF; /* NOP cmd */
++ /* Set bit 11 to let the CDMA engine continue to */
++ /* execute only after it has finished processing */
++ /* the memcopy descriptor. */
++ /* Also set bit 10 and bit 9 to 1 */
++ pc[info.cdma_num].CommandFlags = 0x0E40;
++ map_addr = info.memcp_desc + info.cdma_num *
++ sizeof(struct memcpy_descriptor);
++ pc[info.cdma_num].MemCopyPointerHi = map_addr >> 16;
++ pc[info.cdma_num].MemCopyPointerLo = map_addr & 0xffff;
++
++ pm[info.cdma_num].NxtPointerHi = 0;
++ pm[info.cdma_num].NxtPointerLo = 0;
++
++ map_addr = virt_to_bus(info.pcmds[i].DataSrcAddr);
++ pm[info.cdma_num].SrcAddrHi = map_addr >> 16;
++ pm[info.cdma_num].SrcAddrLo = map_addr & 0xffff;
++ map_addr = virt_to_bus(info.pcmds[i].DataDestAddr);
++ pm[info.cdma_num].DestAddrHi = map_addr >> 16;
++ pm[info.cdma_num].DestAddrLo = map_addr & 0xffff;
++
++ pm[info.cdma_num].XferSize =
++ info.pcmds[i].MemCopyByteCnt;
++ pm[info.cdma_num].MemCopyFlags =
++ (0 << 15 | 0 << 14 | 27 << 8 | 0x40);
++ pm[info.cdma_num].MemCopyStatus = 0;
++ break;
++
++ case DUMMY_CMD:
++ default:
++ pc[info.cdma_num].CommandType = 0XFFFF;
++ pc[info.cdma_num].CommandFlags =
++ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
++ pc[info.cdma_num].MemAddrHi = 0;
++ pc[info.cdma_num].MemAddrLo = 0;
++ break;
++ }
++
++ pc[info.cdma_num].Channel = 0;
++ pc[info.cdma_num].Status = 0;
++ pc[info.cdma_num].pcmd = i;
++
++ info.cdma_num++;
++ BUG_ON(info.cdma_num >= MAX_DESCS);
++
++ if ((info.pcmds[i].CMD == WRITE_MAIN_SPARE_CMD) ||
++ (info.pcmds[i].CMD == READ_MAIN_SPARE_CMD)) {
++ /* Descriptor to set back Main Area Access Mode */
++ reset_cdma_desc(info.cdma_num);
++ next_ptr += sizeof(struct cdma_descriptor);
++ pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
++ pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;
++
++ pc[info.cdma_num].FlashPointerHi = ptr >> 16;
++ pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
++
++ pc[info.cdma_num].CommandType = 0x42;
++ pc[info.cdma_num].CommandFlags =
++ (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
++ pc[info.cdma_num].MemAddrHi = 0;
++ pc[info.cdma_num].MemAddrLo = 0;
++
++ pc[info.cdma_num].Channel = 0;
++ pc[info.cdma_num].Status = 0;
++ pc[info.cdma_num].pcmd = i;
++
++ info.cdma_num++;
++ BUG_ON(info.cdma_num >= MAX_DESCS);
++ }
++ }
++
++ /* Add a dummy descriptor at end of the CDMA chain */
++ reset_cdma_desc(info.cdma_num);
++ ptr = MODE_10 | (info.flash_bank << 24);
++ pc[info.cdma_num].FlashPointerHi = ptr >> 16;
++ pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
++ pc[info.cdma_num].CommandType = 0xFFFF; /* NOP command */
++ /* Set Command Flags for the last CDMA descriptor: */
++ /* set Continue bit (bit 9) to 0 and Interrupt bit (bit 8) to 1 */
++ pc[info.cdma_num].CommandFlags =
++ (0 << 10) | (0 << 9) | (1 << 8) | 0x40;
++ pc[info.cdma_num].pcmd = 0xff; /* Set it to an illegal value */
++ info.cdma_num++;
++ BUG_ON(info.cdma_num >= MAX_DESCS);
++
++ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */
++
++ iowrite32(1, FlashReg + DMA_ENABLE);
++ /* Wait for DMA to be enabled before issuing the next command */
++ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++ cdma_trans(info.flash_bank);
++
++ ret = wait_for_completion_timeout(&info.complete, 50 * HZ);
++ if (!ret)
++ printk(KERN_ERR "Wait for completion timeout "
++ "in %s, Line %d\n", __FILE__, __LINE__);
++ status = info.ret;
++
++ info.pcmds_num = 0; /* Clear the pending cmds number to 0 */
++
++ return status;
++}
++
++int is_cdma_interrupt(void)
++{
++ u32 ints_b0, ints_b1, ints_b2, ints_b3, ints_cdma;
++ u32 int_en_mask;
++ u32 cdma_int_en_mask;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ /* Set the global Enable masks for only those interrupts
++ * that are supported */
++ cdma_int_en_mask = (DMA_INTR__DESC_COMP_CHANNEL0 |
++ DMA_INTR__DESC_COMP_CHANNEL1 |
++ DMA_INTR__DESC_COMP_CHANNEL2 |
++ DMA_INTR__DESC_COMP_CHANNEL3 |
++ DMA_INTR__MEMCOPY_DESC_COMP);
++
++ int_en_mask = (INTR_STATUS0__ECC_ERR |
++ INTR_STATUS0__PROGRAM_FAIL |
++ INTR_STATUS0__ERASE_FAIL);
++
++ ints_b0 = ioread32(FlashReg + INTR_STATUS0) & int_en_mask;
++ ints_b1 = ioread32(FlashReg + INTR_STATUS1) & int_en_mask;
++ ints_b2 = ioread32(FlashReg + INTR_STATUS2) & int_en_mask;
++ ints_b3 = ioread32(FlashReg + INTR_STATUS3) & int_en_mask;
++ ints_cdma = ioread32(FlashReg + DMA_INTR) & cdma_int_en_mask;
++
++ nand_dbg_print(NAND_DBG_WARN, "ints_bank0 to ints_bank3: "
++ "0x%x, 0x%x, 0x%x, 0x%x, ints_cdma: 0x%x\n",
++ ints_b0, ints_b1, ints_b2, ints_b3, ints_cdma);
++
++ if (ints_b0 || ints_b1 || ints_b2 || ints_b3 || ints_cdma) {
++ return 1;
++ } else {
++ iowrite32(ints_b0, FlashReg + INTR_STATUS0);
++ iowrite32(ints_b1, FlashReg + INTR_STATUS1);
++ iowrite32(ints_b2, FlashReg + INTR_STATUS2);
++ iowrite32(ints_b3, FlashReg + INTR_STATUS3);
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Not a NAND controller interrupt! Ignore it.\n");
++ return 0;
++ }
++}
++
++static void update_event_status(void)
++{
++ int i;
++ struct cdma_descriptor *ptr;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
++
++ for (i = 0; i < info.cdma_num; i++) {
++ if (ptr[i].pcmd != 0xff)
++ info.pcmds[ptr[i].pcmd].Status = CMD_PASS;
++ if ((ptr[i].CommandType == 0x41) ||
++ (ptr[i].CommandType == 0x42) ||
++ (ptr[i].CommandType == 0x43))
++ continue;
++
++ switch (info.pcmds[ptr[i].pcmd].CMD) {
++ case READ_MAIN_SPARE_CMD:
++ Conv_Main_Spare_Data_Phy2Log_Format(
++ info.pcmds[ptr[i].pcmd].DataAddr,
++ info.pcmds[ptr[i].pcmd].PageCount);
++ break;
++ case READ_SPARE_CMD:
++ Conv_Spare_Data_Phy2Log_Format(
++ info.pcmds[ptr[i].pcmd].DataAddr);
++ break;
++ }
++ }
++}
++
++static u16 do_ecc_for_desc(u32 ch, u8 *buf, u16 page)
++{
++ u16 event = EVENT_NONE;
++ u16 err_byte;
++ u16 err_page = 0;
++ u8 err_sector;
++ u8 err_device;
++ u16 ecc_correction_info;
++ u16 err_address;
++ u32 eccSectorSize;
++ u8 *err_pos;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
++
++ do {
++ if (0 == ch)
++ err_page = ioread32(FlashReg + ERR_PAGE_ADDR0);
++ else if (1 == ch)
++ err_page = ioread32(FlashReg + ERR_PAGE_ADDR1);
++ else if (2 == ch)
++ err_page = ioread32(FlashReg + ERR_PAGE_ADDR2);
++ else if (3 == ch)
++ err_page = ioread32(FlashReg + ERR_PAGE_ADDR3);
++
++ err_address = ioread32(FlashReg + ECC_ERROR_ADDRESS);
++ err_byte = err_address & ECC_ERROR_ADDRESS__OFFSET;
++ err_sector = ((err_address &
++ ECC_ERROR_ADDRESS__SECTOR_NR) >> 12);
++
++ ecc_correction_info = ioread32(FlashReg + ERR_CORRECTION_INFO);
++ err_device = ((ecc_correction_info &
++ ERR_CORRECTION_INFO__DEVICE_NR) >> 8);
++
++ if (ecc_correction_info & ERR_CORRECTION_INFO__ERROR_TYPE) {
++ event = EVENT_UNCORRECTABLE_DATA_ERROR;
++ } else {
++ event = EVENT_CORRECTABLE_DATA_ERROR_FIXED;
++ if (err_byte < ECC_SECTOR_SIZE) {
++ err_pos = buf +
++ (err_page - page) *
++ DeviceInfo.wPageDataSize +
++ err_sector * eccSectorSize +
++ err_byte *
++ DeviceInfo.wDevicesConnected +
++ err_device;
++ *err_pos ^= ecc_correction_info &
++ ERR_CORRECTION_INFO__BYTEMASK;
++ }
++ }
++ } while (!(ecc_correction_info & ERR_CORRECTION_INFO__LAST_ERR_INFO));
++
++ return event;
++}
++
++static u16 process_ecc_int(u32 c, u16 *p_desc_num)
++{
++ struct cdma_descriptor *ptr;
++ u16 j;
++ int event = EVENT_PASS;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (c != info.flash_bank)
++ printk(KERN_ERR "Error!info.flash_bank is %d, while c is %d\n",
++ info.flash_bank, c);
++
++ ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
++
++ for (j = 0; j < info.cdma_num; j++)
++ if ((ptr[j].Status & CMD_DMA_DESC_COMP) != CMD_DMA_DESC_COMP)
++ break;
++
++	*p_desc_num = j; /* Pass the descriptor number found here */
++
++ if (j >= info.cdma_num) {
++ printk(KERN_ERR "Can not find the correct descriptor number "
++ "when ecc interrupt triggered!"
++ "info.cdma_num: %d, j: %d\n", info.cdma_num, j);
++ return EVENT_UNCORRECTABLE_DATA_ERROR;
++ }
++
++ event = do_ecc_for_desc(c, info.pcmds[ptr[j].pcmd].DataAddr,
++ info.pcmds[ptr[j].pcmd].Page);
++
++ if (EVENT_UNCORRECTABLE_DATA_ERROR == event) {
++ printk(KERN_ERR "Uncorrectable ECC error!"
++ "info.cdma_num: %d, j: %d, "
++ "pending cmd CMD: 0x%x, "
++ "Block: 0x%x, Page: 0x%x, PageCount: 0x%x\n",
++ info.cdma_num, j,
++ info.pcmds[ptr[j].pcmd].CMD,
++ info.pcmds[ptr[j].pcmd].Block,
++ info.pcmds[ptr[j].pcmd].Page,
++ info.pcmds[ptr[j].pcmd].PageCount);
++
++ if (ptr[j].pcmd != 0xff)
++ info.pcmds[ptr[j].pcmd].Status = CMD_FAIL;
++ CDMA_UpdateEventStatus();
++ }
++
++ return event;
++}
++
++static void process_prog_erase_fail_int(u16 desc_num)
++{
++ struct cdma_descriptor *ptr;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
++
++ if (ptr[desc_num].pcmd != 0xFF)
++ info.pcmds[ptr[desc_num].pcmd].Status = CMD_FAIL;
++
++ CDMA_UpdateEventStatus();
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: CDMA_Event_Status (for use with CMD_DMA)
++* Inputs: none
++* Outputs: Event_Status code
++* Description: This function is called after an interrupt has happened
++* It reads the HW status register and ...tbd
++* It returns the appropriate event status
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 CDMA_Event_Status(void)
++{
++ u32 ints_addr[4] = {INTR_STATUS0, INTR_STATUS1,
++ INTR_STATUS2, INTR_STATUS3};
++ u32 dma_intr_bit[4] = {DMA_INTR__DESC_COMP_CHANNEL0,
++ DMA_INTR__DESC_COMP_CHANNEL1,
++ DMA_INTR__DESC_COMP_CHANNEL2,
++ DMA_INTR__DESC_COMP_CHANNEL3};
++ u32 cdma_int_status, int_status;
++ u32 ecc_enable = 0;
++ u16 event = EVENT_PASS;
++ u16 cur_desc = 0;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ ecc_enable = ioread32(FlashReg + ECC_ENABLE);
++
++ while (1) {
++ int_status = ioread32(FlashReg + ints_addr[info.flash_bank]);
++ if (ecc_enable && (int_status & INTR_STATUS0__ECC_ERR)) {
++ event = process_ecc_int(info.flash_bank, &cur_desc);
++ iowrite32(INTR_STATUS0__ECC_ERR,
++ FlashReg + ints_addr[info.flash_bank]);
++ if (EVENT_UNCORRECTABLE_DATA_ERROR == event) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "ints_bank0 to ints_bank3: "
++ "0x%x, 0x%x, 0x%x, 0x%x, "
++ "ints_cdma: 0x%x\n",
++ ioread32(FlashReg + INTR_STATUS0),
++ ioread32(FlashReg + INTR_STATUS1),
++ ioread32(FlashReg + INTR_STATUS2),
++ ioread32(FlashReg + INTR_STATUS3),
++ ioread32(FlashReg + DMA_INTR));
++ break;
++ }
++ } else if (int_status & INTR_STATUS0__PROGRAM_FAIL) {
++ printk(KERN_ERR "NAND program fail interrupt!\n");
++ process_prog_erase_fail_int(cur_desc);
++ event = EVENT_PROGRAM_FAILURE;
++ break;
++ } else if (int_status & INTR_STATUS0__ERASE_FAIL) {
++ printk(KERN_ERR "NAND erase fail interrupt!\n");
++ process_prog_erase_fail_int(cur_desc);
++ event = EVENT_ERASE_FAILURE;
++ break;
++ } else {
++ cdma_int_status = ioread32(FlashReg + DMA_INTR);
++ if (cdma_int_status & dma_intr_bit[info.flash_bank]) {
++ iowrite32(dma_intr_bit[info.flash_bank],
++ FlashReg + DMA_INTR);
++ update_event_status();
++ event = EVENT_PASS;
++ break;
++ }
++ }
++ }
++
++ int_status = ioread32(FlashReg + ints_addr[info.flash_bank]);
++ iowrite32(int_status, FlashReg + ints_addr[info.flash_bank]);
++ cdma_int_status = ioread32(FlashReg + DMA_INTR);
++ iowrite32(cdma_int_status, FlashReg + DMA_INTR);
++
++ iowrite32(0, FlashReg + DMA_ENABLE);
++ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ return event;
++}
++
++
++
+diff --git a/drivers/block/spectra/lld_cdma.h b/drivers/block/spectra/lld_cdma.h
+new file mode 100644
+index 0000000..854ea06
+--- /dev/null
++++ b/drivers/block/spectra/lld_cdma.h
+@@ -0,0 +1,123 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++/* header for LLD_CDMA.c module */
++
++#ifndef _LLD_CDMA_
++#define _LLD_CDMA_
++
++#include "flash.h"
++
++#define DEBUG_SYNC 1
++
++/*/////////// CDMA specific MACRO definition */
++#define MAX_DESCS (255)
++#define MAX_CHANS (4)
++#define MAX_SYNC_POINTS (16)
++#define MAX_DESC_PER_CHAN (MAX_DESCS * 3 + MAX_SYNC_POINTS + 2)
++
++#define CHANNEL_SYNC_MASK (0x000F)
++#define CHANNEL_DMA_MASK (0x00F0)
++#define CHANNEL_ID_MASK (0x0300)
++#define CHANNEL_CONT_MASK (0x4000)
++#define CHANNEL_INTR_MASK (0x8000)
++
++#define CHANNEL_SYNC_OFFSET (0)
++#define CHANNEL_DMA_OFFSET (4)
++#define CHANNEL_ID_OFFSET (8)
++#define CHANNEL_CONT_OFFSET (14)
++#define CHANNEL_INTR_OFFSET (15)
++
++u16 CDMA_Data_CMD(u8 cmd, u8 *data, u32 block, u16 page, u16 num, u16 flags);
++u16 CDMA_MemCopy_CMD(u8 *dest, u8 *src, u32 byte_cnt, u16 flags);
++u16 CDMA_Execute_CMDs(void);
++void print_pending_cmds(void);
++void print_cdma_descriptors(void);
++
++extern u8 g_SBDCmdIndex;
++extern struct mrst_nand_info info;
++
++
++/*/////////// prototypes: APIs for LLD_CDMA */
++int is_cdma_interrupt(void);
++u16 CDMA_Event_Status(void);
++
++/* CMD-DMA Descriptor Struct. These are defined by the CMD_DMA HW */
++struct cdma_descriptor {
++ u32 NxtPointerHi;
++ u32 NxtPointerLo;
++ u32 FlashPointerHi;
++ u32 FlashPointerLo;
++ u32 CommandType;
++ u32 MemAddrHi;
++ u32 MemAddrLo;
++ u32 CommandFlags;
++ u32 Channel;
++ u32 Status;
++ u32 MemCopyPointerHi;
++ u32 MemCopyPointerLo;
++ u32 Reserved12;
++ u32 Reserved13;
++ u32 Reserved14;
++ u32 pcmd; /* pending cmd num related to this descriptor */
++};
++
++/* This struct holds one MemCopy descriptor as defined by the HW */
++struct memcpy_descriptor {
++ u32 NxtPointerHi;
++ u32 NxtPointerLo;
++ u32 SrcAddrHi;
++ u32 SrcAddrLo;
++ u32 DestAddrHi;
++ u32 DestAddrLo;
++ u32 XferSize;
++ u32 MemCopyFlags;
++ u32 MemCopyStatus;
++ u32 reserved9;
++ u32 reserved10;
++ u32 reserved11;
++ u32 reserved12;
++ u32 reserved13;
++ u32 reserved14;
++ u32 reserved15;
++};
++
++/* Pending CMD table entries (includes MemCopy parameters) */
++struct pending_cmd {
++ u8 CMD;
++ u8 *DataAddr;
++ u32 Block;
++ u16 Page;
++ u16 PageCount;
++ u8 *DataDestAddr;
++ u8 *DataSrcAddr;
++ u32 MemCopyByteCnt;
++ u16 Flags;
++ u16 Status;
++};
++
++#if DEBUG_SYNC
++extern u32 debug_sync_cnt;
++#endif
++
++/* Definitions for CMD DMA descriptor chain fields */
++#define CMD_DMA_DESC_COMP 0x8000
++#define CMD_DMA_DESC_FAIL 0x4000
++
++#endif /*_LLD_CDMA_*/
+diff --git a/drivers/block/spectra/lld_emu.c b/drivers/block/spectra/lld_emu.c
+new file mode 100644
+index 0000000..60eb0f6
+--- /dev/null
++++ b/drivers/block/spectra/lld_emu.c
+@@ -0,0 +1,780 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include "flash.h"
++#include "ffsdefs.h"
++#include "lld_emu.h"
++#include "lld.h"
++#if CMD_DMA
++#include "lld_cdma.h"
++#endif
++
++#define GLOB_LLD_PAGES 64
++#define GLOB_LLD_PAGE_SIZE (512+16)
++#define GLOB_LLD_PAGE_DATA_SIZE 512
++#define GLOB_LLD_BLOCKS 2048
++
++#if (CMD_DMA && FLASH_EMU)
++#include "lld_cdma.h"
++u32 totalUsedBanks;
++u32 valid_banks[MAX_CHANS];
++#endif
++
++#if FLASH_EMU /* This is for entire module */
++
++static u8 *flash_memory[GLOB_LLD_BLOCKS * GLOB_LLD_PAGES];
++
++/* Read nand emu file and then fill it's content to flash_memory */
++int emu_load_file_to_mem(void)
++{
++ mm_segment_t fs;
++ struct file *nef_filp = NULL;
++ struct inode *inode = NULL;
++ loff_t nef_size = 0;
++ loff_t tmp_file_offset, file_offset;
++ ssize_t nread;
++ int i, rc = -EINVAL;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ fs = get_fs();
++ set_fs(get_ds());
++
++ nef_filp = filp_open("/root/nand_emu_file", O_RDWR | O_LARGEFILE, 0);
++ if (IS_ERR(nef_filp)) {
++ printk(KERN_ERR "filp_open error: "
++ "Unable to open nand emu file!\n");
++ return PTR_ERR(nef_filp);
++ }
++
++ if (nef_filp->f_path.dentry) {
++ inode = nef_filp->f_path.dentry->d_inode;
++ } else {
++ printk(KERN_ERR "Can not get valid inode!\n");
++ goto out;
++ }
++
++ nef_size = i_size_read(inode->i_mapping->host);
++ if (nef_size <= 0) {
++ printk(KERN_ERR "Invalid nand emu file size: "
++ "0x%llx\n", nef_size);
++ goto out;
++ } else {
++ nand_dbg_print(NAND_DBG_DEBUG, "nand emu file size: %lld\n",
++ nef_size);
++ }
++
++ file_offset = 0;
++ for (i = 0; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++) {
++ tmp_file_offset = file_offset;
++ nread = vfs_read(nef_filp,
++ (char __user *)flash_memory[i],
++ GLOB_LLD_PAGE_SIZE, &tmp_file_offset);
++ if (nread < GLOB_LLD_PAGE_SIZE) {
++ printk(KERN_ERR "%s, Line %d - "
++ "nand emu file partial read: "
++ "%d bytes\n", __FILE__, __LINE__, (int)nread);
++ goto out;
++ }
++ file_offset += GLOB_LLD_PAGE_SIZE;
++ }
++ rc = 0;
++
++out:
++ filp_close(nef_filp, current->files);
++ set_fs(fs);
++ return rc;
++}
++
++/* Write contents of flash_memory to nand emu file */
++int emu_write_mem_to_file(void)
++{
++ mm_segment_t fs;
++ struct file *nef_filp = NULL;
++ struct inode *inode = NULL;
++ loff_t nef_size = 0;
++ loff_t tmp_file_offset, file_offset;
++ ssize_t nwritten;
++ int i, rc = -EINVAL;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ fs = get_fs();
++ set_fs(get_ds());
++
++ nef_filp = filp_open("/root/nand_emu_file", O_RDWR | O_LARGEFILE, 0);
++ if (IS_ERR(nef_filp)) {
++ printk(KERN_ERR "filp_open error: "
++ "Unable to open nand emu file!\n");
++ return PTR_ERR(nef_filp);
++ }
++
++ if (nef_filp->f_path.dentry) {
++ inode = nef_filp->f_path.dentry->d_inode;
++ } else {
++ printk(KERN_ERR "Invalid " "nef_filp->f_path.dentry value!\n");
++ goto out;
++ }
++
++ nef_size = i_size_read(inode->i_mapping->host);
++ if (nef_size <= 0) {
++ printk(KERN_ERR "Invalid "
++ "nand emu file size: 0x%llx\n", nef_size);
++ goto out;
++ } else {
++ nand_dbg_print(NAND_DBG_DEBUG, "nand emu file size: "
++ "%lld\n", nef_size);
++ }
++
++ file_offset = 0;
++ for (i = 0; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++) {
++ tmp_file_offset = file_offset;
++ nwritten = vfs_write(nef_filp,
++ (char __user *)flash_memory[i],
++ GLOB_LLD_PAGE_SIZE, &tmp_file_offset);
++ if (nwritten < GLOB_LLD_PAGE_SIZE) {
++ printk(KERN_ERR "%s, Line %d - "
++ "nand emu file partial write: "
++ "%d bytes\n", __FILE__, __LINE__, (int)nwritten);
++ goto out;
++ }
++ file_offset += GLOB_LLD_PAGE_SIZE;
++ }
++ rc = 0;
++
++out:
++ filp_close(nef_filp, current->files);
++ set_fs(fs);
++ return rc;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Flash_Init
++* Inputs: none
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Creates & initializes the flash RAM array.
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_Flash_Init(void)
++{
++ int i;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ flash_memory[0] = (u8 *)vmalloc(GLOB_LLD_PAGE_SIZE *
++ GLOB_LLD_BLOCKS *
++ GLOB_LLD_PAGES *
++ sizeof(u8));
++ if (!flash_memory[0]) {
++ printk(KERN_ERR "Fail to allocate memory "
++ "for nand emulator!\n");
++ return ERR;
++ }
++
++ memset((char *)(flash_memory[0]), 0xFF,
++ GLOB_LLD_PAGE_SIZE * GLOB_LLD_BLOCKS * GLOB_LLD_PAGES *
++ sizeof(u8));
++
++ for (i = 1; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++)
++ flash_memory[i] = flash_memory[i - 1] + GLOB_LLD_PAGE_SIZE;
++
++ emu_load_file_to_mem(); /* Load nand emu file to mem */
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Flash_Release
++* Inputs: none
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Releases the flash.
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++int emu_Flash_Release(void)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ emu_write_mem_to_file(); /* Write back mem to nand emu file */
++
++ vfree(flash_memory[0]);
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Read_Device_ID
++* Inputs: none
++* Outputs: PASS=1 FAIL=0
++* Description: Reads the info from the controller registers.
++* Sets up DeviceInfo structure with device parameters
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++
++u16 emu_Read_Device_ID(void)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ DeviceInfo.wDeviceMaker = 0;
++ DeviceInfo.wDeviceType = 8;
++ DeviceInfo.wSpectraStartBlock = 36;
++ DeviceInfo.wSpectraEndBlock = GLOB_LLD_BLOCKS - 1;
++ DeviceInfo.wTotalBlocks = GLOB_LLD_BLOCKS;
++ DeviceInfo.wPagesPerBlock = GLOB_LLD_PAGES;
++ DeviceInfo.wPageSize = GLOB_LLD_PAGE_SIZE;
++ DeviceInfo.wPageDataSize = GLOB_LLD_PAGE_DATA_SIZE;
++ DeviceInfo.wPageSpareSize = GLOB_LLD_PAGE_SIZE -
++ GLOB_LLD_PAGE_DATA_SIZE;
++ DeviceInfo.wBlockSize = DeviceInfo.wPageSize * GLOB_LLD_PAGES;
++ DeviceInfo.wBlockDataSize = DeviceInfo.wPageDataSize * GLOB_LLD_PAGES;
++ DeviceInfo.wDataBlockNum = (u32) (DeviceInfo.wSpectraEndBlock -
++ DeviceInfo.wSpectraStartBlock
++ + 1);
++ DeviceInfo.MLCDevice = 1; /* Emulate MLC device */
++ DeviceInfo.nBitsInPageNumber =
++ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
++ DeviceInfo.nBitsInPageDataSize =
++ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
++ DeviceInfo.nBitsInBlockDataSize =
++ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);
++
++#if CMD_DMA
++ totalUsedBanks = 4;
++ valid_banks[0] = 1;
++ valid_banks[1] = 1;
++ valid_banks[2] = 1;
++ valid_banks[3] = 1;
++#endif
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Flash_Reset
++* Inputs: none
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Reset the flash
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_Flash_Reset(void)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Erase_Block
++* Inputs: Address
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Erase a block
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_Erase_Block(u32 block_add)
++{
++ int i;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (block_add >= DeviceInfo.wTotalBlocks) {
++ printk(KERN_ERR "emu_Erase_Block error! "
++ "Too big block address: %d\n", block_add);
++ return FAIL;
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG, "Erasing block %d\n",
++ (int)block_add);
++
++ for (i = block_add * GLOB_LLD_PAGES;
++ i < ((block_add + 1) * GLOB_LLD_PAGES); i++) {
++ if (flash_memory[i]) {
++ memset((u8 *)(flash_memory[i]), 0xFF,
++ DeviceInfo.wPageSize * sizeof(u8));
++ }
++ }
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Write_Page_Main
++* Inputs: Write buffer address pointer
++* Block number
++* Page number
++* Number of pages to process
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Write the data in the buffer to main area of flash
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_Write_Page_Main(u8 *write_data, u32 Block,
++ u16 Page, u16 PageCount)
++{
++ int i;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (Block >= DeviceInfo.wTotalBlocks)
++ return FAIL;
++
++ if (Page + PageCount > DeviceInfo.wPagesPerBlock)
++ return FAIL;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "emu_Write_Page_Main: "
++ "lba %u Page %u PageCount %u\n",
++ (unsigned int)Block,
++ (unsigned int)Page, (unsigned int)PageCount);
++
++ for (i = 0; i < PageCount; i++) {
++ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
++ printk(KERN_ERR "Run out of memory\n");
++ return FAIL;
++ }
++ memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]),
++ write_data, DeviceInfo.wPageDataSize);
++ write_data += DeviceInfo.wPageDataSize;
++ Page++;
++ }
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Read_Page_Main
++* Inputs: Read buffer address pointer
++* Block number
++* Page number
++* Number of pages to process
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Read the data from the flash main area to the buffer
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_Read_Page_Main(u8 *read_data, u32 Block,
++ u16 Page, u16 PageCount)
++{
++ int i;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (Block >= DeviceInfo.wTotalBlocks)
++ return FAIL;
++
++ if (Page + PageCount > DeviceInfo.wPagesPerBlock)
++ return FAIL;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "emu_Read_Page_Main: "
++ "lba %u Page %u PageCount %u\n",
++ (unsigned int)Block,
++ (unsigned int)Page, (unsigned int)PageCount);
++
++ for (i = 0; i < PageCount; i++) {
++ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
++ memset(read_data, 0xFF, DeviceInfo.wPageDataSize);
++ } else {
++ memcpy(read_data,
++ (u8 *) (flash_memory[Block * GLOB_LLD_PAGES
++ + Page]),
++ DeviceInfo.wPageDataSize);
++ }
++ read_data += DeviceInfo.wPageDataSize;
++ Page++;
++ }
++
++ return PASS;
++}
++
++#ifndef ELDORA
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Read_Page_Main_Spare
++* Inputs: Write Buffer
++* Address
++* Buffer size
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Read from flash main+spare area
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_Read_Page_Main_Spare(u8 *read_data, u32 Block,
++ u16 Page, u16 PageCount)
++{
++ int i;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (Block >= DeviceInfo.wTotalBlocks) {
++ printk(KERN_ERR "Read Page Main+Spare "
++ "Error: Block Address too big\n");
++ return FAIL;
++ }
++
++ if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
++ printk(KERN_ERR "Read Page Main+Spare "
++ "Error: Page number too big\n");
++ return FAIL;
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG, "Read Page Main + Spare - "
++ "No. of pages %u block %u start page %u\n",
++ (unsigned int)PageCount,
++ (unsigned int)Block, (unsigned int)Page);
++
++ for (i = 0; i < PageCount; i++) {
++ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
++ memset(read_data, 0xFF, DeviceInfo.wPageSize);
++ } else {
++ memcpy(read_data, (u8 *) (flash_memory[Block *
++ GLOB_LLD_PAGES
++ + Page]),
++ DeviceInfo.wPageSize);
++ }
++
++ read_data += DeviceInfo.wPageSize;
++ Page++;
++ }
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Write_Page_Main_Spare
++* Inputs: Write buffer
++* address
++* buffer length
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Write the buffer to main+spare area of flash
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_Write_Page_Main_Spare(u8 *write_data, u32 Block,
++ u16 Page, u16 page_count)
++{
++ u16 i;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (Block >= DeviceInfo.wTotalBlocks) {
++ printk(KERN_ERR "Write Page Main + Spare "
++ "Error: Block Address too big\n");
++ return FAIL;
++ }
++
++ if (Page + page_count > DeviceInfo.wPagesPerBlock) {
++ printk(KERN_ERR "Write Page Main + Spare "
++ "Error: Page number too big\n");
++ return FAIL;
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG, "Write Page Main+Spare - "
++ "No. of pages %u block %u start page %u\n",
++ (unsigned int)page_count,
++ (unsigned int)Block, (unsigned int)Page);
++
++ for (i = 0; i < page_count; i++) {
++ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
++ printk(KERN_ERR "Run out of memory!\n");
++ return FAIL;
++ }
++ memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]),
++ write_data, DeviceInfo.wPageSize);
++ write_data += DeviceInfo.wPageSize;
++ Page++;
++ }
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Write_Page_Spare
++* Inputs: Write buffer
++* Address
++* buffer size
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Write the buffer in the spare area
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_Write_Page_Spare(u8 *write_data, u32 Block,
++ u16 Page, u16 PageCount)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (Block >= DeviceInfo.wTotalBlocks) {
++ printk(KERN_ERR "Read Page Spare Error: "
++ "Block Address too big\n");
++ return FAIL;
++ }
++
++ if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
++ printk(KERN_ERR "Read Page Spare Error: "
++ "Page number too big\n");
++ return FAIL;
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG, "Write Page Spare- "
++ "block %u page %u\n",
++ (unsigned int)Block, (unsigned int)Page);
++
++ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
++ printk(KERN_ERR "Run out of memory!\n");
++ return FAIL;
++ }
++
++ memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page] +
++ DeviceInfo.wPageDataSize), write_data,
++ (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Read_Page_Spare
++* Inputs: Write Buffer
++* Address
++* Buffer size
++* Outputs: PASS=0 (notice 0=ok here)
++* Description: Read data from the spare area
++*
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_Read_Page_Spare(u8 *write_data, u32 Block,
++ u16 Page, u16 PageCount)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (Block >= DeviceInfo.wTotalBlocks) {
++ printk(KERN_ERR "Read Page Spare "
++ "Error: Block Address too big\n");
++ return FAIL;
++ }
++
++ if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
++ printk(KERN_ERR "Read Page Spare "
++ "Error: Page number too big\n");
++ return FAIL;
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG, "Read Page Spare- "
++ "block %u page %u\n",
++ (unsigned int)Block, (unsigned int)Page);
++
++ if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
++ memset(write_data, 0xFF,
++ (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
++ } else {
++ memcpy(write_data,
++ (u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]
++ + DeviceInfo.wPageDataSize),
++ (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
++ }
++
++ return PASS;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Enable_Disable_Interrupts
++* Inputs: enable or disable
++* Outputs: none
++* Description: NOP
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++void emu_Enable_Disable_Interrupts(u16 INT_ENABLE)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++}
++
++u16 emu_Get_Bad_Block(u32 block)
++{
++ return 0;
++}
++
++#if CMD_DMA
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Support for CDMA functions
++************************************
++* emu_CDMA_Flash_Init
++* CDMA_process_data command (use LLD_CDMA)
++* CDMA_MemCopy_CMD (use LLD_CDMA)
++* emu_CDMA_execute all commands
++* emu_CDMA_Event_Status
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_CDMA_Flash_Init(void)
++{
++ u16 i;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ for (i = 0; i < MAX_DESCS + MAX_CHANS; i++) {
++ PendingCMD[i].CMD = 0;
++ PendingCMD[i].Tag = 0;
++ PendingCMD[i].DataAddr = 0;
++ PendingCMD[i].Block = 0;
++ PendingCMD[i].Page = 0;
++ PendingCMD[i].PageCount = 0;
++ PendingCMD[i].DataDestAddr = 0;
++ PendingCMD[i].DataSrcAddr = 0;
++ PendingCMD[i].MemCopyByteCnt = 0;
++ PendingCMD[i].ChanSync[0] = 0;
++ PendingCMD[i].ChanSync[1] = 0;
++ PendingCMD[i].ChanSync[2] = 0;
++ PendingCMD[i].ChanSync[3] = 0;
++ PendingCMD[i].ChanSync[4] = 0;
++ PendingCMD[i].Status = 3;
++ }
++
++ return PASS;
++}
++
++static void emu_isr(int irq, void *dev_id)
++{
++ /* TODO: ... */
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: CDMA_Execute_CMDs
++* Inputs: tag_count: the number of pending cmds to do
++* Outputs: PASS/FAIL
++* Description: execute each command in the pending CMD array
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_CDMA_Execute_CMDs(u16 tag_count)
++{
++ u16 i, j;
++ u8 CMD; /* cmd parameter */
++ u8 *data;
++ u32 block;
++ u16 page;
++ u16 count;
++ u16 status = PASS;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ nand_dbg_print(NAND_DBG_TRACE, "At start of Execute CMDs: "
++ "Tag Count %u\n", tag_count);
++
++ for (i = 0; i < totalUsedBanks; i++) {
++ PendingCMD[i].CMD = DUMMY_CMD;
++ PendingCMD[i].Tag = 0xFF;
++ PendingCMD[i].Block =
++ (DeviceInfo.wTotalBlocks / totalUsedBanks) * i;
++
++ for (j = 0; j <= MAX_CHANS; j++)
++ PendingCMD[i].ChanSync[j] = 0;
++ }
++
++ CDMA_Execute_CMDs(tag_count);
++
++ print_pending_cmds(tag_count);
++
++#if DEBUG_SYNC
++ }
++ debug_sync_cnt++;
++#endif
++
++ for (i = MAX_CHANS;
++ i < tag_count + MAX_CHANS; i++) {
++ CMD = PendingCMD[i].CMD;
++ data = PendingCMD[i].DataAddr;
++ block = PendingCMD[i].Block;
++ page = PendingCMD[i].Page;
++ count = PendingCMD[i].PageCount;
++
++ switch (CMD) {
++ case ERASE_CMD:
++ emu_Erase_Block(block);
++ PendingCMD[i].Status = PASS;
++ break;
++ case WRITE_MAIN_CMD:
++ emu_Write_Page_Main(data, block, page, count);
++ PendingCMD[i].Status = PASS;
++ break;
++ case WRITE_MAIN_SPARE_CMD:
++ emu_Write_Page_Main_Spare(data, block, page, count);
++ PendingCMD[i].Status = PASS;
++ break;
++ case READ_MAIN_CMD:
++ emu_Read_Page_Main(data, block, page, count);
++ PendingCMD[i].Status = PASS;
++ break;
++ case MEMCOPY_CMD:
++ memcpy(PendingCMD[i].DataDestAddr,
++ PendingCMD[i].DataSrcAddr,
++ PendingCMD[i].MemCopyByteCnt);
++ case DUMMY_CMD:
++ PendingCMD[i].Status = PASS;
++ break;
++ default:
++ PendingCMD[i].Status = FAIL;
++ break;
++ }
++ }
++
++ /*
++ * Temperory adding code to reset PendingCMD array for basic testing.
++ * It should be done at the end of event status function.
++ */
++ for (i = tag_count + MAX_CHANS; i < MAX_DESCS; i++) {
++ PendingCMD[i].CMD = 0;
++ PendingCMD[i].Tag = 0;
++ PendingCMD[i].DataAddr = 0;
++ PendingCMD[i].Block = 0;
++ PendingCMD[i].Page = 0;
++ PendingCMD[i].PageCount = 0;
++ PendingCMD[i].DataDestAddr = 0;
++ PendingCMD[i].DataSrcAddr = 0;
++ PendingCMD[i].MemCopyByteCnt = 0;
++ PendingCMD[i].ChanSync[0] = 0;
++ PendingCMD[i].ChanSync[1] = 0;
++ PendingCMD[i].ChanSync[2] = 0;
++ PendingCMD[i].ChanSync[3] = 0;
++ PendingCMD[i].ChanSync[4] = 0;
++ PendingCMD[i].Status = CMD_NOT_DONE;
++ }
++
++ nand_dbg_print(NAND_DBG_TRACE, "At end of Execute CMDs.\n");
++
++ emu_isr(0, 0); /* This is a null isr now. Need fill it in future */
++
++ return status;
++}
++
++/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
++* Function: emu_Event_Status
++* Inputs: none
++* Outputs: Event_Status code
++* Description: This function can also be used to force errors
++*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
++u16 emu_CDMA_Event_Status(void)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ return EVENT_PASS;
++}
++
++#endif /* CMD_DMA */
++#endif /* !ELDORA */
++#endif /* FLASH_EMU */
+diff --git a/drivers/block/spectra/lld_emu.h b/drivers/block/spectra/lld_emu.h
+new file mode 100644
+index 0000000..63f84c3
+--- /dev/null
++++ b/drivers/block/spectra/lld_emu.h
+@@ -0,0 +1,51 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef _LLD_EMU_
++#define _LLD_EMU_
++
++#include "ffsport.h"
++#include "ffsdefs.h"
++
++/* prototypes: emulator API functions */
++extern u16 emu_Flash_Reset(void);
++extern u16 emu_Flash_Init(void);
++extern int emu_Flash_Release(void);
++extern u16 emu_Read_Device_ID(void);
++extern u16 emu_Erase_Block(u32 block_addr);
++extern u16 emu_Write_Page_Main(u8 *write_data, u32 Block,
++ u16 Page, u16 PageCount);
++extern u16 emu_Read_Page_Main(u8 *read_data, u32 Block, u16 Page,
++ u16 PageCount);
++extern u16 emu_Event_Status(void);
++extern void emu_Enable_Disable_Interrupts(u16 INT_ENABLE);
++extern u16 emu_Write_Page_Main_Spare(u8 *write_data, u32 Block,
++ u16 Page, u16 PageCount);
++extern u16 emu_Write_Page_Spare(u8 *write_data, u32 Block,
++ u16 Page, u16 PageCount);
++extern u16 emu_Read_Page_Main_Spare(u8 *read_data, u32 Block,
++ u16 Page, u16 PageCount);
++extern u16 emu_Read_Page_Spare(u8 *read_data, u32 Block, u16 Page,
++ u16 PageCount);
++extern u16 emu_Get_Bad_Block(u32 block);
++
++u16 emu_CDMA_Flash_Init(void);
++u16 emu_CDMA_Execute_CMDs(u16 tag_count);
++u16 emu_CDMA_Event_Status(void);
++#endif /*_LLD_EMU_*/
+diff --git a/drivers/block/spectra/lld_nand.c b/drivers/block/spectra/lld_nand.c
+new file mode 100644
+index 0000000..8c279b8
+--- /dev/null
++++ b/drivers/block/spectra/lld_nand.c
+@@ -0,0 +1,2601 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include "lld.h"
++#include "lld_nand.h"
++#include "lld_cdma.h"
++
++#include "spectraswconfig.h"
++#include "flash.h"
++#include "ffsdefs.h"
++
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/wait.h>
++#include <linux/mutex.h>
++
++#include "nand_regs.h"
++
++#define SPECTRA_NAND_NAME "nd"
++
++#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
++#define MAX_PAGES_PER_RW 128
++
++#define INT_IDLE_STATE 0
++#define INT_READ_PAGE_MAIN 0x01
++#define INT_WRITE_PAGE_MAIN 0x02
++#define INT_PIPELINE_READ_AHEAD 0x04
++#define INT_PIPELINE_WRITE_AHEAD 0x08
++#define INT_MULTI_PLANE_READ 0x10
++#define INT_MULTI_PLANE_WRITE 0x11
++
++static u32 enable_ecc;
++
++struct mrst_nand_info info;
++
++int totalUsedBanks;
++u32 GLOB_valid_banks[LLD_MAX_FLASH_BANKS];
++
++void __iomem *FlashReg;
++void __iomem *FlashMem;
++
++u16 conf_parameters[] = {
++ 0x0000,
++ 0x0000,
++ 0x01F4,
++ 0x01F4,
++ 0x01F4,
++ 0x01F4,
++ 0x0000,
++ 0x0000,
++ 0x0001,
++ 0x0000,
++ 0x0000,
++ 0x0000,
++ 0x0000,
++ 0x0040,
++ 0x0001,
++ 0x000A,
++ 0x000A,
++ 0x000A,
++ 0x0000,
++ 0x0000,
++ 0x0005,
++ 0x0012,
++ 0x000C
++};
++
++u16 NAND_Get_Bad_Block(u32 block)
++{
++ u32 status = PASS;
++ u32 flag_bytes = 0;
++ u32 skip_bytes = DeviceInfo.wSpareSkipBytes;
++ u32 page, i;
++ u8 *pReadSpareBuf = buf_get_bad_block;
++
++ if (enable_ecc)
++ flag_bytes = DeviceInfo.wNumPageSpareFlag;
++
++ for (page = 0; page < 2; page++) {
++ status = NAND_Read_Page_Spare(pReadSpareBuf, block, page, 1);
++ if (status != PASS)
++ return READ_ERROR;
++ for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
++ if (pReadSpareBuf[i] != 0xff)
++ return DEFECTIVE_BLOCK;
++ }
++
++ for (page = 1; page < 3; page++) {
++ status = NAND_Read_Page_Spare(pReadSpareBuf, block,
++ DeviceInfo.wPagesPerBlock - page , 1);
++ if (status != PASS)
++ return READ_ERROR;
++ for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
++ if (pReadSpareBuf[i] != 0xff)
++ return DEFECTIVE_BLOCK;
++ }
++
++ return GOOD_BLOCK;
++}
++
++
++u16 NAND_Flash_Reset(void)
++{
++ u32 i;
++ u32 intr_status_rst_comp[4] = {INTR_STATUS0__RST_COMP,
++ INTR_STATUS1__RST_COMP,
++ INTR_STATUS2__RST_COMP,
++ INTR_STATUS3__RST_COMP};
++ u32 intr_status_time_out[4] = {INTR_STATUS0__TIME_OUT,
++ INTR_STATUS1__TIME_OUT,
++ INTR_STATUS2__TIME_OUT,
++ INTR_STATUS3__TIME_OUT};
++ u32 intr_status[4] = {INTR_STATUS0, INTR_STATUS1,
++ INTR_STATUS2, INTR_STATUS3};
++ u32 device_reset_banks[4] = {DEVICE_RESET__BANK0,
++ DEVICE_RESET__BANK1,
++ DEVICE_RESET__BANK2,
++ DEVICE_RESET__BANK3};
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
++ iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
++ FlashReg + intr_status[i]);
++
++ for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
++ iowrite32(device_reset_banks[i], FlashReg + DEVICE_RESET);
++ while (!(ioread32(FlashReg + intr_status[i]) &
++ (intr_status_rst_comp[i] | intr_status_time_out[i])))
++ ;
++ if (ioread32(FlashReg + intr_status[i]) &
++ intr_status_time_out[i])
++ nand_dbg_print(NAND_DBG_WARN,
++ "NAND Reset operation timed out on bank %d\n", i);
++ }
++
++ for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
++ iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
++ FlashReg + intr_status[i]);
++
++ return PASS;
++}
++
++static void NAND_ONFi_Timing_Mode(u16 mode)
++{
++ u16 Trea[6] = {40, 30, 25, 20, 20, 16};
++ u16 Trp[6] = {50, 25, 17, 15, 12, 10};
++ u16 Treh[6] = {30, 15, 15, 10, 10, 7};
++ u16 Trc[6] = {100, 50, 35, 30, 25, 20};
++ u16 Trhoh[6] = {0, 15, 15, 15, 15, 15};
++ u16 Trloh[6] = {0, 0, 0, 0, 5, 5};
++ u16 Tcea[6] = {100, 45, 30, 25, 25, 25};
++ u16 Tadl[6] = {200, 100, 100, 100, 70, 70};
++ u16 Trhw[6] = {200, 100, 100, 100, 100, 100};
++ u16 Trhz[6] = {200, 100, 100, 100, 100, 100};
++ u16 Twhr[6] = {120, 80, 80, 60, 60, 60};
++ u16 Tcs[6] = {70, 35, 25, 25, 20, 15};
++
++ u16 TclsRising = 1;
++ u16 data_invalid_rhoh, data_invalid_rloh, data_invalid;
++ u16 dv_window = 0;
++ u16 en_lo, en_hi;
++ u16 acc_clks;
++ u16 addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ en_lo = CEIL_DIV(Trp[mode], CLK_X);
++ en_hi = CEIL_DIV(Treh[mode], CLK_X);
++
++#if ONFI_BLOOM_TIME
++ if ((en_hi * CLK_X) < (Treh[mode] + 2))
++ en_hi++;
++#endif
++
++ if ((en_lo + en_hi) * CLK_X < Trc[mode])
++ en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
++
++ if ((en_lo + en_hi) < CLK_MULTI)
++ en_lo += CLK_MULTI - en_lo - en_hi;
++
++ while (dv_window < 8) {
++ data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
++
++ data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
++
++ data_invalid =
++ data_invalid_rhoh <
++ data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;
++
++ dv_window = data_invalid - Trea[mode];
++
++ if (dv_window < 8)
++ en_lo++;
++ }
++
++ acc_clks = CEIL_DIV(Trea[mode], CLK_X);
++
++ while (((acc_clks * CLK_X) - Trea[mode]) < 3)
++ acc_clks++;
++
++ if ((data_invalid - acc_clks * CLK_X) < 2)
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n",
++ __FILE__, __LINE__);
++
++ addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
++ re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
++ re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
++ we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
++ cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
++ if (!TclsRising)
++ cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
++ if (cs_cnt == 0)
++ cs_cnt = 1;
++
++ if (Tcea[mode]) {
++ while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])
++ cs_cnt++;
++ }
++
++#if MODE5_WORKAROUND
++ if (mode == 5)
++ acc_clks = 5;
++#endif
++
++ /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
++ if ((ioread32(FlashReg + MANUFACTURER_ID) == 0) &&
++ (ioread32(FlashReg + DEVICE_ID) == 0x88))
++ acc_clks = 6;
++
++ iowrite32(acc_clks, FlashReg + ACC_CLKS);
++ iowrite32(re_2_we, FlashReg + RE_2_WE);
++ iowrite32(re_2_re, FlashReg + RE_2_RE);
++ iowrite32(we_2_re, FlashReg + WE_2_RE);
++ iowrite32(addr_2_data, FlashReg + ADDR_2_DATA);
++ iowrite32(en_lo, FlashReg + RDWR_EN_LO_CNT);
++ iowrite32(en_hi, FlashReg + RDWR_EN_HI_CNT);
++ iowrite32(cs_cnt, FlashReg + CS_SETUP_CNT);
++}
++
++static void index_addr(u32 address, u32 data)
++{
++ iowrite32(address, FlashMem);
++ iowrite32(data, FlashMem + 0x10);
++}
++
++static void index_addr_read_data(u32 address, u32 *pdata)
++{
++ iowrite32(address, FlashMem);
++ *pdata = ioread32(FlashMem + 0x10);
++}
++
++static void set_ecc_config(void)
++{
++#if SUPPORT_8BITECC
++ if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) < 4096) ||
++ (ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) <= 128))
++ iowrite32(8, FlashReg + ECC_CORRECTION);
++#endif
++
++ if ((ioread32(FlashReg + ECC_CORRECTION) & ECC_CORRECTION__VALUE)
++ == 1) {
++ DeviceInfo.wECCBytesPerSector = 4;
++ DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
++ DeviceInfo.wNumPageSpareFlag =
++ DeviceInfo.wPageSpareSize -
++ DeviceInfo.wPageDataSize /
++ (ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
++ DeviceInfo.wECCBytesPerSector
++ - DeviceInfo.wSpareSkipBytes;
++ } else {
++ DeviceInfo.wECCBytesPerSector =
++ (ioread32(FlashReg + ECC_CORRECTION) &
++ ECC_CORRECTION__VALUE) * 13 / 8;
++ if ((DeviceInfo.wECCBytesPerSector) % 2 == 0)
++ DeviceInfo.wECCBytesPerSector += 2;
++ else
++ DeviceInfo.wECCBytesPerSector += 1;
++
++ DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
++ DeviceInfo.wNumPageSpareFlag = DeviceInfo.wPageSpareSize -
++ DeviceInfo.wPageDataSize /
++ (ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
++ DeviceInfo.wECCBytesPerSector
++ - DeviceInfo.wSpareSkipBytes;
++ }
++}
++
++static u16 get_onfi_nand_para(void)
++{
++ int i;
++ u16 blks_lun_l, blks_lun_h, n_of_luns;
++ u32 blockperlun, id;
++
++ iowrite32(DEVICE_RESET__BANK0, FlashReg + DEVICE_RESET);
++
++ while (!((ioread32(FlashReg + INTR_STATUS0) &
++ INTR_STATUS0__RST_COMP) |
++ (ioread32(FlashReg + INTR_STATUS0) &
++ INTR_STATUS0__TIME_OUT)))
++ ;
++
++ if (ioread32(FlashReg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) {
++ iowrite32(DEVICE_RESET__BANK1, FlashReg + DEVICE_RESET);
++ while (!((ioread32(FlashReg + INTR_STATUS1) &
++ INTR_STATUS1__RST_COMP) |
++ (ioread32(FlashReg + INTR_STATUS1) &
++ INTR_STATUS1__TIME_OUT)))
++ ;
++
++ if (ioread32(FlashReg + INTR_STATUS1) &
++ INTR_STATUS1__RST_COMP) {
++ iowrite32(DEVICE_RESET__BANK2,
++ FlashReg + DEVICE_RESET);
++ while (!((ioread32(FlashReg + INTR_STATUS2) &
++ INTR_STATUS2__RST_COMP) |
++ (ioread32(FlashReg + INTR_STATUS2) &
++ INTR_STATUS2__TIME_OUT)))
++ ;
++
++ if (ioread32(FlashReg + INTR_STATUS2) &
++ INTR_STATUS2__RST_COMP) {
++ iowrite32(DEVICE_RESET__BANK3,
++ FlashReg + DEVICE_RESET);
++ while (!((ioread32(FlashReg + INTR_STATUS3) &
++ INTR_STATUS3__RST_COMP) |
++ (ioread32(FlashReg + INTR_STATUS3) &
++ INTR_STATUS3__TIME_OUT)))
++ ;
++ } else {
++ printk(KERN_ERR "Getting a time out for bank 2!\n");
++ }
++ } else {
++ printk(KERN_ERR "Getting a time out for bank 1!\n");
++ }
++ }
++
++ iowrite32(INTR_STATUS0__TIME_OUT, FlashReg + INTR_STATUS0);
++ iowrite32(INTR_STATUS1__TIME_OUT, FlashReg + INTR_STATUS1);
++ iowrite32(INTR_STATUS2__TIME_OUT, FlashReg + INTR_STATUS2);
++ iowrite32(INTR_STATUS3__TIME_OUT, FlashReg + INTR_STATUS3);
++
++ DeviceInfo.wONFIDevFeatures =
++ ioread32(FlashReg + ONFI_DEVICE_FEATURES);
++ DeviceInfo.wONFIOptCommands =
++ ioread32(FlashReg + ONFI_OPTIONAL_COMMANDS);
++ DeviceInfo.wONFITimingMode =
++ ioread32(FlashReg + ONFI_TIMING_MODE);
++ DeviceInfo.wONFIPgmCacheTimingMode =
++ ioread32(FlashReg + ONFI_PGM_CACHE_TIMING_MODE);
++
++ n_of_luns = ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
++ ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS;
++ blks_lun_l = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L);
++ blks_lun_h = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U);
++
++ blockperlun = (blks_lun_h << 16) | blks_lun_l;
++
++ DeviceInfo.wTotalBlocks = n_of_luns * blockperlun;
++
++ if (!(ioread32(FlashReg + ONFI_TIMING_MODE) &
++ ONFI_TIMING_MODE__VALUE))
++ return FAIL;
++
++ for (i = 5; i > 0; i--) {
++ if (ioread32(FlashReg + ONFI_TIMING_MODE) & (0x01 << i))
++ break;
++ }
++
++ NAND_ONFi_Timing_Mode(i);
++
++ index_addr(MODE_11 | 0, 0x90);
++ index_addr(MODE_11 | 1, 0);
++
++ for (i = 0; i < 3; i++)
++ index_addr_read_data(MODE_11 | 2, &id);
++
++ nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id);
++
++ DeviceInfo.MLCDevice = id & 0x0C;
++
++ /* By now, all the ONFI devices we know support the page cache */
++ /* rw feature. So here we enable the pipeline_rw_ahead feature */
++ /* iowrite32(1, FlashReg + CACHE_WRITE_ENABLE); */
++ /* iowrite32(1, FlashReg + CACHE_READ_ENABLE); */
++
++ return PASS;
++}
++
++static void get_samsung_nand_para(void)
++{
++ u8 no_of_planes;
++ u32 blk_size;
++ u64 plane_size, capacity;
++ u32 id_bytes[5];
++ int i;
++
++ index_addr((u32)(MODE_11 | 0), 0x90);
++ index_addr((u32)(MODE_11 | 1), 0);
++ for (i = 0; i < 5; i++)
++ index_addr_read_data((u32)(MODE_11 | 2), &id_bytes[i]);
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
++ id_bytes[0], id_bytes[1], id_bytes[2],
++ id_bytes[3], id_bytes[4]);
++
++ if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */
++ /* Set timing register values according to datasheet */
++ iowrite32(5, FlashReg + ACC_CLKS);
++ iowrite32(20, FlashReg + RE_2_WE);
++ iowrite32(12, FlashReg + WE_2_RE);
++ iowrite32(14, FlashReg + ADDR_2_DATA);
++ iowrite32(3, FlashReg + RDWR_EN_LO_CNT);
++ iowrite32(2, FlashReg + RDWR_EN_HI_CNT);
++ iowrite32(2, FlashReg + CS_SETUP_CNT);
++ }
++
++ no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2);
++ plane_size = (u64)64 << ((id_bytes[4] & 0x70) >> 4);
++ blk_size = 64 << ((ioread32(FlashReg + DEVICE_PARAM_1) & 0x30) >> 4);
++ capacity = (u64)128 * plane_size * no_of_planes;
++
++ DeviceInfo.wTotalBlocks = (u32)GLOB_u64_Div(capacity, blk_size);
++}
++
++static void get_toshiba_nand_para(void)
++{
++ void __iomem *scratch_reg;
++ u32 tmp;
++
++ /* Workaround to fix a controller bug which reports a wrong */
++ /* spare area size for some kind of Toshiba NAND device */
++ if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
++ (ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) == 64)) {
++ iowrite32(216, FlashReg + DEVICE_SPARE_AREA_SIZE);
++ tmp = ioread32(FlashReg + DEVICES_CONNECTED) *
++ ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);
++ iowrite32(tmp, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
++#if SUPPORT_15BITECC
++ iowrite32(15, FlashReg + ECC_CORRECTION);
++#elif SUPPORT_8BITECC
++ iowrite32(8, FlashReg + ECC_CORRECTION);
++#endif
++ }
++
++ /* As Toshiba NAND can not provide it's block number, */
++ /* so here we need user to provide the correct block */
++ /* number in a scratch register before the Linux NAND */
++ /* driver is loaded. If no valid value found in the scratch */
++ /* register, then we use default block number value */
++ scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
++ if (!scratch_reg) {
++ printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
++ __FILE__, __LINE__);
++ DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
++ } else {
++ nand_dbg_print(NAND_DBG_WARN,
++ "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
++ DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
++ if (DeviceInfo.wTotalBlocks < 512)
++ DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
++ iounmap(scratch_reg);
++ }
++}
++
++static void get_hynix_nand_para(void)
++{
++ void __iomem *scratch_reg;
++ u32 main_size, spare_size;
++
++ switch (DeviceInfo.wDeviceID) {
++ case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
++ case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
++ iowrite32(128, FlashReg + PAGES_PER_BLOCK);
++ iowrite32(4096, FlashReg + DEVICE_MAIN_AREA_SIZE);
++ iowrite32(224, FlashReg + DEVICE_SPARE_AREA_SIZE);
++ main_size = 4096 * ioread32(FlashReg + DEVICES_CONNECTED);
++ spare_size = 224 * ioread32(FlashReg + DEVICES_CONNECTED);
++ iowrite32(main_size, FlashReg + LOGICAL_PAGE_DATA_SIZE);
++ iowrite32(spare_size, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
++ iowrite32(0, FlashReg + DEVICE_WIDTH);
++#if SUPPORT_15BITECC
++ iowrite32(15, FlashReg + ECC_CORRECTION);
++#elif SUPPORT_8BITECC
++ iowrite32(8, FlashReg + ECC_CORRECTION);
++#endif
++ DeviceInfo.MLCDevice = 1;
++ break;
++ default:
++ nand_dbg_print(NAND_DBG_WARN,
++ "Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
++ "Will use default parameter values instead.\n",
++ DeviceInfo.wDeviceID);
++ }
++
++ scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
++ if (!scratch_reg) {
++ printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
++ __FILE__, __LINE__);
++ DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
++ } else {
++ nand_dbg_print(NAND_DBG_WARN,
++ "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
++ DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
++ if (DeviceInfo.wTotalBlocks < 512)
++ DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
++ iounmap(scratch_reg);
++ }
++}
++
++static void find_valid_banks(void)
++{
++ u32 id[LLD_MAX_FLASH_BANKS];
++ int i;
++
++ totalUsedBanks = 0;
++ for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
++ index_addr((u32)(MODE_11 | (i << 24) | 0), 0x90);
++ index_addr((u32)(MODE_11 | (i << 24) | 1), 0);
++ index_addr_read_data((u32)(MODE_11 | (i << 24) | 2), &id[i]);
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "Return 1st ID for bank[%d]: %x\n", i, id[i]);
++
++ if (i == 0) {
++ if (id[i] & 0x0ff)
++ GLOB_valid_banks[i] = 1;
++ } else {
++ if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
++ GLOB_valid_banks[i] = 1;
++ }
++
++ totalUsedBanks += GLOB_valid_banks[i];
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "totalUsedBanks: %d\n", totalUsedBanks);
++}
++
++static void detect_partition_feature(void)
++{
++ if (ioread32(FlashReg + FEATURES) & FEATURES__PARTITION) {
++ if ((ioread32(FlashReg + PERM_SRC_ID_1) &
++ PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
++ DeviceInfo.wSpectraStartBlock =
++ ((ioread32(FlashReg + MIN_MAX_BANK_1) &
++ MIN_MAX_BANK_1__MIN_VALUE) *
++ DeviceInfo.wTotalBlocks)
++ +
++ (ioread32(FlashReg + MIN_BLK_ADDR_1) &
++ MIN_BLK_ADDR_1__VALUE);
++
++ DeviceInfo.wSpectraEndBlock =
++ (((ioread32(FlashReg + MIN_MAX_BANK_1) &
++ MIN_MAX_BANK_1__MAX_VALUE) >> 2) *
++ DeviceInfo.wTotalBlocks)
++ +
++ (ioread32(FlashReg + MAX_BLK_ADDR_1) &
++ MAX_BLK_ADDR_1__VALUE);
++
++ DeviceInfo.wTotalBlocks *= totalUsedBanks;
++
++ if (DeviceInfo.wSpectraEndBlock >=
++ DeviceInfo.wTotalBlocks) {
++ DeviceInfo.wSpectraEndBlock =
++ DeviceInfo.wTotalBlocks - 1;
++ }
++
++ DeviceInfo.wDataBlockNum =
++ DeviceInfo.wSpectraEndBlock -
++ DeviceInfo.wSpectraStartBlock + 1;
++ } else {
++ DeviceInfo.wTotalBlocks *= totalUsedBanks;
++ DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
++ DeviceInfo.wSpectraEndBlock =
++ DeviceInfo.wTotalBlocks - 1;
++ DeviceInfo.wDataBlockNum =
++ DeviceInfo.wSpectraEndBlock -
++ DeviceInfo.wSpectraStartBlock + 1;
++ }
++ } else {
++ DeviceInfo.wTotalBlocks *= totalUsedBanks;
++ DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
++ DeviceInfo.wSpectraEndBlock = DeviceInfo.wTotalBlocks - 1;
++ DeviceInfo.wDataBlockNum =
++ DeviceInfo.wSpectraEndBlock -
++ DeviceInfo.wSpectraStartBlock + 1;
++ }
++}
++
++static void dump_device_info(void)
++{
++ nand_dbg_print(NAND_DBG_DEBUG, "DeviceInfo:\n");
++ nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n",
++ DeviceInfo.wDeviceMaker);
++ nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n",
++ DeviceInfo.wDeviceID);
++ nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n",
++ DeviceInfo.wDeviceType);
++ nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n",
++ DeviceInfo.wSpectraStartBlock);
++ nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n",
++ DeviceInfo.wSpectraEndBlock);
++ nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n",
++ DeviceInfo.wTotalBlocks);
++ nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n",
++ DeviceInfo.wPagesPerBlock);
++ nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n",
++ DeviceInfo.wPageSize);
++ nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n",
++ DeviceInfo.wPageDataSize);
++ nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n",
++ DeviceInfo.wPageSpareSize);
++ nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n",
++ DeviceInfo.wNumPageSpareFlag);
++ nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n",
++ DeviceInfo.wECCBytesPerSector);
++ nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n",
++ DeviceInfo.wBlockSize);
++ nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n",
++ DeviceInfo.wBlockDataSize);
++ nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n",
++ DeviceInfo.wDataBlockNum);
++ nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n",
++ DeviceInfo.bPlaneNum);
++ nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n",
++ DeviceInfo.wDeviceMainAreaSize);
++ nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n",
++ DeviceInfo.wDeviceSpareAreaSize);
++ nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n",
++ DeviceInfo.wDevicesConnected);
++ nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n",
++ DeviceInfo.wDeviceWidth);
++ nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n",
++ DeviceInfo.wHWRevision);
++ nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n",
++ DeviceInfo.wHWFeatures);
++ nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n",
++ DeviceInfo.wONFIDevFeatures);
++ nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n",
++ DeviceInfo.wONFIOptCommands);
++ nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n",
++ DeviceInfo.wONFITimingMode);
++ nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n",
++ DeviceInfo.wONFIPgmCacheTimingMode);
++ nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n",
++ DeviceInfo.MLCDevice ? "Yes" : "No");
++ nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n",
++ DeviceInfo.wSpareSkipBytes);
++ nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n",
++ DeviceInfo.nBitsInPageNumber);
++ nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n",
++ DeviceInfo.nBitsInPageDataSize);
++ nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n",
++ DeviceInfo.nBitsInBlockDataSize);
++}
++
++u16 NAND_Read_Device_ID(void)
++{
++ u16 status = PASS;
++ u8 no_of_planes;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ iowrite32(0x02, FlashReg + SPARE_AREA_SKIP_BYTES);
++ iowrite32(0xffff, FlashReg + SPARE_AREA_MARKER);
++ DeviceInfo.wDeviceMaker = ioread32(FlashReg + MANUFACTURER_ID);
++ DeviceInfo.wDeviceID = ioread32(FlashReg + DEVICE_ID);
++ DeviceInfo.MLCDevice = ioread32(FlashReg + DEVICE_PARAM_0) & 0x0c;
++
++ if (ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
++ ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
++ if (FAIL == get_onfi_nand_para())
++ return FAIL;
++ } else if (DeviceInfo.wDeviceMaker == 0xEC) { /* Samsung NAND */
++ get_samsung_nand_para();
++ } else if (DeviceInfo.wDeviceMaker == 0x98) { /* Toshiba NAND */
++ get_toshiba_nand_para();
++ } else if (DeviceInfo.wDeviceMaker == 0xAD) { /* Hynix NAND */
++ get_hynix_nand_para();
++ } else {
++ DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
++ }
++
++ nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
++ "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
++ "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
++ "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
++ ioread32(FlashReg + ACC_CLKS),
++ ioread32(FlashReg + RE_2_WE),
++ ioread32(FlashReg + WE_2_RE),
++ ioread32(FlashReg + ADDR_2_DATA),
++ ioread32(FlashReg + RDWR_EN_LO_CNT),
++ ioread32(FlashReg + RDWR_EN_HI_CNT),
++ ioread32(FlashReg + CS_SETUP_CNT));
++
++ DeviceInfo.wHWRevision = ioread32(FlashReg + REVISION);
++ DeviceInfo.wHWFeatures = ioread32(FlashReg + FEATURES);
++
++ DeviceInfo.wDeviceMainAreaSize =
++ ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE);
++ DeviceInfo.wDeviceSpareAreaSize =
++ ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);
++
++ DeviceInfo.wPageDataSize =
++ ioread32(FlashReg + LOGICAL_PAGE_DATA_SIZE);
++
++ /* Note: When using the Micon 4K NAND device, the controller will report
++ * Page Spare Size as 216 bytes. But Micron's Spec say it's 218 bytes.
++ * And if force set it to 218 bytes, the controller can not work
++ * correctly. So just let it be. But keep in mind that this bug may
++ * cause
++ * other problems in future. - Yunpeng 2008-10-10
++ */
++ DeviceInfo.wPageSpareSize =
++ ioread32(FlashReg + LOGICAL_PAGE_SPARE_SIZE);
++
++ DeviceInfo.wPagesPerBlock = ioread32(FlashReg + PAGES_PER_BLOCK);
++
++ DeviceInfo.wPageSize =
++ DeviceInfo.wPageDataSize + DeviceInfo.wPageSpareSize;
++ DeviceInfo.wBlockSize =
++ DeviceInfo.wPageSize * DeviceInfo.wPagesPerBlock;
++ DeviceInfo.wBlockDataSize =
++ DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;
++
++ DeviceInfo.wDeviceWidth = ioread32(FlashReg + DEVICE_WIDTH);
++ DeviceInfo.wDeviceType =
++ ((ioread32(FlashReg + DEVICE_WIDTH) > 0) ? 16 : 8);
++
++ DeviceInfo.wDevicesConnected = ioread32(FlashReg + DEVICES_CONNECTED);
++
++ DeviceInfo.wSpareSkipBytes =
++ ioread32(FlashReg + SPARE_AREA_SKIP_BYTES) *
++ DeviceInfo.wDevicesConnected;
++
++ DeviceInfo.nBitsInPageNumber =
++ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
++ DeviceInfo.nBitsInPageDataSize =
++ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
++ DeviceInfo.nBitsInBlockDataSize =
++ (u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);
++
++ set_ecc_config();
++
++ no_of_planes = ioread32(FlashReg + NUMBER_OF_PLANES) &
++ NUMBER_OF_PLANES__VALUE;
++
++ switch (no_of_planes) {
++ case 0:
++ case 1:
++ case 3:
++ case 7:
++ DeviceInfo.bPlaneNum = no_of_planes + 1;
++ break;
++ default:
++ status = FAIL;
++ break;
++ }
++
++ find_valid_banks();
++
++ detect_partition_feature();
++
++ dump_device_info();
++
++ return status;
++}
++
++u16 NAND_UnlockArrayAll(void)
++{
++ u64 start_addr, end_addr;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ start_addr = 0;
++ end_addr = ((u64)DeviceInfo.wBlockSize *
++ (DeviceInfo.wTotalBlocks - 1)) >>
++ DeviceInfo.nBitsInPageDataSize;
++
++ index_addr((u32)(MODE_10 | (u32)start_addr), 0x10);
++ index_addr((u32)(MODE_10 | (u32)end_addr), 0x11);
++
++ return PASS;
++}
++
++void NAND_LLD_Enable_Disable_Interrupts(u16 INT_ENABLE)
++{
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (INT_ENABLE)
++ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE);
++ else
++ iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
++}
++
++u16 NAND_Erase_Block(u32 block)
++{
++ u16 status = PASS;
++ u64 flash_add;
++ u16 flash_bank;
++ u32 intr_status = 0;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
++ * DeviceInfo.wBlockDataSize;
++
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ if (block >= DeviceInfo.wTotalBlocks)
++ status = FAIL;
++
++ if (status == PASS) {
++ intr_status = intr_status_addresses[flash_bank];
++
++ iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
++ FlashReg + intr_status);
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 1);
++
++ while (!(ioread32(FlashReg + intr_status) &
++ (INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL)))
++ ;
++
++ if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ERASE_FAIL)
++ status = FAIL;
++
++ iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
++ FlashReg + intr_status);
++ }
++
++ return status;
++}
++
++static u32 Boundary_Check_Block_Page(u32 block, u16 page,
++ u16 page_count)
++{
++ u32 status = PASS;
++
++ if (block >= DeviceInfo.wTotalBlocks)
++ status = FAIL;
++
++ if (page + page_count > DeviceInfo.wPagesPerBlock)
++ status = FAIL;
++
++ return status;
++}
++
++u16 NAND_Read_Page_Spare(u8 *read_data, u32 block, u16 page,
++ u16 page_count)
++{
++ u32 status = PASS;
++ u32 i;
++ u64 flash_add;
++ u32 PageSpareSize = DeviceInfo.wPageSpareSize;
++ u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
++ u32 flash_bank;
++ u32 intr_status = 0;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ u8 *page_spare = buf_read_page_spare;
++
++ if (block >= DeviceInfo.wTotalBlocks) {
++ printk(KERN_ERR "block too big: %d\n", (int)block);
++ status = FAIL;
++ }
++
++ if (page >= DeviceInfo.wPagesPerBlock) {
++ printk(KERN_ERR "page too big: %d\n", page);
++ status = FAIL;
++ }
++
++ if (page_count > 1) {
++ printk(KERN_ERR "page count too big: %d\n", page_count);
++ status = FAIL;
++ }
++
++ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
++ * DeviceInfo.wBlockDataSize +
++ (u64)page * DeviceInfo.wPageDataSize;
++
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ if (status == PASS) {
++ intr_status = intr_status_addresses[flash_bank];
++ iowrite32(ioread32(FlashReg + intr_status),
++ FlashReg + intr_status);
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
++ 0x41);
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
++ 0x2000 | page_count);
++ while (!(ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__LOAD_COMP))
++ ;
++
++ iowrite32((u32)(MODE_01 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
++ FlashMem);
++
++ for (i = 0; i < (PageSpareSize / 4); i++)
++ *((u32 *)page_spare + i) =
++ ioread32(FlashMem + 0x10);
++
++ if (enable_ecc) {
++ for (i = 0; i < spareFlagBytes; i++)
++ read_data[i] =
++ page_spare[PageSpareSize -
++ spareFlagBytes + i];
++ for (i = 0; i < (PageSpareSize - spareFlagBytes); i++)
++ read_data[spareFlagBytes + i] =
++ page_spare[i];
++ } else {
++ for (i = 0; i < PageSpareSize; i++)
++ read_data[i] = page_spare[i];
++ }
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
++ }
++
++ return status;
++}
++
++/* No use function. Should be removed later */
++u16 NAND_Write_Page_Spare(u8 *write_data, u32 block, u16 page,
++ u16 page_count)
++{
++ printk(KERN_ERR
++ "Error! This function (NAND_Write_Page_Spare) should never"
++ " be called!\n");
++ return ERR;
++}
++
++/* op value: 0 - DDMA read; 1 - DDMA write */
++static void ddma_trans(u8 *data, u64 flash_add,
++ u32 flash_bank, int op, u32 numPages)
++{
++ u32 data_addr;
++
++ /* Map virtual address to bus address for DDMA */
++ data_addr = virt_to_bus(data);
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
++ (u16)(2 << 12) | (op << 8) | numPages);
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ ((u16)(0x0FFFF & (data_addr >> 16)) << 8)),
++ (u16)(2 << 12) | (2 << 8) | 0);
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ ((u16)(0x0FFFF & data_addr) << 8)),
++ (u16)(2 << 12) | (3 << 8) | 0);
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (1 << 16) | (0x40 << 8)),
++ (u16)(2 << 12) | (4 << 8) | 0);
++}
++
++/* If data in buf are all 0xff, then return 1; otherwise return 0 */
++static int check_all_1(u8 *buf)
++{
++ int i, j, cnt;
++
++ for (i = 0; i < DeviceInfo.wPageDataSize; i++) {
++ if (buf[i] != 0xff) {
++ cnt = 0;
++ nand_dbg_print(NAND_DBG_WARN,
++ "the first non-0xff data byte is: %d\n", i);
++ for (j = i; j < DeviceInfo.wPageDataSize; j++) {
++ nand_dbg_print(NAND_DBG_WARN, "0x%x ", buf[j]);
++ cnt++;
++ if (cnt > 8)
++ break;
++ }
++ nand_dbg_print(NAND_DBG_WARN, "\n");
++ return 0;
++ }
++ }
++
++ return 1;
++}
++
++static int do_ecc_new(unsigned long bank, u8 *buf,
++ u32 block, u16 page)
++{
++ int status = PASS;
++ u16 err_page = 0;
++ u16 err_byte;
++ u8 err_sect;
++ u8 err_dev;
++ u16 err_fix_info;
++ u16 err_addr;
++ u32 ecc_sect_size;
++ u8 *err_pos;
++ u32 err_page_addr[4] = {ERR_PAGE_ADDR0,
++ ERR_PAGE_ADDR1, ERR_PAGE_ADDR2, ERR_PAGE_ADDR3};
++
++ ecc_sect_size = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
++
++ do {
++ err_page = ioread32(FlashReg + err_page_addr[bank]);
++ err_addr = ioread32(FlashReg + ECC_ERROR_ADDRESS);
++ err_byte = err_addr & ECC_ERROR_ADDRESS__OFFSET;
++ err_sect = ((err_addr & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12);
++ err_fix_info = ioread32(FlashReg + ERR_CORRECTION_INFO);
++ err_dev = ((err_fix_info & ERR_CORRECTION_INFO__DEVICE_NR)
++ >> 8);
++ if (err_fix_info & ERR_CORRECTION_INFO__ERROR_TYPE) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "%s, Line %d Uncorrectable ECC error "
++ "when read block %d page %d."
++ "PTN_INTR register: 0x%x "
++ "err_page: %d, err_sect: %d, err_byte: %d, "
++ "err_dev: %d, ecc_sect_size: %d, "
++ "err_fix_info: 0x%x\n",
++ __FILE__, __LINE__, block, page,
++ ioread32(FlashReg + PTN_INTR),
++ err_page, err_sect, err_byte, err_dev,
++ ecc_sect_size, (u32)err_fix_info);
++
++ if (check_all_1(buf))
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
++ "All 0xff!\n",
++ __FILE__, __LINE__);
++ else
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
++ "Not all 0xff!\n",
++ __FILE__, __LINE__);
++ status = FAIL;
++ } else {
++ nand_dbg_print(NAND_DBG_WARN,
++ "%s, Line %d Found ECC error "
++ "when read block %d page %d."
++ "err_page: %d, err_sect: %d, err_byte: %d, "
++ "err_dev: %d, ecc_sect_size: %d, "
++ "err_fix_info: 0x%x\n",
++ __FILE__, __LINE__, block, page,
++ err_page, err_sect, err_byte, err_dev,
++ ecc_sect_size, (u32)err_fix_info);
++ if (err_byte < ECC_SECTOR_SIZE) {
++ err_pos = buf +
++ (err_page - page) *
++ DeviceInfo.wPageDataSize +
++ err_sect * ecc_sect_size +
++ err_byte *
++ DeviceInfo.wDevicesConnected +
++ err_dev;
++
++ *err_pos ^= err_fix_info &
++ ERR_CORRECTION_INFO__BYTEMASK;
++ }
++ }
++ } while (!(err_fix_info & ERR_CORRECTION_INFO__LAST_ERR_INFO));
++
++ return status;
++}
++
++u16 NAND_Read_Page_Main_Polling(u8 *read_data,
++ u32 block, u16 page, u16 page_count)
++{
++ u32 status = PASS;
++ u64 flash_add;
++ u32 intr_status = 0;
++ u32 flash_bank;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ u8 *read_data_l;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ status = Boundary_Check_Block_Page(block, page, page_count);
++ if (status != PASS)
++ return status;
++
++ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
++ * DeviceInfo.wBlockDataSize +
++ (u64)page * DeviceInfo.wPageDataSize;
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++
++ intr_status = intr_status_addresses[flash_bank];
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ if (page_count > 1) {
++ read_data_l = read_data;
++ while (page_count > MAX_PAGES_PER_RW) {
++ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
++ status = NAND_Multiplane_Read(read_data_l,
++ block, page, MAX_PAGES_PER_RW);
++ else
++ status = NAND_Pipeline_Read_Ahead_Polling(
++ read_data_l, block, page,
++ MAX_PAGES_PER_RW);
++
++ if (status == FAIL)
++ return status;
++
++ read_data_l += DeviceInfo.wPageDataSize *
++ MAX_PAGES_PER_RW;
++ page_count -= MAX_PAGES_PER_RW;
++ page += MAX_PAGES_PER_RW;
++ }
++ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
++ status = NAND_Multiplane_Read(read_data_l,
++ block, page, page_count);
++ else
++ status = NAND_Pipeline_Read_Ahead_Polling(
++ read_data_l, block, page, page_count);
++
++ return status;
++ }
++
++ iowrite32(1, FlashReg + DMA_ENABLE);
++ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ ddma_trans(read_data, flash_add, flash_bank, 0, 1);
++
++ if (enable_ecc) {
++ while (!(ioread32(FlashReg + intr_status) &
++ (INTR_STATUS0__ECC_TRANSACTION_DONE |
++ INTR_STATUS0__ECC_ERR)))
++ ;
++
++ if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_ERR) {
++ iowrite32(INTR_STATUS0__ECC_ERR,
++ FlashReg + intr_status);
++ status = do_ecc_new(flash_bank, read_data,
++ block, page);
++ }
++
++ if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_TRANSACTION_DONE &
++ INTR_STATUS0__ECC_ERR)
++ iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE |
++ INTR_STATUS0__ECC_ERR,
++ FlashReg + intr_status);
++ else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_TRANSACTION_DONE)
++ iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
++ FlashReg + intr_status);
++ else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_ERR)
++ iowrite32(INTR_STATUS0__ECC_ERR,
++ FlashReg + intr_status);
++ } else {
++ while (!(ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__DMA_CMD_COMP))
++ ;
++ iowrite32(INTR_STATUS0__DMA_CMD_COMP, FlashReg + intr_status);
++ }
++
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ iowrite32(0, FlashReg + DMA_ENABLE);
++ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ return status;
++}
++
++u16 NAND_Pipeline_Read_Ahead_Polling(u8 *read_data,
++ u32 block, u16 page, u16 page_count)
++{
++ u32 status = PASS;
++ u32 NumPages = page_count;
++ u64 flash_add;
++ u32 flash_bank;
++ u32 intr_status = 0;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ u32 ecc_done_OR_dma_comp;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ status = Boundary_Check_Block_Page(block, page, page_count);
++
++ if (page_count < 2)
++ status = FAIL;
++
++ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
++ *DeviceInfo.wBlockDataSize +
++ (u64)page * DeviceInfo.wPageDataSize;
++
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ if (status == PASS) {
++ intr_status = intr_status_addresses[flash_bank];
++ iowrite32(ioread32(FlashReg + intr_status),
++ FlashReg + intr_status);
++
++ iowrite32(1, FlashReg + DMA_ENABLE);
++ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
++ ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
++
++ ecc_done_OR_dma_comp = 0;
++ while (1) {
++ if (enable_ecc) {
++ while (!ioread32(FlashReg + intr_status))
++ ;
++
++ if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_ERR) {
++ iowrite32(INTR_STATUS0__ECC_ERR,
++ FlashReg + intr_status);
++ status = do_ecc_new(flash_bank,
++ read_data, block, page);
++ } else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__DMA_CMD_COMP) {
++ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
++ FlashReg + intr_status);
++
++ if (1 == ecc_done_OR_dma_comp)
++ break;
++
++ ecc_done_OR_dma_comp = 1;
++ } else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_TRANSACTION_DONE) {
++ iowrite32(
++ INTR_STATUS0__ECC_TRANSACTION_DONE,
++ FlashReg + intr_status);
++
++ if (1 == ecc_done_OR_dma_comp)
++ break;
++
++ ecc_done_OR_dma_comp = 1;
++ }
++ } else {
++ while (!(ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__DMA_CMD_COMP))
++ ;
++
++ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
++ FlashReg + intr_status);
++ break;
++ }
++
++ iowrite32((~INTR_STATUS0__ECC_ERR) &
++ (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
++ (~INTR_STATUS0__DMA_CMD_COMP),
++ FlashReg + intr_status);
++
++ }
++
++ iowrite32(ioread32(FlashReg + intr_status),
++ FlashReg + intr_status);
++
++ iowrite32(0, FlashReg + DMA_ENABLE);
++
++ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++ }
++ return status;
++}
++
++u16 NAND_Read_Page_Main(u8 *read_data, u32 block, u16 page,
++ u16 page_count)
++{
++ u32 status = PASS;
++ u64 flash_add;
++ u32 intr_status = 0;
++ u32 flash_bank;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ int ret;
++ u8 *read_data_l;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ status = Boundary_Check_Block_Page(block, page, page_count);
++ if (status != PASS)
++ return status;
++
++ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
++ * DeviceInfo.wBlockDataSize +
++ (u64)page * DeviceInfo.wPageDataSize;
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++
++ intr_status = intr_status_addresses[flash_bank];
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ if (page_count > 1) {
++ read_data_l = read_data;
++ while (page_count > MAX_PAGES_PER_RW) {
++ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
++ status = NAND_Multiplane_Read(read_data_l,
++ block, page, MAX_PAGES_PER_RW);
++ else
++ status = NAND_Pipeline_Read_Ahead(
++ read_data_l, block, page,
++ MAX_PAGES_PER_RW);
++
++ if (status == FAIL)
++ return status;
++
++ read_data_l += DeviceInfo.wPageDataSize *
++ MAX_PAGES_PER_RW;
++ page_count -= MAX_PAGES_PER_RW;
++ page += MAX_PAGES_PER_RW;
++ }
++ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
++ status = NAND_Multiplane_Read(read_data_l,
++ block, page, page_count);
++ else
++ status = NAND_Pipeline_Read_Ahead(
++ read_data_l, block, page, page_count);
++
++ return status;
++ }
++
++ iowrite32(1, FlashReg + DMA_ENABLE);
++ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ /* Fill the mrst_nand_info structure */
++ info.state = INT_READ_PAGE_MAIN;
++ info.read_data = read_data;
++ info.flash_bank = flash_bank;
++ info.block = block;
++ info.page = page;
++ info.ret = PASS;
++
++ ddma_trans(read_data, flash_add, flash_bank, 0, 1);
++
++ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */
++
++ ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
++ if (!ret) {
++ printk(KERN_ERR "Wait for completion timeout "
++ "in %s, Line %d\n", __FILE__, __LINE__);
++ status = ERR;
++ } else {
++ status = info.ret;
++ }
++
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ iowrite32(0, FlashReg + DMA_ENABLE);
++ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ return status;
++}
++
++void Conv_Spare_Data_Log2Phy_Format(u8 *data)
++{
++ int i;
++ const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
++ const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
++
++ if (enable_ecc) {
++ for (i = spareFlagBytes - 1; i >= 0; i++)
++ data[PageSpareSize - spareFlagBytes + i] = data[i];
++ }
++}
++
++void Conv_Spare_Data_Phy2Log_Format(u8 *data)
++{
++ int i;
++ const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
++ const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
++
++ if (enable_ecc) {
++ for (i = 0; i < spareFlagBytes; i++)
++ data[i] = data[PageSpareSize - spareFlagBytes + i];
++ }
++}
++
++
++void Conv_Main_Spare_Data_Log2Phy_Format(u8 *data, u16 page_count)
++{
++ const u32 PageSize = DeviceInfo.wPageSize;
++ const u32 PageDataSize = DeviceInfo.wPageDataSize;
++ const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
++ const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
++ const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
++ u32 eccSectorSize;
++ u32 page_offset;
++ int i, j;
++
++ eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
++ if (enable_ecc) {
++ while (page_count > 0) {
++ page_offset = (page_count - 1) * PageSize;
++ j = (DeviceInfo.wPageDataSize / eccSectorSize);
++ for (i = spareFlagBytes - 1; i >= 0; i--)
++ data[page_offset +
++ (eccSectorSize + eccBytes) * j + i] =
++ data[page_offset + PageDataSize + i];
++ for (j--; j >= 1; j--) {
++ for (i = eccSectorSize - 1; i >= 0; i--)
++ data[page_offset +
++ (eccSectorSize + eccBytes) * j + i] =
++ data[page_offset +
++ eccSectorSize * j + i];
++ }
++ for (i = (PageSize - spareSkipBytes) - 1;
++ i >= PageDataSize; i--)
++ data[page_offset + i + spareSkipBytes] =
++ data[page_offset + i];
++ page_count--;
++ }
++ }
++}
++
++void Conv_Main_Spare_Data_Phy2Log_Format(u8 *data, u16 page_count)
++{
++ const u32 PageSize = DeviceInfo.wPageSize;
++ const u32 PageDataSize = DeviceInfo.wPageDataSize;
++ const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
++ const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
++ const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
++ u32 eccSectorSize;
++ u32 page_offset;
++ int i, j;
++
++ eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
++ if (enable_ecc) {
++ while (page_count > 0) {
++ page_offset = (page_count - 1) * PageSize;
++ for (i = PageDataSize;
++ i < PageSize - spareSkipBytes;
++ i++)
++ data[page_offset + i] =
++ data[page_offset + i +
++ spareSkipBytes];
++ for (j = 1;
++ j < DeviceInfo.wPageDataSize / eccSectorSize;
++ j++) {
++ for (i = 0; i < eccSectorSize; i++)
++ data[page_offset +
++ eccSectorSize * j + i] =
++ data[page_offset +
++ (eccSectorSize + eccBytes) * j
++ + i];
++ }
++ for (i = 0; i < spareFlagBytes; i++)
++ data[page_offset + PageDataSize + i] =
++ data[page_offset +
++ (eccSectorSize + eccBytes) * j + i];
++ page_count--;
++ }
++ }
++}
++
++/* Un-tested function */
++u16 NAND_Multiplane_Read(u8 *read_data, u32 block, u16 page,
++ u16 page_count)
++{
++ u32 status = PASS;
++ u32 NumPages = page_count;
++ u64 flash_add;
++ u32 flash_bank;
++ u32 intr_status = 0;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ u32 ecc_done_OR_dma_comp;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ status = Boundary_Check_Block_Page(block, page, page_count);
++
++ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
++ * DeviceInfo.wBlockDataSize +
++ (u64)page * DeviceInfo.wPageDataSize;
++
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ if (status == PASS) {
++ intr_status = intr_status_addresses[flash_bank];
++ iowrite32(ioread32(FlashReg + intr_status),
++ FlashReg + intr_status);
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++ iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);
++
++ iowrite32(1, FlashReg + DMA_ENABLE);
++ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
++ ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
++
++ ecc_done_OR_dma_comp = 0;
++ while (1) {
++ if (enable_ecc) {
++ while (!ioread32(FlashReg + intr_status))
++ ;
++
++ if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_ERR) {
++ iowrite32(INTR_STATUS0__ECC_ERR,
++ FlashReg + intr_status);
++ status = do_ecc_new(flash_bank,
++ read_data, block, page);
++ } else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__DMA_CMD_COMP) {
++ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
++ FlashReg + intr_status);
++
++ if (1 == ecc_done_OR_dma_comp)
++ break;
++
++ ecc_done_OR_dma_comp = 1;
++ } else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_TRANSACTION_DONE) {
++ iowrite32(
++ INTR_STATUS0__ECC_TRANSACTION_DONE,
++ FlashReg + intr_status);
++
++ if (1 == ecc_done_OR_dma_comp)
++ break;
++
++ ecc_done_OR_dma_comp = 1;
++ }
++ } else {
++ while (!(ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__DMA_CMD_COMP))
++ ;
++ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
++ FlashReg + intr_status);
++ break;
++ }
++
++ iowrite32((~INTR_STATUS0__ECC_ERR) &
++ (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
++ (~INTR_STATUS0__DMA_CMD_COMP),
++ FlashReg + intr_status);
++
++ }
++
++ iowrite32(ioread32(FlashReg + intr_status),
++ FlashReg + intr_status);
++
++ iowrite32(0, FlashReg + DMA_ENABLE);
++
++ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ iowrite32(0, FlashReg + MULTIPLANE_OPERATION);
++ }
++
++ return status;
++}
++
++u16 NAND_Pipeline_Read_Ahead(u8 *read_data, u32 block,
++ u16 page, u16 page_count)
++{
++ u32 status = PASS;
++ u32 NumPages = page_count;
++ u64 flash_add;
++ u32 flash_bank;
++ u32 intr_status = 0;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ int ret;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ status = Boundary_Check_Block_Page(block, page, page_count);
++
++ if (page_count < 2)
++ status = FAIL;
++
++ if (status != PASS)
++ return status;
++
++ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
++ *DeviceInfo.wBlockDataSize +
++ (u64)page * DeviceInfo.wPageDataSize;
++
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ intr_status = intr_status_addresses[flash_bank];
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ iowrite32(1, FlashReg + DMA_ENABLE);
++ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++
++ /* Fill the mrst_nand_info structure */
++ info.state = INT_PIPELINE_READ_AHEAD;
++ info.read_data = read_data;
++ info.flash_bank = flash_bank;
++ info.block = block;
++ info.page = page;
++ info.ret = PASS;
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
++
++ ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
++
++ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */
++
++ ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
++ if (!ret) {
++ printk(KERN_ERR "Wait for completion timeout "
++ "in %s, Line %d\n", __FILE__, __LINE__);
++ status = ERR;
++ } else {
++ status = info.ret;
++ }
++
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ iowrite32(0, FlashReg + DMA_ENABLE);
++
++ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ return status;
++}
++
++
++u16 NAND_Write_Page_Main(u8 *write_data, u32 block, u16 page,
++ u16 page_count)
++{
++ u32 status = PASS;
++ u64 flash_add;
++ u32 intr_status = 0;
++ u32 flash_bank;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ int ret;
++ u8 *write_data_l;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ status = Boundary_Check_Block_Page(block, page, page_count);
++ if (status != PASS)
++ return status;
++
++ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
++ * DeviceInfo.wBlockDataSize +
++ (u64)page * DeviceInfo.wPageDataSize;
++
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ intr_status = intr_status_addresses[flash_bank];
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++
++ iowrite32(INTR_STATUS0__PROGRAM_COMP |
++ INTR_STATUS0__PROGRAM_FAIL, FlashReg + intr_status);
++
++ if (page_count > 1) {
++ write_data_l = write_data;
++ while (page_count > MAX_PAGES_PER_RW) {
++ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
++ status = NAND_Multiplane_Write(write_data_l,
++ block, page, MAX_PAGES_PER_RW);
++ else
++ status = NAND_Pipeline_Write_Ahead(
++ write_data_l, block, page,
++ MAX_PAGES_PER_RW);
++ if (status == FAIL)
++ return status;
++
++ write_data_l += DeviceInfo.wPageDataSize *
++ MAX_PAGES_PER_RW;
++ page_count -= MAX_PAGES_PER_RW;
++ page += MAX_PAGES_PER_RW;
++ }
++ if (ioread32(FlashReg + MULTIPLANE_OPERATION))
++ status = NAND_Multiplane_Write(write_data_l,
++ block, page, page_count);
++ else
++ status = NAND_Pipeline_Write_Ahead(write_data_l,
++ block, page, page_count);
++
++ return status;
++ }
++
++ iowrite32(1, FlashReg + DMA_ENABLE);
++ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ /* Fill the mrst_nand_info structure */
++ info.state = INT_WRITE_PAGE_MAIN;
++ info.write_data = write_data;
++ info.flash_bank = flash_bank;
++ info.block = block;
++ info.page = page;
++ info.ret = PASS;
++
++ ddma_trans(write_data, flash_add, flash_bank, 1, 1);
++
++ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */
++
++ ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
++ if (!ret) {
++ printk(KERN_ERR "Wait for completion timeout "
++ "in %s, Line %d\n", __FILE__, __LINE__);
++ status = ERR;
++ } else {
++ status = info.ret;
++ }
++
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ iowrite32(0, FlashReg + DMA_ENABLE);
++ while (ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG)
++ ;
++
++ return status;
++}
++
++void NAND_ECC_Ctrl(int enable)
++{
++ if (enable) {
++ nand_dbg_print(NAND_DBG_WARN,
++ "Will enable ECC in %s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++ iowrite32(1, FlashReg + ECC_ENABLE);
++ enable_ecc = 1;
++ } else {
++ nand_dbg_print(NAND_DBG_WARN,
++ "Will disable ECC in %s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++ iowrite32(0, FlashReg + ECC_ENABLE);
++ enable_ecc = 0;
++ }
++}
++
++u16 NAND_Write_Page_Main_Spare(u8 *write_data, u32 block,
++ u16 page, u16 page_count)
++{
++ u32 status = PASS;
++ u32 i, j, page_num = 0;
++ u32 PageSize = DeviceInfo.wPageSize;
++ u32 PageDataSize = DeviceInfo.wPageDataSize;
++ u32 eccBytes = DeviceInfo.wECCBytesPerSector;
++ u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
++ u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
++ u64 flash_add;
++ u32 eccSectorSize;
++ u32 flash_bank;
++ u32 intr_status = 0;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ u8 *page_main_spare = buf_write_page_main_spare;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
++
++ status = Boundary_Check_Block_Page(block, page, page_count);
++
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ if (status == PASS) {
++ intr_status = intr_status_addresses[flash_bank];
++
++ iowrite32(1, FlashReg + TRANSFER_SPARE_REG);
++
++ while ((status != FAIL) && (page_count > 0)) {
++ flash_add = (u64)(block %
++ (DeviceInfo.wTotalBlocks / totalUsedBanks)) *
++ DeviceInfo.wBlockDataSize +
++ (u64)page * DeviceInfo.wPageDataSize;
++
++ iowrite32(ioread32(FlashReg + intr_status),
++ FlashReg + intr_status);
++
++ iowrite32((u32)(MODE_01 | (flash_bank << 24) |
++ (flash_add >>
++ DeviceInfo.nBitsInPageDataSize)),
++ FlashMem);
++
++ if (enable_ecc) {
++ for (j = 0;
++ j <
++ DeviceInfo.wPageDataSize / eccSectorSize;
++ j++) {
++ for (i = 0; i < eccSectorSize; i++)
++ page_main_spare[(eccSectorSize +
++ eccBytes) * j +
++ i] =
++ write_data[eccSectorSize *
++ j + i];
++
++ for (i = 0; i < eccBytes; i++)
++ page_main_spare[(eccSectorSize +
++ eccBytes) * j +
++ eccSectorSize +
++ i] =
++ write_data[PageDataSize +
++ spareFlagBytes +
++ eccBytes * j +
++ i];
++ }
++
++ for (i = 0; i < spareFlagBytes; i++)
++ page_main_spare[(eccSectorSize +
++ eccBytes) * j + i] =
++ write_data[PageDataSize + i];
++
++ for (i = PageSize - 1; i >= PageDataSize +
++ spareSkipBytes; i--)
++ page_main_spare[i] = page_main_spare[i -
++ spareSkipBytes];
++
++ for (i = PageDataSize; i < PageDataSize +
++ spareSkipBytes; i++)
++ page_main_spare[i] = 0xff;
++
++ for (i = 0; i < PageSize / 4; i++)
++ iowrite32(
++ *((u32 *)page_main_spare + i),
++ FlashMem + 0x10);
++ } else {
++
++ for (i = 0; i < PageSize / 4; i++)
++ iowrite32(*((u32 *)write_data + i),
++ FlashMem + 0x10);
++ }
++
++ while (!(ioread32(FlashReg + intr_status) &
++ (INTR_STATUS0__PROGRAM_COMP |
++ INTR_STATUS0__PROGRAM_FAIL)))
++ ;
++
++ if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__PROGRAM_FAIL)
++ status = FAIL;
++
++ iowrite32(ioread32(FlashReg + intr_status),
++ FlashReg + intr_status);
++
++ page_num++;
++ page_count--;
++ write_data += PageSize;
++ }
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++ }
++
++ return status;
++}
++
++u16 NAND_Read_Page_Main_Spare(u8 *read_data, u32 block, u16 page,
++ u16 page_count)
++{
++ u32 status = PASS;
++ u32 i, j;
++ u64 flash_add = 0;
++ u32 PageSize = DeviceInfo.wPageSize;
++ u32 PageDataSize = DeviceInfo.wPageDataSize;
++ u32 PageSpareSize = DeviceInfo.wPageSpareSize;
++ u32 eccBytes = DeviceInfo.wECCBytesPerSector;
++ u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
++ u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
++ u32 eccSectorSize;
++ u32 flash_bank;
++ u32 intr_status = 0;
++ u8 *read_data_l = read_data;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ u8 *page_main_spare = buf_read_page_main_spare;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
++
++ status = Boundary_Check_Block_Page(block, page, page_count);
++
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ if (status == PASS) {
++ intr_status = intr_status_addresses[flash_bank];
++
++ iowrite32(1, FlashReg + TRANSFER_SPARE_REG);
++
++ iowrite32(ioread32(FlashReg + intr_status),
++ FlashReg + intr_status);
++
++ while ((status != FAIL) && (page_count > 0)) {
++ flash_add = (u64)(block %
++ (DeviceInfo.wTotalBlocks / totalUsedBanks))
++ * DeviceInfo.wBlockDataSize +
++ (u64)page * DeviceInfo.wPageDataSize;
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
++ 0x43);
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)),
++ 0x2000 | page_count);
++
++ while (!(ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__LOAD_COMP))
++ ;
++
++ iowrite32((u32)(MODE_01 | (flash_bank << 24) |
++ (flash_add >>
++ DeviceInfo.nBitsInPageDataSize)),
++ FlashMem);
++
++ for (i = 0; i < PageSize / 4; i++)
++ *(((u32 *)page_main_spare) + i) =
++ ioread32(FlashMem + 0x10);
++
++ if (enable_ecc) {
++ for (i = PageDataSize; i < PageSize -
++ spareSkipBytes; i++)
++ page_main_spare[i] = page_main_spare[i +
++ spareSkipBytes];
++
++ for (j = 0;
++ j < DeviceInfo.wPageDataSize / eccSectorSize;
++ j++) {
++
++ for (i = 0; i < eccSectorSize; i++)
++ read_data_l[eccSectorSize * j +
++ i] =
++ page_main_spare[
++ (eccSectorSize +
++ eccBytes) * j + i];
++
++ for (i = 0; i < eccBytes; i++)
++ read_data_l[PageDataSize +
++ spareFlagBytes +
++ eccBytes * j + i] =
++ page_main_spare[
++ (eccSectorSize +
++ eccBytes) * j +
++ eccSectorSize + i];
++ }
++
++ for (i = 0; i < spareFlagBytes; i++)
++ read_data_l[PageDataSize + i] =
++ page_main_spare[(eccSectorSize +
++ eccBytes) * j + i];
++ } else {
++ for (i = 0; i < (PageDataSize + PageSpareSize);
++ i++)
++ read_data_l[i] = page_main_spare[i];
++
++ }
++
++ if (enable_ecc) {
++ while (!(ioread32(FlashReg + intr_status) &
++ (INTR_STATUS0__ECC_TRANSACTION_DONE |
++ INTR_STATUS0__ECC_ERR)))
++ ;
++
++ if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_ERR) {
++ iowrite32(INTR_STATUS0__ECC_ERR,
++ FlashReg + intr_status);
++ status = do_ecc_new(flash_bank,
++ read_data, block, page);
++ }
++
++ if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_TRANSACTION_DONE &
++ INTR_STATUS0__ECC_ERR) {
++ iowrite32(INTR_STATUS0__ECC_ERR |
++ INTR_STATUS0__ECC_TRANSACTION_DONE,
++ FlashReg + intr_status);
++ } else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_TRANSACTION_DONE) {
++ iowrite32(
++ INTR_STATUS0__ECC_TRANSACTION_DONE,
++ FlashReg + intr_status);
++ } else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_ERR) {
++ iowrite32(INTR_STATUS0__ECC_ERR,
++ FlashReg + intr_status);
++ }
++ }
++
++ page++;
++ page_count--;
++ read_data_l += PageSize;
++ }
++ }
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
++
++ return status;
++}
++
++u16 NAND_Pipeline_Write_Ahead(u8 *write_data, u32 block,
++ u16 page, u16 page_count)
++{
++ u16 status = PASS;
++ u32 NumPages = page_count;
++ u64 flash_add;
++ u32 flash_bank;
++ u32 intr_status = 0;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ int ret;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ status = Boundary_Check_Block_Page(block, page, page_count);
++
++ if (page_count < 2)
++ status = FAIL;
++
++ if (status != PASS)
++ return status;
++
++ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
++ * DeviceInfo.wBlockDataSize +
++ (u64)page * DeviceInfo.wPageDataSize;
++
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ intr_status = intr_status_addresses[flash_bank];
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ iowrite32(1, FlashReg + DMA_ENABLE);
++ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++
++ /* Fill the mrst_nand_info structure */
++ info.state = INT_PIPELINE_WRITE_AHEAD;
++ info.write_data = write_data;
++ info.flash_bank = flash_bank;
++ info.block = block;
++ info.page = page;
++ info.ret = PASS;
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
++
++ ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);
++
++ iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */
++
++ ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
++ if (!ret) {
++ printk(KERN_ERR "Wait for completion timeout "
++ "in %s, Line %d\n", __FILE__, __LINE__);
++ status = ERR;
++ } else {
++ status = info.ret;
++ }
++
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ iowrite32(0, FlashReg + DMA_ENABLE);
++ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ return status;
++}
++
++/* Un-tested function */
++u16 NAND_Multiplane_Write(u8 *write_data, u32 block, u16 page,
++ u16 page_count)
++{
++ u16 status = PASS;
++ u32 NumPages = page_count;
++ u64 flash_add;
++ u32 flash_bank;
++ u32 intr_status = 0;
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ u16 status2 = PASS;
++ u32 t;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ status = Boundary_Check_Block_Page(block, page, page_count);
++ if (status != PASS)
++ return status;
++
++ flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
++ * DeviceInfo.wBlockDataSize +
++ (u64)page * DeviceInfo.wPageDataSize;
++
++ flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
++
++ intr_status = intr_status_addresses[flash_bank];
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++ iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);
++
++ iowrite32(1, FlashReg + DMA_ENABLE);
++ while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
++
++ index_addr((u32)(MODE_10 | (flash_bank << 24) |
++ (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
++
++ ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);
++
++ while (1) {
++ while (!ioread32(FlashReg + intr_status))
++ ;
++
++ if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__DMA_CMD_COMP) {
++ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
++ FlashReg + intr_status);
++ status = PASS;
++ if (status2 == FAIL)
++ status = FAIL;
++ break;
++ } else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__PROGRAM_FAIL) {
++ status2 = FAIL;
++ status = FAIL;
++ t = ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__PROGRAM_FAIL;
++ iowrite32(t, FlashReg + intr_status);
++ } else {
++ iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
++ (~INTR_STATUS0__DMA_CMD_COMP),
++ FlashReg + intr_status);
++ }
++ }
++
++ iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
++
++ iowrite32(0, FlashReg + DMA_ENABLE);
++
++ while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
++ ;
++
++ iowrite32(0, FlashReg + MULTIPLANE_OPERATION);
++
++ return status;
++}
++
++
++#if CMD_DMA
++static irqreturn_t cdma_isr(int irq, void *dev_id)
++{
++ struct mrst_nand_info *dev = dev_id;
++ int first_failed_cmd;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ if (!is_cdma_interrupt())
++ return IRQ_NONE;
++
++ /* Disable controller interrupts */
++ iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
++ GLOB_FTL_Event_Status(&first_failed_cmd);
++ complete(&dev->complete);
++
++ return IRQ_HANDLED;
++}
++#else
++static void handle_nand_int_read(struct mrst_nand_info *dev)
++{
++ u32 intr_status_addresses[4] = {INTR_STATUS0,
++ INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
++ u32 intr_status;
++ u32 ecc_done_OR_dma_comp = 0;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ dev->ret = PASS;
++ intr_status = intr_status_addresses[dev->flash_bank];
++
++ while (1) {
++ if (enable_ecc) {
++ if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_ERR) {
++ iowrite32(INTR_STATUS0__ECC_ERR,
++ FlashReg + intr_status);
++ dev->ret = do_ecc_new(dev->flash_bank,
++ dev->read_data,
++ dev->block, dev->page);
++ } else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__DMA_CMD_COMP) {
++ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
++ FlashReg + intr_status);
++ if (1 == ecc_done_OR_dma_comp)
++ break;
++ ecc_done_OR_dma_comp = 1;
++ } else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__ECC_TRANSACTION_DONE) {
++ iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
++ FlashReg + intr_status);
++ if (1 == ecc_done_OR_dma_comp)
++ break;
++ ecc_done_OR_dma_comp = 1;
++ }
++ } else {
++ if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__DMA_CMD_COMP) {
++ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
++ FlashReg + intr_status);
++ break;
++ } else {
++ printk(KERN_ERR "Illegal INTS "
++ "(offset addr 0x%x) value: 0x%x\n",
++ intr_status,
++ ioread32(FlashReg + intr_status));
++ }
++ }
++
++ iowrite32((~INTR_STATUS0__ECC_ERR) &
++ (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
++ (~INTR_STATUS0__DMA_CMD_COMP),
++ FlashReg + intr_status);
++ }
++}
++
++static void handle_nand_int_write(struct mrst_nand_info *dev)
++{
++ u32 intr_status;
++ u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
++ INTR_STATUS2, INTR_STATUS3};
++ int status = PASS;
++
++ nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ dev->ret = PASS;
++ intr_status = intr[dev->flash_bank];
++
++ while (1) {
++ while (!ioread32(FlashReg + intr_status))
++ ;
++
++ if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__DMA_CMD_COMP) {
++ iowrite32(INTR_STATUS0__DMA_CMD_COMP,
++ FlashReg + intr_status);
++ if (FAIL == status)
++ dev->ret = FAIL;
++ break;
++ } else if (ioread32(FlashReg + intr_status) &
++ INTR_STATUS0__PROGRAM_FAIL) {
++ status = FAIL;
++ iowrite32(INTR_STATUS0__PROGRAM_FAIL,
++ FlashReg + intr_status);
++ } else {
++ iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
++ (~INTR_STATUS0__DMA_CMD_COMP),
++ FlashReg + intr_status);
++ }
++ }
++}
++
++static irqreturn_t ddma_isr(int irq, void *dev_id)
++{
++ struct mrst_nand_info *dev = dev_id;
++ u32 int_mask, ints0, ints1, ints2, ints3, ints_offset;
++ u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
++ INTR_STATUS2, INTR_STATUS3};
++
++ int_mask = INTR_STATUS0__DMA_CMD_COMP |
++ INTR_STATUS0__ECC_TRANSACTION_DONE |
++ INTR_STATUS0__ECC_ERR |
++ INTR_STATUS0__PROGRAM_FAIL |
++ INTR_STATUS0__ERASE_FAIL;
++
++ ints0 = ioread32(FlashReg + INTR_STATUS0);
++ ints1 = ioread32(FlashReg + INTR_STATUS1);
++ ints2 = ioread32(FlashReg + INTR_STATUS2);
++ ints3 = ioread32(FlashReg + INTR_STATUS3);
++
++ ints_offset = intr[dev->flash_bank];
++
++ nand_dbg_print(NAND_DBG_DEBUG,
++ "INTR0: 0x%x, INTR1: 0x%x, INTR2: 0x%x, INTR3: 0x%x, "
++ "DMA_INTR: 0x%x, "
++ "dev->state: 0x%x, dev->flash_bank: %d\n",
++ ints0, ints1, ints2, ints3,
++ ioread32(FlashReg + DMA_INTR),
++ dev->state, dev->flash_bank);
++
++ if (!(ioread32(FlashReg + ints_offset) & int_mask)) {
++ iowrite32(ints0, FlashReg + INTR_STATUS0);
++ iowrite32(ints1, FlashReg + INTR_STATUS1);
++ iowrite32(ints2, FlashReg + INTR_STATUS2);
++ iowrite32(ints3, FlashReg + INTR_STATUS3);
++ nand_dbg_print(NAND_DBG_WARN,
++ "ddma_isr: Invalid interrupt for NAND controller. "
++ "Ignore it\n");
++ return IRQ_NONE;
++ }
++
++ switch (dev->state) {
++ case INT_READ_PAGE_MAIN:
++ case INT_PIPELINE_READ_AHEAD:
++ /* Disable controller interrupts */
++ iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
++ handle_nand_int_read(dev);
++ break;
++ case INT_WRITE_PAGE_MAIN:
++ case INT_PIPELINE_WRITE_AHEAD:
++ iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
++ handle_nand_int_write(dev);
++ break;
++ default:
++ printk(KERN_ERR "ddma_isr - Illegal state: 0x%x\n",
++ dev->state);
++ return IRQ_NONE;
++ }
++
++ dev->state = INT_IDLE_STATE;
++ complete(&dev->complete);
++ return IRQ_HANDLED;
++}
++#endif
++
++static const struct pci_device_id nand_pci_ids[] = {
++ {
++ .vendor = 0x8086,
++ .device = 0x0809,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ },
++ { /* end: all zeroes */ }
++};
++
++static int nand_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
++{
++ int ret = -ENODEV;
++ unsigned long csr_base;
++ unsigned long csr_len;
++ struct mrst_nand_info *pndev = &info;
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ ret = pci_enable_device(dev);
++ if (ret) {
++ printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
++ return ret;
++ }
++
++ pci_set_master(dev);
++ pndev->dev = dev;
++
++ csr_base = pci_resource_start(dev, 0);
++ if (!csr_base) {
++ printk(KERN_ERR "Spectra: pci_resource_start failed!\n");
++ return -ENODEV;
++ }
++
++ csr_len = pci_resource_len(dev, 0);
++ if (!csr_len) {
++ printk(KERN_ERR "Spectra: pci_resource_len failed!\n");
++ return -ENODEV;
++ }
++
++ ret = pci_request_regions(dev, SPECTRA_NAND_NAME);
++ if (ret) {
++ printk(KERN_ERR "Spectra: Unable to request "
++ "memory region\n");
++ goto failed_req_csr;
++ }
++
++ pndev->ioaddr = ioremap_nocache(csr_base, csr_len);
++ if (!pndev->ioaddr) {
++ printk(KERN_ERR "Spectra: Unable to remap memory region\n");
++ ret = -ENOMEM;
++ goto failed_remap_csr;
++ }
++ nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08lx -> 0x%p (0x%lx)\n",
++ csr_base, pndev->ioaddr, csr_len);
++
++ init_completion(&pndev->complete);
++ nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq);
++
++#if CMD_DMA
++ if (request_irq(dev->irq, cdma_isr, IRQF_SHARED,
++ SPECTRA_NAND_NAME, &info)) {
++ printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
++ ret = -ENODEV;
++ iounmap(pndev->ioaddr);
++ goto failed_remap_csr;
++ }
++#else
++ if (request_irq(dev->irq, ddma_isr, IRQF_SHARED,
++ SPECTRA_NAND_NAME, &info)) {
++ printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
++ ret = -ENODEV;
++ iounmap(pndev->ioaddr);
++ goto failed_remap_csr;
++ }
++#endif
++
++ pci_set_drvdata(dev, pndev);
++
++ return 0;
++
++failed_remap_csr:
++ pci_release_regions(dev);
++failed_req_csr:
++
++ return ret;
++}
++
++static void nand_pci_remove(struct pci_dev *dev)
++{
++ struct mrst_nand_info *pndev = pci_get_drvdata(dev);
++
++ nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++#if CMD_DMA
++ free_irq(dev->irq, pndev);
++#endif
++ iounmap(pndev->ioaddr);
++ pci_release_regions(dev);
++ pci_disable_device(dev);
++}
++
++MODULE_DEVICE_TABLE(pci, nand_pci_ids);
++
++static struct pci_driver nand_pci_driver = {
++ .name = SPECTRA_NAND_NAME,
++ .id_table = nand_pci_ids,
++ .probe = nand_pci_probe,
++ .remove = nand_pci_remove,
++};
++
++int NAND_Flash_Init(void)
++{
++ int retval;
++ u32 int_mask;
++
++ nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
++ __FILE__, __LINE__, __func__);
++
++ FlashReg = ioremap_nocache(GLOB_HWCTL_REG_BASE,
++ GLOB_HWCTL_REG_SIZE);
++ if (!FlashReg) {
++ printk(KERN_ERR "Spectra: ioremap_nocache failed!");
++ return -ENOMEM;
++ }
++ nand_dbg_print(NAND_DBG_WARN,
++ "Spectra: Remapped reg base address: "
++ "0x%p, len: %d\n",
++ FlashReg, GLOB_HWCTL_REG_SIZE);
++
++ FlashMem = ioremap_nocache(GLOB_HWCTL_MEM_BASE,
++ GLOB_HWCTL_MEM_SIZE);
++ if (!FlashMem) {
++ printk(KERN_ERR "Spectra: ioremap_nocache failed!");
++ iounmap(FlashReg);
++ return -ENOMEM;
++ }
++ nand_dbg_print(NAND_DBG_WARN,
++ "Spectra: Remapped flash base address: "
++ "0x%p, len: %d\n",
++ (void *)FlashMem, GLOB_HWCTL_MEM_SIZE);
++
++ nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
++ "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
++ "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
++ "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
++ ioread32(FlashReg + ACC_CLKS),
++ ioread32(FlashReg + RE_2_WE),
++ ioread32(FlashReg + WE_2_RE),
++ ioread32(FlashReg + ADDR_2_DATA),
++ ioread32(FlashReg + RDWR_EN_LO_CNT),
++ ioread32(FlashReg + RDWR_EN_HI_CNT),
++ ioread32(FlashReg + CS_SETUP_CNT));
++
++ NAND_Flash_Reset();
++
++ iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
++
++#if CMD_DMA
++ info.pcmds_num = 0;
++ info.flash_bank = 0;
++ info.cdma_num = 0;
++ int_mask = (DMA_INTR__DESC_COMP_CHANNEL0 |
++ DMA_INTR__DESC_COMP_CHANNEL1 |
++ DMA_INTR__DESC_COMP_CHANNEL2 |
++ DMA_INTR__DESC_COMP_CHANNEL3 |
++ DMA_INTR__MEMCOPY_DESC_COMP);
++ iowrite32(int_mask, FlashReg + DMA_INTR_EN);
++ iowrite32(0xFFFF, FlashReg + DMA_INTR);
++
++ int_mask = (INTR_STATUS0__ECC_ERR |
++ INTR_STATUS0__PROGRAM_FAIL |
++ INTR_STATUS0__ERASE_FAIL);
++#else
++ int_mask = INTR_STATUS0__DMA_CMD_COMP |
++ INTR_STATUS0__ECC_TRANSACTION_DONE |
++ INTR_STATUS0__ECC_ERR |
++ INTR_STATUS0__PROGRAM_FAIL |
++ INTR_STATUS0__ERASE_FAIL;
++#endif
++ iowrite32(int_mask, FlashReg + INTR_EN0);
++ iowrite32(int_mask, FlashReg + INTR_EN1);
++ iowrite32(int_mask, FlashReg + INTR_EN2);
++ iowrite32(int_mask, FlashReg + INTR_EN3);
++
++ /* Clear all status bits */
++ iowrite32(0xFFFF, FlashReg + INTR_STATUS0);
++ iowrite32(0xFFFF, FlashReg + INTR_STATUS1);
++ iowrite32(0xFFFF, FlashReg + INTR_STATUS2);
++ iowrite32(0xFFFF, FlashReg + INTR_STATUS3);
++
++ iowrite32(0x0F, FlashReg + RB_PIN_ENABLED);
++ iowrite32(CHIP_EN_DONT_CARE__FLAG, FlashReg + CHIP_ENABLE_DONT_CARE);
++
++ /* Should set value for these registers when init */
++ iowrite32(0, FlashReg + TWO_ROW_ADDR_CYCLES);
++ iowrite32(1, FlashReg + ECC_ENABLE);
++ enable_ecc = 1;
++
++ retval = pci_register_driver(&nand_pci_driver);
++ if (retval)
++ return -ENOMEM;
++
++ return PASS;
++}
++
++/* Free memory */
++int nand_release(void)
++{
++ pci_unregister_driver(&nand_pci_driver);
++ iounmap(FlashMem);
++ iounmap(FlashReg);
++
++ return 0;
++}
++
++
++
+diff --git a/drivers/block/spectra/lld_nand.h b/drivers/block/spectra/lld_nand.h
+new file mode 100644
+index 0000000..c7d62c5
+--- /dev/null
++++ b/drivers/block/spectra/lld_nand.h
+@@ -0,0 +1,131 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef _LLD_NAND_
++#define _LLD_NAND_
++
++#ifdef ELDORA
++#include "defs.h"
++#else
++#include "flash.h"
++#include "ffsport.h"
++#endif
++
++#define MODE_00 0x00000000
++#define MODE_01 0x04000000
++#define MODE_10 0x08000000
++#define MODE_11 0x0C000000
++
++
++#define DATA_TRANSFER_MODE 0
++#define PROTECTION_PER_BLOCK 1
++#define LOAD_WAIT_COUNT 2
++#define PROGRAM_WAIT_COUNT 3
++#define ERASE_WAIT_COUNT 4
++#define INT_MONITOR_CYCLE_COUNT 5
++#define READ_BUSY_PIN_ENABLED 6
++#define MULTIPLANE_OPERATION_SUPPORT 7
++#define PRE_FETCH_MODE 8
++#define CE_DONT_CARE_SUPPORT 9
++#define COPYBACK_SUPPORT 10
++#define CACHE_WRITE_SUPPORT 11
++#define CACHE_READ_SUPPORT 12
++#define NUM_PAGES_IN_BLOCK 13
++#define ECC_ENABLE_SELECT 14
++#define WRITE_ENABLE_2_READ_ENABLE 15
++#define ADDRESS_2_DATA 16
++#define READ_ENABLE_2_WRITE_ENABLE 17
++#define TWO_ROW_ADDRESS_CYCLES 18
++#define MULTIPLANE_ADDRESS_RESTRICT 19
++#define ACC_CLOCKS 20
++#define READ_WRITE_ENABLE_LOW_COUNT 21
++#define READ_WRITE_ENABLE_HIGH_COUNT 22
++
++#define ECC_SECTOR_SIZE 512
++#define LLD_MAX_FLASH_BANKS 4
++
++struct mrst_nand_info {
++ struct pci_dev *dev;
++ u32 state;
++ u32 flash_bank;
++ u8 *read_data;
++ u8 *write_data;
++ u32 block;
++ u16 page;
++ u32 use_dma;
++ void __iomem *ioaddr; /* Mapped io reg base address */
++ int ret;
++ u32 pcmds_num;
++ struct pending_cmd *pcmds;
++ int cdma_num; /* CDMA descriptor number in this chan */
++ u8 *cdma_desc_buf; /* CDMA descriptor table */
++ u8 *memcp_desc_buf; /* Memory copy descriptor table */
++ dma_addr_t cdma_desc; /* Mapped CDMA descriptor table */
++ dma_addr_t memcp_desc; /* Mapped memory copy descriptor table */
++ struct completion complete;
++};
++
++int NAND_Flash_Init(void);
++int nand_release(void);
++u16 NAND_Flash_Reset(void);
++u16 NAND_Read_Device_ID(void);
++u16 NAND_Erase_Block(u32 flash_add);
++u16 NAND_Write_Page_Main(u8 *write_data, u32 block, u16 page,
++ u16 page_count);
++u16 NAND_Read_Page_Main(u8 *read_data, u32 block, u16 page,
++ u16 page_count);
++u16 NAND_UnlockArrayAll(void);
++u16 NAND_Write_Page_Main_Spare(u8 *write_data, u32 block,
++ u16 page, u16 page_count);
++u16 NAND_Write_Page_Spare(u8 *read_data, u32 block, u16 page,
++ u16 page_count);
++u16 NAND_Read_Page_Main_Spare(u8 *read_data, u32 block, u16 page,
++ u16 page_count);
++u16 NAND_Read_Page_Spare(u8 *read_data, u32 block, u16 page,
++ u16 page_count);
++void NAND_LLD_Enable_Disable_Interrupts(u16 INT_ENABLE);
++u16 NAND_Get_Bad_Block(u32 block);
++u16 NAND_Pipeline_Read_Ahead(u8 *read_data, u32 block, u16 page,
++ u16 page_count);
++u16 NAND_Pipeline_Write_Ahead(u8 *write_data, u32 block,
++ u16 page, u16 page_count);
++u16 NAND_Multiplane_Read(u8 *read_data, u32 block, u16 page,
++ u16 page_count);
++u16 NAND_Multiplane_Write(u8 *write_data, u32 block, u16 page,
++ u16 page_count);
++void NAND_ECC_Ctrl(int enable);
++u16 NAND_Read_Page_Main_Polling(u8 *read_data,
++ u32 block, u16 page, u16 page_count);
++u16 NAND_Pipeline_Read_Ahead_Polling(u8 *read_data,
++ u32 block, u16 page, u16 page_count);
++void Conv_Spare_Data_Log2Phy_Format(u8 *data);
++void Conv_Spare_Data_Phy2Log_Format(u8 *data);
++void Conv_Main_Spare_Data_Log2Phy_Format(u8 *data, u16 page_count);
++void Conv_Main_Spare_Data_Phy2Log_Format(u8 *data, u16 page_count);
++
++extern void __iomem *FlashReg;
++extern void __iomem *FlashMem;
++
++extern int totalUsedBanks;
++extern u32 GLOB_valid_banks[LLD_MAX_FLASH_BANKS];
++
++#endif /*_LLD_NAND_*/
++
++
++
+diff --git a/drivers/block/spectra/nand_regs.h b/drivers/block/spectra/nand_regs.h
+new file mode 100644
+index 0000000..e192e4a
+--- /dev/null
++++ b/drivers/block/spectra/nand_regs.h
+@@ -0,0 +1,619 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#define DEVICE_RESET 0x0
++#define DEVICE_RESET__BANK0 0x0001
++#define DEVICE_RESET__BANK1 0x0002
++#define DEVICE_RESET__BANK2 0x0004
++#define DEVICE_RESET__BANK3 0x0008
++
++#define TRANSFER_SPARE_REG 0x10
++#define TRANSFER_SPARE_REG__FLAG 0x0001
++
++#define LOAD_WAIT_CNT 0x20
++#define LOAD_WAIT_CNT__VALUE 0xffff
++
++#define PROGRAM_WAIT_CNT 0x30
++#define PROGRAM_WAIT_CNT__VALUE 0xffff
++
++#define ERASE_WAIT_CNT 0x40
++#define ERASE_WAIT_CNT__VALUE 0xffff
++
++#define INT_MON_CYCCNT 0x50
++#define INT_MON_CYCCNT__VALUE 0xffff
++
++#define RB_PIN_ENABLED 0x60
++#define RB_PIN_ENABLED__BANK0 0x0001
++#define RB_PIN_ENABLED__BANK1 0x0002
++#define RB_PIN_ENABLED__BANK2 0x0004
++#define RB_PIN_ENABLED__BANK3 0x0008
++
++#define MULTIPLANE_OPERATION 0x70
++#define MULTIPLANE_OPERATION__FLAG 0x0001
++
++#define MULTIPLANE_READ_ENABLE 0x80
++#define MULTIPLANE_READ_ENABLE__FLAG 0x0001
++
++#define COPYBACK_DISABLE 0x90
++#define COPYBACK_DISABLE__FLAG 0x0001
++
++#define CACHE_WRITE_ENABLE 0xa0
++#define CACHE_WRITE_ENABLE__FLAG 0x0001
++
++#define CACHE_READ_ENABLE 0xb0
++#define CACHE_READ_ENABLE__FLAG 0x0001
++
++#define PREFETCH_MODE 0xc0
++#define PREFETCH_MODE__PREFETCH_EN 0x0001
++#define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0
++
++#define CHIP_ENABLE_DONT_CARE 0xd0
++#define CHIP_EN_DONT_CARE__FLAG 0x01
++
++#define ECC_ENABLE 0xe0
++#define ECC_ENABLE__FLAG 0x0001
++
++#define GLOBAL_INT_ENABLE 0xf0
++#define GLOBAL_INT_EN_FLAG 0x01
++
++#define WE_2_RE 0x100
++#define WE_2_RE__VALUE 0x003f
++
++#define ADDR_2_DATA 0x110
++#define ADDR_2_DATA__VALUE 0x003f
++
++#define RE_2_WE 0x120
++#define RE_2_WE__VALUE 0x003f
++
++#define ACC_CLKS 0x130
++#define ACC_CLKS__VALUE 0x000f
++
++#define NUMBER_OF_PLANES 0x140
++#define NUMBER_OF_PLANES__VALUE 0x0007
++
++#define PAGES_PER_BLOCK 0x150
++#define PAGES_PER_BLOCK__VALUE 0xffff
++
++#define DEVICE_WIDTH 0x160
++#define DEVICE_WIDTH__VALUE 0x0003
++
++#define DEVICE_MAIN_AREA_SIZE 0x170
++#define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff
++
++#define DEVICE_SPARE_AREA_SIZE 0x180
++#define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff
++
++#define TWO_ROW_ADDR_CYCLES 0x190
++#define TWO_ROW_ADDR_CYCLES__FLAG 0x0001
++
++#define MULTIPLANE_ADDR_RESTRICT 0x1a0
++#define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001
++
++#define ECC_CORRECTION 0x1b0
++#define ECC_CORRECTION__VALUE 0x001f
++
++#define READ_MODE 0x1c0
++#define READ_MODE__VALUE 0x000f
++
++#define WRITE_MODE 0x1d0
++#define WRITE_MODE__VALUE 0x000f
++
++#define COPYBACK_MODE 0x1e0
++#define COPYBACK_MODE__VALUE 0x000f
++
++#define RDWR_EN_LO_CNT 0x1f0
++#define RDWR_EN_LO_CNT__VALUE 0x001f
++
++#define RDWR_EN_HI_CNT 0x200
++#define RDWR_EN_HI_CNT__VALUE 0x001f
++
++#define MAX_RD_DELAY 0x210
++#define MAX_RD_DELAY__VALUE 0x000f
++
++#define CS_SETUP_CNT 0x220
++#define CS_SETUP_CNT__VALUE 0x001f
++
++#define SPARE_AREA_SKIP_BYTES 0x230
++#define SPARE_AREA_SKIP_BYTES__VALUE 0x003f
++
++#define SPARE_AREA_MARKER 0x240
++#define SPARE_AREA_MARKER__VALUE 0xffff
++
++#define DEVICES_CONNECTED 0x250
++#define DEVICES_CONNECTED__VALUE 0x0007
++
++#define DIE_MASK 0x260
++#define DIE_MASK__VALUE 0x00ff
++
++#define FIRST_BLOCK_OF_NEXT_PLANE 0x270
++#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff
++
++#define WRITE_PROTECT 0x280
++#define WRITE_PROTECT__FLAG 0x0001
++
++#define RE_2_RE 0x290
++#define RE_2_RE__VALUE 0x003f
++
++#define MANUFACTURER_ID 0x300
++#define MANUFACTURER_ID__VALUE 0x00ff
++
++#define DEVICE_ID 0x310
++#define DEVICE_ID__VALUE 0x00ff
++
++#define DEVICE_PARAM_0 0x320
++#define DEVICE_PARAM_0__VALUE 0x00ff
++
++#define DEVICE_PARAM_1 0x330
++#define DEVICE_PARAM_1__VALUE 0x00ff
++
++#define DEVICE_PARAM_2 0x340
++#define DEVICE_PARAM_2__VALUE 0x00ff
++
++#define LOGICAL_PAGE_DATA_SIZE 0x350
++#define LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff
++
++#define LOGICAL_PAGE_SPARE_SIZE 0x360
++#define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff
++
++#define REVISION 0x370
++#define REVISION__VALUE 0xffff
++
++#define ONFI_DEVICE_FEATURES 0x380
++#define ONFI_DEVICE_FEATURES__VALUE 0x003f
++
++#define ONFI_OPTIONAL_COMMANDS 0x390
++#define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f
++
++#define ONFI_TIMING_MODE 0x3a0
++#define ONFI_TIMING_MODE__VALUE 0x003f
++
++#define ONFI_PGM_CACHE_TIMING_MODE 0x3b0
++#define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f
++
++#define ONFI_DEVICE_NO_OF_LUNS 0x3c0
++#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff
++#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100
++
++#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0
++#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff
++
++#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0
++#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff
++
++#define FEATURES 0x3f0
++#define FEATURES__N_BANKS 0x0003
++#define FEATURES__ECC_MAX_ERR 0x003c
++#define FEATURES__DMA 0x0040
++#define FEATURES__CMD_DMA 0x0080
++#define FEATURES__PARTITION 0x0100
++#define FEATURES__XDMA_SIDEBAND 0x0200
++#define FEATURES__GPREG 0x0400
++#define FEATURES__INDEX_ADDR 0x0800
++
++#define TRANSFER_MODE 0x400
++#define TRANSFER_MODE__VALUE 0x0003
++
++#define INTR_STATUS0 0x410
++#define INTR_STATUS0__ECC_TRANSACTION_DONE 0x0001
++#define INTR_STATUS0__ECC_ERR 0x0002
++#define INTR_STATUS0__DMA_CMD_COMP 0x0004
++#define INTR_STATUS0__TIME_OUT 0x0008
++#define INTR_STATUS0__PROGRAM_FAIL 0x0010
++#define INTR_STATUS0__ERASE_FAIL 0x0020
++#define INTR_STATUS0__LOAD_COMP 0x0040
++#define INTR_STATUS0__PROGRAM_COMP 0x0080
++#define INTR_STATUS0__ERASE_COMP 0x0100
++#define INTR_STATUS0__PIPE_CPYBCK_CMD_COMP 0x0200
++#define INTR_STATUS0__LOCKED_BLK 0x0400
++#define INTR_STATUS0__UNSUP_CMD 0x0800
++#define INTR_STATUS0__INT_ACT 0x1000
++#define INTR_STATUS0__RST_COMP 0x2000
++#define INTR_STATUS0__PIPE_CMD_ERR 0x4000
++#define INTR_STATUS0__PAGE_XFER_INC 0x8000
++
++#define INTR_EN0 0x420
++#define INTR_EN0__ECC_TRANSACTION_DONE 0x0001
++#define INTR_EN0__ECC_ERR 0x0002
++#define INTR_EN0__DMA_CMD_COMP 0x0004
++#define INTR_EN0__TIME_OUT 0x0008
++#define INTR_EN0__PROGRAM_FAIL 0x0010
++#define INTR_EN0__ERASE_FAIL 0x0020
++#define INTR_EN0__LOAD_COMP 0x0040
++#define INTR_EN0__PROGRAM_COMP 0x0080
++#define INTR_EN0__ERASE_COMP 0x0100
++#define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200
++#define INTR_EN0__LOCKED_BLK 0x0400
++#define INTR_EN0__UNSUP_CMD 0x0800
++#define INTR_EN0__INT_ACT 0x1000
++#define INTR_EN0__RST_COMP 0x2000
++#define INTR_EN0__PIPE_CMD_ERR 0x4000
++#define INTR_EN0__PAGE_XFER_INC 0x8000
++
++#define PAGE_CNT0 0x430
++#define PAGE_CNT0__VALUE 0x00ff
++
++#define ERR_PAGE_ADDR0 0x440
++#define ERR_PAGE_ADDR0__VALUE 0xffff
++
++#define ERR_BLOCK_ADDR0 0x450
++#define ERR_BLOCK_ADDR0__VALUE 0xffff
++
++#define INTR_STATUS1 0x460
++#define INTR_STATUS1__ECC_TRANSACTION_DONE 0x0001
++#define INTR_STATUS1__ECC_ERR 0x0002
++#define INTR_STATUS1__DMA_CMD_COMP 0x0004
++#define INTR_STATUS1__TIME_OUT 0x0008
++#define INTR_STATUS1__PROGRAM_FAIL 0x0010
++#define INTR_STATUS1__ERASE_FAIL 0x0020
++#define INTR_STATUS1__LOAD_COMP 0x0040
++#define INTR_STATUS1__PROGRAM_COMP 0x0080
++#define INTR_STATUS1__ERASE_COMP 0x0100
++#define INTR_STATUS1__PIPE_CPYBCK_CMD_COMP 0x0200
++#define INTR_STATUS1__LOCKED_BLK 0x0400
++#define INTR_STATUS1__UNSUP_CMD 0x0800
++#define INTR_STATUS1__INT_ACT 0x1000
++#define INTR_STATUS1__RST_COMP 0x2000
++#define INTR_STATUS1__PIPE_CMD_ERR 0x4000
++#define INTR_STATUS1__PAGE_XFER_INC 0x8000
++
++#define INTR_EN1 0x470
++#define INTR_EN1__ECC_TRANSACTION_DONE 0x0001
++#define INTR_EN1__ECC_ERR 0x0002
++#define INTR_EN1__DMA_CMD_COMP 0x0004
++#define INTR_EN1__TIME_OUT 0x0008
++#define INTR_EN1__PROGRAM_FAIL 0x0010
++#define INTR_EN1__ERASE_FAIL 0x0020
++#define INTR_EN1__LOAD_COMP 0x0040
++#define INTR_EN1__PROGRAM_COMP 0x0080
++#define INTR_EN1__ERASE_COMP 0x0100
++#define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200
++#define INTR_EN1__LOCKED_BLK 0x0400
++#define INTR_EN1__UNSUP_CMD 0x0800
++#define INTR_EN1__INT_ACT 0x1000
++#define INTR_EN1__RST_COMP 0x2000
++#define INTR_EN1__PIPE_CMD_ERR 0x4000
++#define INTR_EN1__PAGE_XFER_INC 0x8000
++
++#define PAGE_CNT1 0x480
++#define PAGE_CNT1__VALUE 0x00ff
++
++#define ERR_PAGE_ADDR1 0x490
++#define ERR_PAGE_ADDR1__VALUE 0xffff
++
++#define ERR_BLOCK_ADDR1 0x4a0
++#define ERR_BLOCK_ADDR1__VALUE 0xffff
++
++#define INTR_STATUS2 0x4b0
++#define INTR_STATUS2__ECC_TRANSACTION_DONE 0x0001
++#define INTR_STATUS2__ECC_ERR 0x0002
++#define INTR_STATUS2__DMA_CMD_COMP 0x0004
++#define INTR_STATUS2__TIME_OUT 0x0008
++#define INTR_STATUS2__PROGRAM_FAIL 0x0010
++#define INTR_STATUS2__ERASE_FAIL 0x0020
++#define INTR_STATUS2__LOAD_COMP 0x0040
++#define INTR_STATUS2__PROGRAM_COMP 0x0080
++#define INTR_STATUS2__ERASE_COMP 0x0100
++#define INTR_STATUS2__PIPE_CPYBCK_CMD_COMP 0x0200
++#define INTR_STATUS2__LOCKED_BLK 0x0400
++#define INTR_STATUS2__UNSUP_CMD 0x0800
++#define INTR_STATUS2__INT_ACT 0x1000
++#define INTR_STATUS2__RST_COMP 0x2000
++#define INTR_STATUS2__PIPE_CMD_ERR 0x4000
++#define INTR_STATUS2__PAGE_XFER_INC 0x8000
++
++#define INTR_EN2 0x4c0
++#define INTR_EN2__ECC_TRANSACTION_DONE 0x0001
++#define INTR_EN2__ECC_ERR 0x0002
++#define INTR_EN2__DMA_CMD_COMP 0x0004
++#define INTR_EN2__TIME_OUT 0x0008
++#define INTR_EN2__PROGRAM_FAIL 0x0010
++#define INTR_EN2__ERASE_FAIL 0x0020
++#define INTR_EN2__LOAD_COMP 0x0040
++#define INTR_EN2__PROGRAM_COMP 0x0080
++#define INTR_EN2__ERASE_COMP 0x0100
++#define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200
++#define INTR_EN2__LOCKED_BLK 0x0400
++#define INTR_EN2__UNSUP_CMD 0x0800
++#define INTR_EN2__INT_ACT 0x1000
++#define INTR_EN2__RST_COMP 0x2000
++#define INTR_EN2__PIPE_CMD_ERR 0x4000
++#define INTR_EN2__PAGE_XFER_INC 0x8000
++
++#define PAGE_CNT2 0x4d0
++#define PAGE_CNT2__VALUE 0x00ff
++
++#define ERR_PAGE_ADDR2 0x4e0
++#define ERR_PAGE_ADDR2__VALUE 0xffff
++
++#define ERR_BLOCK_ADDR2 0x4f0
++#define ERR_BLOCK_ADDR2__VALUE 0xffff
++
++#define INTR_STATUS3 0x500
++#define INTR_STATUS3__ECC_TRANSACTION_DONE 0x0001
++#define INTR_STATUS3__ECC_ERR 0x0002
++#define INTR_STATUS3__DMA_CMD_COMP 0x0004
++#define INTR_STATUS3__TIME_OUT 0x0008
++#define INTR_STATUS3__PROGRAM_FAIL 0x0010
++#define INTR_STATUS3__ERASE_FAIL 0x0020
++#define INTR_STATUS3__LOAD_COMP 0x0040
++#define INTR_STATUS3__PROGRAM_COMP 0x0080
++#define INTR_STATUS3__ERASE_COMP 0x0100
++#define INTR_STATUS3__PIPE_CPYBCK_CMD_COMP 0x0200
++#define INTR_STATUS3__LOCKED_BLK 0x0400
++#define INTR_STATUS3__UNSUP_CMD 0x0800
++#define INTR_STATUS3__INT_ACT 0x1000
++#define INTR_STATUS3__RST_COMP 0x2000
++#define INTR_STATUS3__PIPE_CMD_ERR 0x4000
++#define INTR_STATUS3__PAGE_XFER_INC 0x8000
++
++#define INTR_EN3 0x510
++#define INTR_EN3__ECC_TRANSACTION_DONE 0x0001
++#define INTR_EN3__ECC_ERR 0x0002
++#define INTR_EN3__DMA_CMD_COMP 0x0004
++#define INTR_EN3__TIME_OUT 0x0008
++#define INTR_EN3__PROGRAM_FAIL 0x0010
++#define INTR_EN3__ERASE_FAIL 0x0020
++#define INTR_EN3__LOAD_COMP 0x0040
++#define INTR_EN3__PROGRAM_COMP 0x0080
++#define INTR_EN3__ERASE_COMP 0x0100
++#define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200
++#define INTR_EN3__LOCKED_BLK 0x0400
++#define INTR_EN3__UNSUP_CMD 0x0800
++#define INTR_EN3__INT_ACT 0x1000
++#define INTR_EN3__RST_COMP 0x2000
++#define INTR_EN3__PIPE_CMD_ERR 0x4000
++#define INTR_EN3__PAGE_XFER_INC 0x8000
++
++#define PAGE_CNT3 0x520
++#define PAGE_CNT3__VALUE 0x00ff
++
++#define ERR_PAGE_ADDR3 0x530
++#define ERR_PAGE_ADDR3__VALUE 0xffff
++
++#define ERR_BLOCK_ADDR3 0x540
++#define ERR_BLOCK_ADDR3__VALUE 0xffff
++
++#define DATA_INTR 0x550
++#define DATA_INTR__WRITE_SPACE_AV 0x0001
++#define DATA_INTR__READ_DATA_AV 0x0002
++
++#define DATA_INTR_EN 0x560
++#define DATA_INTR_EN__WRITE_SPACE_AV 0x0001
++#define DATA_INTR_EN__READ_DATA_AV 0x0002
++
++#define GPREG_0 0x570
++#define GPREG_0__VALUE 0xffff
++
++#define GPREG_1 0x580
++#define GPREG_1__VALUE 0xffff
++
++#define GPREG_2 0x590
++#define GPREG_2__VALUE 0xffff
++
++#define GPREG_3 0x5a0
++#define GPREG_3__VALUE 0xffff
++
++#define ECC_THRESHOLD 0x600
++#define ECC_THRESHOLD__VALUE 0x03ff
++
++#define ECC_ERROR_BLOCK_ADDRESS 0x610
++#define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff
++
++#define ECC_ERROR_PAGE_ADDRESS 0x620
++#define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff
++#define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000
++
++#define ECC_ERROR_ADDRESS 0x630
++#define ECC_ERROR_ADDRESS__OFFSET 0x0fff
++#define ECC_ERROR_ADDRESS__SECTOR_NR 0xf000
++
++#define ERR_CORRECTION_INFO 0x640
++#define ERR_CORRECTION_INFO__BYTEMASK 0x00ff
++#define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00
++#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000
++#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000
++
++#define DMA_ENABLE 0x700
++#define DMA_ENABLE__FLAG 0x0001
++
++#define IGNORE_ECC_DONE 0x710
++#define IGNORE_ECC_DONE__FLAG 0x0001
++
++#define DMA_INTR 0x720
++#define DMA_INTR__TARGET_ERROR 0x0001
++#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002
++#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004
++#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008
++#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010
++#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020
++
++#define DMA_INTR_EN 0x730
++#define DMA_INTR_EN__TARGET_ERROR 0x0001
++#define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002
++#define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004
++#define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008
++#define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010
++#define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020
++
++#define TARGET_ERR_ADDR_LO 0x740
++#define TARGET_ERR_ADDR_LO__VALUE 0xffff
++
++#define TARGET_ERR_ADDR_HI 0x750
++#define TARGET_ERR_ADDR_HI__VALUE 0xffff
++
++#define CHNL_ACTIVE 0x760
++#define CHNL_ACTIVE__CHANNEL0 0x0001
++#define CHNL_ACTIVE__CHANNEL1 0x0002
++#define CHNL_ACTIVE__CHANNEL2 0x0004
++#define CHNL_ACTIVE__CHANNEL3 0x0008
++
++#define ACTIVE_SRC_ID 0x800
++#define ACTIVE_SRC_ID__VALUE 0x00ff
++
++#define PTN_INTR 0x810
++#define PTN_INTR__CONFIG_ERROR 0x0001
++#define PTN_INTR__ACCESS_ERROR_BANK0 0x0002
++#define PTN_INTR__ACCESS_ERROR_BANK1 0x0004
++#define PTN_INTR__ACCESS_ERROR_BANK2 0x0008
++#define PTN_INTR__ACCESS_ERROR_BANK3 0x0010
++#define PTN_INTR__REG_ACCESS_ERROR 0x0020
++
++#define PTN_INTR_EN 0x820
++#define PTN_INTR_EN__CONFIG_ERROR 0x0001
++#define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002
++#define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004
++#define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008
++#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010
++#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020
++
++#define PERM_SRC_ID_0 0x830
++#define PERM_SRC_ID_0__SRCID 0x00ff
++#define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800
++#define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000
++#define PERM_SRC_ID_0__READ_ACTIVE 0x4000
++#define PERM_SRC_ID_0__PARTITION_VALID 0x8000
++
++#define MIN_BLK_ADDR_0 0x840
++#define MIN_BLK_ADDR_0__VALUE 0xffff
++
++#define MAX_BLK_ADDR_0 0x850
++#define MAX_BLK_ADDR_0__VALUE 0xffff
++
++#define MIN_MAX_BANK_0 0x860
++#define MIN_MAX_BANK_0__MIN_VALUE 0x0003
++#define MIN_MAX_BANK_0__MAX_VALUE 0x000c
++
++#define PERM_SRC_ID_1 0x870
++#define PERM_SRC_ID_1__SRCID 0x00ff
++#define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800
++#define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000
++#define PERM_SRC_ID_1__READ_ACTIVE 0x4000
++#define PERM_SRC_ID_1__PARTITION_VALID 0x8000
++
++#define MIN_BLK_ADDR_1 0x880
++#define MIN_BLK_ADDR_1__VALUE 0xffff
++
++#define MAX_BLK_ADDR_1 0x890
++#define MAX_BLK_ADDR_1__VALUE 0xffff
++
++#define MIN_MAX_BANK_1 0x8a0
++#define MIN_MAX_BANK_1__MIN_VALUE 0x0003
++#define MIN_MAX_BANK_1__MAX_VALUE 0x000c
++
++#define PERM_SRC_ID_2 0x8b0
++#define PERM_SRC_ID_2__SRCID 0x00ff
++#define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800
++#define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000
++#define PERM_SRC_ID_2__READ_ACTIVE 0x4000
++#define PERM_SRC_ID_2__PARTITION_VALID 0x8000
++
++#define MIN_BLK_ADDR_2 0x8c0
++#define MIN_BLK_ADDR_2__VALUE 0xffff
++
++#define MAX_BLK_ADDR_2 0x8d0
++#define MAX_BLK_ADDR_2__VALUE 0xffff
++
++#define MIN_MAX_BANK_2 0x8e0
++#define MIN_MAX_BANK_2__MIN_VALUE 0x0003
++#define MIN_MAX_BANK_2__MAX_VALUE 0x000c
++
++#define PERM_SRC_ID_3 0x8f0
++#define PERM_SRC_ID_3__SRCID 0x00ff
++#define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800
++#define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000
++#define PERM_SRC_ID_3__READ_ACTIVE 0x4000
++#define PERM_SRC_ID_3__PARTITION_VALID 0x8000
++
++#define MIN_BLK_ADDR_3 0x900
++#define MIN_BLK_ADDR_3__VALUE 0xffff
++
++#define MAX_BLK_ADDR_3 0x910
++#define MAX_BLK_ADDR_3__VALUE 0xffff
++
++#define MIN_MAX_BANK_3 0x920
++#define MIN_MAX_BANK_3__MIN_VALUE 0x0003
++#define MIN_MAX_BANK_3__MAX_VALUE 0x000c
++
++#define PERM_SRC_ID_4 0x930
++#define PERM_SRC_ID_4__SRCID 0x00ff
++#define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800
++#define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000
++#define PERM_SRC_ID_4__READ_ACTIVE 0x4000
++#define PERM_SRC_ID_4__PARTITION_VALID 0x8000
++
++#define MIN_BLK_ADDR_4 0x940
++#define MIN_BLK_ADDR_4__VALUE 0xffff
++
++#define MAX_BLK_ADDR_4 0x950
++#define MAX_BLK_ADDR_4__VALUE 0xffff
++
++#define MIN_MAX_BANK_4 0x960
++#define MIN_MAX_BANK_4__MIN_VALUE 0x0003
++#define MIN_MAX_BANK_4__MAX_VALUE 0x000c
++
++#define PERM_SRC_ID_5 0x970
++#define PERM_SRC_ID_5__SRCID 0x00ff
++#define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800
++#define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000
++#define PERM_SRC_ID_5__READ_ACTIVE 0x4000
++#define PERM_SRC_ID_5__PARTITION_VALID 0x8000
++
++#define MIN_BLK_ADDR_5 0x980
++#define MIN_BLK_ADDR_5__VALUE 0xffff
++
++#define MAX_BLK_ADDR_5 0x990
++#define MAX_BLK_ADDR_5__VALUE 0xffff
++
++#define MIN_MAX_BANK_5 0x9a0
++#define MIN_MAX_BANK_5__MIN_VALUE 0x0003
++#define MIN_MAX_BANK_5__MAX_VALUE 0x000c
++
++#define PERM_SRC_ID_6 0x9b0
++#define PERM_SRC_ID_6__SRCID 0x00ff
++#define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800
++#define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000
++#define PERM_SRC_ID_6__READ_ACTIVE 0x4000
++#define PERM_SRC_ID_6__PARTITION_VALID 0x8000
++
++#define MIN_BLK_ADDR_6 0x9c0
++#define MIN_BLK_ADDR_6__VALUE 0xffff
++
++#define MAX_BLK_ADDR_6 0x9d0
++#define MAX_BLK_ADDR_6__VALUE 0xffff
++
++#define MIN_MAX_BANK_6 0x9e0
++#define MIN_MAX_BANK_6__MIN_VALUE 0x0003
++#define MIN_MAX_BANK_6__MAX_VALUE 0x000c
++
++#define PERM_SRC_ID_7 0x9f0
++#define PERM_SRC_ID_7__SRCID 0x00ff
++#define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800
++#define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000
++#define PERM_SRC_ID_7__READ_ACTIVE 0x4000
++#define PERM_SRC_ID_7__PARTITION_VALID 0x8000
++
++#define MIN_BLK_ADDR_7 0xa00
++#define MIN_BLK_ADDR_7__VALUE 0xffff
++
++#define MAX_BLK_ADDR_7 0xa10
++#define MAX_BLK_ADDR_7__VALUE 0xffff
++
++#define MIN_MAX_BANK_7 0xa20
++#define MIN_MAX_BANK_7__MIN_VALUE 0x0003
++#define MIN_MAX_BANK_7__MAX_VALUE 0x000c
+diff --git a/drivers/block/spectra/spectraswconfig.h b/drivers/block/spectra/spectraswconfig.h
+new file mode 100644
+index 0000000..b630f06
+--- /dev/null
++++ b/drivers/block/spectra/spectraswconfig.h
+@@ -0,0 +1,81 @@
++/*
++ * NAND Flash Controller Device Driver
++ * Copyright (c) 2009, Intel Corporation and its suppliers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef _SPECTRASWCONFIG_
++#define _SPECTRASWCONFIG_
++
++/* NAND driver version */
++#define GLOB_VERSION "driver version 20100311"
++
++
++/***** Common Parameters *****/
++#define RETRY_TIMES 3
++
++#define READ_BADBLOCK_INFO 1
++#define READBACK_VERIFY 0
++#define AUTO_FORMAT_FLASH 0
++
++/***** Cache Parameters *****/
++#define CACHE_ITEM_NUM 128
++#define BLK_NUM_FOR_L2_CACHE 16
++
++/***** Block Table Parameters *****/
++#define BLOCK_TABLE_INDEX 0
++
++/***** Wear Leveling Parameters *****/
++#define WEAR_LEVELING_GATE 0x10
++#define WEAR_LEVELING_BLOCK_NUM 10
++
++#define DEBUG_BNDRY 0
++
++/***** Product Feature Support *****/
++#define FLASH_EMU defined(CONFIG_MRST_NAND_EMU)
++#define FLASH_NAND defined(CONFIG_MRST_NAND_HW)
++#define CMD_DMA 0
++
++#define SPECTRA_PARTITION_ID 0
++
++/* Enable this macro if the number of flash blocks is larger than 16K. */
++#define SUPPORT_LARGE_BLOCKNUM 1
++
++/**** Block Table and Reserved Block Parameters *****/
++#define SPECTRA_START_BLOCK 3
++//#define NUM_FREE_BLOCKS_GATE 30
++#define NUM_FREE_BLOCKS_GATE 60
++
++/**** Hardware Parameters ****/
++#define GLOB_HWCTL_REG_BASE 0xFFA40000
++#define GLOB_HWCTL_REG_SIZE 4096
++
++#define GLOB_HWCTL_MEM_BASE 0xFFA48000
++#define GLOB_HWCTL_MEM_SIZE 4096
++
++/* KBV - Updated to LNW scratch register address */
++#define SCRATCH_REG_ADDR 0xFF108018
++#define SCRATCH_REG_SIZE 64
++
++#define GLOB_HWCTL_DEFAULT_BLKS 2048
++
++#define SUPPORT_15BITECC 1
++#define SUPPORT_8BITECC 1
++
++#define ONFI_BLOOM_TIME 0
++#define MODE5_WORKAROUND 1
++
++#endif /*_SPECTRASWCONFIG_*/
+--
+1.6.0.6
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-only-enable-mrst-pciquirks-on-mrst.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-only-enable-mrst-pciquirks-on-mrst.patch
new file mode 100644
index 0000000..4a56684
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-only-enable-mrst-pciquirks-on-mrst.patch
@@ -0,0 +1,35 @@
+From 722a639fd2cec44501c04ae32af57fd822c5a2d5 Mon Sep 17 00:00:00 2001
+From: Yinghai Lu <yinghai@kernel.org>
+Date: Wed, 24 Feb 2010 12:39:37 -0800
+Subject: [PATCH] x86, pci: Exclude Moorestown PCI code if CONFIG_X86_MRST=n
+
+If we don't have any Moorestown CPU support compiled in, we don't need
+the Moorestown PCI support either.
+
+Signed-off-by: Yinghai Lu <yinghai@kernel.org>
+LKML-Reference: <4B858E89.7040807@kernel.org>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Acked-by: Yong Wang <yong.y.wang@intel.com>
+Patch-mainline: Patch-mainline: Merged into x86/mrst branch of -tip
+---
+ arch/x86/pci/Makefile | 4 +++-
+ 1 files changed, 3 insertions(+), 1 deletions(-)
+
+diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
+index 4753ebc..56caf2a 100644
+--- a/arch/x86/pci/Makefile
++++ b/arch/x86/pci/Makefile
+@@ -13,7 +13,9 @@ obj-$(CONFIG_X86_VISWS) += visws.o
+
+ obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
+
+-obj-y += common.o early.o mrst.o
++obj-$(CONFIG_X86_MRST) += mrst.o
++
++obj-y += common.o early.o
+ obj-y += amd_bus.o
+ obj-$(CONFIG_X86_64) += bus_numa.o
+
+--
+1.5.5.1
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-platform-enabling.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-platform-enabling.patch
new file mode 100644
index 0000000..7f81eb8
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-platform-enabling.patch
@@ -0,0 +1,13580 @@
+Index: linux-2.6.33/drivers/pci/pci.c
+===================================================================
+--- linux-2.6.33.orig/drivers/pci/pci.c
++++ linux-2.6.33/drivers/pci/pci.c
+@@ -297,6 +297,49 @@ int pci_find_ext_capability(struct pci_d
+ }
+ EXPORT_SYMBOL_GPL(pci_find_ext_capability);
+
++/**
++ * pci_bus_find_ext_capability - find an extended capability
++ * @bus: the PCI bus to query
++ * @devfn: PCI device to query
++ * @cap: capability code
++ *
++ * Like pci_find_ext_capability() but works for pci devices that do not have a
++ * pci_dev structure set up yet.
++ *
++ * Returns the address of the requested capability structure within the
++ * device's PCI configuration space or 0 in case the device does not
++ * support it.
++ */
++int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
++ int cap)
++{
++ u32 header;
++ int ttl;
++ int pos = PCI_CFG_SPACE_SIZE;
++
++ /* minimum 8 bytes per capability */
++ ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
++
++ if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
++ return 0;
++ if (header == 0xffffffff || header == 0)
++ return 0;
++
++ while (ttl-- > 0) {
++ if (PCI_EXT_CAP_ID(header) == cap)
++ return pos;
++
++ pos = PCI_EXT_CAP_NEXT(header);
++ if (pos < PCI_CFG_SPACE_SIZE)
++ break;
++
++ if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
++ break;
++ }
++
++ return 0;
++}
++
+ static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
+ {
+ int rc, ttl = PCI_FIND_CAP_TTL;
+Index: linux-2.6.33/include/linux/pci.h
+===================================================================
+--- linux-2.6.33.orig/include/linux/pci.h
++++ linux-2.6.33/include/linux/pci.h
+@@ -631,6 +631,8 @@ enum pci_lost_interrupt_reason pci_lost_
+ int pci_find_capability(struct pci_dev *dev, int cap);
+ int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
+ int pci_find_ext_capability(struct pci_dev *dev, int cap);
++int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
++ int cap);
+ int pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
+ int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap);
+ struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
+Index: linux-2.6.33/arch/x86/include/asm/numaq.h
+===================================================================
+--- linux-2.6.33.orig/arch/x86/include/asm/numaq.h
++++ linux-2.6.33/arch/x86/include/asm/numaq.h
+@@ -30,6 +30,7 @@
+
+ extern int found_numaq;
+ extern int get_memcfg_numaq(void);
++extern int pci_numaq_init(void);
+
+ extern void *xquad_portio;
+
+Index: linux-2.6.33/arch/x86/include/asm/pci.h
+===================================================================
+--- linux-2.6.33.orig/arch/x86/include/asm/pci.h
++++ linux-2.6.33/arch/x86/include/asm/pci.h
+@@ -45,8 +45,15 @@ static inline int pci_proc_domain(struct
+
+ #ifdef CONFIG_PCI
+ extern unsigned int pcibios_assign_all_busses(void);
++extern int pci_legacy_init(void);
++# ifdef CONFIG_ACPI
++# define x86_default_pci_init pci_acpi_init
++# else
++# define x86_default_pci_init pci_legacy_init
++# endif
+ #else
+-#define pcibios_assign_all_busses() 0
++# define pcibios_assign_all_busses() 0
++# define x86_default_pci_init NULL
+ #endif
+
+ extern unsigned long pci_mem_start;
+Index: linux-2.6.33/arch/x86/include/asm/pci_x86.h
+===================================================================
+--- linux-2.6.33.orig/arch/x86/include/asm/pci_x86.h
++++ linux-2.6.33/arch/x86/include/asm/pci_x86.h
+@@ -82,7 +82,6 @@ struct irq_routing_table {
+
+ extern unsigned int pcibios_irq_mask;
+
+-extern int pcibios_scanned;
+ extern spinlock_t pci_config_lock;
+
+ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
+@@ -111,10 +110,10 @@ extern void __init dmi_check_skip_isa_al
+
+ /* some common used subsys_initcalls */
+ extern int __init pci_acpi_init(void);
+-extern int __init pcibios_irq_init(void);
+-extern int __init pci_visws_init(void);
+-extern int __init pci_numaq_init(void);
++extern void __init pcibios_irq_init(void);
+ extern int __init pcibios_init(void);
++extern int pci_legacy_init(void);
++extern void pcibios_fixup_irqs(void);
+
+ /* pci-mmconfig.c */
+
+@@ -182,3 +181,17 @@ static inline void mmio_config_writel(vo
+ {
+ asm volatile("movl %%eax,(%1)" : : "a" (val), "r" (pos) : "memory");
+ }
++
++#ifdef CONFIG_PCI
++# ifdef CONFIG_ACPI
++# define x86_default_pci_init pci_acpi_init
++# else
++# define x86_default_pci_init pci_legacy_init
++# endif
++# define x86_default_pci_init_irq pcibios_irq_init
++# define x86_default_pci_fixup_irqs pcibios_fixup_irqs
++#else
++# define x86_default_pci_init NULL
++# define x86_default_pci_init_irq NULL
++# define x86_default_pci_fixup_irqs NULL
++#endif
+Index: linux-2.6.33/arch/x86/include/asm/setup.h
+===================================================================
+--- linux-2.6.33.orig/arch/x86/include/asm/setup.h
++++ linux-2.6.33/arch/x86/include/asm/setup.h
+@@ -37,10 +37,8 @@ void setup_bios_corruption_check(void);
+
+ #ifdef CONFIG_X86_VISWS
+ extern void visws_early_detect(void);
+-extern int is_visws_box(void);
+ #else
+ static inline void visws_early_detect(void) { }
+-static inline int is_visws_box(void) { return 0; }
+ #endif
+
+ extern unsigned long saved_video_mode;
+Index: linux-2.6.33/arch/x86/include/asm/visws/cobalt.h
+===================================================================
+--- linux-2.6.33.orig/arch/x86/include/asm/visws/cobalt.h
++++ linux-2.6.33/arch/x86/include/asm/visws/cobalt.h
+@@ -122,4 +122,6 @@ extern char visws_board_type;
+
+ extern char visws_board_rev;
+
++extern int pci_visws_init(void);
++
+ #endif /* _ASM_X86_VISWS_COBALT_H */
+Index: linux-2.6.33/arch/x86/include/asm/x86_init.h
+===================================================================
+--- linux-2.6.33.orig/arch/x86/include/asm/x86_init.h
++++ linux-2.6.33/arch/x86/include/asm/x86_init.h
+@@ -99,6 +99,18 @@ struct x86_init_iommu {
+ };
+
+ /**
++ * struct x86_init_pci - platform specific pci init functions
++ * @init: platform specific pci init
++ * @init_irq: platform specific pci irq init
++ * @fixup_irqs: platform specific pci irq fixup
++ */
++struct x86_init_pci {
++ int (*init)(void);
++ void (*init_irq)(void);
++ void (*fixup_irqs)(void);
++};
++
++/**
+ * struct x86_init_ops - functions for platform specific setup
+ *
+ */
+@@ -110,6 +122,7 @@ struct x86_init_ops {
+ struct x86_init_paging paging;
+ struct x86_init_timers timers;
+ struct x86_init_iommu iommu;
++ struct x86_init_pci pci;
+ };
+
+ /**
+Index: linux-2.6.33/arch/x86/kernel/acpi/boot.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/acpi/boot.c
++++ linux-2.6.33/arch/x86/kernel/acpi/boot.c
+@@ -35,6 +35,7 @@
+ #include <linux/ioport.h>
+ #include <linux/pci.h>
+
++#include <asm/pci_x86.h>
+ #include <asm/pgtable.h>
+ #include <asm/io_apic.h>
+ #include <asm/apic.h>
+@@ -1603,6 +1604,9 @@ int __init acpi_boot_init(void)
+
+ acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
+
++ if (!acpi_noirq)
++ x86_init.pci.init = pci_acpi_init;
++
+ return 0;
+ }
+
+Index: linux-2.6.33/arch/x86/kernel/apic/numaq_32.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/apic/numaq_32.c
++++ linux-2.6.33/arch/x86/kernel/apic/numaq_32.c
+@@ -277,6 +277,7 @@ static __init void early_check_numaq(voi
+ x86_init.mpparse.mpc_oem_pci_bus = mpc_oem_pci_bus;
+ x86_init.mpparse.mpc_oem_bus_info = mpc_oem_bus_info;
+ x86_init.timers.tsc_pre_init = numaq_tsc_init;
++ x86_init.pci.init = pci_numaq_init;
+ }
+ }
+
+Index: linux-2.6.33/arch/x86/kernel/visws_quirks.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/visws_quirks.c
++++ linux-2.6.33/arch/x86/kernel/visws_quirks.c
+@@ -49,11 +49,6 @@ extern int no_broadcast;
+ char visws_board_type = -1;
+ char visws_board_rev = -1;
+
+-int is_visws_box(void)
+-{
+- return visws_board_type >= 0;
+-}
+-
+ static void __init visws_time_init(void)
+ {
+ printk(KERN_INFO "Starting Cobalt Timer system clock\n");
+@@ -242,6 +237,8 @@ void __init visws_early_detect(void)
+ x86_init.irqs.pre_vector_init = visws_pre_intr_init;
+ x86_init.irqs.trap_init = visws_trap_init;
+ x86_init.timers.timer_init = visws_time_init;
++ x86_init.pci.init = pci_visws_init;
++ x86_init.pci.init_irq = x86_init_noop;
+
+ /*
+ * Install reboot quirks:
+@@ -508,7 +505,7 @@ static struct irq_chip cobalt_irq_type =
+ */
+ static unsigned int startup_piix4_master_irq(unsigned int irq)
+ {
+- init_8259A(0);
++ legacy_pic->init(0);
+
+ return startup_cobalt_irq(irq);
+ }
+@@ -531,10 +528,7 @@ static struct irq_chip piix4_master_irq_
+
+
+ static struct irq_chip piix4_virtual_irq_type = {
+- .name = "PIIX4-virtual",
+- .shutdown = disable_8259A_irq,
+- .enable = enable_8259A_irq,
+- .disable = disable_8259A_irq,
++ .typename = "PIIX4-virtual",
+ };
+
+
+@@ -609,7 +603,7 @@ static irqreturn_t piix4_master_intr(int
+ handle_IRQ_event(realirq, desc->action);
+
+ if (!(desc->status & IRQ_DISABLED))
+- enable_8259A_irq(realirq);
++ legacy_pic->chip->unmask(realirq);
+
+ return IRQ_HANDLED;
+
+@@ -628,6 +622,12 @@ static struct irqaction cascade_action =
+ .name = "cascade",
+ };
+
++static inline void set_piix4_virtual_irq_type(void)
++{
++ piix4_virtual_irq_type.shutdown = i8259A_chip.mask;
++ piix4_virtual_irq_type.enable = i8259A_chip.unmask;
++ piix4_virtual_irq_type.disable = i8259A_chip.mask;
++}
+
+ void init_VISWS_APIC_irqs(void)
+ {
+@@ -653,6 +653,7 @@ void init_VISWS_APIC_irqs(void)
+ desc->chip = &piix4_master_irq_type;
+ }
+ else if (i < CO_IRQ_APIC0) {
++ set_piix4_virtual_irq_type();
+ desc->chip = &piix4_virtual_irq_type;
+ }
+ else if (IS_CO_APIC(i)) {
+Index: linux-2.6.33/arch/x86/kernel/x86_init.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/x86_init.c
++++ linux-2.6.33/arch/x86/kernel/x86_init.c
+@@ -4,9 +4,11 @@
+ * For licencing details see kernel-base/COPYING
+ */
+ #include <linux/init.h>
++#include <linux/ioport.h>
+
+ #include <asm/bios_ebda.h>
+ #include <asm/paravirt.h>
++#include <asm/pci_x86.h>
+ #include <asm/mpspec.h>
+ #include <asm/setup.h>
+ #include <asm/apic.h>
+@@ -70,6 +72,12 @@ struct x86_init_ops x86_init __initdata
+ .iommu = {
+ .iommu_init = iommu_init_noop,
+ },
++
++ .pci = {
++ .init = x86_default_pci_init,
++ .init_irq = x86_default_pci_init_irq,
++ .fixup_irqs = x86_default_pci_fixup_irqs,
++ },
+ };
+
+ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
+Index: linux-2.6.33/arch/x86/pci/acpi.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/pci/acpi.c
++++ linux-2.6.33/arch/x86/pci/acpi.c
+@@ -282,17 +282,14 @@ int __init pci_acpi_init(void)
+ {
+ struct pci_dev *dev = NULL;
+
+- if (pcibios_scanned)
+- return 0;
+-
+ if (acpi_noirq)
+- return 0;
++ return -ENODEV;
+
+ printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
+ acpi_irq_penalty_init();
+- pcibios_scanned++;
+ pcibios_enable_irq = acpi_pci_irq_enable;
+ pcibios_disable_irq = acpi_pci_irq_disable;
++ x86_init.pci.init_irq = x86_init_noop;
+
+ if (pci_routeirq) {
+ /*
+Index: linux-2.6.33/arch/x86/pci/common.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/pci/common.c
++++ linux-2.6.33/arch/x86/pci/common.c
+@@ -72,12 +72,6 @@ struct pci_ops pci_root_ops = {
+ };
+
+ /*
+- * legacy, numa, and acpi all want to call pcibios_scan_root
+- * from their initcalls. This flag prevents that.
+- */
+-int pcibios_scanned;
+-
+-/*
+ * This interrupt-safe spinlock protects all accesses to PCI
+ * configuration space.
+ */
+Index: linux-2.6.33/arch/x86/pci/legacy.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/pci/legacy.c
++++ linux-2.6.33/arch/x86/pci/legacy.c
+@@ -35,16 +35,13 @@ static void __devinit pcibios_fixup_peer
+ }
+ }
+
+-static int __init pci_legacy_init(void)
++int __init pci_legacy_init(void)
+ {
+ if (!raw_pci_ops) {
+ printk("PCI: System does not support PCI\n");
+ return 0;
+ }
+
+- if (pcibios_scanned++)
+- return 0;
+-
+ printk("PCI: Probing PCI hardware\n");
+ pci_root_bus = pcibios_scan_root(0);
+ if (pci_root_bus)
+@@ -55,18 +52,15 @@ static int __init pci_legacy_init(void)
+
+ int __init pci_subsys_init(void)
+ {
+-#ifdef CONFIG_X86_NUMAQ
+- pci_numaq_init();
+-#endif
+-#ifdef CONFIG_ACPI
+- pci_acpi_init();
+-#endif
+-#ifdef CONFIG_X86_VISWS
+- pci_visws_init();
+-#endif
+- pci_legacy_init();
++ /*
++ * The init function returns an non zero value when
++ * pci_legacy_init should be invoked.
++ */
++ if (x86_init.pci.init())
++ pci_legacy_init();
++
+ pcibios_fixup_peer_bridges();
+- pcibios_irq_init();
++ x86_init.pci.init_irq();
+ pcibios_init();
+
+ return 0;
+Index: linux-2.6.33/arch/x86/pci/numaq_32.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/pci/numaq_32.c
++++ linux-2.6.33/arch/x86/pci/numaq_32.c
+@@ -152,14 +152,8 @@ int __init pci_numaq_init(void)
+ {
+ int quad;
+
+- if (!found_numaq)
+- return 0;
+-
+ raw_pci_ops = &pci_direct_conf1_mq;
+
+- if (pcibios_scanned++)
+- return 0;
+-
+ pci_root_bus = pcibios_scan_root(0);
+ if (pci_root_bus)
+ pci_bus_add_devices(pci_root_bus);
+Index: linux-2.6.33/arch/x86/pci/visws.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/pci/visws.c
++++ linux-2.6.33/arch/x86/pci/visws.c
+@@ -69,9 +69,6 @@ void __init pcibios_update_irq(struct pc
+
+ int __init pci_visws_init(void)
+ {
+- if (!is_visws_box())
+- return -1;
+-
+ pcibios_enable_irq = &pci_visws_enable_irq;
+ pcibios_disable_irq = &pci_visws_disable_irq;
+
+@@ -90,5 +87,6 @@ int __init pci_visws_init(void)
+ pci_scan_bus_with_sysdata(pci_bus1);
+ pci_fixup_irqs(pci_common_swizzle, visws_map_irq);
+ pcibios_resource_survey();
+- return 0;
++ /* Request bus scan */
++ return 1;
+ }
+Index: linux-2.6.33/arch/x86/pci/irq.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/pci/irq.c
++++ linux-2.6.33/arch/x86/pci/irq.c
+@@ -53,7 +53,7 @@ struct irq_router_handler {
+ int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
+ };
+
+-int (*pcibios_enable_irq)(struct pci_dev *dev) = NULL;
++int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
+ void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
+
+ /*
+@@ -1016,7 +1016,7 @@ static int pcibios_lookup_irq(struct pci
+ return 1;
+ }
+
+-static void __init pcibios_fixup_irqs(void)
++void __init pcibios_fixup_irqs(void)
+ {
+ struct pci_dev *dev = NULL;
+ u8 pin;
+@@ -1110,12 +1110,12 @@ static struct dmi_system_id __initdata p
+ { }
+ };
+
+-int __init pcibios_irq_init(void)
++void __init pcibios_irq_init(void)
+ {
+ DBG(KERN_DEBUG "PCI: IRQ init\n");
+
+- if (pcibios_enable_irq || raw_pci_ops == NULL)
+- return 0;
++ if (raw_pci_ops == NULL)
++ return;
+
+ dmi_check_system(pciirq_dmi_table);
+
+@@ -1142,9 +1142,7 @@ int __init pcibios_irq_init(void)
+ pirq_table = NULL;
+ }
+
+- pcibios_enable_irq = pirq_enable_irq;
+-
+- pcibios_fixup_irqs();
++ x86_init.pci.fixup_irqs();
+
+ if (io_apic_assign_pci_irqs && pci_routeirq) {
+ struct pci_dev *dev = NULL;
+@@ -1157,8 +1155,6 @@ int __init pcibios_irq_init(void)
+ for_each_pci_dev(dev)
+ pirq_enable_irq(dev);
+ }
+-
+- return 0;
+ }
+
+ static void pirq_penalize_isa_irq(int irq, int active)
+Index: linux-2.6.33/arch/x86/kernel/apic/apic.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/apic/apic.c
++++ linux-2.6.33/arch/x86/kernel/apic/apic.c
+@@ -718,6 +718,9 @@ static int __init calibrate_APIC_clock(v
+ */
+ void __init setup_boot_APIC_clock(void)
+ {
++ /* we rely on global clockevent for calibration */
++ if (global_clock_event == NULL)
++ return;
+ /*
+ * The local apic timer can be disabled via the kernel
+ * commandline or from the CPU detection code. Register the lapic
+@@ -1390,7 +1393,7 @@ void __init enable_IR_x2apic(void)
+ }
+
+ local_irq_save(flags);
+- mask_8259A();
++ legacy_pic->mask_all();
+ mask_IO_APIC_setup(ioapic_entries);
+
+ if (dmar_table_init_ret)
+@@ -1422,7 +1425,7 @@ void __init enable_IR_x2apic(void)
+ nox2apic:
+ if (!ret) /* IR enabling failed */
+ restore_IO_APIC_setup(ioapic_entries);
+- unmask_8259A();
++ legacy_pic->restore_mask();
+ local_irq_restore(flags);
+
+ out:
+@@ -2018,7 +2021,7 @@ static int lapic_resume(struct sys_devic
+ }
+
+ mask_IO_APIC_setup(ioapic_entries);
+- mask_8259A();
++ legacy_pic->mask_all();
+ }
+
+ if (x2apic_mode)
+@@ -2062,7 +2065,7 @@ static int lapic_resume(struct sys_devic
+
+ if (intr_remapping_enabled) {
+ reenable_intr_remapping(x2apic_mode);
+- unmask_8259A();
++ legacy_pic->restore_mask();
+ restore_IO_APIC_setup(ioapic_entries);
+ free_ioapic_entries(ioapic_entries);
+ }
+Index: linux-2.6.33/arch/x86/kernel/apic/io_apic.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/apic/io_apic.c
++++ linux-2.6.33/arch/x86/kernel/apic/io_apic.c
+@@ -94,10 +94,8 @@ struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCE
+ /* # of MP IRQ source entries */
+ int mp_irq_entries;
+
+-/* Number of legacy interrupts */
+-static int nr_legacy_irqs __read_mostly = NR_IRQS_LEGACY;
+ /* GSI interrupts */
+-static int nr_irqs_gsi = NR_IRQS_LEGACY;
++int nr_irqs_gsi = NR_IRQS_LEGACY;
+
+ #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
+ int mp_bus_id_to_type[MAX_MP_BUSSES];
+@@ -140,33 +138,10 @@ static struct irq_pin_list *get_one_free
+
+ /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
+ #ifdef CONFIG_SPARSE_IRQ
+-static struct irq_cfg irq_cfgx[] = {
++static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];
+ #else
+-static struct irq_cfg irq_cfgx[NR_IRQS] = {
++static struct irq_cfg irq_cfgx[NR_IRQS];
+ #endif
+- [0] = { .vector = IRQ0_VECTOR, },
+- [1] = { .vector = IRQ1_VECTOR, },
+- [2] = { .vector = IRQ2_VECTOR, },
+- [3] = { .vector = IRQ3_VECTOR, },
+- [4] = { .vector = IRQ4_VECTOR, },
+- [5] = { .vector = IRQ5_VECTOR, },
+- [6] = { .vector = IRQ6_VECTOR, },
+- [7] = { .vector = IRQ7_VECTOR, },
+- [8] = { .vector = IRQ8_VECTOR, },
+- [9] = { .vector = IRQ9_VECTOR, },
+- [10] = { .vector = IRQ10_VECTOR, },
+- [11] = { .vector = IRQ11_VECTOR, },
+- [12] = { .vector = IRQ12_VECTOR, },
+- [13] = { .vector = IRQ13_VECTOR, },
+- [14] = { .vector = IRQ14_VECTOR, },
+- [15] = { .vector = IRQ15_VECTOR, },
+-};
+-
+-void __init io_apic_disable_legacy(void)
+-{
+- nr_legacy_irqs = 0;
+- nr_irqs_gsi = 0;
+-}
+
+ int __init arch_early_irq_init(void)
+ {
+@@ -176,16 +151,23 @@ int __init arch_early_irq_init(void)
+ int node;
+ int i;
+
++ if (!legacy_pic->nr_legacy_irqs) {
++ nr_irqs_gsi = 0;
++ io_apic_irqs = ~0UL;
++ }
++
+ cfg = irq_cfgx;
+ count = ARRAY_SIZE(irq_cfgx);
+ node= cpu_to_node(boot_cpu_id);
+
+ for (i = 0; i < count; i++) {
++ if (i < legacy_pic->nr_legacy_irqs)
++ cfg[i].vector = IRQ0_VECTOR + i;
+ desc = irq_to_desc(i);
+ desc->chip_data = &cfg[i];
+ zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node);
+ zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
+- if (i < nr_legacy_irqs)
++ if (i < legacy_pic->nr_legacy_irqs)
+ cpumask_setall(cfg[i].domain);
+ }
+
+@@ -865,7 +847,7 @@ static int __init find_isa_irq_apic(int
+ */
+ static int EISA_ELCR(unsigned int irq)
+ {
+- if (irq < nr_legacy_irqs) {
++ if (irq < legacy_pic->nr_legacy_irqs) {
+ unsigned int port = 0x4d0 + (irq >> 3);
+ return (inb(port) >> (irq & 7)) & 1;
+ }
+@@ -1461,8 +1443,8 @@ static void setup_IO_APIC_irq(int apic_i
+ }
+
+ ioapic_register_intr(irq, desc, trigger);
+- if (irq < nr_legacy_irqs)
+- disable_8259A_irq(irq);
++ if (irq < legacy_pic->nr_legacy_irqs)
++ legacy_pic->chip->mask(irq);
+
+ ioapic_write_entry(apic_id, pin, entry);
+ }
+@@ -1875,7 +1857,7 @@ __apicdebuginit(void) print_PIC(void)
+ unsigned int v;
+ unsigned long flags;
+
+- if (!nr_legacy_irqs)
++ if (!legacy_pic->nr_legacy_irqs)
+ return;
+
+ printk(KERN_DEBUG "\nprinting PIC contents\n");
+@@ -1959,7 +1941,7 @@ void __init enable_IO_APIC(void)
+ nr_ioapic_registers[apic] = reg_01.bits.entries+1;
+ }
+
+- if (!nr_legacy_irqs)
++ if (!legacy_pic->nr_legacy_irqs)
+ return;
+
+ for(apic = 0; apic < nr_ioapics; apic++) {
+@@ -2016,7 +1998,7 @@ void disable_IO_APIC(void)
+ */
+ clear_IO_APIC();
+
+- if (!nr_legacy_irqs)
++ if (!legacy_pic->nr_legacy_irqs)
+ return;
+
+ /*
+@@ -2249,9 +2231,9 @@ static unsigned int startup_ioapic_irq(u
+ struct irq_cfg *cfg;
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+- if (irq < nr_legacy_irqs) {
+- disable_8259A_irq(irq);
+- if (i8259A_irq_pending(irq))
++ if (irq < legacy_pic->nr_legacy_irqs) {
++ legacy_pic->chip->mask(irq);
++ if (legacy_pic->irq_pending(irq))
+ was_pending = 1;
+ }
+ cfg = irq_cfg(irq);
+@@ -2784,8 +2766,8 @@ static inline void init_IO_APIC_traps(vo
+ * so default to an old-fashioned 8259
+ * interrupt if we can..
+ */
+- if (irq < nr_legacy_irqs)
+- make_8259A_irq(irq);
++ if (irq < legacy_pic->nr_legacy_irqs)
++ legacy_pic->make_irq(irq);
+ else
+ /* Strange. Oh, well.. */
+ desc->chip = &no_irq_chip;
+@@ -2942,7 +2924,7 @@ static inline void __init check_timer(vo
+ /*
+ * get/set the timer IRQ vector:
+ */
+- disable_8259A_irq(0);
++ legacy_pic->chip->mask(0);
+ assign_irq_vector(0, cfg, apic->target_cpus());
+
+ /*
+@@ -2955,7 +2937,7 @@ static inline void __init check_timer(vo
+ * automatically.
+ */
+ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
+- init_8259A(1);
++ legacy_pic->init(1);
+ #ifdef CONFIG_X86_32
+ {
+ unsigned int ver;
+@@ -3014,7 +2996,7 @@ static inline void __init check_timer(vo
+ if (timer_irq_works()) {
+ if (nmi_watchdog == NMI_IO_APIC) {
+ setup_nmi();
+- enable_8259A_irq(0);
++ legacy_pic->chip->unmask(0);
+ }
+ if (disable_timer_pin_1 > 0)
+ clear_IO_APIC_pin(0, pin1);
+@@ -3037,14 +3019,14 @@ static inline void __init check_timer(vo
+ */
+ replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2);
+ setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
+- enable_8259A_irq(0);
++ legacy_pic->chip->unmask(0);
+ if (timer_irq_works()) {
+ apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
+ timer_through_8259 = 1;
+ if (nmi_watchdog == NMI_IO_APIC) {
+- disable_8259A_irq(0);
++ legacy_pic->chip->mask(0);
+ setup_nmi();
+- enable_8259A_irq(0);
++ legacy_pic->chip->unmask(0);
+ }
+ goto out;
+ }
+@@ -3052,7 +3034,7 @@ static inline void __init check_timer(vo
+ * Cleanup, just in case ...
+ */
+ local_irq_disable();
+- disable_8259A_irq(0);
++ legacy_pic->chip->mask(0);
+ clear_IO_APIC_pin(apic2, pin2);
+ apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
+ }
+@@ -3071,22 +3053,22 @@ static inline void __init check_timer(vo
+
+ lapic_register_intr(0, desc);
+ apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
+- enable_8259A_irq(0);
++ legacy_pic->chip->unmask(0);
+
+ if (timer_irq_works()) {
+ apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
+ goto out;
+ }
+ local_irq_disable();
+- disable_8259A_irq(0);
++ legacy_pic->chip->mask(0);
+ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
+ apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
+
+ apic_printk(APIC_QUIET, KERN_INFO
+ "...trying to set up timer as ExtINT IRQ...\n");
+
+- init_8259A(0);
+- make_8259A_irq(0);
++ legacy_pic->init(0);
++ legacy_pic->make_irq(0);
+ apic_write(APIC_LVT0, APIC_DM_EXTINT);
+
+ unlock_ExtINT_logic();
+@@ -3128,7 +3110,7 @@ void __init setup_IO_APIC(void)
+ /*
+ * calling enable_IO_APIC() is moved to setup_local_APIC for BP
+ */
+- io_apic_irqs = nr_legacy_irqs ? ~PIC_IRQS : ~0UL;
++ io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL;
+
+ apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
+ /*
+@@ -3139,7 +3121,7 @@ void __init setup_IO_APIC(void)
+ sync_Arb_IDs();
+ setup_IO_APIC_irqs();
+ init_IO_APIC_traps();
+- if (nr_legacy_irqs)
++ if (legacy_pic->nr_legacy_irqs)
+ check_timer();
+ }
+
+@@ -3932,7 +3914,7 @@ static int __io_apic_set_pci_routing(str
+ /*
+ * IRQs < 16 are already in the irq_2_pin[] map
+ */
+- if (irq >= nr_legacy_irqs) {
++ if (irq >= legacy_pic->nr_legacy_irqs) {
+ cfg = desc->chip_data;
+ if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) {
+ printk(KERN_INFO "can not add pin %d for irq %d\n",
+@@ -4310,3 +4292,25 @@ void __init mp_register_ioapic(int id, u
+
+ nr_ioapics++;
+ }
++
++/* Enable IOAPIC early just for system timer */
++void __init pre_init_apic_IRQ0(void)
++{
++ struct irq_cfg *cfg;
++ struct irq_desc *desc;
++
++ printk(KERN_INFO "Early APIC setup for system timer0\n");
++#ifndef CONFIG_SMP
++ phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
++#endif
++ desc = irq_to_desc_alloc_node(0, 0);
++
++ setup_local_APIC();
++
++ cfg = irq_cfg(0);
++ add_pin_to_irq_node(cfg, 0, 0, 0);
++ set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
++
++ /* FIXME: get trigger and polarity from mp_irqs[] */
++ setup_IO_APIC_irq(0, 0, 0, desc, 0, 0);
++}
+Index: linux-2.6.33/arch/x86/kernel/smpboot.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/smpboot.c
++++ linux-2.6.33/arch/x86/kernel/smpboot.c
+@@ -48,6 +48,7 @@
+ #include <linux/err.h>
+ #include <linux/nmi.h>
+ #include <linux/tboot.h>
++#include <linux/stackprotector.h>
+
+ #include <asm/acpi.h>
+ #include <asm/desc.h>
+@@ -67,6 +68,7 @@
+ #include <linux/mc146818rtc.h>
+
+ #include <asm/smpboot_hooks.h>
++#include <asm/i8259.h>
+
+ #ifdef CONFIG_X86_32
+ u8 apicid_2_node[MAX_APICID];
+@@ -286,9 +288,9 @@ notrace static void __cpuinit start_seco
+ check_tsc_sync_target();
+
+ if (nmi_watchdog == NMI_IO_APIC) {
+- disable_8259A_irq(0);
++ legacy_pic->chip->mask(0);
+ enable_NMI_through_LVT0();
+- enable_8259A_irq(0);
++ legacy_pic->chip->unmask(0);
+ }
+
+ #ifdef CONFIG_X86_32
+@@ -324,6 +326,9 @@ notrace static void __cpuinit start_seco
+ /* enable local interrupts */
+ local_irq_enable();
+
++ /* to prevent fake stack check failure in clock setup */
++ boot_init_stack_canary();
++
+ x86_cpuinit.setup_percpu_clockev();
+
+ wmb();
+Index: linux-2.6.33/Documentation/kernel-parameters.txt
+===================================================================
+--- linux-2.6.33.orig/Documentation/kernel-parameters.txt
++++ linux-2.6.33/Documentation/kernel-parameters.txt
+@@ -1738,6 +1738,12 @@ and is between 256 and 4096 characters.
+ nomfgpt [X86-32] Disable Multi-Function General Purpose
+ Timer usage (for AMD Geode machines).
+
++ x86_mrst_timer [X86-32,APBT]
++ choose timer option for x86 moorestown mid platform.
++ two valid options are apbt timer only and lapic timer
++ plus one apbt timer for broadcast timer.
++ x86_mrst_timer=apbt_only | lapic_and_apbt
++
+ norandmaps Don't use address space randomization. Equivalent to
+ echo 0 > /proc/sys/kernel/randomize_va_space
+
+Index: linux-2.6.33/arch/x86/Kconfig
+===================================================================
+--- linux-2.6.33.orig/arch/x86/Kconfig
++++ linux-2.6.33/arch/x86/Kconfig
+@@ -390,6 +390,7 @@ config X86_MRST
+ bool "Moorestown MID platform"
+ depends on X86_32
+ depends on X86_EXTENDED_PLATFORM
++ select APB_TIMER
+ ---help---
+ Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin
+ Internet Device(MID) platform. Moorestown consists of two chips:
+@@ -398,6 +399,14 @@ config X86_MRST
+ nor standard legacy replacement devices/features. e.g. Moorestown does
+ not contain i8259, i8254, HPET, legacy BIOS, most of the io ports.
+
++config MRST_SPI_UART_BOOT_MSG
++ def_bool y
++ prompt "Moorestown SPI UART boot message"
++ depends on (X86_MRST && X86_32)
++ help
++ Enable this to see boot message during protected mode boot phase, such as
++ kernel decompression, BAUD rate is set at 115200 8n1
++
+ config X86_RDC321X
+ bool "RDC R-321x SoC"
+ depends on X86_32
+@@ -612,6 +621,24 @@ config HPET_EMULATE_RTC
+ def_bool y
+ depends on HPET_TIMER && (RTC=y || RTC=m || RTC_DRV_CMOS=m || RTC_DRV_CMOS=y)
+
++config APB_TIMER
++ def_bool y if X86_MRST
++ prompt "Langwell APB Timer Support" if X86_MRST
++ help
++ APB timer is the replacement for 8254, HPET on X86 MID platforms.
++ The APBT provides a stable time base on SMP
++ systems, unlike the TSC, but it is more expensive to access,
++ as it is off-chip. APB timers are always running regardless of CPU
++ C states, they are used as per CPU clockevent device when possible.
++
++config LNW_IPC
++ def_bool n
++ prompt "Langwell IPC Support" if (X86_32 || X86_MRST)
++ depends on X86_MRST
++ help
++ IPC unit is used on Moorestown to bridge the communications
++ between IA and SCU.
++
+ # Mark as embedded because too many people got it wrong.
+ # The code disables itself when not needed.
+ config DMI
+Index: linux-2.6.33/arch/x86/include/asm/apb_timer.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/include/asm/apb_timer.h
+@@ -0,0 +1,72 @@
++/*
++ * apb_timer.h: Driver for Langwell APB timer based on Synopsis DesignWare
++ *
++ * (C) Copyright 2009 Intel Corporation
++ * Author: Jacob Pan (jacob.jun.pan@intel.com)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ *
++ * Note:
++ */
++
++#ifndef ASM_X86_APBT_H
++#define ASM_X86_APBT_H
++#include <linux/sfi.h>
++
++#ifdef CONFIG_APB_TIMER
++
++/* Langwell DW APB timer registers */
++#define APBTMR_N_LOAD_COUNT 0x00
++#define APBTMR_N_CURRENT_VALUE 0x04
++#define APBTMR_N_CONTROL 0x08
++#define APBTMR_N_EOI 0x0c
++#define APBTMR_N_INT_STATUS 0x10
++
++#define APBTMRS_INT_STATUS 0xa0
++#define APBTMRS_EOI 0xa4
++#define APBTMRS_RAW_INT_STATUS 0xa8
++#define APBTMRS_COMP_VERSION 0xac
++#define APBTMRS_REG_SIZE 0x14
++
++/* register bits */
++#define APBTMR_CONTROL_ENABLE (1<<0)
++#define APBTMR_CONTROL_MODE_PERIODIC (1<<1) /*1: periodic 0:free running */
++#define APBTMR_CONTROL_INT (1<<2)
++
++/* default memory mapped register base */
++#define LNW_SCU_ADDR 0xFF100000
++#define LNW_EXT_TIMER_OFFSET 0x1B800
++#define APBT_DEFAULT_BASE (LNW_SCU_ADDR+LNW_EXT_TIMER_OFFSET)
++#define LNW_EXT_TIMER_PGOFFSET 0x800
++
++/* APBT clock speed range from PCLK to fabric base, 25-100MHz */
++#define APBT_MAX_FREQ 50
++#define APBT_MIN_FREQ 1
++#define APBT_MMAP_SIZE 1024
++
++#define APBT_DEV_USED 1
++
++#define SFI_MTMR_MAX_NUM 8
++
++extern void apbt_time_init(void);
++extern struct clock_event_device *global_clock_event;
++extern unsigned long apbt_quick_calibrate(void);
++extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu);
++extern void apbt_setup_secondary_clock(void);
++extern unsigned int boot_cpu_id;
++extern int disable_apbt_percpu;
++
++extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint);
++extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr);
++extern int sfi_mtimer_num;
++
++#else /* CONFIG_APB_TIMER */
++
++static inline unsigned long apbt_quick_calibrate(void) {return 0; }
++static inline void apbt_time_init(void) {return 0; }
++
++#endif
++#endif /* ASM_X86_APBT_H */
+Index: linux-2.6.33/arch/x86/kernel/Makefile
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/Makefile
++++ linux-2.6.33/arch/x86/kernel/Makefile
+@@ -57,6 +57,12 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
+ obj-y += cpu/
+ obj-y += acpi/
+ obj-$(CONFIG_SFI) += sfi.o
++sfi-processor-objs += sfi/sfi_processor_core.o
++sfi-processor-objs += sfi/sfi_processor_idle.o
++sfi-processor-objs += sfi/sfi_processor_perflib.o
++
++obj-$(CONFIG_SFI_PROCESSOR_PM) += sfi-processor.o
++
+ obj-y += reboot.o
+ obj-$(CONFIG_MCA) += mca_32.o
+ obj-$(CONFIG_X86_MSR) += msr.o
+@@ -85,8 +91,11 @@ obj-$(CONFIG_DOUBLEFAULT) += doublefaul
+ obj-$(CONFIG_KGDB) += kgdb.o
+ obj-$(CONFIG_VM86) += vm86_32.o
+ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
++obj-$(CONFIG_X86_MRST_EARLY_PRINTK) += mrst_earlyprintk.o
+
+ obj-$(CONFIG_HPET_TIMER) += hpet.o
++obj-$(CONFIG_APB_TIMER) += apb_timer.o
++obj-$(CONFIG_LNW_IPC) += ipc_mrst.o
+
+ obj-$(CONFIG_K8_NB) += k8.o
+ obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o
+@@ -105,7 +114,7 @@ obj-$(CONFIG_SCx200) += scx200.o
+ scx200-y += scx200_32.o
+
+ obj-$(CONFIG_OLPC) += olpc.o
+-obj-$(CONFIG_X86_MRST) += mrst.o
++obj-$(CONFIG_X86_MRST) += mrst.o vrtc.o
+
+ microcode-y := microcode_core.o
+ microcode-$(CONFIG_MICROCODE_INTEL) += microcode_intel.o
+Index: linux-2.6.33/arch/x86/kernel/apb_timer.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/kernel/apb_timer.c
+@@ -0,0 +1,765 @@
++/*
++ * apb_timer.c: Driver for Langwell APB timers
++ *
++ * (C) Copyright 2009 Intel Corporation
++ * Author: Jacob Pan (jacob.jun.pan@intel.com)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ *
++ * Note:
++ * Langwell is the south complex of Intel Moorestown MID platform. There are
++ * eight external timers in total that can be used by the operating system.
++ * The timer information, such as frequency and addresses, is provided to the
++ * OS via SFI tables.
++ * Timer interrupts are routed via FW/HW emulated IOAPIC independently via
++ * individual redirection table entries (RTE).
++ * Unlike HPET, there is no master counter, therefore one of the timers are
++ * used as clocksource. The overall allocation looks like:
++ * - timer 0 - NR_CPUs for per cpu timer
++ * - one timer for clocksource
++ * - one timer for watchdog driver.
++ * It is also worth notice that APB timer does not support true one-shot mode,
++ * free-running mode will be used here to emulate one-shot mode.
++ * APB timer can also be used as broadcast timer along with per cpu local APIC
++ * timer, but by default APB timer has higher rating than local APIC timers.
++ */
++
++#include <linux/clocksource.h>
++#include <linux/clockchips.h>
++#include <linux/delay.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/sysdev.h>
++#include <linux/pm.h>
++#include <linux/pci.h>
++#include <linux/sfi.h>
++#include <linux/interrupt.h>
++#include <linux/cpu.h>
++#include <linux/irq.h>
++
++#include <asm/fixmap.h>
++#include <asm/apb_timer.h>
++
++#define APBT_MASK CLOCKSOURCE_MASK(32)
++#define APBT_SHIFT 22
++#define APBT_CLOCKEVENT_RATING 150
++#define APBT_CLOCKSOURCE_RATING 250
++#define APBT_MIN_DELTA_USEC 200
++
++#define EVT_TO_APBT_DEV(evt) container_of(evt, struct apbt_dev, evt)
++#define APBT_CLOCKEVENT0_NUM (0)
++#define APBT_CLOCKEVENT1_NUM (1)
++#define APBT_CLOCKSOURCE_NUM (2)
++
++static unsigned long apbt_address;
++static int apb_timer_block_enabled;
++static void __iomem *apbt_virt_address;
++static int phy_cs_timer_id;
++
++/*
++ * Common DW APB timer info
++ */
++static uint64_t apbt_freq;
++
++static void apbt_set_mode(enum clock_event_mode mode,
++ struct clock_event_device *evt);
++static int apbt_next_event(unsigned long delta,
++ struct clock_event_device *evt);
++static cycle_t apbt_read_clocksource(struct clocksource *cs);
++static void apbt_restart_clocksource(void);
++
++struct apbt_dev {
++ struct clock_event_device evt;
++ unsigned int num;
++ int cpu;
++ unsigned int irq;
++ unsigned int tick;
++ unsigned int count;
++ unsigned int flags;
++ char name[10];
++};
++
++int disable_apbt_percpu __cpuinitdata;
++
++#ifdef CONFIG_SMP
++static unsigned int apbt_num_timers_used;
++static DEFINE_PER_CPU(struct apbt_dev, cpu_apbt_dev);
++static struct apbt_dev *apbt_devs;
++#endif
++
++static inline unsigned long apbt_readl_reg(unsigned long a)
++{
++ return readl(apbt_virt_address + a);
++}
++
++static inline void apbt_writel_reg(unsigned long d, unsigned long a)
++{
++ writel(d, apbt_virt_address + a);
++}
++
++static inline unsigned long apbt_readl(int n, unsigned long a)
++{
++ return readl(apbt_virt_address + a + n * APBTMRS_REG_SIZE);
++}
++
++static inline void apbt_writel(int n, unsigned long d, unsigned long a)
++{
++ writel(d, apbt_virt_address + a + n * APBTMRS_REG_SIZE);
++}
++
++static inline void apbt_set_mapping(void)
++{
++ struct sfi_timer_table_entry *mtmr;
++
++ if (apbt_virt_address) {
++ pr_debug("APBT base already mapped\n");
++ return;
++ }
++ mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
++ if (mtmr == NULL) {
++ printk(KERN_ERR "Failed to get MTMR %d from SFI\n",
++ APBT_CLOCKEVENT0_NUM);
++ return;
++ }
++ apbt_address = (unsigned long)mtmr->phys_addr;
++ if (!apbt_address) {
++ printk(KERN_WARNING "No timer base from SFI, use default\n");
++ apbt_address = APBT_DEFAULT_BASE;
++ }
++ apbt_virt_address = ioremap_nocache(apbt_address, APBT_MMAP_SIZE);
++ if (apbt_virt_address) {
++ pr_debug("Mapped APBT physical addr %p at virtual addr %p\n",\
++ (void *)apbt_address, (void *)apbt_virt_address);
++ } else {
++ pr_debug("Failed mapping APBT phy address at %p\n",\
++ (void *)apbt_address);
++ goto panic_noapbt;
++ }
++ apbt_freq = mtmr->freq_hz / USEC_PER_SEC;
++ sfi_free_mtmr(mtmr);
++
++ /* Now figure out the physical timer id for clocksource device */
++ mtmr = sfi_get_mtmr(APBT_CLOCKSOURCE_NUM);
++ if (mtmr == NULL)
++ goto panic_noapbt;
++
++ /* Now figure out the physical timer id */
++ phy_cs_timer_id = (unsigned int)(mtmr->phys_addr & 0xff)
++ / APBTMRS_REG_SIZE;
++ pr_debug("Use timer %d for clocksource\n", phy_cs_timer_id);
++ return;
++
++panic_noapbt:
++ panic("Failed to setup APB system timer\n");
++
++}
++
++static inline void apbt_clear_mapping(void)
++{
++ iounmap(apbt_virt_address);
++ apbt_virt_address = NULL;
++}
++
++/*
++ * APBT timer interrupt enable / disable
++ */
++static inline int is_apbt_capable(void)
++{
++ return apbt_virt_address ? 1 : 0;
++}
++
++static struct clocksource clocksource_apbt = {
++ .name = "apbt",
++ .rating = APBT_CLOCKSOURCE_RATING,
++ .read = apbt_read_clocksource,
++ .mask = APBT_MASK,
++ .shift = APBT_SHIFT,
++ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
++ .resume = apbt_restart_clocksource,
++};
++
++/* boot APB clock event device */
++static struct clock_event_device apbt_clockevent = {
++ .name = "apbt0",
++ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
++ .set_mode = apbt_set_mode,
++ .set_next_event = apbt_next_event,
++ .shift = APBT_SHIFT,
++ .irq = 0,
++ .rating = APBT_CLOCKEVENT_RATING,
++};
++
++/*
++ * if user does not want to use per CPU apb timer, just give it a lower rating
++ * than local apic timer and skip the late per cpu timer init.
++ */
++static inline int __init setup_x86_mrst_timer(char *arg)
++{
++ if (!arg)
++ return -EINVAL;
++
++ if (strcmp("apbt_only", arg) == 0)
++ disable_apbt_percpu = 0;
++ else if (strcmp("lapic_and_apbt", arg) == 0)
++ disable_apbt_percpu = 1;
++ else {
++ pr_warning("X86 MRST timer option %s not recognised"
++ " use x86_mrst_timer=apbt_only or lapic_and_apbt\n",
++ arg);
++ return -EINVAL;
++ }
++ return 0;
++}
++__setup("x86_mrst_timer=", setup_x86_mrst_timer);
++
++/*
++ * start count down from 0xffff_ffff. this is done by toggling the enable bit
++ * then load initial load count to ~0.
++ */
++static void apbt_start_counter(int n)
++{
++ unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
++
++ ctrl &= ~APBTMR_CONTROL_ENABLE;
++ apbt_writel(n, ctrl, APBTMR_N_CONTROL);
++ apbt_writel(n, ~0, APBTMR_N_LOAD_COUNT);
++ /* enable, mask interrupt */
++ ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
++ ctrl |= (APBTMR_CONTROL_ENABLE | APBTMR_CONTROL_INT);
++ apbt_writel(n, ctrl, APBTMR_N_CONTROL);
++ /* read it once to get cached counter value initialized */
++ apbt_read_clocksource(&clocksource_apbt);
++}
++
++static irqreturn_t apbt_interrupt_handler(int irq, void *data)
++{
++ struct apbt_dev *dev = (struct apbt_dev *)data;
++ struct clock_event_device *aevt = &dev->evt;
++
++ if (!aevt->event_handler) {
++ printk(KERN_INFO "Spurious APBT timer interrupt on %d\n",
++ dev->num);
++ return IRQ_NONE;
++ }
++ aevt->event_handler(aevt);
++ return IRQ_HANDLED;
++}
++
++static void apbt_restart_clocksource(void)
++{
++ apbt_start_counter(phy_cs_timer_id);
++}
++
++/* Setup IRQ routing via IOAPIC */
++#ifdef CONFIG_SMP
++static void apbt_setup_irq(struct apbt_dev *adev)
++{
++ struct irq_chip *chip;
++ struct irq_desc *desc;
++
++ /* timer0 irq has been setup early */
++ if (adev->irq == 0)
++ return;
++ desc = irq_to_desc(adev->irq);
++ chip = get_irq_chip(adev->irq);
++ disable_irq(adev->irq);
++ desc->status |= IRQ_MOVE_PCNTXT;
++ irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
++ /* APB timer irqs are set up as mp_irqs, timer is edge triggerred */
++ set_irq_chip_and_handler_name(adev->irq, chip, handle_edge_irq, "edge");
++ enable_irq(adev->irq);
++ if (system_state == SYSTEM_BOOTING)
++ if (request_irq(adev->irq, apbt_interrupt_handler,
++ IRQF_TIMER | IRQF_DISABLED|IRQF_NOBALANCING, adev->name, adev)) {
++ printk(KERN_ERR "Failed request IRQ for APBT%d\n", adev->num);
++ }
++}
++#endif
++
++static void apbt_enable_int(int n)
++{
++ unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
++ /* clear pending intr */
++ apbt_readl(n, APBTMR_N_EOI);
++ ctrl &= ~APBTMR_CONTROL_INT;
++ apbt_writel(n, ctrl, APBTMR_N_CONTROL);
++}
++
++static void apbt_disable_int(int n)
++{
++ unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
++
++ ctrl |= APBTMR_CONTROL_INT;
++ apbt_writel(n, ctrl, APBTMR_N_CONTROL);
++}
++
++
++static int apbt_clockevent_register(void)
++{
++ struct sfi_timer_table_entry *mtmr;
++
++ mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
++ if (mtmr == NULL) {
++ printk(KERN_ERR "Failed to get MTMR %d from SFI\n",
++ APBT_CLOCKEVENT0_NUM);
++ return -ENODEV;
++ }
++
++ /*
++ * We need to calculate the scaled math multiplication factor for
++ * nanosecond to apbt tick conversion.
++ * mult = (nsec/cycle)*2^APBT_SHIFT
++ */
++ apbt_clockevent.mult = div_sc((unsigned long) mtmr->freq_hz
++ , NSEC_PER_SEC, APBT_SHIFT);
++
++ /* Calculate the min / max delta */
++ apbt_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
++ &apbt_clockevent);
++ apbt_clockevent.min_delta_ns = clockevent_delta2ns(
++ APBT_MIN_DELTA_USEC*apbt_freq,
++ &apbt_clockevent);
++ /*
++ * Start apbt with the boot cpu mask and make it
++ * global if not used for per cpu timer.
++ */
++ apbt_clockevent.cpumask = cpumask_of(smp_processor_id());
++
++ if (disable_apbt_percpu) {
++ apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100;
++ global_clock_event = &apbt_clockevent;
++ printk(KERN_DEBUG "%s clockevent registered as global\n",
++ global_clock_event->name);
++ }
++ if (request_irq(apbt_clockevent.irq, apbt_interrupt_handler,
++ IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
++ apbt_clockevent.name, &apbt_clockevent)) {
++ printk(KERN_ERR "Failed request IRQ for APBT%d\n",
++ apbt_clockevent.irq);
++ }
++
++ clockevents_register_device(&apbt_clockevent);
++ /* Start APBT 0 interrupts */
++ apbt_enable_int(APBT_CLOCKEVENT0_NUM);
++
++ sfi_free_mtmr(mtmr);
++ return 0;
++}
++
++#ifdef CONFIG_SMP
++/* Should be called with per cpu */
++void apbt_setup_secondary_clock(void)
++{
++ struct apbt_dev *adev;
++ struct clock_event_device *aevt;
++ int cpu;
++
++ /* Don't register boot CPU clockevent */
++ cpu = smp_processor_id();
++ if (cpu == boot_cpu_id)
++ return;
++ /*
++ * We need to calculate the scaled math multiplication factor for
++ * nanosecond to apbt tick conversion.
++ * mult = (nsec/cycle)*2^APBT_SHIFT
++ */
++ printk(KERN_INFO "Init per CPU clockevent %d\n", cpu);
++ adev = &per_cpu(cpu_apbt_dev, cpu);
++ aevt = &adev->evt;
++
++ memcpy(aevt, &apbt_clockevent, sizeof(*aevt));
++ aevt->cpumask = cpumask_of(cpu);
++ aevt->name = adev->name;
++ aevt->mode = CLOCK_EVT_MODE_UNUSED;
++
++ printk(KERN_INFO "Registering CPU %d clockevent device %s, mask %08x\n",
++ cpu, aevt->name, *(u32 *)aevt->cpumask);
++
++ apbt_setup_irq(adev);
++
++ clockevents_register_device(aevt);
++
++ apbt_enable_int(cpu);
++
++ return;
++}
++
++static int apbt_cpuhp_notify(struct notifier_block *n,
++ unsigned long action, void *hcpu)
++{
++ unsigned long cpu = (unsigned long)hcpu;
++ struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu);
++
++ switch (action & 0xf) {
++ case CPU_DEAD:
++ apbt_disable_int(cpu);
++ if (system_state == SYSTEM_RUNNING)
++ pr_debug("skipping APBT CPU %lu offline\n", cpu);
++ else if (adev) {
++ pr_debug("APBT clockevent for cpu %lu offline\n", cpu);
++ free_irq(adev->irq, adev);
++ }
++ break;
++ }
++ return NOTIFY_OK;
++}
++
++static __init int apbt_late_init(void)
++{
++ if (disable_apbt_percpu)
++ return 0;
++ /* This notifier should be called after workqueue is ready */
++ hotcpu_notifier(apbt_cpuhp_notify, -20);
++ return 0;
++}
++fs_initcall(apbt_late_init);
++#else
++
++void apbt_setup_secondary_clock(void) {}
++
++#endif /* CONFIG_SMP */
++
++static void apbt_set_mode(enum clock_event_mode mode,
++ struct clock_event_device *evt)
++{
++ unsigned long ctrl;
++ uint64_t delta;
++ int timer_num;
++ struct apbt_dev *adev = EVT_TO_APBT_DEV(evt);
++
++ timer_num = adev->num;
++ pr_debug("%s CPU %d timer %d mode=%d\n",
++ __func__, first_cpu(*evt->cpumask), timer_num, mode);
++
++ switch (mode) {
++ case CLOCK_EVT_MODE_PERIODIC:
++ delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * apbt_clockevent.mult;
++ delta >>= apbt_clockevent.shift;
++ ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
++ ctrl |= APBTMR_CONTROL_MODE_PERIODIC;
++ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
++ /*
++ * DW APB p. 46, have to disable timer before load counter,
++ * may cause sync problem.
++ */
++ ctrl &= ~APBTMR_CONTROL_ENABLE;
++ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
++ udelay(1);
++ pr_debug("Setting clock period %d for HZ %d\n", (int)delta, HZ);
++ apbt_writel(timer_num, delta, APBTMR_N_LOAD_COUNT);
++ ctrl |= APBTMR_CONTROL_ENABLE;
++ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
++ break;
++ /* APB timer does not have one-shot mode, use free running mode */
++ case CLOCK_EVT_MODE_ONESHOT:
++ ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
++ /*
++ * set free running mode, this mode will let timer reload max
++ * timeout which will give time (3min on 25MHz clock) to rearm
++ * the next event, therefore emulate the one-shot mode.
++ */
++ ctrl &= ~APBTMR_CONTROL_ENABLE;
++ ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
++
++ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
++ /* write again to set free running mode */
++ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
++
++ /*
++ * DW APB p. 46, load counter with all 1s before starting free
++ * running mode.
++ */
++ apbt_writel(timer_num, ~0, APBTMR_N_LOAD_COUNT);
++ ctrl &= ~APBTMR_CONTROL_INT;
++ ctrl |= APBTMR_CONTROL_ENABLE;
++ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
++ break;
++
++ case CLOCK_EVT_MODE_UNUSED:
++ case CLOCK_EVT_MODE_SHUTDOWN:
++ apbt_disable_int(timer_num);
++ ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
++ ctrl &= ~APBTMR_CONTROL_ENABLE;
++ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
++ break;
++
++ case CLOCK_EVT_MODE_RESUME:
++ apbt_enable_int(timer_num);
++ break;
++ }
++}
++
++static int apbt_next_event(unsigned long delta,
++ struct clock_event_device *evt)
++{
++ unsigned long ctrl;
++ int timer_num;
++
++ struct apbt_dev *adev = EVT_TO_APBT_DEV(evt);
++
++ timer_num = adev->num;
++ /* Disable timer */
++ ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
++ ctrl &= ~APBTMR_CONTROL_ENABLE;
++ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
++ /* write new count */
++ apbt_writel(timer_num, delta, APBTMR_N_LOAD_COUNT);
++ ctrl |= APBTMR_CONTROL_ENABLE;
++ apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
++ return 0;
++}
++
++/*
++ * APB timer clock is not in sync with pclk on Langwell, which translates to
++ * unreliable read value caused by sampling error. the error does not add up
++ * overtime and only happens when sampling a 0 as a 1 by mistake. so the time
++ * would go backwards. the following code is trying to prevent time traveling
++ * backwards. little bit paranoid.
++ */
++static cycle_t apbt_read_clocksource(struct clocksource *cs)
++{
++ unsigned long t0, t1, t2;
++ static unsigned long last_read;
++
++bad_count:
++ t1 = apbt_readl(phy_cs_timer_id,
++ APBTMR_N_CURRENT_VALUE);
++ t2 = apbt_readl(phy_cs_timer_id,
++ APBTMR_N_CURRENT_VALUE);
++ if (unlikely(t1 < t2)) {
++ pr_debug("APBT: read current count error %lx:%lx:%lx\n",
++ t1, t2, t2 - t1);
++ goto bad_count;
++ }
++ /*
++ * check against cached last read, makes sure time does not go back.
++ * it could be a normal rollover but we will do tripple check anyway
++ */
++ if (unlikely(t2 > last_read)) {
++ /* check if we have a normal rollover */
++ unsigned long raw_intr_status =
++ apbt_readl_reg(APBTMRS_RAW_INT_STATUS);
++ /*
++ * cs timer interrupt is masked but raw intr bit is set if
++ * rollover occurs. then we read EOI reg to clear it.
++ */
++ if (raw_intr_status & (1 << phy_cs_timer_id)) {
++ apbt_readl(phy_cs_timer_id, APBTMR_N_EOI);
++ goto out;
++ }
++ pr_debug("APB CS going back %lx:%lx:%lx ",
++ t2, last_read, t2 - last_read);
++bad_count_x3:
++ pr_debug(KERN_INFO "tripple check enforced\n");
++ t0 = apbt_readl(phy_cs_timer_id,
++ APBTMR_N_CURRENT_VALUE);
++ udelay(1);
++ t1 = apbt_readl(phy_cs_timer_id,
++ APBTMR_N_CURRENT_VALUE);
++ udelay(1);
++ t2 = apbt_readl(phy_cs_timer_id,
++ APBTMR_N_CURRENT_VALUE);
++ if ((t2 > t1) || (t1 > t0)) {
++ printk(KERN_ERR "Error: APB CS tripple check failed\n");
++ goto bad_count_x3;
++ }
++ }
++out:
++ last_read = t2;
++ return (cycle_t)~t2;
++}
++
++static int apbt_clocksource_register(void)
++{
++ u64 start, now;
++ cycle_t t1;
++
++ /* Start the counter, use timer 2 as source, timer 0/1 for event */
++ apbt_start_counter(phy_cs_timer_id);
++
++ /* Verify whether apbt counter works */
++ t1 = apbt_read_clocksource(&clocksource_apbt);
++ rdtscll(start);
++
++ /*
++ * We don't know the TSC frequency yet, but waiting for
++ * 200000 TSC cycles is safe:
++ * 4 GHz == 50us
++ * 1 GHz == 200us
++ */
++ do {
++ rep_nop();
++ rdtscll(now);
++ } while ((now - start) < 200000UL);
++
++ /* APBT is the only always on clocksource, it has to work! */
++ if (t1 == apbt_read_clocksource(&clocksource_apbt))
++ panic("APBT counter not counting. APBT disabled\n");
++
++ /*
++ * initialize and register APBT clocksource
++ * convert that to ns/clock cycle
++ * mult = (ns/c) * 2^APBT_SHIFT
++ */
++ clocksource_apbt.mult = div_sc(MSEC_PER_SEC,
++ (unsigned long) apbt_freq, APBT_SHIFT);
++ clocksource_register(&clocksource_apbt);
++
++ return 0;
++}
++
++/*
++ * Early setup the APBT timer, only use timer 0 for booting then switch to
++ * per CPU timer if possible.
++ * returns 1 if per cpu apbt is setup
++ * returns 0 if no per cpu apbt is chosen
++ * panic if set up failed, this is the only platform timer on Moorestown.
++ */
++void __init apbt_time_init(void)
++{
++#ifdef CONFIG_SMP
++ int i;
++ struct sfi_timer_table_entry *p_mtmr;
++ unsigned int percpu_timer;
++ struct apbt_dev *adev;
++#endif
++
++ if (apb_timer_block_enabled)
++ return;
++ apbt_set_mapping();
++ if (apbt_virt_address) {
++ pr_debug("Found APBT version 0x%lx\n",\
++ apbt_readl_reg(APBTMRS_COMP_VERSION));
++ } else
++ goto out_noapbt;
++ /*
++ * Read the frequency and check for a sane value, for ESL model
++ * we extend the possible clock range to allow time scaling.
++ */
++
++ if (apbt_freq < APBT_MIN_FREQ || apbt_freq > APBT_MAX_FREQ) {
++ pr_debug("APBT has invalid freq 0x%llx\n", apbt_freq);
++ goto out_noapbt;
++ }
++ if (apbt_clocksource_register()) {
++ pr_debug("APBT has failed to register clocksource\n");
++ goto out_noapbt;
++ }
++ if (!apbt_clockevent_register())
++ apb_timer_block_enabled = 1;
++ else {
++ pr_debug("APBT has failed to register clockevent\n");
++ goto out_noapbt;
++ }
++#ifdef CONFIG_SMP
++ /* kernel cmdline disable apb timer, so we will use lapic timers */
++ if (disable_apbt_percpu) {
++ printk(KERN_INFO "apbt: disabled per cpu timer\n");
++ return;
++ }
++ pr_debug("%s: %d CPUs online\n", __func__, num_online_cpus());
++ if (num_possible_cpus() <= sfi_mtimer_num) {
++ percpu_timer = 1;
++ apbt_num_timers_used = num_possible_cpus();
++ } else {
++ percpu_timer = 0;
++ apbt_num_timers_used = 1;
++ adev = &per_cpu(cpu_apbt_dev, 0);
++ adev->flags &= ~APBT_DEV_USED;
++ }
++ pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used);
++
++ /* here we set up per CPU timer data structure */
++ apbt_devs = kzalloc(sizeof(struct apbt_dev) * apbt_num_timers_used,
++ GFP_KERNEL);
++ if (!apbt_devs) {
++ printk(KERN_ERR "Failed to allocate APB timer devices\n");
++ return;
++ }
++ for (i = 0; i < apbt_num_timers_used; i++) {
++ adev = &per_cpu(cpu_apbt_dev, i);
++ adev->num = i;
++ adev->cpu = i;
++ p_mtmr = sfi_get_mtmr(i);
++ if (p_mtmr) {
++ adev->tick = p_mtmr->freq_hz;
++ adev->irq = p_mtmr->irq;
++ } else
++ printk(KERN_ERR "Failed to get timer for cpu %d\n", i);
++ adev->count = 0;
++ sprintf(adev->name, "apbt%d", i);
++ }
++#endif
++
++ return;
++
++out_noapbt:
++ apbt_clear_mapping();
++ apb_timer_block_enabled = 0;
++ panic("failed to enable APB timer\n");
++}
++
++static inline void apbt_disable(int n)
++{
++ if (is_apbt_capable()) {
++ unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
++ ctrl &= ~APBTMR_CONTROL_ENABLE;
++ apbt_writel(n, ctrl, APBTMR_N_CONTROL);
++ }
++}
++
++/* called before apb_timer_enable, use early map */
++unsigned long apbt_quick_calibrate()
++{
++ int i, scale;
++ u64 old, new;
++ cycle_t t1, t2;
++ unsigned long khz = 0;
++ u32 loop, shift;
++
++ apbt_set_mapping();
++ apbt_start_counter(phy_cs_timer_id);
++
++ /* check if the timer can count down, otherwise return */
++ old = apbt_read_clocksource(&clocksource_apbt);
++ i = 10000;
++ while (--i) {
++ if (old != apbt_read_clocksource(&clocksource_apbt))
++ break;
++ }
++ if (!i)
++ goto failed;
++
++ /* count 16 ms */
++ loop = (apbt_freq * 1000) << 4;
++
++ /* restart the timer to ensure it won't get to 0 in the calibration */
++ apbt_start_counter(phy_cs_timer_id);
++
++ old = apbt_read_clocksource(&clocksource_apbt);
++ old += loop;
++
++ t1 = __native_read_tsc();
++
++ do {
++ new = apbt_read_clocksource(&clocksource_apbt);
++ } while (new < old);
++
++ t2 = __native_read_tsc();
++
++ shift = 5;
++ if (unlikely(loop >> shift == 0)) {
++ printk(KERN_INFO
++ "APBT TSC calibration failed, not enough resolution\n");
++ return 0;
++ }
++ scale = (int)div_u64((t2 - t1), loop >> shift);
++ khz = (scale * apbt_freq * 1000) >> shift;
++ printk(KERN_INFO "TSC freq calculated by APB timer is %lu khz\n", khz);
++ return khz;
++failed:
++ return 0;
++}
+Index: linux-2.6.33/arch/x86/include/asm/mrst.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/include/asm/mrst.h
+@@ -0,0 +1,16 @@
++/*
++ * mrst.h: Intel Moorestown platform specific setup code
++ *
++ * (C) Copyright 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ */
++#ifndef _ASM_X86_MRST_H
++#define _ASM_X86_MRST_H
++extern int pci_mrst_init(void);
++int __init sfi_parse_mrtc(struct sfi_table_header *table);
++
++#endif /* _ASM_X86_MRST_H */
+Index: linux-2.6.33/arch/x86/kernel/mrst.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/mrst.c
++++ linux-2.6.33/arch/x86/kernel/mrst.c
+@@ -2,16 +2,234 @@
+ * mrst.c: Intel Moorestown platform specific setup code
+ *
+ * (C) Copyright 2008 Intel Corporation
+- * Author: Jacob Pan (jacob.jun.pan@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
++
+ #include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/sfi.h>
++#include <linux/bitmap.h>
++#include <linux/threads.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/langwell_pmic_gpio.h>
++#include <linux/i2c.h>
++#include <linux/sfi.h>
++#include <linux/i2c/pca953x.h>
++#include <linux/gpio_keys.h>
++#include <linux/input.h>
++#include <linux/platform_device.h>
++#include <linux/irq.h>
+
++#include <asm/string.h>
+ #include <asm/setup.h>
++#include <asm/mpspec_def.h>
++#include <asm/hw_irq.h>
++#include <asm/apic.h>
++#include <asm/io_apic.h>
++#include <asm/apb_timer.h>
++#include <asm/io.h>
++#include <asm/mrst.h>
++#include <asm/vrtc.h>
++#include <asm/ipc_defs.h>
++#include <asm/reboot.h>
++#include <asm/i8259.h>
++
++#define LANGWELL_GPIO_ALT_ADDR 0xff12c038
++#define MRST_I2C_BUSNUM 3
++#define SFI_MRTC_MAX 8
++
++static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM];
++static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM];
++int sfi_mtimer_num;
++
++struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
++EXPORT_SYMBOL_GPL(sfi_mrtc_array);
++int sfi_mrtc_num;
++
++static inline void assign_to_mp_irq(struct mpc_intsrc *m,
++ struct mpc_intsrc *mp_irq)
++{
++ memcpy(mp_irq, m, sizeof(struct mpc_intsrc));
++}
++
++static inline int mp_irq_cmp(struct mpc_intsrc *mp_irq,
++ struct mpc_intsrc *m)
++{
++ return memcmp(mp_irq, m, sizeof(struct mpc_intsrc));
++}
++
++static void save_mp_irq(struct mpc_intsrc *m)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++) {
++ if (!mp_irq_cmp(&mp_irqs[i], m))
++ return;
++ }
++
++ assign_to_mp_irq(m, &mp_irqs[mp_irq_entries]);
++ if (++mp_irq_entries == MAX_IRQ_SOURCES)
++ panic("Max # of irq sources exceeded!!\n");
++}
++
++/* parse all the mtimer info to a global mtimer array */
++static int __init sfi_parse_mtmr(struct sfi_table_header *table)
++{
++ struct sfi_table_simple *sb;
++ struct sfi_timer_table_entry *pentry;
++ struct mpc_intsrc mp_irq;
++ int totallen;
++
++ sb = (struct sfi_table_simple *)table;
++ if (!sfi_mtimer_num) {
++ sfi_mtimer_num = SFI_GET_NUM_ENTRIES(sb,
++ struct sfi_timer_table_entry);
++ pentry = (struct sfi_timer_table_entry *) sb->pentry;
++ totallen = sfi_mtimer_num * sizeof(*pentry);
++ memcpy(sfi_mtimer_array, pentry, totallen);
++ }
++
++ printk(KERN_INFO "SFI: MTIMER info (num = %d):\n", sfi_mtimer_num);
++ pentry = sfi_mtimer_array;
++ for (totallen = 0; totallen < sfi_mtimer_num; totallen++, pentry++) {
++ printk(KERN_INFO "timer[%d]: paddr = 0x%08x, freq = %dHz,"
++ " irq = %d\n", totallen, (u32)pentry->phys_addr,
++ pentry->freq_hz, pentry->irq);
++ if (!pentry->irq)
++ continue;
++ mp_irq.type = MP_IOAPIC;
++ mp_irq.irqtype = mp_INT;
++ mp_irq.irqflag = 0;
++ mp_irq.srcbus = 0;
++ mp_irq.srcbusirq = pentry->irq; /* IRQ */
++ mp_irq.dstapic = MP_APIC_ALL;
++ mp_irq.dstirq = pentry->irq;
++ save_mp_irq(&mp_irq);
++ }
++
++ return 0;
++}
++
++struct sfi_timer_table_entry *sfi_get_mtmr(int hint)
++{
++ int i;
++ if (hint < sfi_mtimer_num) {
++ if (!sfi_mtimer_usage[hint]) {
++ printk(KERN_DEBUG "hint taken for timer %d irq %d\n",\
++ hint, sfi_mtimer_array[hint].irq);
++ sfi_mtimer_usage[hint] = 1;
++ return &sfi_mtimer_array[hint];
++ }
++ }
++ /* take the first timer available */
++ for (i = 0; i < sfi_mtimer_num;) {
++ if (!sfi_mtimer_usage[i]) {
++ sfi_mtimer_usage[i] = 1;
++ return &sfi_mtimer_array[i];
++ }
++ i++;
++ }
++ return NULL;
++}
++
++void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr)
++{
++ int i;
++ for (i = 0; i < sfi_mtimer_num;) {
++ if (mtmr->irq == sfi_mtimer_array[i].irq) {
++ sfi_mtimer_usage[i] = 0;
++ return;
++ }
++ i++;
++ }
++}
++
++/* parse all the mrtc info to a global mrtc array */
++int __init sfi_parse_mrtc(struct sfi_table_header *table)
++{
++ struct sfi_table_simple *sb;
++ struct sfi_rtc_table_entry *pentry;
++ struct mpc_intsrc mp_irq;
++
++ int totallen;
++
++ sb = (struct sfi_table_simple *)table;
++ if (!sfi_mrtc_num) {
++ sfi_mrtc_num = SFI_GET_NUM_ENTRIES(sb,
++ struct sfi_rtc_table_entry);
++ pentry = (struct sfi_rtc_table_entry *)sb->pentry;
++ totallen = sfi_mrtc_num * sizeof(*pentry);
++ memcpy(sfi_mrtc_array, pentry, totallen);
++ }
++
++ printk(KERN_INFO "SFI: RTC info (num = %d):\n", sfi_mrtc_num);
++ pentry = sfi_mrtc_array;
++ for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) {
++ printk(KERN_INFO "RTC[%d]: paddr = 0x%08x, irq = %d\n",
++ totallen, (u32)pentry->phys_addr, pentry->irq);
++ mp_irq.type = MP_IOAPIC;
++ mp_irq.irqtype = mp_INT;
++ mp_irq.irqflag = 0;
++ mp_irq.srcbus = 0;
++ mp_irq.srcbusirq = pentry->irq; /* IRQ */
++ mp_irq.dstapic = MP_APIC_ALL;
++ mp_irq.dstirq = pentry->irq;
++ save_mp_irq(&mp_irq);
++ }
++ return 0;
++}
++
++/*
++ * the secondary clock in Moorestown can be APBT or LAPIC clock, default to
++ * APBT but cmdline option can also override it.
++ */
++static void __cpuinit mrst_setup_secondary_clock(void)
++{
++ /* restore default lapic clock if disabled by cmdline */
++ if (disable_apbt_percpu)
++ return setup_secondary_APIC_clock();
++ apbt_setup_secondary_clock();
++}
++
++static unsigned long __init mrst_calibrate_tsc(void)
++{
++ unsigned long flags, fast_calibrate;
++
++ local_irq_save(flags);
++ fast_calibrate = apbt_quick_calibrate();
++ local_irq_restore(flags);
++
++ if (fast_calibrate)
++ return fast_calibrate;
++
++ return 0;
++}
++
++void __init mrst_time_init(void)
++{
++ sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr);
++ pre_init_apic_IRQ0();
++ apbt_time_init();
++}
++
++void __init mrst_rtc_init(void)
++{
++ sfi_table_parse(SFI_SIG_MRTC, NULL, NULL, sfi_parse_mrtc);
++}
++
++static void mrst_power_off(void)
++{
++ lnw_ipc_single_cmd(0xf1, 1, 0, 0);
++}
++
++static void mrst_reboot(void)
++{
++ lnw_ipc_single_cmd(0xf1, 0, 0, 0);
++}
+
+ /*
+ * Moorestown specific x86_init function overrides and early setup
+@@ -21,4 +239,241 @@ void __init x86_mrst_early_setup(void)
+ {
+ x86_init.resources.probe_roms = x86_init_noop;
+ x86_init.resources.reserve_resources = x86_init_noop;
++ x86_init.timers.timer_init = mrst_time_init;
++ x86_init.irqs.pre_vector_init = x86_init_noop;
++
++ x86_cpuinit.setup_percpu_clockev = mrst_setup_secondary_clock;
++
++ x86_platform.calibrate_tsc = mrst_calibrate_tsc;
++ x86_platform.get_wallclock = vrtc_get_time;
++ x86_platform.set_wallclock = vrtc_set_mmss;
++
++ x86_init.pci.init = pci_mrst_init;
++ x86_init.pci.fixup_irqs = x86_init_noop;
++
++ x86_init.oem.banner = mrst_rtc_init;
++ legacy_pic = &null_legacy_pic;
++
++ /* Moorestown specific power_off/restart method */
++ pm_power_off = mrst_power_off;
++ machine_ops.emergency_restart = mrst_reboot;
+ }
++
++/*
++ * the dummy SPI2 slaves are in the SPIB table with host_num = 0, but their
++ * chip_selects begin with MRST_SPI2_CS_START; this saves us from needing a
++ * dummy, ugly SPI2 controller driver
++ */
++#define MRST_SPI2_CS_START 4
++static struct langwell_pmic_gpio_platform_data pmic_gpio_pdata;
++
++static int __init sfi_parse_spib(struct sfi_table_header *table)
++{
++ struct sfi_table_simple *sb;
++ struct sfi_spi_table_entry *pentry;
++ struct spi_board_info *info;
++ int num, i, j;
++ int ioapic;
++ struct io_apic_irq_attr irq_attr;
++
++ sb = (struct sfi_table_simple *)table;
++ num = SFI_GET_NUM_ENTRIES(sb, struct sfi_spi_table_entry);
++ pentry = (struct sfi_spi_table_entry *) sb->pentry;
++
++ info = kzalloc(num * sizeof(*info), GFP_KERNEL);
++ if (!info) {
++ pr_info("%s(): Error in kzalloc\n", __func__);
++ return -ENOMEM;
++ }
++
++ if (num)
++ pr_info("Moorestown SPI devices info:\n");
++
++ for (i = 0, j = 0; i < num; i++, pentry++) {
++ strncpy(info[j].modalias, pentry->name, 16);
++ info[j].irq = pentry->irq_info;
++ info[j].bus_num = pentry->host_num;
++ info[j].chip_select = pentry->cs;
++ info[j].max_speed_hz = 3125000; /* hard coded */
++ if (info[i].chip_select >= MRST_SPI2_CS_START) {
++ /* these SPI2 devices are not exposed to system as PCI
++ * devices, but they have separate RTE entry in IOAPIC
++ * so we have to enable them one by one here
++ */
++ ioapic = mp_find_ioapic(info[j].irq);
++ irq_attr.ioapic = ioapic;
++ irq_attr.ioapic_pin = info[j].irq;
++ irq_attr.trigger = 1;
++ irq_attr.polarity = 1;
++ io_apic_set_pci_routing(NULL, info[j].irq,
++ &irq_attr);
++ }
++ info[j].platform_data = pentry->dev_info;
++
++ if (!strcmp(pentry->name, "pmic_gpio")) {
++ memcpy(&pmic_gpio_pdata, pentry->dev_info, 8);
++ pmic_gpio_pdata.gpiointr = 0xffffeff8;
++ info[j].platform_data = &pmic_gpio_pdata;
++ }
++ pr_info("info[%d]: name = %16s, irq = 0x%04x, bus = %d, "
++ "cs = %d\n", j, info[j].modalias, info[j].irq,
++ info[j].bus_num, info[j].chip_select);
++ j++;
++ }
++ spi_register_board_info(info, j);
++ kfree(info);
++ return 0;
++}
++
++static struct pca953x_platform_data max7315_pdata;
++static struct pca953x_platform_data max7315_pdata_2;
++
++static int __init sfi_parse_i2cb(struct sfi_table_header *table)
++{
++ struct sfi_table_simple *sb;
++ struct sfi_i2c_table_entry *pentry;
++ struct i2c_board_info *info[MRST_I2C_BUSNUM];
++ int table_length[MRST_I2C_BUSNUM] = {0};
++ int num, i, j, busnum;
++
++ sb = (struct sfi_table_simple *)table;
++ num = SFI_GET_NUM_ENTRIES(sb, struct sfi_i2c_table_entry);
++ pentry = (struct sfi_i2c_table_entry *) sb->pentry;
++
++ if (num <= 0)
++ return -ENODEV;
++
++ for (busnum = 0; busnum < MRST_I2C_BUSNUM; busnum++) {
++ info[busnum] = kzalloc(num * sizeof(**info), GFP_KERNEL);
++ if (!info[busnum]) {
++ pr_info("%s(): Error in kzalloc\n", __func__);
++ while (busnum--)
++ kfree(info[busnum]);
++ return -ENOMEM;
++ }
++ }
++
++ if (num)
++ pr_info("Moorestown I2C devices info:\n");
++
++ for (busnum = 0, j = 0; j < num; j++, pentry++) {
++ busnum = pentry->host_num;
++ if (busnum >= MRST_I2C_BUSNUM || busnum < 0)
++ continue;
++
++ i = table_length[busnum];
++ strncpy(info[busnum][i].type, pentry->name, 16);
++ info[busnum][i].irq = pentry->irq_info;
++ info[busnum][i].addr = pentry->addr;
++ info[busnum][i].platform_data = pentry->dev_info;
++ table_length[busnum]++;
++
++ if (!strcmp(pentry->name, "i2c_max7315")) {
++ strcpy(info[busnum][i].type, "max7315");
++ memcpy(&max7315_pdata, pentry->dev_info, 10);
++ info[busnum][i].platform_data = &max7315_pdata;
++ }
++ else if (!strcmp(pentry->name, "i2c_max7315_2")) {
++ strcpy(info[busnum][i].type, "max7315");
++ memcpy(&max7315_pdata_2, pentry->dev_info, 10);
++ info[busnum][i].platform_data = &max7315_pdata_2;
++ }
++
++ pr_info("info[%d]: bus = %d, name = %16s, irq = 0x%04x, addr = "
++ "0x%x\n", i, busnum, info[busnum][i].type,
++ info[busnum][i].irq, info[busnum][i].addr);
++ }
++
++ for (busnum = 0; busnum < MRST_I2C_BUSNUM; busnum++) {
++ i2c_register_board_info(busnum, info[busnum],
++ table_length[busnum]);
++ }
++
++ return 0;
++}
++
++/* setting multi-function-pin */
++static void set_alt_func(void)
++{
++ u32 __iomem *mem = ioremap_nocache(LANGWELL_GPIO_ALT_ADDR, 16);
++ u32 value;
++
++ if (!mem) {
++ pr_err("can not map GPIO controller address.\n");
++ return;
++ }
++ value = (readl(mem + 1) & 0x0000ffff) | 0x55550000;
++ writel(value, mem + 1);
++ value = (readl(mem + 2) & 0xf0000000) | 0x05555555;
++ writel(value, mem + 2);
++ value = (readl(mem + 3) & 0xfff000ff) | 0x00055500;
++ writel(value, mem + 3);
++
++ iounmap(mem);
++}
++
++static int __init mrst_platform_init(void)
++{
++ sfi_table_parse(SFI_SIG_SPIB, NULL, NULL, sfi_parse_spib);
++ sfi_table_parse(SFI_SIG_I2CB, NULL, NULL, sfi_parse_i2cb);
++ set_alt_func();
++ return 0;
++}
++
++arch_initcall(mrst_platform_init);
++
++static struct gpio_keys_button gpio_button[] = {
++ [0] = {
++ .desc = "power button1",
++ .code = KEY_POWER,
++ .type = EV_KEY,
++ .active_low = 1,
++ .debounce_interval = 3000, /*soft debounce*/
++ .gpio = 65,
++ },
++ [1] = {
++ .desc = "programmable button1",
++ .code = KEY_PROG1,
++ .type = EV_KEY,
++ .active_low = 1,
++ .debounce_interval = 20,
++ .gpio = 66,
++ },
++ [2] = {
++ .desc = "programmable button2",
++ .code = KEY_PROG2,
++ .type = EV_KEY,
++ .active_low = 1,
++ .debounce_interval = 20,
++ .gpio = 69
++ },
++ [3] = {
++ .desc = "lid switch",
++ .code = SW_LID,
++ .type = EV_SW,
++ .active_low = 1,
++ .debounce_interval = 20,
++ .gpio = 101
++ },
++};
++
++static struct gpio_keys_platform_data mrst_gpio_keys = {
++ .buttons = gpio_button,
++ .rep = 1,
++ .nbuttons = sizeof(gpio_button) / sizeof(struct gpio_keys_button),
++};
++
++static struct platform_device pb_device = {
++ .name = "gpio-keys",
++ .id = -1,
++ .dev = {
++ .platform_data = &mrst_gpio_keys,
++ },
++};
++
++static int __init pb_keys_init(void)
++{
++ return platform_device_register(&pb_device);
++}
++
++late_initcall(pb_keys_init);
+Index: linux-2.6.33/arch/x86/include/asm/io_apic.h
+===================================================================
+--- linux-2.6.33.orig/arch/x86/include/asm/io_apic.h
++++ linux-2.6.33/arch/x86/include/asm/io_apic.h
+@@ -143,8 +143,6 @@ extern int noioapicreroute;
+ /* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
+ extern int timer_through_8259;
+
+-extern void io_apic_disable_legacy(void);
+-
+ /*
+ * If we use the IO-APIC for IRQ routing, disable automatic
+ * assignment of PCI IRQ's.
+@@ -189,6 +187,7 @@ extern struct mp_ioapic_gsi mp_gsi_rout
+ int mp_find_ioapic(int gsi);
+ int mp_find_ioapic_pin(int ioapic, int gsi);
+ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base);
++extern void __init pre_init_apic_IRQ0(void);
+
+ #else /* !CONFIG_X86_IO_APIC */
+
+Index: linux-2.6.33/arch/x86/pci/mmconfig-shared.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/pci/mmconfig-shared.c
++++ linux-2.6.33/arch/x86/pci/mmconfig-shared.c
+@@ -601,7 +601,8 @@ static void __init __pci_mmcfg_init(int
+ if (!known_bridge)
+ acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
+
+- pci_mmcfg_reject_broken(early);
++ if (!acpi_disabled)
++ pci_mmcfg_reject_broken(early);
+
+ if (list_empty(&pci_mmcfg_list))
+ return;
+Index: linux-2.6.33/arch/x86/pci/Makefile
+===================================================================
+--- linux-2.6.33.orig/arch/x86/pci/Makefile
++++ linux-2.6.33/arch/x86/pci/Makefile
+@@ -13,7 +13,7 @@ obj-$(CONFIG_X86_VISWS) += visws.o
+
+ obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
+
+-obj-y += common.o early.o
++obj-y += common.o early.o mrst.o
+ obj-y += amd_bus.o
+ obj-$(CONFIG_X86_64) += bus_numa.o
+
+Index: linux-2.6.33/arch/x86/pci/mrst.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/pci/mrst.c
+@@ -0,0 +1,262 @@
++/*
++ * Moorestown PCI support
++ * Copyright (c) 2008 Intel Corporation
++ * Jesse Barnes <jesse.barnes@intel.com>
++ *
++ * Moorestown has an interesting PCI implementation:
++ * - configuration space is memory mapped (as defined by MCFG)
++ * - Lincroft devices also have a real, type 1 configuration space
++ * - Early Lincroft silicon has a type 1 access bug that will cause
++ * a hang if non-existent devices are accessed
++ * - some devices have the "fixed BAR" capability, which means
++ * they can't be relocated or modified; check for that during
++ * BAR sizing
++ *
++ * So, we use the MCFG space for all reads and writes, but also send
++ * Lincroft writes to type 1 space. But only read/write if the device
++ * actually exists, otherwise return all 1s for reads and bit bucket
++ * the writes.
++ */
++
++#include <linux/sched.h>
++#include <linux/pci.h>
++#include <linux/ioport.h>
++#include <linux/init.h>
++#include <linux/dmi.h>
++
++#include <asm/acpi.h>
++#include <asm/segment.h>
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/pci_x86.h>
++#include <asm/hw_irq.h>
++
++#define PCIE_CAP_OFFSET 0x100
++
++/* Fixed BAR fields */
++#define PCIE_VNDR_CAP_ID_FIXED_BAR 0x00 /* Fixed BAR (TBD) */
++#define PCI_FIXED_BAR_0_SIZE 0x04
++#define PCI_FIXED_BAR_1_SIZE 0x08
++#define PCI_FIXED_BAR_2_SIZE 0x0c
++#define PCI_FIXED_BAR_3_SIZE 0x10
++#define PCI_FIXED_BAR_4_SIZE 0x14
++#define PCI_FIXED_BAR_5_SIZE 0x1c
++
++/**
++ * fixed_bar_cap - return the offset of the fixed BAR cap if found
++ * @bus: PCI bus
++ * @devfn: device in question
++ *
++ * Look for the fixed BAR cap on @bus and @devfn, returning its offset
++ * if found or 0 otherwise.
++ */
++static int fixed_bar_cap(struct pci_bus *bus, unsigned int devfn)
++{
++ int pos;
++ u32 pcie_cap = 0, cap_data;
++ if (!raw_pci_ext_ops) return 0;
++
++ pos = PCIE_CAP_OFFSET;
++ while (pos) {
++ if (raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
++ devfn, pos, 4, &pcie_cap))
++ return 0;
++
++ if (pcie_cap == 0xffffffff)
++ return 0;
++
++ if (PCI_EXT_CAP_ID(pcie_cap) == PCI_EXT_CAP_ID_VNDR) {
++ raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
++ devfn, pos + 4, 4, &cap_data);
++ if ((cap_data & 0xffff) == PCIE_VNDR_CAP_ID_FIXED_BAR)
++ return pos;
++ }
++
++ pos = pcie_cap >> 20;
++ }
++
++ return 0;
++}
++
++static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn,
++ int reg, int len, u32 val, int offset)
++{
++ u32 size;
++ unsigned int domain, busnum;
++ int bar = (reg - PCI_BASE_ADDRESS_0) >> 2;
++
++ domain = pci_domain_nr(bus);
++ busnum = bus->number;
++
++ if (val == ~0 && len == 4) {
++ unsigned long decode;
++
++ raw_pci_ext_ops->read(domain, busnum, devfn,
++ offset + 8 + (bar * 4), 4, &size);
++
++ /* Turn the size into a decode pattern for the sizing code */
++ if (size) {
++ decode = size - 1;
++ decode |= decode >> 1;
++ decode |= decode >> 2;
++ decode |= decode >> 4;
++ decode |= decode >> 8;
++ decode |= decode >> 16;
++ decode++;
++ decode = ~(decode - 1);
++ } else {
++ decode = ~0;
++ }
++
++ /*
++ * If val is all ones, the core code is trying to size the reg,
++ * so update the mmconfig space with the real size.
++ *
++ * Note: this assumes the fixed size we got is a power of two.
++ */
++ return raw_pci_ext_ops->write(domain, busnum, devfn, reg, 4,
++ decode);
++ }
++
++ /* This is some other kind of BAR write, so just do it. */
++ return raw_pci_ext_ops->write(domain, busnum, devfn, reg, len, val);
++}
++
++/**
++ * type1_access_ok - check whether to use type 1
++ * @bus: bus number
++ * @devfn: device & function in question
++ *
++ * If the bus is on a Lincroft chip and it exists, or is not on a Lincroft at
++ * all, then we can go ahead with any reads & writes. If it's on a Lincroft,
++ * but doesn't exist, avoid the access altogether to keep the chip from
++ * hanging.
++ */
++static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
++{
++ /* This is a workaround for A0 LNC bug where PCI status register does
++ * not have new CAP bit set. can not be written by SW either.
++ *
++ * PCI header type in real LNC indicates a single function device, this
++ * will prevent probing other devices under the same function in PCI
++ * shim. Therefore, use the header type in shim instead.
++ */
++ if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE)
++ return 0;
++ if (bus == 0 && (devfn == PCI_DEVFN(2, 0) || devfn == PCI_DEVFN(0, 0)))
++ return 1;
++ return 0; /* langwell on others */
++}
++
++static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
++ int size, u32 *value)
++{
++ if (type1_access_ok(bus->number, devfn, where))
++ return pci_direct_conf1.read(pci_domain_nr(bus), bus->number,
++ devfn, where, size, value);
++ return raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
++ devfn, where, size, value);
++}
++
++static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
++ int size, u32 value)
++{
++ int offset;
++
++ /* On MRST, there is no PCI ROM BAR, this will cause a subsequent read
++ * to ROM BAR return 0 then being ignored.
++ */
++ if (where == PCI_ROM_ADDRESS)
++ return 0;
++
++ /*
++ * Devices with fixed BARs need special handling:
++ * - BAR sizing code will save, write ~0, read size, restore
++ * - so writes to fixed BARs need special handling
++ * - other writes to fixed BAR devices should go through mmconfig
++ */
++ offset = fixed_bar_cap(bus, devfn);
++ if (offset &&
++ (where >= PCI_BASE_ADDRESS_0 && where <= PCI_BASE_ADDRESS_5)) {
++ return pci_device_update_fixed(bus, devfn, where, size, value,
++ offset);
++ }
++
++ /*
++ * On Moorestown update both real & mmconfig space
++ * Note: early Lincroft silicon can't handle type 1 accesses to
++ * non-existent devices, so just eat the write in that case.
++ */
++ if (type1_access_ok(bus->number, devfn, where))
++ return pci_direct_conf1.write(pci_domain_nr(bus), bus->number,
++ devfn, where, size, value);
++ return raw_pci_ext_ops->write(pci_domain_nr(bus), bus->number, devfn,
++ where, size, value);
++}
++
++static int mrst_pci_irq_enable(struct pci_dev *dev)
++{
++ u8 pin;
++ struct io_apic_irq_attr irq_attr;
++
++ if (!dev->irq)
++ return 0;
++
++ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
++
++ /* MRST only have IOAPIC, the PCI irq lines are 1:1 mapped to
++ * IOAPIC RTE entries, so we just enable RTE for the device.
++ */
++ irq_attr.ioapic = mp_find_ioapic(dev->irq);
++ irq_attr.ioapic_pin = dev->irq;
++ irq_attr.trigger = 1; /* level */
++ irq_attr.polarity = 1; /* active low */
++ io_apic_set_pci_routing(&dev->dev, dev->irq, &irq_attr);
++
++ return 0;
++}
++
++struct pci_ops pci_mrst_ops = {
++ .read = pci_read,
++ .write = pci_write,
++};
++
++/**
++ * pci_mrst_init - installs pci_mrst_ops
++ *
++ * Moorestown has an interesting PCI implementation (see above).
++ * Called when the early platform detection installs it.
++ */
++int __init pci_mrst_init(void)
++{
++ printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
++ pci_mmcfg_late_init();
++ pcibios_enable_irq = mrst_pci_irq_enable;
++ pci_root_ops = pci_mrst_ops;
++ /* Continue with standard init */
++ return 1;
++}
++
++/*
++ * Langwell devices reside at fixed offsets, don't try to move them.
++ */
++static void __devinit pci_fixed_bar_fixup(struct pci_dev *dev)
++{
++ unsigned long offset;
++ u32 size;
++ int i;
++
++ /* Fixup the BAR sizes for fixed BAR devices and make them unmoveable */
++ offset = fixed_bar_cap(dev->bus, dev->devfn);
++ if (!offset || PCI_DEVFN(2, 0) == dev->devfn ||
++ PCI_DEVFN(2, 2) == dev->devfn)
++ return;
++
++ for (i = 0; i < PCI_ROM_RESOURCE; i++) {
++ pci_read_config_dword(dev, offset + 8 + (i * 4), &size);
++ dev->resource[i].end = dev->resource[i].start + size - 1;
++ dev->resource[i].flags |= IORESOURCE_PCI_FIXED;
++ }
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_fixed_bar_fixup);
++
+Index: linux-2.6.33/include/linux/pci_regs.h
+===================================================================
+--- linux-2.6.33.orig/include/linux/pci_regs.h
++++ linux-2.6.33/include/linux/pci_regs.h
+@@ -507,6 +507,7 @@
+ #define PCI_EXT_CAP_ID_VC 2
+ #define PCI_EXT_CAP_ID_DSN 3
+ #define PCI_EXT_CAP_ID_PWR 4
++#define PCI_EXT_CAP_ID_VNDR 11
+ #define PCI_EXT_CAP_ID_ACS 13
+ #define PCI_EXT_CAP_ID_ARI 14
+ #define PCI_EXT_CAP_ID_ATS 15
+Index: linux-2.6.33/arch/x86/include/asm/fixmap.h
+===================================================================
+--- linux-2.6.33.orig/arch/x86/include/asm/fixmap.h
++++ linux-2.6.33/arch/x86/include/asm/fixmap.h
+@@ -114,6 +114,10 @@ enum fixed_addresses {
+ FIX_TEXT_POKE1, /* reserve 2 pages for text_poke() */
+ FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
+ __end_of_permanent_fixed_addresses,
++
++#ifdef CONFIG_X86_MRST
++ FIX_LNW_VRTC,
++#endif
+ /*
+ * 256 temporary boot-time mappings, used by early_ioremap(),
+ * before ioremap() is functional.
+Index: linux-2.6.33/arch/x86/include/asm/vrtc.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/include/asm/vrtc.h
+@@ -0,0 +1,30 @@
++#ifndef _MRST_VRTC_H
++#define _MRST_VRTC_H
++
++#ifdef CONFIG_X86_MRST
++extern unsigned char vrtc_cmos_read(unsigned char reg);
++extern void vrtc_cmos_write(unsigned char val, unsigned char reg);
++
++extern struct sfi_rtc_table_entry sfi_mrtc_array[];
++extern int sfi_mrtc_num;
++
++extern unsigned long vrtc_get_time(void);
++extern int vrtc_set_mmss(unsigned long nowtime);
++
++#define MRST_VRTC_PGOFFSET (0xc00)
++
++#else
++static inline unsigned char vrtc_cmos_read(unsigned char reg)
++{
++ return 0xff;
++}
++
++static inline void vrtc_cmos_write(unsigned char val, unsigned char reg)
++{
++ return;
++}
++#endif
++
++#define MRST_VRTC_MAP_SZ (1024)
++
++#endif
+Index: linux-2.6.33/arch/x86/kernel/vrtc.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/kernel/vrtc.c
+@@ -0,0 +1,116 @@
++/*
++ * vrtc.c: Driver for virtual RTC device on Intel MID platform
++ *
++ * (C) Copyright 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ *
++ * Note:
++ * VRTC is emulated by system controller firmware, the real HW
++ * RTC is located in the PMIC device. SCU FW shadows PMIC RTC
++ * in a memory mapped IO space that is visible to the host IA
++ * processor. However, any updates to VRTC requires an IPI call
++ * to the SCU FW.
++ *
++ * This driver is based on RTC CMOS driver.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sfi.h>
++
++#include <asm/vrtc.h>
++#include <asm/time.h>
++#include <asm/fixmap.h>
++
++static unsigned char *vrtc_va __read_mostly;
++
++static void vrtc_init_mmap(void)
++{
++ unsigned long rtc_paddr = sfi_mrtc_array[0].phys_addr;
++
++ BUG_ON(!rtc_paddr);
++
++ /* vRTC's register address may not be page aligned */
++ set_fixmap_nocache(FIX_LNW_VRTC, rtc_paddr);
++ vrtc_va = (unsigned char __iomem *)__fix_to_virt(FIX_LNW_VRTC);
++ vrtc_va += rtc_paddr & ~PAGE_MASK;
++}
++
++unsigned char vrtc_cmos_read(unsigned char reg)
++{
++ unsigned char retval;
++
++ /* vRTC's registers range from 0x0 to 0xD */
++ if (reg > 0xd)
++ return 0xff;
++
++ if (unlikely(!vrtc_va))
++ vrtc_init_mmap();
++
++ lock_cmos_prefix(reg);
++ retval = *(vrtc_va + (reg << 2));
++ lock_cmos_suffix(reg);
++ return retval;
++}
++EXPORT_SYMBOL(vrtc_cmos_read);
++
++void vrtc_cmos_write(unsigned char val, unsigned char reg)
++{
++ if (reg > 0xd)
++ return;
++
++ if (unlikely(!vrtc_va))
++ vrtc_init_mmap();
++
++ lock_cmos_prefix(reg);
++ *(vrtc_va + (reg << 2)) = val;
++ lock_cmos_suffix(reg);
++}
++EXPORT_SYMBOL(vrtc_cmos_write);
++
++unsigned long vrtc_get_time(void)
++{
++ u8 sec, min, hour, mday, mon;
++ u32 year;
++
++ while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP))
++ cpu_relax();
++
++ sec = vrtc_cmos_read(RTC_SECONDS);
++ min = vrtc_cmos_read(RTC_MINUTES);
++ hour = vrtc_cmos_read(RTC_HOURS);
++ mday = vrtc_cmos_read(RTC_DAY_OF_MONTH);
++ mon = vrtc_cmos_read(RTC_MONTH);
++ year = vrtc_cmos_read(RTC_YEAR);
++
++ /* vRTC YEAR reg contains the offset to 1970 */
++ year += 1970;
++
++ printk(KERN_INFO "vRTC: sec: %d min: %d hour: %d day: %d "
++ "mon: %d year: %d\n", sec, min, hour, mday, mon, year);
++
++ return mktime(year, mon, mday, hour, min, sec);
++}
++
++/* Only care about the minutes and seconds */
++int vrtc_set_mmss(unsigned long nowtime)
++{
++ int real_sec, real_min;
++ int vrtc_min;
++
++ vrtc_min = vrtc_cmos_read(RTC_MINUTES);
++
++ real_sec = nowtime % 60;
++ real_min = nowtime / 60;
++ if (((abs(real_min - vrtc_min) + 15)/30) & 1)
++ real_min += 30;
++ real_min %= 60;
++
++ vrtc_cmos_write(real_sec, RTC_SECONDS);
++ vrtc_cmos_write(real_min, RTC_MINUTES);
++ return 0;
++}
+Index: linux-2.6.33/drivers/rtc/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/rtc/Kconfig
++++ linux-2.6.33/drivers/rtc/Kconfig
+@@ -423,6 +423,19 @@ config RTC_DRV_CMOS
+ This driver can also be built as a module. If so, the module
+ will be called rtc-cmos.
+
++config RTC_DRV_VRTC
++ tristate "Virtual RTC for MRST"
++ depends on X86_MRST
++ default y if X86_MRST
++
++ help
++ Say "yes" here to get direct support for the real time clock
++	  found in the Moorestown platform. The VRTC is an emulated RTC
++	  that derives its clock source from a real RTC in the PMIC. The
++	  MC146818-style programming interface is mostly preserved, except
++	  that updates are done via IPC calls to the system controller FW.
++
++
+ config RTC_DRV_DS1216
+ tristate "Dallas DS1216"
+ depends on SNI_RM
+Index: linux-2.6.33/drivers/rtc/Makefile
+===================================================================
+--- linux-2.6.33.orig/drivers/rtc/Makefile
++++ linux-2.6.33/drivers/rtc/Makefile
+@@ -28,6 +28,7 @@ obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq48
+ obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
+ obj-$(CONFIG_RTC_DRV_COH901331) += rtc-coh901331.o
+ obj-$(CONFIG_RTC_DRV_DM355EVM) += rtc-dm355evm.o
++obj-$(CONFIG_RTC_DRV_VRTC) += rtc-mrst.o
+ obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o
+ obj-$(CONFIG_RTC_DRV_DS1286) += rtc-ds1286.o
+ obj-$(CONFIG_RTC_DRV_DS1302) += rtc-ds1302.o
+Index: linux-2.6.33/drivers/rtc/rtc-mrst.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/rtc/rtc-mrst.c
+@@ -0,0 +1,660 @@
++/*
++ * rtc-mrst.c: Driver for Moorestown virtual RTC
++ *
++ * (C) Copyright 2009 Intel Corporation
++ * Author: Jacob Pan (jacob.jun.pan@intel.com)
++ * Feng Tang (feng.tang@intel.com)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ *
++ * Note:
++ * VRTC is emulated by system controller firmware, the real HW
++ * RTC is located in the PMIC device. SCU FW shadows PMIC RTC
++ * in a memory mapped IO space that is visible to the host IA
++ * processor. However, any updates to VRTC requires an IPI call
++ * to the SCU FW.
++ *
++ * This driver is based on RTC CMOS driver.
++ */
++
++/*
++ * Note:
++ * * MRST vRTC only supports binary mode and 24H mode
++ * * MRST vRTC only supports PIE and AIE, no UIE
++ * * its alarm function is limited to hr/min/sec
++ * * so far it doesn't support the wake event function
++ */
++
++#include <linux/mod_devicetable.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/spinlock.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/sfi.h>
++
++#include <asm-generic/rtc.h>
++
++#include <asm/ipc_defs.h>
++#include <asm/vrtc.h>
++
++struct mrst_rtc {
++ struct rtc_device *rtc;
++ struct device *dev;
++ int irq;
++ struct resource *iomem;
++
++ void (*wake_on)(struct device *);
++ void (*wake_off)(struct device *);
++
++ u8 enabled_wake;
++ u8 suspend_ctrl;
++
++ /* Newer hardware extends the original register set */
++ u8 day_alrm;
++ u8 mon_alrm;
++ u8 century;
++};
++
++/* both platform and pnp busses use negative numbers for invalid irqs */
++#define is_valid_irq(n) ((n) >= 0)
++
++static const char driver_name[] = "rtc_mrst";
++
++#define RTC_IRQMASK (RTC_PF | RTC_AF)
++
++static inline int is_intr(u8 rtc_intr)
++{
++ if (!(rtc_intr & RTC_IRQF))
++ return 0;
++ return rtc_intr & RTC_IRQMASK;
++}
++
++/*
++ * rtc_time's year contains the increment over 1900, but vRTC's YEAR
++ * register can't be programmed to value larger than 0x64, so vRTC
++ * driver chose to use 1970 (UNIX time start point) as the base, and
++ * do the translation in read/write time
++ */
++static int mrst_read_time(struct device *dev, struct rtc_time *time)
++{
++ unsigned long flags;
++
++ if (rtc_is_updating())
++ mdelay(20);
++
++ spin_lock_irqsave(&rtc_lock, flags);
++ time->tm_sec = vrtc_cmos_read(RTC_SECONDS);
++ time->tm_min = vrtc_cmos_read(RTC_MINUTES);
++ time->tm_hour = vrtc_cmos_read(RTC_HOURS);
++ time->tm_mday = vrtc_cmos_read(RTC_DAY_OF_MONTH);
++ time->tm_mon = vrtc_cmos_read(RTC_MONTH);
++ time->tm_year = vrtc_cmos_read(RTC_YEAR);
++ spin_unlock_irqrestore(&rtc_lock, flags);
++
++ /* Adjust for the 1970/1900 */
++ time->tm_year += 70;
++ time->tm_mon--;
++ return RTC_24H;
++}
++
++static int mrst_set_time(struct device *dev, struct rtc_time *time)
++{
++ int ret;
++ unsigned long flags;
++ unsigned char mon, day, hrs, min, sec;
++ unsigned int yrs;
++
++ yrs = time->tm_year;
++ mon = time->tm_mon + 1; /* tm_mon starts at zero */
++ day = time->tm_mday;
++ hrs = time->tm_hour;
++ min = time->tm_min;
++ sec = time->tm_sec;
++
++ if (yrs < 70 || yrs > 138)
++ return -EINVAL;
++ yrs -= 70;
++
++ spin_lock_irqsave(&rtc_lock, flags);
++
++ /* Need think about leap year */
++ vrtc_cmos_write(yrs, RTC_YEAR);
++ vrtc_cmos_write(mon, RTC_MONTH);
++ vrtc_cmos_write(day, RTC_DAY_OF_MONTH);
++ vrtc_cmos_write(hrs, RTC_HOURS);
++ vrtc_cmos_write(min, RTC_MINUTES);
++ vrtc_cmos_write(sec, RTC_SECONDS);
++
++ ret = lnw_ipc_single_cmd(IPC_VRTC_CMD, IPC_VRTC_SET_TIME, 0, 0);
++ spin_unlock_irqrestore(&rtc_lock, flags);
++ return ret;
++}
++
++static int mrst_read_alarm(struct device *dev, struct rtc_wkalrm *t)
++{
++ struct mrst_rtc *mrst = dev_get_drvdata(dev);
++ unsigned char rtc_control;
++
++ if (!is_valid_irq(mrst->irq))
++ return -EIO;
++
++ /* Basic alarms only support hour, minute, and seconds fields.
++ * Some also support day and month, for alarms up to a year in
++ * the future.
++ */
++ t->time.tm_mday = -1;
++ t->time.tm_mon = -1;
++ t->time.tm_year = -1;
++
++ /* vRTC only supports binary mode */
++ spin_lock_irq(&rtc_lock);
++ t->time.tm_sec = vrtc_cmos_read(RTC_SECONDS_ALARM);
++ t->time.tm_min = vrtc_cmos_read(RTC_MINUTES_ALARM);
++ t->time.tm_hour = vrtc_cmos_read(RTC_HOURS_ALARM);
++
++ rtc_control = vrtc_cmos_read(RTC_CONTROL);
++ spin_unlock_irq(&rtc_lock);
++
++ t->enabled = !!(rtc_control & RTC_AIE);
++ t->pending = 0;
++
++ return 0;
++}
++
++static void mrst_checkintr(struct mrst_rtc *mrst, unsigned char rtc_control)
++{
++ unsigned char rtc_intr;
++
++ /*
++ * NOTE after changing RTC_xIE bits we always read INTR_FLAGS;
++ * allegedly some older rtcs need that to handle irqs properly
++ */
++ rtc_intr = vrtc_cmos_read(RTC_INTR_FLAGS);
++ rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
++ if (is_intr(rtc_intr))
++ rtc_update_irq(mrst->rtc, 1, rtc_intr);
++}
++
++static void mrst_irq_enable(struct mrst_rtc *mrst, unsigned char mask)
++{
++ unsigned char rtc_control;
++
++ /*
++ * Flush any pending IRQ status, notably for update irqs,
++ * before we enable new IRQs
++ */
++ rtc_control = vrtc_cmos_read(RTC_CONTROL);
++ mrst_checkintr(mrst, rtc_control);
++
++ rtc_control |= mask;
++ vrtc_cmos_write(rtc_control, RTC_CONTROL);
++
++ mrst_checkintr(mrst, rtc_control);
++}
++
++static void mrst_irq_disable(struct mrst_rtc *mrst, unsigned char mask)
++{
++ unsigned char rtc_control;
++
++ rtc_control = vrtc_cmos_read(RTC_CONTROL);
++ rtc_control &= ~mask;
++ vrtc_cmos_write(rtc_control, RTC_CONTROL);
++ mrst_checkintr(mrst, rtc_control);
++}
++
++static int mrst_set_alarm(struct device *dev, struct rtc_wkalrm *t)
++{
++ struct mrst_rtc *mrst = dev_get_drvdata(dev);
++ unsigned char hrs, min, sec;
++ int ret = 0;
++
++ if (!is_valid_irq(mrst->irq))
++ return -EIO;
++
++ hrs = t->time.tm_hour;
++ min = t->time.tm_min;
++ sec = t->time.tm_sec;
++
++ spin_lock_irq(&rtc_lock);
++ /* Next rtc irq must not be from previous alarm setting */
++ mrst_irq_disable(mrst, RTC_AIE);
++
++ /* Update alarm */
++ vrtc_cmos_write(hrs, RTC_HOURS_ALARM);
++ vrtc_cmos_write(min, RTC_MINUTES_ALARM);
++ vrtc_cmos_write(sec, RTC_SECONDS_ALARM);
++
++ ret = lnw_ipc_single_cmd(IPC_VRTC_CMD, IPC_VRTC_SET_ALARM, 0, 0);
++ spin_unlock_irq(&rtc_lock);
++
++ if (ret)
++ return ret;
++
++ spin_lock_irq(&rtc_lock);
++ if (t->enabled)
++ mrst_irq_enable(mrst, RTC_AIE);
++
++ spin_unlock_irq(&rtc_lock);
++
++ return 0;
++}
++
++
++static int mrst_irq_set_state(struct device *dev, int enabled)
++{
++ struct mrst_rtc *mrst = dev_get_drvdata(dev);
++ unsigned long flags;
++
++ if (!is_valid_irq(mrst->irq))
++ return -ENXIO;
++
++ spin_lock_irqsave(&rtc_lock, flags);
++
++ if (enabled)
++ mrst_irq_enable(mrst, RTC_PIE);
++ else
++ mrst_irq_disable(mrst, RTC_PIE);
++
++ spin_unlock_irqrestore(&rtc_lock, flags);
++ return 0;
++}
++
++#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
++
++/* Currently, the vRTC doesn't support UIE ON/OFF */
++static int
++mrst_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
++{
++ struct mrst_rtc *mrst = dev_get_drvdata(dev);
++ unsigned long flags;
++
++ switch (cmd) {
++ case RTC_AIE_OFF:
++ case RTC_AIE_ON:
++ if (!is_valid_irq(mrst->irq))
++ return -EINVAL;
++ break;
++ default:
++ /* PIE ON/OFF is handled by mrst_irq_set_state() */
++ return -ENOIOCTLCMD;
++ }
++
++ spin_lock_irqsave(&rtc_lock, flags);
++ switch (cmd) {
++ case RTC_AIE_OFF: /* alarm off */
++ mrst_irq_disable(mrst, RTC_AIE);
++ break;
++ case RTC_AIE_ON: /* alarm on */
++ mrst_irq_enable(mrst, RTC_AIE);
++ break;
++ }
++ spin_unlock_irqrestore(&rtc_lock, flags);
++ return 0;
++}
++
++#else
++#define mrst_rtc_ioctl NULL
++#endif
++
++#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
++
++static int mrst_procfs(struct device *dev, struct seq_file *seq)
++{
++ unsigned char rtc_control, valid;
++
++ spin_lock_irq(&rtc_lock);
++ rtc_control = vrtc_cmos_read(RTC_CONTROL);
++ valid = vrtc_cmos_read(RTC_VALID);
++ spin_unlock_irq(&rtc_lock);
++
++ return seq_printf(seq,
++ "periodic_IRQ\t: %s\n"
++ "square_wave\t: %s\n"
++ "BCD\t\t: %s\n"
++ "DST_enable\t: %s\n"
++ "periodic_freq\t: daily\n",
++ (rtc_control & RTC_PIE) ? "yes" : "no",
++ (rtc_control & RTC_SQWE) ? "yes" : "no",
++ (rtc_control & RTC_DM_BINARY) ? "no" : "yes",
++ (rtc_control & RTC_DST_EN) ? "yes" : "no");
++}
++
++#else
++#define mrst_procfs NULL
++#endif
++
++static const struct rtc_class_ops mrst_rtc_ops = {
++ .ioctl = mrst_rtc_ioctl,
++ .read_time = mrst_read_time,
++ .set_time = mrst_set_time,
++ .read_alarm = mrst_read_alarm,
++ .set_alarm = mrst_set_alarm,
++ .proc = mrst_procfs,
++ .irq_set_freq = NULL,
++ .irq_set_state = mrst_irq_set_state,
++};
++
++static struct mrst_rtc mrst_rtc;
++
++/*
++ * When vRTC IRQ is captured by SCU FW, FW will clear the AIE bit in
++ * Reg B, so no need for this driver to clear it
++ */
++static irqreturn_t mrst_interrupt(int irq, void *p)
++{
++ u8 irqstat;
++
++ spin_lock(&rtc_lock);
++ /* This read will clear all IRQ flags inside Reg C */
++ irqstat = vrtc_cmos_read(RTC_INTR_FLAGS);
++ spin_unlock(&rtc_lock);
++
++ irqstat &= RTC_IRQMASK | RTC_IRQF;
++ if (is_intr(irqstat)) {
++ rtc_update_irq(p, 1, irqstat);
++ return IRQ_HANDLED;
++ } else {
++ printk(KERN_ERR "vRTC: error in IRQ handler\n");
++ return IRQ_NONE;
++ }
++}
++
++static int __init
++vrtc_mrst_do_probe(struct device *dev, struct resource *iomem, int rtc_irq)
++{
++ int retval = 0;
++ unsigned char rtc_control;
++
++ /* There can be only one ... */
++ if (mrst_rtc.dev)
++ return -EBUSY;
++
++ if (!iomem)
++ return -ENODEV;
++
++ iomem = request_mem_region(iomem->start,
++ iomem->end + 1 - iomem->start,
++ driver_name);
++ if (!iomem) {
++ dev_dbg(dev, "i/o mem already in use.\n");
++ return -EBUSY;
++ }
++
++ mrst_rtc.irq = rtc_irq;
++ mrst_rtc.iomem = iomem;
++
++ mrst_rtc.day_alrm = 0;
++ mrst_rtc.mon_alrm = 0;
++ mrst_rtc.century = 0;
++ mrst_rtc.wake_on = NULL;
++ mrst_rtc.wake_off = NULL;
++
++ mrst_rtc.rtc = rtc_device_register(driver_name, dev,
++ &mrst_rtc_ops, THIS_MODULE);
++ if (IS_ERR(mrst_rtc.rtc)) {
++ retval = PTR_ERR(mrst_rtc.rtc);
++ goto cleanup0;
++ }
++
++ mrst_rtc.dev = dev;
++ dev_set_drvdata(dev, &mrst_rtc);
++ rename_region(iomem, dev_name(&mrst_rtc.rtc->dev));
++
++ spin_lock_irq(&rtc_lock);
++ mrst_irq_disable(&mrst_rtc, RTC_PIE | RTC_AIE);
++ rtc_control = vrtc_cmos_read(RTC_CONTROL);
++ spin_unlock_irq(&rtc_lock);
++
++ if (!(rtc_control & RTC_24H) || (rtc_control & (RTC_DM_BINARY)))
++ dev_dbg(dev, "TODO: support more than 24-hr BCD mode \n");
++
++ if (is_valid_irq(rtc_irq)) {
++ irq_handler_t rtc_mrst_int_handler;
++ rtc_mrst_int_handler = mrst_interrupt;
++
++ retval = request_irq(rtc_irq, rtc_mrst_int_handler,
++ IRQF_DISABLED, dev_name(&mrst_rtc.rtc->dev),
++ mrst_rtc.rtc);
++ if (retval < 0) {
++ dev_dbg(dev, "IRQ %d is already in use, err %d\n",
++ rtc_irq, retval);
++ goto cleanup1;
++ }
++ }
++
++ pr_info("vRTC driver for Moorewtown is initialized\n");
++ return 0;
++
++cleanup1:
++ mrst_rtc.dev = NULL;
++ rtc_device_unregister(mrst_rtc.rtc);
++cleanup0:
++ release_region(iomem->start, iomem->end + 1 - iomem->start);
++ pr_warning("vRTC driver for Moorewtown initialization Failed!!\n");
++ return retval;
++}
++
++static void rtc_mrst_do_shutdown(void)
++{
++ spin_lock_irq(&rtc_lock);
++ mrst_irq_disable(&mrst_rtc, RTC_IRQMASK);
++ spin_unlock_irq(&rtc_lock);
++}
++
++static void __exit rtc_mrst_do_remove(struct device *dev)
++{
++ struct mrst_rtc *mrst = dev_get_drvdata(dev);
++ struct resource *iomem;
++
++ rtc_mrst_do_shutdown();
++
++ if (is_valid_irq(mrst->irq))
++ free_irq(mrst->irq, mrst->rtc);
++
++ rtc_device_unregister(mrst->rtc);
++ mrst->rtc = NULL;
++
++ iomem = mrst->iomem;
++ release_region(iomem->start, iomem->end + 1 - iomem->start);
++ mrst->iomem = NULL;
++
++ mrst->dev = NULL;
++ dev_set_drvdata(dev, NULL);
++}
++
++#ifdef CONFIG_PM
++
++static int mrst_suspend(struct device *dev, pm_message_t mesg)
++{
++ struct mrst_rtc *mrst = dev_get_drvdata(dev);
++ unsigned char tmp;
++
++ /* Only the alarm might be a wakeup event source */
++ spin_lock_irq(&rtc_lock);
++ mrst->suspend_ctrl = tmp = vrtc_cmos_read(RTC_CONTROL);
++ if (tmp & (RTC_PIE | RTC_AIE)) {
++ unsigned char mask;
++
++ if (device_may_wakeup(dev))
++ mask = RTC_IRQMASK & ~RTC_AIE;
++ else
++ mask = RTC_IRQMASK;
++ tmp &= ~mask;
++ vrtc_cmos_write(tmp, RTC_CONTROL);
++
++ mrst_checkintr(mrst, tmp);
++ }
++ spin_unlock_irq(&rtc_lock);
++
++ if (tmp & RTC_AIE) {
++ mrst->enabled_wake = 1;
++ if (mrst->wake_on)
++ mrst->wake_on(dev);
++ else
++ enable_irq_wake(mrst->irq);
++ }
++
++ pr_debug("%s: suspend%s, ctrl %02x\n",
++ dev_name(&mrst_rtc.rtc->dev),
++ (tmp & RTC_AIE) ? ", alarm may wake" : "",
++ tmp);
++
++ return 0;
++}
++
++/*
++ * We want RTC alarms to wake us from e.g. ACPI G2/S5 "soft off", even
++ * after a detour through G3 "mechanical off", although the ACPI spec
++ * says wakeup should only work from G1/S4 "hibernate". To most users,
++ * distinctions between S4 and S5 are pointless. So when the hardware
++ * allows, don't draw that distinction.
++ */
++static inline int mrst_poweroff(struct device *dev)
++{
++ return mrst_suspend(dev, PMSG_HIBERNATE);
++}
++
++static int mrst_resume(struct device *dev)
++{
++ struct mrst_rtc *mrst = dev_get_drvdata(dev);
++ unsigned char tmp = mrst->suspend_ctrl;
++
++ /* Re-enable any irqs previously active */
++ if (tmp & RTC_IRQMASK) {
++ unsigned char mask;
++
++ if (mrst->enabled_wake) {
++ if (mrst->wake_off)
++ mrst->wake_off(dev);
++ else
++ disable_irq_wake(mrst->irq);
++ mrst->enabled_wake = 0;
++ }
++
++ spin_lock_irq(&rtc_lock);
++ do {
++ vrtc_cmos_write(tmp, RTC_CONTROL);
++
++ mask = vrtc_cmos_read(RTC_INTR_FLAGS);
++ mask &= (tmp & RTC_IRQMASK) | RTC_IRQF;
++ if (!is_intr(mask))
++ break;
++
++ rtc_update_irq(mrst->rtc, 1, mask);
++ tmp &= ~RTC_AIE;
++ } while (mask & RTC_AIE);
++ spin_unlock_irq(&rtc_lock);
++ }
++
++ pr_debug("%s: resume, ctrl %02x\n",
++ dev_name(&mrst_rtc.rtc->dev),
++ tmp);
++
++ return 0;
++}
++
++#else
++#define mrst_suspend NULL
++#define mrst_resume NULL
++
++static inline int mrst_poweroff(struct device *dev)
++{
++ return -ENOSYS;
++}
++
++#endif
++
++
++/*----------------------------------------------------------------*/
++
++/* Platform setup should have set up an RTC device, when PNP is
++ * unavailable ... this could happen even on (older) PCs.
++ */
++
++static int __init vrtc_mrst_platform_probe(struct platform_device *pdev)
++{
++ return vrtc_mrst_do_probe(&pdev->dev,
++ platform_get_resource(pdev, IORESOURCE_MEM, 0),
++ platform_get_irq(pdev, 0));
++}
++
++static int __exit vrtc_mrst_platform_remove(struct platform_device *pdev)
++{
++ rtc_mrst_do_remove(&pdev->dev);
++ return 0;
++}
++
++static void vrtc_mrst_platform_shutdown(struct platform_device *pdev)
++{
++ if (system_state == SYSTEM_POWER_OFF && !mrst_poweroff(&pdev->dev))
++ return;
++
++ rtc_mrst_do_shutdown();
++}
++
++/* Work with hotplug and coldplug */
++MODULE_ALIAS("platform:vrtc_mrst");
++
++static struct platform_driver vrtc_mrst_platform_driver = {
++ .remove = __exit_p(vrtc_mrst_platform_remove),
++ .shutdown = vrtc_mrst_platform_shutdown,
++ .driver = {
++ .name = (char *) driver_name,
++ .suspend = mrst_suspend,
++ .resume = mrst_resume,
++ }
++};
++
++/*
++ * Moorestown platform has memory mapped virtual RTC device that emulates
++ * the programming interface of the RTC.
++ */
++
++static struct resource vrtc_resources[] = {
++ [0] = {
++ .flags = IORESOURCE_MEM,
++ },
++ [1] = {
++ .flags = IORESOURCE_IRQ,
++ }
++};
++
++static struct platform_device vrtc_device = {
++ .name = "rtc_mrst",
++ .id = -1,
++ .resource = vrtc_resources,
++ .num_resources = ARRAY_SIZE(vrtc_resources),
++};
++
++static int __init vrtc_mrst_init(void)
++{
++ /* iomem resource */
++ vrtc_resources[0].start = sfi_mrtc_array[0].phys_addr;
++ vrtc_resources[0].end = sfi_mrtc_array[0].phys_addr +
++ MRST_VRTC_MAP_SZ;
++ /* irq resource */
++ vrtc_resources[1].start = sfi_mrtc_array[0].irq;
++ vrtc_resources[1].end = sfi_mrtc_array[0].irq;
++
++ platform_device_register(&vrtc_device);
++ return platform_driver_probe(&vrtc_mrst_platform_driver,
++ vrtc_mrst_platform_probe);
++}
++
++static void __exit vrtc_mrst_exit(void)
++{
++ platform_driver_unregister(&vrtc_mrst_platform_driver);
++ platform_device_unregister(&vrtc_device);
++}
++
++module_init(vrtc_mrst_init);
++module_exit(vrtc_mrst_exit);
++
++MODULE_AUTHOR("Jacob Pan; Feng Tang");
++MODULE_DESCRIPTION("Driver for Moorestown virtual RTC");
++MODULE_LICENSE("GPL");
+Index: linux-2.6.33/drivers/spi/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/spi/Kconfig
++++ linux-2.6.33/drivers/spi/Kconfig
+@@ -302,6 +302,18 @@ config SPI_NUC900
+ select SPI_BITBANG
+ help
+ SPI driver for Nuvoton NUC900 series ARM SoCs
++config SPI_MRST
++ tristate "SPI controller driver for Intel Moorestown platform "
++ depends on SPI_MASTER && PCI && X86_MRST
++ help
++ This is the SPI controller master driver for Intel Moorestown platform
++
++config SPI_MRST_DMA
++ boolean "Enable DMA for MRST SPI0 controller"
++ default y
++ depends on SPI_MRST && INTEL_LNW_DMAC2
++ help
++ This has to be enabled after Moorestown DMAC2 driver is enabled
+
+ #
+ # Add new SPI master controllers in alphabetical order above this line
+Index: linux-2.6.33/drivers/spi/Makefile
+===================================================================
+--- linux-2.6.33.orig/drivers/spi/Makefile
++++ linux-2.6.33/drivers/spi/Makefile
+@@ -42,6 +42,7 @@ obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.
+ obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o
+ obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o
+ obj-$(CONFIG_SPI_NUC900) += spi_nuc900.o
++obj-$(CONFIG_SPI_MRST) += mrst_spi.o
+
+ # special build for s3c24xx spi driver with fiq support
+ spi_s3c24xx_hw-y := spi_s3c24xx.o
+Index: linux-2.6.33/drivers/spi/mrst_spi.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/spi/mrst_spi.c
+@@ -0,0 +1,1382 @@
++/*
++ * mrst_spi.c - Moorestown SPI controller driver (referring pxa2xx_spi.c)
++ *
++ * Copyright (C) Intel 2008 Feng Tang <feng.tang@intel.com>
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++
++/* Note:
++ *
++ * * FW will create a SPI device info block table, and the driver needs to
++ * parse it out and use register_board_info to register the devices with
++ * the kernel
++ */
++
++#include <linux/delay.h>
++#include <linux/highmem.h>
++#include <linux/pci.h>
++#include <linux/dma-mapping.h>
++#include <linux/interrupt.h>
++
++#include <linux/spi/spi.h>
++#include <linux/spi/mrst_spi.h>
++
++#define MRST_MAX_DMA_LEN 2047
++#ifdef CONFIG_SPI_MRST_DMA
++#include <linux/lnw_dma.h>
++#endif
++
++#ifdef CONFIG_DEBUG_FS
++#include <linux/debugfs.h>
++#endif
++
++#define DRIVER_NAME "mrst_spi"
++
++#define START_STATE ((void *)0)
++#define RUNNING_STATE ((void *)1)
++#define DONE_STATE ((void *)2)
++#define ERROR_STATE ((void *)-1)
++
++#define QUEUE_RUNNING 0
++#define QUEUE_STOPPED 1
++
++#define MRST_SPI_DEASSERT 0
++#define MRST_SPI_ASSERT 1
++
++/* HW info for the MRST Clock Control Unit, one 32b reg */
++#define MRST_SPI_CLK_BASE 100000000 /* 100m */
++#define MRST_CLK_SPI0_REG 0xff11d86c
++#define CLK_SPI_BDIV_OFFSET 0
++#define CLK_SPI_BDIV_MASK 0x00000007
++#define CLK_SPI_CDIV_OFFSET 9
++#define CLK_SPI_CDIV_MASK 0x00000e00
++#define CLK_SPI_CDIV_100M 0x0
++#define CLK_SPI_CDIV_50M 0x1
++#define CLK_SPI_CDIV_33M 0x2
++#define CLK_SPI_CDIV_25M 0x3
++#define CLK_SPI_DISABLE_OFFSET 8
++
++/* per controller struct */
++struct driver_data {
++ /* Driver model hookup */
++ struct pci_dev *pdev;
++ struct spi_master *master;
++
++ struct spi_device *devices;
++ struct spi_device *cur_dev;
++ enum mrst_ssi_type type;
++
++ /* phy and virtual register addresses */
++ void *paddr;
++ void *vaddr;
++ u32 iolen;
++ int irq;
++ dma_addr_t dma_addr;
++	u32 freq;		/* controller core clk frequency in Hz */
++
++ /* Driver message queue */
++ struct workqueue_struct *workqueue;
++ struct work_struct pump_messages;
++ spinlock_t lock;
++ struct list_head queue;
++ int busy;
++ int run;
++
++ /* Message Transfer pump */
++ struct tasklet_struct pump_transfers;
++
++ /* Current message transfer state info */
++ struct spi_message *cur_msg;
++ struct spi_transfer *cur_transfer;
++ struct chip_data *cur_chip;
++ struct chip_data *prev_chip;
++ size_t len;
++ void *tx;
++ void *tx_end;
++ void *rx;
++ void *rx_end;
++ int dma_mapped;
++ dma_addr_t rx_dma;
++ dma_addr_t tx_dma;
++ size_t rx_map_len;
++ size_t tx_map_len;
++ u8 n_bytes; /* current is a 1/2 bytes op */
++ u8 max_bits_per_word; /* SPI0's maxim width is 16 bits */
++ u32 dma_width;
++ int cs_change;
++ int (*write)(struct driver_data *drv_data);
++ int (*read)(struct driver_data *drv_data);
++ irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
++ void (*cs_control)(u32 command);
++
++#ifdef CONFIG_DEBUG_FS
++ struct dentry *debugfs;
++#endif
++
++ int dma_inited;
++
++#ifdef CONFIG_SPI_MRST_DMA
++ struct lnw_dma_slave dmas_tx;
++ struct lnw_dma_slave dmas_rx;
++ struct dma_chan *txchan;
++ struct dma_chan *rxchan;
++ int txdma_done;
++ int rxdma_done;
++
++ u64 tx_param;
++ u64 rx_param;
++ struct pci_dev *dma_dev;
++#endif
++};
++
++/* slave spi_dev related */
++struct chip_data {
++ /* cr0 and cr1 are only 16b valid */
++ u16 cr0;
++ u16 cr1;
++
++ u8 cs; /* chip select pin */
++ u8 n_bytes; /* current is a 1/2/4 byte op */
++ u8 tmode; /* TR/TO/RO/EEPROM */
++ u8 type; /* SPI/SSP/MicroWire */
++
++ u8 poll_mode; /* 1 means use poll mode */
++
++ u32 dma_width;
++ u32 rx_threshold;
++ u32 tx_threshold;
++ u8 enable_dma;
++ u8 bits_per_word;
++ u16 clk_div; /* baud rate divider */
++ u32 speed_hz; /* baud rate */
++ int (*write)(struct driver_data *drv_data);
++ int (*read)(struct driver_data *drv_data);
++ void (*cs_control)(u32 command);
++};
++
++#ifdef CONFIG_SPI_MRST_DMA
++static bool chan_filter(struct dma_chan *chan, void *param)
++{
++ struct driver_data *drv_data = param;
++ bool ret = false;
++
++ if (chan->device->dev == &drv_data->dma_dev->dev)
++ ret = true;
++ return ret;
++}
++
++static void mrst_spi_dma_init(struct driver_data *drv_data)
++{
++ struct lnw_dma_slave *rxs, *txs;
++ dma_cap_mask_t mask;
++ struct pci_dev *dmac2;
++
++ drv_data->txchan = NULL;
++ drv_data->rxchan = NULL;
++
++	/* the MRST SPI0 controller only works with MRST DMA controller 2 */
++ dmac2 = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0813, NULL);
++ if (!dmac2) {
++ printk(KERN_WARNING
++ "MRST SPI0: can't find DMAC2, dma init failed\n");
++ return;
++ } else
++ drv_data->dma_dev = dmac2;
++
++ /* 1. init rx channel */
++ rxs = &drv_data->dmas_rx;
++
++ rxs->dirn = DMA_FROM_DEVICE;
++ rxs->hs_mode = LNW_DMA_HW_HS;
++ rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
++ rxs->src_width = LNW_DMA_WIDTH_16BIT;
++ rxs->dst_width = LNW_DMA_WIDTH_32BIT;
++ rxs->src_msize = LNW_DMA_MSIZE_16;
++ rxs->dst_msize = LNW_DMA_MSIZE_16;
++
++ dma_cap_zero(mask);
++ dma_cap_set(DMA_MEMCPY, mask);
++ dma_cap_set(DMA_SLAVE, mask);
++
++ drv_data->rxchan = dma_request_channel(mask, chan_filter,
++ drv_data);
++ if (!drv_data->rxchan)
++ goto err_exit;
++ drv_data->rxchan->private = rxs;
++
++ /* 2. init tx channel */
++ txs = &drv_data->dmas_tx;
++
++ txs->dirn = DMA_TO_DEVICE;
++ txs->hs_mode = LNW_DMA_HW_HS;
++ txs->cfg_mode = LNW_DMA_MEM_TO_PER;
++ txs->src_width = LNW_DMA_WIDTH_32BIT;
++ txs->dst_width = LNW_DMA_WIDTH_16BIT;
++ txs->src_msize = LNW_DMA_MSIZE_16;
++ txs->dst_msize = LNW_DMA_MSIZE_16;
++
++ dma_cap_set(DMA_SLAVE, mask);
++ dma_cap_set(DMA_MEMCPY, mask);
++
++ drv_data->txchan = dma_request_channel(mask, chan_filter,
++ drv_data);
++ if (!drv_data->txchan)
++ goto free_rxchan;
++ drv_data->txchan->private = txs;
++
++ /* set the dma done bit to 1 */
++ drv_data->dma_inited = 1;
++ drv_data->txdma_done = 1;
++ drv_data->rxdma_done = 1;
++
++ drv_data->tx_param = ((u64)(u32)drv_data << 32)
++ | (u32)(&drv_data->txdma_done);
++ drv_data->rx_param = ((u64)(u32)drv_data << 32)
++ | (u32)(&drv_data->rxdma_done);
++ return;
++
++free_rxchan:
++ dma_release_channel(drv_data->rxchan);
++err_exit:
++ pci_dev_put(dmac2);
++ return;
++}
++
++static void mrst_spi_dma_exit(struct driver_data *drv_data)
++{
++ dma_release_channel(drv_data->txchan);
++ dma_release_channel(drv_data->rxchan);
++ pci_dev_put(drv_data->dma_dev);
++}
++
++
++static inline void unmap_dma_buffers(struct driver_data *drv_data);
++static void transfer_complete(struct driver_data *drv_data);
++
++static void mrst_spi_dma_done(void *arg)
++{
++ u64 *param = arg;
++ struct driver_data *drv_data;
++ int *done;
++
++ drv_data = (struct driver_data *)(u32)(*param >> 32);
++ done = (int *)(u32)(*param & 0xffffffff);
++
++ *done = 1;
++ /* wait till both tx/rx channels are done */
++ if (!drv_data->txdma_done || !drv_data->rxdma_done)
++ return;
++
++ transfer_complete(drv_data);
++}
++#endif
++
++
++#ifdef CONFIG_DEBUG_FS
++static int spi_show_regs_open(struct inode *inode, struct file *file)
++{
++ file->private_data = inode->i_private;
++ return 0;
++}
++
++#define SPI_REGS_BUFSIZE 1024
++static ssize_t spi_show_regs(struct file *file, char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ char *buf;
++ u32 len = 0;
++ ssize_t ret;
++ struct driver_data *drv_data;
++ void *reg;
++
++ drv_data = (struct driver_data *)file->private_data;
++ reg = drv_data->vaddr;
++
++ buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
++ if (!buf)
++ return 0;
++
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "MRST SPI0 registers:\n");
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "=================================\n");
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "CTRL0: \t\t0x%08x\n", read_ctrl0(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "CTRL1: \t\t0x%08x\n", read_ctrl1(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "SSIENR: \t0x%08x\n", read_ssienr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "SER: \t\t0x%08x\n", read_ser(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "BAUDR: \t\t0x%08x\n", read_baudr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "TXFTLR: \t0x%08x\n", read_txftlr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "RXFTLR: \t0x%08x\n", read_rxftlr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "TXFLR: \t\t0x%08x\n", read_txflr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "RXFLR: \t\t0x%08x\n", read_rxflr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "SR: \t\t0x%08x\n", read_sr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "IMR: \t\t0x%08x\n", read_imr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "ISR: \t\t0x%08x\n", read_isr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "DMACR: \t\t0x%08x\n", read_dmacr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "DMATDLR: \t0x%08x\n", read_dmatdlr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "DMARDLR: \t0x%08x\n", read_dmardlr(reg));
++ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
++ "=================================\n");
++
++ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
++ kfree(buf);
++ return ret;
++}
++
++static const struct file_operations mrst_spi_regs_ops = {
++ .owner = THIS_MODULE,
++ .open = spi_show_regs_open,
++ .read = spi_show_regs,
++};
++
++static int mrst_spi_debugfs_init(struct driver_data *drv_data)
++{
++ drv_data->debugfs = debugfs_create_dir("mrst_spi", NULL);
++ if (!drv_data->debugfs)
++ return -ENOMEM;
++
++ debugfs_create_file("registers", S_IFREG | S_IRUGO,
++ drv_data->debugfs, (void *)drv_data, &mrst_spi_regs_ops);
++ return 0;
++}
++
++static void mrst_spi_debugfs_remove(struct driver_data *drv_data)
++{
++ if (drv_data->debugfs)
++ debugfs_remove_recursive(drv_data->debugfs);
++}
++
++#else
++static inline int mrst_spi_debugfs_init(struct driver_data *drv_data)
++{
++}
++
++static inline void mrst_spi_debugfs_remove(struct driver_data *drv_data)
++{
++}
++#endif /* CONFIG_DEBUG_FS */
++
++static int flush(struct driver_data *drv_data)
++{
++ unsigned long limit = loops_per_jiffy << 1;
++ void *reg = drv_data->vaddr;
++
++ while (read_sr(reg) & SR_RF_NOT_EMPT) {
++ limit = loops_per_jiffy << 1;
++ while ((read_sr(reg) & SR_BUSY) && limit--)
++ ;
++ read_dr(reg);
++ }
++ return limit;
++}
++
++static void null_cs_control(u32 command)
++{
++}
++
++static int null_writer(struct driver_data *drv_data)
++{
++ void *reg = drv_data->vaddr;
++ u8 n_bytes = drv_data->n_bytes;
++
++ if (!(read_sr(reg) & SR_TF_NOT_FULL)
++ || (drv_data->tx == drv_data->tx_end))
++ return 0;
++
++ write_dr(0, reg);
++ drv_data->tx += n_bytes;
++ return 1;
++}
++
++static int null_reader(struct driver_data *drv_data)
++{
++ void *reg = drv_data->vaddr;
++ u8 n_bytes = drv_data->n_bytes;
++
++ while ((read_sr(reg) & SR_RF_NOT_EMPT)
++ && (drv_data->rx < drv_data->rx_end)) {
++ read_dr(reg);
++ drv_data->rx += n_bytes;
++ }
++ return drv_data->rx == drv_data->rx_end;
++}
++
++static int u8_writer(struct driver_data *drv_data)
++{
++ void *reg = drv_data->vaddr;
++
++ if (!(read_sr(reg) & SR_TF_NOT_FULL)
++ || (drv_data->tx == drv_data->tx_end))
++ return 0;
++
++ write_dr(*(u8 *)(drv_data->tx), reg);
++ ++drv_data->tx;
++
++ while (read_sr(reg) & SR_BUSY)
++ ;
++ return 1;
++}
++
++static int u8_reader(struct driver_data *drv_data)
++{
++ void *reg = drv_data->vaddr;
++
++ while ((read_sr(reg) & SR_RF_NOT_EMPT)
++ && (drv_data->rx < drv_data->rx_end)) {
++ *(u8 *)(drv_data->rx) = read_dr(reg);
++ ++drv_data->rx;
++ }
++
++ while (read_sr(reg) & SR_BUSY)
++ ;
++ return drv_data->rx == drv_data->rx_end;
++}
++
++static int u16_writer(struct driver_data *drv_data)
++{
++ void *reg = drv_data->vaddr;
++
++ if (!(read_sr(reg) & SR_TF_NOT_FULL)
++ || (drv_data->tx == drv_data->tx_end))
++ return 0;
++
++ write_dr(*(u16 *)(drv_data->tx), reg);
++ drv_data->tx += 2;
++ while (read_sr(reg) & SR_BUSY)
++ ;
++
++ return 1;
++}
++
++static int u16_reader(struct driver_data *drv_data)
++{
++ void *reg = drv_data->vaddr;
++ u16 temp;
++
++ while ((read_sr(reg) & SR_RF_NOT_EMPT)
++ && (drv_data->rx < drv_data->rx_end)) {
++ temp = read_dr(reg);
++ *(u16 *)(drv_data->rx) = temp;
++ drv_data->rx += 2;
++ }
++
++ while (read_sr(reg) & SR_BUSY)
++ ;
++
++ return drv_data->rx == drv_data->rx_end;
++}
++
++static void *next_transfer(struct driver_data *drv_data)
++{
++ struct spi_message *msg = drv_data->cur_msg;
++ struct spi_transfer *trans = drv_data->cur_transfer;
++
++ /* Move to next transfer */
++ if (trans->transfer_list.next != &msg->transfers) {
++ drv_data->cur_transfer =
++ list_entry(trans->transfer_list.next,
++ struct spi_transfer,
++ transfer_list);
++ return RUNNING_STATE;
++ } else
++ return DONE_STATE;
++}
++
++/*
++ * Note: first step is the protocol driver prepares
++ * a dma-capable memory, and this func just need translate
++ * the virt addr to physical
++ */
++static int map_dma_buffers(struct driver_data *drv_data)
++{
++ if (!drv_data->cur_msg->is_dma_mapped || !drv_data->dma_inited
++ || !drv_data->cur_chip->enable_dma)
++ return 0;
++
++ if (drv_data->cur_transfer->tx_dma)
++ drv_data->tx_dma = drv_data->cur_transfer->tx_dma;
++
++ if (drv_data->cur_transfer->rx_dma)
++ drv_data->rx_dma = drv_data->cur_transfer->rx_dma;
++
++ return 1;
++}
++
++static inline void unmap_dma_buffers(struct driver_data *drv_data)
++{
++ if (!drv_data->dma_mapped)
++ return;
++ drv_data->dma_mapped = 0;
++}
++
++/* caller already set message->status; dma and pio irqs are blocked */
++static void giveback(struct driver_data *drv_data)
++{
++ struct spi_transfer *last_transfer;
++ unsigned long flags;
++ struct spi_message *msg;
++
++ spin_lock_irqsave(&drv_data->lock, flags);
++ msg = drv_data->cur_msg;
++ drv_data->cur_msg = NULL;
++ drv_data->cur_transfer = NULL;
++ drv_data->prev_chip = drv_data->cur_chip;
++ drv_data->cur_chip = NULL;
++ queue_work(drv_data->workqueue, &drv_data->pump_messages);
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++
++ last_transfer = list_entry(msg->transfers.prev,
++ struct spi_transfer,
++ transfer_list);
++
++ if (!last_transfer->cs_change)
++ drv_data->cs_control(MRST_SPI_DEASSERT);
++
++ msg->state = NULL;
++ if (msg->complete)
++ msg->complete(msg->context);
++}
++
++static void dma_transfer(struct driver_data *drv_data, int cs_change)
++{
++#ifdef CONFIG_SPI_MRST_DMA
++ void *reg = drv_data->vaddr;
++ struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
++ struct dma_chan *txchan, *rxchan;
++ enum dma_ctrl_flags flag;
++ u16 dmacr = 0;
++
++ /* 1. setup DMA related registers */
++ if (cs_change) {
++ mrst_spi_enable(reg, 0);
++
++ write_dmardlr(0xf, reg);
++ write_dmatdlr(0x10, reg);
++
++ if (drv_data->tx_dma)
++ dmacr |= 0x2;
++ if (drv_data->rx_dma)
++ dmacr |= 0x1;
++
++ write_dmacr(dmacr, reg);
++ mrst_spi_enable(reg, 1);
++ }
++
++ if (drv_data->tx_dma)
++ drv_data->txdma_done = 0;
++
++ if (drv_data->rx_dma)
++ drv_data->rxdma_done = 0;
++
++ /* 2. start the TX dma transfer */
++ txchan = drv_data->txchan;
++ rxchan = drv_data->rxchan;
++
++ flag = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
++
++ if (drv_data->tx_dma) {
++ txdesc = txchan->device->device_prep_dma_memcpy(txchan,
++ drv_data->dma_addr, drv_data->tx_dma,
++ drv_data->len, flag);
++
++ txdesc->callback = mrst_spi_dma_done;
++ txdesc->callback_param = &drv_data->tx_param;
++ }
++
++ /* 3. start the RX dma transfer */
++ if (drv_data->rx_dma) {
++ rxdesc = rxchan->device->device_prep_dma_memcpy(rxchan,
++ drv_data->rx_dma, drv_data->dma_addr,
++ drv_data->len, flag);
++
++ rxdesc->callback = mrst_spi_dma_done;
++ rxdesc->callback_param = &drv_data->rx_param;
++ }
++
++ /* rx must be started before tx due to spi instinct */
++ if (rxdesc)
++ rxdesc->tx_submit(rxdesc);
++ if (txdesc)
++ txdesc->tx_submit(txdesc);
++#endif
++}
++
++static void int_error_stop(struct driver_data *drv_data, const char *msg)
++{
++ void *reg = drv_data->vaddr;
++
++ /* Stop and reset hw */
++ flush(drv_data);
++ write_ssienr(0, reg);
++
++ dev_err(&drv_data->pdev->dev, "%s\n", msg);
++
++ drv_data->cur_msg->state = ERROR_STATE;
++ tasklet_schedule(&drv_data->pump_transfers);
++}
++
++static void transfer_complete(struct driver_data *drv_data)
++{
++	/* Update total bytes transferred; actual_length counts bytes moved */
++ drv_data->cur_msg->actual_length += drv_data->len;
++
++ /* Move to next transfer */
++ drv_data->cur_msg->state = next_transfer(drv_data);
++
++ /* handle end of message */
++ if (drv_data->cur_msg->state == DONE_STATE) {
++ drv_data->cur_msg->status = 0;
++ giveback(drv_data);
++ } else
++ tasklet_schedule(&drv_data->pump_transfers);
++}
++
++static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
++{
++ void *reg = drv_data->vaddr;
++ u32 irq_status, irq_mask = 0x3f;
++
++ irq_status = read_isr(reg) & irq_mask;
++
++ /* error handling */
++ if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
++ read_txoicr(reg);
++ read_rxoicr(reg);
++ read_rxuicr(reg);
++ int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
++ return IRQ_HANDLED;
++ }
++
++ /* INT comes from tx */
++ if (drv_data->tx && (irq_status & SPI_INT_TXEI))
++ while (drv_data->tx < drv_data->tx_end) {
++ drv_data->write(drv_data);
++
++ if (drv_data->tx == drv_data->tx_end) {
++ spi_mask_intr(reg, SPI_INT_TXEI);
++ transfer_complete(drv_data);
++ }
++ }
++
++ /* INT comes from rx */
++ if (drv_data->rx && (irq_status & SPI_INT_RXFI)) {
++ if (drv_data->read(drv_data))
++ transfer_complete(drv_data);
++ }
++
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t mrst_spi_irq(int irq, void *dev_id)
++{
++ struct driver_data *drv_data = dev_id;
++ void *reg = drv_data->vaddr;
++
++ if (!drv_data->cur_msg) {
++ spi_mask_intr(reg, SPI_INT_TXEI);
++ /* Never fail */
++ return IRQ_HANDLED;
++ }
++
++ return drv_data->transfer_handler(drv_data);
++}
++
++/* must be called inside pump_transfers() */
++static void poll_transfer(struct driver_data *drv_data)
++{
++ if (drv_data->tx)
++ while (drv_data->write(drv_data))
++ drv_data->read(drv_data);
++
++ drv_data->read(drv_data);
++ transfer_complete(drv_data);
++}
++
++static void pump_transfers(unsigned long data)
++{
++ struct driver_data *drv_data = (struct driver_data *)data;
++ struct spi_message *message = NULL;
++ struct spi_transfer *transfer = NULL;
++ struct spi_transfer *previous = NULL;
++ struct spi_device *spi = NULL;
++ struct chip_data *chip = NULL;
++ void *reg = drv_data->vaddr;
++ u8 bits = 0;
++ u8 imask = 0;
++ u8 cs_change = 0;
++ u16 rxint_level = 0;
++ u16 txint_level = 0;
++ u16 clk_div = 0;
++ u32 speed = 0;
++ u32 cr0 = 0;
++
++ /* get current state information */
++ message = drv_data->cur_msg;
++ transfer = drv_data->cur_transfer;
++ chip = drv_data->cur_chip;
++ spi = message->spi;
++
++ if (unlikely(!chip->clk_div)) {
++ /* default for 115200 UART device */
++ if (chip->speed_hz)
++ chip->clk_div = drv_data->freq / chip->speed_hz;
++ else
++ chip->clk_div = drv_data->freq / 115200;
++ }
++
++ /* handle for abort */
++ if (message->state == ERROR_STATE) {
++ message->status = -EIO;
++ goto early_exit;
++ }
++
++ /* handle end of message */
++ if (message->state == DONE_STATE) {
++ message->status = 0;
++ goto early_exit;
++ }
++
++ /* delay if requested at end of transfer*/
++ if (message->state == RUNNING_STATE) {
++ previous = list_entry(transfer->transfer_list.prev,
++ struct spi_transfer,
++ transfer_list);
++ if (previous->delay_usecs)
++ udelay(previous->delay_usecs);
++ }
++
++ drv_data->n_bytes = chip->n_bytes;
++ drv_data->dma_width = chip->dma_width;
++ drv_data->cs_control = chip->cs_control;
++
++ drv_data->rx_dma = transfer->rx_dma;
++ drv_data->tx_dma = transfer->tx_dma;
++ drv_data->tx = (void *)transfer->tx_buf;
++ drv_data->tx_end = drv_data->tx + transfer->len;
++ drv_data->rx = transfer->rx_buf;
++ drv_data->rx_end = drv_data->rx + transfer->len;
++ drv_data->write = drv_data->tx ? chip->write : null_writer;
++ drv_data->read = drv_data->rx ? chip->read : null_reader;
++ drv_data->cs_change = transfer->cs_change;
++ drv_data->len = drv_data->cur_transfer->len;
++ if (chip != drv_data->prev_chip)
++ cs_change = 1;
++
++ /* handle per transfer options for bpw and speed */
++ cr0 = chip->cr0;
++ if (transfer->speed_hz) {
++ speed = chip->speed_hz;
++
++ if (transfer->speed_hz != speed) {
++ speed = transfer->speed_hz;
++ if (speed > drv_data->freq) {
++ printk(KERN_ERR "MRST SPI0: unsupported"
++ "freq: %dHz\n", speed);
++ message->status = -EIO;
++ goto early_exit;
++ }
++
++ /* clk_div doesn't support odd number */
++ clk_div = (drv_data->freq + speed - 1) / speed;
++ clk_div = ((clk_div + 1) >> 1) << 1;
++
++ chip->speed_hz = speed;
++ chip->clk_div = clk_div;
++ }
++ }
++
++ if (transfer->bits_per_word) {
++ bits = transfer->bits_per_word;
++
++ switch (bits) {
++ case 8:
++ drv_data->n_bytes = 1;
++ drv_data->dma_width = 1;
++ drv_data->read = drv_data->read != null_reader ?
++ u8_reader : null_reader;
++ drv_data->write = drv_data->write != null_writer ?
++ u8_writer : null_writer;
++ break;
++ case 16:
++ drv_data->n_bytes = 2;
++ drv_data->dma_width = 2;
++ drv_data->read = drv_data->read != null_reader ?
++ u16_reader : null_reader;
++ drv_data->write = drv_data->write != null_writer ?
++ u16_writer : null_writer;
++ break;
++ default:
++ printk(KERN_ERR "MRST SPI0: unsupported bits:"
++ "%db\n", bits);
++ message->status = -EIO;
++ goto early_exit;
++ }
++
++ cr0 = (bits - 1)
++ | (chip->type << SPI_FRF_OFFSET)
++ | (spi->mode << SPI_MODE_OFFSET)
++ | (chip->tmode << SPI_TMOD_OFFSET);
++ }
++
++ message->state = RUNNING_STATE;
++
++ /* try to map dma buffer and do a dma transfer if successful */
++ drv_data->dma_mapped = 0;
++ if (drv_data->len && (drv_data->len <= MRST_MAX_DMA_LEN))
++ drv_data->dma_mapped = map_dma_buffers(drv_data);
++
++ if (!drv_data->dma_mapped && !chip->poll_mode) {
++ if (drv_data->rx) {
++ if (drv_data->len >= SPI_INT_THRESHOLD)
++ rxint_level = SPI_INT_THRESHOLD;
++ else
++ rxint_level = drv_data->len;
++ imask |= SPI_INT_RXFI;
++ }
++
++ if (drv_data->tx)
++ imask |= SPI_INT_TXEI;
++ drv_data->transfer_handler = interrupt_transfer;
++ }
++
++ /*
++ * reprogram registers only if
++ * 1. chip select changes
++	 * 2. clk_div changes
++ * 3. control value changes
++ */
++ if (read_ctrl0(reg) != cr0 || cs_change || clk_div) {
++ mrst_spi_enable(reg, 0);
++
++ if (read_ctrl0(reg) != cr0)
++ write_ctrl0(cr0, reg);
++
++ if (txint_level)
++ write_txftlr(txint_level, reg);
++
++ if (rxint_level)
++ write_rxftlr(rxint_level, reg);
++
++		/* set the interrupt mask; for poll mode just disable all ints */
++ spi_mask_intr(reg, 0xff);
++ if (!chip->poll_mode)
++ spi_umask_intr(reg, imask);
++
++ spi_enable_clk(reg, clk_div ? clk_div : chip->clk_div);
++ spi_chip_sel(reg, spi->chip_select);
++ mrst_spi_enable(reg, 1);
++
++ if (cs_change)
++ drv_data->prev_chip = chip;
++ }
++
++ if (drv_data->dma_mapped)
++ dma_transfer(drv_data, cs_change);
++
++ if (chip->poll_mode)
++ poll_transfer(drv_data);
++
++ return;
++
++early_exit:
++ giveback(drv_data);
++ return;
++}
++
++static void pump_messages(struct work_struct *work)
++{
++ struct driver_data *drv_data =
++ container_of(work, struct driver_data, pump_messages);
++ unsigned long flags;
++
++ /* Lock queue and check for queue work */
++ spin_lock_irqsave(&drv_data->lock, flags);
++ if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
++ drv_data->busy = 0;
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++ return;
++ }
++
++ /* Make sure we are not already running a message */
++ if (drv_data->cur_msg) {
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++ return;
++ }
++
++ /* Extract head of queue */
++ drv_data->cur_msg = list_entry(drv_data->queue.next,
++ struct spi_message, queue);
++ list_del_init(&drv_data->cur_msg->queue);
++
++ /* Initial message state*/
++ drv_data->cur_msg->state = START_STATE;
++ drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
++ struct spi_transfer,
++ transfer_list);
++ drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
++
++ /* Mark as busy and launch transfers */
++ tasklet_schedule(&drv_data->pump_transfers);
++
++ drv_data->busy = 1;
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++}
++
++/* spi_device uses this to queue its spi_msg */
++static int mrst_spi_transfer(struct spi_device *spi, struct spi_message *msg)
++{
++ struct driver_data *drv_data = spi_master_get_devdata(spi->master);
++ unsigned long flags;
++
++ spin_lock_irqsave(&drv_data->lock, flags);
++
++ if (drv_data->run == QUEUE_STOPPED) {
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++ return -ESHUTDOWN;
++ }
++
++ msg->actual_length = 0;
++ msg->status = -EINPROGRESS;
++ msg->state = START_STATE;
++
++ list_add_tail(&msg->queue, &drv_data->queue);
++
++ if (drv_data->run == QUEUE_RUNNING && !drv_data->busy) {
++
++ if (drv_data->cur_transfer || drv_data->cur_msg)
++ queue_work(drv_data->workqueue,
++ &drv_data->pump_messages);
++ else {
++ /* if no other data transaction in air, just go */
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++ pump_messages(&drv_data->pump_messages);
++ return 0;
++ }
++ }
++
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++ return 0;
++}
++
++/* this may be called twice for each spi dev */
++static int mrst_spi_setup(struct spi_device *spi)
++{
++ struct mrst_spi_chip *chip_info = NULL;
++ struct chip_data *chip;
++
++ if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
++ return -EINVAL;
++
++ /* Only alloc on first setup */
++ chip = spi_get_ctldata(spi);
++ if (!chip) {
++ chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
++ if (!chip)
++ return -ENOMEM;
++
++ chip->cs_control = null_cs_control;
++ chip->enable_dma = 0;
++ }
++
++ /* protocol drivers may change the chip settings, so...
++ * if chip_info exists, use it */
++ chip_info = spi->controller_data;
++
++ /* chip_info doesn't always exist */
++ if (chip_info) {
++ if (chip_info->cs_control)
++ chip->cs_control = chip_info->cs_control;
++
++ chip->poll_mode = chip_info->poll_mode;
++ chip->type = chip_info->type;
++
++ chip->rx_threshold = 0;
++ chip->tx_threshold = 0;
++
++ chip->enable_dma = chip_info->enable_dma;
++ }
++
++ if (spi->bits_per_word <= 8) {
++ chip->n_bytes = 1;
++ chip->dma_width = 1;
++ chip->read = u8_reader;
++ chip->write = u8_writer;
++ } else if (spi->bits_per_word <= 16) {
++ chip->n_bytes = 2;
++ chip->dma_width = 2;
++ chip->read = u16_reader;
++ chip->write = u16_writer;
++ } else {
++ /* never take >16b case for MRST SPIC */
++ dev_err(&spi->dev, "invalid wordsize\n");
++ return -ENODEV;
++ }
++
++ chip->bits_per_word = spi->bits_per_word;
++ chip->speed_hz = spi->max_speed_hz;
++ chip->tmode = 0; /* Tx & Rx */
++ /* default SPI mode is SCPOL = 0, SCPH = 0 */
++ chip->cr0 = (chip->bits_per_word - 1)
++ | (chip->type << SPI_FRF_OFFSET)
++ | (spi->mode << SPI_MODE_OFFSET)
++ | (chip->tmode << SPI_TMOD_OFFSET);
++
++ spi_set_ctldata(spi, chip);
++ return 0;
++}
++
++static void mrst_spi_cleanup(struct spi_device *spi)
++{
++ struct chip_data *chip = spi_get_ctldata(spi);
++
++ kfree(chip);
++}
++
++static int __init init_queue(struct driver_data *drv_data)
++{
++ INIT_LIST_HEAD(&drv_data->queue);
++ spin_lock_init(&drv_data->lock);
++
++ drv_data->run = QUEUE_STOPPED;
++ drv_data->busy = 0;
++
++ tasklet_init(&drv_data->pump_transfers,
++ pump_transfers, (unsigned long)drv_data);
++
++ INIT_WORK(&drv_data->pump_messages, pump_messages);
++ drv_data->workqueue = create_singlethread_workqueue(
++ dev_name(drv_data->master->dev.parent));
++ if (drv_data->workqueue == NULL)
++ return -EBUSY;
++
++ return 0;
++}
++
++static int start_queue(struct driver_data *drv_data)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&drv_data->lock, flags);
++
++ if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++ return -EBUSY;
++ }
++
++ drv_data->run = QUEUE_RUNNING;
++ drv_data->cur_msg = NULL;
++ drv_data->cur_transfer = NULL;
++ drv_data->cur_chip = NULL;
++ drv_data->prev_chip = NULL;
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++
++ queue_work(drv_data->workqueue, &drv_data->pump_messages);
++
++ return 0;
++}
++
++static int stop_queue(struct driver_data *drv_data)
++{
++ unsigned long flags;
++ unsigned limit = 500;
++ int status = 0;
++
++ spin_lock_irqsave(&drv_data->lock, flags);
++ drv_data->run = QUEUE_STOPPED;
++ while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++ msleep(10);
++ spin_lock_irqsave(&drv_data->lock, flags);
++ }
++
++ if (!list_empty(&drv_data->queue) || drv_data->busy)
++ status = -EBUSY;
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++
++ return status;
++}
++
++static int destroy_queue(struct driver_data *drv_data)
++{
++ int status;
++
++ status = stop_queue(drv_data);
++ if (status != 0)
++ return status;
++ destroy_workqueue(drv_data->workqueue);
++ return 0;
++}
++
++/* restart the spic, disable all interrupts, clean rx fifo */
++static void spi_hw_init(struct driver_data *drv_data)
++{
++ void *reg = drv_data->vaddr;
++
++ mrst_spi_enable(reg, 0x0);
++ spi_mask_intr(reg, 0xff);
++ mrst_spi_enable(reg, 0x1);
++
++ flush(drv_data);
++}
++
++static int __devinit mrst_spi_probe(struct pci_dev *pdev,
++ const struct pci_device_id *ent)
++{
++ int ret;
++ struct driver_data *drv_data;
++ struct spi_master *master;
++ struct device *dev = &pdev->dev;
++ u32 *clk_reg, clk_cdiv;
++ int pci_bar = 0;
++
++ BUG_ON(pdev == NULL);
++ BUG_ON(ent == NULL);
++
++ printk(KERN_INFO "MRST: found PCI SPI controller(ID: %04x:%04x)\n",
++ pdev->vendor, pdev->device);
++
++ ret = pci_enable_device(pdev);
++ if (ret)
++ return ret;
++
++ master = spi_alloc_master(dev, sizeof(struct driver_data));
++ if (!master) {
++ ret = -ENOMEM;
++ goto exit;
++ }
++
++ drv_data = spi_master_get_devdata(master);
++ drv_data->master = master;
++ drv_data->pdev = pdev;
++ drv_data->type = SSI_MOTO_SPI;
++ drv_data->prev_chip = NULL;
++
++ /* get basic io resource and map it */
++ drv_data->paddr = (void *)pci_resource_start(pdev, pci_bar);
++ drv_data->iolen = pci_resource_len(pdev, pci_bar);
++ drv_data->dma_addr = (dma_addr_t)(drv_data->paddr + 0x60);
++
++ ret = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
++ if (ret)
++ goto err_free_master;
++
++ drv_data->vaddr = ioremap_nocache((unsigned long)drv_data->paddr,
++ drv_data->iolen);
++ if (!drv_data->vaddr) {
++ ret = -ENOMEM;
++ goto err_free_pci;
++ }
++
++ clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16);
++ if (!clk_reg) {
++ ret = -ENOMEM;
++ goto err_iounmap;
++ }
++
++ /* get SPI controller operating freq info */
++ clk_cdiv = ((*clk_reg) & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET;
++ drv_data->freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);
++ iounmap(clk_reg);
++
++ drv_data->irq = pdev->irq;
++ ret = request_irq(drv_data->irq, mrst_spi_irq, 0,
++ "mrst_spic0", drv_data);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "can not get IRQ\n");
++ goto err_iounmap;
++ }
++
++ spin_lock_init(&drv_data->lock);
++
++ master->mode_bits = SPI_CPOL | SPI_CPHA;
++
++ master->bus_num = 0;
++ master->num_chipselect = 16;
++ master->cleanup = mrst_spi_cleanup;
++ master->setup = mrst_spi_setup;
++ master->transfer = mrst_spi_transfer;
++
++ drv_data->dma_inited = 0;
++#ifdef CONFIG_SPI_MRST_DMA
++ mrst_spi_dma_init(drv_data);
++#endif
++
++ /* basic HW init */
++ spi_hw_init(drv_data);
++
++ /* Initial and start queue */
++ ret = init_queue(drv_data);
++ if (ret) {
++ dev_err(&pdev->dev, "problem initializing queue\n");
++ goto err_diable_hw;
++ }
++ ret = start_queue(drv_data);
++ if (ret) {
++ dev_err(&pdev->dev, "problem starting queue\n");
++ goto err_diable_hw;
++ }
++
++ ret = spi_register_master(master);
++ if (ret) {
++ dev_err(&pdev->dev, "problem registering spi master\n");
++ goto err_queue_alloc;
++ }
++
++ /* PCI hook and SPI hook use the same drv data */
++ pci_set_drvdata(pdev, drv_data);
++ mrst_spi_debugfs_init(drv_data);
++
++ return 0;
++
++err_queue_alloc:
++ destroy_queue(drv_data);
++#ifdef CONFIG_SPI_MRST_DMA
++ mrst_spi_dma_exit(drv_data);
++#endif
++err_diable_hw:
++ mrst_spi_enable(drv_data->vaddr, 0);
++ free_irq(drv_data->irq, drv_data);
++err_iounmap:
++ iounmap(drv_data->vaddr);
++err_free_pci:
++ pci_release_region(pdev, pci_bar);
++err_free_master:
++ spi_master_put(master);
++exit:
++ pci_disable_device(pdev);
++ return ret;
++}
++
++static void __devexit mrst_spi_remove(struct pci_dev *pdev)
++{
++ struct driver_data *drv_data = pci_get_drvdata(pdev);
++ void *reg;
++ int status = 0;
++
++ if (!drv_data)
++ return;
++
++ mrst_spi_debugfs_remove(drv_data);
++ pci_set_drvdata(pdev, NULL);
++
++ /* remove the queue */
++ status = destroy_queue(drv_data);
++ if (status != 0)
++ dev_err(&pdev->dev, "mrst_spi_remove: workqueue will not "
++ "complete, message memory not freed\n");
++
++#ifdef CONFIG_SPI_MRST_DMA
++ mrst_spi_dma_exit(drv_data);
++#endif
++
++ reg = drv_data->vaddr;
++ mrst_spi_enable(reg, 0);
++ spi_disable_clk(reg);
++
++ /* release IRQ */
++ free_irq(drv_data->irq, drv_data);
++
++ iounmap(drv_data->vaddr);
++ pci_release_region(pdev, 0);
++
++ /* disconnect from the SPI framework */
++ spi_unregister_master(drv_data->master);
++ pci_disable_device(pdev);
++}
++
++#ifdef CONFIG_PM
++static int mrst_spi_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ struct driver_data *drv_data = pci_get_drvdata(pdev);
++ void *reg = drv_data->vaddr;
++ int status = 0;
++
++ status = stop_queue(drv_data);
++ if (status)
++ return status;
++
++ mrst_spi_enable(reg, 0);
++ spi_disable_clk(reg);
++ return status;
++}
++
++static int mrst_spi_resume(struct pci_dev *pdev)
++{
++ struct driver_data *drv_data = pci_get_drvdata(pdev);
++ int status = 0;
++
++ spi_hw_init(drv_data);
++
++ /* Start the queue running */
++ status = start_queue(drv_data);
++ if (status)
++ dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
++ return status;
++}
++#else
++#define mrst_spi_suspend NULL
++#define mrst_spi_resume NULL
++#endif
++
++static const struct pci_device_id pci_ids[] __devinitdata = {
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
++ {},
++};
++
++static struct pci_driver mrst_spi_driver = {
++ .name = DRIVER_NAME,
++ .id_table = pci_ids,
++ .probe = mrst_spi_probe,
++ .remove = __devexit_p(mrst_spi_remove),
++ .suspend = mrst_spi_suspend,
++ .resume = mrst_spi_resume,
++};
++
++static int __init mrst_spi_init(void)
++{
++ return pci_register_driver(&mrst_spi_driver);
++}
++
++static void __exit mrst_spi_exit(void)
++{
++ pci_unregister_driver(&mrst_spi_driver);
++}
++
++module_init(mrst_spi_init);
++module_exit(mrst_spi_exit);
++
++MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
++MODULE_DESCRIPTION("Intel Moorestown SPI controller driver");
++MODULE_LICENSE("GPL v2");
+Index: linux-2.6.33/include/linux/spi/mrst_spi.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/include/linux/spi/mrst_spi.h
+@@ -0,0 +1,162 @@
++#ifndef MRST_SPI_HEADER_H
++#define MRST_SPI_HEADER_H
++#include <linux/io.h>
++
++/* bit fields in CTRLR0 */
++#define SPI_DFS_OFFSET 0
++
++#define SPI_FRF_OFFSET 4
++#define SPI_FRF_SPI 0x0
++#define SPI_FRF_SSP 0x1
++#define SPI_FRF_MICROWIRE 0x2
++#define SPI_FRF_RESV 0x3
++
++#define SPI_MODE_OFFSET 6
++#define SPI_SCPH_OFFSET 6
++#define SPI_SCOL_OFFSET 7
++#define SPI_TMOD_OFFSET 8
++#define SPI_TMOD_TR 0x0 /* xmit & recv */
++#define SPI_TMOD_TO 0x1 /* xmit only */
++#define SPI_TMOD_RO 0x2 /* recv only */
++#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */
++
++#define SPI_SLVOE_OFFSET 10
++#define SPI_SRL_OFFSET 11
++#define SPI_CFS_OFFSET 12
++
++/* bit fields in SR, 7 bits */
++#define SR_MASK 0x7f /* cover 7 bits */
++#define SR_BUSY (1 << 0)
++#define SR_TF_NOT_FULL (1 << 1)
++#define SR_TF_EMPT (1 << 2)
++#define SR_RF_NOT_EMPT (1 << 3)
++#define SR_RF_FULL (1 << 4)
++#define SR_TX_ERR (1 << 5)
++#define SR_DCOL (1 << 6)
++
++/* bit fields in ISR, IMR, RISR, 7 bits */
++#define SPI_INT_TXEI (1 << 0)
++#define SPI_INT_TXOI (1 << 1)
++#define SPI_INT_RXUI (1 << 2)
++#define SPI_INT_RXOI (1 << 3)
++#define SPI_INT_RXFI (1 << 4)
++#define SPI_INT_MSTI (1 << 5)
++
++/* TX RX interrupt level threshold, max can be 256 */
++#define SPI_INT_THRESHOLD 32
++
++#define DEFINE_MRST_SPI_RW_REG(reg, off) \
++static inline u32 read_##reg(void *p) \
++{ return readl(p + (off)); } \
++static inline void write_##reg(u32 v, void *p) \
++{ writel(v, p + (off)); }
++
++#define DEFINE_MRST_SPI_RO_REG(reg, off) \
++static inline u32 read_##reg(void *p) \
++{ return readl(p + (off)); } \
++
++DEFINE_MRST_SPI_RW_REG(ctrl0, 0x00)
++DEFINE_MRST_SPI_RW_REG(ctrl1, 0x04)
++DEFINE_MRST_SPI_RW_REG(ssienr, 0x08)
++DEFINE_MRST_SPI_RW_REG(mwcr, 0x0c)
++DEFINE_MRST_SPI_RW_REG(ser, 0x10)
++DEFINE_MRST_SPI_RW_REG(baudr, 0x14)
++DEFINE_MRST_SPI_RW_REG(txftlr, 0x18)
++DEFINE_MRST_SPI_RW_REG(rxftlr, 0x1c)
++DEFINE_MRST_SPI_RO_REG(txflr, 0x20)
++DEFINE_MRST_SPI_RO_REG(rxflr, 0x24)
++DEFINE_MRST_SPI_RO_REG(sr, 0x28)
++DEFINE_MRST_SPI_RW_REG(imr, 0x2c)
++DEFINE_MRST_SPI_RO_REG(isr, 0x30)
++DEFINE_MRST_SPI_RO_REG(risr, 0x34)
++DEFINE_MRST_SPI_RO_REG(txoicr, 0x38)
++DEFINE_MRST_SPI_RO_REG(rxoicr, 0x3c)
++DEFINE_MRST_SPI_RO_REG(rxuicr, 0x40)
++DEFINE_MRST_SPI_RO_REG(msticr, 0x44)
++DEFINE_MRST_SPI_RO_REG(icr, 0x48)
++DEFINE_MRST_SPI_RW_REG(dmacr, 0x4c)
++DEFINE_MRST_SPI_RW_REG(dmatdlr, 0x50)
++DEFINE_MRST_SPI_RW_REG(dmardlr, 0x54)
++DEFINE_MRST_SPI_RO_REG(idr, 0x58)
++DEFINE_MRST_SPI_RO_REG(version, 0x5c)
++DEFINE_MRST_SPI_RW_REG(dr, 0x60)
++
++static inline void mrst_spi_enable(void *reg, int enable)
++{
++ if (enable)
++ write_ssienr(0x1, reg);
++ else
++ write_ssienr(0x0, reg);
++}
++
++static inline void spi_enable_clk(void *reg, u16 div)
++{
++ write_baudr(div, reg);
++}
++
++static inline void spi_chip_sel(void *reg, u16 cs)
++{
++ if (cs > 4)
++ return;
++ write_ser((1 << cs), reg);
++}
++
++static inline void spi_disable_clk(void *reg)
++{
++	/* setting the divider to 0 will disable the clock */
++ write_baudr(0, reg);
++}
++
++/* disable some INT */
++static inline void spi_mask_intr(void *reg, u32 mask)
++{
++ u32 imr;
++ imr = read_imr(reg) & ~mask;
++ write_imr(imr, reg);
++}
++
++/* enable INT */
++static inline void spi_umask_intr(void *reg, u32 mask)
++{
++ u32 imr;
++ imr = read_imr(reg) | mask;
++ write_imr(imr, reg);
++}
++
++enum mrst_ssi_type {
++ SSI_MOTO_SPI = 0,
++ SSI_TI_SSP,
++ SSI_NS_MICROWIRE,
++};
++
++/* usually will be controller_data for SPI slave devices */
++struct mrst_spi_chip {
++	u8 poll_mode;	/* 0 for controller polling mode */
++	u8 type;	/* SPI/SSP/Microwire */
++ u8 enable_dma;
++ void (*cs_control)(u32 command);
++};
++
++#define SPI_DIB_NAME_LEN 16
++#define SPI_DIB_SPEC_INFO_LEN 10
++
++#define MRST_GPE_IRQ_VIA_GPIO_BIT (1 << 15)
++/* SPI device info block related */
++struct spi_dib_header {
++ u32 signature;
++ u32 length;
++ u8 rev;
++ u8 checksum;
++ u8 dib[0];
++} __attribute__((packed));
++
++struct spi_dib {
++ u16 host_num;
++ u16 cs;
++ u16 irq;
++ char name[SPI_DIB_NAME_LEN];
++ u8 dev_data[SPI_DIB_SPEC_INFO_LEN];
++} __attribute__((packed));
++
++extern struct console early_mrst_console;
++#endif /* #ifndef MRST_SPI_HEADER_H */
+Index: linux-2.6.33/drivers/serial/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/serial/Kconfig
++++ linux-2.6.33/drivers/serial/Kconfig
+@@ -688,6 +688,27 @@ config SERIAL_SA1100_CONSOLE
+ your boot loader (lilo or loadlin) about how to pass options to the
+ kernel at boot time.)
+
++config SERIAL_MAX3110
++ tristate "SPI UART driver for Max3110"
++ depends on SPI_MRST
++ select SERIAL_CORE
++ select SERIAL_CORE_CONSOLE
++ help
++ This is the UART protocol driver for MAX3110 device on
++ Intel Moorestown platform
++
++config MRST_MAX3110
++ boolean "Add Max3110 support for Moorestown platform"
++ default y
++ depends on SERIAL_MAX3110
++
++config MRST_MAX3110_IRQ
++ boolean "Enable GPIO IRQ for Max3110 over Moorestown"
++ default n
++ depends on MRST_MAX3110 && GPIO_LANGWELL
++ help
++ This has to be enabled after Moorestown GPIO driver is loaded
++
+ config SERIAL_BFIN
+ tristate "Blackfin serial port support"
+ depends on BLACKFIN
+Index: linux-2.6.33/drivers/serial/Makefile
+===================================================================
+--- linux-2.6.33.orig/drivers/serial/Makefile
++++ linux-2.6.33/drivers/serial/Makefile
+@@ -82,3 +82,4 @@ obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgd
+ obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
+ obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
+ obj-$(CONFIG_SERIAL_GRLIB_GAISLER_APBUART) += apbuart.o
++obj-$(CONFIG_SERIAL_MAX3110) += max3110.o
+Index: linux-2.6.33/drivers/serial/max3110.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/serial/max3110.c
+@@ -0,0 +1,850 @@
++/*
++ * max3110.c - spi uart protocol driver for Maxim 3110 on Moorestown
++ *
++ * Copyright (C) Intel 2008 Feng Tang <feng.tang@intel.com>
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++
++/*
++ * Note:
++ * 1. From Max3110 spec, the Rx FIFO has 8 words, while the Tx FIFO only has
++ * 1 word. If SPI master controller doesn't support sclk frequency change,
++ * then the char need be sent out one by one with some delay
++ *
++ * 2. Currently only the RX available interrupt is used; no need to wait for TXE
++ * interrupt for a low speed UART device
++ */
++
++#include <linux/module.h>
++#include <linux/ioport.h>
++#include <linux/init.h>
++#include <linux/console.h>
++#include <linux/sysrq.h>
++#include <linux/platform_device.h>
++#include <linux/tty.h>
++#include <linux/tty_flip.h>
++#include <linux/serial_core.h>
++#include <linux/serial_reg.h>
++
++#include <linux/kthread.h>
++#include <linux/delay.h>
++#include <asm/atomic.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/mrst_spi.h>
++
++#include "max3110.h"
++
++#define PR_FMT "max3110: "
++
++struct uart_max3110 {
++ struct uart_port port;
++ struct spi_device *spi;
++ char *name;
++
++ wait_queue_head_t wq;
++ struct task_struct *main_thread;
++ struct task_struct *read_thread;
++ int mthread_up;
++ spinlock_t lock;
++
++ u32 baud;
++ u16 cur_conf;
++ u8 clock;
++ u8 parity, word_7bits;
++
++ atomic_t uart_tx_need;
++
++ /* console related */
++ struct circ_buf con_xmit;
++ atomic_t con_tx_need;
++
++ /* irq related */
++ u16 irq;
++ atomic_t irq_pending;
++};
++
++/* global data structure, may need be removed */
++struct uart_max3110 *pmax;
++static inline void receive_char(struct uart_max3110 *max, u8 ch);
++static void receive_chars(struct uart_max3110 *max,
++ unsigned char *str, int len);
++static int max3110_read_multi(struct uart_max3110 *max, int len, u8 *buf);
++static void max3110_console_receive(struct uart_max3110 *max);
++
++int max3110_write_then_read(struct uart_max3110 *max,
++ const u8 *txbuf, u8 *rxbuf, unsigned len, int always_fast)
++{
++ struct spi_device *spi = max->spi;
++ struct spi_message message;
++ struct spi_transfer x;
++ int ret;
++
++ if (!txbuf || !rxbuf)
++ return -EINVAL;
++
++ spi_message_init(&message);
++ memset(&x, 0, sizeof x);
++ x.len = len;
++ x.tx_buf = txbuf;
++ x.rx_buf = rxbuf;
++ spi_message_add_tail(&x, &message);
++
++ if (always_fast)
++ x.speed_hz = 3125000;
++ else if (max->baud)
++ x.speed_hz = max->baud;
++
++ /* Do the i/o */
++ ret = spi_sync(spi, &message);
++ return ret;
++}
++
++/* Write a u16 to the device, and return one u16 read back */
++int max3110_out(struct uart_max3110 *max, const u16 out)
++{
++ u16 tmp;
++ int ret;
++
++ ret = max3110_write_then_read(max, (u8 *)&out, (u8 *)&tmp, 2, 1);
++ if (ret)
++ return ret;
++
++ /* If some valid data is read back */
++ if (tmp & MAX3110_READ_DATA_AVAILABLE)
++ receive_char(max, (tmp & 0xff));
++
++ return ret;
++}
++
++#define MAX_READ_LEN 20
++/*
++ * This is usually used to read data from SPIC RX FIFO, which doesn't
++ * need any delay like flushing character out. It returns how many
++ * valide bytes are read back
++ */
++static int max3110_read_multi(struct uart_max3110 *max, int len, u8 *buf)
++{
++ u16 out[MAX_READ_LEN], in[MAX_READ_LEN];
++ u8 *pbuf, valid_str[MAX_READ_LEN];
++ int i, j, bytelen;
++
++ if (len > MAX_READ_LEN) {
++ pr_err(PR_FMT "read len %d is too large\n", len);
++ return 0;
++ }
++
++ bytelen = len * 2;
++ memset(out, 0, bytelen);
++ memset(in, 0, bytelen);
++
++ if (max3110_write_then_read(max, (u8 *)out, (u8 *)in, bytelen, 1))
++ return 0;
++
++ /* If caller don't provide a buffer, then handle received char */
++ pbuf = buf ? buf : valid_str;
++
++ for (i = 0, j = 0; i < len; i++) {
++ if (in[i] & MAX3110_READ_DATA_AVAILABLE)
++ pbuf[j++] = (u8)(in[i] & 0xff);
++ }
++
++ if (j && (pbuf == valid_str))
++ receive_chars(max, valid_str, j);
++
++ return j;
++}
++
++static void serial_m3110_con_putchar(struct uart_port *port, int ch)
++{
++ struct uart_max3110 *max =
++ container_of(port, struct uart_max3110, port);
++ struct circ_buf *xmit = &max->con_xmit;
++
++ if (uart_circ_chars_free(xmit)) {
++ xmit->buf[xmit->head] = (char)ch;
++ xmit->head = (xmit->head + 1) & (PAGE_SIZE - 1);
++ }
++
++ if (!atomic_read(&max->con_tx_need)) {
++ atomic_set(&max->con_tx_need, 1);
++ wake_up_process(max->main_thread);
++ }
++}
++
++/*
++ * Print a string to the serial port trying not to disturb
++ * any possible real use of the port...
++ *
++ * The console_lock must be held when we get here.
++ */
++static void serial_m3110_con_write(struct console *co,
++ const char *s, unsigned int count)
++{
++ if (!pmax)
++ return;
++
++ uart_console_write(&pmax->port, s, count, serial_m3110_con_putchar);
++}
++
++static int __init
++serial_m3110_con_setup(struct console *co, char *options)
++{
++ struct uart_max3110 *max = pmax;
++ int baud = 115200;
++ int bits = 8;
++ int parity = 'n';
++ int flow = 'n';
++
++ pr_info(PR_FMT "setting up console\n");
++
++ if (!max) {
++ pr_err(PR_FMT "pmax is NULL, return");
++ return -ENODEV;
++ }
++
++ if (options)
++ uart_parse_options(options, &baud, &parity, &bits, &flow);
++
++ return uart_set_options(&max->port, co, baud, parity, bits, flow);
++}
++
++static struct tty_driver *serial_m3110_con_device(struct console *co,
++ int *index)
++{
++ struct uart_driver *p = co->data;
++ *index = co->index;
++ return p->tty_driver;
++}
++
++static struct uart_driver serial_m3110_reg;
++static struct console serial_m3110_console = {
++ .name = "ttyS",
++ .write = serial_m3110_con_write,
++ .device = serial_m3110_con_device,
++ .setup = serial_m3110_con_setup,
++ .flags = CON_PRINTBUFFER,
++ .index = -1,
++ .data = &serial_m3110_reg,
++};
++
++#define MRST_CONSOLE (&serial_m3110_console)
++
++static unsigned int serial_m3110_tx_empty(struct uart_port *port)
++{
++ return 1;
++}
++
++static void serial_m3110_stop_tx(struct uart_port *port)
++{
++ return;
++}
++
++/* stop_rx will be called in spin_lock env */
++static void serial_m3110_stop_rx(struct uart_port *port)
++{
++ return;
++}
++
++#define WORDS_PER_XFER 128
++static inline void send_circ_buf(struct uart_max3110 *max,
++ struct circ_buf *xmit)
++{
++ int len, left = 0;
++ u16 obuf[WORDS_PER_XFER], ibuf[WORDS_PER_XFER];
++ u8 valid_str[WORDS_PER_XFER];
++ int i, j;
++
++ while (!uart_circ_empty(xmit)) {
++ left = uart_circ_chars_pending(xmit);
++ while (left) {
++ len = (left >= WORDS_PER_XFER) ? WORDS_PER_XFER : left;
++
++ memset(obuf, 0, len * 2);
++ memset(ibuf, 0, len * 2);
++ for (i = 0; i < len; i++) {
++ obuf[i] = (u8)xmit->buf[xmit->tail] | WD_TAG;
++ xmit->tail = (xmit->tail + 1) &
++ (UART_XMIT_SIZE - 1);
++ }
++ max3110_write_then_read(max, (u8 *)obuf,
++ (u8 *)ibuf, len * 2, 0);
++
++ for (i = 0, j = 0; i < len; i++) {
++ if (ibuf[i] & MAX3110_READ_DATA_AVAILABLE)
++ valid_str[j++] = (u8)(ibuf[i] & 0xff);
++ }
++
++ if (j)
++ receive_chars(max, valid_str, j);
++
++ max->port.icount.tx += len;
++ left -= len;
++ }
++ }
++}
++
++static void transmit_char(struct uart_max3110 *max)
++{
++ struct uart_port *port = &max->port;
++ struct circ_buf *xmit = &port->state->xmit;
++
++ if (uart_circ_empty(xmit) || uart_tx_stopped(port))
++ return;
++
++ send_circ_buf(max, xmit);
++
++ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
++ uart_write_wakeup(port);
++
++ if (uart_circ_empty(xmit))
++ serial_m3110_stop_tx(port);
++}
++
++/* This will be called by uart_write() and tty_write, can't
++ * go to sleep */
++static void serial_m3110_start_tx(struct uart_port *port)
++{
++ struct uart_max3110 *max =
++ container_of(port, struct uart_max3110, port);
++
++ if (!atomic_read(&max->uart_tx_need)) {
++ atomic_set(&max->uart_tx_need, 1);
++ wake_up_process(max->main_thread);
++ }
++}
++
++static void receive_chars(struct uart_max3110 *max, unsigned char *str, int len)
++{
++ struct uart_port *port = &max->port;
++ struct tty_struct *tty;
++ int usable;
++
++ /* If uart is not opened, just return */
++ if (!port->state)
++ return;
++
++ tty = port->state->port.tty;
++ if (!tty)
++ return; /* receive some char before the tty is opened */
++
++ while (len) {
++ usable = tty_buffer_request_room(tty, len);
++ if (usable) {
++ tty_insert_flip_string(tty, str, usable);
++ str += usable;
++ port->icount.rx += usable;
++ tty_flip_buffer_push(tty);
++ }
++ len -= usable;
++ }
++}
++
++static inline void receive_char(struct uart_max3110 *max, u8 ch)
++{
++ receive_chars(max, &ch, 1);
++}
++
++static void max3110_console_receive(struct uart_max3110 *max)
++{
++ int loop = 1, num, total = 0;
++ u8 recv_buf[512], *pbuf;
++
++ pbuf = recv_buf;
++ do {
++ num = max3110_read_multi(max, 8, pbuf);
++
++ if (num) {
++ loop = 10;
++ pbuf += num;
++ total += num;
++
++ if (total >= 500) {
++ receive_chars(max, recv_buf, total);
++ pbuf = recv_buf;
++ total = 0;
++ }
++ }
++ } while (--loop);
++
++ if (total)
++ receive_chars(max, recv_buf, total);
++}
++
++static int max3110_main_thread(void *_max)
++{
++ struct uart_max3110 *max = _max;
++ wait_queue_head_t *wq = &max->wq;
++ int ret = 0;
++ struct circ_buf *xmit = &max->con_xmit;
++
++ init_waitqueue_head(wq);
++ pr_info(PR_FMT "start main thread\n");
++
++ do {
++ wait_event_interruptible(*wq, (atomic_read(&max->irq_pending) ||
++ atomic_read(&max->con_tx_need) ||
++ atomic_read(&max->uart_tx_need)) ||
++ kthread_should_stop());
++ max->mthread_up = 1;
++
++#ifdef CONFIG_MRST_MAX3110_IRQ
++ if (atomic_read(&max->irq_pending)) {
++ max3110_console_receive(max);
++ atomic_set(&max->irq_pending, 0);
++ }
++#endif
++
++ /* first handle console output */
++ if (atomic_read(&max->con_tx_need)) {
++ send_circ_buf(max, xmit);
++ atomic_set(&max->con_tx_need, 0);
++ }
++
++ /* handle uart output */
++ if (atomic_read(&max->uart_tx_need)) {
++ transmit_char(max);
++ atomic_set(&max->uart_tx_need, 0);
++ }
++ max->mthread_up = 0;
++ } while (!kthread_should_stop());
++
++ return ret;
++}
++
++#ifdef CONFIG_MRST_MAX3110_IRQ
++irqreturn_t static serial_m3110_irq(int irq, void *dev_id)
++{
++ struct uart_max3110 *max = dev_id;
++
++ /* max3110's irq is a falling edge, not level triggered,
++ * so no need to disable the irq */
++ if (!atomic_read(&max->irq_pending)) {
++ atomic_inc(&max->irq_pending);
++ wake_up_process(max->main_thread);
++ }
++ return IRQ_HANDLED;
++}
++#else
++/* if don't use RX IRQ, then need a thread to polling read */
++static int max3110_read_thread(void *_max)
++{
++ struct uart_max3110 *max = _max;
++
++ pr_info(PR_FMT "start read thread\n");
++ do {
++ if (!max->mthread_up)
++ max3110_console_receive(max);
++
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule_timeout(HZ / 20);
++ } while (!kthread_should_stop());
++
++ return 0;
++}
++#endif
++
++static int serial_m3110_startup(struct uart_port *port)
++{
++ struct uart_max3110 *max =
++ container_of(port, struct uart_max3110, port);
++ u16 config = 0;
++ int ret = 0;
++
++ if (port->line != 0)
++ pr_err(PR_FMT "uart port startup failed\n");
++
++ /* firstly disable all IRQ and config it to 115200, 8n1 */
++ config = WC_TAG | WC_FIFO_ENABLE
++ | WC_1_STOPBITS
++ | WC_8BIT_WORD
++ | WC_BAUD_DR2;
++ ret = max3110_out(max, config);
++
++ /* as we use thread to handle tx/rx, need set low latency */
++ port->state->port.tty->low_latency = 1;
++
++#ifdef CONFIG_MRST_MAX3110_IRQ
++ ret = request_irq(max->irq, serial_m3110_irq,
++ IRQ_TYPE_EDGE_FALLING, "max3110", max);
++ if (ret)
++ return ret;
++
++ /* enable RX IRQ only */
++ config |= WC_RXA_IRQ_ENABLE;
++ max3110_out(max, config);
++#else
++ /* if IRQ is disabled, start a read thread for input data */
++ max->read_thread =
++ kthread_run(max3110_read_thread, max, "max3110_read");
++#endif
++
++ max->cur_conf = config;
++ return 0;
++}
++
++static void serial_m3110_shutdown(struct uart_port *port)
++{
++ struct uart_max3110 *max =
++ container_of(port, struct uart_max3110, port);
++ u16 config;
++
++ if (max->read_thread) {
++ kthread_stop(max->read_thread);
++ max->read_thread = NULL;
++ }
++
++#ifdef CONFIG_MRST_MAX3110_IRQ
++ free_irq(max->irq, max);
++#endif
++
++ /* Disable interrupts from this port */
++ config = WC_TAG | WC_SW_SHDI;
++ max3110_out(max, config);
++}
++
++static void serial_m3110_release_port(struct uart_port *port)
++{
++}
++
++static int serial_m3110_request_port(struct uart_port *port)
++{
++ return 0;
++}
++
++static void serial_m3110_config_port(struct uart_port *port, int flags)
++{
++ /* give it fake type */
++ port->type = PORT_PXA;
++}
++
++static int
++serial_m3110_verify_port(struct uart_port *port, struct serial_struct *ser)
++{
++ /* we don't want the core code to modify any port params */
++ return -EINVAL;
++}
++
++
++static const char *serial_m3110_type(struct uart_port *port)
++{
++ struct uart_max3110 *max =
++ container_of(port, struct uart_max3110, port);
++ return max->name;
++}
++
++static void
++serial_m3110_set_termios(struct uart_port *port, struct ktermios *termios,
++ struct ktermios *old)
++{
++ struct uart_max3110 *max =
++ container_of(port, struct uart_max3110, port);
++ unsigned char cval;
++ unsigned int baud, parity = 0;
++ int clk_div = -1;
++ u16 new_conf = max->cur_conf;
++
++ switch (termios->c_cflag & CSIZE) {
++ case CS7:
++ cval = UART_LCR_WLEN7;
++ new_conf |= WC_7BIT_WORD;
++ break;
++ default:
++ case CS8:
++ cval = UART_LCR_WLEN8;
++ new_conf |= WC_8BIT_WORD;
++ break;
++ }
++
++ baud = uart_get_baud_rate(port, termios, old, 0, 230400);
++
++ /* first calc the div for 1.8MHZ clock case */
++ switch (baud) {
++ case 300:
++ clk_div = WC_BAUD_DR384;
++ break;
++ case 600:
++ clk_div = WC_BAUD_DR192;
++ break;
++ case 1200:
++ clk_div = WC_BAUD_DR96;
++ break;
++ case 2400:
++ clk_div = WC_BAUD_DR48;
++ break;
++ case 4800:
++ clk_div = WC_BAUD_DR24;
++ break;
++ case 9600:
++ clk_div = WC_BAUD_DR12;
++ break;
++ case 19200:
++ clk_div = WC_BAUD_DR6;
++ break;
++ case 38400:
++ clk_div = WC_BAUD_DR3;
++ break;
++ case 57600:
++ clk_div = WC_BAUD_DR2;
++ break;
++ case 115200:
++ clk_div = WC_BAUD_DR1;
++ break;
++ default:
++ /* pick the previous baud rate */
++ baud = max->baud;
++ clk_div = max->cur_conf & WC_BAUD_DIV_MASK;
++ tty_termios_encode_baud_rate(termios, baud, baud);
++ }
++
++ if (max->clock & MAX3110_HIGH_CLK) {
++ clk_div += 1;
++ /* high clk version max3110 doesn't support B300 */
++ if (baud == 300)
++ baud = 600;
++ if (baud == 230400)
++ clk_div = WC_BAUD_DR1;
++ tty_termios_encode_baud_rate(termios, baud, baud);
++ }
++
++ new_conf = (new_conf & ~WC_BAUD_DIV_MASK) | clk_div;
++ if (termios->c_cflag & CSTOPB)
++ new_conf |= WC_2_STOPBITS;
++ else
++ new_conf &= ~WC_2_STOPBITS;
++
++ if (termios->c_cflag & PARENB) {
++ new_conf |= WC_PARITY_ENABLE;
++ parity |= UART_LCR_PARITY;
++ } else
++ new_conf &= ~WC_PARITY_ENABLE;
++
++ if (!(termios->c_cflag & PARODD))
++ parity |= UART_LCR_EPAR;
++ max->parity = parity;
++
++ uart_update_timeout(port, termios->c_cflag, baud);
++
++ new_conf |= WC_TAG;
++ if (new_conf != max->cur_conf) {
++ max3110_out(max, new_conf);
++ max->cur_conf = new_conf;
++ max->baud = baud;
++ }
++}
++
++/* don't handle hw handshaking */
++static unsigned int serial_m3110_get_mctrl(struct uart_port *port)
++{
++ return TIOCM_DSR | TIOCM_CAR | TIOCM_DSR;
++}
++
++static void serial_m3110_set_mctrl(struct uart_port *port, unsigned int mctrl)
++{
++}
++
++static void serial_m3110_break_ctl(struct uart_port *port, int break_state)
++{
++}
++
++static void serial_m3110_pm(struct uart_port *port, unsigned int state,
++ unsigned int oldstate)
++{
++}
++
++static void serial_m3110_enable_ms(struct uart_port *port)
++{
++}
++
++struct uart_ops serial_m3110_ops = {
++ .tx_empty = serial_m3110_tx_empty,
++ .set_mctrl = serial_m3110_set_mctrl,
++ .get_mctrl = serial_m3110_get_mctrl,
++ .stop_tx = serial_m3110_stop_tx,
++ .start_tx = serial_m3110_start_tx,
++ .stop_rx = serial_m3110_stop_rx,
++ .enable_ms = serial_m3110_enable_ms,
++ .break_ctl = serial_m3110_break_ctl,
++ .startup = serial_m3110_startup,
++ .shutdown = serial_m3110_shutdown,
++ .set_termios = serial_m3110_set_termios, /* must have */
++ .pm = serial_m3110_pm,
++ .type = serial_m3110_type,
++ .release_port = serial_m3110_release_port,
++ .request_port = serial_m3110_request_port,
++ .config_port = serial_m3110_config_port,
++ .verify_port = serial_m3110_verify_port,
++};
++
++static struct uart_driver serial_m3110_reg = {
++ .owner = THIS_MODULE,
++ .driver_name = "MRST serial",
++ .dev_name = "ttyS",
++ .major = TTY_MAJOR,
++ .minor = 64,
++ .nr = 1,
++ .cons = MRST_CONSOLE,
++};
++
++static int serial_m3110_suspend(struct spi_device *spi, pm_message_t state)
++{
++ return 0;
++}
++
++static int serial_m3110_resume(struct spi_device *spi)
++{
++ return 0;
++}
++
++#ifdef CONFIG_MRST_MAX3110
++static struct mrst_spi_chip spi0_uart = {
++ .poll_mode = 1,
++ .enable_dma = 0,
++ .type = SPI_FRF_SPI,
++};
++#endif
++
++static int serial_m3110_probe(struct spi_device *spi)
++{
++ struct uart_max3110 *max;
++ int ret;
++ unsigned char *buffer;
++
++ max = kzalloc(sizeof(*max), GFP_KERNEL);
++ if (!max)
++ return -ENOMEM;
++
++ /* set spi info */
++ spi->mode = SPI_MODE_0;
++ spi->bits_per_word = 16;
++#ifdef CONFIG_MRST_MAX3110
++ max->clock = MAX3110_HIGH_CLK;
++ spi->controller_data = &spi0_uart;
++#endif
++ spi_setup(spi);
++
++ max->port.type = PORT_PXA; /* need apply for a max3110 type */
++ max->port.fifosize = 2; /* only have 16b buffer */
++ max->port.ops = &serial_m3110_ops;
++ max->port.line = 0;
++ max->port.dev = &spi->dev;
++ max->port.uartclk = 115200;
++
++ max->spi = spi;
++ max->name = spi->modalias; /* use spi name as the name */
++ max->irq = (u16)spi->irq;
++
++ spin_lock_init(&max->lock);
++
++ max->word_7bits = 0;
++ max->parity = 0;
++ max->baud = 0;
++
++ max->cur_conf = 0;
++ atomic_set(&max->irq_pending, 0);
++
++ buffer = (unsigned char *)__get_free_page(GFP_KERNEL);
++ if (!buffer) {
++ ret = -ENOMEM;
++ goto err_get_page;
++ }
++ max->con_xmit.buf = (unsigned char *)buffer;
++ max->con_xmit.head = max->con_xmit.tail = 0;
++
++ max->main_thread = kthread_run(max3110_main_thread,
++ max, "max3110_main");
++ if (IS_ERR(max->main_thread)) {
++ ret = PTR_ERR(max->main_thread);
++ goto err_kthread;
++ }
++
++ pmax = max;
++ /* give membase a psudo value to pass serial_core's check */
++ max->port.membase = (void *)0xff110000;
++ uart_add_one_port(&serial_m3110_reg, &max->port);
++
++ return 0;
++
++err_kthread:
++ free_page((unsigned long)buffer);
++err_get_page:
++ pmax = NULL;
++ kfree(max);
++ return ret;
++}
++
++static int max3110_remove(struct spi_device *dev)
++{
++ struct uart_max3110 *max = pmax;
++
++ if (!pmax)
++ return 0;
++
++ pmax = NULL;
++ uart_remove_one_port(&serial_m3110_reg, &max->port);
++
++ free_page((unsigned long)max->con_xmit.buf);
++
++ if (max->main_thread)
++ kthread_stop(max->main_thread);
++
++ kfree(max);
++ return 0;
++}
++
++static struct spi_driver uart_max3110_driver = {
++ .driver = {
++ .name = "spi_max3111",
++ .bus = &spi_bus_type,
++ .owner = THIS_MODULE,
++ },
++ .probe = serial_m3110_probe,
++ .remove = __devexit_p(max3110_remove),
++ .suspend = serial_m3110_suspend,
++ .resume = serial_m3110_resume,
++};
++
++
++int __init serial_m3110_init(void)
++{
++ int ret = 0;
++
++ ret = uart_register_driver(&serial_m3110_reg);
++ if (ret)
++ return ret;
++
++ ret = spi_register_driver(&uart_max3110_driver);
++ if (ret)
++ uart_unregister_driver(&serial_m3110_reg);
++
++ return ret;
++}
++
++void __exit serial_m3110_exit(void)
++{
++ spi_unregister_driver(&uart_max3110_driver);
++ uart_unregister_driver(&serial_m3110_reg);
++}
++
++module_init(serial_m3110_init);
++module_exit(serial_m3110_exit);
++
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("max3110-uart");
+Index: linux-2.6.33/drivers/serial/max3110.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/serial/max3110.h
+@@ -0,0 +1,59 @@
++#ifndef _MAX3110_HEAD_FILE_
++#define _MAX3110_HEAD_FILE_
++
++#define MAX3110_HIGH_CLK 0x1 /* 3.6864 MHZ */
++#define MAX3110_LOW_CLK 0x0 /* 1.8432 MHZ */
++
++/* status bits for all 4 MAX3110 operate modes */
++#define MAX3110_READ_DATA_AVAILABLE (1 << 15)
++#define MAX3110_WRITE_BUF_EMPTY (1 << 14)
++
++#define WC_TAG (3 << 14)
++#define RC_TAG (1 << 14)
++#define WD_TAG (2 << 14)
++#define RD_TAG (0 << 14)
++
++/* bits def for write configuration */
++#define WC_FIFO_ENABLE_MASK (1 << 13)
++#define WC_FIFO_ENABLE (0 << 13)
++
++#define WC_SW_SHDI (1 << 12)
++
++#define WC_IRQ_MASK (0xF << 8)
++#define WC_TXE_IRQ_ENABLE (1 << 11) /* TX empty irq */
++#define WC_RXA_IRQ_ENABLE (1 << 10) /* RX availabe irq */
++#define WC_PAR_HIGH_IRQ_ENABLE (1 << 9)
++#define WC_REC_ACT_IRQ_ENABLE (1 << 8)
++
++#define WC_IRDA_ENABLE (1 << 7)
++
++#define WC_STOPBITS_MASK (1 << 6)
++#define WC_2_STOPBITS (1 << 6)
++#define WC_1_STOPBITS (0 << 6)
++
++#define WC_PARITY_ENABLE_MASK (1 << 5)
++#define WC_PARITY_ENABLE (1 << 5)
++
++#define WC_WORDLEN_MASK (1 << 4)
++#define WC_7BIT_WORD (1 << 4)
++#define WC_8BIT_WORD (0 << 4)
++
++#define WC_BAUD_DIV_MASK (0xF)
++#define WC_BAUD_DR1 (0x0)
++#define WC_BAUD_DR2 (0x1)
++#define WC_BAUD_DR4 (0x2)
++#define WC_BAUD_DR8 (0x3)
++#define WC_BAUD_DR16 (0x4)
++#define WC_BAUD_DR32 (0x5)
++#define WC_BAUD_DR64 (0x6)
++#define WC_BAUD_DR128 (0x7)
++#define WC_BAUD_DR3 (0x8)
++#define WC_BAUD_DR6 (0x9)
++#define WC_BAUD_DR12 (0xA)
++#define WC_BAUD_DR24 (0xB)
++#define WC_BAUD_DR48 (0xC)
++#define WC_BAUD_DR96 (0xD)
++#define WC_BAUD_DR192 (0xE)
++#define WC_BAUD_DR384 (0xF)
++
++#endif
+Index: linux-2.6.33/arch/x86/Kconfig.debug
+===================================================================
+--- linux-2.6.33.orig/arch/x86/Kconfig.debug
++++ linux-2.6.33/arch/x86/Kconfig.debug
+@@ -43,6 +43,10 @@ config EARLY_PRINTK
+ with klogd/syslogd or the X server. You should normally N here,
+ unless you want to debug such a crash.
+
++config X86_MRST_EARLY_PRINTK
++ bool "Early printk for MRST platform support"
++ depends on EARLY_PRINTK && X86_MRST
++
+ config EARLY_PRINTK_DBGP
+ bool "Early printk via EHCI debug port"
+ default n
+Index: linux-2.6.33/arch/x86/kernel/early_printk.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/early_printk.c
++++ linux-2.6.33/arch/x86/kernel/early_printk.c
+@@ -14,6 +14,7 @@
+ #include <xen/hvc-console.h>
+ #include <asm/pci-direct.h>
+ #include <asm/fixmap.h>
++#include <linux/spi/mrst_spi.h>
+ #include <asm/pgtable.h>
+ #include <linux/usb/ehci_def.h>
+
+@@ -231,6 +232,10 @@ static int __init setup_early_printk(cha
+ if (!strncmp(buf, "xen", 3))
+ early_console_register(&xenboot_console, keep);
+ #endif
++#ifdef CONFIG_X86_MRST_EARLY_PRINTK
++ if (!strncmp(buf, "mrst", 4))
++ early_console_register(&early_mrst_console, keep);
++#endif
+ buf++;
+ }
+ return 0;
+Index: linux-2.6.33/arch/x86/kernel/mrst_earlyprintk.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/kernel/mrst_earlyprintk.c
+@@ -0,0 +1,177 @@
++/*
++ * mrst_earlyprintk.c - spi-uart early printk for Intel Moorestown platform
++ *
++ * Copyright (c) 2008 Intel Corporation
++ * Author: Feng Tang(feng.tang@intel.com)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ */
++
++#include <linux/console.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/spi/mrst_spi.h>
++
++#include <asm/fixmap.h>
++#include <asm/pgtable.h>
++
++#define MRST_SPI_TIMEOUT 0x200000
++#define MRST_REGBASE_SPI0 0xff128000
++#define MRST_CLK_SPI0_REG 0xff11d86c
++
++/* use SPI0 register for MRST x86 core */
++static unsigned long mrst_spi_paddr = MRST_REGBASE_SPI0;
++
++/* always contains a accessable address, start with 0 */
++static void *pspi;
++static u32 *pclk_spi0;
++static int mrst_spi_inited;
++
++/*
++ * One trick for the early printk is that it could be called
++ * before and after the real page table is enabled for kernel,
++ * so the PHY IO registers should be mapped twice. And a flag
++ * "real_pgt_is_up" is used as an indicator
++ */
++static int real_pgt_is_up;
++
++static void early_mrst_spi_init(void)
++{
++ u32 ctrlr0 = 0;
++ u32 spi0_cdiv;
++ static u32 freq; /* freq info only need be searched once */
++
++ if (pspi && mrst_spi_inited)
++ return;
++
++ if (!freq) {
++ set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, MRST_CLK_SPI0_REG);
++ pclk_spi0 = (void *)(__fix_to_virt(FIX_EARLYCON_MEM_BASE) +
++ (MRST_CLK_SPI0_REG & (PAGE_SIZE - 1)));
++
++ spi0_cdiv = ((*pclk_spi0) & 0xe00) >> 9;
++ freq = 100000000 / (spi0_cdiv + 1);
++ }
++
++ set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, mrst_spi_paddr);
++ pspi = (void *)(__fix_to_virt(FIX_EARLYCON_MEM_BASE) +
++ (mrst_spi_paddr & (PAGE_SIZE - 1)));
++
++ /* disable SPI controller */
++ write_ssienr(0x0, pspi);
++
++ /* set control param, 8 bits, transmit only mode */
++ ctrlr0 = read_ctrl0(pspi);
++
++ ctrlr0 &= 0xfcc0;
++ ctrlr0 |= 0xf | (SPI_FRF_SPI << SPI_FRF_OFFSET)
++ | (SPI_TMOD_TO << SPI_TMOD_OFFSET);
++ write_ctrl0(ctrlr0, pspi);
++
++ /* change the spi0 clk to comply with 115200 bps */
++ write_baudr(freq/115200, pspi);
++
++ /* disable all INT for early phase */
++ write_imr(0x0, pspi);
++
++ /* set the cs to max3110 */
++ write_ser(0x2, pspi);
++
++ /* enable the HW, the last step for HW init */
++ write_ssienr(0x1, pspi);
++
++ mrst_spi_inited = 1;
++}
++
++/* set the ratio rate, INT */
++static void max3110_write_config(void)
++{
++ u16 config;
++
++ /* 115200, TM not set, no parity, 8bit word */
++ config = 0xc001;
++ write_dr(config, pspi);
++}
++
++/* transfer char to a eligibal word and send to max3110 */
++static void max3110_write_data(char c)
++{
++ u16 data;
++
++ data = 0x8000 | c;
++ write_dr(data, pspi);
++}
++
++/* slave select should be called in the read/write function */
++static int early_mrst_spi_putc(char c)
++{
++ unsigned int timeout;
++ u32 sr;
++
++ timeout = MRST_SPI_TIMEOUT;
++ /* early putc need make sure the TX FIFO is not full*/
++ while (timeout--) {
++ sr = read_sr(pspi);
++ if (!(sr & SR_TF_NOT_FULL))
++ cpu_relax();
++ else
++ break;
++ }
++
++ if (timeout == 0xffffffff) {
++ printk(KERN_INFO "SPI: waiting timeout \n");
++ return -1;
++ }
++
++ max3110_write_data(c);
++ return 0;
++}
++
++/* early SPI only use polling mode */
++static void early_mrst_spi_write(struct console *con,
++ const char *str, unsigned n)
++{
++ int i;
++
++ if ((read_cr3() == __pa(swapper_pg_dir)) && !real_pgt_is_up) {
++ mrst_spi_inited = 0;
++ real_pgt_is_up = 1;
++ }
++
++ if (!mrst_spi_inited) {
++ early_mrst_spi_init();
++ max3110_write_config();
++ }
++
++ for (i = 0; i < n && *str; i++) {
++ if (*str == '\n')
++ early_mrst_spi_putc('\r');
++ early_mrst_spi_putc(*str);
++
++ str++;
++ }
++}
++
++struct console early_mrst_console = {
++ .name = "earlymrst",
++ .write = early_mrst_spi_write,
++ .flags = CON_PRINTBUFFER,
++ .index = -1,
++};
++
++/* a debug function */
++void mrst_early_printk(const char *fmt, ...)
++{
++ char buf[512];
++ int n;
++ va_list ap;
++
++ va_start(ap, fmt);
++ n = vscnprintf(buf, 512, fmt, ap);
++ va_end(ap);
++
++ early_mrst_console.write(&early_mrst_console, buf, n);
++}
+Index: linux-2.6.33/arch/x86/include/asm/ipc_defs.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/include/asm/ipc_defs.h
+@@ -0,0 +1,217 @@
++/*
++*ipc_defs.h - Header file defining data types and functions for ipc driver.
++*
++*Copyright (C) 2008 Intel Corp
++*Copyright (C) 2008 Sreenidhi Gurudatt <sreenidhi.b.gurudatt@intel.com>
++*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++*
++*This program is free software; you can redistribute it and/or modify
++*it under the terms of the GNU General Public License as published by
++*the Free Software Foundation; version 2 of the License.
++*
++*This program is distributed in the hope that it will be useful, but
++*WITHOUT ANY WARRANTY; without even the implied warranty of
++*MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++*General Public License for more details.
++ *
++*You should have received a copy of the GNU General Public License along
++*with this program; if not, write to the Free Software Foundation, Inc.,
++*59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++*
++*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++*
++*This driver implements core IPC kernel functions to read/write and execute
++*various commands supported by System controller firmware for Moorestown
++*platform.
++*/
++
++#ifndef __IPC_DEFS_H__
++#define __IPC_DEFS_H__
++
++#include <linux/init.h>
++#include <linux/module.h>
++
++#define E_INVALID_CMD -249
++#define E_READ_USER_CMD -250
++#define E_READ_USER_DATA -251
++#define E_WRITE_USER_DATA -252
++#define E_PMIC_MALLOC -253
++
++#define MAX_PMICREGS 5
++#define MAX_PMIC_MOD_REGS 4
++
++#ifndef FALSE
++#define FALSE 0
++#define TRUE 1
++#endif
++#define SUCCESS 0
++
++/*
++ * List of commands sent by calling host
++ * drivers to IPC_Driver
++*/
++
++/* CCA battery driver specific commands.
++ * Thise commands are shared across IPC driver
++ * and calling host driver
++ */
++
++#define IPC_WATCHDOG 0xA0
++#define IPC_PROGRAM_BUS_MASTER 0xA1
++#define DEVICE_FW_UPGRADE 0xA2
++#define GET_FW_VERSION 0xA3
++
++#define IPC_BATT_CCA_READ 0xB0
++#define IPC_BATT_CCA_WRITE 0xB1
++#define IPC_BATT_GET_PROP 0xB2
++
++#define IPC_PMIC_REGISTER_READ_NON_BLOCKING 0xEB
++#define IPC_READ32 0xEC
++#define IPC_WRITE32 0xED
++#define IPC_LPE_READ 0xEE
++#define IPC_LPE_WRITE 0xEF
++#define IPC_SEND_COMMAND 0xFA
++#define IPC_PMIC_REGISTER_READ 0xFB
++#define IPC_PMIC_REGISTER_READ_MODIFY 0xFC
++#define IPC_PMIC_REGISTER_WRITE 0xFD
++#define IPC_CHECK_STATUS 0xFE
++#define GET_SCU_FIRMWARE_VERSION 0xFF
++
++#define MAX_PMICREGS 5
++#define MAX_PMIC_MOD_REGS 4
++
++/* Adding the error code*/
++#define E_INVALID_PARAM -0xA0
++#define E_NUM_ENTRIES_OUT_OF_RANGE -0xA1
++#define E_CMD_FAILED -0xA2
++#define E_NO_INTERRUPT_ON_IOC -0xA3
++#define E_QUEUE_IS_FULL -0xA4
++
++/* VRTC IPC CMD ID and sub id */
++#define IPC_VRTC_CMD 0xFA
++#define IPC_VRTC_SET_TIME 0x01
++#define IPC_VRTC_SET_ALARM 0x02
++
++struct ipc_cmd_val {
++ /*
++ *More fields to be added for
++ *future enhancements
++ */
++ u32 ipc_cmd_data;
++};
++
++struct ipc_cmd_type {
++ u8 cmd;
++ u32 data;
++ u8 value;
++ u8 ioc;
++};
++
++/*
++ * Structures defined for battery PMIC driver
++ * This structure is used by the following commands
++ * IPC_BATT_CCA_READ and IPC_BATT_CCA_WRITE
++ */
++struct ipc_batt_cca_data {
++ int cca_val;
++};
++
++/*
++ * Structures defined for battery PMIC driver
++ * This structure is used by IPC_BATT_GET_PROP
++ */
++struct ipc_batt_prop_data {
++ u32 batt_value1;
++ u8 batt_value2[5];
++};
++
++struct ipc_reg_data {
++ u8 ioc;
++ u32 address;
++ u32 data;
++};
++
++struct ipc_cmd {
++ u8 cmd;
++ u32 data;
++};
++
++struct pmicmodreg {
++ u16 register_address;
++ u8 value;
++ u8 bit_map;
++};
++
++struct pmicreg {
++ u16 register_address;
++ u8 value;
++};
++
++struct ipc_pmic_reg_data {
++ bool ioc;
++ struct pmicreg pmic_reg_data[MAX_PMICREGS];
++ u8 num_entries;
++};
++
++struct ipc_pmic_mod_reg_data {
++ bool ioc;
++ struct pmicmodreg pmic_mod_reg_data[MAX_PMIC_MOD_REGS];
++ u8 num_entries;
++};
++
++/* Firmware ingredient version information.
++ * fw_data[0] = scu_rt_minor;
++ * fw_data[1] = scu_rt_major;
++ * fw_data[2] = scu_bs_minor;
++ * fw_data[3] = scu_bs_major;
++ * fw_data[4] = punit_minor;
++ * fw_data[5] = punit_major;
++ * fw_data[6] = x86_minor;
++ * fw_data[7] = x86_major;
++ * fw_data[8] = spectra_minor;
++ * fw_data[9] = spectra_major;
++ * fw_data[10] = val_hook_minor;
++ * fw_data[11] = val_hook_major;
++ * fw_data[12] = ifw_minor;
++ * fw_data[13] = ifw_major;
++ * fw_data[14] = rfu1;
++ * fw_data[15] = rfu2;
++*/
++struct watchdog_reg_data {
++ int payload1;
++ int payload2;
++ bool ioc;
++};
++
++struct ipc_io_bus_master_regs {
++ u32 ctrl_reg_addr;
++ u32 ctrl_reg_data;
++};
++
++struct ipc_non_blocking_pmic_read{
++ struct ipc_pmic_reg_data pmic_nb_read;
++ void *context;
++ int (*callback_host)(struct ipc_pmic_reg_data pmic_read_data,
++ void *context);
++};
++
++int ipc_check_status(void);
++int mrst_get_firmware_version(unsigned char *mrst_fw_ver_info);
++int ipc_config_cmd(struct ipc_cmd_type ipc_cmd,
++ u32 ipc_cmd_len, void *cmd_data);
++int ipc_pmic_register_write(struct ipc_pmic_reg_data *p_write_reg_data,
++ u8 ipc_blocking_flag);
++int ipc_pmic_register_read(struct ipc_pmic_reg_data *p_read_reg_data);
++int ipc_pmic_register_read_modify(struct ipc_pmic_mod_reg_data
++ *p_read_mod_reg_data);
++int mrst_ipc_read32(struct ipc_reg_data *p_reg_data);
++int mrst_ipc_write32(struct ipc_reg_data *p_reg_data);
++int ipc_set_watchdog(struct watchdog_reg_data *p_watchdog_data);
++int ipc_program_io_bus_master(struct ipc_io_bus_master_regs
++ *p_reg_data);
++int ipc_pmic_register_read_non_blocking(struct ipc_non_blocking_pmic_read
++ *p_nb_read);
++int ipc_device_fw_upgrade(u8 *cmd_data, u32 ipc_cmd_len);
++int lnw_ipc_single_cmd(u8 cmd_id, u8 sub_id, int size, int msi);
++
++#endif
+Index: linux-2.6.33/arch/x86/kernel/ipc_mrst.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/kernel/ipc_mrst.c
+@@ -0,0 +1,1612 @@
++/*
++ * ipc_mrst.c: Driver for Langwell IPC1
++ *
++ * (C) Copyright 2008 Intel Corporation
++ * Author: Sreenidhi Gurudatt (sreenidhi.b.gurudatt@intel.com)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ *
++ * Note:
++ * Langwell provides two IPC units to communicate with IA host. IPC1 is
++ * dedicated for IA. IPC commands results in LNW SCU interrupt. The
++ * initial implementation of this driver is platform specific. It will be
++ * converted to a PCI driver once SCU FW is in place.
++ * Log: Tested after submitting bugzilla patch - 24th December 08
++ * Log: Implemented Error Handling features and resolved IPC driver sighting
++ * PMIC Read/Write calls now take 80 to 200usecs - March 09 09.
++ * Log: Adding the IO BUS Master programming support - March 09 09.
++ */
++#include <linux/delay.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/sysdev.h>
++#include <linux/pm.h>
++#include <linux/pci.h>
++#include <asm/ipc_defs.h>
++#include <linux/workqueue.h>
++#include <linux/sched.h>
++
++#include "ipc_mrst.h"
++
++#ifndef CONFIG_PCI
++#error "This file is PCI bus glue.CONFIG_PCI must be defined."
++#endif
++
++/*virtual memory address for IPC base returned by IOREMAP().*/
++void __iomem *p_ipc_base;
++void __iomem *p_i2c_ser_bus;
++void __iomem *p_dfu_fw_base;
++void __iomem *p_dfu_mailbox_base;
++static unsigned char fw_ver_data[16];
++
++static wait_queue_head_t wait;
++static struct semaphore sema_ipc;
++static int scu_cmd_completed = FALSE;
++static bool non_blocking_read_flag = FALSE;
++static struct ipc_work_struct ipc_wq;
++static struct ipc_non_blocking_pmic_read pmic_read_que[MAX_NB_BUF_SIZE];
++static unsigned int cmd_id;
++static int (*callback)(struct ipc_pmic_reg_data pmic_read_data, void *context);
++static DEFINE_MUTEX(mrst_ipc_mutex);
++
++#ifdef LNW_IPC_DEBUG
++
++#define lnw_ipc_dbg(fmt, args...) \
++ do { printk(fmt, ## args); } while (0)
++#else
++#define lnw_ipc_dbg(fmt, args...) do { } while (0)
++#endif
++static const char ipc_name[] = "ipc_mrst";
++
++unsigned long lnw_ipc_address;
++static void __iomem *lnw_ipc_virt_address;
++static unsigned short cmdid_pool = 0xffff;
++static inline int lnw_ipc_set_mapping(struct pci_dev *dev)
++{
++ unsigned long cadr;
++ cadr = dev->resource[0].start;
++ cadr &= PCI_BASE_ADDRESS_MEM_MASK;
++ if (!cadr) {
++ printk(KERN_INFO "No PCI resource for IPC\n");
++ return -ENODEV;
++ }
++ lnw_ipc_virt_address = ioremap_nocache(cadr, 0x1000);
++ if (lnw_ipc_virt_address != NULL) {
++ dev_info(&dev->dev, "lnw ipc base found 0x%lup: 0x%p\n",
++ cadr, lnw_ipc_virt_address);
++ return 0;
++ }
++ printk(KERN_INFO "Failed map LNW IPC1 phy address at %lu\n", cadr);
++ return -ENODEV;
++}
++
++static inline void lnw_ipc_clear_mapping(void)
++{
++ iounmap(lnw_ipc_virt_address);
++ lnw_ipc_virt_address = NULL;
++}
++
++unsigned long lnw_ipc_readl(unsigned long a)
++{
++ return readl(lnw_ipc_virt_address + a);
++}
++
++static inline void lnw_ipc_writel(unsigned long d, unsigned long a)
++{
++ writel(d, lnw_ipc_virt_address + a);
++}
++
++static unsigned char lnw_ipc_assign_cmdid(void)
++{
++ unsigned char cmdid = 0;
++ unsigned short thebit;
++ thebit = cmdid_pool&(~cmdid_pool + 1);
++ printk(KERN_INFO "pool=0x%04x thebit=0x%04x\n",
++ cmdid_pool, thebit);
++ while (thebit >> cmdid)
++ cmdid++;
++ printk(KERN_INFO "Allocate IPC cmd ID %d\n", cmdid);
++ cmdid_pool &= ~thebit;
++ return cmdid;
++}
++
++int lnw_ipc_single_cmd(u8 cmd_id, u8 sub_id, int size, int msi)
++{
++ unsigned long cmdreg, stsreg, retry;
++
++ if (!lnw_ipc_virt_address) {
++ printk(KERN_ERR "No IPC mapping\n");
++ goto err_ipccmd;
++ }
++ if (size >= 16) {
++ printk(KERN_ERR "IPC message size too big %d\n", size);
++ goto err_ipccmd;
++ }
++
++ WARN_ON((msi != 0) && (msi != 1));
++
++ cmdreg = cmd_id
++ | (sub_id << 12)
++ | (size << 16)
++ | (msi << 8);
++
++ lnw_ipc_writel(cmdreg, LNW_IPC_CMD);
++
++ /* check status make sure the command is received by SCU */
++ retry = 1000;
++ stsreg = lnw_ipc_readl(LNW_IPC_STS);
++ if (stsreg & LNW_IPC_STS_ERR) {
++ lnw_ipc_dbg("IPC command ID %d error\n", cmd_id);
++ goto err_ipccmd;
++ }
++ while ((stsreg & LNW_IPC_STS_BUSY) && retry) {
++ lnw_ipc_dbg("IPC command ID %d busy\n", cmd_id);
++ stsreg = lnw_ipc_readl(LNW_IPC_STS);
++ udelay(10);
++ retry--;
++ }
++
++ if (!retry)
++ printk(KERN_ERR "IPC command ID %d failed/timeout", cmd_id);
++ else
++ lnw_ipc_dbg("IPC command ID %d completed\n", cmd_id);
++
++ return 0;
++
++err_ipccmd:
++ return -1;
++}
++EXPORT_SYMBOL(lnw_ipc_single_cmd);
++
++int lnw_ipc_send_cmd(unsigned char cmd, int size, int msi)
++{
++ unsigned long cmdreg, stsreg;
++ unsigned char cmdid, retry;
++
++ if (!lnw_ipc_virt_address) {
++ printk(KERN_ERR "No IPC mapping\n");
++ goto err_ipccmd;
++ }
++ if (size >= 16) {
++ printk(KERN_ERR "IPC message size too big %d\n", size);
++ goto err_ipccmd;
++ }
++
++ cmdid = lnw_ipc_assign_cmdid();
++ cmdreg = lnw_ipc_readl(LNW_IPC_CMD);
++ cmdreg |= cmdid << 12;
++ cmdreg |= size << 16;
++ if (msi)
++ cmdreg |= 1 << 8;
++ lnw_ipc_writel(cmdreg, LNW_IPC_CMD);
++ /* check status make sure the command is received by SCU */
++ retry = 10;
++ stsreg = lnw_ipc_readl(LNW_IPC_STS);
++ if (stsreg&LNW_IPC_STS_ERR) {
++ lnw_ipc_dbg("IPC command ID %d error\n", cmdid);
++ goto err_ipccmd;
++ }
++ while ((stsreg&LNW_IPC_STS_BUSY) || retry) {
++ lnw_ipc_dbg("IPC command ID %d busy\n", cmdid);
++ stsreg = lnw_ipc_readl(LNW_IPC_STS);
++ udelay(10);
++ retry--;
++ }
++ if (!retry)
++ lnw_ipc_dbg("IPC command ID %d failed/timeout\n", cmdid);
++ else
++ lnw_ipc_dbg("IPC command ID %d completed\n", cmdid);
++
++err_ipccmd:
++ return -1;
++}
++/*
++ * For IPC transfer modes except read DMA, there is no need for MSI,
++ * so the driver polls status after each IPC command is issued.
++ */
++static irqreturn_t ipc_irq(int irq, void *dev_id)
++{
++ union ipc_sts ipc_sts_reg;
++
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++
++ if (!ipc_sts_reg.ipc_sts_parts.busy) {
++ /*Call on NON Blocking flag being set.*/
++ if (non_blocking_read_flag == TRUE) {
++ schedule_work(&ipc_wq.ipc_work);
++ } else {
++ scu_cmd_completed = TRUE;
++ wake_up_interruptible(&wait);
++ }
++ }
++ return IRQ_HANDLED;
++}
++
++static const struct ipc_driver ipc_mrst_driver = {
++ .name = "MRST IPC Controller",
++ /*
++ * generic hardware linkage
++ */
++ .irq = ipc_irq,
++ .flags = 0,
++};
++
++static int ipc_mrst_pci_probe(struct pci_dev *dev,
++ const struct pci_device_id *id)
++{
++ int err, retval, i;
++ lnw_ipc_dbg("Attempt to enable IPC irq 0x%x, pin %d\n",
++ dev->irq, dev->pin);
++ err = pci_enable_device(dev);
++ if (err) {
++ dev_err(&dev->dev, "Failed to enable MSRT IPC(%d)\n",
++ err);
++ goto exit;
++ }
++ retval = pci_request_regions(dev, "ipc_mrst");
++ if (retval)
++ dev_err(&dev->dev, "Failed to allocate resource\
++ for MRST IPC(%d)\n", retval);
++
++ init_ipc_driver();
++
++ /* 0 means cmd ID is in use */
++ cmdid_pool = 0xffff;
++ /* initialize mapping */
++ retval = lnw_ipc_set_mapping(dev);
++ if (retval)
++ goto exit;
++ /* clear buffer */
++ for (i = 0; i < LNW_IPC_RWBUF_SIZE; i = i + 4) {
++ lnw_ipc_writel(0, LNW_IPC_WBUF + i);
++ lnw_ipc_writel(0, LNW_IPC_RBUF + i);
++ }
++ retval = request_irq(dev->irq, ipc_irq, IRQF_SHARED,
++ "ipc_mrst", (void *)&ipc_mrst_driver);
++ if (retval) {
++ printk(KERN_ERR "ipc: cannot register ISR %p irq %d ret %d\n",
++ ipc_irq, dev->irq, retval);
++ return -EIO;
++ }
++exit:
++ return 0;
++}
++
++void ipc_mrst_pci_remove(struct pci_dev *pdev)
++{
++ pci_release_regions(pdev);
++}
++
++/* PCI driver selection metadata; PCI hotplugging uses this */
++static const struct pci_device_id pci_ids[] = {
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080e)}
++};
++
++MODULE_DEVICE_TABLE(pci, pci_ids);
++
++/* pci driver glue; this is a "new style" PCI driver module */
++static struct pci_driver ipc_mrst_pci_driver = {
++ .name = (char *)ipc_name,
++ .id_table = pci_ids,
++ .probe = ipc_mrst_pci_probe,
++ .remove = ipc_mrst_pci_remove,
++};
++
++static int __init ipc_mrst_init(void)
++{
++ int retval = 0;
++ lnw_ipc_dbg("%s\n", __func__);
++ retval = pci_register_driver(&ipc_mrst_pci_driver);
++ if (retval < 0) {
++ printk(KERN_CRIT "Failed to register %s\n",
++ ipc_mrst_pci_driver.name);
++ pci_unregister_driver(&ipc_mrst_pci_driver);
++ } else {
++ printk(KERN_CRIT "****Loaded %s driver version %s****\n",
++ ipc_mrst_pci_driver.name, MRST_IPC_DRIVER_VERSION);
++ cache_mrst_firmware_version();
++ }
++ return retval;
++}
++
++static void __exit ipc_mrst_exit(void)
++{
++ iounmap(p_ipc_base);
++ iounmap(p_i2c_ser_bus);
++ pci_unregister_driver(&ipc_mrst_pci_driver);
++ de_init_ipc_driver();
++}
++
++/*
++ * Steps to read PMIC Register (pseudocode)
++ * 1) Construct the SCU FW command structure with normal read
++ * 2) Fill the IPC_WBUF with the p_reg_data
++ * 3) write the command to(Memory Mapped address) IPC_CMD register
++ * 4) Wait for an interrupt from SCUFirmware or do a timeout.
++*/
++int ipc_check_status(void)
++{
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_INFO "IPC_Driver module busy\n");
++ return -EBUSY;
++ }
++
++ lnw_ipc_dbg(KERN_INFO
++ "ipc_driver: in <%s> -><%s> file line = <%d>\n",
++ __func__, __FILE__, __LINE__);
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++EXPORT_SYMBOL(ipc_check_status);
++
++int ipc_config_cmd(struct ipc_cmd_type cca_cmd, u32 ipc_cmd_len, void *cmd_data)
++{
++
++ union ipc_fw_cmd ipc_cca_cmd;
++ union ipc_sts ipc_sts_reg;
++ u32 retry = MAX_RETRY_CNT;
++ u32 ipc_wbuf;
++ u8 cbuf[MAX_NUM_ENTRIES] = { '\0' };
++ u32 rbuf_offset = 2;
++ u32 i = 0;
++
++ if ((&cca_cmd == NULL) || (cmd_data == NULL)) {
++ printk(KERN_INFO "Invalid arguments recieved:\
++ <%s> -> <%s> file line = <%d>\n", __func__, __FILE__, __LINE__);
++ return -EBUSY;
++ }
++
++ if (ipc_cmd_len < 4) {
++ printk(KERN_INFO
++ "ipc_send_config: Invalid input param (size) recieved \n");
++ return -EBUSY;
++ }
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_INFO "IPC_Driver module busy\n");
++ return -EBUSY;
++ }
++ lnw_ipc_dbg(KERN_INFO
++ "ipc_driver: in <%s> -> <%s> file at line no = <%d>\n",
++ __func__, __FILE__, __LINE__);
++
++ switch (cca_cmd.cmd) {
++ case IPC_BATT_CCA_READ:
++ {
++ struct ipc_batt_cca_data *cca_data =
++ (struct ipc_batt_cca_data *)cmd_data;
++
++ lnw_ipc_dbg(KERN_INFO "Recieved IPC_BATT_CCA_READ\n");
++ ipc_cca_cmd.cmd_parts.cmd = IPC_CCA_CMD_READ_WRITE;
++ ipc_cca_cmd.cmd_parts.ioc = cca_cmd.ioc;
++ ipc_cca_cmd.cmd_parts.rfu1 = 0x0;
++ ipc_cca_cmd.cmd_parts.cmd_ID = CCA_REG_READ;
++ ipc_cca_cmd.cmd_parts.size = 0;
++ ipc_cca_cmd.cmd_parts.rfu2 = 0x0;
++
++ lnw_ipc_dbg(KERN_INFO "ipc_cca_cmd.cmd_data = 0x%x\n",
++ ipc_cca_cmd.cmd_data);
++ /* Check for Status bit = 0 before sending an IPC command */
++ while (retry--) {
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (!ipc_sts_reg.ipc_sts_parts.busy)
++ break;
++ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
++ }
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_CRIT "SCU is busy %d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++ __raw_writel(ipc_cca_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++
++ /* Wait for command completion from SCU firmware */
++ scu_cmd_completed = FALSE;
++ wait_event_interruptible_timeout(wait,
++ scu_cmd_completed, IPC_TIMEOUT);
++
++ /*Check for error in command processing*/
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (ipc_sts_reg.ipc_sts_parts.error) {
++ printk(KERN_CRIT "IPC Command failed %d\n",
++ ipc_sts_reg.ipc_sts_parts.error);
++ up(&sema_ipc);
++ return E_CMD_FAILED;
++ }
++
++ ipc_wbuf =
++ __raw_readl(p_ipc_base + IPC_RBUF);
++ cca_data->cca_val = ipc_wbuf;
++ lnw_ipc_dbg(KERN_INFO
++ "CCA Read at (0x%.8x) = 0x%.8x\n",
++ (u32) (p_ipc_base + IPC_RBUF), ipc_wbuf);
++ break;
++ }
++ case IPC_BATT_CCA_WRITE:
++
++ ipc_cca_cmd.cmd_parts.cmd = IPC_CCA_CMD_READ_WRITE;
++ ipc_cca_cmd.cmd_parts.ioc = cca_cmd.ioc;
++ ipc_cca_cmd.cmd_parts.rfu1 = 0x0;
++ ipc_cca_cmd.cmd_parts.cmd_ID = CCA_REG_WRITE;
++ ipc_cca_cmd.cmd_parts.size = 0;
++ ipc_cca_cmd.cmd_parts.rfu2 = 0x0;
++
++ lnw_ipc_dbg(KERN_INFO "ipc_cca_cmd.cmd_data = 0x%x\n",
++ ipc_cca_cmd.cmd_data);
++
++ /* Check for Status bit = 0 before sending an IPC command */
++ while (retry--) {
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (!ipc_sts_reg.ipc_sts_parts.busy)
++ break;
++ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
++ }
++
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_CRIT "SCU is busy %d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++ __raw_writel(cca_cmd.data, ((p_ipc_base + IPC_WBUF) + 4));
++ __raw_writel(ipc_cca_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++
++ /* Wait for command completion from SCU firmware */
++ scu_cmd_completed = FALSE;
++ wait_event_interruptible_timeout(wait,
++ scu_cmd_completed, IPC_TIMEOUT);
++
++ /*Check for error in command processing*/
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (ipc_sts_reg.ipc_sts_parts.error) {
++ printk(KERN_CRIT "IPC Command failed %d\n",
++ ipc_sts_reg.ipc_sts_parts.error);
++ up(&sema_ipc);
++ return E_CMD_FAILED;
++ }
++
++ break;
++ case IPC_BATT_GET_PROP:
++ {
++ struct ipc_batt_prop_data *prop_data =
++ (struct ipc_batt_prop_data *)cmd_data;
++
++ lnw_ipc_dbg(KERN_CRIT "Recieved IPC_BATT_GET_PROP\n");
++
++ /* CCA Read Implementation here.*/
++ ipc_cca_cmd.cmd_parts.cmd = IPC_CCA_CMD_READ_WRITE;
++ ipc_cca_cmd.cmd_parts.ioc = cca_cmd.ioc;
++ ipc_cca_cmd.cmd_parts.rfu1 = 0x0;
++ ipc_cca_cmd.cmd_parts.cmd_ID = CCA_REG_GET_PROP;
++ ipc_cca_cmd.cmd_parts.size = 0;
++ ipc_cca_cmd.cmd_parts.rfu2 = 0x0;
++
++ lnw_ipc_dbg(KERN_CRIT "ipc_cca_cmd.cmd_data = 0x%x\n",
++ ipc_cca_cmd.cmd_data);
++ /* Check for Status bit = 0 before sending an IPC command */
++ while (retry--) {
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (!ipc_sts_reg.ipc_sts_parts.busy)
++ break;
++ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
++ }
++
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_CRIT "SCU is busy %d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++ __raw_writel(ipc_cca_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++
++ scu_cmd_completed = FALSE;
++ wait_event_interruptible_timeout(wait,
++ scu_cmd_completed, IPC_TIMEOUT);
++
++ if (ipc_cca_cmd.cmd_parts.ioc == 0) {
++ /*Check for error in command processing*/
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (ipc_sts_reg.ipc_sts_parts.error) {
++ printk(KERN_CRIT "IPC Command failed %d\n",
++ ipc_sts_reg.ipc_sts_parts.error);
++ up(&sema_ipc);
++ return E_CMD_FAILED;
++ }
++ }
++
++ /* On wake-up fill the user buffer with IPC_RBUF data.*/
++ rbuf_offset = 0;
++ if ((ipc_cmd_len < 4) || (ipc_cmd_len > 9)) {
++ lnw_ipc_dbg(KERN_CRIT
++ "ipc_send_config: Invalid input param\
++ (size) recieved \n");
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++
++ if (ipc_cmd_len >= 4) {
++ ipc_wbuf = __raw_readl(p_ipc_base + IPC_RBUF);
++ lnw_ipc_dbg(KERN_CRIT
++ "Read ipc_wbuf at (0x%.8x) = 0x%.8x\n",
++ (u32) (p_ipc_base + IPC_RBUF + rbuf_offset),
++ ipc_wbuf);
++ rbuf_offset += 4;
++ for (i = 0; i < (ipc_cmd_len - 4); i++) {
++ cbuf[i] =
++ __raw_readb((p_ipc_base + IPC_RBUF +
++ rbuf_offset));
++ prop_data->batt_value2[i] = cbuf[i];
++ lnw_ipc_dbg(KERN_CRIT
++ "Read cbuf[%d] at (0x%.8x) = 0x%.8x\n",
++ i,
++ (u32) (p_ipc_base + IPC_RBUF +
++ rbuf_offset), cbuf[i]);
++ rbuf_offset++;
++ }
++
++ }
++
++ break;
++ }
++ default:
++ printk(KERN_CRIT "Recieved unknown option\n");
++ up(&sema_ipc);
++ return -ENODEV;
++ }
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++EXPORT_SYMBOL(ipc_config_cmd);
++
++int mrst_get_firmware_version(unsigned char *mrst_fw_ver_info)
++{
++ int i = 0;
++ mutex_lock(&mrst_ipc_mutex);
++
++ if (mrst_fw_ver_info == NULL) {
++ WARN_ON(1);
++ return -EINVAL;
++ }
++ for (i = 0; i < 16; i++)
++ mrst_fw_ver_info[i] = fw_ver_data[i];
++
++ mutex_unlock(&mrst_ipc_mutex);
++ return 0;
++}
++EXPORT_SYMBOL(mrst_get_firmware_version);
++
++int init_ipc_driver(void)
++{
++ init_waitqueue_head(&wait);
++
++ sema_init(&sema_ipc, MAX_INSTANCES_ALLOWED);
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_CRIT "IPC_Driver module busy\n");
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++
++ INIT_WORK(&ipc_wq.ipc_work, mrst_pmic_read_handler);
++
++ /* Map the memory of ipc1 PMIC reg base */
++ p_ipc_base = ioremap_nocache(IPC_BASE_ADDRESS, IPC_MAX_ADDRESS);
++ if (p_ipc_base == NULL) {
++ printk(KERN_CRIT
++ "IPC Driver: unable to map the address of IPC 1 \n");
++ up(&sema_ipc);
++ return E_PMIC_MALLOC;
++ }
++
++ printk(KERN_CRIT "p_ipc_base = <0x%.8X>\
++ IPC_BASE_ADDRESS = <0x%.8X>\n", (u32) p_ipc_base, IPC_BASE_ADDRESS);
++ p_i2c_ser_bus = ioremap_nocache(I2C_SER_BUS, I2C_MAX_ADDRESS);
++ if (p_i2c_ser_bus == NULL) {
++ printk(KERN_CRIT
++ "IPC Driver: unable to map the address of IPC 1 \n");
++ up(&sema_ipc);
++ return E_PMIC_MALLOC;
++ }
++
++ printk(KERN_CRIT "p_i2c_ser_bus = <0x%.8X>\
++ I2C_SER_BUS = <0x%.8X>\n", (u32) p_i2c_ser_bus, I2C_SER_BUS);
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++
++int de_init_ipc_driver(void)
++{
++ if (down_interruptible(&sema_ipc)) {
++ lnw_ipc_dbg(KERN_CRIT "IPC_Driver module busy\n");
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++
++ lnw_ipc_dbg(KERN_CRIT
++ "ipc_driver: in <%s> -> <%s> file at line no = <%d>\n",
++ __func__, __FILE__, __LINE__);
++ IOUNMAP(p_ipc_base);
++ IOUNMAP(p_i2c_ser_bus);
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++
++int ipc_pmic_register_read(struct ipc_pmic_reg_data *p_read_reg_data)
++{
++ union ipc_fw_cmd ipc_cmd;
++ union ipc_sts ipc_sts_reg;
++ u32 retry = MAX_RETRY_CNT;
++ u32 *ipc_wbuf;
++ u8 cbuf[IPC_BUF_LEN] = { '\0' };
++ u32 cnt = 0;
++ u32 i = 0;
++ u32 rbuf_offset = 2;
++ u8 temp_value = 0;
++ u64 time_to_wait = 0;
++
++ ipc_wbuf = (u32 *)&cbuf;
++
++ if (p_read_reg_data == NULL) {
++ printk(KERN_CRIT "Invalid Input Param recieved in pmic read\n");
++ return -E_INVALID_PARAM;
++ }
++ if (p_read_reg_data->num_entries > MAX_NUM_ENTRIES) {
++ printk(KERN_CRIT "Invalid Input Param recieved in pmic read\n");
++ return -E_NUM_ENTRIES_OUT_OF_RANGE;
++ }
++
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_CRIT "IPC_Driver module busy\n");
++ return -EBUSY;
++ }
++
++ ipc_cmd.cmd_parts.cmd = IPC_PMIC_CMD_READ_WRITE;
++ ipc_cmd.cmd_parts.ioc = p_read_reg_data->ioc;
++ ipc_cmd.cmd_parts.rfu1 = 0x0;
++ ipc_cmd.cmd_parts.cmd_ID = PMIC_REG_READ;
++ ipc_cmd.cmd_parts.size = 3 * (p_read_reg_data->num_entries);
++ ipc_cmd.cmd_parts.rfu2 = 0x0;
++
++ /* command is set. Fill the IPC_BUF */
++ lnw_ipc_dbg(KERN_INFO "p_read_reg_data->num_entries <0x%X>\n",
++ p_read_reg_data->num_entries);
++
++ lnw_ipc_dbg(KERN_INFO "p_read_reg_data->register_address <0x%X>\n",
++ p_read_reg_data->pmic_reg_data[0].register_address);
++
++ for (i = 0; i < p_read_reg_data->num_entries; i++) {
++ cbuf[cnt] = p_read_reg_data->pmic_reg_data[i].register_address;
++ cbuf[(cnt) + 1] =
++ (p_read_reg_data->pmic_reg_data[i].register_address >> 8);
++ cbuf[(cnt) + 2] = p_read_reg_data->pmic_reg_data[i].value;
++ cnt = cnt + 3;
++ }
++
++ rbuf_offset = 0;
++ for (i = 0; i < p_read_reg_data->num_entries; i++) {
++ __raw_writel(ipc_wbuf[i], ((p_ipc_base + IPC_WBUF)
++ + rbuf_offset));
++ rbuf_offset += 4;
++ if (i >= 3)
++ break;
++ }
++
++ /* Check for Status bit = 0 before sending an IPC command */
++ while (retry--) {
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (!ipc_sts_reg.ipc_sts_parts.busy)
++ break;
++ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
++ }
++
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_CRIT "SCU is busy %d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++
++ scu_cmd_completed = FALSE;
++ __raw_writel(ipc_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++
++ /*wait for 10ms do not tie to kernel timer_ticks*/
++ time_to_wait = msecs_to_jiffies(IPC_TIMEOUT);
++
++ /* Wait for command completion from SCU firmware */
++ wait_event_interruptible_timeout(wait,
++ scu_cmd_completed, time_to_wait);
++
++ if (ipc_cmd.cmd_parts.ioc == 0) {
++ /*Check for error in command processing*/
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_CRIT "Timeout occured for ioc=0 and SCU is busy%d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++ if (ipc_sts_reg.ipc_sts_parts.error) {
++ printk(KERN_CRIT "IPC Command failed %d\n",
++ ipc_sts_reg.ipc_sts_parts.error);
++ up(&sema_ipc);
++ return E_CMD_FAILED;
++ }
++ }
++ /* IPC driver expects interrupt when IOC is set to 1.*/
++ if ((ipc_cmd.cmd_parts.ioc == 1) && (scu_cmd_completed == FALSE)) {
++ up(&sema_ipc);
++ return E_NO_INTERRUPT_ON_IOC;
++ }
++ rbuf_offset = 2;
++ for (i = 0; i < p_read_reg_data->num_entries; i++) {
++ temp_value = readb((p_ipc_base + IPC_RBUF + rbuf_offset));
++ p_read_reg_data->pmic_reg_data[i].value = temp_value;
++ rbuf_offset += 3;
++ }
++
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++EXPORT_SYMBOL(ipc_pmic_register_read);
++
++int ipc_pmic_register_write(struct ipc_pmic_reg_data *p_write_reg_data,
++ u8 ipc_blocking_flag)
++{
++ union ipc_fw_cmd ipc_cmd;
++ union ipc_sts ipc_sts_reg;
++ u32 retry = MAX_RETRY_CNT;
++ u32 *ipc_wbuf;
++ u8 cbuf[IPC_BUF_LEN] = { '\0' };
++ u32 cnt = 0;
++ u32 i = 0;
++ u32 rbuf_offset = 2;
++
++ ipc_wbuf = (u32 *)&cbuf;
++
++ if (p_write_reg_data == NULL) {
++ printk(KERN_CRIT "Invalid Input Param recieved in pmic write\n");
++ return -E_INVALID_PARAM;
++ }
++ if (p_write_reg_data->num_entries > MAX_NUM_ENTRIES) {
++ printk(KERN_CRIT "Invalid Input Param recieved in pmic write\n");
++ return -E_NUM_ENTRIES_OUT_OF_RANGE;
++ }
++
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_INFO "IPC_Driver module busy\n");
++ return -EBUSY;
++ }
++
++ ipc_cmd.cmd_parts.cmd = IPC_PMIC_CMD_READ_WRITE;
++ ipc_cmd.cmd_parts.ioc = p_write_reg_data->ioc;
++ ipc_cmd.cmd_parts.rfu1 = 0x0;
++ ipc_cmd.cmd_parts.cmd_ID = PMIC_REG_WRITE;
++ ipc_cmd.cmd_parts.size = 3 * (p_write_reg_data->num_entries);
++ ipc_cmd.cmd_parts.rfu2 = 0x0;
++
++ /* command is set. Fill the IPC_BUF */
++ lnw_ipc_dbg(KERN_INFO "p_write_reg_data->num_entries 0x%X>\n",
++ p_write_reg_data->num_entries);
++
++ lnw_ipc_dbg(KERN_INFO "p_write_reg_data->register_address 0x%X>\n",
++ p_write_reg_data->pmic_reg_data[0].register_address);
++ for (i = 0; i < p_write_reg_data->num_entries; i++) {
++ cbuf[cnt] = p_write_reg_data->pmic_reg_data[i].register_address;
++ cbuf[(cnt) + 1] =
++ (p_write_reg_data->pmic_reg_data[i].register_address >> 8);
++ cbuf[(cnt) + 2] = p_write_reg_data->pmic_reg_data[i].value;
++ cnt = cnt + 3;
++ }
++
++ rbuf_offset = 0;
++ for (i = 0; i < p_write_reg_data->num_entries; i++) {
++ __raw_writel(ipc_wbuf[i], ((p_ipc_base + IPC_WBUF)
++ + rbuf_offset));
++ rbuf_offset += 4;
++ if (i >= 3)
++ break;
++ }
++ /* Check for Status bit = 0 before sending an IPC command */
++ while (retry--) {
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (!ipc_sts_reg.ipc_sts_parts.busy)
++ break;
++ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
++ }
++
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_CRIT "IPC Command failed %d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++ __raw_writel(ipc_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++
++ /* Wait for command completion from SCU firmware */
++ scu_cmd_completed = FALSE;
++ wait_event_interruptible_timeout(wait,
++ scu_cmd_completed, IPC_TIMEOUT);
++
++ /*Check for error in command processing*/
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (ipc_sts_reg.ipc_sts_parts.error) {
++ printk(KERN_CRIT "IPC Command failed %d\n",
++ ipc_sts_reg.ipc_sts_parts.error);
++ up(&sema_ipc);
++ return E_CMD_FAILED;
++ }
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++EXPORT_SYMBOL(ipc_pmic_register_write);
++
++int ipc_pmic_register_read_modify(struct ipc_pmic_mod_reg_data
++ *p_read_mod_reg_data)
++{
++ union ipc_fw_cmd ipc_cmd;
++ union ipc_sts ipc_sts_reg;
++ u32 retry = MAX_RETRY_CNT;
++ u32 *ipc_wbuf;
++ u8 cbuf[IPC_BUF_LEN] = { '\0' };
++ u32 cnt = 0;
++ u32 i = 0;
++ u32 rbuf_offset = 2;
++ ipc_wbuf = (u32 *)&cbuf;
++
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_INFO "IPC_Driver module busy\n");
++ return -EBUSY;
++ }
++
++ if (p_read_mod_reg_data == NULL) {
++ printk(KERN_CRIT "Invalid Input recieved pmic read modify\n");
++ up(&sema_ipc);
++ return -E_INVALID_PARAM;
++ }
++ if (p_read_mod_reg_data->num_entries > MAX_NUM_ENTRIES) {
++ printk(KERN_CRIT "Invalid Input recieved pmic read modify\n");
++ up(&sema_ipc);
++ return -E_NUM_ENTRIES_OUT_OF_RANGE;
++ }
++
++ ipc_cmd.cmd_parts.cmd = IPC_PMIC_CMD_READ_WRITE;
++ ipc_cmd.cmd_parts.ioc = p_read_mod_reg_data->ioc;
++ ipc_cmd.cmd_parts.rfu1 = 0x0;
++ ipc_cmd.cmd_parts.cmd_ID = PMIC_REG_READ_MODIFY;
++ ipc_cmd.cmd_parts.size = 3 * (p_read_mod_reg_data->num_entries);
++ ipc_cmd.cmd_parts.rfu2 = 0x0;
++
++ /* command is set. Fill the IPC_BUF */
++ lnw_ipc_dbg(KERN_INFO "p_read_mod_reg_data->num_entries <0x%X> \n",
++ p_read_mod_reg_data->num_entries);
++
++ for (i = 0; i < p_read_mod_reg_data->num_entries; i++) {
++ cbuf[cnt] =
++ p_read_mod_reg_data->pmic_mod_reg_data[i].register_address;
++ cbuf[(cnt) + 1] =
++ (p_read_mod_reg_data->pmic_mod_reg_data[i].
++ register_address >> 8);
++ cbuf[(cnt) + 2] =
++ p_read_mod_reg_data->pmic_mod_reg_data[i].value;
++ cbuf[(cnt) + 3] =
++ p_read_mod_reg_data->pmic_mod_reg_data[i].bit_map;
++ cnt = cnt + 4;
++ }
++
++ rbuf_offset = 0;
++ for (i = 0; i < p_read_mod_reg_data->num_entries; i++) {
++ __raw_writel(ipc_wbuf[i],
++ ((p_ipc_base + IPC_WBUF) + rbuf_offset));
++ rbuf_offset += 4;
++ if (i >= 3)
++ break;
++ }
++
++ /* Check for Status bit = 0 before sending an IPC command */
++ while (retry--) {
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (!ipc_sts_reg.ipc_sts_parts.busy)
++ break;
++ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
++ }
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_CRIT "SCU is busy %d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++ __raw_writel(ipc_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++
++ /* Wait for command completion from SCU firmware */
++ scu_cmd_completed = FALSE;
++ wait_event_interruptible_timeout(wait,
++ scu_cmd_completed, IPC_TIMEOUT);
++
++ if (ipc_cmd.cmd_parts.ioc == 0) {
++ /*Check for error in command processing*/
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (ipc_sts_reg.ipc_sts_parts.error) {
++ printk(KERN_CRIT "IPC Command failed %d\n",
++ ipc_sts_reg.ipc_sts_parts.error);
++ up(&sema_ipc);
++ return E_CMD_FAILED;
++ }
++ }
++
++ /* IPC driver expects interrupt when IOC is set to 1.*/
++ if ((ipc_cmd.cmd_parts.ioc == 1) && (scu_cmd_completed == FALSE)) {
++ up(&sema_ipc);
++ return E_NO_INTERRUPT_ON_IOC;
++ }
++
++ /* On wake-up fill the user buffer with IPC_RBUF data.*/
++ rbuf_offset = 0;
++ for (i = 0; i < p_read_mod_reg_data->num_entries; i++) {
++ ipc_wbuf[i] =
++ __raw_readl((p_ipc_base + IPC_RBUF + rbuf_offset));
++ rbuf_offset += 4;
++ }
++
++ rbuf_offset = 2;
++ for (i = 0; i < p_read_mod_reg_data->num_entries; i++) {
++ p_read_mod_reg_data->pmic_mod_reg_data[i].value =
++ __raw_readb((p_ipc_base + IPC_RBUF + rbuf_offset));
++ rbuf_offset += 4;
++ }
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++EXPORT_SYMBOL(ipc_pmic_register_read_modify);
++
++int ipc_pmic_register_read_non_blocking(
++ struct ipc_non_blocking_pmic_read *p_nb_read)
++{
++ union ipc_fw_cmd ipc_cmd;
++ union ipc_sts ipc_sts_reg;
++ u32 retry = MAX_RETRY_CNT;
++ u32 *ipc_wbuf;
++ u8 cbuf[IPC_BUF_LEN] = { '\0' };
++ u32 cnt = 0;
++ u32 i = 0;
++ u32 rbuf_offset = 2;
++ ipc_wbuf = (u32 *)&cbuf;
++
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_CRIT "IPC_Driver module busy\n");
++ return -EBUSY;
++ }
++ if (p_nb_read == NULL) {
++ printk(KERN_CRIT "Invalid Input Param recieved\
++ in non blocking pmic read\n");
++ up(&sema_ipc);
++ return -E_INVALID_PARAM;
++ }
++ if (p_nb_read->pmic_nb_read.num_entries > MAX_NUM_ENTRIES) {
++ printk(KERN_CRIT "Invalid Number Of Entries\
++ - non blocking pmic read\n");
++ up(&sema_ipc);
++ return -E_NUM_ENTRIES_OUT_OF_RANGE;
++ }
++
++ if (cmd_id >= MAX_NB_BUF_SIZE) {
++ printk(KERN_CRIT "Queue is full!! cannot service request!\n");
++ up(&sema_ipc);
++ return -E_QUEUE_IS_FULL;
++ }
++
++
++ non_blocking_read_flag = TRUE;
++ /*Copy the contents to this global structure for future use*/
++ pmic_read_que[cmd_id] = *(p_nb_read);
++ ipc_wq.cmd_id = cmd_id++;
++ callback = p_nb_read->callback_host;
++ pmic_read_que[cmd_id].callback_host = p_nb_read->callback_host;
++
++ ipc_cmd.cmd_parts.cmd = IPC_PMIC_CMD_READ_WRITE;
++ ipc_cmd.cmd_parts.ioc = 1;
++ ipc_cmd.cmd_parts.rfu1 = 0x0;
++ ipc_cmd.cmd_parts.cmd_ID = PMIC_REG_READ;
++ ipc_cmd.cmd_parts.size = 3 * (p_nb_read->pmic_nb_read.num_entries);
++ ipc_cmd.cmd_parts.rfu2 = 0x0;
++
++ /* command is set. Fill the IPC_BUF */
++ lnw_ipc_dbg(KERN_INFO "pmic_nb_read.num_entries <0x%X>\n",
++ p_nb_read->pmic_nb_read.num_entries);
++
++ lnw_ipc_dbg(KERN_INFO "pmic_nb_read.register_address <0x%X>\n",
++ p_nb_read->pmic_nb_read.pmic_reg_data[0].register_address);
++
++ for (i = 0; i < p_nb_read->pmic_nb_read.num_entries; i++) {
++ cbuf[cnt] =
++ p_nb_read->pmic_nb_read.pmic_reg_data[i].register_address;
++ cbuf[(cnt) + 1] = (p_nb_read->pmic_nb_read.pmic_reg_data[i]\
++ .register_address >> 8);
++ cbuf[(cnt) + 2] =
++ p_nb_read->pmic_nb_read.pmic_reg_data[i].value;
++ cnt = cnt + 3;
++ }
++ rbuf_offset = 0;
++ for (i = 0; i < p_nb_read->pmic_nb_read.num_entries; i++) {
++ __raw_writel(ipc_wbuf[i], ((p_ipc_base + IPC_WBUF)
++ + rbuf_offset));
++ rbuf_offset += 4;
++ if (i >= 3)
++ break;
++ }
++ /* Check for Status bit = 0 before sending an IPC command */
++ while (retry--) {
++ ipc_sts_reg.ipc_sts_data = __raw_readl((p_ipc_base + IPC_STS));
++ if (!ipc_sts_reg.ipc_sts_parts.busy)
++ break;
++
++ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
++ }
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_CRIT "SCU is busy %d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++ __raw_writel(ipc_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++ /*Control returns after issueing the command here*/
++ /*Data is read asynchronously later*/
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++EXPORT_SYMBOL(ipc_pmic_register_read_non_blocking);
++
++int mrst_ipc_read32(struct ipc_reg_data *p_reg_data)
++{
++ union ipc_fw_cmd ipc_cmd;
++ union ipc_sts ipc_sts_reg;
++ u32 retry = MAX_RETRY_CNT;
++
++ if (p_reg_data == NULL) {
++ printk(KERN_CRIT "Invalid Input Param recieved\
++ in mrst_ipc_read32\n");
++ return -E_INVALID_PARAM;
++ }
++
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_INFO "IPC_Driver module busy\n");
++ return -EBUSY;
++ }
++
++ lnw_ipc_dbg(KERN_INFO
++ "ipc_driver: Address = 0x%.8X\t: Data = 0x%.8X\n",
++ p_reg_data->address, p_reg_data->data);
++
++ ipc_cmd.cmd_parts.cmd = INDIRECT_READ;
++ ipc_cmd.cmd_parts.ioc = p_reg_data->ioc;
++ ipc_cmd.cmd_parts.rfu1 = 0x0;
++ ipc_cmd.cmd_parts.cmd_ID = 0x00;
++ ipc_cmd.cmd_parts.size = 4;
++ ipc_cmd.cmd_parts.rfu2 = 0x0;
++
++ lnw_ipc_dbg(KERN_INFO
++ "ipc_driver: IPC_CMD-> 0x%.8X\n", ipc_cmd.cmd_data);
++ /* Check for Status bit = 0 before sending an IPC command */
++ while (retry--) {
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (!ipc_sts_reg.ipc_sts_parts.busy)
++ break;
++ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
++ }
++
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_CRIT "SCU is busy %d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++ /*
++ * Write the Address to IPC_SPTR
++ * Issue the command by writing to IPC_CMD
++ * Read the contents of IPC_RBUF to data
++ */
++
++ __raw_writel(p_reg_data->address, (p_ipc_base + IPC_SPTR));
++ __raw_writel(ipc_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++
++ scu_cmd_completed = FALSE;
++ wait_event_interruptible_timeout(wait,
++ scu_cmd_completed, IPC_TIMEOUT);
++
++ if (ipc_cmd.cmd_parts.ioc == 0) {
++ /*Check for error in command processing*/
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (ipc_sts_reg.ipc_sts_parts.error) {
++ printk(KERN_CRIT "IPC Command failed %d\n",
++ ipc_sts_reg.ipc_sts_parts.error);
++ up(&sema_ipc);
++ return E_CMD_FAILED;
++ }
++ }
++ /* IPC driver expects interrupt when IOC is set to 1.*/
++ if ((ipc_cmd.cmd_parts.ioc == 1) && (scu_cmd_completed == FALSE)) {
++ up(&sema_ipc);
++ return E_NO_INTERRUPT_ON_IOC;
++ }
++
++ /* Command completed successfully Read the data */
++ p_reg_data->data =
++ __raw_readl(p_ipc_base + IPC_RBUF);
++ lnw_ipc_dbg(KERN_INFO
++ "ipc_driver: Data Recieved from IPC_RBUF = 0x%.8X\n",
++ p_reg_data->data);
++
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++EXPORT_SYMBOL(mrst_ipc_read32);
++
++int mrst_ipc_write32(struct ipc_reg_data *p_reg_data)
++{
++ union ipc_fw_cmd ipc_cmd;
++ union ipc_sts ipc_sts_reg;
++ u32 retry = MAX_RETRY_CNT;
++
++ if (p_reg_data == NULL) {
++ printk(KERN_CRIT "Invalid Input Param recieved\
++ in mrst_ipc_write32\n");
++ return -E_INVALID_PARAM;
++ }
++
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_INFO "IPC_Driver module busy\n");
++ return -EBUSY;
++ }
++
++ lnw_ipc_dbg(KERN_INFO
++ "ipc_driver: in <%s> -> <%s> file at line no = <%d>\n",
++ __func__, __FILE__, __LINE__);
++
++ ipc_cmd.cmd_parts.cmd = INDIRECT_WRITE;
++ ipc_cmd.cmd_parts.ioc = p_reg_data->ioc;
++ ipc_cmd.cmd_parts.rfu1 = 0x0;
++ ipc_cmd.cmd_parts.cmd_ID = 0x00;
++ ipc_cmd.cmd_parts.size = 4;
++ ipc_cmd.cmd_parts.rfu2 = 0x0;
++
++ /* Check for Status bit = 0 before sending an IPC command */
++ while (retry--) {
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (!ipc_sts_reg.ipc_sts_parts.busy)
++ break;
++ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
++ }
++
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_CRIT "SCU is busy %d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++ __raw_writel(p_reg_data->address, (p_ipc_base + IPC_DPTR));
++ __raw_writel(p_reg_data->data, (p_ipc_base + IPC_WBUF));
++ __raw_writel(ipc_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++
++ scu_cmd_completed = FALSE;
++ wait_event_interruptible_timeout(wait,
++ scu_cmd_completed, IPC_TIMEOUT);
++
++ /*Check for error in command processing*/
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (ipc_sts_reg.ipc_sts_parts.error) {
++ printk(KERN_CRIT "IPC Command failed %d\n",
++ ipc_sts_reg.ipc_sts_parts.error);
++ up(&sema_ipc);
++ return E_CMD_FAILED;
++ }
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++EXPORT_SYMBOL(mrst_ipc_write32);
++
++int ipc_set_watchdog(struct watchdog_reg_data *p_watchdog_reg_data)
++{
++ union ipc_fw_cmd ipc_cmd;
++ u32 *ipc_wbuf;
++ u8 cbuf[16] = { '\0' };
++ u32 rbuf_offset = 2;
++ u32 retry = MAX_RETRY_CNT;
++ union ipc_sts ipc_sts_reg;
++
++ ipc_wbuf = (u32 *)&cbuf;
++
++ if (p_watchdog_reg_data == NULL) {
++ printk(KERN_CRIT "Invalid Input Param recieved in pmic read\n");
++ return -E_INVALID_PARAM;
++ }
++
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_CRIT "IPC_Driver module busy\n");
++ return -EBUSY;
++ }
++
++ ipc_cmd.cmd_parts.cmd = IPC_SET_WATCHDOG_TIMER;
++ ipc_cmd.cmd_parts.ioc = p_watchdog_reg_data->ioc;
++ ipc_cmd.cmd_parts.rfu1 = 0x0;
++ ipc_cmd.cmd_parts.size = 2;
++ ipc_cmd.cmd_parts.rfu2 = 0x0;
++
++ /* Check for Status bit = 0 before sending an IPC command */
++ while (retry--) {
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (!ipc_sts_reg.ipc_sts_parts.busy)
++ break;
++ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
++ }
++
++ ipc_wbuf[0] = p_watchdog_reg_data->payload1;
++ printk(KERN_INFO "p_watchdog_data->payload1 <0x%X>\n",
++ ipc_wbuf[0]);
++ __raw_writel(ipc_wbuf[0], ((p_ipc_base + IPC_WBUF) + rbuf_offset));
++
++ ipc_wbuf[1] = p_watchdog_reg_data->payload2;
++ lnw_ipc_dbg(KERN_INFO "p_watchdog_data->payload2 <0x%X>\n",
++ ipc_wbuf[1]);
++ __raw_writel(ipc_wbuf[1], ((p_ipc_base + IPC_WBUF) + rbuf_offset));
++
++ lnw_ipc_dbg(KERN_INFO "ipc_cmd.cmd_data is <0x%X>\n",
++ ipc_cmd.cmd_data);
++ /*execute the command by writing to IPC_CMD registers*/
++ __raw_writel(ipc_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++
++ /* Wait for command completion from SCU firmware and return */
++ scu_cmd_completed = FALSE;
++ wait_event_interruptible_timeout(wait,
++ scu_cmd_completed, IPC_TIMEOUT);
++
++ /* IPC driver expects interrupt when IOC is set to 1.*/
++ if ((ipc_cmd.cmd_parts.ioc == 1) && (scu_cmd_completed == FALSE)) {
++ up(&sema_ipc);
++ return E_NO_INTERRUPT_ON_IOC;
++ }
++
++ /*Check for error in command processing*/
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (ipc_sts_reg.ipc_sts_parts.error) {
++ printk(KERN_CRIT "IPC Command failed %d\n",
++ ipc_sts_reg.ipc_sts_parts.error);
++ up(&sema_ipc);
++ return E_CMD_FAILED;
++ }
++ lnw_ipc_dbg(KERN_CRIT "IPC Command status = 0x%x\n",
++ ipc_sts_reg.ipc_sts_data);
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++EXPORT_SYMBOL(ipc_set_watchdog);
++
++int ipc_program_io_bus_master(struct ipc_io_bus_master_regs *p_reg_data)
++{
++ u32 io_bus_master_cmd = 0;
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_INFO "IPC_Driver module busy\n");
++ return -EBUSY;
++ }
++
++ if (p_reg_data == NULL) {
++ printk(KERN_CRIT "Invalid Input Param recieved in\
++ <ipc_program_io_bus_master>\n");
++ up(&sema_ipc);
++ return -E_INVALID_PARAM;
++ }
++ printk(KERN_CRIT "p_reg_data->ctrl_reg_addr = 0x%x\n",\
++ p_reg_data->ctrl_reg_addr);
++ printk(KERN_CRIT "p_reg_data->ctrl_reg_data = 0x%x\n",\
++ p_reg_data->ctrl_reg_data);
++
++ /* Read the first byte for command*/
++ io_bus_master_cmd = (p_reg_data->ctrl_reg_addr)&(0xFF000000);
++ io_bus_master_cmd = (io_bus_master_cmd >> 24);
++
++ if (io_bus_master_cmd == NOP_CMD) {
++ printk(KERN_CRIT "NOP_CMD = 0x%x\n", io_bus_master_cmd);
++ } else if (io_bus_master_cmd == READ_CMD) {
++ lnw_ipc_dbg(KERN_CRIT "Address %#xp = data = %#x\n",
++ (unsigned int)(p_i2c_ser_bus + CTRL_REG_ADDR),
++ p_reg_data->ctrl_reg_addr);
++ __raw_writel(p_reg_data->ctrl_reg_addr,
++ (p_i2c_ser_bus + CTRL_REG_ADDR));
++ udelay(1000);/*Write Not getting updated without delay*/
++ p_reg_data->ctrl_reg_data =
++ __raw_readl(p_i2c_ser_bus + CTRL_REG_DATA);
++ lnw_ipc_dbg(KERN_CRIT "Data = %#x\n",
++ p_reg_data->ctrl_reg_data);
++ } else if (io_bus_master_cmd == WRITE_CMD) {
++ printk(KERN_CRIT"WRITE_CMD = 0x%x\n", io_bus_master_cmd);
++
++ __raw_writel(p_reg_data->ctrl_reg_data,
++ (p_i2c_ser_bus + CTRL_REG_DATA));
++ udelay(1000);
++ __raw_writel(p_reg_data->ctrl_reg_addr,
++ (p_i2c_ser_bus + CTRL_REG_ADDR));
++ } else {
++ printk(KERN_CRIT "in INVALID_CMD = 0x%x\n", io_bus_master_cmd);
++ up(&sema_ipc);
++ return -E_INVALID_CMD;
++ }
++ up(&sema_ipc);
++ return SUCCESS;
++}
++EXPORT_SYMBOL(ipc_program_io_bus_master);
++
++/*Work QUEUE Handler function:
++ *This function gets invoked by queue.
++ */
++static void mrst_pmic_read_handler(struct work_struct *work)
++{
++ static int i;
++ union ipc_sts ipc_sts_reg;
++ u32 retry = MAX_RETRY_CNT;
++ u32 rbuf_offset = 2;
++
++ u8 pmic_data = 0;
++
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_CRIT "IPC_Driver non-blocking read handler\n");
++ } else {
++ non_blocking_read_flag = FALSE;
++ pmic_data = __raw_readb((p_ipc_base + IPC_RBUF + 2));
++
++ while (retry--) {
++ ipc_sts_reg.ipc_sts_data =
++ __raw_readl((p_ipc_base + IPC_STS));
++ if (!ipc_sts_reg.ipc_sts_parts.busy)
++ break;
++ udelay(USLEEP_STS_TIMEOUT); /*10usec*/
++ }
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_CRIT "SCU is busy %d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ pmic_data = -1 /*Invalid data*/;
++ } else {
++ rbuf_offset = 2;
++ cmd_id--;
++ for (i = 0; i < pmic_read_que[cmd_id].
++ pmic_nb_read.num_entries; i++) {
++ pmic_read_que[cmd_id].pmic_nb_read.
++ pmic_reg_data[i].value =
++ __raw_readb((p_ipc_base + IPC_RBUF
++ + rbuf_offset));
++ rbuf_offset += 3;
++ }
++ }
++ }
++ up(&sema_ipc);
++ /*Call the call-back function.
++ *The host driver is responsible for reading valid data.
++ */
++ pmic_read_que[cmd_id].callback_host(pmic_read_que[cmd_id].pmic_nb_read,
++ pmic_read_que[cmd_id].context);
++}
++
++
++/**
++ * int ipc_device_fw_upgrade() - API to upgrade the Integrated Firmware Image
++ * for Intel(R) Moorestown platform.
++ * @u8 *mrst_fw_buf: Command data.
++ * @u32 mrst_fw_buf_len: length of the command to be sent.
++ *
++ * This function provides and interface to send an IPC coulumb counter
++ * command to SCU Firmware and recieve a response. This is used by the
++ * PMIC battery driver on Moorestown platform.
++ */
++int ipc_device_fw_upgrade(u8 *mrst_fw_buf, u32 mrst_fw_buf_len)
++{
++ union ipc_fw_cmd ipc_dfu_cmd;
++ void __iomem *p_tmp_fw_base;
++ int retry_cnt = 0;
++
++ MailBox_t *pMailBox = NULL;
++
++ if (down_interruptible(&sema_ipc)) {
++ printk(KERN_ERR "IPC_Driver module busy\n");
++ return -EBUSY;
++ }
++
++ /* Map the memory of ipc1 PMIC reg base */
++ p_dfu_fw_base = ioremap_nocache(DFU_LOAD_ADDR, MIP_HEADER_SIZE);
++ p_tmp_fw_base = p_dfu_fw_base;
++ if (p_dfu_fw_base == NULL) {
++ up(&sema_ipc);
++ return E_PMIC_MALLOC;
++ }
++ p_dfu_mailbox_base = ioremap_nocache(DFU_MAILBOX_ADDR,
++ sizeof(MailBox_t));
++ if (p_dfu_mailbox_base == NULL) {
++ up(&sema_ipc);
++ return E_PMIC_MALLOC;
++ }
++
++ pMailBox = (MailBox_t*)p_dfu_mailbox_base;
++
++ ipc_dfu_cmd.cmd_data = FW_UPGRADE_READY_CMD;
++ writel(ipc_dfu_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++
++ /*IA initializes both IAFlag and SCUFlag to zero*/
++ pMailBox->SCUFlag = 0;
++ pMailBox->IAFlag = 0;
++
++ /*IA copies the 2KB MIP header to SRAM at 0xFFFC0000*/
++ memcpy((u8*)(p_dfu_fw_base), mrst_fw_buf, 0x800);
++ iounmap(p_tmp_fw_base);
++
++ /* IA sends "FW Update" IPC command (CMD_ID 0xFE; MSG_ID 0x02).
++ * Upon receiving this command, SCU will write the 2K MIP header
++ * from 0xFFFC0000 into NAND.
++ * SCU will write a status code into the Mailbox, and then set SCUFlag.
++ */
++
++ ipc_dfu_cmd.cmd_data = FW_UPGRADE_GO_CMD;
++ writel(ipc_dfu_cmd.cmd_data, (p_ipc_base + IPC_CMD));
++
++ /*IA stalls until SCUFlag is set */
++ while (pMailBox->SCUFlag != 1)
++ udelay(100);
++
++ /* IA checks Mailbox status.
++ * If the status is 'BADN', then abort (bad NAND).
++ * If the status is 'TxLO', then continue.
++ */
++ while (pMailBox->Mailbox != TxLO)
++ udelay(10000);
++ udelay(10000);
++
++update_retry:
++ if (retry_cnt > 5)
++ goto exit_function;
++
++ if (pMailBox->Mailbox == TxLO) {
++ /* Map the memory of ipc1 PMIC reg base */
++ p_dfu_fw_base = ioremap_nocache(DFU_LOAD_ADDR, (128*1024));
++ p_tmp_fw_base = p_dfu_fw_base;
++ if (p_dfu_fw_base == NULL) {
++ up(&sema_ipc);
++ iounmap(p_dfu_mailbox_base);
++ return E_PMIC_MALLOC;
++ }
++
++ mrst_fw_buf = mrst_fw_buf+0x800;
++ memcpy((u8 *)(p_dfu_fw_base), mrst_fw_buf, 0x20000);
++ pMailBox->IAFlag = 0x1;
++ while (pMailBox->SCUFlag == 1)
++ udelay(100);
++
++ /* check for 'BADN' */
++ if (pMailBox->Mailbox == BADN) {
++ up(&sema_ipc);
++ iounmap(p_tmp_fw_base);
++ iounmap(p_dfu_mailbox_base);
++ return -1;
++ }
++
++ iounmap(p_tmp_fw_base);
++ } else {
++ up(&sema_ipc);
++ iounmap(p_dfu_mailbox_base);
++ return -1;
++ }
++
++ while (pMailBox->Mailbox != TxHI)
++ udelay(10000);
++ udelay(10000);
++
++ if (pMailBox->Mailbox == TxHI) {
++ /* Map the memory of ipc1 PMIC reg base */
++ p_dfu_fw_base = ioremap_nocache(DFU_LOAD_ADDR, (128*1024));
++ p_tmp_fw_base = p_dfu_fw_base;
++ if (p_dfu_fw_base == NULL) {
++ up(&sema_ipc);
++ iounmap(p_dfu_mailbox_base);
++ return E_PMIC_MALLOC;
++ }
++
++ mrst_fw_buf = mrst_fw_buf+0x20000;
++ memcpy((u8 *)(p_dfu_fw_base), mrst_fw_buf, 0x20000);
++ pMailBox->IAFlag = 0;
++ while (pMailBox->SCUFlag == 0)
++ udelay(100);
++
++ /* check for 'BADN' */
++ if (pMailBox->Mailbox == BADN) {
++ up(&sema_ipc);
++ iounmap(p_tmp_fw_base);
++ iounmap(p_dfu_mailbox_base);
++ return -1;
++ }
++
++ iounmap(p_tmp_fw_base);
++ } else {
++ up(&sema_ipc);
++ iounmap(p_dfu_mailbox_base);
++ return -1;
++ }
++
++ if (pMailBox->Mailbox == TxLO) {
++ ++retry_cnt;
++ goto update_retry;
++ }
++
++ if (pMailBox->Mailbox == DONE)
++ printk(KERN_INFO "Firmware update completed!\n");
++
++exit_function:
++ iounmap(p_dfu_mailbox_base);
++ up(&sema_ipc);
++
++ return SUCCESS;
++}
++EXPORT_SYMBOL(ipc_device_fw_upgrade);
++
++static int cache_mrst_firmware_version(void)
++{
++ union ipc_sts ipc_sts_reg;
++ int i = 0;
++
++ mutex_lock(&mrst_ipc_mutex);
++
++ /*execute the command by writing to IPC_CMD registers*/
++ writel(IPC_GET_FW_VERSION, (p_ipc_base + IPC_CMD));
++ udelay(1000);
++
++ ipc_sts_reg.ipc_sts_data = readl(p_ipc_base + IPC_STS);
++ if (ipc_sts_reg.ipc_sts_parts.error) {
++ printk(KERN_ERR "IPC GetSCUFW Version Command failed %d\n",
++ ipc_sts_reg.ipc_sts_parts.error);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++ if (ipc_sts_reg.ipc_sts_parts.busy) {
++ printk(KERN_ERR "SCU is busy %d\n",
++ ipc_sts_reg.ipc_sts_parts.busy);
++ up(&sema_ipc);
++ return -EBUSY;
++ }
++
++ for (i = 0; i < 16 ; i++)
++ fw_ver_data[i] = readb(p_ipc_base + IPC_RBUF + i);
++ mutex_unlock(&mrst_ipc_mutex);
++ return 0;
++}
++
++MODULE_AUTHOR("Sreenidhi Gurudatt <sreenidhi.b.gurudatt@intel.com>");
++MODULE_DESCRIPTION("Intel Moorestown IPC driver");
++MODULE_LICENSE("GPL");
++
++module_init(ipc_mrst_init);
++module_exit(ipc_mrst_exit);
+Index: linux-2.6.33/arch/x86/kernel/ipc_mrst.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/kernel/ipc_mrst.h
+@@ -0,0 +1,241 @@
++/*
++ * ipc_mrst.h: Driver for Langwell IPC1
++ *
++ * (C) Copyright 2008 Intel Corporation
++ * Author: Sreenidhi Gurudatt (sreenidhi.b.gurudatt@intel.com)
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; version 2
++ * of the License.
++ *
++ * Note:
++ * Langwell provides two IPC units to communicate with IA host. IPC1 is
++ * dedicated for IA. IPC commands results in LNW SCU interrupt. The
++ * initial implementation of this driver is platform specific. It will be
++ * converted to a PCI driver once SCU FW is in place.
++ */
++#ifndef __IPC_MRST_H__
++#define __IPC_MRST_H__
++
++#include <linux/interrupt.h>
++#include <linux/workqueue.h>
++
++#define MRST_IPC_DRIVER_VERSION "0.01.004"
++#define IPC_TIMEOUT 10 /*in msecs*/
++#define MAX_RETRY_CNT 10
++#define MAX_NB_BUF_SIZE 100
++#define IPC_BUF_LEN 16
++#define MAX_NUM_ENTRIES 5
++#define USLEEP_STS_TIMEOUT 10
++
++#define LNW_IPC1_BASE 0xff11c000
++#define LNW_IPC1_MMAP_SIZE 1024
++
++#define LNW_IPC1
++#define LNW_IPC_CMD 0x00
++#define LNW_IPC_STS 0x04
++#define LNW_IPC_DPTR 0x08
++#define LNW_IPC_WBUF 0x80
++#define LNW_IPC_RBUF 0x90
++#define LNW_IPC_RWBUF_SIZE 16
++
++/* IPC status register layout */
++#define LNW_IPC_STS_BUSY (1<<0)
++#define LNW_IPC_STS_ERR (1<<1)
++#define LNW_IPC_STS_CMDID (0xF<<4)
++#define LNW_IPC_STS_INITID (0xFF<<8)
++#define LNW_IPC_STS_ERR_CODE (0xFF<<16)
++
++/* IPC command register layout */
++#define LNW_IPC_CMD_CMD (0xFF<<0)
++#define LNW_IPC_CMD_MSI (1<<8)
++#define LNW_IPC_CMD_ID (0xF<<12)
++#define LNW_IPC_CMD_SIZE (0xFF<<16)
++
++#define FW_UPGRADE_READY_CMD 0x10FE
++#define FW_UPGRADE_GO_CMD 0x20FE
++#define DFU_MAILBOX_ADDR 0xFFFFDFF4
++#define IPC_CMD_GO_TO_DFU_MODE 0x0001
++#define IPC_CMD_UPDATE_FW 0x0002
++#define IPC_CMD_FORCE_UPDATE_FW 0x0003
++
++/*256K storage size for loading the FW image.*/
++#define MAX_FW_SIZE 262144
++#define MIP_HEADER_SIZE 2048
++#define DONE 0x444f4e45
++#define BADN 0x4241444E
++#define TxHI 0x54784849
++#define TxLO 0x54784c4f
++
++typedef struct {
++ volatile unsigned int Mailbox;
++ volatile unsigned int SCUFlag;
++ volatile unsigned int IAFlag;
++} MailBox_t;
++
++enum IPC_CMD {
++ NORMAL_WRITE, /*0x00 Normal Write */
++ MSG_WRITE, /*0x01 Message Write */
++ INDIRECT_READ, /*0x02 Indirect Read */
++ RSVD, /*0x03 Reserved */
++ READ_DMA, /*0x04 Read DMA */
++ INDIRECT_WRITE, /*0x05 Indirect write */
++};
++
++int lnw_ipc_send_cmd(unsigned char cmd, int size, int msi);
++
++struct ipc_driver {
++ const char *name;
++ irqreturn_t(*irq) (int irq, void *ipc);
++ int flags;
++};
++
++/*
++ * defines specific to ipc_driver and
++ * not exposed outside
++ */
++
++/*cmd_ID fields for CCA Read/Writes*/
++
++#define CCA_REG_WRITE 0x0000
++#define CCA_REG_READ 0x0001
++#define CCA_REG_GET_PROP 0x0002
++
++#define IPC_SET_WATCHDOG_TIMER 0xF8
++#define IPC_CCA_CMD_READ_WRITE 0xEF
++#define IPC_DEVICE_FIRMWARE_UPGRADE 0xFE
++#define IPC_PMIC_CMD_READ_WRITE 0xFF
++#define IPC_GET_FW_VERSION 0xF4
++
++/*cmd_ID fields for CCA Read/Writes*/
++#define PMIC_REG_WRITE 0x0000
++#define PMIC_REG_READ 0x0001
++#define PMIC_REG_READ_MODIFY 0x0002
++#define LPE_READ 0x0003
++#define LPE_WRITE 0x0004
++
++#define IPC_CMD_GO_TO_DFU_MODE 0x0001
++#define IPC_CMD_UPDATE_FW 0x0002
++#define IPC_CMD_FORCE_UPDATE_FW 0x0003
++
++#define NORMAL_WRITE 0x00
++#define MESSAGE_WRITE 0x01
++#define INDIRECT_READ 0x02
++#define INDIRECT_WRITE 0x05
++#define READ_DMA 0x04
++
++
++/* Used to override user option */
++#define IOC 1
++
++#define IPC_REG_ISR_FAILED 0xFF
++
++/*
++ * IO remap functions for PMIC Register reads
++ * and writes.
++ */
++
++#ifdef UNIT_TEST
++#define IOREMAP(x, y) \
++ kmalloc((y), GFP_KERNEL);
++
++#define IOUNMAP(x) \
++ kfree((x));
++
++#define IOREAD32(x) \
++ *(u32 *) (x);
++
++#define IOWRITE32(x, y) \
++ *(u32 *) (y) = x;
++#else
++
++#define IOREMAP(x, y) \
++ ioremap_nocache((x), (y));
++
++#define IOUNMAP(x) \
++ iounmap((x));
++
++#define IOREAD32(x) \
++ ioread32((x));
++
++#define IOWRITE32(x, y) \
++ iowrite32((x), (y));
++
++#endif
++
++/*********************************************
++ * Define IPC_Base_Address and offsets
++ ********************************************/
++#define IPC_BASE_ADDRESS 0xFF11C000
++#define I2C_SER_BUS 0xFF12B000
++#define DFU_LOAD_ADDR 0xFFFC0000
++/*256K storage size for loading the FW image.*/
++#define MAX_FW_SIZE 262144
++
++#define NOP_CMD 0x00
++#define WRITE_CMD 0x01
++#define READ_CMD 0x02
++
++/* IPC2 offset addresses */
++#define IPC_MAX_ADDRESS 0x100
++/* I2C offset addresses - Confirm this */
++#define I2C_MAX_ADDRESS 0x10
++/* Offsets for CTRL_REG_ADDR and CTRL_REG_DATA */
++#define CTRL_REG_ADDR 0x00
++#define CTRL_REG_DATA 0x04
++#define I2C_MAX_ADDRESS 0x10
++
++#define IPC_CMD 0x00
++#define IPC_STS 0x04
++#define IPC_SPTR 0x08
++#define IPC_DPTR 0x0C
++#define IPC_WBUF 0x80
++#define IPC_RBUF 0x90
++
++#define MAX_INSTANCES_ALLOWED 1
++
++union ipc_sts {
++ struct {
++ u32 busy:1;
++ u32 error:1;
++ u32 rfu1:2;
++ u32 cmd_id:4;
++ u32 initiator_id:8;
++ u32 error_code:8;
++ u32 rfu3:8;
++ } ipc_sts_parts;
++ u32 ipc_sts_data;
++};
++
++union ipc_fw_cmd {
++ struct {
++ u32 cmd:8;
++ u32 ioc:1;
++ u32 rfu1:3;
++ u32 cmd_ID:4;
++ u32 size:8;
++ u32 rfu2:8;
++ } cmd_parts;
++ u32 cmd_data;
++};
++
++struct ipc_intr {
++ u8 cmd;
++ u32 data;
++
++};
++
++struct ipc_work_struct{
++ struct work_struct ipc_work;
++ unsigned int cmd_id;
++};
++
++int ipc_process_interrupt(struct ipc_intr intr_data);
++int init_ipc_driver(void);
++int de_init_ipc_driver(void);
++static int cache_mrst_firmware_version(void);
++static void mrst_pmic_read_handler(struct work_struct *work);
++static DECLARE_DELAYED_WORK(mrst_ipc, mrst_pmic_read_handler);
++
++#endif
+Index: linux-2.6.33/drivers/input/keyboard/gpio_keys.c
+===================================================================
+--- linux-2.6.33.orig/drivers/input/keyboard/gpio_keys.c
++++ linux-2.6.33/drivers/input/keyboard/gpio_keys.c
+@@ -45,6 +45,9 @@ static void gpio_keys_report_event(struc
+ int state = (gpio_get_value(button->gpio) ? 1 : 0) ^ button->active_low;
+
+ input_event(input, type, button->code, !!state);
++ /* if button disabled auto repeat */
++ if (state && test_bit(EV_REP, input->evbit) && button->norep)
++ input_event(input, type, button->code, 0);
+ input_sync(input);
+ }
+
+Index: linux-2.6.33/include/linux/gpio_keys.h
+===================================================================
+--- linux-2.6.33.orig/include/linux/gpio_keys.h
++++ linux-2.6.33/include/linux/gpio_keys.h
+@@ -10,6 +10,7 @@ struct gpio_keys_button {
+ int type; /* input event type (EV_KEY, EV_SW) */
+ int wakeup; /* configure the button as a wake-up source */
+ int debounce_interval; /* debounce ticks interval in msecs */
++ unsigned int norep:1; /* more precise auto repeat control */
+ };
+
+ struct gpio_keys_platform_data {
+Index: linux-2.6.33/drivers/gpio/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/gpio/Kconfig
++++ linux-2.6.33/drivers/gpio/Kconfig
+@@ -224,6 +224,12 @@ config GPIO_TIMBERDALE
+
+ comment "SPI GPIO expanders:"
+
++config GPIO_LANGWELL_PMIC
++ bool "Intel Moorestown Platform Langwell GPIO support"
++ depends on SPI_MASTER
++ help
++ Say Y here to support Intel Moorestown platform GPIO.
++
+ config GPIO_MAX7301
+ tristate "Maxim MAX7301 GPIO expander"
+ depends on SPI_MASTER
+Index: linux-2.6.33/drivers/gpio/Makefile
+===================================================================
+--- linux-2.6.33.orig/drivers/gpio/Makefile
++++ linux-2.6.33/drivers/gpio/Makefile
+@@ -7,6 +7,7 @@ obj-$(CONFIG_GPIOLIB) += gpiolib.o
+ obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o
+ obj-$(CONFIG_GPIO_ADP5588) += adp5588-gpio.o
+ obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o
++obj-$(CONFIG_GPIO_LANGWELL_PMIC) += langwell_pmic_gpio.o
+ obj-$(CONFIG_GPIO_MAX7301) += max7301.o
+ obj-$(CONFIG_GPIO_MAX732X) += max732x.o
+ obj-$(CONFIG_GPIO_MC33880) += mc33880.o
+Index: linux-2.6.33/drivers/gpio/langwell_pmic_gpio.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/gpio/langwell_pmic_gpio.c
+@@ -0,0 +1,331 @@
++/* Moorestown PMIC GPIO (access through SPI and IPC) driver
++ * Copyright (c) 2008 - 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* Supports:
++ * Moorestown platform pmic chip
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/stddef.h>
++#include <linux/ioport.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/device.h>
++#include <linux/pci.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/langwell_pmic_gpio.h>
++#include <linux/gpio.h>
++#include <asm/ipc_defs.h>
++
++/* register offset that IPC driver should use
++ * 8 GPIO + 8 GPOSW + 8GPO
++ */
++enum pmic_gpio_register {
++ GPIO0 = 0xE0,
++ GPIO7 = 0xE7,
++ GPIOINT = 0xE8,
++ GPOSWCTL0 = 0xEC,
++ GPOSWCTL5 = 0xF1,
++ GPO = 0xF4,
++};
++
++/* bits definitions for GPIO & GPOSW */
++#define GPIO_DRV 0x01
++#define GPIO_DIR 0x02
++#define GPIO_DIN 0x04
++#define GPIO_DOU 0x08
++#define GPIO_INTCTL 0x30
++#define GPIO_DBC 0xc0
++
++#define GPOSW_DRV 0x01
++#define GPOSW_DOU 0x08
++#define GPOSW_RDRV 0x30
++
++/* to schedule ipc read_modify in work queue for irq context */
++#define MAX_IPC_QUEUE 16
++struct ipc_cmd_queue {
++ struct ipc_pmic_mod_reg_data cmd[MAX_IPC_QUEUE];
++ struct work_struct work;
++};
++
++struct pmic_gpio {
++ struct gpio_chip chip;
++ struct ipc_cmd_queue cmd_queue;
++ void *gpiointr;
++ int irq;
++ struct spi_device *spi;
++ unsigned irq_base;
++};
++
++static int ipc_read_char(u16 offset)
++{
++ struct ipc_pmic_reg_data tmp;
++ tmp.ioc = 0;
++ tmp.pmic_reg_data[0].register_address = offset;
++ tmp.num_entries = 1;
++ if (ipc_pmic_register_read(&tmp)) {
++ printk(KERN_ERR "%s: IPC read error\n", __func__);
++ return 0;
++ }
++ return tmp.pmic_reg_data[0].value;
++}
++
++static int ipc_modify_char(u16 offset, u8 value, u8 mask)
++{
++ struct ipc_pmic_mod_reg_data tmp;
++
++ tmp.ioc = 0;
++ tmp.pmic_mod_reg_data[0].register_address = offset;
++ tmp.pmic_mod_reg_data[0].value = value;
++ tmp.pmic_mod_reg_data[0].bit_map = mask;
++ tmp.num_entries = 1;
++ return ipc_pmic_register_read_modify(&tmp);
++}
++
++static int queue_ipc_modify_char(struct pmic_gpio *pg,
++ u16 offset, u8 value, u8 mask)
++{
++ struct ipc_pmic_mod_reg_data *tmp;
++ int i;
++
++ for (i = 0; i < MAX_IPC_QUEUE; i ++) {
++ tmp = &pg->cmd_queue.cmd[i];
++ if (tmp->num_entries)
++ continue;
++ tmp->ioc = 0;
++ tmp->pmic_mod_reg_data[0].register_address = offset;
++ tmp->pmic_mod_reg_data[0].value = value;
++ tmp->pmic_mod_reg_data[0].bit_map = mask;
++ tmp->num_entries=1;
++ return i;
++ }
++ return -1;
++}
++
++static void ipc_modify_char_work(struct work_struct *work)
++{
++ struct pmic_gpio *pg =
++ container_of(work, struct pmic_gpio, cmd_queue.work);
++ struct ipc_pmic_mod_reg_data *tmp;
++ int i;
++
++ for (i = 0; i < MAX_IPC_QUEUE; i ++) {
++ tmp = &pg->cmd_queue.cmd[i];
++ if (tmp->num_entries) {
++ ipc_pmic_register_read_modify(tmp);
++ tmp->num_entries = 0;
++ }
++ }
++}
++
++static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
++{
++ if (offset > 8) {
++ printk(KERN_ERR
++ "%s: only pin 0-7 support input\n", __func__);
++ return -1;/* we only have 8 GPIO can use as input */
++ }
++ return ipc_modify_char(GPIO0 + offset, GPIO_DIR, GPIO_DIR);
++}
++
++static int pmic_gpio_direction_output(struct gpio_chip *chip,
++ unsigned offset, int value)
++{
++ int rc = 0;
++
++ if (offset < 8)/* it is GPIO */
++ rc = ipc_modify_char(GPIO0 + offset,
++ GPIO_DRV | (value ? GPIO_DOU : 0),
++ GPIO_DRV | GPIO_DOU | GPIO_DIR);
++ else if (offset < 16)/* it is GPOSW */
++ rc = ipc_modify_char(GPOSWCTL0 + offset - 8,
++ GPOSW_DRV | (value ? GPOSW_DOU : 0),
++ GPOSW_DRV | GPOSW_DOU | GPOSW_RDRV);
++ else if (offset < 24)/* it is GPO */
++ rc = ipc_modify_char(GPO, value ? 1 << (offset - 16) : 0,
++ 1 << (offset - 16));
++
++ return rc;
++}
++
++static int pmic_gpio_get(struct gpio_chip *chip, unsigned offset)
++{
++ /* we only have 8 GPIO can use as input */
++ if (offset > 8) {
++ printk(KERN_ERR
++ "%s: only pin 0-7 support input\n", __func__);
++ return -1;
++ }
++ return ipc_read_char(GPIO0 + offset) & GPIO_DIN;
++}
++
++static void pmic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
++{
++ if (offset < 8)/* it is GPIO */
++ ipc_modify_char(GPIO0 + offset,
++ GPIO_DRV | (value ? GPIO_DOU : 0),
++ GPIO_DRV | GPIO_DOU);
++ else if (offset < 16)/* it is GPOSW */
++ ipc_modify_char(GPOSWCTL0 + offset - 8,
++ GPOSW_DRV | (value ? GPOSW_DOU : 0),
++ GPOSW_DRV | GPOSW_DOU | GPOSW_RDRV);
++ else if (offset < 24)/* it is GPO */
++ ipc_modify_char(GPO, value ? 1 << (offset - 16) : 0,
++ 1 << (offset - 16));
++}
++
++static int pmic_irq_type(unsigned irq, unsigned type)
++{
++ struct pmic_gpio *pg = get_irq_chip_data(irq);
++ u32 gpio = irq - pg->irq_base;
++
++ if (gpio < 0 || gpio > pg->chip.ngpio)
++ return -EINVAL;
++
++ if (type & IRQ_TYPE_EDGE_RISING)
++ queue_ipc_modify_char(pg, GPIO0 + gpio, 0x20, 0x20);
++ else
++ queue_ipc_modify_char(pg, GPIO0 + gpio, 0x00, 0x20);
++
++ if (type & IRQ_TYPE_EDGE_FALLING)
++ queue_ipc_modify_char(pg, GPIO0 + gpio, 0x10, 0x10);
++ else
++ queue_ipc_modify_char(pg, GPIO0 + gpio, 0x00, 0x10);
++
++ schedule_work(&pg->cmd_queue.work);
++ return 0;
++};
++
++static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
++{
++ struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip);
++
++ return pg->irq_base + offset;
++}
++
++/* the gpiointr register is read-clear, so just do nothing. */
++static void pmic_irq_unmask(unsigned irq)
++{
++};
++
++static void pmic_irq_mask(unsigned irq)
++{
++};
++
++static struct irq_chip pmic_irqchip = {
++ .name = "PMIC-GPIO",
++ .mask = pmic_irq_mask,
++ .unmask = pmic_irq_unmask,
++ .set_type = pmic_irq_type,
++};
++
++static void pmic_irq_handler(unsigned irq, struct irq_desc *desc)
++{
++ struct pmic_gpio *pg = (struct pmic_gpio *)get_irq_data(irq);
++ u8 intsts = *((u8 *)pg->gpiointr + 4);
++ int gpio;
++
++ for (gpio = 0; gpio < 8; gpio++) {
++ if (intsts & (1 << gpio)) {
++ pr_debug("pmic pin %d triggered\n", gpio);
++ generic_handle_irq(pg->irq_base + gpio);
++ }
++ }
++ desc->chip->eoi(irq);
++}
++
++static int __devinit pmic_gpio_probe(struct spi_device *spi)
++{
++ struct pmic_gpio *pg;
++ struct langwell_pmic_gpio_platform_data *pdata;
++ int retval;
++ int i;
++
++ printk(KERN_INFO "%s: PMIC GPIO driver loaded.\n", __func__);
++
++ pdata = spi->dev.platform_data;
++ if (!pdata || !pdata->gpio_base || !pdata->irq_base) {
++ dev_dbg(&spi->dev, "incorrect or missing platform data\n");
++ return -EINVAL;
++ }
++
++ pg = kzalloc(sizeof(*pg), GFP_KERNEL);
++ if (!pg)
++ return -ENOMEM;
++
++ dev_set_drvdata(&spi->dev, pg);
++
++ pg->irq = spi->irq;
++ /* setting up SRAM mapping for GPIOINT register */
++ pg->gpiointr = ioremap_nocache(pdata->gpiointr, 8);
++ if (!pg->gpiointr) {
++ printk(KERN_ERR "%s: Can not map GPIOINT.\n", __func__);
++ retval = -EINVAL;
++ goto err2;
++ }
++ pg->irq_base = pdata->irq_base;
++ pg->chip.label = "langwell_pmic";
++ pg->chip.direction_input = pmic_gpio_direction_input;
++ pg->chip.direction_output = pmic_gpio_direction_output;
++ pg->chip.get = pmic_gpio_get;
++ pg->chip.set = pmic_gpio_set;
++ pg->chip.to_irq = pmic_gpio_to_irq;
++ pg->chip.base = pdata->gpio_base;
++ pg->chip.ngpio = 24;
++ pg->chip.can_sleep = 1;
++ pg->chip.dev = &spi->dev;
++ retval = gpiochip_add(&pg->chip);
++ if (retval) {
++ printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__);
++ goto err;
++ }
++ set_irq_data(pg->irq, pg);
++ set_irq_chained_handler(pg->irq, pmic_irq_handler);
++ for (i = 0; i < 8; i++) {
++ set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip,
++ handle_simple_irq, "demux");
++ set_irq_chip_data(i + pg->irq_base, pg);
++ }
++ INIT_WORK(&pg->cmd_queue.work, ipc_modify_char_work);
++ return 0;
++err:
++ iounmap(pg->gpiointr);
++err2:
++ kfree(pg);
++ return retval;
++}
++
++static struct spi_driver pmic_gpio_driver = {
++ .driver = {
++ .name = "pmic_gpio",
++ .owner = THIS_MODULE,
++ },
++ .probe = pmic_gpio_probe,
++};
++
++static int __init pmic_gpio_init(void)
++{
++ return spi_register_driver(&pmic_gpio_driver);
++}
++
++/* register after spi postcore initcall and before
++ * subsys initcalls that may rely on these GPIOs
++ */
++subsys_initcall(pmic_gpio_init);
+Index: linux-2.6.33/include/linux/spi/langwell_pmic_gpio.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/include/linux/spi/langwell_pmic_gpio.h
+@@ -0,0 +1,15 @@
++#ifndef LINUX_SPI_LANGWELL_PMIC_H
++#define LINUX_SPI_LANGWELL_PMIC_H
++
++struct langwell_pmic_gpio_platform_data {
++ /* the first IRQ of the chip */
++ unsigned irq_base;
++ /* number assigned to the first GPIO */
++ unsigned gpio_base;
++ /* sram address for gpiointr register, the langwell chip will map
++ * the PMIC spi GPIO expander's GPIOINTR register in sram.
++ */
++ unsigned gpiointr;
++};
++
++#endif
+Index: linux-2.6.33/drivers/gpio/pca953x.c
+===================================================================
+--- linux-2.6.33.orig/drivers/gpio/pca953x.c
++++ linux-2.6.33/drivers/gpio/pca953x.c
+@@ -14,6 +14,7 @@
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/gpio.h>
++#include <linux/interrupt.h>
+ #include <linux/i2c.h>
+ #include <linux/i2c/pca953x.h>
+ #ifdef CONFIG_OF_GPIO
+@@ -50,6 +51,7 @@ MODULE_DEVICE_TABLE(i2c, pca953x_id);
+
+ struct pca953x_chip {
+ unsigned gpio_start;
++ unsigned irq_base;
+ uint16_t reg_output;
+ uint16_t reg_direction;
+
+@@ -182,6 +184,13 @@ static void pca953x_gpio_set_value(struc
+ chip->reg_output = reg_val;
+ }
+
++static int pca953x_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
++{
++ struct pca953x_chip *chip = container_of(gc, struct pca953x_chip,
++ gpio_chip);
++ return chip->irq_base + offset;
++}
++
+ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
+ {
+ struct gpio_chip *gc;
+@@ -192,6 +201,7 @@ static void pca953x_setup_gpio(struct pc
+ gc->direction_output = pca953x_gpio_direction_output;
+ gc->get = pca953x_gpio_get_value;
+ gc->set = pca953x_gpio_set_value;
++ gc->to_irq = pca953x_gpio_to_irq;
+ gc->can_sleep = 1;
+
+ gc->base = chip->gpio_start;
+@@ -250,6 +260,39 @@ pca953x_get_alt_pdata(struct i2c_client
+ }
+ #endif
+
++static void pca953x_irq_unmask(unsigned irq)
++{
++}
++
++static void pca953x_irq_mask(unsigned irq)
++{
++}
++
++static struct irq_chip pca953x_irqchip = {
++ .name = "pca953x",
++ .mask = pca953x_irq_mask,
++ .unmask = pca953x_irq_unmask,
++};
++
++static void pca953x_irq_handler(unsigned irq, struct irq_desc *desc)
++{
++ struct pca953x_chip *chip = (struct pca953x_chip *)get_irq_data(irq);
++ int i;
++
++ if (desc->chip->ack)
++ desc->chip->ack(irq);
++ /* we must call all sub-irqs, since there is no way to read
++ * I2C gpio expander's status in irq context. The driver itself
++ * would be responsible for checking whether the irq is for it.
++ */
++ for (i = 0; i < chip->gpio_chip.ngpio; i++)
++ if (chip->reg_direction & (1u << i))
++ generic_handle_irq(chip->irq_base + i);
++
++ if (desc->chip->unmask)
++ desc->chip->unmask(irq);
++}
++
+ static int __devinit pca953x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+ {
+@@ -283,6 +326,8 @@ static int __devinit pca953x_probe(struc
+
+ chip->names = pdata->names;
+
++ chip->irq_base = pdata->irq_base;
++
+ /* initialize cached registers from their original values.
+ * we can't share this chip with another i2c master.
+ */
+@@ -314,6 +359,21 @@ static int __devinit pca953x_probe(struc
+ }
+
+ i2c_set_clientdata(client, chip);
++
++ if (chip->irq_base != (unsigned)-1) {
++ int i;
++
++ set_irq_type(client->irq,
++ IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING);
++ set_irq_data(client->irq, chip);
++ for (i = 0; i < chip->gpio_chip.ngpio; i++) {
++ set_irq_chip(i + chip->irq_base, &pca953x_irqchip);
++ __set_irq_handler(i + chip->irq_base,
++ handle_simple_irq, 0, "demux");
++ set_irq_chip_data(i + chip->irq_base, chip);
++ }
++ set_irq_chained_handler(client->irq, pca953x_irq_handler);
++ }
+ return 0;
+
+ out_failed:
+Index: linux-2.6.33/include/linux/i2c/pca953x.h
+===================================================================
+--- linux-2.6.33.orig/include/linux/i2c/pca953x.h
++++ linux-2.6.33/include/linux/i2c/pca953x.h
+@@ -1,6 +1,8 @@
+ /* platform data for the PCA9539 16-bit I/O expander driver */
+
+ struct pca953x_platform_data {
++ /* number of the first IRQ */
++ unsigned irq_base;
+ /* number of the first GPIO */
+ unsigned gpio_base;
+
+Index: linux-2.6.33/drivers/input/keyboard/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/input/keyboard/Kconfig
++++ linux-2.6.33/drivers/input/keyboard/Kconfig
+@@ -73,7 +73,7 @@ config KEYBOARD_ATKBD
+ default y
+ select SERIO
+ select SERIO_LIBPS2
+- select SERIO_I8042 if X86
++ select SERIO_I8042 if X86 && !X86_MRST
+ select SERIO_GSCPS2 if GSC
+ help
+ Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
+Index: linux-2.6.33/drivers/input/mouse/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/input/mouse/Kconfig
++++ linux-2.6.33/drivers/input/mouse/Kconfig
+@@ -17,7 +17,7 @@ config MOUSE_PS2
+ default y
+ select SERIO
+ select SERIO_LIBPS2
+- select SERIO_I8042 if X86
++ select SERIO_I8042 if X86 && !X86_MRST
+ select SERIO_GSCPS2 if GSC
+ help
+ Say Y here if you have a PS/2 mouse connected to your system. This
+Index: linux-2.6.33/kernel/time/tick-broadcast.c
+===================================================================
+--- linux-2.6.33.orig/kernel/time/tick-broadcast.c
++++ linux-2.6.33/kernel/time/tick-broadcast.c
+@@ -214,10 +214,13 @@ static void tick_do_broadcast_on_off(uns
+
+ raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
+
++ bc = tick_broadcast_device.evtdev;
++ if (!bc)
++ goto out;
++
+ cpu = smp_processor_id();
+ td = &per_cpu(tick_cpu_device, cpu);
+ dev = td->evtdev;
+- bc = tick_broadcast_device.evtdev;
+
+ /*
+ * Is the device not affected by the powerstate ?
+@@ -467,6 +470,9 @@ void tick_broadcast_oneshot_control(unsi
+ goto out;
+
+ bc = tick_broadcast_device.evtdev;
++ if (!bc)
++ goto out;
++
+ cpu = smp_processor_id();
+ td = &per_cpu(tick_cpu_device, cpu);
+ dev = td->evtdev;
+Index: linux-2.6.33/drivers/usb/core/hcd.h
+===================================================================
+--- linux-2.6.33.orig/drivers/usb/core/hcd.h
++++ linux-2.6.33/drivers/usb/core/hcd.h
+@@ -104,6 +104,9 @@ struct usb_hcd {
+ unsigned wireless:1; /* Wireless USB HCD */
+ unsigned authorized_default:1;
+ unsigned has_tt:1; /* Integrated TT in root hub */
++ unsigned has_sram:1; /* Local SRAM for caching */
++ unsigned sram_no_payload:1; /* sram not for payload */
++ unsigned lpm_cap:1; /* LPM capable */
+
+ int irq; /* irq allocated */
+ void __iomem *regs; /* device memory/io */
+@@ -148,6 +151,13 @@ struct usb_hcd {
+ * (ohci 32, uhci 1024, ehci 256/512/1024).
+ */
+
++#ifdef CONFIG_USB_OTG
++ /* some otg HCDs need this to get USB_DEVICE_ADD and USB_DEVICE_REMOVE
++ * from root hub, we do not want to use USB notification chain, since
++ * it would be overkill to use high level notification.
++ */
++ void (*otg_notify) (struct usb_device *udev, unsigned action);
++#endif
+ /* The HC driver's private data is stored at the end of
+ * this structure.
+ */
+Index: linux-2.6.33/drivers/usb/core/hub.c
+===================================================================
+--- linux-2.6.33.orig/drivers/usb/core/hub.c
++++ linux-2.6.33/drivers/usb/core/hub.c
+@@ -1563,6 +1563,24 @@ static void hub_free_dev(struct usb_devi
+ hcd->driver->free_dev(hcd, udev);
+ }
+
++#ifdef CONFIG_USB_OTG
++
++static void otg_notify(struct usb_device *udev, unsigned action)
++{
++ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
++
++ if (hcd->otg_notify)
++ hcd->otg_notify(udev, action);
++}
++
++#else
++
++static inline void otg_notify(struct usb_device *udev, unsigned action)
++{
++}
++
++#endif
++
+ /**
+ * usb_disconnect - disconnect a device (usbcore-internal)
+ * @pdev: pointer to device being disconnected
+@@ -1620,7 +1638,7 @@ void usb_disconnect(struct usb_device **
+ * notifier chain (used by usbfs and possibly others).
+ */
+ device_del(&udev->dev);
+-
++ otg_notify(udev, USB_DEVICE_REMOVE);
+ /* Free the device number and delete the parent's children[]
+ * (or root_hub) pointer.
+ */
+@@ -1833,6 +1851,7 @@ int usb_new_device(struct usb_device *ud
+ * notifier chain (used by usbfs and possibly others).
+ */
+ err = device_add(&udev->dev);
++ otg_notify(udev, USB_DEVICE_ADD);
+ if (err) {
+ dev_err(&udev->dev, "can't device_add, error %d\n", err);
+ goto fail;
+Index: linux-2.6.33/drivers/usb/core/usb.h
+===================================================================
+--- linux-2.6.33.orig/drivers/usb/core/usb.h
++++ linux-2.6.33/drivers/usb/core/usb.h
+@@ -178,4 +178,3 @@ extern void usb_notify_add_device(struct
+ extern void usb_notify_remove_device(struct usb_device *udev);
+ extern void usb_notify_add_bus(struct usb_bus *ubus);
+ extern void usb_notify_remove_bus(struct usb_bus *ubus);
+-
+Index: linux-2.6.33/drivers/usb/host/ehci-hcd.c
+===================================================================
+--- linux-2.6.33.orig/drivers/usb/host/ehci-hcd.c
++++ linux-2.6.33/drivers/usb/host/ehci-hcd.c
+@@ -35,6 +35,7 @@
+ #include <linux/moduleparam.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/debugfs.h>
++#include <linux/uaccess.h>
+
+ #include "../core/hcd.h"
+
+@@ -43,6 +44,8 @@
+ #include <asm/irq.h>
+ #include <asm/system.h>
+ #include <asm/unaligned.h>
++#include <linux/usb/otg.h>
++#include <linux/usb/langwell_otg.h>
+
+ /*-------------------------------------------------------------------------*/
+
+@@ -101,6 +104,11 @@ static int ignore_oc = 0;
+ module_param (ignore_oc, bool, S_IRUGO);
+ MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
+
++/* for link power management(LPM) feature */
++static unsigned int hird;
++module_param(hird, int, S_IRUGO);
++MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us\n");
++
+ #define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
+
+ /*-------------------------------------------------------------------------*/
+@@ -305,6 +313,7 @@ static void end_unlink_async(struct ehci
+ static void ehci_work(struct ehci_hcd *ehci);
+
+ #include "ehci-hub.c"
++#include "ehci-lpm.c"
+ #include "ehci-mem.c"
+ #include "ehci-q.c"
+ #include "ehci-sched.c"
+@@ -501,7 +510,8 @@ static void ehci_stop (struct usb_hcd *h
+ ehci_work (ehci);
+ spin_unlock_irq (&ehci->lock);
+ ehci_mem_cleanup (ehci);
+-
++ if (hcd->has_sram)
++ sram_deinit(hcd);
+ #ifdef EHCI_STATS
+ ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
+ ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
+@@ -577,6 +587,17 @@ static int ehci_init(struct usb_hcd *hcd
+ if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
+ log2_irq_thresh = 0;
+ temp = 1 << (16 + log2_irq_thresh);
++ if (HCC_32FRAME_PERIODIC_LIST(hcc_params))
++ ehci_dbg(ehci, "32 frame periodic list capable\n");
++ if (HCC_PER_PORT_CHANGE_EVENT(hcc_params)) {
++ ehci_dbg(ehci, "enable per-port change event %d\n", park);
++ temp |= CMD_PPCEE;
++ }
++ if (HCC_HW_PREFETCH(hcc_params)) {
++ ehci_dbg(ehci, "HW prefetch capable %d\n", park);
++ temp |= (CMD_ASPE | CMD_PSPE);
++ }
++
+ if (HCC_CANPARK(hcc_params)) {
+ /* HW default park == 3, on hardware that supports it (like
+ * NVidia and ALI silicon), maximizes throughput on the async
+@@ -590,7 +611,7 @@ static int ehci_init(struct usb_hcd *hcd
+ temp |= CMD_PARK;
+ temp |= park << 8;
+ }
+- ehci_dbg(ehci, "park %d\n", park);
++ ehci_dbg(ehci, "park %d ", park);
+ }
+ if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+ /* periodic schedule size can be smaller than default */
+@@ -603,6 +624,17 @@ static int ehci_init(struct usb_hcd *hcd
+ default: BUG();
+ }
+ }
++ if (HCC_LPM(hcc_params)) {
++ /* support link power management EHCI 1.1 addendum */
++ ehci_dbg(ehci, "lpm\n");
++ hcd->lpm_cap = 1;
++ if (hird > 0xf) {
++ ehci_dbg(ehci, "hird %d invalid, use default 0",
++ hird);
++ hird = 0;
++ }
++ temp |= hird << 24;
++ }
+ ehci->command = temp;
+
+ /* Accept arbitrarily long scatter-gather lists */
+@@ -840,6 +872,7 @@ static int ehci_urb_enqueue (
+ ) {
+ struct ehci_hcd *ehci = hcd_to_ehci (hcd);
+ struct list_head qtd_list;
++ int status;
+
+ INIT_LIST_HEAD (&qtd_list);
+
+@@ -855,7 +888,16 @@ static int ehci_urb_enqueue (
+ default:
+ if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
+ return -ENOMEM;
+- return submit_async(ehci, urb, &qtd_list, mem_flags);
++ status = submit_async(ehci, urb, &qtd_list, mem_flags);
++
++ /* check device LPM cap after set address */
++ if (usb_pipecontrol(urb->pipe)) {
++ if (((struct usb_ctrlrequest *)urb->setup_packet)
++ ->bRequest == USB_REQ_SET_ADDRESS &&
++ ehci_to_hcd(ehci)->lpm_cap)
++ ehci_lpm_check(ehci, urb->dev->portnum);
++ }
++ return status;
+
+ case PIPE_INTERRUPT:
+ if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
+@@ -1101,6 +1143,10 @@ MODULE_LICENSE ("GPL");
+ #ifdef CONFIG_PCI
+ #include "ehci-pci.c"
+ #define PCI_DRIVER ehci_pci_driver
++#ifdef CONFIG_USB_LANGWELL_OTG
++#include "ehci-langwell-pci.c"
++#define LNW_OTG_HOST_DRIVER ehci_otg_driver
++#endif
+ #endif
+
+ #ifdef CONFIG_USB_EHCI_FSL
+@@ -1213,8 +1259,19 @@ static int __init ehci_hcd_init(void)
+ if (retval < 0)
+ goto clean3;
+ #endif
++
++#ifdef LNW_OTG_HOST_DRIVER
++ retval = langwell_register_host(&LNW_OTG_HOST_DRIVER);
++ if (retval < 0)
++ goto clean4;
++#endif
+ return retval;
+
++#ifdef LNW_OTG_HOST_DRIVER
++clean4:
++ langwell_unregister_host(&LNW_OTG_HOST_DRIVER);
++#endif
++
+ #ifdef OF_PLATFORM_DRIVER
+ /* of_unregister_platform_driver(&OF_PLATFORM_DRIVER); */
+ clean3:
+@@ -1255,6 +1312,9 @@ static void __exit ehci_hcd_cleanup(void
+ #ifdef PS3_SYSTEM_BUS_DRIVER
+ ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
+ #endif
++#ifdef LNW_OTG_HOST_DRIVER
++ langwell_unregister_host(&LNW_OTG_HOST_DRIVER);
++#endif
+ #ifdef DEBUG
+ debugfs_remove(ehci_debug_root);
+ #endif
+Index: linux-2.6.33/drivers/usb/host/ehci-hub.c
+===================================================================
+--- linux-2.6.33.orig/drivers/usb/host/ehci-hub.c
++++ linux-2.6.33/drivers/usb/host/ehci-hub.c
+@@ -112,6 +112,7 @@ static int ehci_bus_suspend (struct usb_
+ int port;
+ int mask;
+ u32 __iomem *hostpc_reg = NULL;
++ int rc = 0;
+
+ ehci_dbg(ehci, "suspend root hub\n");
+
+@@ -228,13 +229,18 @@ static int ehci_bus_suspend (struct usb_
+ ehci_readl(ehci, &ehci->regs->intr_enable);
+
+ ehci->next_statechange = jiffies + msecs_to_jiffies(10);
++
++#ifdef CONFIG_USB_OTG
++ if (ehci->has_otg && ehci->otg_suspend)
++ rc = ehci->otg_suspend(hcd);
++#endif
+ spin_unlock_irq (&ehci->lock);
+
+ /* ehci_work() may have re-enabled the watchdog timer, which we do not
+ * want, and so we must delete any pending watchdog timer events.
+ */
+ del_timer_sync(&ehci->watchdog);
+- return 0;
++ return rc;
+ }
+
+
+@@ -246,6 +252,7 @@ static int ehci_bus_resume (struct usb_h
+ u32 power_okay;
+ int i;
+ u8 resume_needed = 0;
++ int rc = 0;
+
+ if (time_before (jiffies, ehci->next_statechange))
+ msleep(5);
+@@ -295,7 +302,11 @@ static int ehci_bus_resume (struct usb_h
+ i = HCS_N_PORTS (ehci->hcs_params);
+ while (i--) {
+ temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
+- temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
++ temp &= ~(PORT_RWC_BITS | PORT_WKDISC_E | PORT_WKOC_E);
++ if (temp & PORT_CONNECT)
++ temp |= PORT_WKOC_E | PORT_WKDISC_E;
++ else
++ temp |= PORT_WKOC_E | PORT_WKCONN_E;
+ if (test_bit(i, &ehci->bus_suspended) &&
+ (temp & PORT_SUSPEND)) {
+ temp |= PORT_RESUME;
+@@ -340,9 +351,13 @@ static int ehci_bus_resume (struct usb_h
+ /* Now we can safely re-enable irqs */
+ ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
+
++#ifdef CONFIG_USB_OTG
++ if (ehci->has_otg && ehci->otg_resume)
++ rc = ehci->otg_resume(hcd);
++#endif
+ spin_unlock_irq (&ehci->lock);
+ ehci_handover_companion_ports(ehci);
+- return 0;
++ return rc;
+ }
+
+ #else
+@@ -678,10 +693,20 @@ static int ehci_hub_control (
+ if (temp & PORT_SUSPEND) {
+ if ((temp & PORT_PE) == 0)
+ goto error;
+- /* resume signaling for 20 msec */
+- temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
++ /* clear phy low power mode before resume */
++ if (hostpc_reg) {
++ temp1 = ehci_readl(ehci, hostpc_reg);
++ ehci_writel(ehci, temp1 & ~HOSTPC_PHCD,
++ hostpc_reg);
++ mdelay(5);
++ }
++ /* after PORT_PE check, the port must be
++ connected, set correct wakeup bits */
++ temp &= ~PORT_WKCONN_E;
++ temp |= PORT_WKDISC_E | PORT_WKOC_E;
+ ehci_writel(ehci, temp | PORT_RESUME,
+ status_reg);
++ /* resume signaling for 20 msec */
+ ehci->reset_done [wIndex] = jiffies
+ + msecs_to_jiffies (20);
+ }
+@@ -696,6 +721,23 @@ static int ehci_hub_control (
+ status_reg);
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
++ /*
++ * for connection change, we need to enable
++ * appropriate wake bits.
++ */
++ temp |= PORT_WKOC_E;
++ if (temp & PORT_CONNECT) {
++ temp |= PORT_WKDISC_E;
++ temp &= ~PORT_WKCONN_E;
++ } else {
++ temp &= ~PORT_WKDISC_E;
++ temp |= PORT_WKCONN_E;
++ }
++ if (ehci_to_hcd(ehci)->lpm_cap) {
++ /* clear PORTSC bits on disconnect */
++ temp &= ~PORT_LPM;
++ temp &= ~PORT_DEV_ADDR;
++ }
+ ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_CSC,
+ status_reg);
+ break;
+Index: linux-2.6.33/drivers/usb/host/ehci-langwell-pci.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/usb/host/ehci-langwell-pci.c
+@@ -0,0 +1,195 @@
++/*
++ * Intel Moorestown Platform Langwell OTG EHCI Controller PCI Bus Glue.
++ *
++ * Copyright (c) 2008 - 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License 2 as published by the
++ * Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software Foundation,
++ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++static int usb_otg_suspend(struct usb_hcd *hcd)
++{
++ struct otg_transceiver *otg;
++ struct langwell_otg *iotg;
++
++ otg = otg_get_transceiver();
++ if (otg == NULL) {
++ printk(KERN_ERR "%s Failed to get otg transceiver\n", __func__);
++ return -EINVAL;
++ }
++ iotg = container_of(otg, struct langwell_otg, otg);
++ printk(KERN_INFO "%s OTG HNP update suspend\n", __func__);
++ if (iotg->otg.default_a)
++ iotg->hsm.a_suspend_req = 1;
++ else
++ iotg->hsm.b_bus_req = 0;
++ langwell_update_transceiver();
++ otg_put_transceiver(otg);
++ return 0;
++}
++
++static int usb_otg_resume(struct usb_hcd *hcd)
++{
++ struct otg_transceiver *otg;
++ struct langwell_otg *iotg;
++
++ otg = otg_get_transceiver();
++ if (otg == NULL) {
++ printk(KERN_ERR "%s Failed to get otg transceiver\n", __func__);
++ return -EINVAL;
++ }
++ iotg = container_of(otg, struct langwell_otg, otg);
++ printk(KERN_INFO "%s OTG HNP update resume\n", __func__);
++ if (iotg->otg.default_a) {
++ iotg->hsm.b_bus_resume = 1;
++ langwell_update_transceiver();
++ }
++ otg_put_transceiver(otg);
++ return 0;
++}
++
++/* the root hub will call this callback when device added/removed */
++static void otg_notify(struct usb_device *udev, unsigned action)
++{
++ struct otg_transceiver *otg;
++ struct langwell_otg *iotg;
++
++ otg = otg_get_transceiver();
++ if (otg == NULL) {
++ printk(KERN_ERR "%s Failed to get otg transceiver\n", __func__);
++ return;
++ }
++ iotg = container_of(otg, struct langwell_otg, otg);
++
++ switch (action) {
++ case USB_DEVICE_ADD:
++ pr_debug("Notify OTG HNP add device\n");
++ if (iotg->otg.default_a == 1)
++ iotg->hsm.b_conn = 1;
++ else
++ iotg->hsm.a_conn = 1;
++ break;
++ case USB_DEVICE_REMOVE:
++ pr_debug("Notify OTG HNP delete device\n");
++ if (iotg->otg.default_a == 1)
++ iotg->hsm.b_conn = 0;
++ else
++ iotg->hsm.a_conn = 0;
++ break;
++ default:
++ otg_put_transceiver(otg);
++ return ;
++ }
++ if (spin_trylock(&iotg->wq_lock)) {
++ langwell_update_transceiver();
++ spin_unlock(&iotg->wq_lock);
++ }
++ otg_put_transceiver(otg);
++ return;
++}
++
++static int ehci_langwell_probe(struct pci_dev *pdev,
++ const struct pci_device_id *id)
++{
++ struct hc_driver *driver;
++ struct langwell_otg *iotg;
++ struct otg_transceiver *otg;
++ struct usb_hcd *hcd;
++ struct ehci_hcd *ehci;
++ int irq;
++ int retval;
++
++ pr_debug("initializing Langwell USB OTG Host Controller\n");
++
++ /* we need not call pci_enable_dev since otg transceiver already take
++ * the control of this device and this probe actually gets called by
++ * otg transceiver driver with HNP protocol.
++ */
++ irq = pdev->irq;
++
++ if (!id)
++ return -EINVAL;
++ driver = (struct hc_driver *)id->driver_data;
++ if (!driver)
++ return -EINVAL;
++
++ hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
++ if (!hcd) {
++ retval = -ENOMEM;
++ goto err1;
++ }
++
++ hcd->self.otg_port = 1;
++ ehci = hcd_to_ehci(hcd);
++ /* this will be called in ehci_bus_suspend and ehci_bus_resume */
++ ehci->otg_suspend = usb_otg_suspend;
++ ehci->otg_resume = usb_otg_resume;
++ /* this will be called by root hub code */
++ hcd->otg_notify = otg_notify;
++ otg = otg_get_transceiver();
++ if (otg == NULL) {
++ printk(KERN_ERR "%s Failed to get otg transceiver\n", __func__);
++ retval = -EINVAL;
++ goto err1;
++ }
++ iotg = container_of(otg, struct langwell_otg, otg);
++ hcd->regs = iotg->regs;
++ hcd->rsrc_start = pci_resource_start(pdev, 0);
++ hcd->rsrc_len = pci_resource_len(pdev, 0);
++
++ if (hcd->regs == NULL) {
++ dev_dbg(&pdev->dev, "error mapping memory\n");
++ retval = -EFAULT;
++ goto err2;
++ }
++ retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
++ if (retval != 0)
++ goto err2;
++ retval = otg_set_host(otg, &hcd->self);
++ if (!otg->default_a)
++ hcd->self.is_b_host = 1;
++ otg_put_transceiver(otg);
++ return retval;
++
++err2:
++ usb_put_hcd(hcd);
++err1:
++ dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), retval);
++ return retval;
++}
++
++void ehci_langwell_remove(struct pci_dev *dev)
++{
++ struct usb_hcd *hcd = pci_get_drvdata(dev);
++
++ if (!hcd)
++ return;
++ usb_remove_hcd(hcd);
++ usb_put_hcd(hcd);
++}
++
++/* Langwell OTG EHCI driver */
++static struct pci_driver ehci_otg_driver = {
++ .name = "ehci-langwell",
++ .id_table = pci_ids,
++
++ .probe = ehci_langwell_probe,
++ .remove = ehci_langwell_remove,
++
++#ifdef CONFIG_PM_SLEEP
++ .driver = {
++ .pm = &usb_hcd_pci_pm_ops
++ },
++#endif
++ .shutdown = usb_hcd_pci_shutdown,
++};
+Index: linux-2.6.33/drivers/usb/host/ehci-pci.c
+===================================================================
+--- linux-2.6.33.orig/drivers/usb/host/ehci-pci.c
++++ linux-2.6.33/drivers/usb/host/ehci-pci.c
+@@ -41,6 +41,39 @@ static int ehci_pci_reinit(struct ehci_h
+ return 0;
+ }
+
++/* enable SRAM if sram detected */
++static void sram_init(struct usb_hcd *hcd)
++{
++ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
++ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
++
++ if (!hcd->has_sram)
++ return;
++ ehci->sram_addr = pci_resource_start(pdev, 1);
++ ehci->sram_size = pci_resource_len(pdev, 1);
++ ehci_info(ehci, "Found HCD SRAM at %x size:%x\n",
++ ehci->sram_addr, ehci->sram_size);
++ if (pci_request_region(pdev, 1, kobject_name(&pdev->dev.kobj))) {
++ ehci_warn(ehci, "SRAM request failed\n");
++ hcd->has_sram = 0;
++ } else if (!dma_declare_coherent_memory(&pdev->dev, ehci->sram_addr,
++ ehci->sram_addr, ehci->sram_size, DMA_MEMORY_MAP)) {
++ ehci_warn(ehci, "SRAM DMA declare failed\n");
++ pci_release_region(pdev, 1);
++ hcd->has_sram = 0;
++ }
++}
++
++static void sram_deinit(struct usb_hcd *hcd)
++{
++ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
++
++ if (!hcd->has_sram)
++ return;
++ dma_release_declared_memory(&pdev->dev);
++ pci_release_region(pdev, 1);
++}
++
+ /* called during probe() after chip reset completes */
+ static int ehci_pci_setup(struct usb_hcd *hcd)
+ {
+@@ -50,6 +83,7 @@ static int ehci_pci_setup(struct usb_hcd
+ u8 rev;
+ u32 temp;
+ int retval;
++ int force_otg_hc_mode = 0;
+
+ switch (pdev->vendor) {
+ case PCI_VENDOR_ID_TOSHIBA_2:
+@@ -63,6 +97,26 @@ static int ehci_pci_setup(struct usb_hcd
+ #endif
+ }
+ break;
++ case PCI_VENDOR_ID_INTEL:
++ if (pdev->device == 0x0811) {
++ ehci_info(ehci, "Detected Langwell OTG HC\n");
++ hcd->has_tt = 1;
++ ehci->has_hostpc = 1;
++#ifdef CONFIG_USB_OTG
++ ehci->has_otg = 1;
++#endif
++ force_otg_hc_mode = 1;
++ hcd->has_sram = 1;
++ hcd->sram_no_payload = 1;
++ sram_init(hcd);
++ } else if (pdev->device == 0x0806) {
++ ehci_info(ehci, "Detected Langwell MPH\n");
++ hcd->has_tt = 1;
++ ehci->has_hostpc = 1;
++ hcd->has_sram = 1;
++ hcd->sram_no_payload = 1;
++ sram_init(hcd);
++ }
+ }
+
+ ehci->caps = hcd->regs;
+@@ -98,6 +152,8 @@ static int ehci_pci_setup(struct usb_hcd
+
+ /* cache this readonly data; minimize chip reads */
+ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
++ if (force_otg_hc_mode)
++ ehci_reset(ehci);
+
+ retval = ehci_halt(ehci);
+ if (retval)
+Index: linux-2.6.33/drivers/usb/host/ehci.h
+===================================================================
+--- linux-2.6.33.orig/drivers/usb/host/ehci.h
++++ linux-2.6.33/drivers/usb/host/ehci.h
+@@ -139,8 +139,15 @@ struct ehci_hcd { /* one per controlle
+ #define OHCI_HCCTRL_LEN 0x4
+ __hc32 *ohci_hcctrl_reg;
+ unsigned has_hostpc:1;
+-
++#ifdef CONFIG_USB_OTG
++ unsigned has_otg:1; /* if it is otg host*/
++ /* otg host has additional bus_suspend and bus_resume */
++ int (*otg_suspend)(struct usb_hcd *hcd);
++ int (*otg_resume)(struct usb_hcd *hcd);
++#endif
+ u8 sbrn; /* packed release number */
++ unsigned int sram_addr;
++ unsigned int sram_size;
+
+ /* irq statistics */
+ #ifdef EHCI_STATS
+@@ -156,6 +163,7 @@ struct ehci_hcd { /* one per controlle
+ struct dentry *debug_async;
+ struct dentry *debug_periodic;
+ struct dentry *debug_registers;
++ struct dentry *debug_lpm;
+ #endif
+ };
+
+@@ -719,5 +727,10 @@ static inline u32 hc32_to_cpup (const st
+ #endif /* DEBUG */
+
+ /*-------------------------------------------------------------------------*/
+-
++#ifdef CONFIG_PCI
++static void sram_deinit(struct usb_hcd *hcd);
++#else
++static void sram_deinit(struct usb_hcd *hcd) { return; };
++#endif
++static unsigned ehci_lpm_check(struct ehci_hcd *ehci, int port);
+ #endif /* __LINUX_EHCI_HCD_H */
+Index: linux-2.6.33/include/linux/usb.h
+===================================================================
+--- linux-2.6.33.orig/include/linux/usb.h
++++ linux-2.6.33/include/linux/usb.h
+@@ -1582,6 +1582,7 @@ usb_maxpacket(struct usb_device *udev, i
+ #define USB_DEVICE_REMOVE 0x0002
+ #define USB_BUS_ADD 0x0003
+ #define USB_BUS_REMOVE 0x0004
++
+ extern void usb_register_notify(struct notifier_block *nb);
+ extern void usb_unregister_notify(struct notifier_block *nb);
+
+Index: linux-2.6.33/drivers/usb/core/buffer.c
+===================================================================
+--- linux-2.6.33.orig/drivers/usb/core/buffer.c
++++ linux-2.6.33/drivers/usb/core/buffer.c
+@@ -115,6 +115,11 @@ void *hcd_buffer_alloc(
+ return kmalloc(size, mem_flags);
+ }
+
++ /* we won't use internal SRAM as data payload, we can't get
++ any benefits from it */
++ if (hcd->has_sram && hcd->sram_no_payload)
++ return dma_alloc_coherent(NULL, size, dma, mem_flags);
++
+ for (i = 0; i < HCD_BUFFER_POOLS; i++) {
+ if (size <= pool_max [i])
+ return dma_pool_alloc(hcd->pool [i], mem_flags, dma);
+@@ -141,6 +146,11 @@ void hcd_buffer_free(
+ return;
+ }
+
++ if (hcd->has_sram && hcd->sram_no_payload) {
++ dma_free_coherent(NULL, size, addr, dma);
++ return;
++ }
++
+ for (i = 0; i < HCD_BUFFER_POOLS; i++) {
+ if (size <= pool_max [i]) {
+ dma_pool_free(hcd->pool [i], addr, dma);
+Index: linux-2.6.33/drivers/usb/host/ehci-dbg.c
+===================================================================
+--- linux-2.6.33.orig/drivers/usb/host/ehci-dbg.c
++++ linux-2.6.33/drivers/usb/host/ehci-dbg.c
+@@ -98,13 +98,18 @@ static void dbg_hcc_params (struct ehci_
+ HCC_64BIT_ADDR(params) ? " 64 bit addr" : "");
+ } else {
+ ehci_dbg (ehci,
+- "%s hcc_params %04x thresh %d uframes %s%s%s\n",
++ "%s hcc_params %04x thresh %d uframes %s%s%s%s%s%s%s\n",
+ label,
+ params,
+ HCC_ISOC_THRES(params),
+ HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
+ HCC_CANPARK(params) ? " park" : "",
+- HCC_64BIT_ADDR(params) ? " 64 bit addr" : "");
++ HCC_64BIT_ADDR(params) ? " 64 bit addr" : "",
++ HCC_LPM(params) ? " LPM" : "",
++ HCC_PER_PORT_CHANGE_EVENT(params) ? " ppce" : "",
++ HCC_HW_PREFETCH(params) ? " hw prefetch" : "",
++ HCC_32FRAME_PERIODIC_LIST(params) ?
++ " 32 peridic list" : "");
+ }
+ }
+ #else
+@@ -191,8 +196,9 @@ static int __maybe_unused
+ dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
+ {
+ return scnprintf (buf, len,
+- "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
++ "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s%s",
+ label, label [0] ? " " : "", status,
++ (status & STS_PPCE_MASK) ? " PPCE" : "",
+ (status & STS_ASS) ? " Async" : "",
+ (status & STS_PSS) ? " Periodic" : "",
+ (status & STS_RECL) ? " Recl" : "",
+@@ -210,8 +216,9 @@ static int __maybe_unused
+ dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable)
+ {
+ return scnprintf (buf, len,
+- "%s%sintrenable %02x%s%s%s%s%s%s",
++ "%s%sintrenable %02x%s%s%s%s%s%s%s",
+ label, label [0] ? " " : "", enable,
++ (enable & STS_PPCE_MASK) ? " PPCE" : "",
+ (enable & STS_IAA) ? " IAA" : "",
+ (enable & STS_FATAL) ? " FATAL" : "",
+ (enable & STS_FLR) ? " FLR" : "",
+@@ -228,9 +235,14 @@ static int
+ dbg_command_buf (char *buf, unsigned len, const char *label, u32 command)
+ {
+ return scnprintf (buf, len,
+- "%s%scommand %06x %s=%d ithresh=%d%s%s%s%s period=%s%s %s",
++ "%s%scmd %07x %s%s%s%s%s%s=%d ithresh=%d%s%s%s%s prd=%s%s %s",
+ label, label [0] ? " " : "", command,
+- (command & CMD_PARK) ? "park" : "(park)",
++ (command & CMD_HIRD) ? " HIRD" : "",
++ (command & CMD_PPCEE) ? " PPCEE" : "",
++ (command & CMD_FSP) ? " FSP" : "",
++ (command & CMD_ASPE) ? " ASPE" : "",
++ (command & CMD_PSPE) ? " PSPE" : "",
++ (command & CMD_PARK) ? " park" : "(park)",
+ CMD_PARK_CNT (command),
+ (command >> 16) & 0x3f,
+ (command & CMD_LRESET) ? " LReset" : "",
+@@ -257,11 +269,21 @@ dbg_port_buf (char *buf, unsigned len, c
+ }
+
+ return scnprintf (buf, len,
+- "%s%sport %d status %06x%s%s sig=%s%s%s%s%s%s%s%s%s%s",
++ "%s%sp:%d sts %06x %d %s%s%s%s%s%s sig=%s%s%s%s%s%s%s%s%s%s%s",
+ label, label [0] ? " " : "", port, status,
++ status>>25,/*device address */
++ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_ACK ?
++ " ACK" : "",
++ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_NYET ?
++ " NYET" : "",
++ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_STALL ?
++ " STALL" : "",
++ (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_ERR ?
++ " ERR" : "",
+ (status & PORT_POWER) ? " POWER" : "",
+ (status & PORT_OWNER) ? " OWNER" : "",
+ sig,
++ (status & PORT_LPM) ? " LPM" : "",
+ (status & PORT_RESET) ? " RESET" : "",
+ (status & PORT_SUSPEND) ? " SUSPEND" : "",
+ (status & PORT_RESUME) ? " RESUME" : "",
+@@ -330,6 +352,13 @@ static int debug_async_open(struct inode
+ static int debug_periodic_open(struct inode *, struct file *);
+ static int debug_registers_open(struct inode *, struct file *);
+ static int debug_async_open(struct inode *, struct file *);
++static int debug_lpm_open(struct inode *, struct file *);
++static ssize_t debug_lpm_read(struct file *file, char __user *user_buf,
++ size_t count, loff_t *ppos);
++static ssize_t debug_lpm_write(struct file *file, const char __user *buffer,
++ size_t count, loff_t *ppos);
++static int debug_lpm_close(struct inode *inode, struct file *file);
++
+ static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*);
+ static int debug_close(struct inode *, struct file *);
+
+@@ -351,6 +380,13 @@ static const struct file_operations debu
+ .read = debug_output,
+ .release = debug_close,
+ };
++static const struct file_operations debug_lpm_fops = {
++ .owner = THIS_MODULE,
++ .open = debug_lpm_open,
++ .read = debug_lpm_read,
++ .write = debug_lpm_write,
++ .release = debug_lpm_close,
++};
+
+ static struct dentry *ehci_debug_root;
+
+@@ -917,6 +953,94 @@ static int debug_registers_open(struct i
+ return file->private_data ? 0 : -ENOMEM;
+ }
+
++static int debug_lpm_open(struct inode *inode, struct file *file)
++{
++ file->private_data = inode->i_private;
++ return 0;
++}
++static int debug_lpm_close(struct inode *inode, struct file *file)
++{
++ return 0;
++}
++static ssize_t debug_lpm_read(struct file *file, char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ /* TODO: show lpm stats */
++ return 0;
++}
++
++
++static
++ssize_t debug_lpm_write(struct file *file, const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct usb_hcd *hcd;
++ struct ehci_hcd *ehci;
++ char buf[50];
++ size_t len;
++ u32 temp;
++ unsigned long port;
++ u32 __iomem *portsc ;
++ u32 params;
++
++ hcd = bus_to_hcd(file->private_data);
++ ehci = hcd_to_ehci(hcd);
++
++ len = min(count, sizeof(buf) - 1);
++ if (copy_from_user(buf, user_buf, len))
++ return -EFAULT;
++ buf[len] = '\0';
++ if (len > 0 && buf[len - 1] == '\n')
++ buf[len - 1] = '\0';
++
++ if (strncmp(buf, "enable", 5) == 0) {
++ if (strict_strtoul(buf + 7, 10, &port))
++ return -EINVAL;
++ params = ehci_readl(ehci, &ehci->caps->hcs_params);
++ if (port > HCS_N_PORTS(params)) {
++ ehci_dbg(ehci, "ERR: LPM on bad port %lu\n", port);
++ return -ENODEV;
++ }
++ portsc = &ehci->regs->port_status[port-1];
++ temp = ehci_readl(ehci, portsc);
++ if (!(temp & PORT_DEV_ADDR)) {
++ ehci_dbg(ehci, "LPM: no device attached\n");
++ return -ENODEV;
++ }
++ temp |= PORT_LPM;
++ ehci_writel(ehci, temp, portsc);
++ printk(KERN_INFO "force enable LPM for port %lu\n", port);
++ } else if (strncmp(buf, "hird=", 5) == 0) {
++ unsigned long hird;
++ if (strict_strtoul(buf + 5, 16, &hird))
++ return -EINVAL;
++ printk(KERN_INFO " setting hird %s %lu \n", buf + 6, hird);
++ temp = ehci_readl(ehci, &ehci->regs->command);
++ temp &= ~CMD_HIRD;
++ temp |= hird << 24;
++ ehci_writel(ehci, temp, &ehci->regs->command);
++ } else if (strncmp(buf, "disable", 7) == 0) {
++ if (strict_strtoul(buf + 8, 10, &port))
++ return -EINVAL;
++ params = ehci_readl(ehci, &ehci->caps->hcs_params);
++ if (port > HCS_N_PORTS(params)) {
++ ehci_dbg(ehci, "ERR: LPM off bad port %lu\n", port);
++ return -ENODEV;
++ }
++ portsc = &ehci->regs->port_status[port-1];
++ temp = ehci_readl(ehci, portsc);
++ if (!(temp & PORT_DEV_ADDR)) {
++ ehci_dbg(ehci, "ERR: no device attached\n");
++ return -ENODEV;
++ }
++ temp &= ~PORT_LPM;
++ ehci_writel(ehci, temp, portsc);
++ printk(KERN_INFO "disabled LPM for port %lu\n", port);
++ } else
++ return -EOPNOTSUPP;
++ return count;
++}
++
+ static inline void create_debug_files (struct ehci_hcd *ehci)
+ {
+ struct usb_bus *bus = &ehci_to_hcd(ehci)->self;
+@@ -940,6 +1064,10 @@ static inline void create_debug_files (s
+ ehci->debug_registers = debugfs_create_file("registers", S_IRUGO,
+ ehci->debug_dir, bus,
+ &debug_registers_fops);
++
++ ehci->debug_registers = debugfs_create_file("lpm", S_IRUGO|S_IWUGO,
++ ehci->debug_dir, bus,
++ &debug_lpm_fops);
+ if (!ehci->debug_registers)
+ goto registers_error;
+ return;
+Index: linux-2.6.33/drivers/usb/host/ehci-lpm.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/usb/host/ehci-lpm.c
+@@ -0,0 +1,90 @@
++/*
++ *
++ * Author: Jacob Pan <jacob.jun.pan@intel.com>
++ *
++ * Copyright 2009- Intel Corp.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software Foundation,
++ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/* this file is part of ehci-hcd.c */
++static int ehci_lpm_set_da(struct ehci_hcd *ehci, int dev_addr, int port_num)
++{
++ u32 __iomem portsc;
++
++ ehci_dbg(ehci, "set dev address %d for port %d \n", dev_addr, port_num);
++ if (port_num > HCS_N_PORTS(ehci->hcs_params)) {
++ ehci_dbg(ehci, "invalid port number %d \n", port_num);
++ return -ENODEV;
++ }
++ portsc = ehci_readl(ehci, &ehci->regs->port_status[port_num-1]);
++ portsc &= ~PORT_DEV_ADDR;
++ portsc |= dev_addr<<25;
++ ehci_writel(ehci, portsc, &ehci->regs->port_status[port_num-1]);
++ return 0;
++}
++
++/*
++ * this function is called to put a link into L1 state. the steps are:
++ * - verify HC supports LPM
++ * - make sure all pipe idle on the link
++ * - shutdown all qh on the pipe
++ * - send LPM packet
++ * - confirm device ack
++ */
++static unsigned ehci_lpm_check(struct ehci_hcd *ehci, int port)
++{
++ u32 __iomem *portsc ;
++ u32 val32;
++ int retval;
++
++ portsc = &ehci->regs->port_status[port-1];
++ val32 = ehci_readl(ehci, portsc);
++ if (!(val32 & PORT_DEV_ADDR)) {
++ ehci_dbg(ehci, "LPM: no device attached\n");
++ return -ENODEV;
++ }
++ val32 |= PORT_LPM;
++ ehci_writel(ehci, val32, portsc);
++ mdelay(5);
++ val32 |= PORT_SUSPEND;
++ ehci_dbg(ehci, "Sending LPM 0x%08x to port %d\n", val32, port);
++ ehci_writel(ehci, val32, portsc);
++ /* wait for ACK */
++ mdelay(10);
++ retval = handshake(ehci, &ehci->regs->port_status[port-1], PORT_SSTS,
++ PORTSC_SUSPEND_STS_ACK, 125);
++ dbg_port(ehci, "LPM", port, val32);
++ if (retval != -ETIMEDOUT) {
++ ehci_dbg(ehci, "LPM: device ACK for LPM\n");
++ val32 |= PORT_LPM;
++ /*
++ * now device should be in L1 sleep, let's wake up the device
++ * so that we can complete enumeration.
++ */
++ ehci_writel(ehci, val32, portsc);
++ mdelay(10);
++ val32 |= PORT_RESUME;
++ ehci_writel(ehci, val32, portsc);
++ } else {
++ ehci_dbg(ehci, "LPM: device does not ACK, disable LPM %d\n",
++ retval);
++ val32 &= ~PORT_LPM;
++ retval = -ETIMEDOUT;
++ ehci_writel(ehci, val32, portsc);
++ }
++
++ return retval;
++}
+Index: linux-2.6.33/drivers/usb/host/ehci-q.c
+===================================================================
+--- linux-2.6.33.orig/drivers/usb/host/ehci-q.c
++++ linux-2.6.33/drivers/usb/host/ehci-q.c
+@@ -643,6 +643,16 @@ qh_urb_transaction (
+ sizeof (struct usb_ctrlrequest),
+ token | (2 /* "setup" */ << 8), 8);
+
++ if (((struct usb_ctrlrequest *)urb->setup_packet)->bRequest
++ == USB_REQ_SET_ADDRESS) {
++ /* for LPM capable HC, set up device address*/
++ int dev_address = ((struct usb_ctrlrequest *)
++ (urb->setup_packet))->wValue;
++ if (ehci_to_hcd(ehci)->lpm_cap)
++ ehci_lpm_set_da(ehci, dev_address,
++ urb->dev->portnum);
++ }
++
+ /* ... and always at least one more pid */
+ token ^= QTD_TOGGLE;
+ qtd_prev = qtd;
+Index: linux-2.6.33/include/linux/usb/ehci_def.h
+===================================================================
+--- linux-2.6.33.orig/include/linux/usb/ehci_def.h
++++ linux-2.6.33/include/linux/usb/ehci_def.h
+@@ -39,6 +39,12 @@ struct ehci_caps {
+ #define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
+
+ u32 hcc_params; /* HCCPARAMS - offset 0x8 */
++/* for 1.1 addendum */
++#define HCC_32FRAME_PERIODIC_LIST(p) ((p)&(1 << 19))
++#define HCC_PER_PORT_CHANGE_EVENT(p) ((p)&(1 << 18))
++#define HCC_LPM(p) ((p)&(1 << 17))
++#define HCC_HW_PREFETCH(p) ((p)&(1 << 16))
++
+ #define HCC_EXT_CAPS(p) (((p)>>8)&0xff) /* for pci extended caps */
+ #define HCC_ISOC_CACHE(p) ((p)&(1 << 7)) /* true: can cache isoc frame */
+ #define HCC_ISOC_THRES(p) (((p)>>4)&0x7) /* bits 6:4, uframes cached */
+@@ -54,6 +60,13 @@ struct ehci_regs {
+
+ /* USBCMD: offset 0x00 */
+ u32 command;
++
++/* EHCI 1.1 addendum */
++#define CMD_HIRD (0xf<<24) /* host initiated resume duration */
++#define CMD_PPCEE (1<<15) /* per port change event enable */
++#define CMD_FSP (1<<14) /* fully synchronized prefetch */
++#define CMD_ASPE (1<<13) /* async schedule prefetch enable */
++#define CMD_PSPE (1<<12) /* periodic schedule prefetch enable */
+ /* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
+ #define CMD_PARK (1<<11) /* enable "park" on async qh */
+ #define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */
+@@ -67,6 +80,7 @@ struct ehci_regs {
+
+ /* USBSTS: offset 0x04 */
+ u32 status;
++#define STS_PPCE_MASK (0xff<<16) /* Per-Port change event 1-16 */
+ #define STS_ASS (1<<15) /* Async Schedule Status */
+ #define STS_PSS (1<<14) /* Periodic Schedule Status */
+ #define STS_RECL (1<<13) /* Reclamation */
+@@ -100,6 +114,14 @@ struct ehci_regs {
+
+ /* PORTSC: offset 0x44 */
+ u32 port_status [0]; /* up to N_PORTS */
++/* EHCI 1.1 addendum */
++#define PORTSC_SUSPEND_STS_ACK 0
++#define PORTSC_SUSPEND_STS_NYET 1
++#define PORTSC_SUSPEND_STS_STALL 2
++#define PORTSC_SUSPEND_STS_ERR 3
++
++#define PORT_DEV_ADDR (0x7f<<25) /* device address */
++#define PORT_SSTS (0x3<<23) /* suspend status */
+ /* 31:23 reserved */
+ #define PORT_WKOC_E (1<<22) /* wake on overcurrent (enable) */
+ #define PORT_WKDISC_E (1<<21) /* wake on disconnect (enable) */
+@@ -115,6 +137,7 @@ struct ehci_regs {
+ #define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */
+ /* 11:10 for detecting lowspeed devices (reset vs release ownership) */
+ /* 9 reserved */
++#define PORT_LPM (1<<9) /* LPM transaction */
+ #define PORT_RESET (1<<8) /* reset port */
+ #define PORT_SUSPEND (1<<7) /* suspend port */
+ #define PORT_RESUME (1<<6) /* resume it */
+Index: linux-2.6.33/arch/x86/include/asm/i8259.h
+===================================================================
+--- linux-2.6.33.orig/arch/x86/include/asm/i8259.h
++++ linux-2.6.33/arch/x86/include/asm/i8259.h
+@@ -26,11 +26,6 @@ extern unsigned int cached_irq_mask;
+
+ extern spinlock_t i8259A_lock;
+
+-extern void init_8259A(int auto_eoi);
+-extern void enable_8259A_irq(unsigned int irq);
+-extern void disable_8259A_irq(unsigned int irq);
+-extern unsigned int startup_8259A_irq(unsigned int irq);
+-
+ /* the PIC may need a careful delay on some platforms, hence specific calls */
+ static inline unsigned char inb_pic(unsigned int port)
+ {
+@@ -57,7 +52,17 @@ static inline void outb_pic(unsigned cha
+
+ extern struct irq_chip i8259A_chip;
+
+-extern void mask_8259A(void);
+-extern void unmask_8259A(void);
++struct legacy_pic {
++ int nr_legacy_irqs;
++ struct irq_chip *chip;
++ void (*mask_all)(void);
++ void (*restore_mask)(void);
++ void (*init)(int auto_eoi);
++ int (*irq_pending)(unsigned int irq);
++ void (*make_irq)(unsigned int irq);
++};
++
++extern struct legacy_pic *legacy_pic;
++extern struct legacy_pic null_legacy_pic;
+
+ #endif /* _ASM_X86_I8259_H */
+Index: linux-2.6.33/arch/x86/kernel/i8259.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/i8259.c
++++ linux-2.6.33/arch/x86/kernel/i8259.c
+@@ -34,6 +34,12 @@
+ static int i8259A_auto_eoi;
+ DEFINE_SPINLOCK(i8259A_lock);
+ static void mask_and_ack_8259A(unsigned int);
++static void mask_8259A(void);
++static void unmask_8259A(void);
++static void disable_8259A_irq(unsigned int irq);
++static void enable_8259A_irq(unsigned int irq);
++static void init_8259A(int auto_eoi);
++static int i8259A_irq_pending(unsigned int irq);
+
+ struct irq_chip i8259A_chip = {
+ .name = "XT-PIC",
+@@ -63,7 +69,7 @@ unsigned int cached_irq_mask = 0xffff;
+ */
+ unsigned long io_apic_irqs;
+
+-void disable_8259A_irq(unsigned int irq)
++static void disable_8259A_irq(unsigned int irq)
+ {
+ unsigned int mask = 1 << irq;
+ unsigned long flags;
+@@ -77,7 +83,7 @@ void disable_8259A_irq(unsigned int irq)
+ spin_unlock_irqrestore(&i8259A_lock, flags);
+ }
+
+-void enable_8259A_irq(unsigned int irq)
++static void enable_8259A_irq(unsigned int irq)
+ {
+ unsigned int mask = ~(1 << irq);
+ unsigned long flags;
+@@ -91,7 +97,7 @@ void enable_8259A_irq(unsigned int irq)
+ spin_unlock_irqrestore(&i8259A_lock, flags);
+ }
+
+-int i8259A_irq_pending(unsigned int irq)
++static int i8259A_irq_pending(unsigned int irq)
+ {
+ unsigned int mask = 1<<irq;
+ unsigned long flags;
+@@ -107,7 +113,7 @@ int i8259A_irq_pending(unsigned int irq)
+ return ret;
+ }
+
+-void make_8259A_irq(unsigned int irq)
++static void make_8259A_irq(unsigned int irq)
+ {
+ disable_irq_nosync(irq);
+ io_apic_irqs &= ~(1<<irq);
+@@ -281,7 +287,7 @@ static int __init i8259A_init_sysfs(void
+
+ device_initcall(i8259A_init_sysfs);
+
+-void mask_8259A(void)
++static void mask_8259A(void)
+ {
+ unsigned long flags;
+
+@@ -293,7 +299,7 @@ void mask_8259A(void)
+ spin_unlock_irqrestore(&i8259A_lock, flags);
+ }
+
+-void unmask_8259A(void)
++static void unmask_8259A(void)
+ {
+ unsigned long flags;
+
+@@ -305,7 +311,7 @@ void unmask_8259A(void)
+ spin_unlock_irqrestore(&i8259A_lock, flags);
+ }
+
+-void init_8259A(int auto_eoi)
++static void init_8259A(int auto_eoi)
+ {
+ unsigned long flags;
+
+@@ -358,3 +364,47 @@ void init_8259A(int auto_eoi)
+
+ spin_unlock_irqrestore(&i8259A_lock, flags);
+ }
++
++/*
++ * make i8259 a driver so that we can select pic functions at run time. the goal
++ * is to make x86 binary compatible among pc compatible and non-pc compatible
++ * platforms, such as x86 MID.
++ */
++
++static void __init legacy_pic_noop(void) { };
++static void __init legacy_pic_uint_noop(unsigned int unused) { };
++static void __init legacy_pic_int_noop(int unused) { };
++
++static struct irq_chip dummy_pic_chip = {
++ .name = "dummy pic",
++ .mask = legacy_pic_uint_noop,
++ .unmask = legacy_pic_uint_noop,
++ .disable = legacy_pic_uint_noop,
++ .mask_ack = legacy_pic_uint_noop,
++};
++static int legacy_pic_irq_pending_noop(unsigned int irq)
++{
++ return 0;
++}
++
++struct legacy_pic null_legacy_pic = {
++ .nr_legacy_irqs = 0,
++ .chip = &dummy_pic_chip,
++ .mask_all = legacy_pic_noop,
++ .restore_mask = legacy_pic_noop,
++ .init = legacy_pic_int_noop,
++ .irq_pending = legacy_pic_irq_pending_noop,
++ .make_irq = legacy_pic_uint_noop,
++};
++
++struct legacy_pic default_legacy_pic = {
++ .nr_legacy_irqs = NR_IRQS_LEGACY,
++ .chip = &i8259A_chip,
++ .mask_all = mask_8259A,
++ .restore_mask = unmask_8259A,
++ .init = init_8259A,
++ .irq_pending = i8259A_irq_pending,
++ .make_irq = make_8259A_irq,
++};
++
++struct legacy_pic *legacy_pic = &default_legacy_pic;
+Index: linux-2.6.33/arch/x86/include/asm/hw_irq.h
+===================================================================
+--- linux-2.6.33.orig/arch/x86/include/asm/hw_irq.h
++++ linux-2.6.33/arch/x86/include/asm/hw_irq.h
+@@ -53,13 +53,6 @@ extern void threshold_interrupt(void);
+ extern void call_function_interrupt(void);
+ extern void call_function_single_interrupt(void);
+
+-/* PIC specific functions */
+-extern void disable_8259A_irq(unsigned int irq);
+-extern void enable_8259A_irq(unsigned int irq);
+-extern int i8259A_irq_pending(unsigned int irq);
+-extern void make_8259A_irq(unsigned int irq);
+-extern void init_8259A(int aeoi);
+-
+ /* IOAPIC */
+ #define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
+ extern unsigned long io_apic_irqs;
+Index: linux-2.6.33/arch/x86/kernel/apic/nmi.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/apic/nmi.c
++++ linux-2.6.33/arch/x86/kernel/apic/nmi.c
+@@ -177,7 +177,7 @@ int __init check_nmi_watchdog(void)
+ error:
+ if (nmi_watchdog == NMI_IO_APIC) {
+ if (!timer_through_8259)
+- disable_8259A_irq(0);
++ legacy_pic->chip->mask(0);
+ on_each_cpu(__acpi_nmi_disable, NULL, 1);
+ }
+
+Index: linux-2.6.33/arch/x86/kernel/irqinit.c
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/irqinit.c
++++ linux-2.6.33/arch/x86/kernel/irqinit.c
+@@ -123,7 +123,7 @@ void __init init_ISA_irqs(void)
+ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
+ init_bsp_APIC();
+ #endif
+- init_8259A(0);
++ legacy_pic->init(0);
+
+ /*
+ * 16 old-style INTA-cycle interrupts:
+Index: linux-2.6.33/drivers/misc/Makefile
+===================================================================
+--- linux-2.6.33.orig/drivers/misc/Makefile
++++ linux-2.6.33/drivers/misc/Makefile
+@@ -20,6 +20,7 @@ obj-$(CONFIG_SGI_XP) += sgi-xp/
+ obj-$(CONFIG_SGI_GRU) += sgi-gru/
+ obj-$(CONFIG_CS5535_MFGPT) += cs5535-mfgpt.o
+ obj-$(CONFIG_HP_ILO) += hpilo.o
++obj-$(CONFIG_MRST) += intel_mrst.o
+ obj-$(CONFIG_ISL29003) += isl29003.o
+ obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
+ obj-$(CONFIG_DS1682) += ds1682.o
+Index: linux-2.6.33/drivers/misc/intel_mrst.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/misc/intel_mrst.c
+@@ -0,0 +1,216 @@
++/*
++ * intel_mrst.c - Intel Moorestown Driver for misc functionality
++ *
++ * Copyright (C) 2009 Intel Corp
++ * Author: James Ausmus <james.ausmus@intel.com>
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This driver sets up initial PMIC register values for various voltage rails
++ * and GPIOs
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++
++#include <linux/delay.h>
++#include <asm/ipc_defs.h>
++
++
++MODULE_AUTHOR("James Ausmus");
++MODULE_AUTHOR("German Monroy");
++MODULE_DESCRIPTION("Intel MRST platform specific driver");
++MODULE_LICENSE("GPL");
++
++static int intel_mrst_pmic_read(unsigned int reg, unsigned int *value)
++{
++ struct ipc_pmic_reg_data pmic_data = { 0 };
++ int ret = 0;
++
++ pmic_data.pmic_reg_data[0].register_address = reg;
++ pmic_data.num_entries = 1;
++ ret = ipc_pmic_register_read(&pmic_data);
++ if (ret)
++ printk(KERN_ERR "intel_mrst_pmic_read: unable to read "
++ "PMIC register 0x%03x\n", reg);
++ else
++ *value = pmic_data.pmic_reg_data[0].value;
++
++ return ret;
++}
++
++static int intel_mrst_pmic_write(unsigned int reg, unsigned int value)
++{
++ struct ipc_pmic_reg_data pmic_data = { 0 };
++ int ret = 0;
++
++ pmic_data.pmic_reg_data[0].register_address = reg;
++ pmic_data.pmic_reg_data[0].value = value;
++ pmic_data.num_entries = 1;
++ ret = ipc_pmic_register_write(&pmic_data, 0);
++ if (ret) {
++ printk(KERN_ERR "intel_mrst_pmic_write: register 0x%03x "
++ "failed ipc_pmic_register_write of value %02x, "
++ "retval %d\n", reg, value, ret);
++ } else {
++ printk(KERN_INFO "intel_mrst_pmic_write: register "
++ "0x%03x, now=0x%02x\n",
++ reg, value);
++ }
++
++ return ret;
++}
++
++static int intel_mrst_sdio_EVP_power_up(void)
++{
++ intel_mrst_pmic_write(0xF4, 0x25);
++ intel_mrst_pmic_write(0x21, 0x00);
++ intel_mrst_pmic_write(0x4a, 0x7f);
++ intel_mrst_pmic_write(0x4b, 0x7f);
++ intel_mrst_pmic_write(0x4c, 0x3f);
++
++ intel_mrst_pmic_write(0x3b, 0x3f);
++ intel_mrst_pmic_write(0x3c, 0x3f);
++ mdelay(1);
++ intel_mrst_pmic_write(0xF4, 0x05);
++ mdelay(12);
++ intel_mrst_pmic_write(0xF4, 0x21);
++
++ return 0;
++
++}
++
++static int intel_mrst_sdio_EVP_power_down(void)
++{
++ intel_mrst_pmic_write(0xF4, 0x25);
++ intel_mrst_pmic_write(0x21, 0x00);
++
++ intel_mrst_pmic_write(0x4b, 0x00);
++ intel_mrst_pmic_write(0x4c, 0x00);
++
++ intel_mrst_pmic_write(0x3b, 0x00);
++ intel_mrst_pmic_write(0x3c, 0x00);
++ intel_mrst_pmic_write(0x4a, 0x00);
++
++ return 0;
++}
++
++static int intel_mrst_sdio_8688_power_up(void)
++{
++ intel_mrst_pmic_write(0x37, 0x3f); /* Set VDDQ for Marvell 8688 */
++ intel_mrst_pmic_write(0x4a, 0x3f); /* Set GYMXIOCNT for Marvell 8688 */
++ intel_mrst_pmic_write(0x4e, 0x3f); /* Set GYMX33CNT for Marvell 8688 */
++
++ intel_mrst_pmic_write(0x3a, 0x27); /* Enables the V3p3_FLASH line,
++ which routes to VIO_X1 and VIO_X2
++ on the MRVL8688 */
++
++ intel_mrst_pmic_write(0x4b, 0x27); /* Enable V1p2_VWYMXA for MRVL8688 */
++ intel_mrst_pmic_write(0x4c, 0x27); /* Enable V1p8_VWYMXARF for
++ MRVL8688 */
++
++ return 0;
++}
++
++static int intel_mrst_bringup_8688_sdio2(void)
++{
++ unsigned int temp = 0;
++
++ /* Register 0xf4 has 2 GPIO lines connected to the MRVL 8688:
++ * bit 4: PDn
++ * bit 3: WiFi RESETn */
++
++ intel_mrst_pmic_read(0xf4, &temp);
++ temp = temp|0x8;
++ intel_mrst_pmic_write(0xf4, temp);
++
++ temp = temp|0x10;
++ intel_mrst_pmic_write(0xf4, temp);
++
++ return 0;
++}
++
++static int intel_mrst_bringup_EVP_sdio2_Option_spi(void)
++{
++ unsigned int temp = 0;
++
++ /* Register 0xf4 has 3 GPIO lines connected to the EVP:
++ * bit 0: RF_KILL_N
++ * bit 2: H2D_INT
++ * bit 5: SYS_RST_N
++ */
++
++ /* Register 0xf4 has 2 GPIO lines connected to the Option:
++ * bit 0: GPO_WWAN_DISABLE
++ * bit 5: GPO_WWAN_RESET
++ */
++
++ intel_mrst_pmic_read(0xf4, &temp);
++ temp = temp|0x21;
++ temp = temp & 0xFB;
++ intel_mrst_pmic_write(0xf4, temp); /* Set RF_KILL_N & SYS_RST_N to
++ High. H2D_INT to LOW */
++
++ intel_mrst_pmic_read(0xf4, &temp); /* Set SYS_RST_N to Low */
++ temp = temp & 0xDF;
++ mdelay(1);
++ intel_mrst_pmic_write(0xf4, temp);
++
++ mdelay(12); /* Try to generate a 12mS delay here if possible */
++ intel_mrst_pmic_read(0xf4, &temp); /* Set SYS_RST_N to High */
++ temp = temp | 0x20;
++ intel_mrst_pmic_write(0xf4, temp);
++
++ return 0;
++}
++
++
++static int __init intel_mrst_module_init(void)
++{
++ int ret = 0;
++
++/* We only need the following PMIC register initializations if
++ * we are using the Marvell 8688 WLAN card on the SDIO2 port */
++
++#ifdef CONFIG_8688_RC
++
++ printk(KERN_INFO "intel_mrst_module_init: bringing up power for "
++ "8688 WLAN on SDIO2...\n");
++ ret = intel_mrst_bringup_8688_sdio2();
++
++#endif /* CONFIG_8688_RC */
++
++/* We only need the following PMIC register initializations if
++ * we are using the EVP on SDIO2 port or Option on SPI port */
++
++#if defined(CONFIG_EVP_SDIO2) || defined(CONFIG_SPI_MRST_GTM501)
++
++ printk(KERN_INFO "intel_mrst_module_init: bringing up power for "
++ "EvP on SDIO2 and Option on SPI...\n");
++ ret = intel_mrst_bringup_EVP_sdio2_Option_spi();
++
++#endif /* CONFIG_EVP_SDIO2 || CONFIG_SPI_MRST_GTM501 */
++ return ret;
++}
++
++static void __exit intel_mrst_module_exit(void)
++{
++}
++
++module_init(intel_mrst_module_init);
++module_exit(intel_mrst_module_exit);
+Index: linux-2.6.33/drivers/i2c/busses/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/i2c/busses/Kconfig
++++ linux-2.6.33/drivers/i2c/busses/Kconfig
+@@ -772,4 +772,14 @@ config SCx200_ACB
+ This support is also available as a module. If so, the module
+ will be called scx200_acb.
+
++config I2C_MRST
++ tristate "Intel Moorestown I2C Controller"
++ depends on PCI && GPIOLIB && GPIO_LANGWELL
++ default y
++ help
++ If you say yes to this option, support will be included for the Intel
++ Moorestown chipset I2C controller.
++ This driver can also be built as a module. If so, the module
++ will be called i2c-mrst.
++
+ endmenu
+Index: linux-2.6.33/drivers/i2c/busses/Makefile
+===================================================================
+--- linux-2.6.33.orig/drivers/i2c/busses/Makefile
++++ linux-2.6.33/drivers/i2c/busses/Makefile
+@@ -72,6 +72,7 @@ obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o
+ obj-$(CONFIG_I2C_STUB) += i2c-stub.o
+ obj-$(CONFIG_SCx200_ACB) += scx200_acb.o
+ obj-$(CONFIG_SCx200_I2C) += scx200_i2c.o
++obj-$(CONFIG_I2C_MRST) += i2c-mrst.o
+
+ ifeq ($(CONFIG_I2C_DEBUG_BUS),y)
+ EXTRA_CFLAGS += -DDEBUG
+Index: linux-2.6.33/drivers/i2c/busses/i2c-mrst.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/i2c/busses/i2c-mrst.c
+@@ -0,0 +1,953 @@
++/*
++ * Support for Moorestown Langwell I2C chip
++ *
++ * Copyright (c) 2009 Intel Corporation.
++ * Copyright (c) 2009 Synopsys. Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License, version
++ * 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT ANY
++ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
++ * details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc., 51
++ * Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/err.h>
++#include <linux/slab.h>
++#include <linux/stat.h>
++#include <linux/types.h>
++#include <linux/delay.h>
++#include <linux/i2c.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <linux/gpio.h>
++
++#include <linux/io.h>
++
++#include "i2c-mrst.h"
++
++#define MAX_T_POLL_COUNT 4000 /* FIXME */
++#define DEF_BAR 0
++#define VERSION "Version 0.5"
++
++#define mrst_i2c_read(reg) __raw_readl(reg)
++#define mrst_i2c_write(reg, val) __raw_writel((val), (reg))
++
++static int speed_mode = STANDARD;
++module_param(speed_mode, int, S_IRUGO);
++
++static int mrst_i2c_register_board_info(struct pci_dev *dev, int busnum)
++{
++ struct mrst_i2c_private *mrst = (struct mrst_i2c_private *)
++ pci_get_drvdata(dev);
++ int err;
++ unsigned short addr, irq, host;
++ char *name = NULL;
++ struct i2c_board_info *info = NULL;
++ unsigned int addr_off, irq_off, name_off, data_off, host_off;
++ unsigned int table_len, block_num, block = 0;
++ int i, j;
++ unsigned int start, len;
++ void __iomem *base = NULL, *ptr = NULL;
++
++ /* Determine the address of the I2C device info table area */
++ start = pci_resource_start(dev, 1);
++ len = pci_resource_len(dev, 1);
++ if (!start || len <= 0) {
++ err = -ENODEV;
++ return err;
++ }
++
++ err = pci_request_region(dev, 1, "mrst_i2c");
++ if (err) {
++ dev_err(&dev->dev, "Failed to request I2C region "
++ "0x%1x-0x%Lx\n", start,
++ (unsigned long long)pci_resource_end(dev, 1));
++ return err;
++ }
++
++ ptr = ioremap(start, len);
++ if (!ptr) {
++ dev_err(&dev->dev, "I/O memory remapping failed\n");
++ err = -ENOMEM;
++ goto err0;
++ }
++
++ if (len == 8) {
++ start = ioread32(ptr);
++ len = ioread32(ptr + 4);
++ iounmap(ptr);
++ dev_dbg(&dev->dev, "New FW: start 0x%x 0x%x\n", start, len);
++ base = ioremap(start, len);
++ } else {
++ dev_dbg(&dev->dev, "this is an old FW\n");
++ base = ptr;
++ }
++
++ /* Initialization */
++ name = kzalloc(sizeof(char) * NAME_LENGTH, GFP_KERNEL);
++ if (name == NULL) {
++ err = -ENOMEM;
++ goto err1;
++ }
++
++ info = kzalloc(sizeof(struct i2c_board_info), GFP_KERNEL);
++ if (info == NULL) {
++ dev_err(&dev->dev,
++ "Can't allocate interface for i2c_board_info\n");
++ err = -ENOMEM;
++ goto err2;
++ }
++
++ /* Get I2C info table length */
++ table_len = ioread32(base + I2C_INFO_TABLE_LENGTH);
++
++ /* Calculate the number of I2C device */
++ block_num = (table_len - HEAD_LENGTH)/BLOCK_LENGTH;
++ dev_dbg(&dev->dev, "the number of table is %d\n", block_num);
++ if (!block_num)
++ /* No I2C device info */
++ goto err3;
++
++ /* Initialize mrst_i2c_info array */
++ mrst->mrst_i2c_info = kzalloc(sizeof(struct i2c_board_info) *
++ block_num, GFP_KERNEL);
++ if (mrst->mrst_i2c_info == NULL) {
++ dev_err(&dev->dev,
++ "Can't allocate interface for i2c_board_info\n");
++ err = -ENOMEM;
++ goto err3;
++ }
++
++ mrst->data = kzalloc(sizeof(*mrst->data) * block_num, GFP_KERNEL);
++ if (mrst->data == NULL) {
++ dev_err(&dev->dev,
++ "Can't allocate interface for per device data\n");
++ err = -ENOMEM;
++ goto err4;
++ }
++
++ for (i = 0; i < block_num; i++) {
++ /* I2C device info block offsets */
++ host_off = I2C_INFO_DEV_BLOCK + BLOCK_LENGTH * i;
++ addr_off = I2C_INFO_DEV_BLOCK + BLOCK_LENGTH * i + I2C_DEV_ADDR;
++ irq_off = I2C_INFO_DEV_BLOCK + BLOCK_LENGTH * i + I2C_DEV_IRQ;
++ name_off = I2C_INFO_DEV_BLOCK + BLOCK_LENGTH * i + I2C_DEV_NAME;
++ data_off = I2C_INFO_DEV_BLOCK + BLOCK_LENGTH * i + I2C_DEV_INFO;
++
++ /* Read PCI config table */
++ host = ioread16(base + host_off);
++ if (host != busnum)
++ continue;
++ addr = ioread16(base + addr_off);
++ irq = ioread16(base + irq_off);
++ for (j = 0; j < NAME_LENGTH; j++)
++ name[j] = ioread8(base + name_off + j);
++
++ for (j = 0; j < INFO_LENGTH; j++)
++ mrst->data[i][j] = ioread8(base + data_off + j);
++ dev_dbg(&dev->dev, "after read PCI config table: name = %s,"
++ " address = %x\n", name, addr);
++
++ /* Fill in i2c_board_info struct */
++ memcpy(info->type, name, NAME_LENGTH);
++ info->platform_data = mrst->data[i];
++ info->addr = addr;
++ info->irq = irq;
++
++ /* Add to mrst_i2c_info array */
++ memcpy(mrst->mrst_i2c_info + block, info,
++ sizeof(struct i2c_board_info));
++ block++;
++ }
++
++ /* Register i2c board info */
++ err = i2c_register_board_info(busnum, mrst->mrst_i2c_info, block);
++ goto err3;
++
++/* Clean up */
++err4:
++ kfree(mrst->mrst_i2c_info);
++err3:
++ kfree(info);
++err2:
++ kfree(name);
++err1:
++ iounmap(base);
++err0:
++ pci_release_region(dev, 1);
++ return err;
++}
++/* End update */
++
++/**
++ * mrst_i2c_disable - Disable I2C controller
++ * @adap: struct pointer to i2c_adapter
++ *
++ * Return Value:
++ * 0 success
++ * -EBUSY if device is busy
++ * -ETIMEOUT if i2c cannot be disabled within the given time
++ *
++ * I2C bus state should be checked prior to disabling the hardware. If bus is
++ * not in idle state, an errno is returned. Write "0" to IC_ENABLE to disable
++ * I2C controller.
++ */
++static int mrst_i2c_disable(struct i2c_adapter *adap)
++{
++ struct mrst_i2c_private *i2c =
++ (struct mrst_i2c_private *)i2c_get_adapdata(adap);
++
++ int count = 0;
++
++ /* Check if device is busy */
++ dev_dbg(&adap->dev, "mrst i2c disable\n");
++ while (mrst_i2c_read(i2c->base + IC_STATUS) & 0x1) {
++ dev_dbg(&adap->dev, "i2c is busy, count is %d\n", count);
++ if (count++ > 10000)
++ return -EBUSY;
++ }
++
++ /* Set IC_ENABLE to 0 */
++ mrst_i2c_write(i2c->base + IC_ENABLE, 0);
++
++ /* Disable all interupts */
++ mrst_i2c_write(i2c->base + IC_INTR_MASK, 0x0000);
++
++ /* Clear all interrupts */
++ mrst_i2c_read(i2c->base + IC_CLR_INTR);
++
++ return 0;
++}
++
++/**
++ * mrst_i2c_hwinit - Initiate the I2C hardware registers. This function will
++ * be called in mrst_i2c_probe() before device registration.
++ * @dev: pci device struct pointer
++ *
++ * Return Values:
++ * 0 success
++ * -EBUSY i2c cannot be disabled
++ * -ETIMEDOUT i2c cannot be disabled
++ * -EFAULT If APB data width is not 32-bit wide
++ *
++ * I2C should be disabled prior to other register operation. If failed, an
++ * errno is returned. Mask and Clear all interrpts, this should be done at
++ * first. Set common registers which will not be modified during normal
++ * transfers, including: controll register, FIFO threshold and clock freq.
++ * Check APB data width at last.
++ */
++static int __devinit mrst_i2c_hwinit(struct pci_dev *dev)
++{
++ struct mrst_i2c_private *i2c =
++ (struct mrst_i2c_private *)pci_get_drvdata(dev);
++ int err = 0;
++
++ /* Disable i2c first */
++ err = mrst_i2c_disable(i2c->adap);
++ if (err)
++ return err;
++
++ /* Disable all interupts */
++ mrst_i2c_write(i2c->base + IC_INTR_MASK, 0x0000);
++
++ /* Clear all interrupts */
++ mrst_i2c_read(i2c->base + IC_CLR_INTR);
++
++ /*
++ * Setup clock frequency and speed mode
++ * Enable restart condition,
++ * enable master FSM, disable slave FSM,
++ * use target address when initiating transfer
++ */
++ switch (speed_mode) {
++ case STANDARD:
++ mrst_i2c_write(i2c->base + IC_CON,
++ SLV_DIS | RESTART | STANDARD_MODE | MASTER_EN);
++ mrst_i2c_write(i2c->base + IC_SS_SCL_HCNT, 0x75);
++ mrst_i2c_write(i2c->base + IC_SS_SCL_LCNT, 0x7c);
++ break;
++ case FAST:
++ mrst_i2c_write(i2c->base + IC_CON,
++ SLV_DIS | RESTART | FAST_MODE | MASTER_EN);
++ mrst_i2c_write(i2c->base + IC_SS_SCL_HCNT, 0x15);
++ mrst_i2c_write(i2c->base + IC_SS_SCL_LCNT, 0x21);
++ break;
++ case HIGH:
++ mrst_i2c_write(i2c->base + IC_CON,
++ SLV_DIS | RESTART | HIGH_MODE | MASTER_EN);
++ mrst_i2c_write(i2c->base + IC_SS_SCL_HCNT, 0x7);
++ mrst_i2c_write(i2c->base + IC_SS_SCL_LCNT, 0xE);
++ break;
++ default:
++ ;
++ }
++
++ /* Set tranmit & receive FIFO threshold to zero */
++ mrst_i2c_write(i2c->base + IC_RX_TL, 0x3);
++ mrst_i2c_write(i2c->base + IC_TX_TL, 0x3);
++
++ mrst_i2c_write(i2c->base + IC_ENABLE, 1);
++
++ return err;
++}
++
++/**
++ * mrst_i2c_func - Return the supported three I2C operations.
++ * @adapter: i2c_adapter struct pointer
++ */
++static u32 mrst_i2c_func(struct i2c_adapter *adapter)
++{
++ return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | I2C_FUNC_SMBUS_EMUL;
++}
++
++/**
++ * mrst_i2c_invalid_address - To check if the address in i2c message is
++ * correct.
++ * @p: i2c_msg struct pointer
++ *
++ * Return Values:
++ * 0 if the address is valid
++ * 1 if the address is invalid
++ */
++static inline int mrst_i2c_invalid_address(const struct i2c_msg *p)
++{
++ int ret = ((p->addr > 0x3ff) || (!(p->flags & I2C_M_TEN)
++ && (p->addr > 0x7f)));
++ return ret;
++}
++
++/**
++ * mrst_i2c_address_neq - To check if the addresses for different i2c messages
++ * are equal.
++ * @p1: first i2c_msg
++ * @p2: second i2c_msg
++ *
++ * Return Values:
++ * 0 if addresses are equal
++ * 1 if not equal
++ *
++ * Within a single transfer, I2C client may need to send its address more
++ * than one time. So a check for the address equation is needed.
++ */
++static inline int mrst_i2c_address_neq(const struct i2c_msg *p1,
++ const struct i2c_msg *p2)
++{
++ int ret = ((p1->addr != p2->addr) || ((p1->flags & (I2C_M_TEN))
++ != ((p2->flags) & (I2C_M_TEN))));
++ return ret;
++}
++
++/**
++ * mrst_i2c_abort - To handle transfer abortions and print error messages.
++ * @adap: i2c_adapter struct pointer
++ *
++ * By reading register IC_TX_ABRT_SOURCE, various transfer errors can be
++ * distinguished. At present, no circumstances have been found out that
++ * multiple errors would occur simultaneously, so we simply use the
++ * register value directly.
++ *
++ * At last the error bits are cleared. (Note clear ABRT_SBYTE_NORSTRT bit need
++ * a few extra steps)
++ */
++static void mrst_i2c_abort(struct i2c_adapter *adap)
++{
++ struct mrst_i2c_private *i2c = (struct mrst_i2c_private *)
++ i2c_get_adapdata(adap);
++
++ /* Read about source register */
++ int abort = mrst_i2c_read(i2c->base + IC_TX_ABRT_SOURCE);
++
++ dev_dbg(&adap->dev, "Abort: ");
++
++ /* Single transfer error check:
++ * According to databook, TX/RX FIFOs would be flushed when
++ * the abort interrupt occurred.
++ */
++ switch (abort) {
++ case (ABRT_MASTER_DIS):
++ dev_err(&adap->dev,
++ "initiate Master operation with Master mode"
++ "disabled.\n");
++
++ break;
++ case (ABRT_10B_RD_NORSTRT):
++ dev_err(&adap->dev,
++ "RESTART disabled and master sends READ cmd in 10-BIT"
++ "addressing.\n");
++ break;
++ case (ABRT_SBYTE_NORSTRT):
++ dev_err(&adap->dev,
++ "RESTART disabled and user is trying to send START"
++ "byte.\n");
++ /* Page 141 data book */
++ mrst_i2c_write(i2c->base + IC_TX_ABRT_SOURCE,
++ !(ABRT_SBYTE_NORSTRT));
++ mrst_i2c_write(i2c->base + IC_CON, RESTART);
++ mrst_i2c_write(i2c->base + IC_TAR, !(IC_TAR_SPECIAL));
++ break;
++ case (ABRT_SBYTE_ACKDET):
++ dev_err(&adap->dev,
++ "START byte was acknowledged.\n");
++ break;
++ case (ABRT_TXDATA_NOACK):
++ dev_err(&adap->dev,
++ "No acknowledge received from slave.\n");
++ break;
++ case (ABRT_10ADDR2_NOACK):
++ dev_err(&adap->dev,
++ "The 2nd address byte of 10-bit address not"
++ "acknowledged.\n");
++ break;
++ case (ABRT_10ADDR1_NOACK):
++ dev_dbg(&adap->dev,
++ "The 1st address byte of 10-bit address not"
++ "acknowledged.\n");
++ break;
++ case (ABRT_7B_ADDR_NOACK):
++ dev_err(&adap->dev,
++ "7-bit address not acknowledged.\n");
++ break;
++ default:
++ ;;
++ }
++
++ /* Clear TX_ABRT bit */
++ mrst_i2c_read(i2c->base + IC_CLR_TX_ABRT);
++}
++
++/**
++ * xfer_read - Internal function to implement master read transfer.
++ * @adap: i2c_adapter struct pointer
++ * @buf: buffer in i2c_msg
++ * @length: number of bytes to be read
++ *
++ * Return Values:
++ * 0 if the read transfer succeeds
++ * -ETIMEDOUT if cannot read the "raw" interrupt register
++ * -EINVAL if transfer abort occurred
++ *
++ * For every byte, a "READ" command will be loaded into IC_DATA_CMD prior to
++ * data transfer. The actual "read" operation will be performed if the RX_FULL
++ * interrupt occurs.
++ *
++ * Note there may be two interrupt signals captured, one should read
++ * IC_RAW_INTR_STAT to separate errors from actual data.
++ */
++static int xfer_read(struct i2c_adapter *adap, unsigned char *buf, int length)
++{
++ struct mrst_i2c_private *i2c = (struct mrst_i2c_private *)
++ i2c_get_adapdata(adap);
++ uint32_t reg_val;
++ int i = length;
++ unsigned count = 0;
++ uint32_t bit_get = 1 << 3; /* receive fifo not empty */
++
++ while (i--)
++ mrst_i2c_write(i2c->base + IC_DATA_CMD, (uint16_t)0x100);
++
++ i = length;
++ while (i--) {
++ count = 0;
++ reg_val = mrst_i2c_read(i2c->base + IC_STATUS);
++ while ((reg_val & bit_get) == 0) {
++ reg_val = mrst_i2c_read(i2c->base + IC_RAW_INTR_STAT);
++ if ((reg_val & 0x40) == 0x40)
++ goto read_abrt;
++ reg_val = mrst_i2c_read(i2c->base + IC_STATUS);
++ if (count++ > MAX_T_POLL_COUNT)
++ goto read_loop;
++ }
++
++ reg_val = mrst_i2c_read(i2c->base + IC_DATA_CMD);
++ *buf++ = reg_val;
++ }
++
++ return 0;
++
++read_loop:
++ dev_err(&adap->dev, "Time out in read\n");
++ return -ETIMEDOUT;
++read_abrt:
++ dev_err(&adap->dev, "Abort from read\n");
++ mrst_i2c_abort(adap);
++ return -EINVAL;
++}
++
++/**
++ * xfer_write - Internal function to implement master write transfer.
++ * @adap: i2c_adapter struct pointer
++ * @buf: buffer in i2c_msg
++ * @length: number of bytes to be written
++ *
++ * Return Values:
++ * 0 if the write transfer succeeds
++ * -ETIMEDOUT if cannot read the "raw" interrupt register
++ * -EINVAL if transfer abort occurred
++ *
++ * For every byte, a "WRITE" command will be loaded into IC_DATA_CMD prior to
++ * data transfer. The actual "write" operation will be performed if the
++ * RX_FULL interrupt signal occurs.
++ *
++ * Note there may be two interrupt signals captured, one should read
++ * IC_RAW_INTR_STAT to separate errors from actual data.
++ */
++static int xfer_write(struct i2c_adapter *adap,
++ unsigned char *buf, int length)
++{
++ struct mrst_i2c_private *i2c = (struct mrst_i2c_private *)
++ i2c_get_adapdata(adap);
++
++ int i;
++ uint32_t reg_val;
++ unsigned count = 0;
++ uint32_t bit_get = 1 << 2; /* transmit fifo completely empty */
++
++ for (i = 0; i < length; i++)
++ mrst_i2c_write(i2c->base + IC_DATA_CMD,
++ (uint16_t)(*(buf + i)));
++
++ reg_val = mrst_i2c_read(i2c->base + IC_STATUS);
++ while ((reg_val & bit_get) == 0) {
++ if (count++ > MAX_T_POLL_COUNT)
++ goto write_loop;
++ reg_val = mrst_i2c_read(i2c->base + IC_STATUS);
++ }
++
++ udelay(100);
++ reg_val = mrst_i2c_read(i2c->base + IC_RAW_INTR_STAT);
++ if ((reg_val & 0x40) == 0x40)
++ goto write_abrt;
++
++ return 0;
++
++write_loop:
++ dev_err(&adap->dev, "Time out in write\n");
++ return -ETIMEDOUT;
++write_abrt:
++ dev_err(&adap->dev, "Abort from write\n");
++ mrst_i2c_abort(adap);
++ return -EINVAL;
++}
++
++static int mrst_i2c_setup(struct i2c_adapter *adap, struct i2c_msg *pmsg)
++{
++ struct mrst_i2c_private *i2c =
++ (struct mrst_i2c_private *)i2c_get_adapdata(adap);
++ int err;
++ uint32_t reg_val;
++ uint32_t bit_mask;
++
++ /* Disable device first */
++ err = mrst_i2c_disable(adap);
++ if (err) {
++ dev_err(&adap->dev,
++ "Cannot disable i2c controller, timeout!\n");
++ return -ETIMEDOUT;
++ }
++
++
++ reg_val = mrst_i2c_read(i2c->base + IC_ENABLE);
++ if (reg_val & 0x1) {
++ dev_dbg(&adap->dev, "i2c busy, can't setup\n");
++ return -EINVAL;
++ }
++
++ /* set the speed mode to standard */
++ reg_val = mrst_i2c_read(i2c->base + IC_CON);
++ if ((reg_val & (1<<1 | 1<<2)) != 1<<1) {
++ dev_dbg(&adap->dev, "set standard mode\n");
++ mrst_i2c_write(i2c->base + IC_CON, (reg_val & (~0x6)) | 1<<1);
++ }
++
++ reg_val = mrst_i2c_read(i2c->base + IC_CON);
++ /* use 7-bit addressing */
++ if ((reg_val & (1<<4)) != 0x0) {
++ dev_dbg(&adap->dev, "set i2c 7 bit address mode\n");
++ mrst_i2c_write(i2c->base + IC_CON, reg_val & (~(1<<4)));
++ }
++
++ /*enable restart conditions */
++ reg_val = mrst_i2c_read(i2c->base + IC_CON);
++ if ((reg_val & (1<<5)) != 1<<5) {
++ dev_dbg(&adap->dev, "enable restart conditions\n");
++ mrst_i2c_write(i2c->base + IC_CON, (reg_val & (~(1 << 5)))
++ | 1 << 5);
++ }
++
++ /* enable master FSM */
++ reg_val = mrst_i2c_read(i2c->base + IC_CON);
++ dev_dbg(&adap->dev, "ic_con reg_val is 0x%x\n", reg_val);
++ if ((reg_val & (1<<6)) != 1<<6) {
++ dev_dbg(&adap->dev, "enable master FSM\n");
++ mrst_i2c_write(i2c->base + IC_CON, (reg_val & (~(1 << 6)))
++ | 1<<6);
++ dev_dbg(&adap->dev, "ic_con reg_val is 0x%x\n", reg_val);
++ }
++
++ /* use target address when initiating transfer */
++ reg_val = mrst_i2c_read(i2c->base + IC_TAR);
++ bit_mask = 1 << 11 | 1 << 10;
++
++ if ((reg_val & bit_mask) != 0x0) {
++ dev_dbg(&adap->dev, "WR: use target address when intiating"
++ "transfer, i2c_tx_target\n");
++ mrst_i2c_write(i2c->base + IC_TAR, reg_val & ~bit_mask);
++ }
++
++ /* set target address to the I2C slave address */
++ dev_dbg(&adap->dev, "set target address to the I2C slave address,"
++ "addr is %x\n", pmsg->addr);
++ mrst_i2c_write(i2c->base + IC_TAR, pmsg->addr
++ | (pmsg->flags & I2C_M_TEN ? IC_TAR_10BIT_ADDR : 0));
++
++ /* Enable I2C controller */
++ mrst_i2c_write(i2c->base + IC_ENABLE, ENABLE);
++
++ reg_val = mrst_i2c_read(i2c->base + IC_CON);
++
++ return 0;
++}
++
++/**
++ * mrst_i2c_xfer - Main master transfer routine.
++ * @adap: i2c_adapter struct pointer
++ * @pmsg: i2c_msg struct pointer
++ * @num: number of i2c_msg
++ *
++ * Return Values:
++ * + number of messages transferred
++ * -ETIMEDOUT If cannot disable I2C controller or read IC_STATUS
++ * -EINVAL If the address in i2c_msg is invalid
++ *
++ * This function will be registered in i2c-core and exposed to external
++ * I2C clients.
++ * 1. Disable I2C controller
++ * 2. Unmask three interrupts: RX_FULL, TX_EMPTY, TX_ABRT
++ * 3. Check if address in i2c_msg is valid
++ * 4. Enable I2C controller
++ * 5. Perform real transfer (call xfer_read or xfer_write)
++ * 6. Wait until the current transfer is finished(check bus state)
++ * 7. Mask and clear all interrupts
++ */
++static int mrst_i2c_xfer(struct i2c_adapter *adap,
++ struct i2c_msg *pmsg,
++ int num)
++{
++ struct mrst_i2c_private *i2c =
++ (struct mrst_i2c_private *)i2c_get_adapdata(adap);
++ int i, err;
++
++ dev_dbg(&adap->dev, "mrst_i2c_xfer, process %d msg(s)\n", num);
++ dev_dbg(&adap->dev, KERN_INFO "slave address is %x\n", pmsg->addr);
++
++ /* if number of messages equal 0*/
++ if (num == 0)
++ return 0;
++
++ /* Checked the sanity of passed messages. */
++ if (unlikely(mrst_i2c_invalid_address(&pmsg[0]))) {
++ dev_err(&adap->dev, "Invalid address 0x%03x (%d-bit)\n",
++ pmsg[0].addr, pmsg[0].flags & I2C_M_TEN ? 10 : 7);
++ return -EINVAL;
++ }
++ for (i = 0; i < num; i++) {
++ /* Message address equal? */
++ if (unlikely(mrst_i2c_address_neq(&pmsg[0], &pmsg[i]))) {
++ dev_err(&adap->dev, "Invalid address in msg[%d]\n", i);
++ return -EINVAL;
++ }
++ }
++
++ if (mrst_i2c_setup(adap, pmsg))
++ return -EINVAL;
++
++ for (i = 0; i < num; i++) {
++ dev_dbg(&adap->dev, " #%d: %sing %d byte%s %s 0x%02x\n", i,
++ pmsg->flags & I2C_M_RD ? "read" : "writ",
++ pmsg->len, pmsg->len > 1 ? "s" : "",
++ pmsg->flags & I2C_M_RD ? "from" : "to", pmsg->addr);
++
++
++ /* Read or Write */
++ if (pmsg->len && pmsg->buf) {
++ if (pmsg->flags & I2C_M_RD) {
++ dev_dbg(&adap->dev, "I2C_M_RD\n");
++ err = xfer_read(adap, pmsg->buf, pmsg->len);
++ } else {
++ dev_dbg(&adap->dev, "I2C_M_WR\n");
++ err = xfer_write(adap, pmsg->buf, pmsg->len);
++ }
++ if (err < 0)
++ goto err_1;
++ }
++ dev_dbg(&adap->dev, "msg[%d] transfer complete\n", i);
++ pmsg++; /* next message */
++ }
++ goto exit;
++
++err_1:
++ i = err;
++exit:
++ /* Mask interrupts */
++ mrst_i2c_write(i2c->base + IC_INTR_MASK, 0x0000);
++ /* Clear all interrupts */
++ mrst_i2c_read(i2c->base + IC_CLR_INTR);
++
++ return i;
++}
++
++static int mrst_gpio_init(int sda, int scl)
++{
++ if (gpio_request(sda, "I2C_SDA"))
++ goto err_sda;
++
++ if (gpio_request(scl, "I2C_SCL"))
++ goto err_scl;
++
++ return 0;
++err_scl:
++ gpio_free(sda);
++err_sda:
++ return -1;
++}
++
++static struct pci_device_id mrst_i2c_ids[] = {
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0802)},
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0803)},
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0804)},
++ {0,}
++};
++MODULE_DEVICE_TABLE(pci, mrst_i2c_ids);
++
++static struct i2c_algorithm mrst_i2c_algorithm = {
++ .master_xfer = mrst_i2c_xfer,
++ .functionality = mrst_i2c_func,
++};
++
++static struct pci_driver mrst_i2c_driver = {
++ .name = "mrst_i2c",
++ .id_table = mrst_i2c_ids,
++ .probe = mrst_i2c_probe,
++ .remove = __devexit_p(mrst_i2c_remove),
++ .suspend = NULL,
++ .resume = NULL,
++};
++
++/**
++ * mrst_i2c_probe - I2C controller initialization routine
++ * @dev: pci device
++ * @id: device id
++ *
++ * Return Values:
++ * 0 success
++ * -ENODEV If cannot allocate pci resource
++ * -ENOMEM If the register base remapping failed, or
++ * if kzalloc failed
++ *
++ * Initialization steps:
++ * 1. Request for PCI resource
++ * 2. Remap the start address of PCI resource to register base
++ * 3. Request for device memory region
++ * 4. Fill in the struct members of mrst_i2c_private
++ * 5. Call mrst_i2c_hwinit() for hardware initialization
++ * 6. Register I2C adapter in i2c-core
++ */
++static int __devinit mrst_i2c_probe(struct pci_dev *dev,
++ const struct pci_device_id *id)
++{
++ struct mrst_i2c_private *mrst;
++ struct i2c_adapter *adap;
++ unsigned int start, len;
++ int err, busnum = 0;
++ void __iomem *base = NULL;
++ int gpio_sda = 0, gpio_scl = 0;
++
++ err = pci_enable_device(dev);
++ if (err) {
++ dev_err(&dev->dev, "Failed to enable I2C PCI device (%d)\n",
++ err);
++ goto exit;
++ }
++
++ /* Determine the address of the I2C area */
++ start = pci_resource_start(dev, DEF_BAR);
++ len = pci_resource_len(dev, DEF_BAR);
++ if (!start || len <= 0) {
++ dev_err(&dev->dev, "Base address initialization failed\n");
++ err = -ENODEV;
++ goto exit;
++ }
++ dev_dbg(&dev->dev, "mrst i2c resource start %x, len=%d\n",
++ start, len);
++ err = pci_request_region(dev, DEF_BAR, mrst_i2c_driver.name);
++ if (err) {
++ dev_err(&dev->dev, "Failed to request I2C region "
++ "0x%1x-0x%Lx\n", start,
++ (unsigned long long)pci_resource_end(dev, DEF_BAR));
++ goto exit;
++ }
++
++ base = ioremap_nocache(start, len);
++ if (!base) {
++ dev_err(&dev->dev, "I/O memory remapping failed\n");
++ err = -ENOMEM;
++ goto fail0;
++ }
++
++ /* Allocate the per-device data structure, mrst_i2c_private */
++ mrst = kzalloc(sizeof(struct mrst_i2c_private), GFP_KERNEL);
++ if (mrst == NULL) {
++ dev_err(&dev->dev, "Can't allocate interface!\n");
++ err = -ENOMEM;
++ goto fail1;
++ }
++
++ adap = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL);
++ if (adap == NULL) {
++ dev_err(&dev->dev, "Can't allocate interface!\n");
++ err = -ENOMEM;
++ goto fail2;
++ }
++
++ /* Initialize struct members */
++ snprintf(adap->name, sizeof(adap->name), "mrst_i2c");
++ adap->owner = THIS_MODULE;
++ adap->algo = &mrst_i2c_algorithm;
++ adap->class = I2C_CLASS_HWMON;
++ adap->dev.parent = &dev->dev;
++ mrst->adap = adap;
++ mrst->base = base;
++ mrst->speed = speed_mode;
++
++ pci_set_drvdata(dev, mrst);
++ i2c_set_adapdata(adap, mrst);
++
++ /* Initialize i2c controller */
++ err = mrst_i2c_hwinit(dev);
++ if (err < 0) {
++ dev_err(&dev->dev, "I2C interface initialization failed\n");
++ goto fail3;
++ }
++
++ switch (id->device) {
++ case 0x0802:
++ dev_dbg(&adap->dev, KERN_INFO "I2C0\n");
++ gpio_sda = GPIO_I2C_0_SDA;
++ gpio_scl = GPIO_I2C_0_SCL;
++ adap->nr = busnum = 0;
++ break;
++ case 0x0803:
++ dev_dbg(&adap->dev, KERN_INFO "I2C1\n");
++ gpio_sda = GPIO_I2C_1_SDA;
++ gpio_scl = GPIO_I2C_1_SCL;
++ adap->nr = busnum = 1;
++ break;
++ case 0x0804:
++ dev_dbg(&adap->dev, KERN_INFO "I2C2\n");
++ gpio_sda = GPIO_I2C_2_SDA;
++ gpio_scl = GPIO_I2C_2_SCL;
++ adap->nr = busnum = 2;
++ break;
++ default:
++ ;
++ }
++
++ /* Config GPIO pin for I2C */
++ err = mrst_gpio_init(gpio_sda, gpio_scl);
++ if (err) {
++ dev_err(&dev->dev, "GPIO %s registration failed\n",
++ adap->name);
++ goto fail3;
++ }
++
++ /* Register i2c board info */
++ /*mrst_i2c_register_board_info(dev, busnum);*/
++
++ /* Adapter registration */
++ err = i2c_add_numbered_adapter(adap);
++ if (err) {
++ dev_err(&dev->dev, "Adapter %s registration failed\n",
++ adap->name);
++ goto fail3;
++ }
++
++ dev_dbg(&dev->dev, "MRST I2C bus %d driver bind success.\n", busnum);
++ return 0;
++
++fail3:
++ i2c_set_adapdata(adap, NULL);
++ pci_set_drvdata(dev, NULL);
++ kfree(adap);
++fail2:
++ kfree(mrst);
++fail1:
++ iounmap(base);
++fail0:
++ pci_release_region(dev, DEF_BAR);
++exit:
++ return err;
++}
++
++static void __devexit mrst_i2c_remove(struct pci_dev *dev)
++{
++ struct mrst_i2c_private *mrst = (struct mrst_i2c_private *)
++ pci_get_drvdata(dev);
++ if (i2c_del_adapter(mrst->adap))
++ dev_err(&dev->dev, "Failed to delete i2c adapter");
++
++ kfree(mrst->mrst_i2c_info);
++ kfree(mrst->data);
++
++ switch (dev->device) {
++ case 0x0802:
++ gpio_free(GPIO_I2C_0_SDA);
++ gpio_free(GPIO_I2C_0_SCL);
++ break;
++ case 0x0803:
++ gpio_free(GPIO_I2C_1_SDA);
++ gpio_free(GPIO_I2C_1_SCL);
++ break;
++ case 0x0804:
++ gpio_free(GPIO_I2C_2_SDA);
++ gpio_free(GPIO_I2C_2_SCL);
++ break;
++ default:
++ break;
++ }
++
++ pci_set_drvdata(dev, NULL);
++ iounmap(mrst->base);
++ kfree(mrst);
++ pci_release_region(dev, DEF_BAR);
++}
++
++static int __init mrst_i2c_init(void)
++{
++ printk(KERN_NOTICE "Moorestown I2C driver %s\n", VERSION);
++ return pci_register_driver(&mrst_i2c_driver);
++}
++
++static void __exit mrst_i2c_exit(void)
++{
++ pci_unregister_driver(&mrst_i2c_driver);
++}
++
++module_init(mrst_i2c_init);
++module_exit(mrst_i2c_exit);
++
++MODULE_AUTHOR("Ba Zheng <zheng.ba@intel.com>");
++MODULE_DESCRIPTION("I2C driver for Moorestown Platform");
++MODULE_LICENSE("GPL");
++MODULE_VERSION(VERSION);
+Index: linux-2.6.33/drivers/i2c/busses/i2c-mrst.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/i2c/busses/i2c-mrst.h
+@@ -0,0 +1,282 @@
++#ifndef __I2C_MRST_H
++#define __I2C_MRST_H
++
++#include <linux/i2c.h>
++
++/* Update for 2.6.27 kernel by Wen */
++
++/* PCI config table macros */
++/* Offsets */
++#define I2C_INFO_TABLE_LENGTH 4
++#define I2C_INFO_DEV_BLOCK 10
++#define I2C_DEV_ADDR 2
++#define I2C_DEV_IRQ 4
++#define I2C_DEV_NAME 6
++#define I2C_DEV_INFO 22
++/* Length */
++#define HEAD_LENGTH 10
++#define BLOCK_LENGTH 32
++#define ADDR_LENGTH 2
++#define IRQ_LENGTH 2
++#define NAME_LENGTH 16
++#define INFO_LENGTH 10
++
++struct mrst_i2c_private {
++ struct i2c_adapter *adap;
++ /* Register base address */
++ void __iomem *base;
++ /* Speed mode */
++ int speed;
++ struct i2c_board_info *mrst_i2c_info;
++ char (*data)[INFO_LENGTH];
++};
++
++/* Speed mode macros */
++#define STANDARD 100
++#define FAST 25
++#define HIGH 3
++
++/* Control register */
++#define IC_CON 0x00
++#define SLV_DIS (1 << 6) /* Disable slave mode */
++#define RESTART (1 << 5) /* Send a Restart condition */
++#define ADDR_10BIT (1 << 4) /* 10-bit addressing */
++#define STANDARD_MODE (1 << 1) /* standard mode */
++#define FAST_MODE (2 << 1) /* fast mode */
++#define HIGH_MODE (3 << 1) /* high speed mode */
++#define MASTER_EN (1 << 0) /* Master mode */
++
++/* Target address register */
++#define IC_TAR 0x04
++#define IC_TAR_10BIT_ADDR (1 << 12) /* 10-bit addressing */
++#define IC_TAR_SPECIAL (1 << 11) /* Perform special I2C cmd */
++#define IC_TAR_GC_OR_START	(1 << 10)	/* 0: General Call Address */
++ /* 1: START BYTE */
++
++/* Slave Address Register */
++#define IC_SAR 0x08 /* Not used in Master mode */
++
++/* High Speed Master Mode Code Address Register */
++#define IC_HS_MADDR 0x0c
++
++/* Rx/Tx Data Buffer and Command Register */
++#define IC_DATA_CMD 0x10
++#define IC_RD (1 << 8) /* 1: Read 0: Write */
++
++/* Standard Speed Clock SCL High Count Register */
++#define IC_SS_SCL_HCNT 0x14
++
++/* Standard Speed Clock SCL Low Count Register */
++#define IC_SS_SCL_LCNT 0x18
++
++/* Fast Speed Clock SCL High Count Register */
++#define IC_FS_SCL_HCNT 0x1c
++
++/* Fast Speed Clock SCL Low Count Register */
++#define IC_FS_SCL_LCNT 0x20
++
++/* High Speed Clock SCL High Count Register */
++#define IC_HS_SCL_HCNT 0x24
++
++/* High Speed Clock SCL Low Count Register */
++#define IC_HS_SCL_LCNT 0x28
++
++/* Interrupt Status Register */
++#define IC_INTR_STAT 0x2c /* Read only */
++#define R_GEN_CALL (1 << 11)
++#define R_START_DET (1 << 10)
++#define R_STOP_DET (1 << 9)
++#define R_ACTIVITY (1 << 8)
++#define R_RX_DONE (1 << 7)
++#define R_TX_ABRT (1 << 6)
++#define R_RD_REQ (1 << 5)
++#define R_TX_EMPTY (1 << 4)
++#define R_TX_OVER (1 << 3)
++#define R_RX_FULL (1 << 2)
++#define R_RX_OVER (1 << 1)
++#define R_RX_UNDER (1 << 0)
++
++/* Interrupt Mask Register */
++#define IC_INTR_MASK 0x30 /* Read and Write */
++#define M_GEN_CALL (1 << 11)
++#define M_START_DET (1 << 10)
++#define M_STOP_DET (1 << 9)
++#define M_ACTIVITY (1 << 8)
++#define M_RX_DONE (1 << 7)
++#define M_TX_ABRT (1 << 6)
++#define M_RD_REQ (1 << 5)
++#define M_TX_EMPTY (1 << 4)
++#define M_TX_OVER (1 << 3)
++#define M_RX_FULL (1 << 2)
++#define M_RX_OVER (1 << 1)
++#define M_RX_UNDER (1 << 0)
++
++/* Raw Interrupt Status Register */
++#define IC_RAW_INTR_STAT 0x34 /* Read Only */
++#define GEN_CALL (1 << 11) /* General call */
++#define START_DET	(1 << 10)	/* (RE)START occurred */
++#define STOP_DET	(1 << 9)	/* STOP occurred */
++#define ACTIVITY (1 << 8) /* Bus busy */
++#define RX_DONE (1 << 7) /* Not used in Master mode */
++#define TX_ABRT (1 << 6) /* Transmit Abort */
++#define RD_REQ (1 << 5) /* Not used in Master mode */
++#define TX_EMPTY (1 << 4) /* TX FIFO <= threshold */
++#define TX_OVER (1 << 3) /* TX FIFO overflow */
++#define RX_FULL (1 << 2) /* RX FIFO >= threshold */
++#define RX_OVER (1 << 1) /* RX FIFO overflow */
++#define RX_UNDER (1 << 0) /* RX FIFO empty */
++
++/* Receive FIFO Threshold Register */
++#define IC_RX_TL 0x38
++
++/* Transmit FIFO Threshold Register */
++#define IC_TX_TL 0x3c
++
++/* Clear Combined and Individual Interrupt Register */
++#define IC_CLR_INTR 0x40
++#define CLR_INTR (1 << 0)
++
++/* Clear RX_UNDER Interrupt Register */
++#define IC_CLR_RX_UNDER 0x44
++#define CLR_RX_UNDER (1 << 0)
++
++/* Clear RX_OVER Interrupt Register */
++#define IC_CLR_RX_OVER 0x48
++#define CLR_RX_OVER (1 << 0)
++
++/* Clear TX_OVER Interrupt Register */
++#define IC_CLR_TX_OVER 0x4c
++#define CLR_TX_OVER (1 << 0)
++
++#define IC_CLR_RD_REQ 0x50
++
++/* Clear TX_ABRT Interrupt Register */
++#define IC_CLR_TX_ABRT 0x54
++#define CLR_TX_ABRT (1 << 0)
++
++#define IC_CLR_RX_DONE 0x58
++
++
++/* Clear ACTIVITY Interrupt Register */
++#define IC_CLR_ACTIVITY 0x5c
++#define CLR_ACTIVITY (1 << 0)
++
++/* Clear STOP_DET Interrupt Register */
++#define IC_CLR_STOP_DET 0x60
++#define CLR_STOP_DET (1 << 0)
++
++/* Clear START_DET Interrupt Register */
++#define IC_CLR_START_DET 0x64
++#define CLR_START_DET (1 << 0)
++
++/* Clear GEN_CALL Interrupt Register */
++#define IC_CLR_GEN_CALL 0x68
++#define CLR_GEN_CALL (1 << 0)
++
++/* Enable Register */
++#define IC_ENABLE 0x6c
++#define ENABLE (1 << 0)
++
++/* Status Register */
++#define IC_STATUS 0x70 /* Read Only */
++#define STAT_SLV_ACTIVITY (1 << 6) /* Slave not in idle */
++#define STAT_MST_ACTIVITY (1 << 5) /* Master not in idle */
++#define STAT_RFF (1 << 4) /* RX FIFO Full */
++#define STAT_RFNE (1 << 3) /* RX FIFO Not Empty */
++#define STAT_TFE (1 << 2) /* TX FIFO Empty */
++#define STAT_TFNF (1 << 1) /* TX FIFO Not Full */
++#define STAT_ACTIVITY (1 << 0) /* Activity Status */
++
++/* Transmit FIFO Level Register */
++#define IC_TXFLR 0x74 /* Read Only */
++#define TXFLR (1 << 0) /* TX FIFO level */
++
++/* Receive FIFO Level Register */
++#define IC_RXFLR 0x78 /* Read Only */
++#define RXFLR (1 << 0) /* RX FIFO level */
++
++/* Transmit Abort Source Register */
++#define IC_TX_ABRT_SOURCE 0x80
++#define ABRT_SLVRD_INTX (1 << 15)
++#define ABRT_SLV_ARBLOST (1 << 14)
++#define ABRT_SLVFLUSH_TXFIFO (1 << 13)
++#define ARB_LOST (1 << 12)
++#define ABRT_MASTER_DIS (1 << 11)
++#define ABRT_10B_RD_NORSTRT (1 << 10)
++#define ABRT_SBYTE_NORSTRT (1 << 9)
++#define ABRT_HS_NORSTRT (1 << 8)
++#define ABRT_SBYTE_ACKDET (1 << 7)
++#define ABRT_HS_ACKDET (1 << 6)
++#define ABRT_GCALL_READ (1 << 5)
++#define ABRT_GCALL_NOACK (1 << 4)
++#define ABRT_TXDATA_NOACK (1 << 3)
++#define ABRT_10ADDR2_NOACK (1 << 2)
++#define ABRT_10ADDR1_NOACK (1 << 1)
++#define ABRT_7B_ADDR_NOACK (1 << 0)
++
++/* Enable Status Register */
++#define IC_ENABLE_STATUS 0x9c
++#define IC_EN (1 << 0) /* I2C in an enabled state */
++
++/* Component Parameter Register 1*/
++#define IC_COMP_PARAM_1 0xf4
++#define APB_DATA_WIDTH (0x3 << 0)
++
++/* GPIO_PINS */
++#define GPIO_I2C_0_SDA 56
++#define GPIO_I2C_0_SCL 57
++
++#define GPIO_I2C_1_SDA 54
++#define GPIO_I2C_1_SCL 55
++
++#define GPIO_I2C_2_SDA 52
++#define GPIO_I2C_2_SCL 53
++
++/* added by xiaolin --begin */
++#define SS_MIN_SCL_HIGH 4000
++#define SS_MIN_SCL_LOW 4700
++#define FS_MIN_SCL_HIGH 600
++#define FS_MIN_SCL_LOW 1300
++#define HS_MIN_SCL_HIGH_100PF 60
++#define HS_MIN_SCL_LOW_100PF 120
++
++enum mrst_i2c_irq {
++ i2c_irq_none = 0x000,
++ i2c_irq_rx_under = 0x001,
++ i2c_irq_rx_over = 0x002,
++ i2c_irq_rx_full = 0x004,
++ i2c_irq_tx_over = 0x008,
++ i2c_irq_tx_empty = 0x010,
++ i2c_irq_rd_req = 0x020,
++ i2c_irq_tx_abrt = 0x040,
++ i2c_irq_rx_done = 0x080,
++ i2c_irq_activity = 0x100,
++ i2c_irq_stop_det = 0x200,
++ i2c_irq_start_det = 0x400,
++ i2c_irq_gen_call = 0x800,
++ i2c_irq_all = 0xfff
++};
++
++/* added by xiaolin --end */
++
++/* Function declarations */
++
++static int mrst_i2c_disable(struct i2c_adapter *);
++static int __devinit mrst_i2c_hwinit(struct pci_dev *);
++static u32 mrst_i2c_func(struct i2c_adapter *);
++static inline int mrst_i2c_invalid_address(const struct i2c_msg *);
++static inline int mrst_i2c_address_neq(const struct i2c_msg *,
++ const struct i2c_msg *);
++static int mrst_i2c_xfer(struct i2c_adapter *,
++ struct i2c_msg *,
++ int);
++static int __devinit mrst_i2c_probe(struct pci_dev *,
++ const struct pci_device_id *);
++static void __devexit mrst_i2c_remove(struct pci_dev *);
++static int __init mrst_i2c_init(void);
++static void __exit mrst_i2c_exit(void);
++static int xfer_read(struct i2c_adapter *,
++ unsigned char *, int);
++static int xfer_write(struct i2c_adapter *,
++ unsigned char *, int);
++#endif /* __I2C_MRST_H */
+Index: linux-2.6.33/drivers/i2c/i2c-boardinfo.c
+===================================================================
+--- linux-2.6.33.orig/drivers/i2c/i2c-boardinfo.c
++++ linux-2.6.33/drivers/i2c/i2c-boardinfo.c
+@@ -58,11 +58,13 @@ EXPORT_SYMBOL_GPL(__i2c_first_dynamic_bu
+ * The board info passed can safely be __initdata, but be careful of embedded
+ * pointers (for platform_data, functions, etc) since that won't be copied.
+ */
+-int __init
++int
+ i2c_register_board_info(int busnum,
+ struct i2c_board_info const *info, unsigned len)
+ {
+ int status;
++ int flag = 0;
++ struct i2c_devinfo *devinfo;
+
+ down_write(&__i2c_board_lock);
+
+@@ -71,21 +73,32 @@ i2c_register_board_info(int busnum,
+ __i2c_first_dynamic_bus_num = busnum + 1;
+
+ for (status = 0; len; len--, info++) {
+- struct i2c_devinfo *devinfo;
+-
+- devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
+- if (!devinfo) {
+- pr_debug("i2c-core: can't register boardinfo!\n");
+- status = -ENOMEM;
+- break;
++ list_for_each_entry(devinfo, &__i2c_board_list, list) {
++ if (devinfo->busnum == busnum
++ && devinfo->board_info.addr == info->addr) {
++ flag = 1;
++ break;
++ }
+ }
+-
+- devinfo->busnum = busnum;
+- devinfo->board_info = *info;
+- list_add_tail(&devinfo->list, &__i2c_board_list);
++ if (flag != 1) {
++ struct i2c_devinfo *dev;
++ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
++ if (!dev) {
++ pr_debug("i2c-core: can't register"
++ "boardinfo!\n");
++ status = -ENOMEM;
++ break;
++ }
++
++ dev->busnum = busnum;
++ dev->board_info = *info;
++ list_add_tail(&dev->list, &__i2c_board_list);
++ }
++ flag = 0;
+ }
+
+ up_write(&__i2c_board_lock);
+
+ return status;
+ }
++EXPORT_SYMBOL_GPL(i2c_register_board_info);
+Index: linux-2.6.33/arch/x86/kernel/cpu/cpufreq/Kconfig
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/cpu/cpufreq/Kconfig
++++ linux-2.6.33/arch/x86/kernel/cpu/cpufreq/Kconfig
+@@ -10,6 +10,22 @@ if CPU_FREQ
+
+ comment "CPUFreq processor drivers"
+
++config X86_SFI_CPUFREQ
++ tristate "SFI Processor P-States driver"
++ depends on SFI_PROCESSOR_PM
++ select CPU_FREQ_TABLE
++ help
++ This driver adds a CPUFreq driver which utilizes the SFI
++ Processor Performance States.
++ This driver also supports Intel Enhanced Speedstep.
++
++ To compile this driver as a module, choose M here: the
++ module will be called sfi-cpufreq.
++
++ For details, take a look at <file:Documentation/cpu-freq/>.
++
++ If in doubt, say N.
++
+ config X86_ACPI_CPUFREQ
+ tristate "ACPI Processor P-States driver"
+ select CPU_FREQ_TABLE
+Index: linux-2.6.33/arch/x86/kernel/cpu/cpufreq/Makefile
+===================================================================
+--- linux-2.6.33.orig/arch/x86/kernel/cpu/cpufreq/Makefile
++++ linux-2.6.33/arch/x86/kernel/cpu/cpufreq/Makefile
+@@ -15,6 +15,7 @@ obj-$(CONFIG_X86_GX_SUSPMOD) += gx-susp
+ obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o
+ obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o
+ obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o
++obj-$(CONFIG_X86_SFI_CPUFREQ) += sfi-cpufreq.o
+ obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
+ obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
+ obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
+Index: linux-2.6.33/arch/x86/kernel/cpu/cpufreq/sfi-cpufreq.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/kernel/cpu/cpufreq/sfi-cpufreq.c
+@@ -0,0 +1,655 @@
++/*
++ * sfi_cpufreq.c - sfi Processor P-States Driver
++ *
++ *
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or (at
++ * your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * Author: Vishwesh M Rudramuni
++ * Contact information: Vishwesh Rudramuni <vishwesh.m.rudramuni@intel.com>
++ */
++
++/*
++ * This sfi Processor P-States Driver re-uses most part of the code available
++ * in acpi cpufreq driver.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/smp.h>
++#include <linux/sched.h>
++#include <linux/cpufreq.h>
++#include <linux/compiler.h>
++#include <linux/dmi.h>
++
++#include <linux/sfi.h>
++#include <linux/sfi_processor.h>
++
++#include <linux/io.h>
++#include <asm/msr.h>
++#include <asm/processor.h>
++#include <asm/cpufeature.h>
++#include <linux/delay.h>
++#include <linux/uaccess.h>
++
++#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
++ "sfi-cpufreq", msg)
++
++MODULE_AUTHOR("Vishwesh Rudramuni");
++MODULE_DESCRIPTION("SFI Processor P-States Driver");
++MODULE_LICENSE("GPL");
++#define SYSTEM_INTEL_MSR_CAPABLE 0x1
++#define INTEL_MSR_RANGE (0xffff)
++#define CPUID_6_ECX_APERFMPERF_CAPABILITY (0x1)
++
++struct sfi_cpufreq_data {
++ struct sfi_processor_performance *sfi_data;
++ struct cpufreq_frequency_table *freq_table;
++ unsigned int max_freq;
++ unsigned int resume;
++ unsigned int cpu_feature;
++};
++
++static DEFINE_PER_CPU(struct sfi_cpufreq_data *, drv_data);
++
++/* sfi_perf_data is a pointer to percpu data. */
++static struct sfi_processor_performance *sfi_perf_data;
++
++static struct cpufreq_driver sfi_cpufreq_driver;
++
++static unsigned int sfi_pstate_strict;
++
++static int check_est_cpu(unsigned int cpuid)
++{
++ struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
++
++ if (cpu->x86_vendor != X86_VENDOR_INTEL ||
++ !cpu_has(cpu, X86_FEATURE_EST))
++ return 0;
++
++ return 1;
++}
++
++static unsigned extract_freq(u32 msr, struct sfi_cpufreq_data *data)
++{
++ int i;
++ struct sfi_processor_performance *perf;
++
++ msr &= INTEL_MSR_RANGE;
++ perf = data->sfi_data;
++
++ for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
++ if (msr == perf->states[data->freq_table[i].index].status)
++ return data->freq_table[i].frequency;
++ }
++ return data->freq_table[0].frequency;
++}
++
++
++struct msr_addr {
++ u32 reg;
++};
++
++
++struct drv_cmd {
++ unsigned int type;
++ cpumask_t mask;
++ u32 msr_reg;
++ u32 val;
++};
++
++static void do_drv_read(struct drv_cmd *cmd)
++{
++ u32 h;
++ rdmsr(cmd->msr_reg, cmd->val, h);
++}
++
++static void do_drv_write(struct drv_cmd *cmd)
++{
++ u32 lo, hi;
++
++ rdmsr(cmd->msr_reg, lo, hi);
++ lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
++ wrmsr(cmd->msr_reg, lo, hi);
++}
++
++static void drv_read(struct drv_cmd *cmd)
++{
++ cpumask_t saved_mask = current->cpus_allowed;
++ cmd->val = 0;
++
++ set_cpus_allowed(current, cmd->mask);
++ do_drv_read(cmd);
++ set_cpus_allowed(current, saved_mask);
++}
++
++static void drv_write(struct drv_cmd *cmd)
++{
++ cpumask_t saved_mask = current->cpus_allowed;
++ unsigned int i;
++
++ for_each_cpu_mask(i, cmd->mask) {
++ set_cpus_allowed(current, cpumask_of_cpu(i));
++ do_drv_write(cmd);
++ }
++
++ set_cpus_allowed(current, saved_mask);
++ return;
++}
++
++static u32 get_cur_val(cpumask_t mask)
++{
++ struct drv_cmd cmd;
++
++ if (unlikely(cpus_empty(mask)))
++ return 0;
++
++ cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
++ cmd.msr_reg = MSR_IA32_PERF_STATUS;
++ cmd.mask = mask;
++
++ drv_read(&cmd);
++
++ dprintk("get_cur_val = %u\n", cmd.val);
++
++ return cmd.val;
++}
++
++/*
++ * Return the measured active (C0) frequency on this CPU since last call
++ * to this function.
++ * Input: cpu number
++ * Return: Average CPU frequency in terms of max frequency (zero on error)
++ *
++ * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
++ * over a period of time, while CPU is in C0 state.
++ * IA32_MPERF counts at the rate of max advertised frequency
++ * IA32_APERF counts at the rate of actual CPU frequency
++ * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
++ * no meaning should be associated with absolute values of these MSRs.
++ */
++static unsigned int get_measured_perf(struct cpufreq_policy *policy,
++ unsigned int cpu)
++{
++ union {
++ struct {
++ u32 lo;
++ u32 hi;
++ } split;
++ u64 whole;
++ } aperf_cur, mperf_cur;
++
++ cpumask_t saved_mask;
++ unsigned int perf_percent;
++ unsigned int retval;
++
++ saved_mask = current->cpus_allowed;
++ set_cpus_allowed(current, cpumask_of_cpu(cpu));
++ if (get_cpu() != cpu) {
++ /* We were not able to run on requested processor */
++ put_cpu();
++ return 0;
++ }
++
++ rdmsr(MSR_IA32_APERF, aperf_cur.split.lo, aperf_cur.split.hi);
++ rdmsr(MSR_IA32_MPERF, mperf_cur.split.lo, mperf_cur.split.hi);
++
++ wrmsr(MSR_IA32_APERF, 0, 0);
++ wrmsr(MSR_IA32_MPERF, 0, 0);
++
++#ifdef __i386__
++ /*
++ * We dont want to do 64 bit divide with 32 bit kernel
++ * Get an approximate value. Return failure in case we cannot get
++ * an approximate value.
++ */
++ if (unlikely(aperf_cur.split.hi || mperf_cur.split.hi)) {
++ int shift_count;
++ u32 h;
++
++ h = max_t(u32, aperf_cur.split.hi, mperf_cur.split.hi);
++ shift_count = fls(h);
++
++ aperf_cur.whole >>= shift_count;
++ mperf_cur.whole >>= shift_count;
++ }
++
++ if (((unsigned long)(-1) / 100) < aperf_cur.split.lo) {
++ int shift_count = 7;
++ aperf_cur.split.lo >>= shift_count;
++ mperf_cur.split.lo >>= shift_count;
++ }
++
++ if (aperf_cur.split.lo && mperf_cur.split.lo)
++ perf_percent = (aperf_cur.split.lo * 100) / mperf_cur.split.lo;
++ else
++ perf_percent = 0;
++
++#else
++ if (unlikely(((unsigned long)(-1) / 100) < aperf_cur.whole)) {
++ int shift_count = 7;
++ aperf_cur.whole >>= shift_count;
++ mperf_cur.whole >>= shift_count;
++ }
++
++ if (aperf_cur.whole && mperf_cur.whole)
++ perf_percent = (aperf_cur.whole * 100) / mperf_cur.whole;
++ else
++ perf_percent = 0;
++
++#endif
++
++ retval = per_cpu(drv_data, cpu)->max_freq * perf_percent / 100;
++
++ put_cpu();
++ set_cpus_allowed(current, saved_mask);
++
++ dprintk("cpu %d: performance percent %d\n", cpu, perf_percent);
++ return retval;
++}
++
++
++static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
++{
++ struct sfi_cpufreq_data *data = per_cpu(drv_data, cpu);
++ unsigned int freq;
++
++ unsigned int cached_freq;
++
++ dprintk("get_cur_freq_on_cpu (%d)\n", cpu);
++
++ if (unlikely(data == NULL ||
++ data->sfi_data == NULL || data->freq_table == NULL)) {
++ return 0;
++ }
++ cached_freq = data->freq_table[data->sfi_data->state].frequency;
++ freq = extract_freq(get_cur_val(cpumask_of_cpu(cpu)), data);
++
++ if (freq != cached_freq) {
++ data->resume = 1;
++ return cached_freq;
++ }
++
++ dprintk("cur freq = %u\n", freq);
++
++ return freq;
++}
++
++static unsigned int check_freqs(cpumask_t mask, unsigned int freq,
++ struct sfi_cpufreq_data *data)
++{
++ unsigned int cur_freq;
++ unsigned int i;
++
++ for (i = 0; i < 100; i++) {
++ cur_freq = extract_freq(get_cur_val(mask), data);
++ if (cur_freq == freq)
++ return 1;
++ udelay(10);
++ }
++ return 0;
++}
++
++static int sfi_cpufreq_target(struct cpufreq_policy *policy,
++ unsigned int target_freq, unsigned int relation)
++{
++ struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
++ struct sfi_processor_performance *perf;
++ struct cpufreq_freqs freqs;
++ cpumask_t online_policy_cpus;
++ struct drv_cmd cmd;
++ unsigned int next_state = 0; /* Index into freq_table */
++ unsigned int next_perf_state = 0; /* Index into perf table */
++ unsigned int i;
++ int result = 0;
++
++ dprintk("sfi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
++
++ if (unlikely(data == NULL ||
++ data->sfi_data == NULL || data->freq_table == NULL)) {
++ return -ENODEV;
++ }
++
++ perf = data->sfi_data;
++ result = cpufreq_frequency_table_target(policy,
++ data->freq_table,
++ target_freq,
++ relation, &next_state);
++ if (unlikely(result))
++ return -ENODEV;
++
++#ifdef CONFIG_HOTPLUG_CPU
++ /* cpufreq holds the hotplug lock, so we are safe from here on */
++ cpus_and(online_policy_cpus, cpu_online_map, *policy->cpus);
++#else
++ online_policy_cpus = policy->cpus;
++#endif
++
++ next_perf_state = data->freq_table[next_state].index;
++ if (perf->state == next_perf_state) {
++ if (unlikely(data->resume)) {
++ dprintk("Called after resume, resetting to P%d\n",
++ next_perf_state);
++ data->resume = 0;
++ } else {
++ dprintk("Already at target state (P%d)\n",
++ next_perf_state);
++ return 0;
++ }
++ }
++
++ cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
++ cmd.msr_reg = MSR_IA32_PERF_CTL;
++ cmd.val = (u32) perf->states[next_perf_state].control;
++
++ cpus_clear(cmd.mask);
++
++ if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
++ cmd.mask = online_policy_cpus;
++ else
++ cpu_set(policy->cpu, cmd.mask);
++
++ freqs.old = perf->states[perf->state].core_frequency * 1000;
++ freqs.new = data->freq_table[next_state].frequency;
++ for_each_cpu_mask(i, cmd.mask) {
++ freqs.cpu = i;
++ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
++ }
++
++ drv_write(&cmd);
++
++ if (sfi_pstate_strict) {
++ if (!check_freqs(cmd.mask, freqs.new, data)) {
++ dprintk("sfi_cpufreq_target failed (%d)\n",
++ policy->cpu);
++ return -EAGAIN;
++ }
++ }
++
++ for_each_cpu_mask(i, cmd.mask) {
++ freqs.cpu = i;
++ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
++ }
++ perf->state = next_perf_state;
++
++ return result;
++}
++
++static int sfi_cpufreq_verify(struct cpufreq_policy *policy)
++{
++ struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
++
++ dprintk("sfi_cpufreq_verify\n");
++
++ return cpufreq_frequency_table_verify(policy, data->freq_table);
++}
++
++/*
++ * sfi_cpufreq_early_init - initialize SFI P-States library
++ *
++ * Initialize the SFI P-States library (drivers/acpi/processor_perflib.c)
++ * in order to determine correct frequency and voltage pairings. We can
++ * do _PDC and _PSD and find out the processor dependency for the
++ * actual init that will happen later...
++ */
++static int __init sfi_cpufreq_early_init(void)
++{
++ int i;
++ struct sfi_processor *pr;
++
++ dprintk("sfi_cpufreq_early_init\n");
++
++ sfi_perf_data = alloc_percpu(struct sfi_processor_performance);
++ if (!sfi_perf_data) {
++ dprintk("Memory allocation error for sfi_perf_data.\n");
++ return -ENOMEM;
++ }
++
++ for_each_possible_cpu(i) {
++ pr = per_cpu(sfi_processors, i);
++ if (!pr || !pr->performance)
++ continue;
++
++ /* Assume no coordination on any error parsing domain info */
++ cpus_clear(*pr->performance->shared_cpu_map);
++ cpu_set(i, *pr->performance->shared_cpu_map);
++ pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
++ pr->performance = NULL; /* Will be set for real in register */
++ }
++
++ /* _PSD & _PDC is not supported in SFI.Its just a placeholder.
++ * sfi_processor_preregister_performance(sfi_perf_data);
++ * TBD: We need to study what we need to do here
++ */
++ return 0;
++}
++
++
++static int sfi_cpufreq_cpu_init(struct cpufreq_policy *policy)
++{
++ unsigned int i;
++ unsigned int valid_states = 0;
++ unsigned int cpu = policy->cpu;
++ struct sfi_cpufreq_data *data;
++ unsigned int result = 0;
++ struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
++ struct sfi_processor_performance *perf;
++
++ dprintk("sfi_cpufreq_cpu_init\n");
++
++ data = kzalloc(sizeof(struct sfi_cpufreq_data), GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++
++ data->sfi_data = per_cpu_ptr(sfi_perf_data, cpu);
++ per_cpu(drv_data, cpu) = data;
++
++ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
++ sfi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
++
++
++ result = sfi_processor_register_performance(data->sfi_data, cpu);
++ if (result)
++ goto err_free;
++
++ perf = data->sfi_data;
++ policy->shared_type = perf->shared_type;
++
++ /*
++ * Will let policy->cpus know about dependency only when software
++ * coordination is required.
++ */
++ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
++ policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
++ memcpy(policy->cpus, perf->shared_cpu_map
++ , sizeof(cpumask_var_t));
++ }
++
++ /* capability check */
++ if (perf->state_count <= 1) {
++ dprintk("No P-States\n");
++ result = -ENODEV;
++ goto err_unreg;
++ }
++
++ dprintk("HARDWARE addr space\n");
++ if (!check_est_cpu(cpu)) {
++ result = -ENODEV;
++ goto err_unreg;
++ }
++
++ data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
++ data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
++ (perf->state_count+1), GFP_KERNEL);
++ if (!data->freq_table) {
++ result = -ENOMEM;
++ goto err_unreg;
++ }
++
++ /* detect transition latency */
++ policy->cpuinfo.transition_latency = 0;
++ for (i = 0; i < perf->state_count; i++) {
++ if ((perf->states[i].transition_latency * 1000) >
++ policy->cpuinfo.transition_latency)
++ policy->cpuinfo.transition_latency =
++ perf->states[i].transition_latency * 1000;
++ }
++
++ data->max_freq = perf->states[0].core_frequency * 1000;
++ /* table init */
++ for (i = 0; i < perf->state_count; i++) {
++ if (i > 0 && perf->states[i].core_frequency >=
++ data->freq_table[valid_states-1].frequency / 1000)
++ continue;
++
++ data->freq_table[valid_states].index = i;
++ data->freq_table[valid_states].frequency =
++ perf->states[i].core_frequency * 1000;
++ valid_states++;
++ }
++ data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
++ perf->state = 0;
++
++ result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
++ if (result)
++ goto err_freqfree;
++
++ sfi_cpufreq_driver.get = get_cur_freq_on_cpu;
++ policy->cur = get_cur_freq_on_cpu(cpu);
++
++ /* notify BIOS that we exist
++ * currently not being done.
++ */
++
++ /* Check for APERF/MPERF support in hardware */
++ if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6) {
++ unsigned int ecx;
++ ecx = cpuid_ecx(6);
++ if (ecx & CPUID_6_ECX_APERFMPERF_CAPABILITY)
++ sfi_cpufreq_driver.getavg = get_measured_perf;
++ }
++
++ dprintk("CPU%u - SFI performance management activated.\n", cpu);
++ for (i = 0; i < perf->state_count; i++)
++ dprintk(" %cP%d: %d MHz, %d uS\n",
++ (i == perf->state ? '*' : ' '), i,
++ (u32) perf->states[i].core_frequency,
++ (u32) perf->states[i].transition_latency);
++
++ cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
++
++ /*
++ * the first call to ->target() should result in us actually
++ * writing something to the appropriate registers.
++ */
++ data->resume = 1;
++
++ return result;
++
++err_freqfree:
++ kfree(data->freq_table);
++err_unreg:
++ sfi_processor_unregister_performance(perf, cpu);
++err_free:
++ kfree(data);
++ per_cpu(drv_data, cpu) = NULL;
++
++ return result;
++}
++
++static int sfi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
++{
++ struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
++
++ dprintk("sfi_cpufreq_cpu_exit\n");
++
++ if (data) {
++ cpufreq_frequency_table_put_attr(policy->cpu);
++ per_cpu(drv_data, policy->cpu) = NULL;
++ /* acpi_processor_unregister_performance(data->acpi_data,
++ * policy->cpu);
++ * TBD: Need to study how do we do this
++ */
++ sfi_processor_unregister_performance(data->sfi_data,
++ policy->cpu);
++ kfree(data);
++ }
++
++ return 0;
++}
++
++static int sfi_cpufreq_resume(struct cpufreq_policy *policy)
++{
++ struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
++
++ dprintk("sfi_cpufreq_resume\n");
++
++ data->resume = 1;
++
++ return 0;
++}
++
++static struct freq_attr *sfi_cpufreq_attr[] = {
++ &cpufreq_freq_attr_scaling_available_freqs,
++ NULL,
++};
++
++static struct cpufreq_driver sfi_cpufreq_driver = {
++ .verify = sfi_cpufreq_verify,
++ .target = sfi_cpufreq_target,
++ .init = sfi_cpufreq_cpu_init,
++ .exit = sfi_cpufreq_cpu_exit,
++ .resume = sfi_cpufreq_resume,
++ .name = "sfi-cpufreq",
++ .owner = THIS_MODULE,
++ .attr = sfi_cpufreq_attr,
++};
++
++static int __init sfi_cpufreq_init(void)
++{
++ int ret;
++
++ dprintk("sfi_cpufreq_init\n");
++
++ ret = sfi_cpufreq_early_init();
++ if (ret)
++ return ret;
++
++ return cpufreq_register_driver(&sfi_cpufreq_driver);
++}
++
++static void __exit sfi_cpufreq_exit(void)
++{
++ dprintk("sfi_cpufreq_exit\n");
++
++ cpufreq_unregister_driver(&sfi_cpufreq_driver);
++
++ free_percpu(sfi_perf_data);
++
++ return;
++}
++
++module_param(sfi_pstate_strict, uint, 0644);
++MODULE_PARM_DESC(sfi_pstate_strict,
++ "value 0 or non-zero. non-zero -> strict sfi checks are "
++ "performed during frequency changes.");
++
++late_initcall(sfi_cpufreq_init);
++module_exit(sfi_cpufreq_exit);
++
++MODULE_ALIAS("sfi");
+Index: linux-2.6.33/arch/x86/kernel/sfi/sfi_processor_core.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/kernel/sfi/sfi_processor_core.c
+@@ -0,0 +1,134 @@
++/*
++ * sfi_processor_core.c
++ *
++ * Copyright (C) 2008 Intel Corp
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * Author: Sujith Thomas
++ * Contact information: Sujith Thomas <sujith.thomas@intel.com>
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/types.h>
++#include <linux/sfi.h>
++#include <linux/cpu.h>
++#include <linux/sfi_processor.h>
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Sujith Thomas");
++MODULE_DESCRIPTION("Processor enumeration based on SFI table.");
++
++DEFINE_PER_CPU(struct sfi_processor *, sfi_processors);
++
++int sfi_cstate_num;
++struct sfi_cstate_table_entry sfi_cstate_array[SFI_C_STATES_MAX];
++
++static int __init sfi_parse_idle(struct sfi_table_header *table)
++{
++ struct sfi_table_simple *sb;
++ struct sfi_cstate_table_entry *pentry;
++ int totallen;
++
++ sb = (struct sfi_table_simple *)table;
++ if (!sb) {
++ printk(KERN_WARNING "SFI: Unable to map IDLE\n");
++ return -ENODEV;
++ }
++
++ if (!sfi_cstate_num) {
++ sfi_cstate_num = SFI_GET_NUM_ENTRIES(sb, struct sfi_cstate_table_entry);
++ pentry = (struct sfi_cstate_table_entry *)sb->pentry;
++ totallen = sfi_cstate_num * sizeof(*pentry);
++ memcpy(sfi_cstate_array, pentry, totallen);
++ }
++
++ printk(KERN_INFO "SFI: IDLE C-state info (num = %d):\n",
++ sfi_cstate_num);
++ pentry = sfi_cstate_array;
++ for (totallen = 0; totallen < sfi_cstate_num; totallen++, pentry++) {
++ printk(KERN_INFO "Cstate[%d]: hint = 0x%08x, latency = %dms\n",
++ totallen, pentry->hint, pentry->latency);
++ }
++
++ return 0;
++}
++
++static int __init sfi_init_cpus(void/*struct sfi_table_header *table*/)
++{
++ struct sfi_processor *pr;
++ int i;
++ int result = 0;
++
++
++ for (i = 0; i < num_processors; i++) {
++ pr = kzalloc(sizeof(struct sfi_processor), GFP_KERNEL);
++ pr->id = early_per_cpu(x86_cpu_to_apicid, i);
++//sfi_cpu_array[i].apicid;
++ per_cpu(sfi_processors, pr->id) = pr;
++
++#ifdef CONFIG_SFI_CPUIDLE
++ result = sfi_processor_power_init(pr);
++#endif
++ }
++ return result;
++}
++
++static int __init sfi_processor_init(void)
++{
++ int result = 0;
++
++ sfi_table_parse(SFI_SIG_IDLE, NULL, NULL, sfi_parse_idle);
++
++#ifdef CONFIG_SFI_CPUIDLE
++ if (sfi_cstate_num > 0)
++ result = cpuidle_register_driver(&sfi_idle_driver);
++ if (result)
++ return result;
++#endif
++ result = sfi_init_cpus();
++#ifdef CONFIG_SFI_CPUIDLE
++ if (result)
++ cpuidle_unregister_driver(&sfi_idle_driver);
++
++#endif
++ return result;
++}
++
++static void __exit sfi_processor_exit(void)
++{
++ struct sfi_processor *pr;
++ int i;
++ for (i = 0; i < num_processors; i++) {
++ pr = per_cpu(sfi_processors, i);
++ if (pr) {
++#ifdef CONFIG_SFI_CPUIDLE
++ sfi_processor_power_exit(pr);
++#endif
++ kfree(pr);
++ per_cpu(sfi_processors, i) = NULL;
++ }
++ }
++
++#ifdef CONFIG_SFI_CPUIDLE
++ cpuidle_unregister_driver(&sfi_idle_driver);
++#endif
++
++}
++
++module_init(sfi_processor_init);
++module_exit(sfi_processor_exit);
+Index: linux-2.6.33/arch/x86/kernel/sfi/sfi_processor_idle.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/kernel/sfi/sfi_processor_idle.c
+@@ -0,0 +1,490 @@
++/*
++ * sfi_processor_idle.c
++ *
++ * Copyright (C) 2009 Intel Corp
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * Author: Sujith Thomas
++ * Contact information: Sujith Thomas <sujith.thomas@intel.com>
++ * Author: Vishwesh Rudramuni
++ * Contact information: Vishwesh M Rudramuni <vishwesh.m.rudramuni@intel.com>
++ */
++
++#include <asm/processor.h>
++#include <linux/sfi_processor.h>
++#include <linux/sched.h>
++#include <linux/clockchips.h>
++#include <linux/sfi.h>
++
++#ifdef CONFIG_MSTWN_POWER_MGMT
++#include <linux/intel_mid.h>
++#endif
++
++static short mwait_supported[SFI_PROCESSOR_MAX_POWER];
++
++#define MWAIT_SUBSTATE_MASK (0xf)
++#define MWAIT_SUBSTATE_SIZE (4)
++
++#ifdef CONFIG_MSTWN_POWER_MGMT
++#define MID_S0I1_STATE 1
++#define MID_S0I3_STATE 3
++static int p1_c6;
++static int __init s0ix_latency_setup(char *str);
++static u32 s0ix_latency = 20000;
++__setup("s0ix_latency=", s0ix_latency_setup);
++#endif
++
++#define CPUID_MWAIT_LEAF (5)
++#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
++#define CPUID5_ECX_INTERRUPT_BREAK (0x2)
++
++#define MWAIT_ECX_INTERRUPT_BREAK (0x1)
++
++static unsigned int latency_factor __read_mostly = 4;
++module_param(latency_factor, uint, 0644);
++
++static int sfi_idle_enter_bm(struct cpuidle_device *dev,
++ struct cpuidle_state *state);
++
++struct cpuidle_driver sfi_idle_driver = {
++ .name = "sfi_idle",
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Callers should disable interrupts before the call and enable
++ * interrupts after return.
++ */
++static void sfi_safe_halt(void)
++{
++ current_thread_info()->status &= ~TS_POLLING;
++ /*
++ * TS_POLLING-cleared state must be visible before we
++ * test NEED_RESCHED:
++ */
++ smp_mb();
++ if (!need_resched()) {
++ safe_halt();
++ local_irq_disable();
++ }
++ current_thread_info()->status |= TS_POLLING;
++}
++
++static int sfi_idle_enter_c1(struct cpuidle_device *dev,
++ struct cpuidle_state *state)
++{
++ ktime_t t1, t2;
++ s64 diff = 0;
++
++ local_irq_disable();
++
++ t1 = ktime_get();
++ sfi_safe_halt();
++ t2 = ktime_get();
++
++ local_irq_enable();
++
++ diff = ktime_to_us(ktime_sub(t2, t1));
++
++ if (diff > INT_MAX)
++ diff = INT_MAX;
++
++ return (int)diff;
++}
++
++static int sfi_idle_enter_simple(struct cpuidle_device *dev,
++ struct cpuidle_state *state)
++{
++ ktime_t t1, t2;
++ s64 diff = 0;
++ struct sfi_cstate_table_entry *data;
++
++ data = (struct sfi_cstate_table_entry *)cpuidle_get_statedata(state);
++ if (unlikely(!data))
++ return 0;
++
++
++ local_irq_disable();
++ current_thread_info()->status &= ~TS_POLLING;
++ /*
++ * TS_POLLING-cleared state must be visible before we test
++ * NEED_RESCHED:
++ */
++ smp_mb();
++
++ if (unlikely(need_resched())) {
++ current_thread_info()->status |= TS_POLLING;
++ local_irq_enable();
++ return 0;
++ }
++
++ t1 = ktime_get();
++ mwait_idle_with_hints(data->hint, MWAIT_ECX_INTERRUPT_BREAK);
++ t2 = ktime_get();
++
++ local_irq_enable();
++ current_thread_info()->status |= TS_POLLING;
++
++ diff = ktime_to_us(ktime_sub(t2, t1));
++ if (diff > INT_MAX)
++ diff = INT_MAX;
++
++ return (int)diff;
++}
++
++#ifdef CONFIG_MSTWN_POWER_MGMT
++static int __init s0ix_latency_setup(char *str)
++{
++ u32 latency;
++
++ latency = memparse(str, &str);
++ if (latency > 150)
++ s0ix_latency = latency;
++
++ printk(KERN_INFO "latency for c7 is %x\n", latency);
++ return 1;
++}
++
++static int s0i3_enter_bm(struct cpuidle_device *dev,
++ struct cpuidle_state *state)
++{
++ ktime_t t1, t2;
++ s64 diff_us = 0;
++ s64 diff_ns = 0;
++ struct sfi_processor *pr;
++ struct cpuidle_state *next_state;
++ int pr_id;
++ int ret;
++
++ pr_id = smp_processor_id();
++
++ pr = __get_cpu_var(sfi_processors);
++ if (unlikely(!pr))
++ return 0;
++
++ switch (g_ospm_base->platform_sx_state) {
++ case MID_S0I3_STATE:
++ if (pr_id == 0) {
++ t1 = ktime_get();
++
++ /* Tell the scheduler that we
++ * are going deep-idle:
++ */
++ sched_clock_idle_sleep_event();
++
++ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
++ &pr->id);
++
++ mid_suspend_enter(MID_S0I3_STATE);
++
++ t2 = ktime_get();
++
++ diff_us = ktime_to_us(ktime_sub(t2, t1));
++ diff_ns = ktime_to_ns(ktime_sub(t2, t1));
++
++ /* Tell the scheduler how much
++ * we idled:
++ */
++ sched_clock_idle_wakeup_event(diff_ns);
++ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
++ &pr->id);
++
++ if (diff_us > INT_MAX)
++ diff_us = INT_MAX;
++
++ return (int)diff_us;
++
++ } else {
++ ret = sfi_idle_enter_c1(dev, state);
++ return ret;
++ }
++ break;
++ case MID_S0I1_STATE:
++ if ((pr_id == 0) && (p1_c6 == 1)) {
++ /* pmu_issue_command(s0i1) only for thread 0 rest
++ * fall through
++ */
++ mid_suspend_enter(MID_S0I1_STATE);
++ }
++ next_state = &dev->states[4];
++ ret = sfi_idle_enter_bm(dev, next_state);
++ return ret;
++ break;
++ default:
++ next_state = &dev->states[4];
++ ret = sfi_idle_enter_bm(dev, next_state);
++ dev->last_state = &dev->states[4];
++ return ret;
++ break;
++
++ }
++
++ return 0;
++
++}
++#endif
++
++static int sfi_idle_enter_bm(struct cpuidle_device *dev,
++ struct cpuidle_state *state)
++{
++
++ ktime_t t1, t2;
++ s64 diff_us = 0;
++ s64 diff_ns = 0;
++ struct sfi_cstate_table_entry *data;
++ struct sfi_processor *pr;
++
++ pr = __get_cpu_var(sfi_processors);
++ if (unlikely(!pr))
++ return 0;
++
++ data = (struct sfi_cstate_table_entry *)cpuidle_get_statedata(state);
++ if (unlikely(!data))
++ return 0;
++
++ local_irq_disable();
++ current_thread_info()->status &= ~TS_POLLING;
++ /*
++ * TS_POLLING-cleared state must be visible before we test
++ * NEED_RESCHED:
++ */
++ smp_mb();
++
++ if (unlikely(need_resched())) {
++ current_thread_info()->status |= TS_POLLING;
++ local_irq_enable();
++ return 0;
++ }
++
++ t1 = ktime_get();
++
++ /* Tell the scheduler that we are going deep-idle: */
++ sched_clock_idle_sleep_event();
++
++ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &pr->id);
++
++
++#ifdef CONFIG_MSTWN_POWER_MGMT
++ if ((smp_processor_id() == 1) && (data->hint == 0x52))
++ p1_c6 = 1;
++#endif
++
++ mwait_idle_with_hints(data->hint, MWAIT_ECX_INTERRUPT_BREAK);
++
++#ifdef CONFIG_MSTWN_POWER_MGMT
++ if ((smp_processor_id() == 1) && (data->hint == 0x52))
++ p1_c6 = 0;
++#endif
++
++ t2 = ktime_get();
++
++ diff_us = ktime_to_us(ktime_sub(t2, t1));
++ diff_ns = ktime_to_ns(ktime_sub(t2, t1));
++
++ /* Tell the scheduler how much we idled: */
++ sched_clock_idle_wakeup_event(diff_ns);
++
++ local_irq_enable();
++ current_thread_info()->status |= TS_POLLING;
++
++ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &pr->id);
++
++ if (diff_us > INT_MAX)
++ diff_us = INT_MAX;
++
++ return (int)diff_us;
++
++}
++
++/**
++ * sfi_processor_setup_cpuidle - prepares and configures CPUIDLE
++ * @pr: the SFI processor
++ */
++static int sfi_processor_setup_cpuidle(struct sfi_processor *pr)
++{
++ int i;
++ int count = CPUIDLE_DRIVER_STATE_START;
++ struct cpuidle_state *state;
++ struct cpuidle_device *dev = &pr->power.dev;
++
++ for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
++ dev->states[i].name[0] = '\0';
++ dev->states[i].desc[0] = '\0';
++ }
++
++ for (i = 1; i < SFI_PROCESSOR_MAX_POWER; i++) {
++
++ /*Mwait not supported by processor */
++ if (!mwait_supported[i])
++ continue;
++
++ state = &dev->states[count];
++
++ snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
++ snprintf(state->desc, CPUIDLE_DESC_LEN, "C%d", i);
++
++ state->exit_latency = pr->power.states[count].exit_latency;
++ state->target_residency = state->exit_latency * latency_factor;
++ state->power_usage = pr->power.states[count].power_usage;
++ state->flags = 0;
++ cpuidle_set_statedata(state, &pr->power.sfi_cstates[count]);
++
++ printk
++ (KERN_INFO "State details Name:%s, Desc:%s, \
++ exit_latency:%d,target_residency%d,power_usage%d,hint%d",
++ state->name, state->desc, state->exit_latency,
++ state->target_residency, state->power_usage,
++ pr->power.sfi_cstates[count].hint);
++
++ switch (i) {
++ case SFI_STATE_C1:
++ state->flags |= CPUIDLE_FLAG_SHALLOW;
++ state->enter = sfi_idle_enter_c1;
++ break;
++
++ case SFI_STATE_C2:
++ state->flags |= CPUIDLE_FLAG_BALANCED;
++ state->flags |= CPUIDLE_FLAG_TIME_VALID;
++ state->enter = sfi_idle_enter_simple;
++ break;
++
++ case SFI_STATE_C3:
++ case SFI_STATE_C4:
++ case SFI_STATE_C5:
++ case SFI_STATE_C6:
++ state->flags |= CPUIDLE_FLAG_DEEP;
++ state->flags |= CPUIDLE_FLAG_TIME_VALID;
++ state->flags |= CPUIDLE_FLAG_CHECK_BM;
++ state->enter = sfi_idle_enter_bm;
++ break;
++#ifdef CONFIG_MSTWN_POWER_MGMT
++ case STATE_S0IX:
++ state->flags |= CPUIDLE_FLAG_DEEP;
++ state->flags |= CPUIDLE_FLAG_TIME_VALID;
++ state->flags |= CPUIDLE_FLAG_CHECK_BM;
++ state->enter = s0i3_enter_bm;
++ break;
++#endif
++ }
++
++ count++;
++ if (count == CPUIDLE_STATE_MAX)
++ break;
++ }
++
++ dev->state_count = count;
++ if (!count)
++ return -EINVAL;
++
++ return 0;
++}
++
++int sfi_cstate_probe(unsigned int hint)
++{
++ int retval;
++ unsigned int eax, ebx, ecx, edx;
++ unsigned int edx_part;
++ unsigned int cstate_type;
++ unsigned int num_cstate_subtype;
++
++ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
++
++ /* Check whether this particular CState is supported or not */
++ cstate_type = (hint >> MWAIT_SUBSTATE_SIZE) + 1;
++ edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
++ num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;
++
++ retval = 0;
++ if (num_cstate_subtype < (hint & MWAIT_SUBSTATE_MASK)) {
++ retval = -1;
++ goto out;
++ }
++
++ /* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */
++ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
++ !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) {
++ retval = -1;
++ goto out;
++ }
++
++ if (!mwait_supported[cstate_type]) {
++ mwait_supported[cstate_type] = 1;
++ printk(KERN_DEBUG
++ "Monitor-Mwait will be used to enter C-%d state\n",
++ cstate_type);
++ }
++
++out:
++ return retval;
++}
++
++int sfi_processor_power_init(struct sfi_processor *pr)
++{
++
++ int totallen;
++ struct sfi_cstate_table_entry *pentry;
++ u32 sfi_max_states;
++
++ pentry = sfi_cstate_array;
++
++#ifdef CONFIG_MSTWN_POWER_MGMT
++ sfi_max_states = SFI_PROCESSOR_MAX_POWER - 1;
++#else
++ sfi_max_states = SFI_PROCESSOR_MAX_POWER;
++#endif
++
++ for (totallen = 1; totallen <= sfi_cstate_num &&
++ totallen < sfi_max_states; totallen++, pentry++) {
++ pr->power.states[totallen].power_usage = 0;
++ pr->power.states[totallen].exit_latency = pentry->latency;
++
++ pr->power.sfi_cstates[totallen].hint = pentry->hint;
++ pr->power.sfi_cstates[totallen].latency = pentry->latency;
++
++ sfi_cstate_probe(pentry->hint);
++
++ printk(KERN_INFO "Cstate[%d]: hint = 0x%08x, latency = %dms\n",
++ totallen, pentry->hint, pentry->latency);
++ }
++
++#ifdef CONFIG_MSTWN_POWER_MGMT
++
++ p1_c6 = 0;
++
++ /* this initialization is for the S0i3 state */
++ pr->power.states[totallen].power_usage = 0;
++ pr->power.states[totallen].exit_latency = s0ix_latency;
++
++ pr->power.sfi_cstates[totallen].hint = 0;
++ pr->power.sfi_cstates[totallen].latency = s0ix_latency;
++
++ mwait_supported[STATE_S0IX] = 1;
++#endif
++
++ sfi_processor_setup_cpuidle(pr);
++ pr->power.dev.cpu = pr->id;
++ if (cpuidle_register_device(&pr->power.dev))
++ return -EIO;
++
++ return 0;
++}
++
++int sfi_processor_power_exit(struct sfi_processor *pr)
++{
++ cpuidle_unregister_device(&pr->power.dev);
++ return 0;
++}
+Index: linux-2.6.33/arch/x86/kernel/sfi/sfi_processor_perflib.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/arch/x86/kernel/sfi/sfi_processor_perflib.c
+@@ -0,0 +1,185 @@
++/*
++ * sfi_Processor_perflib.c - sfi Processor P-States Library
++ *
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or (at
++ * your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * Author: Vishwesh M Rudramuni
++ * Contact information: Vishwesh Rudramuni <vishwesh.m.rudramuni@intel.com>
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/cpufreq.h>
++#include <linux/sfi.h>
++#include <linux/sfi_processor.h>
++
++#define SFI_PROCESSOR_COMPONENT 0x01000000
++#define SFI_PROCESSOR_CLASS "processor"
++#define SFI_PROCESSOR_FILE_PERFORMANCE "performance"
++#define _COMPONENT SFI_PROCESSOR_COMPONENT
++
++static DEFINE_MUTEX(performance_mutex);
++
++/* Use cpufreq debug layer for _PPC changes. */
++#define cpufreq_printk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
++ "cpufreq-core", msg)
++
++static void sfi_cpufreq_add_file(struct sfi_processor *pr)
++{
++ return;
++}
++static void sfi_cpufreq_remove_file(struct sfi_processor *pr)
++{
++ return;
++}
++
++struct sfi_cpufreq_table_entry sfi_cpufreq_array[SFI_PROCESSOR_MAX_POWER];
++EXPORT_SYMBOL_GPL(sfi_cpufreq_array);
++
++int sfi_cpufreq_num;
++EXPORT_SYMBOL_GPL(sfi_cpufreq_num);
++
++static int __init sfi_parse_freq(struct sfi_table_header *table)
++{
++ struct sfi_table_simple *sb;
++ struct sfi_cpufreq_table_entry *pentry;
++ int totallen;
++
++ sb = (struct sfi_table_simple *)table;
++ if (!sb) {
++ printk(KERN_WARNING "SFI: Unable to map FREQ\n");
++ return -ENODEV;
++ }
++
++ if (!sfi_cpufreq_num) {
++ sfi_cpufreq_num = SFI_GET_NUM_ENTRIES(sb,
++ struct sfi_cpufreq_table_entry);
++ pentry = (struct sfi_cpufreq_table_entry *)sb->pentry;
++ totallen = sfi_cpufreq_num * sizeof(*pentry);
++ memcpy(sfi_cpufreq_array, pentry, totallen);
++ }
++
++ printk(KERN_INFO "SFI: P state info (num = %d):\n", sfi_cpufreq_num);
++ pentry = sfi_cpufreq_array;
++ for (totallen = 0; totallen < sfi_cpufreq_num; totallen++, pentry++) {
++ printk(KERN_INFO "Pstate[%d]: freq = %dMHz latency = %dms"
++ " ctrl = 0x%08x\n", totallen, pentry->freq,
++ pentry->latency, pentry->ctrl_val);
++ }
++
++ return 0;
++}
++
++
++static int sfi_processor_get_performance_states(struct sfi_processor *pr)
++{
++ int result = 0;
++ int i;
++
++ sfi_table_parse(SFI_SIG_FREQ, NULL, NULL, sfi_parse_freq);
++
++
++ pr->performance->state_count = sfi_cpufreq_num;
++ pr->performance->states =
++ kmalloc(sizeof(struct sfi_processor_px) * sfi_cpufreq_num,
++ GFP_KERNEL);
++ if (!pr->performance->states)
++ result = -ENOMEM;
++
++ printk(KERN_INFO "Num p-states %d\n", sfi_cpufreq_num);
++
++ /* Populate the P-states info from the SFI table here */
++ for (i = 0; i < sfi_cpufreq_num; i++) {
++ pr->performance->states[i].core_frequency = \
++ sfi_cpufreq_array[i].freq;
++ pr->performance->states[i].transition_latency = \
++ sfi_cpufreq_array[i].latency;
++ pr->performance->states[i].control = \
++ sfi_cpufreq_array[i].ctrl_val;
++ printk(KERN_INFO "State [%d]: core_frequency[%d] \
++ transition_latency[%d] \
++ control[0x%x] status[0x%x]\n", i,
++ (u32) pr->performance->states[i].core_frequency,
++ (u32) pr->performance->states[i].transition_latency,
++ (u32) pr->performance->states[i].control,
++ (u32) pr->performance->states[i].status);
++ }
++
++ return result;
++}
++
++int
++sfi_processor_register_performance(struct sfi_processor_performance
++ *performance, unsigned int cpu)
++{
++ struct sfi_processor *pr;
++
++ mutex_lock(&performance_mutex);
++
++ pr = per_cpu(sfi_processors, cpu);
++ if (!pr) {
++ mutex_unlock(&performance_mutex);
++ return -ENODEV;
++ }
++
++ if (pr->performance) {
++ mutex_unlock(&performance_mutex);
++ return -EBUSY;
++ }
++
++ WARN_ON(!performance);
++
++ pr->performance = performance;
++
++ sfi_processor_get_performance_states(pr);
++
++ sfi_cpufreq_add_file(pr);
++
++ mutex_unlock(&performance_mutex);
++ return 0;
++}
++EXPORT_SYMBOL(sfi_processor_register_performance);
++
++void
++sfi_processor_unregister_performance(struct sfi_processor_performance
++ *performance, unsigned int cpu)
++{
++ struct sfi_processor *pr;
++
++
++ mutex_lock(&performance_mutex);
++
++ pr = per_cpu(sfi_processors, cpu);
++ if (!pr) {
++ mutex_unlock(&performance_mutex);
++ return;
++ }
++
++ if (pr->performance)
++ kfree(pr->performance->states);
++ pr->performance = NULL;
++
++ sfi_cpufreq_remove_file(pr);
++
++ mutex_unlock(&performance_mutex);
++
++ return;
++}
++EXPORT_SYMBOL(sfi_processor_unregister_performance);
+Index: linux-2.6.33/drivers/sfi/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/sfi/Kconfig
++++ linux-2.6.33/drivers/sfi/Kconfig
+@@ -15,3 +15,13 @@ menuconfig SFI
+ For more information, see http://simplefirmware.org
+
+ Say 'Y' here to enable the kernel to boot on SFI-only platforms.
++config SFI_PROCESSOR_PM
++ bool "SFI Processor Power Management"
++ depends on SFI && X86_LOCAL_APIC
++ default y
++
++config SFI_CPUIDLE
++ bool "SFI Processor C-State driver"
++ depends on SFI_PROCESSOR_PM && CPU_IDLE
++ default y
++
+Index: linux-2.6.33/include/linux/sfi_processor.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/include/linux/sfi_processor.h
+@@ -0,0 +1,102 @@
++/*
++ * sfi_processor.h
++ *
++ * Copyright (C) 2008 Intel Corp
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * Author: Sujith Thomas
++ * Contact information: Sujith Thomas <sujith.thomas@intel.com>
++ */
++
++#ifndef __SFI_PROCESSOR_H__
++#define __SFI_PROCESSOR_H__
++#include <linux/sfi.h>
++#include <linux/cpuidle.h>
++
++#define SFI_PROCESSOR_MAX_POWER 7
++
++#define CPU_SFI_GET_NUM(ptable, entry) \
++ ((ptable->header.length - SFI_TBL_HEADER_LEN) / \
++ (sizeof(struct entry)))
++
++struct sfi_processor_power {
++ struct cpuidle_device dev;
++ u32 default_state;
++ int count;
++ struct cpuidle_state states[SFI_PROCESSOR_MAX_POWER];
++ struct sfi_cstate_table_entry sfi_cstates[SFI_PROCESSOR_MAX_POWER];
++};
++
++struct sfi_processor_flags {
++ u8 valid;
++ u8 power;
++};
++
++struct sfi_processor {
++ u32 id;
++ struct sfi_processor_flags flags;
++ struct sfi_processor_power power;
++ struct sfi_processor_performance *performance;
++};
++
++/* Performance management */
++struct sfi_processor_px {
++ u32 core_frequency; /* megahertz */
++ u32 transition_latency; /* microseconds */
++ u32 control; /* control value */
++ u32 status; /* success indicator */
++};
++
++struct sfi_processor_performance {
++ unsigned int state;
++ unsigned int state_count;
++ struct sfi_processor_px *states;
++ cpumask_var_t shared_cpu_map;
++ unsigned int shared_type;
++};
++
++#define SFI_STATE_C0 (u8) 0
++#define SFI_STATE_C1 (u8) 1
++#define SFI_STATE_C2 (u8) 2
++#define SFI_STATE_C3 (u8) 3
++#define SFI_STATE_C4 (u8) 4
++#define SFI_STATE_C5 (u8) 5
++#define SFI_STATE_C6 (u8) 6
++
++#define SFI_C_STATES_MAX SFI_STATE_C6
++#define SFI_C_STATE_COUNT 6
++
++extern struct cpuidle_driver sfi_idle_driver;
++
++/* for communication between multiple parts of the processor kernel module */
++DECLARE_PER_CPU(struct sfi_processor *, sfi_processors);
++
++int sfi_processor_power_init(struct sfi_processor *pr);
++int sfi_processor_power_exit(struct sfi_processor *pr);
++extern int sfi_processor_register_performance(struct sfi_processor_performance
++ *performance, unsigned int cpu);
++extern void sfi_processor_unregister_performance(struct
++ sfi_processor_performance
++ *performance,
++ unsigned int cpu);
++extern struct sfi_cstate_table_entry sfi_cstate_array[SFI_C_STATES_MAX];
++extern int sfi_cstate_num;
++
++extern struct sfi_cstate_table_entry sfi_cstate_array[SFI_C_STATES_MAX];
++extern int sfi_cstate_num;
++
++#endif /*__SFI_PROCESSOR_H__*/
+Index: linux-2.6.33/include/linux/sfi.h
+===================================================================
+--- linux-2.6.33.orig/include/linux/sfi.h
++++ linux-2.6.33/include/linux/sfi.h
+@@ -120,6 +120,13 @@ struct sfi_cstate_table_entry {
+ u32 latency; /* latency in ms */
+ } __packed;
+
++
++struct sfi_cpufreq_table_entry {
++ u32 freq;
++ u32 latency; /* transition latency in ms for this pstate */
++ u32 ctrl_val; /* value to write to PERF_CTL to enter thisstate */
++}__packed;
++
+ struct sfi_apic_table_entry {
+ u64 phys_addr; /* phy base addr for APIC reg */
+ } __packed;
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-pmic-battery-driver.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-pmic-battery-driver.patch
new file mode 100644
index 0000000..1e098a7
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-pmic-battery-driver.patch
@@ -0,0 +1,849 @@
+From 10551a86fa76709587b48a644caeb78dd07690be Mon Sep 17 00:00:00 2001
+From: Nithish Mahalingam <nithish.mahalingam@intel.com>
+Date: Tue, 29 Dec 2009 22:42:48 +0530
+Subject: [PATCH 068/104] Adding Intel Moorestown PMIC Battery Driver
+
+PMIC Battery driver provides battery charging and battery gauge functionality
+on Intel Moorestown platform.
+
+Signed-off-by: Nithish Mahalingam <nithish.mahalingam@intel.com>
+---
+ drivers/power/Kconfig | 7 +
+ drivers/power/Makefile | 1 +
+ drivers/power/pmic_battery.c | 799 ++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 807 insertions(+), 0 deletions(-)
+ create mode 100644 drivers/power/pmic_battery.c
+
+diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
+index d4b3d67..6936bc8 100644
+--- a/drivers/power/Kconfig
++++ b/drivers/power/Kconfig
+@@ -124,4 +124,11 @@ config CHARGER_PCF50633
+ help
+ Say Y to include support for NXP PCF50633 Main Battery Charger.
+
++config BATTERY_MRSTPMIC
++ tristate "PMIC battery driver for Intel Moorestown platform"
++ depends on SPI_MRST && LNW_IPC && USB_GADGET_LANGWELL
++ help
++ Say Y here to enable battery driver on Intel Moorestown
++ platform.
++
+ endif # POWER_SUPPLY
+diff --git a/drivers/power/Makefile b/drivers/power/Makefile
+index 573597c..97af4b4 100644
+--- a/drivers/power/Makefile
++++ b/drivers/power/Makefile
+@@ -31,3 +31,4 @@ obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o
+ obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o
+ obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
+ obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
++obj-$(CONFIG_BATTERY_MRSTPMIC) += pmic_battery.o
+diff --git a/drivers/power/pmic_battery.c b/drivers/power/pmic_battery.c
+new file mode 100644
+index 0000000..6e3c46a
+--- /dev/null
++++ b/drivers/power/pmic_battery.c
+@@ -0,0 +1,799 @@
++/*
++ * pmic_battery.c - Intel Moorestown PMIC Battery Driver
++ *
++ * Copyright (C) 2009 Intel Corporation
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * Author: Nithish Mahalingam <nithish.mahalingam@intel.com>
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/err.h>
++#include <linux/interrupt.h>
++#include <linux/workqueue.h>
++#include <linux/jiffies.h>
++#include <linux/param.h>
++#include <linux/device.h>
++#include <linux/spi/spi.h>
++#include <linux/power_supply.h>
++
++#include <asm/ipc_defs.h>
++#include <linux/usb/langwell_udc.h>
++
++
++MODULE_AUTHOR("Nithish Mahalingam <nithish.mahalingam@intel.com>");
++MODULE_DESCRIPTION("Intel Moorestown PMIC Battery Driver");
++MODULE_LICENSE("GPL");
++
++#define DRIVER_NAME "pmic_battery"
++
++/*********************************************************************
++ * Generic defines
++ *********************************************************************/
++
++static int pmicbatteryDebug;
++module_param(pmicbatteryDebug, int, 0444);
++MODULE_PARM_DESC(pmicbatteryDebug,
++ "Flag to enable PMIC Battery debug messages.");
++
++#define PMIC_BATT_DEBUG (pmicbatteryDebug)
++
++#define PMIC_BATT_DRV_INFO_UPDATED 1
++#define PMIC_BATT_PRESENT 1
++#define PMIC_BATT_NOT_PRESENT 0
++#define PMIC_USB_PRESENT PMIC_BATT_PRESENT
++#define PMIC_USB_NOT_PRESENT PMIC_BATT_NOT_PRESENT
++
++/* pmic battery register related */
++#define PMIC_BATT_CHR_SCHRGINT_ADDR 0xD2
++#define PMIC_BATT_CHR_SBATOVP_MASK (1 << 1)
++#define PMIC_BATT_CHR_STEMP_MASK (1 << 2)
++#define PMIC_BATT_CHR_SCOMP_MASK (1 << 3)
++#define PMIC_BATT_CHR_SUSBDET_MASK (1 << 4)
++#define PMIC_BATT_CHR_SBATDET_MASK (1 << 5)
++#define PMIC_BATT_CHR_SDCLMT_MASK (1 << 6)
++#define PMIC_BATT_CHR_SUSBOVP_MASK (1 << 7)
++#define PMIC_BATT_CHR_EXCPT_MASK 0xC6
++#define PMIC_BATT_ADC_ACCCHRG_MASK (1 << 31)
++#define PMIC_BATT_ADC_ACCCHRGVAL_MASK 0x7FFFFFFF
++
++/* pmic ipc related */
++#define PMIC_BATT_CHR_IPC_CMDID 0xEF
++#define PMIC_BATT_CHR_IPC_FCHRG_SUBID 0x4
++#define PMIC_BATT_CHR_IPC_TCHRG_SUBID 0x6
++
++/* internal return values */
++#define BATTSUCCESS 0
++#define EBATTFAIL 1
++#define EBATTERR 2
++
++/* types of battery charging */
++enum batt_charge_type {
++ BATT_USBOTG_500MA_CHARGE,
++ BATT_USBOTG_TRICKLE_CHARGE,
++};
++
++/* valid battery events */
++enum batt_event {
++ BATT_EVENT_BATOVP_EXCPT,
++ BATT_EVENT_USBOVP_EXCPT,
++ BATT_EVENT_TEMP_EXCPT,
++ BATT_EVENT_DCLMT_EXCPT,
++ BATT_EVENT_EXCPT
++};
++
++/* battery cca value */
++struct batt_cca_data {
++ signed int cca_val;
++};
++
++/* battery property structure */
++struct batt_prop_data {
++ unsigned int batt_capacity;
++ char batt_chrg_crnt;
++ char batt_chrg_volt;
++ char batt_chrg_prot;
++ char batt_chrg_prot2;
++ char batt_chrg_timer;
++} __attribute__((packed));
++
++
++/*********************************************************************
++ * Battery properties
++ *********************************************************************/
++
++/*
++ * pmic battery info
++ */
++struct pmic_power_module_info {
++ bool is_dev_info_updated;
++ struct spi_device *spi;
++ /* pmic battery data */
++ unsigned long update_time; /* jiffies when data read */
++ unsigned int usb_is_present;
++ unsigned int batt_is_present;
++ unsigned int batt_health;
++ unsigned int usb_health;
++ unsigned int batt_status;
++ unsigned int batt_charge_now; /* in mAS */
++ unsigned int batt_prev_charge_full; /* in mAS */
++ unsigned int batt_charge_rate; /* in units per second */
++
++ struct power_supply usb;
++ struct power_supply batt;
++ int irq; /* GPE_ID or IRQ# */
++ struct workqueue_struct *monitor_wqueue;
++ struct delayed_work monitor_battery;
++ struct work_struct handler;
++};
++
++static unsigned int delay_time = 2000; /* in ms */
++
++/*
++ * pmic ac properties
++ */
++static enum power_supply_property pmic_usb_props[] = {
++ POWER_SUPPLY_PROP_PRESENT,
++ POWER_SUPPLY_PROP_HEALTH,
++};
++
++/*
++ * pmic battery properties
++ */
++static enum power_supply_property pmic_battery_props[] = {
++ POWER_SUPPLY_PROP_STATUS,
++ POWER_SUPPLY_PROP_HEALTH,
++ POWER_SUPPLY_PROP_PRESENT,
++ POWER_SUPPLY_PROP_CHARGE_NOW,
++ POWER_SUPPLY_PROP_CHARGE_FULL,
++ POWER_SUPPLY_PROP_CHARGE_AVG,
++};
++
++
++/**
++ * pmic_battery_log_event - log battery events
++ * @event: battery event to be logged
++ * Context: can sleep
++ *
++ * There are multiple battery events which may be of interest to users;
++ * this battery function logs the different battery events onto the
++ * kernel log messages.
++ */
++static void pmic_battery_log_event(enum batt_event event)
++{
++ switch (event) {
++ case BATT_EVENT_BATOVP_EXCPT:
++ printk(KERN_WARNING "pmic-battery: battery overvoltage "
++ "condition detected\n");
++ break;
++ case BATT_EVENT_USBOVP_EXCPT:
++ printk(KERN_WARNING "pmic-battery: usb charger overvoltage "
++ "condition detected\n");
++ break;
++ case BATT_EVENT_TEMP_EXCPT:
++ printk(KERN_WARNING "pmic-battery: high battery temperature "
++ "condition detected\n");
++ break;
++ case BATT_EVENT_DCLMT_EXCPT:
++ printk(KERN_WARNING "pmic-battery: over battery charge "
++ " current condition detected\n");
++ break;
++ default:
++ printk(KERN_WARNING "pmic-battery: charger/battery "
++ " exception detected\n");
++ break;
++ }
++}
++
++/**
++ * pmic_battery_read_status - read battery status information
++ * @pbi: device info structure to update the read information
++ * Context: can sleep
++ *
++ * PMIC power source information need to be updated based on the data read
++ * from the PMIC battery registers.
++ *
++ */
++static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
++{
++ unsigned int update_time_intrvl = 0;
++ unsigned int chrg_val = 0;
++ struct ipc_pmic_reg_data pmic_batt_reg = {0};
++ struct ipc_cmd_type pmic_batt_cmd = {0};
++ struct batt_cca_data ccval = {0};
++ struct batt_prop_data batt_prop = {0};
++ int batt_present = 0;
++ int usb_present = 0;
++ int batt_exception = 0;
++
++ /* make sure the last batt_status read happened delay_time before */
++ if (pbi->update_time && time_before(jiffies, pbi->update_time +
++ msecs_to_jiffies(delay_time)))
++ return;
++
++ update_time_intrvl = jiffies_to_msecs(jiffies - pbi->update_time);
++ pbi->update_time = jiffies;
++
++ /* read coulomb counter registers and schrgint register */
++
++ pmic_batt_cmd.ioc = TRUE;
++ pmic_batt_cmd.cmd = IPC_BATT_CCA_READ;
++ if (ipc_config_cmd(pmic_batt_cmd, sizeof(struct batt_cca_data),
++ &ccval)) {
++ dev_warn(&pbi->spi->dev, "%s(): ipc config cmd failed\n",
++ __func__);
++ return;
++ }
++
++ pmic_batt_reg.ioc = TRUE;
++ pmic_batt_reg.pmic_reg_data[0].register_address =
++ PMIC_BATT_CHR_SCHRGINT_ADDR;
++ pmic_batt_reg.num_entries = 1;
++
++ if (ipc_pmic_register_read(&pmic_batt_reg)) {
++ dev_warn(&pbi->spi->dev, "%s(): ipc pmic read failed\n",
++ __func__);
++ return;
++ }
++
++ /*
++ * set pmic_power_module_info members based on pmic register values
++ * read.
++ */
++
++ /* set batt_is_present */
++ if (pmic_batt_reg.pmic_reg_data[0].value &
++ PMIC_BATT_CHR_SBATDET_MASK) {
++ pbi->batt_is_present = PMIC_BATT_PRESENT;
++ batt_present = 1;
++ } else {
++ pbi->batt_is_present = PMIC_BATT_NOT_PRESENT;
++ pbi->batt_health = POWER_SUPPLY_HEALTH_UNKNOWN;
++ pbi->batt_status = POWER_SUPPLY_STATUS_UNKNOWN;
++ }
++
++ /* set batt_health */
++ if (batt_present) {
++ if (pmic_batt_reg.pmic_reg_data[0].value &
++ PMIC_BATT_CHR_SBATOVP_MASK) {
++ pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
++ pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
++ pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT);
++ batt_exception = 1;
++ } else if (pmic_batt_reg.pmic_reg_data[0].value &
++ PMIC_BATT_CHR_SDCLMT_MASK) {
++ pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
++ pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
++ pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
++ batt_exception = 1;
++ } else if (pmic_batt_reg.pmic_reg_data[0].value &
++ PMIC_BATT_CHR_STEMP_MASK) {
++ pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT;
++ pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
++ pmic_battery_log_event(BATT_EVENT_TEMP_EXCPT);
++ batt_exception = 1;
++ } else {
++ pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
++ }
++ }
++
++ /* set usb_is_present */
++ if (pmic_batt_reg.pmic_reg_data[0].value &
++ PMIC_BATT_CHR_SUSBDET_MASK) {
++ pbi->usb_is_present = PMIC_USB_PRESENT;
++ usb_present = 1;
++ } else {
++ pbi->usb_is_present = PMIC_USB_NOT_PRESENT;
++ pbi->usb_health = POWER_SUPPLY_HEALTH_UNKNOWN;
++ }
++
++ if (usb_present) {
++ if (pmic_batt_reg.pmic_reg_data[0].value &
++ PMIC_BATT_CHR_SUSBOVP_MASK) {
++ pbi->usb_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
++ pmic_battery_log_event(BATT_EVENT_USBOVP_EXCPT);
++ } else {
++ pbi->usb_health = POWER_SUPPLY_HEALTH_GOOD;
++ }
++ }
++
++ chrg_val = ccval.cca_val & PMIC_BATT_ADC_ACCCHRGVAL_MASK;
++
++ /* set batt_prev_charge_full to battery capacity the first time */
++ if (!pbi->is_dev_info_updated) {
++ pmic_batt_cmd.ioc = TRUE;
++ pmic_batt_cmd.cmd = IPC_BATT_GET_PROP;
++ if (ipc_config_cmd(pmic_batt_cmd,
++ sizeof(struct batt_prop_data), &batt_prop)) {
++ dev_warn(&pbi->spi->dev, "%s(): ipc config cmd "
++ "failed\n", __func__);
++ return;
++ }
++ pbi->batt_prev_charge_full = batt_prop.batt_capacity;
++ }
++
++ /* set batt_status */
++ if ((batt_present) && (!batt_exception)) {
++ if (pmic_batt_reg.pmic_reg_data[0].value &
++ PMIC_BATT_CHR_SCOMP_MASK) {
++ pbi->batt_status = POWER_SUPPLY_STATUS_FULL;
++ pbi->batt_prev_charge_full = chrg_val;
++ } else if (ccval.cca_val & PMIC_BATT_ADC_ACCCHRG_MASK) {
++ pbi->batt_status = POWER_SUPPLY_STATUS_DISCHARGING;
++ } else {
++ pbi->batt_status = POWER_SUPPLY_STATUS_CHARGING;
++ }
++ }
++
++ /* set batt_charge_rate */
++ if ((pbi->is_dev_info_updated) && (batt_present) && (!batt_exception)) {
++ if (pbi->batt_status == POWER_SUPPLY_STATUS_DISCHARGING) {
++ if (pbi->batt_charge_now - chrg_val) {
++ pbi->batt_charge_rate = ((pbi->batt_charge_now -
++ chrg_val) * 1000 * 60) /
++ update_time_intrvl;
++ }
++ } else if (pbi->batt_status == POWER_SUPPLY_STATUS_CHARGING) {
++ if (chrg_val - pbi->batt_charge_now) {
++ pbi->batt_charge_rate = ((chrg_val -
++ pbi->batt_charge_now) * 1000 * 60) /
++ update_time_intrvl;
++ }
++ } else
++ pbi->batt_charge_rate = 0;
++ } else {
++ pbi->batt_charge_rate = -1;
++ }
++
++ /* batt_charge_now */
++ if ((batt_present) && (!batt_exception))
++ pbi->batt_charge_now = chrg_val;
++ else
++ pbi->batt_charge_now = -1;
++
++ pbi->is_dev_info_updated = PMIC_BATT_DRV_INFO_UPDATED;
++}
++
++/**
++ * pmic_usb_get_property - usb power source get property
++ * @psy: usb power supply context
++ * @psp: usb power source property
++ * @val: usb power source property value
++ * Context: can sleep
++ *
++ * PMIC usb power source property needs to be provided to power_supply
++ * subsytem for it to provide the information to users.
++ */
++static int pmic_usb_get_property(struct power_supply *psy,
++ enum power_supply_property psp,
++ union power_supply_propval *val)
++{
++ struct pmic_power_module_info *pbi = container_of(psy,
++ struct pmic_power_module_info, usb);
++
++ /* update pmic_power_module_info members */
++ pmic_battery_read_status(pbi);
++
++ switch (psp) {
++ case POWER_SUPPLY_PROP_PRESENT:
++ val->intval = pbi->usb_is_present;
++ break;
++ case POWER_SUPPLY_PROP_HEALTH:
++ val->intval = pbi->usb_health;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++/**
++ * pmic_battery_get_property - battery power source get property
++ * @psy: battery power supply context
++ * @psp: battery power source property
++ * @val: battery power source property value
++ * Context: can sleep
++ *
++ * PMIC battery power source property needs to be provided to power_supply
++ * subsytem for it to provide the information to users.
++ */
++static int pmic_battery_get_property(struct power_supply *psy,
++ enum power_supply_property psp,
++ union power_supply_propval *val)
++{
++ struct pmic_power_module_info *pbi = container_of(psy,
++ struct pmic_power_module_info, batt);
++
++ /* update pmic_power_module_info members */
++ pmic_battery_read_status(pbi);
++
++ switch (psp) {
++ case POWER_SUPPLY_PROP_STATUS:
++ val->intval = pbi->batt_status;
++ break;
++ case POWER_SUPPLY_PROP_HEALTH:
++ val->intval = pbi->batt_health;
++ break;
++ case POWER_SUPPLY_PROP_PRESENT:
++ val->intval = pbi->batt_is_present;
++ break;
++ case POWER_SUPPLY_PROP_CHARGE_NOW:
++ val->intval = pbi->batt_charge_now;
++ break;
++ case POWER_SUPPLY_PROP_CHARGE_FULL:
++ val->intval = pbi->batt_prev_charge_full;
++ break;
++ case POWER_SUPPLY_PROP_CHARGE_AVG:
++ val->intval = pbi->batt_charge_rate;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++/**
++ * pmic_battery_monitor - monitor battery status
++ * @work: work structure
++ * Context: can sleep
++ *
++ * PMIC battery status needs to be monitored for any change
++ * and information needs to be frequently updated.
++ */
++static void pmic_battery_monitor(struct work_struct *work)
++{
++ struct pmic_power_module_info *pbi = container_of(work,
++ struct pmic_power_module_info, monitor_battery.work);
++
++ /* update pmic_power_module_info members */
++ pmic_battery_read_status(pbi);
++ queue_delayed_work(pbi->monitor_wqueue, &pbi->monitor_battery, HZ * 10);
++}
++
++/**
++ * pmic_battery_set_charger - set battery charger
++ * @pbi: device info structure
++ * @chrg: charge mode to set battery charger in
++ * Context: can sleep
++ *
++ * PMIC battery charger needs to be enabled based on the usb charge
++ * capabilities connected to the platform.
++ */
++static int pmic_battery_set_charger(struct pmic_power_module_info *pbi,
++ enum batt_charge_type chrg)
++{
++ int retval;
++
++ /* set usblmt bits and chrgcntl register bits appropriately */
++ switch (chrg) {
++ case BATT_USBOTG_500MA_CHARGE:
++ retval = lnw_ipc_single_cmd(PMIC_BATT_CHR_IPC_CMDID,
++ PMIC_BATT_CHR_IPC_FCHRG_SUBID, 0, 0);
++ break;
++ case BATT_USBOTG_TRICKLE_CHARGE:
++ retval = lnw_ipc_single_cmd(PMIC_BATT_CHR_IPC_CMDID,
++ PMIC_BATT_CHR_IPC_TCHRG_SUBID, 0, 0);
++ break;
++ default:
++ dev_warn(&pbi->spi->dev, "%s(): out of range usb charger "
++ "charge detected\n", __func__);
++ return -EBATTFAIL;
++ }
++
++ if (retval) {
++ dev_warn(&pbi->spi->dev, "%s(): ipc pmic read failed\n",
++ __func__);
++ return -EBATTFAIL;
++ }
++
++ return BATTSUCCESS;
++}
++
++/**
++ * pmic_battery_interrupt_handler - pmic battery interrupt handler
++ * Context: interrupt context
++ *
++ * PMIC battery interrupt handler which will be called with either
++ * battery full condition occurs or usb otg & battery connect
++ * condition occurs.
++ */
++static irqreturn_t pmic_battery_interrupt_handler(int id, void *dev)
++{
++ struct pmic_power_module_info *pbi =
++ (struct pmic_power_module_info *)dev;
++
++ schedule_work(&pbi->handler);
++
++ return IRQ_HANDLED;
++}
++
++/**
++ * pmic_battery_handle_intrpt - pmic battery service interrupt
++ * @work: work structure
++ * Context: can sleep
++ *
++ * PMIC battery needs to either update the battery status as full
++ * if it detects battery full condition caused the interrupt or needs
++ * to enable battery charger if it detects usb and battery detect
++ * caused the source of interrupt.
++ */
++static void pmic_battery_handle_intrpt(struct work_struct *work)
++{
++ struct ipc_pmic_reg_data pmic_batt_reg = {0};
++ struct pmic_power_module_info *pbi = container_of(work,
++ struct pmic_power_module_info, handler);
++ int power = 0;
++ enum batt_charge_type chrg;
++ int retval = 0;
++
++ /* check if pmic_power_module_info is initialized */
++ if (!pbi)
++ return;
++
++ /* read schrgint register to interpret cause of interrupt */
++ pmic_batt_reg.ioc = TRUE;
++ pmic_batt_reg.pmic_reg_data[0].register_address =
++ PMIC_BATT_CHR_SCHRGINT_ADDR;
++ pmic_batt_reg.num_entries = 1;
++
++ if (ipc_pmic_register_read(&pmic_batt_reg)) {
++ dev_warn(&pbi->spi->dev, "%s(): ipc pmic read failed\n",
++ __func__);
++ return;
++ }
++
++ /* find the cause of the interrupt */
++
++ if (pmic_batt_reg.pmic_reg_data[0].value & PMIC_BATT_CHR_SBATDET_MASK) {
++ pbi->batt_is_present = PMIC_BATT_PRESENT;
++ } else {
++ pbi->batt_is_present = PMIC_BATT_NOT_PRESENT;
++ pbi->batt_health = POWER_SUPPLY_HEALTH_UNKNOWN;
++ pbi->batt_status = POWER_SUPPLY_STATUS_UNKNOWN;
++ return;
++ }
++
++ if (pmic_batt_reg.pmic_reg_data[0].value &
++ PMIC_BATT_CHR_EXCPT_MASK) {
++ pbi->batt_health = POWER_SUPPLY_HEALTH_UNKNOWN;
++ pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
++ pbi->usb_health = POWER_SUPPLY_HEALTH_UNKNOWN;
++ pmic_battery_log_event(BATT_EVENT_EXCPT);
++ return;
++ } else {
++ pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
++ pbi->usb_health = POWER_SUPPLY_HEALTH_GOOD;
++ }
++
++ if (pmic_batt_reg.pmic_reg_data[0].value & PMIC_BATT_CHR_SCOMP_MASK) {
++ struct ipc_cmd_type pmic_batt_cmd = {0};
++ struct batt_cca_data ccval = {0};
++
++ pbi->batt_status = POWER_SUPPLY_STATUS_FULL;
++ pmic_batt_cmd.ioc = TRUE;
++ pmic_batt_cmd.cmd = IPC_BATT_CCA_READ;
++ if (ipc_config_cmd(pmic_batt_cmd,
++ sizeof(struct batt_cca_data), &ccval)) {
++ dev_warn(&pbi->spi->dev, "%s(): ipc config cmd "
++ "failed\n", __func__);
++ return;
++ }
++ pbi->batt_prev_charge_full = ccval.cca_val &
++ PMIC_BATT_ADC_ACCCHRGVAL_MASK;
++ return;
++ }
++
++ if (pmic_batt_reg.pmic_reg_data[0].value & PMIC_BATT_CHR_SUSBDET_MASK) {
++ pbi->usb_is_present = PMIC_USB_PRESENT;
++ } else {
++ pbi->usb_is_present = PMIC_USB_NOT_PRESENT;
++ pbi->usb_health = POWER_SUPPLY_HEALTH_UNKNOWN;
++ return;
++ }
++
++ /* setup battery charging */
++
++ /* check usb otg power capability and set charger accordingly */
++ retval = langwell_udc_maxpower(&power);
++ if (retval) {
++ dev_warn(&pbi->spi->dev, "%s(): usb otg power query failed "
++ "with error code %d\n", __func__, retval);
++ return;
++ }
++
++ if (power >= 500)
++ chrg = BATT_USBOTG_500MA_CHARGE;
++ else
++ chrg = BATT_USBOTG_TRICKLE_CHARGE;
++
++ /* enable battery charging */
++ if (pmic_battery_set_charger(pbi, chrg)) {
++ dev_warn(&pbi->spi->dev, "%s(): failed to setup battery "
++ "charging\n", __func__);
++ return;
++ }
++
++ if (PMIC_BATT_DEBUG)
++ printk(KERN_INFO "pmic-battery: %s() - setting up battery "
++ "charger successful\n", __func__);
++}
++
++/**
++ * pmic_battery_probe - pmic battery initialize
++ * @spi: pmic battery spi structure
++ * Context: can sleep
++ *
++ * PMIC battery initializes its internal data structue and other
++ * infrastructure components for it to work as expected.
++ */
++static int pmic_battery_probe(struct spi_device *spi)
++{
++ int retval = 0;
++ struct pmic_power_module_info *pbi = 0;
++
++ if (PMIC_BATT_DEBUG)
++ printk(KERN_INFO "pmic-battery: %s() - found pmic battery "
++ "device\n", __func__);
++
++ pbi = kzalloc(sizeof(*pbi), GFP_KERNEL);
++ if (!pbi) {
++ dev_err(&spi->dev, "%s(): memory allocation failed\n",
++ __func__);
++ return -ENOMEM;
++ }
++
++ pbi->spi = spi;
++ pbi->irq = spi->irq;
++ dev_set_drvdata(&spi->dev, pbi);
++
++ /* initialize all required framework before enabling interrupts */
++ INIT_WORK(&pbi->handler, (void *)pmic_battery_handle_intrpt);
++ INIT_DELAYED_WORK(&pbi->monitor_battery, pmic_battery_monitor);
++ pbi->monitor_wqueue =
++ create_singlethread_workqueue(dev_name(&spi->dev));
++ if (!pbi->monitor_wqueue) {
++ dev_err(&spi->dev, "%s(): wqueue init failed\n", __func__);
++ retval = -ESRCH;
++ goto wqueue_failed;
++ }
++
++ /* register interrupt */
++ retval = request_irq(pbi->irq, pmic_battery_interrupt_handler,
++ 0, DRIVER_NAME, pbi);
++ if (retval) {
++ dev_err(&spi->dev, "%s(): cannot get IRQ\n", __func__);
++ goto requestirq_failed;
++ }
++
++ /* register pmic-batt with power supply subsystem */
++ pbi->batt.name = "pmic-batt";
++ pbi->batt.type = POWER_SUPPLY_TYPE_BATTERY;
++ pbi->batt.properties = pmic_battery_props;
++ pbi->batt.num_properties = ARRAY_SIZE(pmic_battery_props);
++ pbi->batt.get_property = pmic_battery_get_property;
++ retval = power_supply_register(&spi->dev, &pbi->batt);
++ if (retval) {
++ dev_err(&spi->dev, "%s(): failed to register pmic battery "
++ "device with power supply subsystem\n",
++ __func__);
++ goto power_reg_failed;
++ }
++
++ if (PMIC_BATT_DEBUG)
++ printk(KERN_INFO "pmic-battery: %s() - pmic battery device "
++ "registration with power supply subsystem "
++ "successful\n", __func__);
++
++ queue_delayed_work(pbi->monitor_wqueue, &pbi->monitor_battery, HZ * 1);
++
++ /* register pmic-usb with power supply subsystem */
++ pbi->usb.name = "pmic-usb";
++ pbi->usb.type = POWER_SUPPLY_TYPE_USB;
++ pbi->usb.properties = pmic_usb_props;
++ pbi->usb.num_properties = ARRAY_SIZE(pmic_usb_props);
++ pbi->usb.get_property = pmic_usb_get_property;
++ retval = power_supply_register(&spi->dev, &pbi->usb);
++ if (retval) {
++ dev_err(&spi->dev, "%s(): failed to register pmic usb "
++ "device with power supply subsystem\n",
++ __func__);
++ goto power_reg_failed_1;
++ }
++
++ if (PMIC_BATT_DEBUG)
++ printk(KERN_INFO "pmic-battery: %s() - pmic usb device "
++ "registration with power supply subsystem successful\n",
++ __func__);
++
++ return retval;
++
++power_reg_failed_1:
++ power_supply_unregister(&pbi->batt);
++power_reg_failed:
++ cancel_rearming_delayed_workqueue(pbi->monitor_wqueue,
++ &pbi->monitor_battery);
++requestirq_failed:
++ destroy_workqueue(pbi->monitor_wqueue);
++wqueue_failed:
++ kfree(pbi);
++
++ return retval;
++}
++
++/**
++ * pmic_battery_remove - pmic battery finalize
++ * @spi: pmic battery spi device structure
++ * Context: can sleep
++ *
++ * PMIC battery finalizes its internal data structue and other
++ * infrastructure components that it initialized in
++ * pmic_battery_probe.
++ */
++static int pmic_battery_remove(struct spi_device *spi)
++{
++ struct pmic_power_module_info *pbi = dev_get_drvdata(&spi->dev);
++
++ if (pbi) {
++ free_irq(pbi->irq, pbi);
++
++ cancel_rearming_delayed_workqueue(pbi->monitor_wqueue,
++ &pbi->monitor_battery);
++ destroy_workqueue(pbi->monitor_wqueue);
++
++ power_supply_unregister(&pbi->usb);
++ power_supply_unregister(&pbi->batt);
++
++ flush_scheduled_work();
++
++ kfree(pbi);
++ }
++
++ return 0;
++}
++
++
++/*********************************************************************
++ * Driver initialisation and finalization
++ *********************************************************************/
++
++static struct spi_driver pmic_battery_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .bus = &spi_bus_type,
++ .owner = THIS_MODULE,
++ },
++ .probe = pmic_battery_probe,
++ .remove = __devexit_p(pmic_battery_remove),
++};
++
++
++static int __init pmic_battery_module_init(void)
++{
++ return spi_register_driver(&pmic_battery_driver);
++}
++
++static void __exit pmic_battery_module_exit(void)
++{
++ spi_unregister_driver(&pmic_battery_driver);
++}
++
++module_init(pmic_battery_module_init);
++module_exit(pmic_battery_module_exit);
+--
+1.6.2.5
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-rar-handler-driver-3.1.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-rar-handler-driver-3.1.patch
new file mode 100644
index 0000000..a1a0fd6
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-rar-handler-driver-3.1.patch
@@ -0,0 +1,2531 @@
+Index: linux-2.6.33/drivers/staging/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/staging/Kconfig
++++ linux-2.6.33/drivers/staging/Kconfig
+@@ -141,5 +141,7 @@ source "drivers/staging/netwave/Kconfig"
+
+ source "drivers/staging/sm7xx/Kconfig"
+
++source "drivers/staging/rar_register/Kconfig"
++
+ endif # !STAGING_EXCLUDE_BUILD
+ endif # STAGING
+Index: linux-2.6.33/drivers/staging/Makefile
+===================================================================
+--- linux-2.6.33.orig/drivers/staging/Makefile
++++ linux-2.6.33/drivers/staging/Makefile
+@@ -38,7 +38,7 @@ obj-$(CONFIG_VT6656) += vt6656/
+ obj-$(CONFIG_FB_UDL) += udlfb/
+ obj-$(CONFIG_HYPERV) += hv/
+ obj-$(CONFIG_VME_BUS) += vme/
+-obj-$(CONFIG_RAR_REGISTER) += rar/
++obj-$(CONFIG_RAR_DRIVER) += rar/
+ obj-$(CONFIG_DX_SEP) += sep/
+ obj-$(CONFIG_IIO) += iio/
+ obj-$(CONFIG_RAMZSWAP) += ramzswap/
+@@ -52,3 +52,4 @@ obj-$(CONFIG_WAVELAN) += wavelan/
+ obj-$(CONFIG_PCMCIA_WAVELAN) += wavelan/
+ obj-$(CONFIG_PCMCIA_NETWAVE) += netwave/
+ obj-$(CONFIG_FB_SM7XX) += sm7xx/
++obj-$(CONFIG_RAR_REGISTER) += rar_register/
+Index: linux-2.6.33/drivers/staging/rar_register/Kconfig
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/staging/rar_register/Kconfig
+@@ -0,0 +1,14 @@
++#
++# Serial device configuration
++#
++
++menu "RAR Register Driver"
++
++config RAR_REGISTER
++ tristate "Intel Restricted Access Region Register Driver"
++ default n
++ ---help---
++ This driver allows other kernel drivers access to the
++ contents of the restricted access region registers.
++
++endmenu
+Index: linux-2.6.33/drivers/staging/rar_register/Makefile
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/staging/rar_register/Makefile
+@@ -0,0 +1,3 @@
++EXTRA_CFLAGS += -DLITTLE__ENDIAN
++obj-$(CONFIG_RAR_REGISTER) += rar_register.o
++rar_register_driver-objs := rar_register.o
+Index: linux-2.6.33/drivers/staging/rar_register/rar_register.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/staging/rar_register/rar_register.c
+@@ -0,0 +1,669 @@
++/*
++ * rar_register.c - An Intel Restricted Access Region register driver
++ *
++ * Copyright(c) 2009 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
++ * 02111-1307, USA.
++ *
++ * -------------------------------------------------------------------
++ *
++ * 20090806 Ossama Othman <ossama.othman@intel.com>
++ * Return zero high address if upper 22 bits is zero.
++ * Cleaned up checkpatch errors.
++ * Clarified that driver is dealing with bus addresses.
++ *
++ * 20090702 Ossama Othman <ossama.othman@intel.com>
++ * Removed unnecessary include directives
++ * Cleaned up spinlocks.
++ * Cleaned up logging.
++ * Improved invalid parameter checks.
++ * Fixed and simplified RAR address retrieval and RAR locking
++ * code.
++ *
++ * 20090626 Mark Allyn <mark.a.allyn@intel.com>
++ * Initial publish
++ */
++
++#include <linux/rar/rar_register.h>
++#include <linux/rar/memrar.h>
++
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include <linux/device.h>
++#include <linux/kernel.h>
++
++
++/* PCI vendor id for controller */
++#define VENDOR_ID 0x8086
++
++/* PCI device id for controller */
++#define DEVICE_ID 0x4110
++
++
++/* === Lincroft Message Bus Interface === */
++/* Message Control Register */
++#define LNC_MCR_OFFSET 0xD0
++
++/* Message Data Register */
++#define LNC_MDR_OFFSET 0xD4
++
++/* Message Opcodes */
++#define LNC_MESSAGE_READ_OPCODE 0xD0
++#define LNC_MESSAGE_WRITE_OPCODE 0xE0
++
++/* Message Write Byte Enables */
++#define LNC_MESSAGE_BYTE_WRITE_ENABLES 0xF
++
++/* B-unit Port */
++#define LNC_BUNIT_PORT 0x3
++
++/* === Lincroft B-Unit Registers - Programmed by IA32 firmware === */
++#define LNC_BRAR0L 0x10
++#define LNC_BRAR0H 0x11
++#define LNC_BRAR1L 0x12
++#define LNC_BRAR1H 0x13
++
++/* Reserved for SeP */
++#define LNC_BRAR2L 0x14
++#define LNC_BRAR2H 0x15
++
++/* Moorestown supports three restricted access regions. */
++#define MRST_NUM_RAR 3
++
++
++/* RAR Bus Address Range */
++struct RAR_address_range {
++ u32 low;
++ u32 high;
++};
++
++/* Structure containing low and high RAR register offsets. */
++struct RAR_offsets {
++ u32 low; /* Register offset for low RAR bus address. */
++ u32 high; /* Register offset for high RAR bus address. */
++};
++
++struct RAR_client {
++ int (*client_callback)(void *client_data);
++ void *customer_data;
++ int client_called;
++ };
++
++DEFINE_SPINLOCK(rar_spinlock_lock);
++DEFINE_SPINLOCK(lnc_reg_lock);
++
++struct RAR_device {
++ unsigned long rar_flags;
++ unsigned long lnc_reg_flags;
++ struct RAR_offsets rar_offsets[MRST_NUM_RAR];
++ struct RAR_address_range rar_addr[MRST_NUM_RAR];
++ struct pci_dev *rar_dev;
++ u32 registered;
++ };
++
++/* this platform has only one rar_device for 3 rar regions */
++struct RAR_device my_rar_device;
++
++/* flag to indicate whether or not this driver is registered;
++ * this is for the entire driver and not just a device */
++int driver_registered;
++
++/* this data is for handling requests from other drivers which arrive
++ * prior to this driver initializing
++ */
++
++struct RAR_client clients[MRST_NUM_RAR];
++int num_clients;
++
++/* prototype for init */
++static int __init rar_init_handler(void);
++static void __exit rar_exit_handler(void);
++
++const struct pci_device_id rar_pci_id_tbl[] = {
++ { PCI_DEVICE(VENDOR_ID, DEVICE_ID) },
++ { 0 }
++};
++
++MODULE_DEVICE_TABLE(pci, rar_pci_id_tbl);
++
++/*
++ * Function that is activated on the successful probe of the RAR
++ * device (Moorestown host controller).
++ */
++static int rar_probe(struct pci_dev *dev, const struct pci_device_id *id);
++
++/* field for registering driver to PCI device */
++static struct pci_driver rar_pci_driver = {
++ .name = "rar_register",
++ .id_table = rar_pci_id_tbl,
++ .probe = rar_probe
++};
++
++const struct pci_device_id *my_id_table = rar_pci_id_tbl;
++
++/*
++ * This function is used to retrieve RAR info using the Lincroft
++ * message bus interface.
++ */
++static int memrar_get_rar_addr(struct pci_dev *pdev,
++ int offset,
++ u32 *addr)
++{
++ /*
++ * ======== The Lincroft Message Bus Interface ========
++ * Lincroft registers may be obtained from the PCI
++ * (the Host Bridge) using the Lincroft Message Bus
++ * Interface. That message bus interface is generally
++ * comprised of two registers: a control register (MCR, 0xD0)
++ * and a data register (MDR, 0xD4).
++ *
++ * The MCR (message control register) format is the following:
++ * 1. [31:24]: Opcode
++ * 2. [23:16]: Port
++ * 3. [15:8]: Register Offset
++ * 4. [7:4]: Byte Enables (use 0xF to set all of these bits
++ * to 1)
++ * 5. [3:0]: reserved
++ *
++ * Read (0xD0) and write (0xE0) opcodes are written to the
++ * control register when reading and writing to Lincroft
++ * registers, respectively.
++ *
++ * We're interested in registers found in the Lincroft
++ * B-unit. The B-unit port is 0x3.
++ *
++ * The six B-unit RAR register offsets we use are listed
++ * earlier in this file.
++ *
++ * Lastly writing to the MCR register requires the "Byte
++ * enables" bits to be set to 1. This may be achieved by
++ * writing 0xF at bit 4.
++ *
++ * The MDR (message data register) format is the following:
++ * 1. [31:0]: Read/Write Data
++ *
++ * Data being read from this register is only available after
++ * writing the appropriate control message to the MCR
++ * register.
++ *
++ * Data being written to this register must be written before
++ * writing the appropriate control message to the MCR
++ * register.
++ */
++
++ int result;
++
++ /* Construct control message */
++ u32 const message =
++ (LNC_MESSAGE_READ_OPCODE << 24)
++ | (LNC_BUNIT_PORT << 16)
++ | (offset << 8)
++ | (LNC_MESSAGE_BYTE_WRITE_ENABLES << 4);
++
++ dev_dbg(&pdev->dev, "Offset for 'get' LNC MSG is %x\n", offset);
++
++ if (addr == 0) {
++ WARN_ON(1);
++ return -EINVAL;
++ }
++
++ spin_lock_irqsave(&lnc_reg_lock, my_rar_device.lnc_reg_flags);
++
++ /* Send the control message */
++ result = pci_write_config_dword(pdev,
++ LNC_MCR_OFFSET,
++ message);
++
++ dev_dbg(&pdev->dev,
++ "Result from send ctl register is %x\n",
++ result);
++
++ if (!result) {
++ result = pci_read_config_dword(pdev,
++ LNC_MDR_OFFSET,
++ addr);
++
++ dev_dbg(&pdev->dev,
++ "Result from read data register is %x\n",
++ result);
++
++ dev_dbg(&pdev->dev,
++ "Value read from data register is %x\n",
++ *addr);
++ }
++
++ spin_unlock_irqrestore(&lnc_reg_lock, my_rar_device.lnc_reg_flags);
++
++ return result;
++}
++
++static int memrar_set_rar_addr(struct pci_dev *pdev,
++ int offset,
++ u32 addr)
++{
++ /*
++ * Data being written to this register must be written before
++ * writing the appropriate control message to the MCR
++ * register.
++ *
++ * @note See memrar_get_rar_addr() for a description of the
++ * message bus interface being used here.
++ */
++
++ int result = 0;
++
++ /* Construct control message */
++ u32 const message =
++ (LNC_MESSAGE_WRITE_OPCODE << 24)
++ | (LNC_BUNIT_PORT << 16)
++ | (offset << 8)
++ | (LNC_MESSAGE_BYTE_WRITE_ENABLES << 4);
++
++ if (addr == 0) {
++ WARN_ON(1);
++ return -EINVAL;
++ }
++
++ spin_lock_irqsave(&lnc_reg_lock, my_rar_device.lnc_reg_flags);
++
++ dev_dbg(&pdev->dev,
++ "Offset for 'set' LNC MSG is %x\n", offset);
++
++ /* Send the control message */
++ result = pci_write_config_dword(pdev,
++ LNC_MDR_OFFSET,
++ addr);
++
++ dev_dbg(&pdev->dev,
++ "Result from write data register is %x\n",
++ result);
++
++ if (!result) {
++ dev_dbg(&pdev->dev,
++ "Value written to data register is %x\n",
++ addr);
++
++ result = pci_write_config_dword(pdev,
++ LNC_MCR_OFFSET,
++ message);
++
++ dev_dbg(&pdev->dev,
++ "Result from send ctl register is %x\n",
++ result);
++ }
++
++ spin_unlock_irqrestore(&lnc_reg_lock, my_rar_device.lnc_reg_flags);
++
++ return result;
++}
++
++/*
++ * Initialize RAR parameters, such as bus addresses, etc.
++ */
++static int memrar_init_rar_params(struct pci_dev *pdev)
++{
++ struct RAR_offsets const *end = my_rar_device.rar_offsets
++ + MRST_NUM_RAR;
++ struct RAR_offsets const *i;
++ struct pci_dev *my_pdev;
++ unsigned int n = 0;
++ int result = 0;
++
++ /* Retrieve RAR start and end bus addresses. */
++
++ /*
++ * Access the RAR registers through the Lincroft Message Bus
++ * Interface on PCI device: 00:00.0 Host bridge.
++ */
++
++ /* struct pci_dev *pdev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); */
++
++ my_pdev = pci_dev_get(pdev);
++
++ if (my_pdev == NULL) {
++ WARN_ON(1);
++ return -ENODEV;
++ }
++
++ for (i = my_rar_device.rar_offsets; i != end; ++i, ++n) {
++ if (memrar_get_rar_addr(my_pdev,
++ i->low,
++ &(my_rar_device.rar_addr[n].low)) != 0
++ || memrar_get_rar_addr(my_pdev,
++ i->high,
++ &(my_rar_device.rar_addr[n].high))
++ != 0) {
++ result = -1;
++ break;
++ }
++
++ /*
++ * Only the upper 22 bits of the RAR addresses are
++ * stored in their corresponding RAR registers so we
++ * must set the lower 10 bits accordingly.
++ *
++ * The low address has its lower 10 bits cleared, and
++ * the high address has all its lower 10 bits set,
++ * e.g.:
++ *
++ * low = 0x2ffffc00
++ * high = 0x3fffffff
++ *
++ * This is not arbitrary, and is actually how RAR
++ * addressing/configuration works.
++ */
++ my_rar_device.rar_addr[n].low &= 0xfffffc00u;
++
++ /*
++ * Set bits 9:0 if the 1 KiB aligned (the upper 22
++ * bits) high address is non-zero.
++ *
++ * Otherwise set all bits to zero since that indicates
++ * no RAR address is configured.
++ */
++ if ((my_rar_device.rar_addr[n].high & 0xfffffc00u) == 0)
++ my_rar_device.rar_addr[n].high = 0;
++ else
++ my_rar_device.rar_addr[n].high |= 0x3ffu;
++ }
++
++ /* Done accessing the device. */
++ /* pci_dev_put(pdev); */
++
++ if (result == 0) {
++ size_t z;
++ for (z = 0; z != MRST_NUM_RAR; ++z) {
++ /*
++ * "BRAR" refers to the RAR registers in the
++ * Lincroft B-unit.
++ */
++ dev_info(&pdev->dev,
++ "BRAR[%u] bus address range = "
++ "[0x%08x, 0x%08x]\n",
++ z,
++ my_rar_device.rar_addr[z].low,
++ my_rar_device.rar_addr[z].high);
++ }
++ }
++
++ return result;
++}
++
++/*
++ * This function registers the driver with the device subsystem (
++ * either PCI, USB, etc).
++*/
++static int __init rar_init_handler(void)
++{
++ return pci_register_driver(&rar_pci_driver);
++}
++
++static void __exit rar_exit_handler(void)
++{
++ pci_unregister_driver(&rar_pci_driver);
++}
++
++module_init(rar_init_handler);
++module_exit(rar_exit_handler);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Intel Restricted Access Region Register Driver");
++
++/*
++ * Function that is activated on the successful probe of the RAR device
++ * (Moorestown host controller).
++ */
++int rar_probe(struct pci_dev *dev, const struct pci_device_id *id)
++{
++ int error;
++ int counter;
++
++ dev_dbg(&dev->dev,
++ "PCI probe starting\n");
++
++ /* enable the device */
++ error = pci_enable_device(dev);
++ if (error) {
++ dev_err(&dev->dev,
++ "Error enabling RAR register PCI device\n");
++ goto end_function;
++ }
++
++ /* we have only one device; fill in the rar_device structure */
++ my_rar_device.rar_dev = dev;
++ my_rar_device.rar_flags = 0;
++ my_rar_device.lnc_reg_flags = 0;
++ my_rar_device.rar_offsets[0].low = LNC_BRAR0L;
++ my_rar_device.rar_offsets[0].high = LNC_BRAR0H;
++ my_rar_device.rar_offsets[1].low = LNC_BRAR1L;
++ my_rar_device.rar_offsets[1].high = LNC_BRAR1H;
++ my_rar_device.rar_offsets[2].low = LNC_BRAR2L;
++ my_rar_device.rar_offsets[2].high = LNC_BRAR2H;
++ my_rar_device.registered = 1;
++
++ /*
++ * Initialize the RAR parameters, which have to be retrieved */
++ /* via the message bus interface.
++ */
++ error = memrar_init_rar_params(dev);
++ if (error) {
++ pci_disable_device(dev);
++
++ dev_err(&dev->dev,
++ "Error retrieving RAR addresses\n");
++
++ goto end_function;
++ }
++
++ driver_registered = 1;
++
++ /* now call anyone who has registered (using callbacks) */
++ for (counter = 0; counter < num_clients; counter += 1) {
++ if (!clients[counter].client_called) {
++ error = (*clients[counter].client_callback)(
++ clients[counter].customer_data);
++ clients[counter].client_called = 1;
++ dev_dbg(&my_rar_device.rar_dev->dev,
++ "Callback called for %d\n",
++ counter);
++ }
++ }
++
++end_function:
++
++ return error;
++}
++
++
++/*
++ * The rar_get_address function is used by other device drivers
++ * to obtain RAR address information on a RAR. It takes three
++ * parameters:
++ *
++ * int rar_index
++ * The rar_index is an index to the rar for which you wish to retrieve
++ * the address information.
++ * Values can be 0,1, or 2.
++ *
++ * The function returns a 0 upon success or a -1 if there is no RAR
++ * facility on this system.
++ */
++int rar_get_address(int rar_index,
++ u32 *start_address,
++ u32 *end_address)
++{
++ int result = -ENODEV;
++
++ if (my_rar_device.registered) {
++ if (start_address == 0
++ || end_address == 0
++ || rar_index >= MRST_NUM_RAR
++ || rar_index < 0) {
++ result = -EINVAL;
++ } else {
++ *start_address = my_rar_device.rar_addr[rar_index].low;
++ *end_address = my_rar_device.rar_addr[rar_index].high;
++ result = 0;
++ }
++ }
++
++ return result;
++}
++EXPORT_SYMBOL(rar_get_address);
++
++/*
++ * The rar_lock function is used by other device drivers to lock a RAR.
++ * once an RAR is locked, it stays locked until the next system reboot.
++ * The function takes one parameter:
++ *
++ * int rar_index
++ * The rar_index is an index to the rar that you want to lock.
++ * Values can be 0,1, or 2.
++ *
++ * The function returns a 0 upon success or a -1 if there is no RAR
++ * facility on this system.
++ */
++int rar_lock(int rar_index)
++{
++ int result = -ENODEV;
++
++ if (rar_index >= MRST_NUM_RAR || rar_index < 0) {
++ result = -EINVAL;
++ goto exit_rar_lock;
++ }
++
++ spin_lock_irqsave(&rar_spinlock_lock, my_rar_device.rar_flags);
++
++ if (my_rar_device.registered) {
++
++ u32 low;
++ u32 high;
++
++ /*
++ * Clear bits 4:0 in low register to lock.
++ * Clear bits 8,4:0 in high register to lock.
++ *
++ * The rest of the lower 10 bits in both registers are
++ * unused so we might as well clear them all.
++ */
++ if (rar_index == RAR_TYPE_VIDEO) {
++ low = my_rar_device.rar_addr[rar_index].low &
++ 0xfffffc00u;
++ high = my_rar_device.rar_addr[rar_index].high &
++ 0xfffffc00u;
++ low |= 0x00000009;
++ high |= 0x00000015;
++ }
++
++ else if (rar_index == RAR_TYPE_AUDIO) {
++ low = my_rar_device.rar_addr[rar_index].low &
++ 0xfffffc00u;
++ high = my_rar_device.rar_addr[rar_index].high &
++ 0xfffffc00u;
++ low |= 0x00000008;
++ high |= 0x00000018;
++ }
++
++ else {
++ low = my_rar_device.rar_addr[rar_index].low &
++ 0xfffffc00u;
++ high = my_rar_device.rar_addr[rar_index].high &
++ 0xfffffc00u;
++ high |= 0x00000018;
++ }
++
++ /*
++ * Now program the register using the Lincroft message
++ * bus interface.
++ */
++ result = memrar_set_rar_addr(my_rar_device.rar_dev,
++ my_rar_device.rar_offsets[rar_index].low,
++ low);
++
++ if (result == 0)
++ result = memrar_set_rar_addr(
++ my_rar_device.rar_dev,
++ my_rar_device.rar_offsets[rar_index].high,
++ high);
++ }
++
++ spin_unlock_irqrestore(&rar_spinlock_lock, my_rar_device.rar_flags);
++
++exit_rar_lock:
++
++ return result;
++}
++EXPORT_SYMBOL(rar_lock);
++
++/* The register_rar function is to used by other device drivers
++ * to ensure that this driver is ready. As we cannot be sure of
++ * the compile/execute order of drivers in the kernel, it is
++ * best to give this driver a callback function to call when
++ * it is ready to give out addresses. The callback function
++ * would have those steps that continue the initialization of
++ * a driver that do require a valid RAR address. One of those
++ * steps would be to call get_rar_address()
++ * This function returns 0 on success and -1 on failure.
++ */
++int register_rar(int (*callback)(void *yourparameter), void *yourparameter)
++{
++
++ int result;
++
++ result = 0;
++
++ if (driver_registered) {
++
++ /* if the driver already registered, then we can simply
++ call the callback right now */
++
++ result = (*callback)(yourparameter);
++ if (result) {
++ dev_dbg(&my_rar_device.rar_dev->dev,
++ "Immediate Callback failed: %x\n",
++ result);
++ } else {
++ dev_dbg(&my_rar_device.rar_dev->dev,
++ "Immediate Callback ran okay\n");
++ }
++
++ return result;
++ }
++
++ else if (num_clients >= MRST_NUM_RAR) {
++ return -ENODEV;
++ }
++
++ else {
++
++ clients[num_clients].client_callback = callback;
++ clients[num_clients].customer_data = yourparameter;
++ clients[num_clients].client_called = 0;
++ num_clients += 1;
++ dev_dbg(&my_rar_device.rar_dev->dev, "Callback registered\n");
++ }
++
++return result;
++
++}
++EXPORT_SYMBOL(register_rar);
++
++/*
++ Local Variables:
++ c-file-style: "linux"
++ End:
++*/
+Index: linux-2.6.33/include/linux/rar/memrar.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/include/linux/rar/memrar.h
+@@ -0,0 +1,172 @@
++/*
++ * RAR Handler (/dev/memrar) internal driver API.
++ * Copyright (C) 2009 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of version 2 of the GNU General
++ * Public License as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be
++ * useful, but WITHOUT ANY WARRANTY; without even the implied
++ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
++ * PURPOSE. See the GNU General Public License for more details.
++ * You should have received a copy of the GNU General Public
++ * License along with this program; if not, write to the Free
++ * Software Foundation, Inc., 59 Temple Place - Suite 330,
++ * Boston, MA 02111-1307, USA.
++ * The full GNU General Public License is included in this
++ * distribution in the file called COPYING.
++ */
++
++
++#ifndef _MEMRAR_H
++#define _MEMRAR_H
++
++#include <linux/ioctl.h>
++#include <linux/types.h>
++
++
++/*
++ * Constants that specify different kinds of RAR regions that could be
++ * set up.
++ */
++static __u32 const RAR_TYPE_VIDEO; /* 0 */
++static __u32 const RAR_TYPE_AUDIO = 1;
++static __u32 const RAR_TYPE_IMAGE = 2;
++static __u32 const RAR_TYPE_DATA = 3;
++
++/*
++ * @struct RAR_stat
++ *
++ * @brief This structure is used for @c RAR_HANDLER_STAT ioctl and for
++ * @c RAR_get_stat() user space wrapper function.
++ */
++struct RAR_stat {
++ /* Type of RAR memory (e.g., audio vs. video) */
++ __u32 type;
++
++ /*
++ * Total size of RAR memory region.
++ */
++ __u32 capacity;
++
++ /* Size of the largest reservable block. */
++ __u32 largest_block_size;
++};
++
++
++/*
++ * @struct RAR_block_info
++ *
++ * @brief The argument for the @c RAR_HANDLER_RESERVE @c ioctl.
++ *
++ */
++struct RAR_block_info {
++ /* Type of RAR memory (e.g., audio vs. video) */
++ __u32 type;
++
++ /* Requested size of a block to be reserved in RAR. */
++ __u32 size;
++
++ /* Handle that can be used to refer to reserved block. */
++ __u32 handle;
++};
++
++/*
++ * @struct RAR_buffer
++ *
++ * Structure that contains all information related to a given block of
++ * memory in RAR. It is generally only used when retrieving bus
++ * addresses.
++ *
++ * @note This structure is used only by RAR-enabled drivers, and is
++ * not intended to be exposed to the user space.
++ */
++struct RAR_buffer {
++ /* Structure containing base RAR buffer information */
++ struct RAR_block_info info;
++
++ /* Buffer bus address */
++ __u32 bus_address;
++};
++
++
++#define RAR_IOCTL_BASE 0xE0
++
++/* Reserve RAR block. */
++#define RAR_HANDLER_RESERVE _IOWR(RAR_IOCTL_BASE, 0x00, struct RAR_block_info)
++
++/* Release previously reserved RAR block. */
++#define RAR_HANDLER_RELEASE _IOW(RAR_IOCTL_BASE, 0x01, __u32)
++
++/* Get RAR stats. */
++#define RAR_HANDLER_STAT _IOWR(RAR_IOCTL_BASE, 0x02, struct RAR_stat)
++
++
++/* -------------------------------------------------------------- */
++/* Kernel Side RAR Handler Interface */
++/* -------------------------------------------------------------- */
++
++/*
++ * @function rar_reserve
++ *
++ * @brief Reserve RAR buffers.
++ *
++ * This function will reserve buffers in the restricted access regions
++ * of given types.
++ *
++ * @return Number of successfully reserved buffers.
++ * Successful buffer reservations will have the corresponding
++ * @c bus_address field set to a non-zero value in the
++ * given @a buffers vector.
++ */
++extern size_t rar_reserve(struct RAR_buffer *buffers,
++ size_t count);
++
++/*
++ * @function rar_release
++ *
++ * @brief Release RAR buffers retrieved through call to
++ * @c rar_reserve() or @c rar_handle_to_bus().
++ *
++ * This function will release RAR buffers that were retrieved through
++ * a call to @c rar_reserve() or @c rar_handle_to_bus() by
++ * decrementing the reference count. The RAR buffer will be reclaimed
++ * when the reference count drops to zero.
++ *
++ * @return Number of successfully released buffers.
++ * Successful releases will have their handle field set to
++ * zero in the given @a buffers vector.
++ */
++extern size_t rar_release(struct RAR_buffer *buffers,
++ size_t count);
++
++/*
++ * @function rar_handle_to_bus
++ *
++ * @brief Convert a vector of RAR handles to bus addresses.
++ *
++ * This function will retrieve the RAR buffer bus addresses, type and
++ * size corresponding to the RAR handles provided in the @a buffers
++ * vector.
++ *
++ * @return Number of successfully converted buffers.
++ * The bus address will be set to @c 0 for unrecognized
++ * handles.
++ *
++ * @note The reference count for each corresponding buffer in RAR will
++ * be incremented. Call @c rar_release() when done with the
++ * buffers.
++ */
++extern size_t rar_handle_to_bus(struct RAR_buffer *buffers,
++ size_t count);
++
++
++#endif /* _MEMRAR_H */
++
++
++/*
++ Local Variables:
++ c-file-style: "linux"
++ End:
++*/
+Index: linux-2.6.33/include/linux/rar/rar_register.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/include/linux/rar/rar_register.h
+@@ -0,0 +1,79 @@
++/*
++ * Copyright (C) 2008, 2009 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of version 2 of the GNU General
++ * Public License as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be
++ * useful, but WITHOUT ANY WARRANTY; without even the implied
++ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
++ * PURPOSE. See the GNU General Public License for more details.
++ * You should have received a copy of the GNU General Public
++ * License along with this program; if not, write to the Free
++ * Software Foundation, Inc., 59 Temple Place - Suite 330,
++ * Boston, MA 02111-1307, USA.
++ * The full GNU General Public License is included in this
++ * distribution in the file called COPYING.
++ */
++
++
++#ifndef _RAR_REGISTER_H
++#define _RAR_REGISTER_H
++
++# include <linux/types.h>
++
++/* The register_rar function is to used by other device drivers
++ * to ensure that this driver is ready. As we cannot be sure of
++ * the compile/execute order of drivers in the kernel, it is
++ * best to give this driver a callback function to call when
++ * it is ready to give out addresses. The callback function
++ * would have those steps that continue the initialization of
++ * a driver that do require a valid RAR address. One of those
++ * steps would be to call get_rar_address()
++ * This function returns 0 on success and -1 on failure.
++ */
++int register_rar(int (*callback)(void *yourparameter), void *yourparameter);
++
++/* The get_rar_address function is used by other device drivers
++ * to obtain RAR address information on a RAR. It takes two
++ * parameter:
++ *
++ * int rar_index
++ * The rar_index is an index to the rar for which you wish to retrieve
++ * the address information.
++ * Values can be 0,1, or 2.
++ *
++ * struct RAR_address_struct is a pointer to a place to which the function
++ * can return the address structure for the RAR.
++ *
++ * The function returns a 0 upon success or a -1 if there is no RAR
++ * facility on this system.
++ */
++int rar_get_address(int rar_index,
++ u32 *start_address,
++ u32 *end_address);
++
++
++/* The lock_rar function is used by other device drivers to lock a RAR.
++ * once an RAR is locked, it stays locked until the next system reboot.
++ * The function takes one parameter:
++ *
++ * int rar_index
++ * The rar_index is an index to the rar that you want to lock.
++ * Values can be 0,1, or 2.
++ *
++ * The function returns a 0 upon success or a -1 if there is no RAR
++ * facility on this system.
++ */
++int rar_lock(int rar_index);
++
++
++#endif /* _RAR_REGISTER_H */
++
++
++/*
++ Local Variables:
++ c-file-style: "linux"
++ End:
++*/
+Index: linux-2.6.33/drivers/misc/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/misc/Kconfig
++++ linux-2.6.33/drivers/misc/Kconfig
+@@ -249,6 +249,17 @@ config SGI_GRU_DEBUG
+ This option enables addition debugging code for the SGI GRU driver. If
+ you are unsure, say N.
+
++config MRST_RAR_HANDLER
++ tristate "RAR handler driver for Intel Moorestown platform"
++ depends on X86
++ select RAR_REGISTER
++ ---help---
++ This driver provides a memory management interface to
++ restricted access regions available in the Intel Moorestown
++ platform.
++
++ If unsure, say N.
++
+ config MRST_VIB
+ tristate "vibrator driver for Intel Moorestown platform"
+ help
+Index: linux-2.6.33/drivers/misc/Makefile
+===================================================================
+--- linux-2.6.33.orig/drivers/misc/Makefile
++++ linux-2.6.33/drivers/misc/Makefile
+@@ -22,6 +22,8 @@ obj-$(CONFIG_CS5535_MFGPT) += cs5535-mfg
+ obj-$(CONFIG_HP_ILO) += hpilo.o
+ obj-$(CONFIG_MRST) += intel_mrst.o
+ obj-$(CONFIG_ISL29003) += isl29003.o
++obj-$(CONFIG_MRST_RAR_HANDLER) += memrar.o
++memrar-y := memrar_allocator.o memrar_handler.o
+ obj-$(CONFIG_MRST_VIB) += mrst_vib.o
+ obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
+ obj-$(CONFIG_DS1682) += ds1682.o
+Index: linux-2.6.33/drivers/misc/memrar_allocator.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/misc/memrar_allocator.c
+@@ -0,0 +1,374 @@
++/*
++ * memrar_allocator 0.2: An allocator for Intel RAR.
++ *
++ * Copyright (C) 2009 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of version 2 of the GNU General
++ * Public License as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be
++ * useful, but WITHOUT ANY WARRANTY; without even the implied
++ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
++ * PURPOSE. See the GNU General Public License for more details.
++ * You should have received a copy of the GNU General Public
++ * License along with this program; if not, write to the Free
++ * Software Foundation, Inc., 59 Temple Place - Suite 330,
++ * Boston, MA 02111-1307, USA.
++ * The full GNU General Public License is included in this
++ * distribution in the file called COPYING.
++ *
++ *
++ * ------------------------------------------------------------------
++ *
++ * This simple allocator implementation provides a
++ * malloc()/free()-like interface for reserving space within a
++ * previously reserved block of memory. It is not specific to
++ * any hardware, nor is it coupled with the lower level paging
++ * mechanism.
++ *
++ * The primary goal of this implementation is to provide a means
++ * to partition an arbitrary block of memory without actually
++ * accessing the memory or incurring any hardware side-effects
++ * (e.g. paging). It is, in effect, a bookkeeping mechanism for
++ * buffers.
++ */
++
++
++#include "memrar_allocator.h"
++#include <linux/slab.h>
++#include <linux/bug.h>
++#include <linux/kernel.h>
++
++
++struct memrar_allocator *memrar_create_allocator(unsigned long base,
++ size_t capacity,
++ size_t block_size)
++{
++ struct memrar_allocator *allocator = NULL;
++ struct memrar_free_list *first_node = NULL;
++
++ /*
++ * Make sure the base address is aligned on a block_size
++ * boundary.
++ *
++ * @todo Is this necessary?
++ */
++ /* base = ALIGN(base, block_size); */
++
++ /* Validate parameters.
++ *
++ * Make sure we can allocate the entire memory allocator
++ * space. Zero capacity or block size are obviously invalid.
++ */
++ if (base == 0
++ || capacity == 0
++ || block_size == 0
++ || ULONG_MAX - capacity < base
++ || capacity < block_size)
++ return allocator;
++
++ /*
++ * There isn't much point in creating a memory allocator that
++ * is only capable of holding one block but we'll allow it,
++ * and issue a diagnostic.
++ */
++ WARN(capacity < block_size * 2,
++ "memrar: Only one block available to allocator.\n");
++
++ allocator = kmalloc(sizeof(*allocator), GFP_KERNEL);
++
++ if (allocator == NULL)
++ return allocator;
++
++ mutex_init(&allocator->lock);
++ allocator->base = base;
++
++ /* Round the capacity down to a multiple of block_size. */
++ allocator->capacity = (capacity / block_size) * block_size;
++
++ allocator->block_size = block_size;
++
++ allocator->largest_free_area = allocator->capacity;
++
++ /* Initialize the handle and free lists. */
++ INIT_LIST_HEAD(&allocator->handle_list.list);
++ INIT_LIST_HEAD(&allocator->free_list.list);
++
++ first_node = kmalloc(sizeof(*first_node), GFP_KERNEL);
++ if (first_node == NULL) {
++ kfree(allocator);
++ allocator = NULL;
++ } else {
++ /* Full range of blocks is available. */
++ first_node->begin = base;
++ first_node->end = base + allocator->capacity;
++ list_add(&first_node->list,
++ &allocator->free_list.list);
++ }
++
++ return allocator;
++}
++
++void memrar_destroy_allocator(struct memrar_allocator *allocator)
++{
++ /*
++ * Assume that the memory allocator lock isn't held at this
++ * point in time. Caller must ensure that.
++ */
++
++ struct memrar_free_list *pos;
++ struct memrar_free_list *n;
++
++ if (allocator == NULL)
++ return;
++
++ mutex_lock(&allocator->lock);
++
++ /* Reclaim free list resources. */
++ list_for_each_entry_safe(pos,
++ n,
++ &allocator->free_list.list,
++ list) {
++ list_del(&pos->list);
++ kfree(pos);
++ }
++
++ mutex_unlock(&allocator->lock);
++
++ kfree(allocator);
++}
++
++unsigned long memrar_allocator_alloc(struct memrar_allocator *allocator,
++ size_t size)
++{
++ struct memrar_free_list *pos = NULL;
++
++ size_t num_blocks;
++ unsigned long reserved_bytes;
++
++ /*
++ * Address of allocated buffer. We assume that zero is not a
++ * valid address.
++ */
++ unsigned long addr = 0;
++
++ if (allocator == NULL || size == 0)
++ return addr;
++
++ /* Reserve enough blocks to hold the amount of bytes requested. */
++ num_blocks = DIV_ROUND_UP(size, allocator->block_size);
++
++ reserved_bytes = num_blocks * allocator->block_size;
++
++ mutex_lock(&allocator->lock);
++
++ if (reserved_bytes > allocator->largest_free_area) {
++ mutex_unlock(&allocator->lock);
++ return addr;
++ }
++
++ /*
++ * Iterate through the free list to find a suitably sized
++ * range of free contiguous memory blocks.
++ */
++ list_for_each_entry(pos, &allocator->free_list.list, list) {
++ size_t const curr_size = pos->end - pos->begin;
++
++ if (curr_size >= reserved_bytes) {
++ struct memrar_handle *handle = NULL;
++ struct memrar_handle_list * const new_node =
++ kmalloc(sizeof(*new_node), GFP_KERNEL);
++
++ if (new_node == NULL)
++ break;
++
++ list_add(&new_node->list,
++ &allocator->handle_list.list);
++
++ handle = &new_node->handle;
++ handle->end = pos->end;
++ pos->end -= reserved_bytes;
++ handle->begin = pos->end;
++ addr = handle->begin;
++
++ if (curr_size == allocator->largest_free_area)
++ allocator->largest_free_area -=
++ reserved_bytes;
++
++ break;
++ }
++ }
++
++ mutex_unlock(&allocator->lock);
++
++ return addr;
++}
++
++long memrar_allocator_free(struct memrar_allocator *allocator,
++ unsigned long addr)
++{
++ struct list_head *pos = NULL;
++ struct list_head *tmp = NULL;
++ struct memrar_handle_list *handles = NULL;
++ struct memrar_handle *handle = NULL;
++ struct memrar_free_list *new_node = NULL;
++ int result = -ENOMEM;
++
++ if (allocator == NULL)
++ return -EINVAL;
++
++ if (addr == 0)
++ return 0; /* Ignore free(0). */
++
++ mutex_lock(&allocator->lock);
++
++ /* Find the corresponding handle. */
++ list_for_each_entry(handles,
++ &allocator->handle_list.list,
++ list) {
++ if (handles->handle.begin == addr) {
++ handle = &handles->handle;
++ break;
++ }
++ }
++
++ /* No such buffer created by this allocator. */
++ if (handle == NULL) {
++ mutex_unlock(&allocator->lock);
++ return -EFAULT;
++ }
++
++ /*
++ * Coalesce adjacent chunks of memory if possible.
++ *
++ * @note This isn't full blown coalescing since we're only
++ * coalescing at most three chunks of memory.
++ */
++ list_for_each_safe(pos, tmp, &allocator->free_list.list) {
++ /* @todo O(n) performance. Optimize. */
++
++ struct memrar_free_list * const chunk =
++ list_entry(pos,
++ struct memrar_free_list,
++ list);
++
++ struct memrar_free_list * const next =
++ list_entry(pos->next,
++ struct memrar_free_list,
++ list);
++
++ /* Extend size of existing free adjacent chunk. */
++ if (chunk->end == handle->begin) {
++ /*
++			 * Chunk "less than" the one we're
++ * freeing is adjacent.
++ */
++
++ unsigned long new_chunk_size;
++
++ chunk->end = handle->end;
++
++ /*
++ * Now check if next free chunk is adjacent to
++ * the current extended free chunk.
++ */
++ if (pos != pos->next
++ && chunk->end == next->begin) {
++ chunk->end = next->end;
++ list_del(pos->next);
++ kfree(next);
++ }
++
++ new_chunk_size = chunk->end - chunk->begin;
++
++ if (new_chunk_size > allocator->largest_free_area)
++ allocator->largest_free_area =
++ new_chunk_size;
++
++ result = 0;
++ goto exit_memrar_free;
++ } else if (chunk->begin == handle->end) {
++ /*
++			 * Chunk "greater than" the one we're
++ * freeing is adjacent.
++ */
++
++ unsigned long new_chunk_size;
++
++ chunk->begin = handle->begin;
++
++ /*
++ * Now check if next free chunk is adjacent to
++ * the current extended free chunk.
++ */
++ if (pos != pos->next
++ && chunk->begin == next->end) {
++ chunk->begin = next->begin;
++ list_del(pos->next);
++ kfree(next);
++ }
++
++ new_chunk_size = chunk->end - chunk->begin;
++
++ if (new_chunk_size > allocator->largest_free_area)
++ allocator->largest_free_area =
++ new_chunk_size;
++
++ result = 0;
++ goto exit_memrar_free;
++ }
++ }
++
++ /*
++ * Memory being freed is not adjacent to existing free areas
++ * of memory in the allocator. Add a new item to the free list.
++ *
++ * @todo Allocate this free_list node when the buffer itself
++ * is allocated to avoid a potential problem where a new
++ * node cannot be allocated due to lack of available
++ * kernel memory. We can then free this node in the
++ * above coalescing code node if it isn't needed.
++ *
++ * @todo While making this change would address potential
++ * memory allocation failure, it would also
++ * unfortunately reduce performance of buffer allocation
++ * provided by this allocator.
++ */
++ new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
++ if (new_node != NULL) {
++ unsigned long new_chunk_size;
++
++ new_node->begin = handle->begin;
++ new_node->end = handle->end;
++ list_add(&new_node->list,
++ &allocator->free_list.list);
++
++ new_chunk_size = handle->end - handle->begin;
++
++ if (new_chunk_size > allocator->largest_free_area)
++ allocator->largest_free_area =
++ new_chunk_size;
++
++ result = 0;
++ }
++
++exit_memrar_free:
++
++ if (result == 0)
++ list_del(&handles->list);
++
++ mutex_unlock(&allocator->lock);
++
++ kfree(handles);
++
++ return result;
++}
++
++
++
++/*
++ Local Variables:
++ c-file-style: "linux"
++ End:
++*/
+Index: linux-2.6.33/drivers/misc/memrar_allocator.h
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/misc/memrar_allocator.h
+@@ -0,0 +1,165 @@
++/*
++ * Copyright (C) 2009 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of version 2 of the GNU General
++ * Public License as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be
++ * useful, but WITHOUT ANY WARRANTY; without even the implied
++ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
++ * PURPOSE. See the GNU General Public License for more details.
++ * You should have received a copy of the GNU General Public
++ * License along with this program; if not, write to the Free
++ * Software Foundation, Inc., 59 Temple Place - Suite 330,
++ * Boston, MA 02111-1307, USA.
++ * The full GNU General Public License is included in this
++ * distribution in the file called COPYING.
++ */
++
++#ifndef MEMRAR_ALLOCATOR_H
++#define MEMRAR_ALLOCATOR_H
++
++
++#include <linux/mutex.h>
++#include <linux/list.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++
++/*
++ * @struct memrar_free_list
++ *
++ * @brief List of available areas of memory.
++ */
++struct memrar_free_list {
++ /* Linked list of free memory allocator blocks. */
++ struct list_head list;
++
++ /* Beginning of available address range. */
++ unsigned long begin;
++
++ /*
++ * End of available address range, one past the end,
++ * i.e. [begin, end).
++ */
++ unsigned long end;
++};
++
++struct memrar_allocator;
++
++/* Structure that describes a chunk of memory reserved by the allocator. */
++struct memrar_handle {
++ /* Beginning of available address range. */
++ unsigned long begin;
++
++ /*
++ * End of available address range, one past the end,
++ * i.e. [begin, end).
++ */
++ unsigned long end;
++};
++
++/*
++ * @struct memrar_handle_list
++ *
++ * @brief List of handles corresponding to allocated blocks of memory.
++ */
++struct memrar_handle_list {
++ /* Linked list of handles corresponding to allocated blocks. */
++ struct list_head list;
++
++ /* Handle for the allocated block of memory. */
++ struct memrar_handle handle;
++};
++
++/*
++ * @struct memrar_allocator
++ *
++ * @brief Encapsulation of the memory allocator state.
++ *
++ * This structure contains all memory allocator state, including the
++ * base address, capacity, free list, lock, etc.
++ */
++struct memrar_allocator {
++ /*
++ * Lock used to synchronize access to the memory allocator
++ * state.
++ */
++ struct mutex lock;
++
++ /* Base (start) address of the memory allocator. */
++ unsigned long base;
++
++ /* Size of the memory allocator in bytes. */
++ size_t capacity;
++
++ /*
++ * The size in bytes of individual blocks within the memory
++ * allocator.
++ */
++ size_t block_size;
++
++ /* Largest free area of memory in the allocator in bytes. */
++ size_t largest_free_area;
++
++ /* List of handles for allocated blocks of memory. */
++ struct memrar_handle_list handle_list;
++
++ /* List of free address ranges. */
++ struct memrar_free_list free_list;
++};
++
++/*
++ * @function memrar_create_allocator
++ *
++ * @brief Create a memory allocator.
++ *
++ * Create a memory allocator with the given capacity and block size.
++ * The capacity will be reduced to be a multiple of the block size, if
++ * necessary.
++ *
++ * @param base Address at which the memory allocator begins.
++ * @param capacity Desired size of the memory allocator. This value
++ * must be larger than the block_size, ideally more
++ * than twice as large since there wouldn't be much
++ * point in using a memory allocator otherwise.
++ * @param block_size The size of individual blocks within the memory
++ *                   allocator.  This value must be smaller than the
++ * capacity.
++ * @return An instance of the memory allocator, if creation succeeds.
++ * @return Zero if creation fails. Failure may occur if not enough
++ * kernel memory exists to create the memrar_allocator
++ * instance itself, or if the capacity and block_size
++ * arguments are not compatible or make sense.
++ */
++struct memrar_allocator *memrar_create_allocator(unsigned long base,
++ size_t capacity,
++ size_t block_size);
++
++/*
++ * Reclaim resources held by the memory allocator. The caller must
++ * explicitly free all memory reserved by memrar_allocator_alloc()
++ * prior to calling this function. Otherwise leaks will occur.
++ */
++void memrar_destroy_allocator(struct memrar_allocator *allocator);
++
++/*
++ * Reserve chunk of memory of given size in the memory allocator.
++ */
++unsigned long memrar_allocator_alloc(struct memrar_allocator *allocator,
++ size_t size);
++
++/*
++ * Release a previously reserved chunk of memory in the memory allocator.
++ */
++long memrar_allocator_free(struct memrar_allocator *allocator,
++ unsigned long handle);
++
++#endif /* MEMRAR_ALLOCATOR_H */
++
++
++/*
++ Local Variables:
++ c-file-style: "linux"
++ End:
++*/
+Index: linux-2.6.33/drivers/misc/memrar_handler.c
+===================================================================
+--- /dev/null
++++ linux-2.6.33/drivers/misc/memrar_handler.c
+@@ -0,0 +1,929 @@
++/*
++ * memrar_handler 1.0: An Intel restricted access region handler device
++ *
++ * Copyright (C) 2009 Intel Corporation. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of version 2 of the GNU General
++ * Public License as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be
++ * useful, but WITHOUT ANY WARRANTY; without even the implied
++ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
++ * PURPOSE. See the GNU General Public License for more details.
++ * You should have received a copy of the GNU General Public
++ * License along with this program; if not, write to the Free
++ * Software Foundation, Inc., 59 Temple Place - Suite 330,
++ * Boston, MA 02111-1307, USA.
++ * The full GNU General Public License is included in this
++ * distribution in the file called COPYING.
++ *
++ * -------------------------------------------------------------------
++ *
++ * Moorestown restricted access regions (RAR) provide isolated
++ * areas of main memory that are only accessible by authorized
++ * devices.
++ *
++ * The Intel Moorestown RAR handler module exposes a kernel space
++ * RAR memory management mechanism. It is essentially a
++ * RAR-specific allocator.
++ *
++ * Besides providing RAR buffer management, the RAR handler also
++ * behaves in many ways like an OS virtual memory manager. For
++ * example, the RAR "handles" created by the RAR handler are
++ * analogous to user space virtual addresses.
++ *
++ * RAR memory itself is never accessed directly by the RAR
++ * handler.
++ *
++ * -------------------------------------------------------------------
++ *
++ * TODO
++ *
++ * 1. Split user space interface from core/kernel code, e.g.:
++ * memrar_handler.c -> memrar_core.c, memrar_user.c
++ *
++ * 2. Convert API documentation to Kerneldoc.
++ *
++ * 3. Move memrar_allocator.* to kernel lib' directory since it
++ * is HW neutral.
++ * a. Alternatively, use lib/genalloc.c instead.
++ * b. A kernel port of Doug Lea's malloc() implementation may
++ * also be an option.
++ */
++
++#include <linux/miscdevice.h>
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/kref.h>
++#include <linux/mutex.h>
++#include <linux/kernel.h>
++#include <linux/uaccess.h>
++#include <linux/mm.h>
++#include <linux/ioport.h>
++#include <linux/io.h>
++
++#include <linux/rar/rar_register.h>
++#include <linux/rar/memrar.h>
++
++#include "memrar_allocator.h"
++
++
++#define MEMRAR_VER "1.0"
++
++/*
++ * Moorestown supports three restricted access regions.
++ *
++ * We only care about the first two, video and audio. The third,
++ * reserved for Chaabi and the P-unit, will be handled by their
++ * respective drivers.
++ */
++#define MRST_NUM_RAR 2
++
++/* ---------------- -------------------- ------------------- */
++
++/*
++ * List structure that keeps track of all RAR buffers.
++ */
++struct memrar_buffer_info {
++ /* Linked list of memrar_buffer_info objects. */
++ struct list_head list;
++
++ /* Core RAR buffer information. */
++ struct RAR_buffer buffer;
++
++ /* Reference count */
++ struct kref refcount;
++
++ /*
++ * File handle corresponding to process that reserved the
++ * block of memory in RAR. This will be zero for buffers
++ * allocated by other drivers instead of by a user space
++ * process.
++ */
++ struct file *owner;
++};
++
++/*
++ * Structure that describes the characteristics of a given RAR.
++ */
++struct memrar_rar_info {
++ /* Base bus address of the RAR. */
++ unsigned long base;
++
++ /* Length of the RAR. */
++ unsigned long length;
++
++ /* Virtual address of RAR mapped into kernel. */
++ void __iomem *iobase;
++
++ /*
++ * Allocator associated with the RAR.
++ *
++ * @note The allocator "capacity" may be smaller than the RAR
++ * length if the length is not a multiple of the
++ * configured allocator block size.
++ */
++ struct memrar_allocator *allocator;
++
++ /*
++ * Table that keeps track of all reserved RAR buffers.
++ */
++ struct memrar_buffer_info buffers;
++
++ /*
++ * Lock used to synchronize access to RAR-specific data
++ * structures.
++ */
++ struct mutex lock;
++};
++
++/*
++ * Array of RAR characteristics.
++ */
++static struct memrar_rar_info memrars[MRST_NUM_RAR];
++
++
++/* ---------------- -------------------- ------------------- */
++
++/* Validate RAR type. */
++static inline int memrar_is_valid_rar_type(u32 type)
++{
++ return type == RAR_TYPE_VIDEO || type == RAR_TYPE_AUDIO;
++}
++
++/* Check if an address/handle falls with the given RAR memory range. */
++static inline int memrar_handle_in_range(struct memrar_rar_info *rar,
++ u32 vaddr)
++{
++ unsigned long const iobase = (unsigned long) (rar->iobase);
++ return (vaddr >= iobase && vaddr < iobase + rar->length);
++}
++
++/* Retrieve RAR information associated with the given handle. */
++static struct memrar_rar_info *memrar_get_rar_info(u32 vaddr)
++{
++ int i;
++ for (i = 0; i < MRST_NUM_RAR; ++i) {
++ struct memrar_rar_info * const rar = &memrars[i];
++ if (memrar_handle_in_range(rar, vaddr))
++ return rar;
++ }
++
++ return NULL;
++}
++
++/*
++ * Retrieve bus address from given handle.
++ *
++ * @return Address corresponding to given handle. Zero if handle
++ * is invalid.
++ */
++static unsigned long memrar_get_bus_address(
++ struct memrar_rar_info *rar,
++ u32 vaddr)
++{
++ unsigned long const iobase = (unsigned long) (rar->iobase);
++
++ if (!memrar_handle_in_range(rar, vaddr))
++ return 0;
++
++ /*
++ * An assumption is made that the virtual address offset is
++ * the same as the bus address offset, at least based on the
++ * way this driver is implemented. For example, vaddr + 2 ==
++ * baddr + 2.
++ *
++ * @todo Is that a valid assumption?
++ */
++ return rar->base + (vaddr - iobase);
++}
++
++/*
++ * Retrieve physical address from given handle.
++ *
++ * @return Address corresponding to given handle. Zero if handle
++ * is invalid.
++ */
++static unsigned long memrar_get_physical_address(
++ struct memrar_rar_info *rar,
++ u32 vaddr)
++{
++ /*
++ * @todo This assumes that the bus address and physical
++ * address are the same. That is true for Moorestown
++ * but not necessarily on other platforms. This
++ * deficiency should be addressed at some point.
++ */
++ return memrar_get_bus_address(rar, vaddr);
++}
++
++/*
++ * Core block release code.
++ *
++ * @note This code removes the node from a list. Make sure any list
++ * iteration is performed using list_for_each_safe().
++ */
++static void memrar_release_block_i(struct kref *ref)
++{
++ /*
++ * Last reference is being released. Remove from the table,
++ * and reclaim resources.
++ */
++
++ struct memrar_buffer_info * const node =
++ container_of(ref, struct memrar_buffer_info, refcount);
++
++ struct RAR_block_info * const user_info =
++ &node->buffer.info;
++
++ struct memrar_allocator * const allocator =
++ memrars[user_info->type].allocator;
++
++ list_del(&node->list);
++
++ memrar_allocator_free(allocator, user_info->handle);
++
++ kfree(node);
++}
++
++/*
++ * Initialize RAR parameters, such as bus addresses, etc.
++ */
++static int memrar_init_rar_resources(char const *devname)
++{
++ /* ---- Sanity Checks ----
++ * 1. RAR bus addresses in both Lincroft and Langwell RAR
++ * registers should be the same.
++ * 2. Secure device ID in Langwell RAR registers should be set
++ * appropriately, i.e. only LPE DMA for the audio RAR, and
++ * security for the other Langwell based RAR register. The
++ * video RAR is not accessed from the Langwell side,
++ * meaning its corresponding Langwell RAR should only be
++ * accessible by the security engine.
++ * 3. Audio and video RAR register and RAR access should be
++ * locked. If not, lock them. Except for debugging
++ * purposes, there is no reason for them to be unlocked.
++ *
++ * @todo Should the RAR handler driver even be aware of audio
++ * and video RAR settings?
++ */
++
++ /*
++ * RAR buffer block size.
++ *
++ * We choose it to be the size of a page to simplify the
++ * /dev/memrar mmap() implementation and usage. Otherwise
++ * paging is not involved once an RAR is locked down.
++ */
++ static size_t const RAR_BLOCK_SIZE = PAGE_SIZE;
++
++ int z;
++ int found_rar = 0;
++
++ BUG_ON(MRST_NUM_RAR != ARRAY_SIZE(memrars));
++
++ for (z = 0; z != MRST_NUM_RAR; ++z) {
++ u32 low, high;
++ struct memrar_rar_info * const rar = &memrars[z];
++
++ BUG_ON(!memrar_is_valid_rar_type(z));
++
++ mutex_init(&rar->lock);
++
++ /*
++ * Initialize the process table before we reach any
++		 * code that exits on failure since the finalization
++ * code requires an initialized list.
++ */
++ INIT_LIST_HEAD(&rar->buffers.list);
++
++ if (rar_get_address(z, &low, &high) != 0) {
++ /* No RAR is available. */
++ break;
++ } else if (low == 0 || high == 0) {
++ /*
++ * We don't immediately break out of the loop
++ * since the next type of RAR may be enabled.
++ */
++ rar->base = 0;
++ rar->length = 0;
++ rar->iobase = NULL;
++ rar->allocator = NULL;
++ continue;
++ }
++
++ /*
++ * @todo Verify that LNC and LNW RAR register contents
++ * addresses, security, etc are compatible and
++ * consistent).
++ */
++
++ rar->length = high - low + 1;
++
++ /* Claim RAR memory as our own. */
++ if (request_mem_region(low, rar->length, devname) == NULL) {
++ rar->length = 0;
++
++ pr_err("%s: Unable to claim RAR[%d] memory.\n",
++ devname,
++ z);
++ pr_err("%s: RAR[%d] disabled.\n", devname, z);
++
++ /*
++ * Rather than break out of the loop by
++ * returning -EBUSY, for example, we may be
++ * able to claim memory of the next RAR region
++ * as our own.
++ */
++ continue;
++ }
++
++ rar->base = low;
++
++ /*
++ * Now map it into the kernel address space.
++ *
++ * Note that the RAR memory may only be accessed by IA
++ * when debugging. Otherwise attempts to access the
++ * RAR memory when it is locked down will result in
++ * behavior similar to writing to /dev/null and
++ * reading from /dev/zero. This behavior is enforced
++ * by the hardware. Even if we don't access the
++ * memory, mapping it into the kernel provides us with
++ * a convenient RAR handle to physical address mapping.
++ */
++ rar->iobase = ioremap_nocache(rar->base, rar->length);
++ if (rar->iobase == NULL) {
++ pr_err("%s: Unable to map RAR memory.\n",
++ devname);
++ return -ENOMEM;
++ }
++
++ /* Initialize corresponding memory allocator. */
++ rar->allocator = memrar_create_allocator(
++ (unsigned long) rar->iobase,
++ rar->length,
++ RAR_BLOCK_SIZE);
++ if (rar->allocator == NULL)
++ return -1;
++
++ /*
++ * -------------------------------------------------
++ * Make sure all RARs handled by us are locked down.
++ * -------------------------------------------------
++ */
++
++ /* Enable RAR protection on the Lincroft side. */
++ if (0) {
++ /* @todo Enable once LNW A2 is widely available. */
++ rar_lock(z);
++ } else {
++ pr_warning("%s: LNC RAR[%d] no lock sanity check.\n",
++ devname,
++ z);
++ }
++
++ /* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ */
++ /* |||||||||||||||||||||||||||||||||||||||||||||||||| */
++
++ /*
++ * Enable RAR protection on the Langwell side.
++ *
++ * Ideally Langwell side RAR protection should already
++ * have been enabled by the OEM in the SMIP header but
++ * we perform a sanity check, just in case.
++ *
++ * @todo Set appropriate "lock"/"valid" bits in LNW
++ * {LOW,UP}RAR[12] SCCB registers **and** LNW
++ * {LOW,UP}RAR[01] cDMI registers only if a
++ * suitable SDID (i.e. for security or LPE DMA)
++ * is set.
++ */
++ pr_warning("%s: LNW RAR[%d] no lock sanity check.\n",
++ devname,
++ z);
++
++
++ pr_info("%s: BRAR[%d]\n"
++ "\tlow address: 0x%x\n"
++ "\thigh address: 0x%x\n"
++ "\tsize : %u KiB\n",
++ devname,
++ z,
++ low,
++ high,
++ rar->allocator->capacity / 1024);
++
++ found_rar = 1;
++ }
++
++ if (!found_rar) {
++ /*
++ * No RAR support. Don't bother continuing.
++ *
++ * Note that this is not a failure.
++ */
++ pr_info("%s: No Moorestown RAR support available.\n",
++ devname);
++ return -ENODEV;
++ }
++
++ return 0;
++}
++
++/*
++ * Finalize RAR resources.
++ */
++static void memrar_fini_rar_resources(void)
++{
++ int z;
++ struct memrar_buffer_info *pos;
++ struct memrar_buffer_info *tmp;
++
++ /*
++ * @todo Do we need to hold a lock at this point in time?
++ * (module initialization failure or exit?)
++ */
++
++ for (z = MRST_NUM_RAR; z-- != 0; ) {
++ struct memrar_rar_info * const rar = &memrars[z];
++
++ /* Clean up remaining resources. */
++
++ list_for_each_entry_safe(pos,
++ tmp,
++ &rar->buffers.list,
++ list) {
++ kref_put(&pos->refcount, memrar_release_block_i);
++ }
++
++ memrar_destroy_allocator(rar->allocator);
++ rar->allocator = NULL;
++
++ iounmap(rar->iobase);
++ rar->iobase = NULL;
++
++ release_mem_region(rar->base, rar->length);
++ rar->base = 0;
++
++ rar->length = 0;
++ }
++}
++
++static long memrar_reserve_block(struct RAR_buffer *request,
++ struct file *filp)
++{
++ struct RAR_block_info * const rinfo = &request->info;
++ struct RAR_buffer *buffer;
++ struct memrar_buffer_info *buffer_info;
++ u32 handle;
++ struct memrar_rar_info *rar = NULL;
++
++ /* Prevent array overflow. */
++ if (!memrar_is_valid_rar_type(rinfo->type))
++ return -EINVAL;
++
++ rar = &memrars[rinfo->type];
++
++ /* Reserve memory in RAR. */
++ handle = memrar_allocator_alloc(rar->allocator, rinfo->size);
++ if (handle == 0)
++ return -ENOMEM;
++
++ buffer_info = kmalloc(sizeof(*buffer_info), GFP_KERNEL);
++
++ if (buffer_info == NULL) {
++ memrar_allocator_free(rar->allocator, handle);
++ return -ENOMEM;
++ }
++
++ buffer = &buffer_info->buffer;
++ buffer->info.type = rinfo->type;
++ buffer->info.size = rinfo->size;
++
++ /* Memory handle corresponding to the bus address. */
++ buffer->info.handle = handle;
++ buffer->bus_address = memrar_get_bus_address(rar, handle);
++
++ /*
++ * Keep track of owner so that we can later cleanup if
++ * necessary.
++ */
++ buffer_info->owner = filp;
++
++ kref_init(&buffer_info->refcount);
++
++ mutex_lock(&rar->lock);
++ list_add(&buffer_info->list, &rar->buffers.list);
++ mutex_unlock(&rar->lock);
++
++ rinfo->handle = buffer->info.handle;
++ request->bus_address = buffer->bus_address;
++
++ return 0;
++}
++
++static long memrar_release_block(u32 addr)
++{
++ struct memrar_buffer_info *pos;
++ struct memrar_buffer_info *tmp;
++ struct memrar_rar_info * const rar = memrar_get_rar_info(addr);
++ long result = -EINVAL;
++
++ if (rar == NULL)
++ return -EFAULT;
++
++ mutex_lock(&rar->lock);
++
++ /*
++ * Iterate through the buffer list to find the corresponding
++ * buffer to be released.
++ */
++ list_for_each_entry_safe(pos,
++ tmp,
++ &rar->buffers.list,
++ list) {
++ if (addr == pos->buffer.info.handle
++ && memrar_is_valid_rar_type(pos->buffer.info.type)) {
++ kref_put(&pos->refcount, memrar_release_block_i);
++ result = 0;
++ break;
++ }
++ }
++
++ mutex_unlock(&rar->lock);
++
++ return result;
++}
++
++static long memrar_get_stat(struct RAR_stat *r)
++{
++ long result = -EINVAL;
++
++ if (likely(r != NULL) && memrar_is_valid_rar_type(r->type)) {
++ struct memrar_allocator * const allocator =
++ memrars[r->type].allocator;
++
++ BUG_ON(allocator == NULL);
++
++ /*
++ * Allocator capacity doesn't change over time. No
++ * need to synchronize.
++ */
++ r->capacity = allocator->capacity;
++
++ mutex_lock(&allocator->lock);
++
++ r->largest_block_size = allocator->largest_free_area;
++
++ mutex_unlock(&allocator->lock);
++
++ result = 0;
++ }
++
++ return result;
++}
++
++static long memrar_ioctl(struct file *filp,
++ unsigned int cmd,
++ unsigned long arg)
++{
++ void __user *argp = (void __user *)arg;
++ long result = 0;
++
++ struct RAR_buffer buffer;
++ struct RAR_block_info * const request = &buffer.info;
++ struct RAR_stat rar_info;
++ u32 rar_handle;
++
++ switch (cmd) {
++ case RAR_HANDLER_RESERVE:
++ if (copy_from_user(request,
++ argp,
++ sizeof(*request)))
++ return -EFAULT;
++
++ result = memrar_reserve_block(&buffer, filp);
++ if (result != 0)
++ return result;
++
++ return copy_to_user(argp, request, sizeof(*request));
++
++ case RAR_HANDLER_RELEASE:
++ if (copy_from_user(&rar_handle,
++ argp,
++ sizeof(rar_handle)))
++ return -EFAULT;
++
++ return memrar_release_block(rar_handle);
++
++ case RAR_HANDLER_STAT:
++ if (copy_from_user(&rar_info,
++ argp,
++ sizeof(rar_info)))
++ return -EFAULT;
++
++ /*
++ * Populate the RAR_stat structure based on the RAR
++ * type given by the user
++ */
++ if (memrar_get_stat(&rar_info) != 0)
++ return -EINVAL;
++
++ /*
++ * @todo Do we need to verify destination pointer
++ * "argp" is non-zero? Is that already done by
++ * copy_to_user()?
++ */
++ return copy_to_user(argp,
++ &rar_info,
++ sizeof(rar_info)) ? -EFAULT : 0;
++
++ default:
++ return -ENOTTY;
++ }
++
++ return 0;
++}
++
++static int memrar_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++ size_t const size = vma->vm_end - vma->vm_start;
++
++ /* Users pass the RAR handle as the mmap() offset parameter. */
++ unsigned long const handle = vma->vm_pgoff << PAGE_SHIFT;
++
++ struct memrar_rar_info * const rar = memrar_get_rar_info(handle);
++
++ unsigned long pfn;
++
++ /* Invalid RAR handle or size passed to mmap(). */
++ if (rar == NULL
++ || handle == 0
++ || size > (handle - (unsigned long) rar->iobase))
++ return -EINVAL;
++
++ /*
++ * Retrieve physical address corresponding to the RAR handle,
++ * and convert it to a page frame.
++ */
++ pfn = memrar_get_physical_address(rar, handle) >> PAGE_SHIFT;
++
++
++ pr_debug("memrar: mapping RAR range [0x%lx, 0x%lx) into user space.\n",
++ handle,
++ handle + size);
++
++ /*
++ * Map RAR memory into user space. This is really only useful
++ * for debugging purposes since the memory won't be
++	 * accessible, i.e. reads return zero and writes are ignored,
++ * when it is locked down.
++ */
++ if (remap_pfn_range(vma,
++ vma->vm_start,
++ pfn,
++ size,
++ vma->vm_page_prot))
++ return -EAGAIN;
++
++ /* vma->vm_ops = &memrar_mem_ops; */
++
++ return 0;
++}
++
++static int memrar_open(struct inode *inode, struct file *filp)
++{
++ /* Nothing to do yet. */
++
++ return 0;
++}
++
++static int memrar_release(struct inode *inode, struct file *filp)
++{
++ /* Free all regions associated with the given file handle. */
++
++ struct memrar_buffer_info *pos;
++ struct memrar_buffer_info *tmp;
++ int z;
++
++ for (z = 0; z != MRST_NUM_RAR; ++z) {
++ struct memrar_rar_info * const rar = &memrars[z];
++
++ mutex_lock(&rar->lock);
++
++ list_for_each_entry_safe(pos,
++ tmp,
++ &rar->buffers.list,
++ list) {
++ if (filp == pos->owner)
++ kref_put(&pos->refcount,
++ memrar_release_block_i);
++ }
++
++ mutex_unlock(&rar->lock);
++ }
++
++ return 0;
++}
++
++/*
++ * @note This function is part of the kernel space memrar driver API.
++ */
++size_t rar_reserve(struct RAR_buffer *buffers, size_t count)
++{
++ struct RAR_buffer * const end =
++ (buffers == NULL ? buffers : buffers + count);
++ struct RAR_buffer *i;
++
++ size_t reserve_count = 0;
++
++ for (i = buffers; i != end; ++i) {
++ if (memrar_reserve_block(i, NULL) == 0)
++ ++reserve_count;
++ else
++ i->bus_address = 0;
++ }
++
++ return reserve_count;
++}
++EXPORT_SYMBOL(rar_reserve);
++
++/*
++ * @note This function is part of the kernel space memrar driver API.
++ */
++size_t rar_release(struct RAR_buffer *buffers, size_t count)
++{
++ struct RAR_buffer * const end =
++ (buffers == NULL ? buffers : buffers + count);
++ struct RAR_buffer *i;
++
++ size_t release_count = 0;
++
++ for (i = buffers; i != end; ++i) {
++ u32 * const handle = &i->info.handle;
++ if (memrar_release_block(*handle) == 0) {
++ /*
++ * @todo We assume we should do this each time
++ * the ref count is decremented. Should
++ * we instead only do this when the ref
++ * count has dropped to zero, and the
++ * buffer has been completely
++ * released/unmapped?
++ */
++ *handle = 0;
++ ++release_count;
++ }
++ }
++
++ return release_count;
++}
++EXPORT_SYMBOL(rar_release);
++
++/*
++ * @note This function is part of the kernel space driver API.
++ */
++size_t rar_handle_to_bus(struct RAR_buffer *buffers, size_t count)
++{
++ struct RAR_buffer * const end =
++ (buffers == NULL ? buffers : buffers + count);
++ struct RAR_buffer *i;
++ struct memrar_buffer_info *pos;
++
++ size_t conversion_count = 0;
++
++ /*
++ * Find all bus addresses corresponding to the given handles.
++ *
++ * @todo Not liking this nested loop. Optimize.
++ */
++ for (i = buffers; i != end; ++i) {
++ struct memrar_rar_info * const rar =
++ memrar_get_rar_info(i->info.handle);
++
++ /*
++ * Check if we have a bogus handle, and then continue
++ * with remaining buffers.
++ */
++ if (rar == NULL) {
++ i->bus_address = 0;
++ continue;
++ }
++
++ mutex_lock(&rar->lock);
++
++ list_for_each_entry(pos, &rar->buffers.list, list) {
++ struct RAR_block_info * const user_info =
++ &pos->buffer.info;
++
++ if (i->info.handle >= user_info->handle
++ && i->info.handle < (user_info->handle
++ + user_info->size)) {
++ u32 const offset =
++ i->info.handle - user_info->handle;
++
++ i->info.type = user_info->type;
++ i->info.size = user_info->size - offset;
++ i->bus_address =
++ pos->buffer.bus_address
++ + offset;
++
++ /* Increment the reference count. */
++ kref_get(&pos->refcount);
++
++ ++conversion_count;
++ break;
++ } else {
++ i->bus_address = 0;
++ }
++ }
++
++ mutex_unlock(&rar->lock);
++ }
++
++ return conversion_count;
++}
++EXPORT_SYMBOL(rar_handle_to_bus);
++
++static const struct file_operations memrar_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = memrar_ioctl,
++ .mmap = memrar_mmap,
++ .open = memrar_open,
++ .release = memrar_release,
++};
++
++static struct miscdevice memrar_miscdev = {
++ .minor = MISC_DYNAMIC_MINOR, /* dynamic allocation */
++ .name = "memrar", /* /dev/memrar */
++ .fops = &memrar_fops
++};
++
++static char const banner[] __initdata =
++ KERN_INFO
++ "Intel RAR Handler: " MEMRAR_VER " initialized.\n";
++
++static int __init memrar_init(void)
++{
++ int result = 0;
++
++ printk(banner);
++
++ /*
++ * We initialize the RAR parameters early on so that we can
++ * discontinue memrar device initialization and registration
++ * if suitably configured RARs are not available.
++ */
++ result = memrar_init_rar_resources(memrar_miscdev.name);
++
++ if (result != 0)
++ return result;
++
++ result = misc_register(&memrar_miscdev);
++
++ if (result != 0) {
++ pr_err("%s: misc_register() failed.\n",
++ memrar_miscdev.name);
++
++ /* Clean up resources previously reserved. */
++ memrar_fini_rar_resources();
++ }
++
++ return result;
++}
++
++static void __exit memrar_exit(void)
++{
++ memrar_fini_rar_resources();
++
++ misc_deregister(&memrar_miscdev);
++}
++
++#ifndef MODULE
++/*
++ * The RAR handler must be initialized after the RAR register driver.
++ * Otherwise the RAR handler will always assume no RAR support
++ * exists.
++ */
++late_initcall_sync(memrar_init);
++#else
++module_init(memrar_init);
++#endif /* MODULE */
++
++module_exit(memrar_exit);
++
++
++MODULE_AUTHOR("Ossama Othman <ossama.othman@intel.com>");
++MODULE_DESCRIPTION("Intel Restricted Access Region Handler");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
++MODULE_VERSION(MEMRAR_VER);
++
++
++
++/*
++ Local Variables:
++ c-file-style: "linux"
++ End:
++*/
+Index: linux-2.6.33/drivers/staging/rar/Kconfig
+===================================================================
+--- linux-2.6.33.orig/drivers/staging/rar/Kconfig
++++ linux-2.6.33/drivers/staging/rar/Kconfig
+@@ -6,7 +6,7 @@ menu "RAR Register Driver"
+ #
+ # Restricted Access Register Manager
+ #
+-config RAR_REGISTER
++config RAR_DRIVER
+ tristate "Restricted Access Region Register Driver"
+ default n
+ ---help---
+Index: linux-2.6.33/drivers/staging/rar/Makefile
+===================================================================
+--- linux-2.6.33.orig/drivers/staging/rar/Makefile
++++ linux-2.6.33/drivers/staging/rar/Makefile
+@@ -1,2 +1,2 @@
+ EXTRA_CFLAGS += -DLITTLE__ENDIAN
+-obj-$(CONFIG_RAR_REGISTER) += rar_driver.o
++obj-$(CONFIG_RAR_DRIVER) += rar_driver.o
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-sensor-driver-1.1.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-sensor-driver-1.1.patch
new file mode 100644
index 0000000..fce4524
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-sensor-driver-1.1.patch
@@ -0,0 +1,1836 @@
+From cffaf6b15ff40cfbeafd0d4728ba3a5a5fb6155f Mon Sep 17 00:00:00 2001
+From: Alan Olsen <alan.r.olsen@intel.com>
+Date: Thu, 15 Oct 2009 14:26:47 -0700
+Subject: [PATCH 062/104] Moorestown Sensor drivers v1.1 consolidated patch
+
+This patch contains the following patches:
+
+Alpha2-1.1-1-5-mrst-Sensors-ALS-Driver-for-Moorestown.patch
+
+ [PATCH] ALS Driver for Moorestown Sensors
+
+ This patch single patch for Alpha2:2.0. ALS driver will read
+ the latest Lux measurement based on the light brightness and
+ will report the LUX output through sysfs interface.
+
+ Signed-off-by: Kalhan Trisal <kalhan.trisal@intel.com>
+
+Alpha2-1.1-2-5-mrst-Sensors-Compass-Driver-for-Moorestown.patch
+
+ [PATCH] Compass Driver for Moorestown Sensors
+ This patch single patch for Alpha2:2.0.This driver will report
+ the heading values in degrees to the sysfs interface.The vlaues
+ returned are head . e.g. 245.6
+
+ Signed-off-by: Kalhan Trisal <kalhan.trisal@intel.com>
+
+Alpha2-1.1-3-5-mrst-Sensors-Accelerometer-Driver-for-Moorestown.patch
+
+ [PATCH] Accelerometer Driver for Moorestown Sensors
+
+ This patch single patch for Alpha2:2.0.Accelerometer driver will
+ read the x,y,z coordinate registers and provide the information to
+ user through sysfs interface.
+
+ Signed-off-by: Kalhan Trisal <kalhan.trisal@intel.com>
+
+Alpha2-1.1-4-5-mrst-Sensors-Vibrator-Driver-for-Moorestown.patch
+
+ [PATCH] Vibrator Driver for Moorestown Sensors
+
+ This patch single patch for Alpha2:2.0.Vibrator can be switched
+ on/off using sysfs interface.
+
+ Signed-off-by: Kalhan Trisal <kalhan.trisal@intel.com>
+
+Alpha2-1.1-5-5-mrst-Sensors-Thermal-Driver-for-Moorestown.patch
+
+ [PATCH] Thermal Driver for Moorestown Sensors
+
+ Moorestown Platform has EMC1403 chip which support three thermal
+ devices, one thermal zone is used by the EMC1403 chip itself and
+ second is used by processor and third one is used for platform
+ (skin temperature).Driver support poll and interrupt
+ mode,min/max/crit configuration can be done using sysfs interface.
+
+ The driver also support interrupt mode when the temperature crosses the
+ threshold configured value the min/max/crit. ALERT/THERM interrupt will
+ be triggered and driver register its callback with GPE driver, and send
+ the events to OSPM power management to take action. OSPM will take
+ action and set the new threshold values till it doesnot get ALERT/THERM
+ events.temp1 is used for configuring internal EMC1403 chip diode, temp2
+ is used to configure processor diode and temp3 is used to configure the
+ platform diode.
+
+ The interrupt mode code has dependency MRST PMIC_GPIO/MAX7315/OSPM.Flag
+ is added to differentiate the generic functionality of the driver with
+ moorestown specific.
+
+ Signed-off-by: Kalhan Trisal <kalhan.trisal at intel.com>
+
+Signed-off-by: Alan Olsen <alan.r.olsen@intel.com>
+---
+ drivers/hwmon/Kconfig | 35 ++
+ drivers/hwmon/Makefile | 4
+ drivers/hwmon/emc1403.c | 731 +++++++++++++++++++++++++++++++++++++++++++++++
+ drivers/hwmon/hmc6352.c | 250 ++++++++++++++++
+ drivers/hwmon/isl29020.c | 248 +++++++++++++++
+ drivers/hwmon/lis331dl.c | 322 ++++++++++++++++++++
+ drivers/misc/Kconfig | 7
+ drivers/misc/Makefile | 1
+ drivers/misc/mrst_vib.c | 99 ++++++
+ 9 files changed, 1697 insertions(+)
+ create mode 100644 drivers/hwmon/emc1403.c
+ create mode 100644 drivers/hwmon/hmc6352.c
+ create mode 100644 drivers/hwmon/isl29020.c
+ create mode 100644 drivers/hwmon/lis331dl.c
+ create mode 100644 drivers/misc/mrst_vib.c
+
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -28,6 +28,41 @@ config HWMON_VID
+ tristate
+ default n
+
++config SENSORS_ISL29020
++ tristate "Intersil ISL29020 ALS"
++ depends on I2C_MRST
++ help
++ If you say yes here you get support for the ALS Devices
++ Ambient Light Sensor monitoring chip.
++ Range values can be configured using sysfs.
++ Lux Data are accessible via sysfs.
++
++config SENSORS_HMC6352
++ tristate "Honeywell HMC6352 compass"
++ depends on I2C_MRST
++ help
++ If you say yes here you get support for the Compass Devices
++ Device can be configured using sysfs.
++ heading data can be accessible via sysfs.
++
++config SENSORS_LIS331DL
++ tristate "STMicroeletronics LIS331DL three-axis digital accelerometer"
++ depends on I2C_MRST
++ help
++ If you say yes here you get support for the Accelerometer Devices
++ Device can be configured using sysfs.
++ x y Z data can be accessible via sysfs.
++
++config SENSORS_EMC1403
++ tristate "SMSC EMC1403 Thermal"
++ depends on I2C_MRST && GPE && GPIO_MAX7315 && MSTWN_POWER_MGMT
++ help
++ If you say yes here you get support for the SMSC Devices
++ EMC1403 temperature monitoring chip.
++
++ Threshold values can be configured using sysfs.
++ Data from the different diode are accessible via sysfs.
++
+ config HWMON_DEBUG_CHIP
+ bool "Hardware Monitoring Chip debugging messages"
+ default n
+--- a/drivers/hwmon/Makefile
++++ b/drivers/hwmon/Makefile
+@@ -99,6 +99,10 @@ obj-$(CONFIG_SENSORS_W83L785TS) += w83l7
+ obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o
+ obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o
+ obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o
++obj-$(CONFIG_SENSORS_ISL29020) += isl29020.o
++obj-$(CONFIG_SENSORS_HMC6352) += hmc6352.o
++obj-$(CONFIG_SENSORS_LIS331DL) += lis331dl.o
++obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o
+
+ ifeq ($(CONFIG_HWMON_DEBUG_CHIP),y)
+ EXTRA_CFLAGS += -DDEBUG
+--- /dev/null
++++ b/drivers/hwmon/emc1403.c
+@@ -0,0 +1,731 @@
++/*
++ * emc1403.c - SMSC Thermal Driver
++ *
++ * Copyright (C) 2008 Intel Corp
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/i2c.h>
++#include <linux/hwmon.h>
++#include <linux/hwmon-sysfs.h>
++#include <linux/hwmon-vid.h>
++#include <linux/interrupt.h>
++#include <linux/workqueue.h>
++#include <linux/err.h>
++#include <linux/delay.h>
++#include <linux/mutex.h>
++#include <linux/sysfs.h>
++#include <linux/gpe.h>
++#include <linux/intel_mid.h>
++
++
++MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com");
++MODULE_DESCRIPTION("emc1403 Thermal Driver");
++MODULE_LICENSE("GPL v2");
++
++/* To support the interrupt mechanism for moorestown interrupt flag is added
++ * If the flag is not enabled it support generic emc1403 chip */
++
++#if defined(CONFIG_GPIO_LNWPMIC) && defined(CONFIG_GPIO_MAX7315) \
++ && defined(CONFIG_MSTWN_POWER_MGMT)
++#define MOORESTOWN_INTERRUPT_ENABLE
++#endif
++
++/* Limit status reg Therm/High/Low/Fault*/
++static const u8 THM_STAT_REG_TEMP[] = { 0x37, 0x35, 0x36, 0x1B, 0x02};
++
++/* Channel diode temp set */
++static const u8 THM_CHAN_TEMP[] = { 0x10, 0x08, 0x04, 0x02, 0x01 };
++
++/* Therm Limit reg store values */
++static const u8 THM_LIMIT_REG_TEMP[] = { 0x05, 0x06, 0x07, 0x08, 0x15, 0x16,
++ 0x19, 0x1A, 0x20, 0x21 };
++
++/* DATA REGISTERS */
++static const u8 THM_REG_CURR_TEMP[] = { 0x00, 0x01, 0x23 };
++
++#define THERMAL_PID_REG 0xfd
++#define THERMAL_SMSC_ID_REG 0xfe
++#define THERMAL_REVISION_REG 0xff
++#define THERMAL_ADC_UPDATE_BUSY 0x80
++#define I2C_THERMAL_SLAVE_ADDR 0x4C
++#define TEMP1 1
++#define TEMP2 2
++#define TEMP3 4
++#define IRQ_TYPE_MASK (1 << 15)
++#define HIGH_EVENT 1
++#define LOW_EVENT 2
++#define THERM_EVENT 3
++#define FAULT_EVENT 4
++#define ALERT_EVENT 1
++#define POWER_STA_ENABLE 0
++#define POWER_STA_DISABLE 1
++#define INTERRUPT_MODE_ENABLE 0
++#define INTERRUPT_MODE_DISABLE 1
++
++struct thermal_data {
++ struct i2c_client *client;
++ struct device *hwmon_dev;
++ int therm_irq;
++ int alert_irq;
++ struct work_struct therm_handler;
++ struct work_struct alert_handler;
++};
++
++static unsigned int i2c_read_current_data(struct i2c_client *client, u8 reg)
++{
++ unsigned int ret_val;
++
++ ret_val = i2c_smbus_read_byte_data(client, reg);
++ return ret_val;
++}
++
++static unsigned int i2c_write_current_data(struct i2c_client *client,
++ unsigned int reg, unsigned int value)
++{
++ int ret_val;
++
++ ret_val = i2c_smbus_write_byte_data(client, reg, value);
++ return ret_val;
++}
++
++static int calculate_offset(int type, int temp_ofs)
++{
++ int offset = 0;
++
++ switch (type) {
++ case TEMP1:
++ if (temp_ofs == 0)
++ offset = 1;
++ else if (temp_ofs == 1)
++ offset = 0;
++ else if (temp_ofs == 2)
++ offset = 8;
++ break;
++ case TEMP2:
++ if (temp_ofs == 0)
++ offset = 3;
++ else if (temp_ofs == 1)
++ offset = 2;
++ else if (temp_ofs == 2)
++ offset = 6;
++ break;
++ case TEMP3:
++ if (temp_ofs == 0)
++ offset = 5;
++ else if (temp_ofs == 1)
++ offset = 4;
++ else if (temp_ofs == 2)
++ offset = 7;
++ break;
++ default:
++ offset = -1;
++ printk(KERN_WARNING "emc1403: Invalid arg \n");
++ break;
++ }
++ return offset;
++
++}
++
++#ifdef MOORESTOWN_INTERRUPT_ENABLE
++static void status_reg_read(struct i2c_client *client)
++{
++ i2c_read_current_data(client, 0x36);
++ i2c_read_current_data(client, 0x35);
++ i2c_read_current_data(client, 0x1B);
++}
++
++/* when the thermal governor takes action we unmask the bit
++ * if the temp is lower tham threshold values then no new event will
++ * be raised else if the current temperature is still high the interrupt
++ * will be sent again */
++
++static void reg_unmask_intr(struct i2c_client *client, int offset,
++ int value)
++{
++ u8 ret_val, set_mask, ret = 0, alert = 0;
++
++ ret_val = i2c_read_current_data(client, 0x1F);
++ if (offset == 6 || offset == 7 || offset == 8) {
++ ret = i2c_read_current_data(client, 0x37); /* Themal status */
++ } else if (offset == 2 || offset == 3) {
++ if (((ret_val >> 1) & 1)) {
++ set_mask = (ret_val & 0x05);
++ alert = 1;
++ }
++ } else if (offset == 4 || offset == 5) {
++ if (((ret_val >> 2) & 1)) {
++ set_mask = (ret_val & 0x03);
++ alert = 1;
++ }
++ } else if (offset == 0 || offset == 1) {
++ if (ret_val & 1) {
++ set_mask = (ret_val & 0x06);
++ alert = 1;
++ }
++ }
++ /* only rest set the mask for alert events */
++ if (alert == 1) {
++ status_reg_read(client);
++ i2c_write_current_data(client, 0x1F, set_mask);
++ }
++}
++#endif
++
++static ssize_t show_temp_auto_offset(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct sensor_device_attribute_2 *s_attr = to_sensor_dev_attr_2(attr);
++ int temp_index = s_attr->index;
++ int temp_ofs = s_attr->nr;
++ struct i2c_client *client = to_i2c_client(dev);
++ int ret_val = 0;
++ int ret_offset = 0;
++
++ ret_offset = calculate_offset(temp_index, temp_ofs);
++ if (ret_offset != -1) {
++ ret_val = i2c_read_current_data(client,
++ THM_LIMIT_REG_TEMP[ret_offset]);
++ return sprintf(buf, "%d\n", ret_val);
++ } else {
++ return -EINVAL;
++ }
++}
++
++static ssize_t store_temp_auto_offset(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct sensor_device_attribute_2 *s_attr = to_sensor_dev_attr_2(attr);
++ int temp_index = s_attr->index;
++ int temp_ofs = s_attr->nr;
++ struct i2c_client *client = to_i2c_client(dev);
++ unsigned long val;
++ int ret_offset = 0;
++
++ if (strict_strtoul(buf, 10, &val))
++ return -EINVAL;
++ ret_offset = calculate_offset(temp_index, temp_ofs);
++ if (ret_offset != -1) {
++ i2c_write_current_data(client,
++ THM_LIMIT_REG_TEMP[ret_offset], val);
++#ifdef MOORESTOWN_INTERRUPT_ENABLE
++ reg_unmask_intr(client, ret_offset, val);
++#endif
++ return count;
++ } else {
++ return -EINVAL;
++ }
++}
++
++static ssize_t show_temp_hyst(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int ret_val;
++
++ ret_val = i2c_read_current_data(client, THM_LIMIT_REG_TEMP[9]);
++ return sprintf(buf, "%d\n", ret_val);
++}
++
++static ssize_t store_temp_hyst(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ unsigned long val = 0;
++
++ if (strict_strtoul(buf, 10, &val))
++ return -EINVAL;
++ i2c_write_current_data(client, THM_LIMIT_REG_TEMP[9], val);
++ return count;
++}
++
++static ssize_t show_temp1_curr_temp(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int ret_val;
++
++ ret_val = i2c_read_current_data(client, THM_REG_CURR_TEMP[0]);
++ return sprintf(buf, "%d\n", ret_val);
++}
++
++static ssize_t show_temp2_curr_temp(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int ret_val;
++
++ ret_val = i2c_read_current_data(client, THM_REG_CURR_TEMP[1]);
++ return sprintf(buf, "%d\n", ret_val);
++}
++
++static ssize_t show_temp3_curr_temp(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int ret_val;
++
++ ret_val = i2c_read_current_data(client, THM_REG_CURR_TEMP[2]);
++ return sprintf(buf, "%d\n", ret_val);
++}
++
++static ssize_t show_status_reg(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int ret_val1, ret_val2, ret_val3, ret_val4;
++
++ ret_val1 = i2c_read_current_data(client, 0x1F);
++ ret_val2 = i2c_read_current_data(client, 0x35);
++ ret_val3 = i2c_read_current_data(client, 0x36);
++ ret_val4 = i2c_read_current_data(client, 0x37);
++ return sprintf(buf, "alarm=%x,High=%x,Low=%x,Therm=%x \n",
++ ret_val1, ret_val2, ret_val3, ret_val4);
++}
++
++static ssize_t show_power_state(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int ret_val;
++
++ ret_val = i2c_read_current_data(client, 0x03);
++ ret_val = ret_val & 0x40;
++ if (ret_val == 0x40)
++ ret_val = 1;
++ return sprintf(buf, "%x", ret_val);
++}
++
++static ssize_t store_power_state(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ unsigned long val = 0;
++ char curr_val;
++
++ if (strict_strtoul(buf, 10, &val))
++ return -EINVAL;
++
++ curr_val = i2c_read_current_data(client, 0x03);
++ if (val == POWER_STA_ENABLE)
++ curr_val = curr_val & 0xBF;
++ else if (val == POWER_STA_DISABLE)
++ curr_val = curr_val | 0x40;
++ else
++ return -EINVAL;
++ i2c_write_current_data(client, 0x03, curr_val);
++ return count;
++}
++
++static ssize_t show_mode(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int ret_val;
++
++ ret_val = i2c_read_current_data(client, 0x03);
++ ret_val = ret_val & 0x80;
++ if (ret_val == 0x80)
++ ret_val = 1;
++ return sprintf(buf, "%x", ret_val);
++}
++
++static ssize_t store_mode(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ unsigned long val = 0;
++ char curr_val;
++
++ if (strict_strtoul(buf, 10, &val))
++ return -EINVAL;
++
++ curr_val = i2c_read_current_data(client, 0x03);
++ if (val == INTERRUPT_MODE_ENABLE)
++ curr_val = curr_val & 0x7F;
++ else if (val == INTERRUPT_MODE_DISABLE)
++ curr_val = curr_val | 0x80;
++ else
++ return -EINVAL;
++ i2c_write_current_data(client, 0x03, curr_val);
++ return count;
++}
++
++static SENSOR_DEVICE_ATTR_2(temp1_min, S_IRUGO | S_IWUSR,
++ show_temp_auto_offset, store_temp_auto_offset, 0, 1);
++static SENSOR_DEVICE_ATTR_2(temp1_max, S_IRUGO | S_IWUSR,
++ show_temp_auto_offset, store_temp_auto_offset, 1, 1);
++static SENSOR_DEVICE_ATTR_2(temp1_crit, S_IRUGO | S_IWUSR,
++ show_temp_auto_offset, store_temp_auto_offset, 2, 1);
++static DEVICE_ATTR(temp1_curr, S_IRUGO, show_temp1_curr_temp, NULL);
++
++static SENSOR_DEVICE_ATTR_2(temp2_min, S_IRUGO | S_IWUSR,
++ show_temp_auto_offset, store_temp_auto_offset, 0, 2);
++static SENSOR_DEVICE_ATTR_2(temp2_max, S_IRUGO | S_IWUSR,
++ show_temp_auto_offset, store_temp_auto_offset, 1, 2);
++static SENSOR_DEVICE_ATTR_2(temp2_crit, S_IRUGO | S_IWUSR,
++ show_temp_auto_offset, store_temp_auto_offset, 2, 2);
++static DEVICE_ATTR(temp2_curr, S_IRUGO, show_temp2_curr_temp, NULL);
++
++static SENSOR_DEVICE_ATTR_2(temp3_min, S_IRUGO | S_IWUSR,
++ show_temp_auto_offset, store_temp_auto_offset, 0, 4);
++static SENSOR_DEVICE_ATTR_2(temp3_max, S_IRUGO | S_IWUSR,
++ show_temp_auto_offset, store_temp_auto_offset, 1, 4);
++static SENSOR_DEVICE_ATTR_2(temp3_crit, S_IRUGO | S_IWUSR,
++ show_temp_auto_offset, store_temp_auto_offset, 2, 4);
++static DEVICE_ATTR(temp3_curr, S_IRUGO, show_temp3_curr_temp, NULL);
++
++static DEVICE_ATTR(hyster, S_IRUGO | S_IWUSR, show_temp_hyst, store_temp_hyst);
++static DEVICE_ATTR(status, S_IRUGO, show_status_reg, NULL);
++
++static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
++ show_power_state, store_power_state);
++static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, show_mode, store_mode);
++
++static struct attribute *mid_att_thermal[] = {
++ &sensor_dev_attr_temp1_min.dev_attr.attr,
++ &sensor_dev_attr_temp1_max.dev_attr.attr,
++ &sensor_dev_attr_temp1_crit.dev_attr.attr,
++ &dev_attr_temp1_curr.attr,
++ &sensor_dev_attr_temp2_min.dev_attr.attr,
++ &sensor_dev_attr_temp2_max.dev_attr.attr,
++ &sensor_dev_attr_temp2_crit.dev_attr.attr,
++ &dev_attr_temp2_curr.attr,
++ &sensor_dev_attr_temp3_min.dev_attr.attr,
++ &sensor_dev_attr_temp3_max.dev_attr.attr,
++ &sensor_dev_attr_temp3_crit.dev_attr.attr,
++ &dev_attr_temp3_curr.attr,
++ &dev_attr_hyster.attr,
++ &dev_attr_status.attr,
++ &dev_attr_power_state.attr,
++ &dev_attr_mode.attr,
++ NULL
++};
++
++static struct attribute_group m_thermal_gr = {
++ .name = "emc1403",
++ .attrs = mid_att_thermal
++};
++
++static void emc1403_set_default_config(struct i2c_client *client)
++{
++ i2c_smbus_write_byte_data(client, 0x03, 0x00);
++ i2c_smbus_write_byte_data(client, 0x04, 0x02);
++ i2c_smbus_write_byte_data(client, 0x22, 0x00);
++}
++
++#ifdef MOORESTOWN_INTERRUPT_ENABLE
++static irqreturn_t therm_interrupt_handler(int id, void *dev)
++{
++ struct thermal_data *data = (struct thermal_data *)dev;
++ schedule_work(&data->therm_handler);
++
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t alert_interrupt_handler(int id, void *dev)
++{
++ struct thermal_data *data = (struct thermal_data *)dev;
++ schedule_work(&data->alert_handler);
++
++ return IRQ_HANDLED;
++}
++
++/* when the device raise the interrupt we mask the interrupt
++ * bit for that device as the status register is R-C
++ * so that till thermal governor doesnot take action we need
++ * not to send continuous events */
++
++static int interrupt_status(struct i2c_client *client, u8 diode_reg_val,
++ u8 *status, u8 event)
++{
++ u8 crit_st = 0, set_mask = 0;
++
++ set_mask = i2c_read_current_data(client, 0x1F);
++ if (diode_reg_val & THM_CHAN_TEMP[3]) {
++ set_mask = (set_mask | 0x02);
++ crit_st = (crit_st | 2);
++ }
++ if (diode_reg_val & THM_CHAN_TEMP[2]) {
++ set_mask = (set_mask | 0x04);
++ crit_st = (crit_st | 4);
++ }
++ if (diode_reg_val & THM_CHAN_TEMP[4]) {
++ set_mask = (set_mask | 0x01);
++ crit_st = (crit_st | 1);
++ }
++ if (event == ALERT_EVENT)
++ i2c_smbus_write_byte_data(client, 0x1F, set_mask);
++ *status = crit_st;
++ return 0;
++}
++
++static void ospm_event(int event_id, int sensor_id, int curr_temp)
++{
++ if (event_id == THERM_EVENT) {
++ printk(KERN_ALERT "emc1403: Sensor Id = %d crit event \
++ temp = %d \n", sensor_id, curr_temp);
++ ospm_generate_netlink_event(sensor_id,
++ OSPM_EVENT_THERMAL_CRITICAL);
++ }
++ if (event_id == HIGH_EVENT) {
++ printk(KERN_ALERT "emc1403: Sensor Id = %d AUX1 event \
++ temp = %d \n", sensor_id, curr_temp);
++ ospm_generate_netlink_event(sensor_id,
++ OSPM_EVENT_THERMAL_AUX1);
++ }
++ if (event_id == LOW_EVENT) {
++ printk(KERN_ALERT "emc1403: Sensor Id = %d AUX0 event \
++ temp = %d \n", sensor_id, curr_temp);
++ ospm_generate_netlink_event(sensor_id,
++ OSPM_EVENT_THERMAL_AUX0);
++ }
++ if (event_id == FAULT_EVENT) {
++ printk(KERN_ALERT "emc1403: Sensor Id = %d Fault event \
++ temp = %d \n", sensor_id, curr_temp);
++ ospm_generate_netlink_event(sensor_id,
++ OSPM_EVENT_THERMAL_DEV_FAULT);
++ }
++}
++
++static void send_event(struct i2c_client *client, int status, int event_id)
++{
++ int ret_val;
++
++ if (status & TEMP1) {
++ ret_val = i2c_read_current_data(client, THM_REG_CURR_TEMP[0]);
++ ospm_event(event_id, TEMP_DEV_ID1, ret_val);
++ }
++ if (status & TEMP2) {
++ ret_val = i2c_read_current_data(client, THM_REG_CURR_TEMP[1]);
++ ospm_event(event_id, TEMP_DEV_ID2, ret_val);
++ }
++ if (status & TEMP3) {
++ ret_val = i2c_read_current_data(client, THM_REG_CURR_TEMP[2]);
++ ospm_event(event_id, TEMP_DEV_ID3, ret_val);
++ }
++}
++
++static void therm_handle_intrpt(struct work_struct *work)
++{
++ u8 status, reg_val;
++ struct thermal_data *data = container_of(work,
++ struct thermal_data, therm_handler);
++
++ /* check if therm_module_info is initialized */
++ if (!data)
++ return;
++ /* Which DIODE has raised the interrupt 0x1B
++ internal/External1/External2 */
++ reg_val = i2c_smbus_read_byte_data(data->client,
++ THM_STAT_REG_TEMP[0]);
++ interrupt_status(data->client, reg_val, &status, THERM_EVENT);
++ send_event(data->client, status, THERM_EVENT);
++}
++
++static void alert_handle_intrpt(struct work_struct *work)
++{
++ int sta_reg_val, reg_val;
++ u8 status;
++ struct thermal_data *data = container_of(work,
++ struct thermal_data, alert_handler);
++ if (!data)
++ return;
++ /* HIGH/ LOW / FAULT Alert has occured for */
++ reg_val = i2c_smbus_read_byte_data(data->client, THM_STAT_REG_TEMP[4]);
++ /* High status bit is set */
++ if (reg_val & THM_CHAN_TEMP[0]) {
++ /* Which DIODE has raised the interrupt 0x1B
++ internal/External1/External2 */
++ sta_reg_val = i2c_smbus_read_byte_data(data->client,
++ THM_STAT_REG_TEMP[1]);
++ interrupt_status(data->client, sta_reg_val, &status,
++ ALERT_EVENT);
++ send_event(data->client, status, HIGH_EVENT);
++ }
++ /* Low status bit is set */
++ if (reg_val & THM_CHAN_TEMP[1]) {
++ sta_reg_val = i2c_smbus_read_byte_data(data->client,
++ THM_STAT_REG_TEMP[2]);
++ interrupt_status(data->client, sta_reg_val, &status,
++ ALERT_EVENT);
++ send_event(data->client, status, LOW_EVENT);
++ }
++ /* Fault status bit is set */
++ if (reg_val & THM_CHAN_TEMP[2]) {
++ sta_reg_val = i2c_smbus_read_byte_data(data->client,
++ THM_STAT_REG_TEMP[3]);
++ interrupt_status(data->client, sta_reg_val, &status,
++ ALERT_EVENT);
++ send_event(data->client, status, FAULT_EVENT);
++ }
++}
++#endif
++
++static int emc1403_probe(struct i2c_client *new_client,
++ const struct i2c_device_id *id)
++{
++ int res = 0;
++ struct thermal_data *data;
++ u16 pid, smsc_id, revision;
++
++#ifdef MOORESTOWN_INTERRUPT_ENABLE
++ u16 t_irq, a_irq;
++#endif
++ data = kzalloc(sizeof(struct thermal_data), GFP_KERNEL);
++
++ if (data == NULL) {
++ printk(KERN_WARNING "emc1403: Memory allocation failed");
++ return -ENOMEM;
++ }
++ data->client = new_client;
++ i2c_set_clientdata(new_client, data);
++
++ /* Check if thermal chip is SMSC and EMC1403 */
++ smsc_id = i2c_read_current_data(new_client,
++ THERMAL_SMSC_ID_REG);
++ if (smsc_id != 0x5d) {
++ printk(KERN_WARNING "emc1403: vendor id mismatch \n");
++ goto thermal_error1;
++ }
++ pid = i2c_read_current_data(new_client, THERMAL_PID_REG);
++ if (pid != 0x21) {
++ printk(KERN_WARNING "emc1403: Prod id mismatch \n");
++ goto thermal_error1;
++ }
++ revision = i2c_read_current_data(new_client,
++ THERMAL_REVISION_REG);
++ if (revision != 0x01) {
++ printk(KERN_WARNING "emc1403: Rev id mismatch is \n");
++ goto thermal_error1;
++ }
++ res = sysfs_create_group(&new_client->dev.kobj, &m_thermal_gr);
++ if (res) {
++ printk(KERN_WARNING "emc1403: create group failed! \n");
++ hwmon_device_unregister(data->hwmon_dev);
++ goto thermal_error1;
++ }
++ data->hwmon_dev = hwmon_device_register(&new_client->dev);
++ if (IS_ERR(data->hwmon_dev)) {
++ res = PTR_ERR(data->hwmon_dev);
++ data->hwmon_dev = NULL;
++ printk(KERN_WARNING "emc1403:Register hwmon dev Failed\n");
++ goto thermal_error1;
++ }
++#ifdef MOORESTOWN_INTERRUPT_ENABLE
++ INIT_WORK(&data->therm_handler, (void *)therm_handle_intrpt);
++ INIT_WORK(&data->alert_handler, (void *)alert_handle_intrpt);
++ t_irq = new_client->irq;
++ a_irq = *(short *)new_client->dev.platform_data;
++ data->therm_irq = t_irq & ~IRQ_TYPE_MASK;
++ data->alert_irq = a_irq & ~IRQ_TYPE_MASK;
++ /* interpret irq field */
++ if (data->therm_irq == 0x113) {
++ if (t_irq & IRQ_TYPE_MASK) {
++ /* irq -> GPE_ID */
++ res = request_gpe(data->therm_irq,
++ (gpio_function_t)therm_interrupt_handler,
++ data, DETECT_LEVEL_LOW);
++ if (res)
++ dev_crit(&new_client->dev, "%s(): cannot \
++ register therm gpe \n", __func__);
++ } else {
++ res = request_irq(data->therm_irq,
++ therm_interrupt_handler,
++ DETECT_LEVEL_LOW, "emc1403", data);
++ if (res)
++ dev_crit(&new_client->dev, "%s(): \
++ cannot get therm IRQ\n", __func__);
++ }
++ } else {
++ printk(KERN_WARNING"emc1403: IRQ mismatch \
++ sent for therm registration");
++ }
++ if (data->alert_irq == 0x114) {
++ if (a_irq & IRQ_TYPE_MASK) {
++ /* irq -> GPE_ID */
++ res = request_gpe(data->alert_irq,
++ (gpio_function_t)alert_interrupt_handler,
++ data, DETECT_LEVEL_LOW);
++ if (res)
++ dev_crit(&new_client->dev, "%s(): \
++ cannot register alert gpe \n", __func__);
++ } else {
++ res = request_irq(data->alert_irq,
++ alert_interrupt_handler, DETECT_LEVEL_LOW,
++ "emc1403", data);
++ if (res)
++ dev_crit(&new_client->dev, "%s(): cannot \
++ get alert IRQ\n", __func__);
++ }
++ } else {
++ printk(KERN_WARNING"emc1403: IRQ mismatch \
++ sent for alert registration");
++ }
++#endif
++ emc1403_set_default_config(new_client);
++ dev_info(&new_client->dev, "%s EMC1403 Thermal chip found \n",
++ new_client->name);
++ return res;
++thermal_error1:
++ i2c_set_clientdata(new_client, NULL);
++ kfree(data);
++ return res;
++}
++
++static int emc1403_remove(struct i2c_client *client)
++{
++ struct thermal_data *data = i2c_get_clientdata(client);
++
++ hwmon_device_unregister(data->hwmon_dev);
++ sysfs_remove_group(&client->dev.kobj, &m_thermal_gr);
++ kfree(data);
++ return 0;
++}
++
++static struct i2c_device_id emc1403_idtable[] = {
++ { "i2c_thermal", 0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(i2c, emc1403_idtable);
++
++static struct i2c_driver sensor_emc1403 = {
++ .driver = {
++ .name = "emc1403",
++ },
++ .probe = emc1403_probe,
++ .remove = emc1403_remove,
++ .id_table = emc1403_idtable,
++};
++
++static int __init sensor_emc1403_init(void)
++{
++ return i2c_add_driver(&sensor_emc1403);
++}
++
++static void __exit sensor_emc1403_exit(void)
++{
++ i2c_del_driver(&sensor_emc1403);
++}
++
++module_init(sensor_emc1403_init);
++module_exit(sensor_emc1403_exit);
+--- /dev/null
++++ b/drivers/hwmon/hmc6352.c
+@@ -0,0 +1,250 @@
++/*
++ * hmc6352.c - Honeywell Compass Driver
++ *
++ * Copyright (C) 2009 Intel Corp
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/i2c.h>
++#include <linux/hwmon.h>
++#include <linux/hwmon-sysfs.h>
++#include <linux/hwmon-vid.h>
++#include <linux/err.h>
++#include <linux/delay.h>
++#include <linux/mutex.h>
++#include <linux/sysfs.h>
++#include <asm/ipc_defs.h>
++
++
++MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com");
++MODULE_DESCRIPTION("hmc6352 Compass Driver");
++MODULE_LICENSE("GPL v2");
++
++/* internal return values */
++#define COMP_CALIB_START 1
++#define COMP_CALIB_STOP 2
++#define COMP_SLEEP_MODE 0
++#define COMP_ACTIVE_MODE 1
++
++struct compass_data {
++ struct device *hwmon_dev;
++};
++
++static ssize_t compass_calibration_store(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int ret;
++ unsigned long val;
++ char cmd[] = {0x43};
++ char cmd1[] = {0x45};
++ struct i2c_msg msg[] = {
++ { client->addr, 0, 1, cmd },
++ };
++ struct i2c_msg msg1[] = {
++ { client->addr, 0, 1, cmd1 },
++ };
++
++ if (strict_strtoul(buf, 10, &val))
++ return -EINVAL;
++ if (val == COMP_CALIB_START) {
++ client->addr = 0x21;
++ ret = i2c_transfer(client->adapter, msg, 1);
++ if (ret != 1) {
++ printk(KERN_WARNING "hmc6352_comp : i2c callib start \
++ cmd failed \n");
++ return ret;
++ }
++ } else if (val == COMP_CALIB_STOP) {
++ client->addr = 0x21;
++ ret = i2c_transfer(client->adapter, msg1, 1);
++ if (ret != 1) {
++ printk(KERN_WARNING " hmc6352_comp : i2c callib stop \
++ cmd failed \n");
++ return ret;
++ }
++ } else
++ return -EINVAL;
++
++ return count;
++}
++
++static ssize_t compass_heading_data_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++
++ struct i2c_client *client = to_i2c_client(dev);
++ char cmd[] = {0x41};
++ unsigned char i2c_data[2] = {0, 0};
++ unsigned int ret, ret_val;
++ struct i2c_msg msg[] = {
++ { client->addr, 0, 1, cmd },
++ };
++ struct i2c_msg msg1[] = {
++ { client->addr, I2C_M_RD, 2, i2c_data },
++ };
++
++ client->addr = 0x21;
++ ret = i2c_transfer(client->adapter, msg, 1);
++ if (ret != 1) {
++ printk(KERN_WARNING "hmc6352 : i2c cmd 0x41 failed \n");
++ return ret;
++ }
++ msleep(10); /* sending 0x41 cmd we need to wait for 7-10 milli second*/
++ ret = i2c_transfer(client->adapter, msg1, 1);
++ if (ret != 1) {
++ printk(KERN_WARNING "hmc6352 : i2c read data cmd failed \n");
++ return ret;
++ }
++ ret_val = i2c_data[0];
++ ret_val = ((ret_val << 8) | i2c_data[1]);
++ return sprintf(buf, "%d.%d\n", ret_val/10, ret_val%10);
++}
++
++static ssize_t compass_power_mode_store(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++
++ struct i2c_client *client = to_i2c_client(dev);
++ unsigned long val;
++ unsigned int ret;
++ char cmd[] = {0x53};
++ char cmd1[] = {0x57};
++ struct i2c_msg msg[] = {
++ { client->addr, 0, 1, cmd },
++ };
++ struct i2c_msg msg1[] = {
++ { client->addr, 0, 1, cmd1 },
++ };
++
++ if (strict_strtoul(buf, 10, &val))
++ return -EINVAL;
++
++ if (val == COMP_SLEEP_MODE) {
++ ret = i2c_transfer(client->adapter, msg, 1);
++ if (ret != 1)
++ printk(KERN_WARNING "hmc6352: i2c cmd sleep mode \
++ failed \n");
++ } else if (val == COMP_ACTIVE_MODE) {
++ ret = i2c_transfer(client->adapter, msg1, 1);
++ if (ret != 1)
++ printk(KERN_WARNING "hmc6352: i2c cmd active mode \
++ failed \n");
++ } else
++ return -EINVAL;
++
++ return count;
++}
++
++static DEVICE_ATTR(heading, S_IRUGO, compass_heading_data_show, NULL);
++static DEVICE_ATTR(calibration, S_IWUSR, NULL, compass_calibration_store);
++static DEVICE_ATTR(power_state, S_IWUSR, NULL, compass_power_mode_store);
++
++static struct attribute *mid_att_compass[] = {
++ &dev_attr_heading.attr,
++ &dev_attr_calibration.attr,
++ &dev_attr_power_state.attr,
++ NULL
++};
++
++static struct attribute_group m_compass_gr = {
++ .name = "hmc6352",
++ .attrs = mid_att_compass
++};
++
++static int hmc6352_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ int res;
++ struct compass_data *data;
++
++ data = kzalloc(sizeof(struct compass_data), GFP_KERNEL);
++ if (data == NULL) {
++ printk(KERN_WARNING "hmc6352: Memory initialization failed");
++ return -ENOMEM;
++ }
++ i2c_set_clientdata(client, data);
++
++ res = sysfs_create_group(&client->dev.kobj, &m_compass_gr);
++ if (res) {
++ printk(KERN_WARNING "hmc6352: device_create_file failed!!\n");
++ goto compass_error1;
++ }
++ data->hwmon_dev = hwmon_device_register(&client->dev);
++ if (IS_ERR(data->hwmon_dev)) {
++ res = PTR_ERR(data->hwmon_dev);
++ data->hwmon_dev = NULL;
++ printk(KERN_WARNING "hmc6352: fail to register hwmon device\n");
++ sysfs_remove_group(&client->dev.kobj, &m_compass_gr);
++ goto compass_error1;
++ }
++ dev_info(&client->dev, "%s HMC6352 compass chip found \n",
++ client->name);
++ return res;
++
++compass_error1:
++ i2c_set_clientdata(client, NULL);
++ kfree(data);
++ return res;
++}
++
++static int hmc6352_remove(struct i2c_client *client)
++{
++ struct compass_data *data = i2c_get_clientdata(client);
++
++ hwmon_device_unregister(data->hwmon_dev);
++ sysfs_remove_group(&client->dev.kobj, &m_compass_gr);
++ kfree(data);
++ return 0;
++}
++
++static struct i2c_device_id hmc6352_id[] = {
++ { "i2c_compass", 0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(i2c, hmc6352_id);
++
++static struct i2c_driver hmc6352_driver = {
++ .driver = {
++ .name = "hmc6352",
++ },
++ .probe = hmc6352_probe,
++ .remove = hmc6352_remove,
++ .id_table = hmc6352_id,
++};
++
++static int __init sensor_hmc6352_init(void)
++{
++ int res;
++
++ res = i2c_add_driver(&hmc6352_driver);
++ return res;
++}
++
++static void __exit sensor_hmc6352_exit(void)
++{
++ i2c_del_driver(&hmc6352_driver);
++}
++
++module_init(sensor_hmc6352_init);
++module_exit(sensor_hmc6352_exit);
+--- /dev/null
++++ b/drivers/hwmon/isl29020.c
+@@ -0,0 +1,248 @@
++/*
++ * isl29020.c - Intersil ALS Driver
++ *
++ * Copyright (C) 2008 Intel Corp
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/i2c.h>
++#include <linux/hwmon.h>
++#include <linux/hwmon-sysfs.h>
++#include <linux/hwmon-vid.h>
++#include <linux/err.h>
++#include <linux/delay.h>
++#include <linux/mutex.h>
++#include <linux/sysfs.h>
++#include <asm/ipc_defs.h>
++
++MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com");
++MODULE_DESCRIPTION("intersil isl29020 ALS Driver");
++MODULE_LICENSE("GPL v2");
++
++#define ALS_MIN_RANGE_VAL 0
++#define ALS_MAX_RANGE_VAL 5
++#define POWER_STA_ENABLE 1
++#define POWER_STA_DISABLE 0
++
++struct als_data {
++ struct device *hwmon_dev;
++};
++
++static unsigned int i2c_write_current_data(struct i2c_client *client,
++ unsigned int reg, unsigned int value)
++{
++ int ret_val;
++
++ ret_val = i2c_smbus_write_byte_data(client, reg, value);
++ return ret_val;
++}
++
++static ssize_t als_sensing_range_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int val;
++
++ val = i2c_smbus_read_byte_data(client, 0x00);
++ return sprintf(buf, "%d000\n", 1 << (2 * (val & 3)));
++
++}
++
++static ssize_t als_lux_output_data_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ unsigned int ret_val, val;
++ unsigned long int lux, max_count;
++ int tempv1, tempv2;
++
++ max_count = 65535;
++ tempv1 = i2c_smbus_read_byte_data(client, 0x02); /* MSB data */
++ tempv2 = i2c_smbus_read_byte_data(client, 0x01); /* LSB data */
++ ret_val = tempv1;
++ ret_val = (ret_val << 8 | tempv2);
++ val = i2c_smbus_read_byte_data(client, 0x00);
++ lux = ((((1 << (2 * (val & 3))))*1000) * ret_val) / max_count;
++ return sprintf(buf, "%ld\n", lux);
++}
++
++static ssize_t als_sensing_range_store(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ unsigned int ret_val, set_val = 0;
++ unsigned long val;
++
++ if (strict_strtoul(buf, 10, &val))
++ return -EINVAL;
++ ret_val = i2c_smbus_read_byte_data(client, 0x00);
++ ret_val = ret_val & 0xFC; /*reset the bit before setting them */
++ if (val == 1)
++ set_val = (ret_val | 0x00); /* setting the 1:0 bit */
++ else if (val == 2)
++ set_val = (ret_val | 0x01);
++ else if (val == 3)
++ set_val = (ret_val | 0x02);
++ else if (val == 4)
++ set_val = (ret_val | 0x03);
++ else
++ goto invarg;
++ i2c_write_current_data(client, 0x00, set_val);
++ return count;
++invarg:
++ return -EINVAL;
++}
++
++static ssize_t als_power_status_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int ret_val;
++
++ ret_val = i2c_smbus_read_byte_data(client, 0x00);
++ ret_val = ret_val & 0x80;
++ if (ret_val == 0x80)
++ ret_val = 1;
++ return sprintf(buf, "%x", ret_val);
++}
++
++static ssize_t als_power_status_store(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ unsigned long val = 0;
++ char curr_val;
++
++ if (strict_strtoul(buf, 10, &val))
++ return -EINVAL;
++
++ curr_val = i2c_smbus_read_byte_data(client, 0x00);
++ if (val == POWER_STA_ENABLE)
++ curr_val = curr_val | 0x80;
++ else if (val == POWER_STA_DISABLE)
++ curr_val = curr_val & 0x7F;
++ else
++ return -EINVAL;
++ i2c_write_current_data(client, 0x00, curr_val);
++ return count;
++}
++
++static DEVICE_ATTR(sensing_range, S_IRUGO | S_IWUSR,
++ als_sensing_range_show, als_sensing_range_store);
++static DEVICE_ATTR(lux_output, S_IRUGO, als_lux_output_data_show, NULL);
++static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
++ als_power_status_show, als_power_status_store);
++
++static struct attribute *mid_att_als[] = {
++ &dev_attr_sensing_range.attr,
++ &dev_attr_lux_output.attr,
++ &dev_attr_power_state.attr,
++ NULL
++};
++
++static struct attribute_group m_als_gr = {
++ .name = "isl29020",
++ .attrs = mid_att_als
++};
++
++static void als_set_default_config(struct i2c_client *client)
++{
++ i2c_write_current_data(client, 0x00, 0xc0);
++}
++
++static int isl29020_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ int res;
++ struct als_data *data;
++
++ data = kzalloc(sizeof(struct als_data), GFP_KERNEL);
++ if (data == NULL) {
++ printk(KERN_WARNING " isl29020: Memory initialization failed");
++ return -ENOMEM;
++ }
++ i2c_set_clientdata(client, data);
++
++ res = sysfs_create_group(&client->dev.kobj, &m_als_gr);
++ if (res) {
++ printk(KERN_WARNING "isl29020: device create file failed!!\n");
++ goto als_error1;
++ }
++ data->hwmon_dev = hwmon_device_register(&client->dev);
++ if (IS_ERR(data->hwmon_dev)) {
++ res = PTR_ERR(data->hwmon_dev);
++ data->hwmon_dev = NULL;
++ sysfs_remove_group(&client->dev.kobj, &m_als_gr);
++ printk(KERN_ALERT "isl29020:unable to register hwmon device\n");
++ goto als_error1;
++ }
++ dev_info(&client->dev, "%s isl29020: ALS chip found \n", client->name);
++ als_set_default_config(client);
++ return res;
++
++als_error1:
++ i2c_set_clientdata(client, NULL);
++ kfree(data);
++ return res;
++}
++
++static int isl29020_remove(struct i2c_client *client)
++{
++ struct als_data *data = i2c_get_clientdata(client);
++
++ hwmon_device_unregister(data->hwmon_dev);
++ sysfs_remove_group(&client->dev.kobj, &m_als_gr);
++ kfree(data);
++ return 0;
++}
++
++static struct i2c_device_id isl29020_id[] = {
++ { "i2c_als", 0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(i2c, isl29020_id);
++
++static struct i2c_driver isl29020_driver = {
++ .driver = {
++ .name = "isl29020",
++ },
++ .probe = isl29020_probe,
++ .remove = isl29020_remove,
++ .id_table = isl29020_id,
++};
++
++static int __init sensor_isl29020_init(void)
++{
++ int res;
++
++ res = i2c_add_driver(&isl29020_driver);
++ return res;
++}
++
++static void __exit sensor_isl29020_exit(void)
++{
++ i2c_del_driver(&isl29020_driver);
++}
++
++module_init(sensor_isl29020_init);
++module_exit(sensor_isl29020_exit);
+--- /dev/null
++++ b/drivers/hwmon/lis331dl.c
+@@ -0,0 +1,322 @@
++/*
++ * lis331dl.c - ST LIS331DL Accelerometer Driver
++ *
++ * Copyright (C) 2009 Intel Corp
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/i2c.h>
++#include <linux/hwmon.h>
++#include <linux/hwmon-sysfs.h>
++#include <linux/hwmon-vid.h>
++#include <linux/err.h>
++#include <linux/delay.h>
++#include <linux/mutex.h>
++#include <linux/sysfs.h>
++
++
++MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com");
++MODULE_DESCRIPTION("STMacroelectronics LIS331DL Accelerometer Driver");
++MODULE_LICENSE("GPL v2");
++
++#define ACCEL_DATA_RATE_100HZ 0
++#define ACCEL_DATA_RATE_400HZ 1
++#define ACCEL_POWER_MODE_DOWN 0
++#define ACCEL_POWER_MODE_ACTIVE 1
++#define ACCEL_NORMAL_MODE 0
++#define ACCEL_MEMORY_REBOOT 1
++
++/* internal return values */
++
++struct acclero_data {
++ struct device *hwmon_dev;
++ struct mutex update_lock;
++};
++
++static unsigned int i2c_write_current_data(struct i2c_client *client,
++ unsigned int reg, unsigned int value)
++{
++ int ret_val;
++
++ ret_val = i2c_smbus_write_byte_data(client, reg, value);
++ return ret_val;
++}
++
++static ssize_t data_rate_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int ret_val, val;
++
++ val = i2c_smbus_read_byte_data(client, 0x20);
++ ret_val = (val & 0x80); /* 1= 400HZ 0= 100HZ */
++ if (ret_val == 0x80)
++ ret_val = 1;
++ return sprintf(buf, "%d\n", ret_val);
++
++}
++
++static ssize_t power_mode_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int ret_val, val;
++
++ val = i2c_smbus_read_byte_data(client, 0x20);
++ ret_val = (val & 0x40);
++ if (ret_val == 0x40)
++ ret_val = 1;
++ return sprintf(buf, "%d\n", ret_val);
++}
++
++static ssize_t x_pos_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int ret_val;
++
++ ret_val = i2c_smbus_read_byte_data(client, 0x29);
++ return sprintf(buf, "%d\n", ret_val);
++}
++
++static ssize_t y_pos_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int ret_val;
++
++ ret_val = i2c_smbus_read_byte_data(client, 0x2B);
++ return sprintf(buf, "%d\n", ret_val);
++}
++
++static ssize_t z_pos_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ int ret_val;
++
++ ret_val = i2c_smbus_read_byte_data(client, 0x2D);
++ return sprintf(buf, "%d\n", ret_val);
++}
++
++static ssize_t xyz_pos_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ int x, y, z;
++ struct i2c_client *client = to_i2c_client(dev);
++
++ x = i2c_smbus_read_byte_data(client, 0x29);
++ y = i2c_smbus_read_byte_data(client, 0x2B);
++ z = i2c_smbus_read_byte_data(client, 0x2D);
++ return sprintf(buf, "(%d,%d,%d)\n", x, y, z);
++}
++
++static ssize_t data_rate_store(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ struct acclero_data *data = i2c_get_clientdata(client);
++ unsigned int ret_val, set_val;
++ unsigned long val;
++
++ if (strict_strtoul(buf, 10, &val))
++ return -EINVAL;
++ ret_val = i2c_smbus_read_byte_data(client, 0x20);
++
++ mutex_lock(&data->update_lock);
++ if (val == ACCEL_DATA_RATE_100HZ)
++ set_val = (ret_val & 0x7F); /* setting the 8th bit to 0 */
++ else if (val == ACCEL_DATA_RATE_400HZ)
++ set_val = (ret_val | (1 << 7));
++ else
++ goto invarg;
++
++ i2c_write_current_data(client, 0x20, set_val);
++ mutex_unlock(&data->update_lock);
++ return count;
++invarg:
++ mutex_unlock(&data->update_lock);
++ return -EINVAL;
++}
++
++static ssize_t power_mode_store(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ struct acclero_data *data = i2c_get_clientdata(client);
++ unsigned int ret_val, set_val;
++ unsigned long val;
++
++ if (strict_strtoul(buf, 10, &val))
++ return -EINVAL;
++ ret_val = i2c_smbus_read_byte_data(client, 0x20);
++
++ mutex_lock(&data->update_lock);
++ if (val == ACCEL_POWER_MODE_DOWN)
++ set_val = ret_val & 0xBF; /* if value id 0 */
++ else if (val == ACCEL_POWER_MODE_ACTIVE)
++ set_val = (ret_val | (1<<6)); /* if value is 1 */
++ else
++ goto invarg;
++
++ i2c_write_current_data(client, 0x20, set_val);
++ mutex_unlock(&data->update_lock);
++ return count;
++invarg:
++ mutex_unlock(&data->update_lock);
++ return -EINVAL;
++}
++
++static ssize_t reboot_mem_store(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct i2c_client *client = to_i2c_client(dev);
++ struct acclero_data *data = i2c_get_clientdata(client);
++ unsigned int ret_val, set_val;
++ unsigned long val;
++
++ if (strict_strtoul(buf, 10, &val))
++ return -EINVAL;
++ ret_val = i2c_smbus_read_byte_data(client, 0x21);
++ if (val == ACCEL_MEMORY_REBOOT) {
++ mutex_lock(&data->update_lock);
++ set_val = (ret_val | (1 << 6)); /* setting the 6th bit */
++ i2c_write_current_data(client, 0x21, set_val);
++ mutex_unlock(&data->update_lock);
++ } else
++ return -EINVAL;
++ return count;
++}
++
++static DEVICE_ATTR(data_rate, S_IRUGO | S_IWUSR,
++ data_rate_show, data_rate_store);
++static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
++ power_mode_show, power_mode_store);
++static DEVICE_ATTR(reboot_mem, S_IWUSR, NULL,
++ reboot_mem_store);
++static DEVICE_ATTR(x, S_IRUGO, x_pos_show, NULL);
++static DEVICE_ATTR(y, S_IRUGO, y_pos_show, NULL);
++static DEVICE_ATTR(z, S_IRUGO, z_pos_show, NULL);
++static DEVICE_ATTR(curr_pos, S_IRUGO, xyz_pos_show, NULL);
++
++static struct attribute *mid_att_acclero[] = {
++ &dev_attr_data_rate.attr,
++ &dev_attr_power_state.attr,
++ &dev_attr_reboot_mem.attr,
++ &dev_attr_x.attr,
++ &dev_attr_y.attr,
++ &dev_attr_z.attr,
++ &dev_attr_curr_pos.attr,
++ NULL
++};
++
++static struct attribute_group m_acclero_gr = {
++ .name = "lis331dl",
++ .attrs = mid_att_acclero
++};
++
++static void accel_set_default_config(struct i2c_client *client)
++{
++ i2c_write_current_data(client, 0x20, 0x47);
++}
++
++static int lis331dl_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ int res;
++ struct acclero_data *data;
++
++ data = kzalloc(sizeof(struct acclero_data), GFP_KERNEL);
++ if (data == NULL) {
++ printk(KERN_WARNING "lis331dl: Memory initi failed \n");
++ return -ENOMEM;
++ }
++ mutex_init(&data->update_lock);
++ i2c_set_clientdata(client, data);
++
++ res = sysfs_create_group(&client->dev.kobj, &m_acclero_gr);
++ if (res) {
++ printk(KERN_WARNING "lis331dl: Sysfs group failed!!\n");
++ goto acclero_error1;
++ }
++ data->hwmon_dev = hwmon_device_register(&client->dev);
++ if (IS_ERR(data->hwmon_dev)) {
++ res = PTR_ERR(data->hwmon_dev);
++ data->hwmon_dev = NULL;
++ sysfs_remove_group(&client->dev.kobj, &m_acclero_gr);
++ printk(KERN_WARNING "lis331dl: unable to register \
++ hwmon device\n");
++ goto acclero_error1;
++ }
++ accel_set_default_config(client);
++
++ dev_info(&client->dev, "%s lis331dl: Accelerometer chip \
++ foundn", client->name);
++ return res;
++
++acclero_error1:
++ i2c_set_clientdata(client, NULL);
++ kfree(data);
++ return res;
++}
++
++static int lis331dl_remove(struct i2c_client *client)
++{
++ struct acclero_data *data = i2c_get_clientdata(client);
++
++ hwmon_device_unregister(data->hwmon_dev);
++ sysfs_remove_group(&client->dev.kobj, &m_acclero_gr);
++ kfree(data);
++ return 0;
++}
++
++static struct i2c_device_id lis331dl_id[] = {
++ { "i2c_accel", 0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(i2c, lis331dl_id);
++
++static struct i2c_driver lis331dl_driver = {
++ .driver = {
++ .name = "lis331dl",
++ },
++ .probe = lis331dl_probe,
++ .remove = lis331dl_remove,
++ .id_table = lis331dl_id,
++};
++
++static int __init sensor_lis331dl_init(void)
++{
++ int res;
++
++ res = i2c_add_driver(&lis331dl_driver);
++ return res;
++}
++
++static void __exit sensor_lis331dl_exit(void)
++{
++ i2c_del_driver(&lis331dl_driver);
++}
++
++module_init(sensor_lis331dl_init);
++module_exit(sensor_lis331dl_exit);
+--- a/drivers/misc/Kconfig
++++ b/drivers/misc/Kconfig
+@@ -249,6 +249,13 @@ config SGI_GRU_DEBUG
+ This option enables addition debugging code for the SGI GRU driver. If
+ you are unsure, say N.
+
++config MRST_VIB
++ tristate "vibrator driver for Intel Moorestown platform"
++ help
++ Vibrator for Intel Moorestown platform.
++
++ If unsure, say N.
++
+ config ISL29003
+ tristate "Intersil ISL29003 ambient light sensor"
+ depends on I2C && SYSFS
+--- a/drivers/misc/Makefile
++++ b/drivers/misc/Makefile
+@@ -22,6 +22,7 @@ obj-$(CONFIG_CS5535_MFGPT) += cs5535-mfg
+ obj-$(CONFIG_HP_ILO) += hpilo.o
+ obj-$(CONFIG_MRST) += intel_mrst.o
+ obj-$(CONFIG_ISL29003) += isl29003.o
++obj-$(CONFIG_MRST_VIB) += mrst_vib.o
+ obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
+ obj-$(CONFIG_DS1682) += ds1682.o
+ obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
+--- /dev/null
++++ b/drivers/misc/mrst_vib.c
+@@ -0,0 +1,99 @@
++/*
++ * mrst_vib.c - Intel vibrator Driver
++ *
++ * Copyright (C) 2008 Intel Corp
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++
++#include <linux/platform_device.h>
++#include <linux/kernel.h>
++#include <linux/sysfs.h>
++#include <asm/ipc_defs.h>
++
++
++MODULE_AUTHOR("Kalhan Trisal");
++MODULE_DESCRIPTION("Intel Moorestown Thermal Driver");
++MODULE_LICENSE("GPL v2");
++
++#define VIB_START 1
++#define VIB_STOP 2
++static struct platform_device *vib_pdev;
++
++static ssize_t vib_store(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++
++ struct ipc_pmic_reg_data vib_power_reg_write = {0};
++ unsigned long val;
++
++ if (strict_strtoul(buf, 10, &val))
++ return -EINVAL;
++ if (val == VIB_START) {
++ vib_power_reg_write.ioc = TRUE;
++ vib_power_reg_write.pmic_reg_data[0].register_address = 0x49;
++ vib_power_reg_write.pmic_reg_data[0].value = 0xAD;
++ vib_power_reg_write.num_entries = 1;
++ if (ipc_pmic_register_write(&vib_power_reg_write, TRUE)) {
++ printk(KERN_WARNING "mrst_vib: failed to turn ON \
++ vib \n");
++ return -EINVAL;
++ }
++ } else if (val == VIB_STOP) {
++ vib_power_reg_write.ioc = TRUE;
++ vib_power_reg_write.pmic_reg_data[0].register_address = 0x49;
++ vib_power_reg_write.pmic_reg_data[0].value = 0x14;
++ vib_power_reg_write.num_entries = 1;
++ if (ipc_pmic_register_write(&vib_power_reg_write, TRUE)) {
++ printk(KERN_WARNING "mrst_vib: failed to turn OFF \
++ Vibrator \n");
++ return -EINVAL;
++ }
++ } else
++ return -EINVAL;
++
++ return count;
++}
++
++static struct device_attribute dev_attr_vib =
++ __ATTR(vib, S_IWUSR, NULL, vib_store);
++
++static int __init mrst_vib_init(void)
++{
++ int res = 0;
++
++ vib_pdev = platform_device_register_simple("mrst_vib", -1, NULL, 0);
++ if (IS_ERR(vib_pdev)) {
++ res = PTR_ERR(vib_pdev);
++ vib_pdev = NULL;
++ printk(KERN_WARNING "mrst_vib: unable to register platform \
++ device\n");
++ return res;
++ }
++ res = device_create_file(&vib_pdev->dev, &dev_attr_vib);
++ return res;
++}
++
++static void __exit mrst_vib_exit(void)
++{
++ device_remove_file(&vib_pdev->dev, &dev_attr_vib);
++ platform_device_unregister(vib_pdev);
++}
++
++module_init(mrst_vib_init);
++module_exit(mrst_vib_exit);
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-spi-slave-controller-driver-1.1.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-spi-slave-controller-driver-1.1.patch
new file mode 100644
index 0000000..2328f9a
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-spi-slave-controller-driver-1.1.patch
@@ -0,0 +1,2230 @@
+From f52838cd03de045aa67cc1a0c1614ea5bfb30fcc Mon Sep 17 00:00:00 2001
+From: Alan Olsen <alan.r.olsen@intel.com>
+Date: Wed, 11 Nov 2009 13:12:42 -0800
+Subject: [PATCH 065/104] Moorestown SPI Slave Controller driver v1.1 consolidation patch
+
+This patch contains the following patches:
+
+Alpha2-1.0-1-1-mrst-SPI-Slave-Core-Driver-K29.patch
+
+ [PATCH] SPI Slave Support Added to SPI Core Driver
+
+ Signed-off-by: Pranav K. Sanghadia <pranav.k.sanghadia@intel.com>
+
+Alpha2-1.0-1-1-mrst-SPI-Slave-controller-driver.patch
+
+ [PATCH] SPI slave controller driver for Moorestown platform
+
+ This driver currently supports only programmed IO mode.
+
+ Config settings are:
+
+ CONFIG_SPI_MRST_SLAVE=y
+ CONFIG_SPI_MRST_SLAVE_DMA is not set
+
+ Signed-off-by: Ken Mills <ken.k.mills@intel.com>
+
+Alpha2-1.0-1-1-DMA-Support-added-in-SPI-Slave-Controller-Driver.patch
+
+ [PATCH] This patch adds DMA support for SPI Slave Controller Driver. DMA provides
+ highspeed data transfer between SPI-SSP and external Master mode device
+
+ Signed-off-by: Pranav K. Sanghadia <pranav.k.sanghadia@intel.com>
+
+Alpha2-1.0-1-1-mrst-SPI-Slave-controller-fix-DMA-Issue.patch
+
+ [PATCH] Alpha2-1.0-1-1-mrst-SPI-Slave-controller-fix-DMA-Issue.patch
+
+[PATCH] Optimized SSP clock bitbang routine
+
+Signed-off-by: Ken Mills <ken.k.mills@intel.com>
+
+Signed-off-by: Alan Olsen <alan.r.olsen@intel.com>
+---
+ drivers/spi/Kconfig | 11 +
+ drivers/spi/Makefile | 1 +
+ drivers/spi/mrst_spi_slave.c | 1227 ++++++++++++++++++++++++++++++++++++
+ drivers/spi/spi.c | 403 +++++++++++-
+ include/linux/spi/mrst_spi_slave.h | 143 +++++
+ include/linux/spi/spi.h | 98 +++-
+ 6 files changed, 1862 insertions(+), 21 deletions(-)
+ create mode 100644 drivers/spi/mrst_spi_slave.c
+ create mode 100644 include/linux/spi/mrst_spi_slave.h
+
+diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
+index 9d4ff53..b94445b 100644
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -360,5 +360,16 @@ config SPI_TLE62X0
+ endif # SPI_MASTER
+
+ # (slave support would go here)
++config SPI_MRST_SLAVE
++ tristate "SPI slave controller driver for Intel Moorestown platform "
++ depends on SPI_MASTER
++ help
++ This is the SPI slave controller driver for Intel Moorestown platform
++
++config SPI_MRST_SLAVE_DMA
++ boolean "Enable DMA for MRST SPI Slave Controller"
++ depends on INTEL_LNW_DMAC1
++ help
++ This has to be enabled after Moorestown DMAC1 driver is enabled
+
+ endif # SPI
+diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
+index c78cb77..8acdd96 100644
+--- a/drivers/spi/Makefile
++++ b/drivers/spi/Makefile
+@@ -56,6 +56,7 @@ obj-$(CONFIG_SPI_TLE62X0) += tle62x0.o
+ # ... add above this line ...
+
+ # SPI slave controller drivers (upstream link)
++obj-$(CONFIG_SPI_MRST_SLAVE) += mrst_spi_slave.o
+ # ... add above this line ...
+
+ # SPI slave drivers (protocol for that link)
+diff --git a/drivers/spi/mrst_spi_slave.c b/drivers/spi/mrst_spi_slave.c
+new file mode 100644
+index 0000000..82a50b7
+--- /dev/null
++++ b/drivers/spi/mrst_spi_slave.c
+@@ -0,0 +1,1227 @@
++/*
++ * mrst_spi_slave.c - Moorestown SPI slave controller driver
++ * based on pxa2xx_spi.c
++ *
++ * Copyright (C) Intel 2009
++ * Ken Mills <ken.k.mills@intel.com>
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++
++/*
++ * Note:
++ *
++ * Supports interrupt programmed I/O, DMA and non-interrupt polled transfers.
++ *
++ */
++
++#include <linux/delay.h>
++#include <linux/highmem.h>
++#include <linux/pci.h>
++#include <linux/interrupt.h>
++
++#ifdef CONFIG_SPI_MRST_SLAVE_DMA
++#include <linux/dma-mapping.h>
++#include <linux/lnw_dma.h>
++#endif
++
++#include <linux/spi/spi.h>
++#include <linux/spi/mrst_spi_slave.h>
++
++
++#define DRIVER_NAME "mrst_spi_slave"
++
++#define SSP_NOT_SYNC 0x400000
++
++MODULE_AUTHOR("");
++MODULE_DESCRIPTION("Moorestown SPI Slave Contoller");
++MODULE_LICENSE("GPL");
++
++/*
++ * For testing SSCR1 changes that require SSP restart, basically
++ * everything except the service and interrupt enables
++ */
++#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_EBCEI | SSCR1_SCFR \
++ | SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
++ | SSCR1_SFRMDIR \
++ | SSCR1_RWOT | SSCR1_TRAIL | SSCR1_PINTE \
++ | SSCR1_STRF | SSCR1_EFWR | SSCR1_RFT \
++ | SSCR1_TFT | SSCR1_SPH | SSCR1_SPO)
++
++#define DEFINE_SSP_REG(reg, off) \
++static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
++static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); }
++
++DEFINE_SSP_REG(SSCR0, 0x00)
++DEFINE_SSP_REG(SSCR1, 0x04)
++DEFINE_SSP_REG(SSSR, 0x08)
++DEFINE_SSP_REG(SSITR, 0x0c)
++DEFINE_SSP_REG(SSDR, 0x10)
++DEFINE_SSP_REG(SSTO, 0x28)
++DEFINE_SSP_REG(SSPSP, 0x2c)
++
++DEFINE_SSP_REG(IPCCSR, 0x00);
++DEFINE_SSP_REG(IPCPISR, 0x08);
++DEFINE_SSP_REG(IPCPIMR, 0x10);
++
++DEFINE_SSP_REG(I2CCTRL, 0x00);
++DEFINE_SSP_REG(I2CDATA, 0x04);
++
++DEFINE_SSP_REG(GPLR1, 0x04);
++DEFINE_SSP_REG(GPDR1, 0x0c);
++DEFINE_SSP_REG(GPSR1, 0x14);
++DEFINE_SSP_REG(GPCR1, 0x1C);
++DEFINE_SSP_REG(GAFR1_U, 0x44);
++
++#define START_STATE ((void *)0)
++#define RUNNING_STATE ((void *)1)
++#define DONE_STATE ((void *)2)
++#define ERROR_STATE ((void *)-1)
++
++struct driver_data {
++ /* Driver model hookup */
++ struct pci_dev *pdev;
++
++ /* SPI framework hookup */
++ struct spi_slave *slave;
++
++ /* SSP register addresses */
++ void *paddr;
++ void *ioaddr;
++ u32 iolen;
++ int irq;
++
++ /* IPC registers */
++ void *IPC_paddr;
++ void *IPC_ioaddr;
++
++ /* I2C registers */
++ void *I2C_paddr;
++ void *I2C_ioaddr;
++
++ /* SSP masks*/
++ u32 dma_cr1;
++ u32 int_cr1;
++ u32 clear_sr;
++ u32 mask_sr;
++
++ struct tasklet_struct poll_transfer;
++
++ spinlock_t lock;
++ int busy;
++ int run;
++
++ /* Current message transfer state info */
++ struct spi_message *cur_msg;
++ size_t len;
++ void *tx;
++ void *tx_end;
++ void *rx;
++ void *rx_end;
++ int dma_mapped;
++ dma_addr_t rx_dma;
++ dma_addr_t tx_dma;
++ size_t rx_map_len;
++ size_t tx_map_len;
++ u8 n_bytes;
++ int (*write)(struct driver_data *drv_data);
++ int (*read)(struct driver_data *drv_data);
++ irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
++ void (*cs_control)(u32 command);
++
++#ifdef CONFIG_SPI_MRST_SLAVE_DMA
++ struct lnw_dma_slave dmas_tx;
++ struct lnw_dma_slave dmas_rx;
++ struct dma_chan *txchan;
++ struct dma_chan *rxchan;
++
++ int txdma_done;
++ int rxdma_done;
++ u64 tx_param;
++ u64 rx_param;
++ struct pci_dev *dmac1;
++#endif
++};
++
++struct chip_data {
++ u32 cr0;
++ u32 cr1;
++ u32 psp;
++ u32 timeout;
++ u8 n_bytes;
++ u32 threshold;
++ u8 enable_dma;
++ u8 poll_mode; /* 1 means use poll mode */
++ u8 bits_per_word;
++ int (*write)(struct driver_data *drv_data);
++ int (*read)(struct driver_data *drv_data);
++};
++
++static void flush(struct driver_data *drv_data)
++{
++ void *reg = drv_data->ioaddr;
++ u32 sssr;
++
++ /* If the transmit fifo is not empty, reset the interface. */
++ sssr = read_SSSR(reg);
++ if ((sssr & 0xf00) || (sssr & SSSR_TNF) == 0) {
++ write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
++ return;
++ }
++
++ while (read_SSSR(reg) & SSSR_RNE)
++ read_SSDR(reg);
++
++ write_SSSR(SSSR_ROR, reg);
++ write_SSSR(SSSR_TUR, reg);
++
++ return;
++}
++
++static int null_writer(struct driver_data *drv_data)
++{
++ void *reg = drv_data->ioaddr;
++ u8 n_bytes = drv_data->n_bytes;
++
++ if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
++ || (drv_data->tx == drv_data->tx_end))
++ return 0;
++
++ write_SSDR(0, reg);
++ drv_data->tx += n_bytes;
++
++ return 1;
++}
++
++static int null_reader(struct driver_data *drv_data)
++{
++ void *reg = drv_data->ioaddr;
++ u8 n_bytes = drv_data->n_bytes;
++
++ while ((read_SSSR(reg) & SSSR_RNE)
++ && (drv_data->rx < drv_data->rx_end)) {
++ read_SSDR(reg);
++ drv_data->rx += n_bytes;
++ }
++
++ return drv_data->rx == drv_data->rx_end;
++}
++
++static int u8_writer(struct driver_data *drv_data)
++{
++ void *reg = drv_data->ioaddr;
++ if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
++ || (drv_data->tx == drv_data->tx_end))
++ return 0;
++
++ write_SSDR(*(u8 *)(drv_data->tx), reg);
++ ++drv_data->tx;
++
++ return 1;
++}
++
++static int u8_reader(struct driver_data *drv_data)
++{
++ void *reg = drv_data->ioaddr;
++ while ((read_SSSR(reg) & SSSR_RNE)
++ && (drv_data->rx < drv_data->rx_end)) {
++ *(u8 *)(drv_data->rx) = read_SSDR(reg);
++ ++drv_data->rx;
++ }
++
++ return drv_data->rx == drv_data->rx_end;
++}
++
++static int u16_writer(struct driver_data *drv_data)
++{
++ void *reg = drv_data->ioaddr;
++ if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
++ || (drv_data->tx == drv_data->tx_end))
++ return 0;
++
++ write_SSDR(*(u16 *)(drv_data->tx), reg);
++ drv_data->tx += 2;
++
++ return 1;
++}
++
++static int u16_reader(struct driver_data *drv_data)
++{
++ void *reg = drv_data->ioaddr;
++ while ((read_SSSR(reg) & SSSR_RNE)
++ && (drv_data->rx < drv_data->rx_end)) {
++ *(u16 *)(drv_data->rx) = read_SSDR(reg);
++ drv_data->rx += 2;
++ }
++
++ return drv_data->rx == drv_data->rx_end;
++}
++
++static int u32_writer(struct driver_data *drv_data)
++{
++ void *reg = drv_data->ioaddr;
++ if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
++ || (drv_data->tx == drv_data->tx_end))
++ return 0;
++
++ write_SSDR(*(u32 *)(drv_data->tx), reg);
++ drv_data->tx += 4;
++
++ return 1;
++}
++
++static int u32_reader(struct driver_data *drv_data)
++{
++ void *reg = drv_data->ioaddr;
++ while ((read_SSSR(reg) & SSSR_RNE)
++ && (drv_data->rx < drv_data->rx_end)) {
++ *(u32 *)(drv_data->rx) = read_SSDR(reg);
++ drv_data->rx += 4;
++ }
++
++ return drv_data->rx == drv_data->rx_end;
++}
++
++
++
++/* caller already set message->status; dma and pio irqs are blocked */
++static void giveback(struct driver_data *drv_data)
++{
++ struct spi_message *msg;
++
++ msg = drv_data->cur_msg;
++ msg->state = NULL;
++ if (msg->complete)
++ msg->complete(msg->context);
++}
++
++#ifdef CONFIG_SPI_MRST_SLAVE_DMA
++
++static bool chan_filter(struct dma_chan *chan, void *param)
++{
++ struct driver_data *drv_data = (struct driver_data *)param;
++ bool ret = false;
++
++ if (!drv_data->dmac1)
++ return ret;
++
++ if (chan->device->dev == &drv_data->dmac1->dev)
++ ret = true;
++
++ return ret;
++}
++
++static void int_transfer_complete(struct driver_data *drv_data);
++
++static void mrst_spi_dma_done(void *arg)
++{
++ u64 *param = arg;
++ struct driver_data *drv_data;
++ int *done;
++
++ drv_data = (struct driver_data *)(u32)(*param >> 32);
++ done = (int *)(u32)(*param & 0xffffffff);
++ *done = 1;
++
++ if (!drv_data->txdma_done || !drv_data->rxdma_done)
++ return;
++ int_transfer_complete(drv_data);
++}
++
++static void mrst_spi_dma_init(struct driver_data *drv_data)
++{
++ struct lnw_dma_slave *rxs, *txs;
++ dma_cap_mask_t mask;
++
++ /* Use DMAC1 */
++ drv_data->dmac1 = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0814, NULL);
++ if (!drv_data->dmac1) {
++ printk(KERN_WARNING "SPI Slave:Can't find DMAC1\n");
++ return;
++ }
++
++ /* 1. init rx channel */
++ rxs = &drv_data->dmas_rx;
++
++ rxs->dirn = DMA_FROM_DEVICE;
++ rxs->hs_mode = LNW_DMA_HW_HS;
++ rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
++ rxs->src_width = LNW_DMA_WIDTH_16BIT;
++ rxs->dst_width = LNW_DMA_WIDTH_32BIT;
++ rxs->src_msize = LNW_DMA_MSIZE_8;
++ rxs->dst_msize = LNW_DMA_MSIZE_8;
++
++
++ dma_cap_zero(mask);
++ dma_cap_set(DMA_MEMCPY, mask);
++ dma_cap_set(DMA_SLAVE, mask);
++
++ drv_data->rxchan = dma_request_channel(mask, chan_filter, drv_data);
++ if (!drv_data->rxchan)
++ goto err_exit;
++
++ drv_data->rxchan->private = rxs;
++
++ /* 2. init tx channel */
++ txs = &drv_data->dmas_tx;
++
++ txs->dirn = DMA_TO_DEVICE;
++ txs->hs_mode = LNW_DMA_HW_HS;
++ txs->cfg_mode = LNW_DMA_MEM_TO_PER;
++ txs->src_width = LNW_DMA_WIDTH_32BIT;
++ txs->dst_width = LNW_DMA_WIDTH_16BIT;
++ txs->src_msize = LNW_DMA_MSIZE_8;
++ txs->dst_msize = LNW_DMA_MSIZE_8;
++
++
++ dma_cap_set(DMA_SLAVE, mask);
++ dma_cap_set(DMA_MEMCPY, mask);
++
++ drv_data->txchan = dma_request_channel(mask, chan_filter, drv_data);
++ if (!drv_data->txchan)
++ goto free_rxchan;
++ else
++
++ drv_data->txchan->private = txs;
++
++ /* set the dma done bit to 1 */
++ drv_data->txdma_done = 1;
++ drv_data->rxdma_done = 1;
++
++ drv_data->tx_param = ((u64)(u32)drv_data << 32)
++ | (u32)(&drv_data->txdma_done);
++ drv_data->rx_param = ((u64)(u32)drv_data << 32)
++ | (u32)(&drv_data->rxdma_done);
++ return;
++
++free_rxchan:
++ printk(KERN_ERR "SPI-Slave Error : DMA Channle Not available\n");
++ dma_release_channel(drv_data->rxchan);
++err_exit:
++ printk(KERN_ERR "SPI-Slave Error : DMA Channel Not available\n");
++ pci_dev_put(drv_data->dmac1);
++ return;
++}
++
++static void mrst_spi_dma_exit(struct driver_data *drv_data)
++{
++ dma_release_channel(drv_data->txchan);
++ dma_release_channel(drv_data->rxchan);
++ pci_dev_put(drv_data->dmac1);
++}
++
++static void dma_transfer(struct driver_data *drv_data)
++{
++ dma_addr_t ssdr_addr;
++ struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
++ struct dma_chan *txchan, *rxchan;
++ enum dma_ctrl_flags flag;
++
++ /* get Data Read/Write address */
++ ssdr_addr = (dma_addr_t)(u32)(drv_data->paddr + 0x10);
++
++ if (drv_data->tx_dma)
++ drv_data->txdma_done = 0;
++
++ if (drv_data->rx_dma)
++ drv_data->rxdma_done = 0;
++
++ /* 2. start the TX dma transfer */
++ txchan = drv_data->txchan;
++ rxchan = drv_data->rxchan;
++
++ flag = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
++
++ if (drv_data->rx_dma) {
++ rxdesc = rxchan->device->device_prep_dma_memcpy
++ (rxchan, /* DMA Channel */
++ drv_data->rx_dma, /* DAR */
++ ssdr_addr, /* SAR */
++ drv_data->len, /* Data Length */
++ flag); /* Flag */
++
++ rxdesc->callback = mrst_spi_dma_done;
++ rxdesc->callback_param = &drv_data->rx_param;
++ }
++
++ /* 3. start the RX dma transfer */
++ if (drv_data->tx_dma) {
++ txdesc = txchan->device->device_prep_dma_memcpy
++ (txchan, /* DMA Channel */
++ ssdr_addr, /* DAR */
++ drv_data->tx_dma, /* SAR */
++ drv_data->len, /* Data Length */
++ flag); /* Flag */
++
++ txdesc->callback = mrst_spi_dma_done;
++ txdesc->callback_param = &drv_data->tx_param;
++ }
++
++ if (rxdesc)
++ rxdesc->tx_submit(rxdesc);
++ if (txdesc)
++ txdesc->tx_submit(txdesc);
++
++}
++
++static int map_dma_buffers(struct driver_data *drv_data)
++{
++ drv_data->rx_dma = (dma_addr_t) virt_to_phys(drv_data->rx);
++ drv_data->tx_dma = (dma_addr_t) virt_to_phys(drv_data->tx);
++ return 1;
++}
++#endif
++
++static void int_error_stop(struct driver_data *drv_data, const char* msg)
++{
++ void *reg = drv_data->ioaddr;
++
++ /* Stop and reset SSP */
++ write_SSSR(drv_data->clear_sr, reg);
++ write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
++ write_SSTO(0, reg);
++ flush(drv_data);
++
++ dev_err(&drv_data->pdev->dev, "%s\n", msg);
++
++ drv_data->cur_msg->state = ERROR_STATE;
++}
++
++static void int_transfer_complete(struct driver_data *drv_data)
++{
++ void *reg = drv_data->ioaddr;
++
++ /* Clear Status Register */
++ write_SSSR(drv_data->clear_sr, reg);
++
++#ifdef CONFIG_SPI_MRST_SLAVE_DMA
++ /* Disable Triggers to DMA */
++ write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
++#else
++ /* Disable Interrupt */
++ write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
++#endif
++ /* Stop getting Time Outs */
++ write_SSTO(0, reg);
++
++ /* Update total byte transfered return count actual bytes read */
++ drv_data->cur_msg->actual_length += drv_data->len -
++ (drv_data->rx_end - drv_data->rx);
++
++ drv_data->cur_msg->status = 0;
++ giveback(drv_data);
++}
++
++static void transfer_complete(struct driver_data *drv_data)
++{
++ /* Update total byte transfered return count actual bytes read */
++ drv_data->cur_msg->actual_length +=
++ drv_data->len - (drv_data->rx_end - drv_data->rx);
++
++ drv_data->cur_msg->status = 0;
++ giveback(drv_data);
++}
++
++static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
++{
++ void *reg = drv_data->ioaddr;
++ u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ?
++ drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
++
++ u32 irq_status = read_SSSR(reg) & irq_mask;
++ if (irq_status & SSSR_ROR) {
++ int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
++ return IRQ_HANDLED;
++ }
++
++ if (irq_status & SSSR_TINT) {
++ write_SSSR(SSSR_TINT, reg);
++ if (drv_data->read(drv_data)) {
++ int_transfer_complete(drv_data);
++ return IRQ_HANDLED;
++ }
++ }
++
++ /* Drain rx fifo, Fill tx fifo and prevent overruns */
++ do {
++ if (drv_data->read(drv_data)) {
++ int_transfer_complete(drv_data);
++ return IRQ_HANDLED;
++ }
++ } while (drv_data->write(drv_data));
++
++ if (drv_data->read(drv_data)) {
++ int_transfer_complete(drv_data);
++ return IRQ_HANDLED;
++ }
++
++ if (drv_data->tx == drv_data->tx_end)
++ write_SSCR1(read_SSCR1(reg) & ~SSCR1_TIE, reg);
++
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t ssp_int(int irq, void *dev_id)
++{
++ struct driver_data *drv_data = dev_id;
++ void *reg = drv_data->ioaddr;
++ u32 status = read_SSSR(reg);
++
++ #ifdef CONFIG_SPI_MRST_SLAVE_DMA
++ if (status & SSSR_ROR || status & SSSR_TUR) {
++ printk(KERN_DEBUG "--- SPI ROR or TUR Occred : SSSR=%x\n", status);
++ write_SSSR(SSSR_ROR, reg); /* Clear ROR */
++ write_SSSR(SSSR_TUR, reg); /* Clear TUR */
++ return IRQ_HANDLED;
++ }
++ return IRQ_NONE;
++ #endif
++ /* just return if this is not our interrupt */
++ if (!(read_SSSR(reg) & drv_data->mask_sr))
++ return IRQ_NONE;
++
++ if (!drv_data->cur_msg) {
++ write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
++ write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
++ write_SSSR(drv_data->clear_sr, reg);
++
++ /* Never fail */
++ return IRQ_HANDLED;
++ }
++ return drv_data->transfer_handler(drv_data);
++}
++
++static void poll_transfer(unsigned long data)
++{
++ struct driver_data *drv_data = (struct driver_data *)data;
++
++ if (drv_data->tx)
++ while (drv_data->tx != drv_data->tx_end) {
++ drv_data->write(drv_data);
++ drv_data->read(drv_data);
++ }
++
++ while (!drv_data->read(drv_data))
++ ;
++
++ transfer_complete(drv_data);
++}
++
++static int transfer(struct spi_device *spi, struct spi_message *msg)
++{
++ struct driver_data *drv_data = \
++ spi_slave_get_devdata(spi->slave);
++ unsigned long flags;
++ struct chip_data *chip = NULL;
++ struct spi_transfer *transfer = NULL;
++ void *reg = drv_data->ioaddr;
++ void *i2cReg = drv_data->I2C_ioaddr;
++ u32 clk_div = 0;
++ u8 bits = 0;
++ u32 cr0;
++ u32 cr1;
++ u32 sssr;
++
++ spin_lock_irqsave(&drv_data->lock, flags);
++ msg->actual_length = 0;
++ msg->status = -EINPROGRESS;
++ drv_data->cur_msg = msg;
++ /* Initial message state*/
++ msg->state = START_STATE;
++
++ /* We handle only one transfer message since the protocol module has to
++ control the out of band signaling. */
++ transfer = list_entry(msg->transfers.next,
++ struct spi_transfer,
++ transfer_list);
++
++ chip = spi_get_ctldata(msg->spi);
++
++ drv_data->busy = 1;
++
++ /* Check transfer length */
++ if (transfer->len > 8192) {
++ dev_warn(&drv_data->pdev->dev, "SPI-SLAVE: transfer "
++ "length greater than 8192\n");
++ msg->status = -EINVAL;
++ giveback(drv_data);
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++ return 0;
++ }
++
++ /* Setup the transfer state based on the type of transfer */
++ flush(drv_data);
++ drv_data->n_bytes = chip->n_bytes;
++ drv_data->tx = (void *)transfer->tx_buf;
++ drv_data->tx_end = drv_data->tx + transfer->len;
++ drv_data->rx = transfer->rx_buf;
++ drv_data->rx_end = drv_data->rx + transfer->len;
++ drv_data->rx_dma = transfer->rx_dma;
++ drv_data->tx_dma = transfer->tx_dma;
++ drv_data->len = transfer->len;
++ drv_data->write = drv_data->tx ? chip->write : null_writer;
++ drv_data->read = drv_data->rx ? chip->read : null_reader;
++
++ /* Change speed and bit per word on a per transfer */
++ cr0 = chip->cr0;
++ if (transfer->bits_per_word) {
++
++ bits = chip->bits_per_word;
++
++ clk_div = 0x0;
++
++ if (transfer->bits_per_word)
++ bits = transfer->bits_per_word;
++
++
++ if (bits <= 8) {
++ drv_data->n_bytes = 1;
++ drv_data->read = drv_data->read != null_reader ?
++ u8_reader : null_reader;
++ drv_data->write = drv_data->write != null_writer ?
++ u8_writer : null_writer;
++ } else if (bits <= 16) {
++ drv_data->n_bytes = 2;
++ drv_data->read = drv_data->read != null_reader ?
++ u16_reader : null_reader;
++ drv_data->write = drv_data->write != null_writer ?
++ u16_writer : null_writer;
++ } else if (bits <= 32) {
++ drv_data->n_bytes = 4;
++ drv_data->read = drv_data->read != null_reader ?
++ u32_reader : null_reader;
++ drv_data->write = drv_data->write != null_writer ?
++ u32_writer : null_writer;
++ }
++
++ cr0 = clk_div
++ | SSCR0_Motorola
++ | SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
++ | SSCR0_SSE
++ | SSCR0_TIM
++ | SSCR0_RIM
++ | (bits > 16 ? SSCR0_EDSS : 0);
++ }
++
++
++#ifdef CONFIG_SPI_MRST_SLAVE_DMA
++ drv_data->dma_mapped = 0;
++ if (chip->enable_dma)
++ drv_data->dma_mapped = map_dma_buffers(drv_data);
++#endif
++
++ msg->state = RUNNING_STATE;
++ /* Ensure we have the correct interrupt handler */
++ drv_data->transfer_handler = interrupt_transfer;
++ /* Clear status */
++ cr1 = chip->cr1 | chip->threshold;
++ write_SSSR(drv_data->clear_sr, reg);
++
++ /* Reload the config and do bitbanging only if SSP not-enable or not-synchronized */
++ if( ( read_SSSR(reg) & SSP_NOT_SYNC ) || (!(read_SSCR0(reg) & SSCR0_SSE) ) ) {
++
++ write_SSSR(drv_data->clear_sr, reg); /* clear status */
++ write_SSCR0(cr0 & ~SSCR0_SSE, reg);
++ write_SSPSP(0x02010007, reg);
++ write_SSTO(chip->timeout, reg);
++ write_SSCR1(0x13001DC0, reg); /* TBD remove hardcoded value */
++ write_SSCR0(cr0, reg);
++
++ /*
++ * This routine uses the DFx block to override the SSP inputs
++ * and outputs allowing us to bit bang SSPSCLK. On Langwell,
++ * we have to generate the clock to clear busy.
++ */
++
++ write_I2CDATA(0x3, i2cReg);
++ udelay(10);
++ write_I2CCTRL(0x01070034, i2cReg);
++ udelay(10);
++ write_I2CDATA(0x00000099, i2cReg);
++ udelay(10);
++ write_I2CCTRL(0x01070038, i2cReg);
++ udelay(10);
++ sssr = read_SSSR(reg);
++
++ /* Bit bang the clock until CSS clears */
++ while (sssr & 0x400000) {
++ write_I2CDATA(0x2, i2cReg);
++ udelay(10);
++ write_I2CCTRL(0x01070034, i2cReg);
++ udelay(10);
++ write_I2CDATA(0x3, i2cReg);
++ udelay(10);
++ write_I2CCTRL(0x01070034, i2cReg);
++ udelay(10);
++ sssr = read_SSSR(reg);
++ }
++
++ write_I2CDATA(0x0, i2cReg);
++ udelay(10);
++ write_I2CCTRL(0x01070038, i2cReg);
++
++ } else {
++ write_SSTO(chip->timeout, reg);
++ write_SSCR1(0x13001DC0, reg); /* TBD: remove hardcoded value */
++ }
++
++ /* transfer using DMA */
++ if (drv_data->dma_mapped) {
++#ifdef CONFIG_SPI_MRST_SLAVE_DMA
++ cr1 = cr1 | drv_data->dma_cr1;
++ write_SSCR1(0x13701DC0, reg); /* TBD: remove hardcoded value */
++ dma_transfer(drv_data);
++#endif
++ }
++
++ /* transfer using non interrupt polling */
++ else if (chip->poll_mode)
++ tasklet_schedule(&drv_data->poll_transfer);
++
++ /* transfer using interrupt driven programmed I/O */
++ else {
++ cr1 = cr1 | drv_data->int_cr1;
++ write_SSCR1(cr1, reg);
++ }
++
++ spin_unlock_irqrestore(&drv_data->lock, flags);
++ return 0;
++}
++
++static int setup(struct spi_device *spi)
++{
++ struct mrst_spi_chip *chip_info = NULL;
++ struct chip_data *chip;
++
++ if (!spi->bits_per_word)
++ spi->bits_per_word = 8;
++
++ if ((spi->bits_per_word < 4 || spi->bits_per_word > 32))
++ return -EINVAL;
++
++ /* Only alloc on first setup */
++ chip = spi_get_ctldata(spi);
++ if (!chip) {
++ chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
++ if (!chip) {
++ dev_err(&spi->dev,
++ "failed setup: can't allocate chip data\n");
++ return -ENOMEM;
++ }
++
++ chip->enable_dma = 1;
++ chip->poll_mode = 1;
++ chip->timeout = 1000;
++ chip->threshold = SSCR1_RxTresh(1) | SSCR1_TxTresh(1);
++ }
++
++ /*
++ * protocol drivers may change the chip settings, so...
++ * if chip_info exists, use it
++ */
++ chip_info = spi->controller_data;
++
++ /* chip_info isn't always needed */
++ chip->cr1 = 0;
++ if (chip_info) {
++
++ chip->timeout = chip_info->timeout;
++
++ chip->threshold = (SSCR1_RxTresh(chip_info->rx_threshold) &
++ SSCR1_RFT) |
++ (SSCR1_TxTresh(chip_info->tx_threshold) &
++ SSCR1_TFT);
++
++
++ if (chip_info->enable_loopback)
++ chip->cr1 = SSCR1_LBM;
++ }
++
++ chip->cr0 = SSCR0_Motorola
++ | SSCR0_DataSize(spi->bits_per_word > 16 ?
++ spi->bits_per_word - 16 : spi->bits_per_word)
++ | SSCR0_SSE
++ | SSCR0_TIM
++ | SSCR0_RIM
++ | (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
++ chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
++ chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
++ | (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);
++ /* set slave mode */
++ chip->cr1 |= SSCR1_SCLKDIR | SSCR1_SFRMDIR;
++ chip->cr1 |= SSCR1_SCFR; /* slave clock is not free running */
++ dev_dbg(&spi->dev, "%d bits/word, mode %d\n",
++ spi->bits_per_word,
++ spi->mode & 0x3);
++
++ if (spi->bits_per_word <= 8) {
++ chip->n_bytes = 1;
++ chip->read = u8_reader;
++ chip->write = u8_writer;
++ } else if (spi->bits_per_word <= 16) {
++ chip->n_bytes = 2;
++ chip->read = u16_reader;
++ chip->write = u16_writer;
++ } else if (spi->bits_per_word <= 32) {
++ chip->cr0 |= SSCR0_EDSS;
++ chip->n_bytes = 4;
++ chip->read = u32_reader;
++ chip->write = u32_writer;
++ } else {
++ dev_err(&spi->dev, "invalid wordsize\n");
++ return -ENODEV;
++ }
++ chip->bits_per_word = spi->bits_per_word;
++ spi_set_ctldata(spi, chip);
++
++ return 0;
++}
++
++static void cleanup(struct spi_device *spi)
++{
++ struct chip_data *chip = spi_get_ctldata(spi);
++
++ kfree(chip);
++}
++
++static struct mrst_spi_chip spidev_chip_info = {
++ .tx_threshold = 8, /* SSP hardware FIFO threshold */
++ .rx_threshold = 8, /* SSP hardware FIFO threshold */
++ .timeout = 235, /* See Intel documentation */
++};
++
++/*
++ * mrst_parse_spi_dib - mrst-ssp parse the spi device info block
++ * table
++ * @pdev: spi controller pci device structure
++ * @drv_data: spi controller driver data
++ * Context: can sleep
++ *
++ * ssp controller needs to parse the spi device info block table
++ * saved in PCI bar 1 and register them with the spi core subsystem.
++ */
++static void mrst_parse_spi_dib(struct pci_dev *pdev,
++ struct driver_data *drv_data)
++{
++ u32 dib_len;
++ void *dib_vaddr;
++ unsigned long dib_paddr;
++ struct spi_board_info info[1];
++ struct spi_dib_header *header;
++ struct spi_dib *dib;
++ int info_num, i, j, dib_bar;
++ u16 *pval;
++
++ dib_bar = 1;
++ dib_paddr = pci_resource_start(pdev, dib_bar);
++ dib_len = pci_resource_len(pdev, dib_bar);
++
++ printk(KERN_INFO "SPI-Slave: %s() - paddr = 0x%08lx, "
++ "iolen = 0x%x\n", __func__, dib_paddr, dib_len);
++
++ dib_vaddr = ioremap(dib_paddr, dib_len);
++ if (!dib_vaddr) {
++ dev_err(&pdev->dev, "%s(): ioremap failed\n", __func__);
++ goto err_ioremap;
++ }
++
++ /* bar1 contains a pointer to the SPI DIB table */
++ if (dib_len == 8) {
++ u32 *ptemp = (u32 *)dib_vaddr;
++ dib_len = *(ptemp + 1);
++ dib_vaddr = ioremap(*(unsigned long *)dib_vaddr, dib_len);
++ iounmap(ptemp);
++ }
++
++ header = (struct spi_dib_header *)dib_vaddr;
++ info_num = (header->length - sizeof(*header)) /
++ sizeof(*dib);
++ dib = (struct spi_dib *)&header[1];
++
++ /* search for our dib entry. */
++ for (i = 0; i < info_num; i++)
++ if (dib[i].host_num == 3)
++ break;
++ if (i == info_num)
++ return;
++
++ strncpy(info[0].modalias, dib[i].name, SPI_DIB_NAME_LEN);
++ info[0].irq = dib[i].irq;
++ info[0].bus_num = dib[i].host_num;
++ info[0].chip_select = dib[i].cs;
++ info[0].mode = 0;
++ info[0].max_speed_hz = 0;
++
++ printk(KERN_INFO "SPI-Slave: name = %s, irq = 0x%x, "
++ "bus = %d, cs = %d\n", info[0].modalias, info[0].irq,
++ info[0].bus_num, info[0].chip_select);
++
++ pval = (u16 *)&(dib[i].dev_data[0]);
++
++ info[0].controller_data = &spidev_chip_info; /* Slave chip config */
++
++ for (j = 0; j < 5; j++) {
++ spidev_chip_info.extra_data[j] = *pval;
++ pval++;
++ }
++
++ spi_register_board_info(info, 1);
++
++err_ioremap:
++ pci_release_region(pdev, dib_bar);
++
++ return;
++}
++
++static int mrst_spi_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++ struct device *dev = &pdev->dev;
++ struct spi_slave *slave;
++ struct driver_data *drv_data = 0;
++ int status = 0;
++ int pci_bar = 0;
++
++ printk(KERN_INFO "SPI-Slave: found PCI SSP controller(ID: %04x:%04x)\n",
++ pdev->vendor, pdev->device);
++
++ status = pci_enable_device(pdev);
++ if (status)
++ return status;
++
++ /* Allocate Slave with space for drv_data and null dma buffer */
++ slave = spi_alloc_slave(dev, sizeof(struct driver_data));
++
++ if (!slave) {
++ dev_err(&pdev->dev, "cannot alloc spi_slave\n");
++ status = -ENOMEM;
++ goto err_free_slave0;
++ }
++
++ drv_data = spi_slave_get_devdata(slave);
++ drv_data->slave = slave;
++
++ drv_data->pdev = pdev;
++ spin_lock_init(&drv_data->lock);
++
++ slave->bus_num = 3;
++ slave->num_chipselect = 1;
++ slave->cleanup = cleanup;
++ slave->setup = setup;
++ slave->transfer = transfer;
++
++ /* get basic io resource and map it */
++ drv_data->paddr = (void *)pci_resource_start(pdev, pci_bar);
++ drv_data->iolen = pci_resource_len(pdev, pci_bar);
++
++ status = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
++ if (status)
++ goto err_free_slave1;
++
++ drv_data->ioaddr =
++ ioremap_nocache((u32)drv_data->paddr, drv_data->iolen);
++ if (!drv_data->ioaddr) {
++ status = -ENOMEM;
++ goto err_free_slave2;
++ }
++ printk(KERN_INFO "SPI-Slave: ioaddr = : %08x\n", (int)drv_data->ioaddr);
++ printk(KERN_INFO "SPI-Slave: attaching to IRQ: %04x\n", pdev->irq);
++
++ mrst_parse_spi_dib(pdev, drv_data);
++
++ /* get base address of IPC registers */
++ drv_data->IPC_paddr = (void *)0xffae8000;
++ drv_data->IPC_ioaddr =
++ ioremap_nocache((unsigned long)drv_data->IPC_paddr, 0x80);
++ if (!drv_data->IPC_ioaddr) {
++ status = -ENOMEM;
++ goto err_free_slave3;
++ }
++ /* get base address of I2C_Serbus registers */
++ drv_data->I2C_paddr = (void *)0xff12b000;
++ drv_data->I2C_ioaddr =
++ ioremap_nocache((unsigned long)drv_data->I2C_paddr, 0x10);
++ if (!drv_data->I2C_ioaddr) {
++ status = -ENOMEM;
++ goto err_free_slave4;
++ }
++
++ printk(KERN_INFO "SPI-Slave: IPC_ioaddr = : %08x\n",
++ (int)drv_data->IPC_ioaddr);
++ printk(KERN_INFO "SPI-Slave: IPCCSR = : %08x\n",
++ read_IPCCSR(drv_data->IPC_ioaddr));
++ write_IPCCSR(0x802, drv_data->IPC_ioaddr);
++ printk(KERN_INFO "SPI-Slave: IPCCSR = : %08x\n",
++ read_IPCCSR(drv_data->IPC_ioaddr));
++
++ /* Attach to IRQ */
++ drv_data->irq = pdev->irq;
++ status = request_irq(drv_data->irq, ssp_int, IRQF_SHARED,
++ "mrst_spi3", drv_data);
++ if (status < 0) {
++ dev_err(&pdev->dev, "can not get IRQ\n");
++ goto err_free_slave5;
++ }
++
++ drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
++ drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TRAIL;
++ drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
++ drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
++
++ tasklet_init(&drv_data->poll_transfer,
++ poll_transfer, (unsigned long)drv_data);
++
++ /* Setup DMA if requested */
++
++ /* Load default SSP configuration */
++ printk(KERN_INFO "SPI-Slave: setup default SSP configuration\n");
++ write_SSCR0(0, drv_data->ioaddr);
++ write_SSCR1(SSCR1_RxTresh(4) | SSCR1_TxTresh(12), drv_data->ioaddr);
++ write_SSCR0(SSCR0_Motorola
++ | SSCR0_DataSize(8),
++ drv_data->ioaddr);
++ write_SSTO(0, drv_data->ioaddr);
++ write_SSPSP(0x02010007, drv_data->ioaddr);
++
++ /* Register with the SPI framework */
++ printk(KERN_INFO "SPI-Slave: register with SPI framework\n");
++
++ status = spi_register_slave(slave);
++
++ if (status != 0) {
++ dev_err(&pdev->dev, "problem registering spi slave\n");
++ goto err_free_slave6;
++ }
++
++#ifdef CONFIG_SPI_MRST_SLAVE_DMA
++ mrst_spi_dma_init(drv_data);
++#endif
++
++ pci_set_drvdata(pdev, drv_data);
++
++ return status;
++
++err_free_slave6:
++ free_irq(drv_data->irq, drv_data);
++err_free_slave5:
++ iounmap(drv_data->I2C_ioaddr);
++err_free_slave4:
++ iounmap(drv_data->IPC_ioaddr);
++err_free_slave3:
++ iounmap(drv_data->ioaddr);
++err_free_slave2:
++ pci_release_region(pdev, pci_bar);
++err_free_slave1:
++ spi_slave_put(slave);
++err_free_slave0:
++ pci_disable_device(pdev);
++
++ return status;
++}
++
++static void __devexit mrst_spi_remove(struct pci_dev *pdev)
++{
++ struct driver_data *drv_data = pci_get_drvdata(pdev);
++
++ if (!drv_data)
++ return;
++
++ pci_set_drvdata(pdev, NULL);
++
++#ifdef CONFIG_SPI_MRST_SLAVE_DMA
++ mrst_spi_dma_exit(drv_data);
++ pci_dev_put(drv_data->dmac1);
++#endif
++
++ /* Release IRQ */
++ free_irq(drv_data->irq, drv_data);
++
++ iounmap(drv_data->ioaddr);
++ iounmap(drv_data->I2C_ioaddr);
++ iounmap(drv_data->IPC_ioaddr);
++
++ pci_release_region(pdev, 0);
++
++ /* disconnect from the SPI framework */
++ spi_unregister_slave(drv_data->slave);
++
++ pci_disable_device(pdev);
++
++ return;
++}
++
++#ifdef CONFIG_PM
++
++static int mrst_spi_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ struct driver_data *drv_data = pci_get_drvdata(pdev);
++ printk(KERN_ERR "spi-slave: suspend\n");
++
++ tasklet_disable(&drv_data->poll_transfer);
++
++ return 0;
++}
++
++static int mrst_spi_resume(struct pci_dev *pdev)
++{
++ struct driver_data *drv_data = pci_get_drvdata(pdev);
++ printk(KERN_ERR "spi-slave: resume\n");
++
++ tasklet_enable(&drv_data->poll_transfer);
++
++ return 0;
++}
++#else
++#define mrst_spi_suspend NULL
++#define mrst_spi_resume NULL
++#endif /* CONFIG_PM */
++
++
++static const struct pci_device_id pci_ids[] __devinitdata = {
++
++ {
++ .vendor = PCI_VENDOR_ID_INTEL,
++ .device = 0x0815,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ },
++ {},
++};
++
++static struct pci_driver mrst_spi_slave_driver = {
++ .name = DRIVER_NAME,
++ .id_table = pci_ids,
++ .probe = mrst_spi_probe,
++ .remove = __devexit_p(mrst_spi_remove),
++ .suspend = mrst_spi_suspend,
++ .resume = mrst_spi_resume,
++};
++
++static int __init mrst_spi_init(void)
++{
++ return pci_register_driver(&mrst_spi_slave_driver);
++}
++
++late_initcall_sync(mrst_spi_init);
++
++static void __exit mrst_spi_exit(void)
++{
++ pci_unregister_driver(&mrst_spi_slave_driver);
++}
++module_exit(mrst_spi_exit);
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index b76f246..f58f8c3 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -27,10 +27,16 @@
+ #include <linux/spi/spi.h>
+
+
+-/* SPI bustype and spi_master class are registered after board init code
+- * provides the SPI device tables, ensuring that both are present by the
+- * time controller driver registration causes spi_devices to "enumerate".
+- */
++/* SPI bustype, spi_master and spi_slave class are registered after board
++* init code provides the SPI device tables, ensuring that both are present
++* by the time controller driver registration causes spi_devices
++* to "enumerate".
++*/
++
++/* SPI Slave Support is added for new spi slave devices: It uses common APIs,
++* apart from few new APIs and a spi_slave structure.
++*/
++
+ static void spidev_release(struct device *dev)
+ {
+ struct spi_device *spi = to_spi_device(dev);
+@@ -43,11 +49,22 @@ static void spidev_release(struct device *dev)
+ kfree(dev);
+ }
+
++static void spidev_slave_release(struct device *dev)
++{
++ struct spi_device *spi = to_spi_device(dev);
++
++ /* spi slave may cleanup */
++ if (spi->slave->cleanup)
++ spi->slave->cleanup(spi);
++
++ spi_slave_put(spi->slave);
++ kfree(dev);
++}
++
+ static ssize_t
+ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
+ {
+ const struct spi_device *spi = to_spi_device(dev);
+-
+ return sprintf(buf, "%s\n", spi->modalias);
+ }
+
+@@ -177,10 +194,13 @@ int spi_register_driver(struct spi_driver *sdrv)
+ sdrv->driver.bus = &spi_bus_type;
+ if (sdrv->probe)
+ sdrv->driver.probe = spi_drv_probe;
++
+ if (sdrv->remove)
+ sdrv->driver.remove = spi_drv_remove;
++
+ if (sdrv->shutdown)
+ sdrv->driver.shutdown = spi_drv_shutdown;
++
+ return driver_register(&sdrv->driver);
+ }
+ EXPORT_SYMBOL_GPL(spi_register_driver);
+@@ -201,6 +221,7 @@ struct boardinfo {
+
+ static LIST_HEAD(board_list);
+ static DEFINE_MUTEX(board_lock);
++static DEFINE_MUTEX(slave_board_lock);
+
+ /**
+ * spi_alloc_device - Allocate a new SPI device
+@@ -221,28 +242,70 @@ static DEFINE_MUTEX(board_lock);
+ */
+ struct spi_device *spi_alloc_device(struct spi_master *master)
+ {
+- struct spi_device *spi;
++ struct spi_device *spi_m_dev;
+ struct device *dev = master->dev.parent;
+
+ if (!spi_master_get(master))
+ return NULL;
+
+- spi = kzalloc(sizeof *spi, GFP_KERNEL);
+- if (!spi) {
++ spi_m_dev = kzalloc(sizeof *spi_m_dev, GFP_KERNEL);
++ if (!spi_m_dev) {
+ dev_err(dev, "cannot alloc spi_device\n");
+ spi_master_put(master);
+ return NULL;
+ }
+
+- spi->master = master;
+- spi->dev.parent = dev;
+- spi->dev.bus = &spi_bus_type;
+- spi->dev.release = spidev_release;
+- device_initialize(&spi->dev);
+- return spi;
++ spi_m_dev->master = master;
++ spi_m_dev->using_slave = 0;
++ spi_m_dev->dev.parent = dev;
++ spi_m_dev->dev.bus = &spi_bus_type;
++ spi_m_dev->dev.release = spidev_release;
++ device_initialize(&spi_m_dev->dev);
++ return spi_m_dev;
+ }
+ EXPORT_SYMBOL_GPL(spi_alloc_device);
+
++/*
++* spi_alloc_slave_device - Allocate a new SPI device
++* @slave: Controller to which device is connected
++* Context: can sleep
++*
++* Allows a driver to allocate and initialize a spi_device without
++* registering it immediately. This allows a driver to directly
++* fill the spi_device with device parameters before calling
++* spi_add_slave_device() on it.
++*
++* Caller is responsible to call spi_add_slave_device() on the returned
++* spi_device structure to add it to the SPI slave. If the caller
++* needs to discard the spi_device without adding it, then it should
++* call spi_dev_slave_put() on it.
++* Returns a pointer to the new device, or NULL.
++*/
++struct spi_device *spi_alloc_slave_device(struct spi_slave *slave)
++{
++ struct spi_device *spi_s;
++ struct device *dev = slave->dev.parent;
++
++ if (!spi_slave_get(slave))
++ return NULL;
++
++ spi_s = kzalloc(sizeof *spi_s, GFP_KERNEL);
++ if (!spi_s) {
++ dev_err(dev, "cannot alloc spi_slave_device\n");
++ spi_slave_put(slave);
++ return NULL;
++ }
++
++ spi_s->slave = slave;
++ spi_s->using_slave = 9;
++ spi_s->dev.parent = dev;
++ spi_s->dev.bus = &spi_bus_type;
++ spi_s->dev.release = spidev_slave_release;
++ device_initialize(&spi_s->dev);
++ return spi_s;
++}
++EXPORT_SYMBOL_GPL(spi_alloc_slave_device);
++
+ /**
+ * spi_add_device - Add spi_device allocated with spi_alloc_device
+ * @spi: spi_device to register
+@@ -301,6 +364,7 @@ int spi_add_device(struct spi_device *spi)
+ if (status < 0)
+ dev_err(dev, "can't %s %s, status %d\n",
+ "add", dev_name(&spi->dev), status);
++
+ else
+ dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
+
+@@ -311,6 +375,74 @@ done:
+ EXPORT_SYMBOL_GPL(spi_add_device);
+
+ /**
++* spi_add_slave_device - Add spi_device allocated with spi_alloc_slave_device
++* @spi: spi_device to register
++*
++* Companion function to spi_alloc_slave_device. Devices allocated with
++* spi_alloc_slave_device can be added onto the spi bus with this function.
++*
++* Returns 0 on success; negative errno on failure
++*/
++int spi_add_slave_device(struct spi_device *spi)
++{
++ static DEFINE_MUTEX(spi_slave_add_lock);
++ struct device *dev = spi->slave->dev.parent;
++ int status;
++
++ /* Chipselects are numbered 0..max; validate. */
++ if (spi->chip_select >= spi->slave->num_chipselect) {
++ dev_err(dev, "cs%d >= max %d\n",
++ spi->chip_select,
++ spi->slave->num_chipselect);
++ return -EINVAL;
++ }
++
++ /* Set the bus ID string */
++ dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->slave->dev),
++ spi->chip_select);
++
++
++ /* We need to make sure there's no other device with this
++ * chipselect **BEFORE** we call setup(), else we'll trash
++ * its configuration. Lock against concurrent add() calls.
++ */
++ mutex_lock(&spi_slave_add_lock);
++
++ if (bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev))
++ != NULL) {
++ dev_err(dev, "chipselect %d already in use\n",
++ spi->chip_select);
++ status = -EBUSY;
++ goto done;
++ }
++
++ /* Drivers may modify this initial i/o setup, but will
++ * normally rely on the device being setup. Devices
++ * using SPI_CS_HIGH can't coexist well otherwise...
++ */
++ status = spi->slave->setup(spi);
++ if (status < 0) {
++ dev_err(dev, "can't %s %s, status %d\n",
++ "setup", dev_name(&spi->dev), status);
++ goto done;
++ }
++
++ /* Device may be bound to an active driver when this returns */
++ status = device_add(&spi->dev);
++ if (status < 0)
++ dev_err(dev, "can't %s %s, status %d\n",
++ "add", dev_name(&spi->dev), status);
++ else
++ dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
++
++done:
++ mutex_unlock(&spi_slave_add_lock);
++ return status;
++}
++EXPORT_SYMBOL_GPL(spi_add_slave_device);
++
++
++/**
+ * spi_new_device - instantiate one new SPI device
+ * @master: Controller to which device is connected
+ * @chip: Describes the SPI device
+@@ -341,6 +473,8 @@ struct spi_device *spi_new_device(struct spi_master *master,
+ if (!proxy)
+ return NULL;
+
++
++
+ WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
+
+ proxy->chip_select = chip->chip_select;
+@@ -363,6 +497,54 @@ struct spi_device *spi_new_device(struct spi_master *master,
+ EXPORT_SYMBOL_GPL(spi_new_device);
+
+ /**
++* spi_slave_new_device - instantiate one new SPI device
++* @slave: Controller to which device is connected
++* @chip: Describes the SPI device
++* Context: can sleep
++*
++* On typical mainboards, this is purely internal; and it's not needed
++* after board init creates the hard-wired devices. Some development
++* platforms may not be able to use spi_register_board_info though, and
++* this is exported so that for example a USB or parport based adapter
++* driver could add devices (which it would learn about out-of-band).
++*
++* Returns the new device, or NULL.
++*/
++struct spi_device *spi_slave_new_device(struct spi_slave *slave,
++ struct spi_board_info *chip)
++{
++ struct spi_device *proxy_slave;
++ int status;
++
++ proxy_slave = spi_alloc_slave_device(slave);
++
++ if (!proxy_slave)
++ return NULL;
++
++ WARN_ON(strlen(chip->modalias) >= sizeof(proxy_slave->modalias));
++
++ proxy_slave->chip_select = chip->chip_select;
++ proxy_slave->max_speed_hz = chip->max_speed_hz;
++ proxy_slave->mode = chip->mode;
++ proxy_slave->irq = chip->irq;
++ strlcpy(proxy_slave->modalias, chip->modalias,
++ sizeof(proxy_slave->modalias));
++ proxy_slave->dev.platform_data = (void *) chip->platform_data;
++ proxy_slave->controller_data = chip->controller_data;
++ proxy_slave->controller_state = NULL;
++
++ status = spi_add_slave_device(proxy_slave);
++ if (status < 0) {
++ spi_dev_put(proxy_slave);
++ return NULL;
++ }
++
++ return proxy_slave;
++}
++EXPORT_SYMBOL_GPL(spi_slave_new_device);
++
++
++/**
+ * spi_register_board_info - register SPI devices for a given board
+ * @info: array of chip descriptors
+ * @n: how many descriptors are provided
+@@ -389,6 +571,7 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
+ bi = kmalloc(sizeof(*bi) + n * sizeof *info, GFP_KERNEL);
+ if (!bi)
+ return -ENOMEM;
++
+ bi->n_board_info = n;
+ memcpy(bi->board_info, info, n * sizeof *info);
+
+@@ -398,6 +581,7 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
+ return 0;
+ }
+
++
+ /* FIXME someone should add support for a __setup("spi", ...) that
+ * creates board info from kernel command lines
+ */
+@@ -423,6 +607,28 @@ static void scan_boardinfo(struct spi_master *master)
+ mutex_unlock(&board_lock);
+ }
+
++static void spi_slave_scan_boardinfo(struct spi_slave *slave)
++{
++ struct boardinfo *bi;
++
++ mutex_lock(&slave_board_lock);
++ list_for_each_entry(bi, &board_list, list) {
++ struct spi_board_info *chip = bi->board_info;
++ unsigned n;
++
++ for (n = bi->n_board_info; n > 0; n--, chip++) {
++ if (chip->bus_num != slave->bus_num)
++ continue;
++ /* NOTE: this relies on spi_new_device to
++ * issue diagnostics when given bogus inputs
++ */
++ (void) spi_slave_new_device(slave, chip);
++
++ }
++ }
++ mutex_unlock(&slave_board_lock);
++}
++
+ /*-------------------------------------------------------------------------*/
+
+ static void spi_master_release(struct device *dev)
+@@ -439,6 +645,19 @@ static struct class spi_master_class = {
+ .dev_release = spi_master_release,
+ };
+
++static void spi_slave_release(struct device *dev)
++{
++ struct spi_slave *slave;
++
++ slave = container_of(dev, struct spi_slave, dev);
++ kfree(slave);
++}
++
++static struct class spi_slave_class = {
++ .name = "spi_slave",
++ .owner = THIS_MODULE,
++ .dev_release = spi_slave_release,
++};
+
+ /**
+ * spi_alloc_master - allocate SPI master controller
+@@ -480,6 +699,47 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
+ EXPORT_SYMBOL_GPL(spi_alloc_master);
+
+ /**
++* spi_alloc_slave - allocate SPI slave controller
++* @dev: the controller, possibly using the platform_bus
++* @size: how much zeroed driver-private data to allocate; the pointer to this
++* memory is in the driver_data field of the returned device,
++* accessible with spi_slave_get_devdata().
++* Context: can sleep
++*
++* This call is used only by SPI master controller drivers, which are the
++* only ones directly touching chip registers. It's how they allocate
++* an spi_master structure, prior to calling spi_register_slave().
++*
++* This must be called from context that can sleep. It returns the SPI
++* master structure on success, else NULL.
++*
++* The caller is responsible for assigning the bus number and initializing
++* the master's methods before calling spi_register_slave(); and (after errors
++* adding the device) calling spi_slave_put() to prevent a memory leak.
++*/
++struct spi_slave *spi_alloc_slave(struct device *dev, unsigned size)
++{
++ struct spi_slave *slave;
++
++ if (!dev)
++ return NULL;
++
++ slave = kzalloc(size + sizeof *slave, GFP_KERNEL);
++ if (!slave)
++ return NULL;
++
++ device_initialize(&slave->dev);
++ slave->dev.class = &spi_slave_class;
++ slave->dev.parent = get_device(dev);
++ spi_slave_set_devdata(slave, &slave[1]);
++
++ return slave;
++}
++EXPORT_SYMBOL_GPL(spi_alloc_slave);
++
++
++
++/**
+ * spi_register_master - register SPI master controller
+ * @master: initialized master, originally from spi_alloc_master()
+ * Context: can sleep
+@@ -531,7 +791,8 @@ int spi_register_master(struct spi_master *master)
+ status = device_add(&master->dev);
+ if (status < 0)
+ goto done;
+- dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
++
++ dev_dbg(dev, "spi_register_master() : %s%s\n", dev_name(&master->dev),
+ dynamic ? " (dynamic)" : "");
+
+ /* populate children from any spi device tables */
+@@ -542,6 +803,71 @@ done:
+ }
+ EXPORT_SYMBOL_GPL(spi_register_master);
+
++/**
++* spi_register_slave - register SPI slave controller
++* @master: initialized master, originally from spi_alloc_slave()
++* Context: can sleep
++*
++* SPI slave controllers connect to their drivers using some non-SPI bus,
++* such as the platform bus. The final stage of probe() in that code
++* includes calling spi_register_slave() to hook up to this SPI bus glue.
++*
++* SPI controllers use board specific (often SOC specific) bus numbers,
++* and board-specific addressing for SPI devices combines those numbers
++* with chip select numbers. Since SPI does not directly support dynamic
++* device identification, boards need configuration tables telling which
++* chip is at which address.
++*
++* This must be called from context that can sleep. It returns zero on
++* success, else a negative error code (dropping the slave's refcount).
++* After a successful return, the caller is responsible for calling
++* spi_unregister_slave().
++*/
++int spi_register_slave(struct spi_slave *slave)
++{
++ static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
++ struct device *dev = slave->dev.parent;
++ int status = -ENODEV;
++ int dynamic = 0;
++
++ if (!dev)
++ return -ENODEV;
++
++ /* even if it's just one always-selected device, there must
++ * be at least one chipselect
++ */
++ if (slave->num_chipselect == 0)
++ return -EINVAL;
++
++ /* convention: dynamically assigned bus IDs count down from the max */
++ if (slave->bus_num < 0) {
++ /* FIXME switch to an IDR based scheme, something like
++ * I2C now uses, so we can't run out of "dynamic" IDs
++ */
++ slave->bus_num = atomic_dec_return(&dyn_bus_id);
++ dynamic = 1;
++ }
++
++ /* register the device, then userspace will see it.
++ * registration fails if the bus ID is in use.
++ */
++ dev_set_name(&slave->dev, "spi%u", slave->bus_num);
++ status = device_add(&slave->dev);
++ if (status < 0)
++ goto done;
++
++ dev_dbg(dev, "registered slave %s%s\n", dev_name(&slave->dev),
++ dynamic ? " (dynamic)" : "");
++
++ /* populate children from any spi device tables */
++ spi_slave_scan_boardinfo(slave);
++ status = 0;
++done:
++ return status;
++}
++EXPORT_SYMBOL_GPL(spi_register_slave);
++
++
+
+ static int __unregister(struct device *dev, void *master_dev)
+ {
+@@ -571,6 +897,27 @@ void spi_unregister_master(struct spi_master *master)
+ }
+ EXPORT_SYMBOL_GPL(spi_unregister_master);
+
++/**
++* spi_unregister_slave - unregister SPI slave controller
++* @master: the slave being unregistered
++* Context: can sleep
++*
++* This call is used only by SPI slave controller drivers, which are the
++* only ones directly touching chip registers.
++*
++* This must be called from context that can sleep.
++*/
++void spi_unregister_slave(struct spi_slave *slave)
++{
++ int dummy;
++
++ dummy = device_for_each_child(slave->dev.parent, &slave->dev,
++ __unregister);
++ device_unregister(&slave->dev);
++}
++EXPORT_SYMBOL_GPL(spi_unregister_slave);
++
++
+ static int __spi_master_match(struct device *dev, void *data)
+ {
+ struct spi_master *m;
+@@ -718,7 +1065,12 @@ int spi_async(struct spi_device *spi, struct spi_message *message)
+
+ message->spi = spi;
+ message->status = -EINPROGRESS;
+- return master->transfer(spi, message);
++
++ /* TODO: ugly*/
++ if (spi->using_slave == 9)
++ return spi->slave->transfer(spi, message); /* Slave */
++ else
++ return spi->master->transfer(spi, message); /* Master */
+ }
+ EXPORT_SYMBOL_GPL(spi_async);
+
+@@ -773,6 +1125,18 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
+ }
+ EXPORT_SYMBOL_GPL(spi_sync);
+
++/* spi_transfer_async - Wraper function to allow spi_async to expose to
++* user protocol drivers for modem handshaking
++*/
++
++int spi_transfer_async(struct spi_device *spi, struct spi_message *message)
++{
++ int status;
++ status = spi_async(spi, message);
++ return status;
++}
++EXPORT_SYMBOL_GPL(spi_transfer_async);
++
+ /* portable code must never pass more than 32 bytes */
+ #define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
+
+@@ -871,6 +1235,12 @@ static int __init spi_init(void)
+ status = class_register(&spi_master_class);
+ if (status < 0)
+ goto err2;
++
++ status = class_register(&spi_slave_class);
++
++ if (status < 0)
++ goto err2;
++
+ return 0;
+
+ err2:
+@@ -890,4 +1260,3 @@ err0:
+ * include needing to have boardinfo data structures be much more public.
+ */
+ postcore_initcall(spi_init);
+-
+diff --git a/include/linux/spi/mrst_spi_slave.h b/include/linux/spi/mrst_spi_slave.h
+new file mode 100644
+index 0000000..4d73f0e
+--- /dev/null
++++ b/include/linux/spi/mrst_spi_slave.h
+@@ -0,0 +1,143 @@
++/*
++ * Copyright (C) Intel 2009
++ * Ken Mills <ken.k.mills@intel.com>
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ */
++#ifndef MRST_SSP_H_
++#define MRST_SSP_H_
++
++
++/*
++ * Langwell SSP serial port register definitions
++ */
++
++#define SSCR0_DSS (0x0000000f) /* Data Size Select (mask) */
++#define SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..16] */
++#define SSCR0_FRF (0x00000030) /* FRame Format (mask) */
++#define SSCR0_Motorola (0x0 << 4) /* Motorola's SPI mode */
++#define SSCR0_ECS (1 << 6) /* External clock select */
++#define SSCR0_SSE (1 << 7) /* Synchronous Serial Port Enable */
++
++
++#define SSCR0_SCR (0x000fff00) /* Serial Clock Rate (mask) */
++#define SSCR0_SerClkDiv(x) (((x) - 1) << 8) /* Divisor [1..4096] */
++#define SSCR0_EDSS (1 << 20) /* Extended data size select */
++#define SSCR0_NCS (1 << 21) /* Network clock select */
++#define SSCR0_RIM (1 << 22) /* Receive FIFO overrrun int mask */
++#define SSCR0_TUM (1 << 23) /* Transmit FIFO underrun int mask */
++#define SSCR0_FRDC (0x07000000) /* Frame rate divider control (mask) */
++#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame */
++#define SSCR0_ADC (1 << 30) /* Audio clock select */
++#define SSCR0_MOD (1 << 31) /* Mode (normal or network) */
++
++
++#define SSCR1_RIE (1 << 0) /* Receive FIFO Interrupt Enable */
++#define SSCR1_TIE (1 << 1) /* Transmit FIFO Interrupt Enable */
++#define SSCR1_LBM (1 << 2) /* Loop-Back Mode */
++#define SSCR1_SPO (1 << 3) /* SSPSCLK polarity setting */
++#define SSCR1_SPH (1 << 4) /* Motorola SPI SSPSCLK phase setting */
++#define SSCR1_MWDS (1 << 5) /* Microwire Transmit Data Size */
++#define SSCR1_TFT (0x000003c0) /* Transmit FIFO Threshold (mask) */
++#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
++#define SSCR1_RFT (0x00003c00) /* Receive FIFO Threshold (mask) */
++#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
++
++#define SSSR_TNF (1 << 2) /* Transmit FIFO Not Full */
++#define SSSR_RNE (1 << 3) /* Receive FIFO Not Empty */
++#define SSSR_BSY (1 << 4) /* SSP Busy */
++#define SSSR_TFS (1 << 5) /* Transmit FIFO Service Request */
++#define SSSR_RFS (1 << 6) /* Receive FIFO Service Request */
++#define SSSR_ROR (1 << 7) /* Receive FIFO Overrun */
++
++#define SSCR0_TIM (1 << 23) /* Transmit FIFO Under Run Int Mask */
++#define SSCR0_RIM (1 << 22) /* Receive FIFO Over Run int Mask */
++#define SSCR0_NCS (1 << 21) /* Network Clock Select */
++#define SSCR0_EDSS (1 << 20) /* Extended Data Size Select */
++
++#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */
++#define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */
++#define SSCR1_TTELP (1 << 31) /* TXD Tristate Enable Last Phase */
++#define SSCR1_TTE (1 << 30) /* TXD Tristate Enable */
++#define SSCR1_EBCEI (1 << 29) /* Enable Bit Count Error interrupt */
++#define SSCR1_SCFR (1 << 28) /* Slave Clock free Running */
++#define SSCR1_ECRA (1 << 27) /* Enable Clock Request A */
++#define SSCR1_ECRB (1 << 26) /* Enable Clock request B */
++#define SSCR1_SCLKDIR (1 << 25) /* Serial Bit Rate Clock Direction */
++#define SSCR1_SFRMDIR (1 << 24) /* Frame Direction */
++#define SSCR1_RWOT (1 << 23) /* Receive Without Transmit */
++#define SSCR1_TRAIL (1 << 22) /* Trailing Byte */
++#define SSCR1_TSRE (1 << 21) /* Transmit Service Request Enable */
++#define SSCR1_RSRE (1 << 20) /* Receive Service Request Enable */
++#define SSCR1_TINTE (1 << 19) /* Receiver Time-out Interrupt enable */
++#define SSCR1_PINTE (1 << 18) /* Trailing Byte Interupt Enable */
++#define SSCR1_STRF (1 << 15) /* Select FIFO or EFWR */
++#define SSCR1_EFWR (1 << 14) /* Enable FIFO Write/Read */
++
++#define SSSR_BCE (1 << 23) /* Bit Count Error */
++#define SSSR_CSS (1 << 22) /* Clock Synchronisation Status */
++#define SSSR_TUR (1 << 21) /* Transmit FIFO Under Run */
++#define SSSR_EOC (1 << 20) /* End Of Chain */
++#define SSSR_TINT (1 << 19) /* Receiver Time-out Interrupt */
++#define SSSR_PINT (1 << 18) /* Peripheral Trailing Byte Interrupt */
++
++#define SSPSP_FSRT (1 << 25) /* Frame Sync Relative Timing */
++#define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */
++#define SSPSP_SFRMWDTH(x) ((x) << 16) /* Serial Frame Width */
++#define SSPSP_SFRMDLY(x) ((x) << 9) /* Serial Frame Delay */
++#define SSPSP_DMYSTRT(x) ((x) << 7) /* Dummy Start */
++#define SSPSP_STRTDLY(x) ((x) << 4) /* Start Delay */
++#define SSPSP_ETDS (1 << 3) /* End of Transfer data State */
++#define SSPSP_SFRMP (1 << 2) /* Serial Frame Polarity */
++#define SSPSP_SCMODE(x) ((x) << 0) /* Serial Bit Rate Clock Mode */
++
++/* spi_board_info.controller_data for SPI slave devices,
++ * copied to spi_device.platform_data ... mostly for dma tuning
++ */
++struct mrst_spi_chip {
++ u8 tx_threshold;
++ u8 rx_threshold;
++ u8 dma_burst_size;
++ u32 timeout;
++ u8 enable_loopback;
++ u16 extra_data[5];
++};
++
++
++#define SPI_DIB_NAME_LEN 16
++#define SPI_DIB_SPEC_INFO_LEN 10
++
++struct spi_dib_header {
++ u32 signature;
++ u32 length;
++ u8 rev;
++ u8 checksum;
++ u8 dib[0];
++} __attribute__((packed));
++
++struct spi_dib {
++ u16 host_num;
++ u16 cs;
++ u16 irq;
++ char name[SPI_DIB_NAME_LEN];
++ u8 dev_data[SPI_DIB_SPEC_INFO_LEN];
++} __attribute__((packed));
++
++#endif /*MRST_SSP_H_*/
+diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
+index 97b60b3..87b4d12 100644
+--- a/include/linux/spi/spi.h
++++ b/include/linux/spi/spi.h
+@@ -23,15 +23,19 @@
+ #include <linux/mod_devicetable.h>
+
+ /*
+- * INTERFACES between SPI master-side drivers and SPI infrastructure.
+- * (There's no SPI slave support for Linux yet...)
++ * INTERFACES between SPI Master/Slave side drivers and
++ * SPI infrastructure.
++ * SPI Slave Support added : It uses few new APIs and
++ * a new spi_slave struct
+ */
+ extern struct bus_type spi_bus_type;
+
+ /**
+ * struct spi_device - Master side proxy for an SPI slave device
+ * @dev: Driver model representation of the device.
+- * @master: SPI controller used with the device.
++ * @master: SPI Master controller used with the device.
++ * @slave: SPI Slave Controller used with the device
++ * @using_slave: SPI Slave Flag used by spi_async()
+ * @max_speed_hz: Maximum clock rate to be used with this chip
+ * (on this board); may be changed by the device's driver.
+ * The spi_transfer.speed_hz can override this for each transfer.
+@@ -68,6 +72,8 @@ extern struct bus_type spi_bus_type;
+ struct spi_device {
+ struct device dev;
+ struct spi_master *master;
++ struct spi_slave *slave;
++ u8 using_slave;
+ u32 max_speed_hz;
+ u8 chip_select;
+ u8 mode;
+@@ -143,7 +149,6 @@ static inline void *spi_get_drvdata(struct spi_device *spi)
+ struct spi_message;
+
+
+-
+ /**
+ * struct spi_driver - Host side "protocol" driver
+ * @id_table: List of SPI devices supported by this driver
+@@ -295,16 +300,56 @@ struct spi_master {
+ void (*cleanup)(struct spi_device *spi);
+ };
+
++/**
++ * struct spi_slave - interface to SPI Slave Controller
++ * @dev: device interface to this driver
++ * @bus_num: board-specific (and often SOC-specific) identifier for a
++ * given SPI controller.
++ * @num_chipselect: chipselects are used to distinguish individual
++ * SPI slaves, and are numbered from zero to num_chipselects.
++ * each slave has a chipselect signal, but it's common that not
++ * every chipselect is connected to a slave.
++ * @setup: updates the device mode and clocking records used by a
++ * device's SPI controller; protocol code may call this. This
++ * must fail if an unrecognized or unsupported mode is requested.
++ * It's always safe to call this unless transfers are pending on
++ * the device whose settings are being modified.
++ * @transfer: adds a message to the controller's transfer queue.
++ * @cleanup: frees controller-specific state
++ */
++struct spi_slave {
++ struct device dev;
++ s16 bus_num;
++ u16 num_chipselect;
++
++ int (*setup)(struct spi_device *spi);
++
++ int (*transfer)(struct spi_device *spi,
++ struct spi_message *mesg);
++
++ void (*cleanup)(struct spi_device *spi);
++};
++
+ static inline void *spi_master_get_devdata(struct spi_master *master)
+ {
+ return dev_get_drvdata(&master->dev);
+ }
+
++static inline void *spi_slave_get_devdata(struct spi_slave *slave)
++{
++ return dev_get_drvdata(&slave->dev);
++}
++
+ static inline void spi_master_set_devdata(struct spi_master *master, void *data)
+ {
+ dev_set_drvdata(&master->dev, data);
+ }
+
++static inline void spi_slave_set_devdata(struct spi_slave *slave, void *data)
++{
++ dev_set_drvdata(&slave->dev, data);
++}
++
+ static inline struct spi_master *spi_master_get(struct spi_master *master)
+ {
+ if (!master || !get_device(&master->dev))
+@@ -312,20 +357,42 @@ static inline struct spi_master *spi_master_get(struct spi_master *master)
+ return master;
+ }
+
++static inline struct spi_slave *spi_slave_get(struct spi_slave *slave)
++{
++ if (!slave || !get_device(&slave->dev))
++ return NULL;
++ return slave;
++}
++
+ static inline void spi_master_put(struct spi_master *master)
+ {
+ if (master)
+ put_device(&master->dev);
+ }
+
++static inline void spi_slave_put(struct spi_slave *slave)
++{
++ if (slave)
++ put_device(&slave->dev);
++}
++
+
+ /* the spi driver core manages memory for the spi_master classdev */
+ extern struct spi_master *
+ spi_alloc_master(struct device *host, unsigned size);
+
++extern struct spi_slave *
++spi_alloc_slave(struct device *host, unsigned size);
++
++
+ extern int spi_register_master(struct spi_master *master);
++
++extern int spi_register_slave(struct spi_slave *slave);
++
+ extern void spi_unregister_master(struct spi_master *master);
+
++extern void spi_unregister_slave(struct spi_slave *slave);
++
+ extern struct spi_master *spi_busnum_to_master(u16 busnum);
+
+ /*---------------------------------------------------------------------------*/
+@@ -551,6 +618,18 @@ extern int spi_async(struct spi_device *spi, struct spi_message *message);
+
+ extern int spi_sync(struct spi_device *spi, struct spi_message *message);
+
++static inline int
++spi_slave_setup(struct spi_device *spi)
++{
++ return spi->slave->setup(spi);
++}
++
++
++/* spi_transfer_async() exposes spi_async() functionality */
++extern int spi_transfer_async(struct spi_device *spi,
++ struct spi_message *message);
++
++
+ /**
+ * spi_write - SPI synchronous write
+ * @spi: device to which data will be written
+@@ -759,12 +838,23 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
+ extern struct spi_device *
+ spi_alloc_device(struct spi_master *master);
+
++extern struct spi_device *
++spi_alloc_slave_device(struct spi_slave *slave);
++
+ extern int
+ spi_add_device(struct spi_device *spi);
+
++extern int
++spi_add_slave_device(struct spi_device *spi);
++
++
+ extern struct spi_device *
+ spi_new_device(struct spi_master *, struct spi_board_info *);
+
++extern struct spi_device *
++spi_slave_new_device(struct spi_slave *, struct spi_board_info *);
++
++
+ static inline void
+ spi_unregister_device(struct spi_device *spi)
+ {
+--
+1.6.2.5
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-thermal-emc1403-driver.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-thermal-emc1403-driver.patch
new file mode 100644
index 0000000..667db02
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-thermal-emc1403-driver.patch
@@ -0,0 +1,285 @@
+From e11104658a3b8eabbb4eb74b38645a4f114b745d Mon Sep 17 00:00:00 2001
+From: Kalhan Trisal <kalhan.trisal@intel.com>
+Date: Fri, 4 Sep 2009 18:10:52 -0400
+Subject: [PATCH 075/104] Thermal patch for emc1403 driver
+
+Thermal driver will handle event generation even if alert handler is called multiple time due to GPE->GPIO changes. The IRQ has become now shared and the handler will be called on every event as shared with other devices also.
+
+Signed-off-by: Kalhan Trisal <kalhan.trisal@intel.com>
+---
+ drivers/hwmon/Kconfig | 2 +-
+ drivers/hwmon/emc1403.c | 128 +++++++++++++++++++++--------------------------
+ 2 files changed, 58 insertions(+), 72 deletions(-)
+
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index 55637c3..c8fefbc 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -55,7 +55,7 @@ config SENSORS_LIS331DL
+
+ config SENSORS_EMC1403
+ tristate "SMSC EMC1403 Thermal"
+- depends on I2C_MRST && GPE && GPIO_MAX7315 && MSTWN_POWER_MGMT
++ depends on I2C
+ help
+ If you say yes here you get support for the SMSC Devices
+ EMC1403 temperature monitoring chip.
+diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
+index 45cf5d0..75e3b15 100644
+--- a/drivers/hwmon/emc1403.c
++++ b/drivers/hwmon/emc1403.c
+@@ -44,8 +44,7 @@ MODULE_LICENSE("GPL v2");
+ /* To support the interrupt mechanism for moorestown interrupt flag is added
+ * If the flag is not enabled it support generic emc1403 chip */
+
+-#if defined(CONFIG_GPIO_LNWPMIC) && defined(CONFIG_GPIO_MAX7315) \
+- && defined(CONFIG_MSTWN_POWER_MGMT)
++#if defined(CONFIG_GPIO_LANGWELL_PMIC) && defined(CONFIG_MSTWN_POWER_MGMT)
+ #define MOORESTOWN_INTERRUPT_ENABLE
+ #endif
+
+@@ -335,40 +334,6 @@ static ssize_t store_power_state(struct device *dev,
+ return count;
+ }
+
+-static ssize_t show_mode(struct device *dev,
+- struct device_attribute *attr, char *buf)
+-{
+- struct i2c_client *client = to_i2c_client(dev);
+- int ret_val;
+-
+- ret_val = i2c_read_current_data(client, 0x03);
+- ret_val = ret_val & 0x80;
+- if (ret_val == 0x80)
+- ret_val = 1;
+- return sprintf(buf, "%x", ret_val);
+-}
+-
+-static ssize_t store_mode(struct device *dev,
+- struct device_attribute *attr, const char *buf, size_t count)
+-{
+- struct i2c_client *client = to_i2c_client(dev);
+- unsigned long val = 0;
+- char curr_val;
+-
+- if (strict_strtoul(buf, 10, &val))
+- return -EINVAL;
+-
+- curr_val = i2c_read_current_data(client, 0x03);
+- if (val == INTERRUPT_MODE_ENABLE)
+- curr_val = curr_val & 0x7F;
+- else if (val == INTERRUPT_MODE_DISABLE)
+- curr_val = curr_val | 0x80;
+- else
+- return -EINVAL;
+- i2c_write_current_data(client, 0x03, curr_val);
+- return count;
+-}
+-
+ static SENSOR_DEVICE_ATTR_2(temp1_min, S_IRUGO | S_IWUSR,
+ show_temp_auto_offset, store_temp_auto_offset, 0, 1);
+ static SENSOR_DEVICE_ATTR_2(temp1_max, S_IRUGO | S_IWUSR,
+@@ -398,7 +363,6 @@ static DEVICE_ATTR(status, S_IRUGO, show_status_reg, NULL);
+
+ static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
+ show_power_state, store_power_state);
+-static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, show_mode, store_mode);
+
+ static struct attribute *mid_att_thermal[] = {
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+@@ -416,7 +380,6 @@ static struct attribute *mid_att_thermal[] = {
+ &dev_attr_hyster.attr,
+ &dev_attr_status.attr,
+ &dev_attr_power_state.attr,
+- &dev_attr_mode.attr,
+ NULL
+ };
+
+@@ -449,102 +412,124 @@ static irqreturn_t alert_interrupt_handler(int id, void *dev)
+ return IRQ_HANDLED;
+ }
+
++
+ /* when the device raise the interrupt we mask the interrupt
+ * bit for that device as the status register is R-C
+ * so that till thermal governor doesnot take action we need
+ * not to send continuous events */
+
+ static int interrupt_status(struct i2c_client *client, u8 diode_reg_val,
+- u8 *status, u8 event)
++ u8 *mask_status, u8 event, u8 *status)
+ {
+- u8 crit_st = 0, set_mask = 0;
++ u8 crit_st = 0, set_mask = 0, event_status = 0;
+
+ set_mask = i2c_read_current_data(client, 0x1F);
+ if (diode_reg_val & THM_CHAN_TEMP[3]) {
+- set_mask = (set_mask | 0x02);
+- crit_st = (crit_st | 2);
++ if (set_mask & TEMP2)
++ event_status = event_status | TEMP2;
++ else
++ set_mask = set_mask | 0x02;
++ crit_st = crit_st | 2;
+ }
+ if (diode_reg_val & THM_CHAN_TEMP[2]) {
+- set_mask = (set_mask | 0x04);
+- crit_st = (crit_st | 4);
++ if (set_mask & TEMP3)
++ event_status = event_status | TEMP3;
++ else
++ set_mask = set_mask | 0x04;
++ crit_st = crit_st | 4;
+ }
+ if (diode_reg_val & THM_CHAN_TEMP[4]) {
+- set_mask = (set_mask | 0x01);
+- crit_st = (crit_st | 1);
++ if (set_mask & TEMP1)
++ event_status = event_status | TEMP1;
++ else
++ set_mask = set_mask | 0x01;
++ crit_st = crit_st | 1;
+ }
+ if (event == ALERT_EVENT)
+- i2c_smbus_write_byte_data(client, 0x1F, set_mask);
+- *status = crit_st;
++ i2c_smbus_write_byte_data(client, 0x1f, set_mask);
++ *mask_status = crit_st;
++ *status = event_status;
+ return 0;
+ }
+
+ static void ospm_event(int event_id, int sensor_id, int curr_temp)
+ {
+ if (event_id == THERM_EVENT) {
+- printk(KERN_ALERT "emc1403: Sensor Id = %d crit event \
++ printk(KERN_ALERT "emc1403: sensor id = %d crit event \
+ temp = %d \n", sensor_id, curr_temp);
+ ospm_generate_netlink_event(sensor_id,
+ OSPM_EVENT_THERMAL_CRITICAL);
+ }
+ if (event_id == HIGH_EVENT) {
+- printk(KERN_ALERT "emc1403: Sensor Id = %d AUX1 event \
++ printk(KERN_ALERT "emc1403: sensor id = %d aux1 event \
+ temp = %d \n", sensor_id, curr_temp);
+ ospm_generate_netlink_event(sensor_id,
+ OSPM_EVENT_THERMAL_AUX1);
+ }
+ if (event_id == LOW_EVENT) {
+- printk(KERN_ALERT "emc1403: Sensor Id = %d AUX0 event \
++ printk(KERN_ALERT "emc1403: sensor id = %d aux0 event \
+ temp = %d \n", sensor_id, curr_temp);
+ ospm_generate_netlink_event(sensor_id,
+ OSPM_EVENT_THERMAL_AUX0);
+ }
+ if (event_id == FAULT_EVENT) {
+- printk(KERN_ALERT "emc1403: Sensor Id = %d Fault event \
++ printk(KERN_ALERT "emc1403: sensor id = %d fault event \
+ temp = %d \n", sensor_id, curr_temp);
+ ospm_generate_netlink_event(sensor_id,
+ OSPM_EVENT_THERMAL_DEV_FAULT);
+ }
+ }
+
+-static void send_event(struct i2c_client *client, int status, int event_id)
++static void send_event(struct i2c_client *client, int status, u8 mask_event,
++ int event_id)
+ {
+ int ret_val;
+
+ if (status & TEMP1) {
+- ret_val = i2c_read_current_data(client, THM_REG_CURR_TEMP[0]);
+- ospm_event(event_id, TEMP_DEV_ID1, ret_val);
++ if (!(TEMP1 & mask_event)) {
++ ret_val = i2c_read_current_data(client,
++ THM_REG_CURR_TEMP[0]);
++ ospm_event(event_id, TEMP_DEV_ID1, ret_val);
++ }
+ }
+ if (status & TEMP2) {
+- ret_val = i2c_read_current_data(client, THM_REG_CURR_TEMP[1]);
+- ospm_event(event_id, TEMP_DEV_ID2, ret_val);
++ if (!(TEMP2 & mask_event)) {
++ ret_val = i2c_read_current_data(client,
++ THM_REG_CURR_TEMP[1]);
++ ospm_event(event_id, TEMP_DEV_ID2, ret_val);
++ }
+ }
+ if (status & TEMP3) {
+- ret_val = i2c_read_current_data(client, THM_REG_CURR_TEMP[2]);
+- ospm_event(event_id, TEMP_DEV_ID3, ret_val);
++ if (!(TEMP3 & mask_event)) {
++ ret_val = i2c_read_current_data(client,
++ THM_REG_CURR_TEMP[2]);
++ ospm_event(event_id, TEMP_DEV_ID3, ret_val);
++ }
+ }
+ }
+
+ static void therm_handle_intrpt(struct work_struct *work)
+ {
+- u8 status, reg_val;
++ u8 status, reg_val, mask_status;
+ struct thermal_data *data = container_of(work,
+ struct thermal_data, therm_handler);
+
+ /* check if therm_module_info is initialized */
+ if (!data)
+ return;
+- /* Which DIODE has raised the interrupt 0x1B
+- internal/External1/External2 */
++ /* which diode has raised the interrupt 0x1b
++ internal/external1/external2 */
+ reg_val = i2c_smbus_read_byte_data(data->client,
+ THM_STAT_REG_TEMP[0]);
+- interrupt_status(data->client, reg_val, &status, THERM_EVENT);
+- send_event(data->client, status, THERM_EVENT);
++ interrupt_status(data->client, reg_val, &status, THERM_EVENT,
++ &mask_status);
++ send_event(data->client, status, 0, THERM_EVENT);
+ }
+
+ static void alert_handle_intrpt(struct work_struct *work)
+ {
+ int sta_reg_val, reg_val;
+- u8 status;
++ u8 status, mask_status;
+ struct thermal_data *data = container_of(work,
+ struct thermal_data, alert_handler);
+ if (!data)
+@@ -557,25 +542,26 @@ static void alert_handle_intrpt(struct work_struct *work)
+ internal/External1/External2 */
+ sta_reg_val = i2c_smbus_read_byte_data(data->client,
+ THM_STAT_REG_TEMP[1]);
++	/* check if the mask is already enabled then do not send the event again */
+ interrupt_status(data->client, sta_reg_val, &status,
+- ALERT_EVENT);
+- send_event(data->client, status, HIGH_EVENT);
++ ALERT_EVENT, &mask_status);
++ send_event(data->client, status, mask_status, HIGH_EVENT);
+ }
+ /* Low status bit is set */
+ if (reg_val & THM_CHAN_TEMP[1]) {
+ sta_reg_val = i2c_smbus_read_byte_data(data->client,
+ THM_STAT_REG_TEMP[2]);
+ interrupt_status(data->client, sta_reg_val, &status,
+- ALERT_EVENT);
+- send_event(data->client, status, LOW_EVENT);
++ ALERT_EVENT, &mask_status);
++ send_event(data->client, status, mask_status, LOW_EVENT);
+ }
+ /* Fault status bit is set */
+ if (reg_val & THM_CHAN_TEMP[2]) {
+ sta_reg_val = i2c_smbus_read_byte_data(data->client,
+ THM_STAT_REG_TEMP[3]);
+ interrupt_status(data->client, sta_reg_val, &status,
+- ALERT_EVENT);
+- send_event(data->client, status, FAULT_EVENT);
++ ALERT_EVENT, &mask_status);
++ send_event(data->client, status, mask_status, FAULT_EVENT);
+ }
+ }
+ #endif
+--
+1.6.2.5
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-touchscreen-driver.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-touchscreen-driver.patch
new file mode 100644
index 0000000..1b11cd3
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-touchscreen-driver.patch
@@ -0,0 +1,996 @@
+From f7ba5de3af0c7d3abd7624676e07752e5d8b7ebd Mon Sep 17 00:00:00 2001
+From: Jacob Pan <jacob.jun.pan@intel.com>
+Date: Fri, 4 Dec 2009 10:57:07 -0800
+Subject: [PATCH 040/104] MRST: touch screen driver
+
+---
+ drivers/input/touchscreen/Kconfig | 9 +
+ drivers/input/touchscreen/Makefile | 2 +
+ drivers/input/touchscreen/mrstouch.c | 947 ++++++++++++++++++++++++++++++++++
+ 3 files changed, 958 insertions(+), 0 deletions(-)
+ create mode 100644 drivers/input/touchscreen/mrstouch.c
+
+diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
+index dfafc76..6dd2674 100644
+--- a/drivers/input/touchscreen/Kconfig
++++ b/drivers/input/touchscreen/Kconfig
+@@ -577,4 +577,13 @@ config TOUCHSCREEN_PCAP
+
+ To compile this driver as a module, choose M here: the
+ module will be called pcap_ts.
++
++config TOUCHSCREEN_MRSTOUCH
++ tristate "Intel Moorstown Resistive touchscreen"
++ depends on LNW_IPC
++ default y
++ help
++ Say Y here if you have a Intel Moorstown based Touchscreen
++ If unsure, say N.
++
+ endif
+diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
+index d61a3b4..15ad257 100644
+--- a/drivers/input/touchscreen/Makefile
++++ b/drivers/input/touchscreen/Makefile
+@@ -45,3 +45,5 @@ obj-$(CONFIG_TOUCHSCREEN_WM97XX_ATMEL) += atmel-wm97xx.o
+ obj-$(CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE) += mainstone-wm97xx.o
+ obj-$(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE) += zylonite-wm97xx.o
+ obj-$(CONFIG_TOUCHSCREEN_W90X900) += w90p910_ts.o
++obj-$(CONFIG_TOUCHSCREEN_MRSTOUCH) += mrstouch.o
++
+diff --git a/drivers/input/touchscreen/mrstouch.c b/drivers/input/touchscreen/mrstouch.c
+new file mode 100644
+index 0000000..f6aba7d
+--- /dev/null
++++ b/drivers/input/touchscreen/mrstouch.c
+@@ -0,0 +1,947 @@
++/*
++ * mrstouch.c - Intel Moorestown Resistive Touch Screen Driver
++ *
++ * Copyright (C) 2008 Intel Corp
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ * Questions/Comments/Bug fixes to Sreedhara (sreedhara.ds@intel.com)
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/input.h>
++#include <linux/interrupt.h>
++#include <linux/err.h>
++#include <linux/param.h>
++#include <linux/spi/spi.h>
++#include <linux/irq.h>
++#include <linux/delay.h>
++#include <linux/kthread.h>
++#include <asm/ipc_defs.h>
++
++MODULE_AUTHOR("Sreedhara Murthy. D.S, sreedhara.ds@intel.com");
++MODULE_DESCRIPTION("Intel Moorestown Resistive Touch Screen Driver");
++MODULE_LICENSE("GPL");
++
++#if defined(MRSTOUCH_DEBUG)
++#define mrstouch_debug(fmt, args...)\
++ do { \
++ printk(KERN_DEBUG "\n[MRSTOUCH(%d)] - ", __LINE__); \
++ printk(KERN_DEBUG fmt, ##args); \
++ } while (0);
++#else
++#define mrstouch_debug(fmt, args...)
++#endif
++
++#define mrstouch_error(fmt, args...)\
++ do { \
++ printk(KERN_ERR "\n[MRSTOUCH(%d)] - ", __LINE__); \
++ printk(KERN_ERR fmt, ##args); \
++ } while (0);
++
++/* PMIC Interrupt registers */
++#define PMIC_REG_ID1 0x00 /*PMIC ID1 register */
++
++/* PMIC Interrupt registers */
++#define PMIC_REG_INT 0x04 /*PMIC interrupt register */
++#define PMIC_REG_MINT 0x05 /*PMIC interrupt mask register */
++
++/* ADC Interrupt registers */
++#define PMIC_REG_ADCINT 0x5F /*ADC interrupt register */
++#define PMIC_REG_MADCINT 0x60 /*ADC interrupt mask register */
++
++/* ADC Control registers */
++#define PMIC_REG_ADCCNTL1 0x61 /*ADC control register */
++
++/* ADC Channel Selection registers */
++#define PMICADDR0 0xA4
++#define END_OF_CHANNEL 0x1F
++
++/* ADC Result register */
++#define PMIC_REG_ADCSNS0H 0x64
++
++/* ADC channels for touch screen */
++#define MRST_TS_CHAN10 0xA /* Touch screen X+ connection */
++#define MRST_TS_CHAN11 0xB /* Touch screen X- connection */
++#define MRST_TS_CHAN12 0xC /* Touch screen Y+ connection */
++#define MRST_TS_CHAN13 0xD /* Touch screen Y- connection */
++
++/* Touch screen coordinate constants */
++#define TOUCH_PRESSURE 5
++#define TOUCH_PRESSURE_FS 100
++
++#define XMOVE_LIMIT 5
++#define YMOVE_LIMIT 5
++#define XYMOVE_CNT 3
++
++#define MAX_10BIT ((1<<10)-1)
++
++/* Touch screen channel BIAS constants */
++#define XBIAS 0x20
++#define YBIAS 0x40
++#define ZBIAS 0x80
++
++/* Touch screen coordinates */
++#define MIN_X 10
++#define MAX_X 1024
++#define MIN_Y 10
++#define MAX_Y 1024
++#define WAIT_ADC_COMPLETION 10
++
++/* PMIC ADC round robin delays */
++#define ADC_LOOP_DELAY0 0x0 /* Continuous loop */
++#define ADC_LOOP_DELAY1 0x1 /* 4.5 ms approximate */
++
++/* PMIC Vendor Identifiers */
++#define PMIC_VENDOR_FS 0 /* PMIC vendor FreeScale */
++#define PMIC_VENDOR_MAXIM 1 /* PMIC vendor MAXIM */
++#define PMIC_VENDOR_NEC 2 /* PMIC vendor NEC */
++#define MRSTOUCH_MAX_CHANNELS 32 /* Maximum ADC channels */
++
++/* Touch screen device structure */
++struct mrstouch_dev {
++ struct spi_device *spi; /* SPI device associated with touch screen */
++ struct input_dev *input; /* input device for touchscreen*/
++ char phys[32]; /* Device name */
++ struct task_struct *pendet_thrd; /* PENDET interrupt handler */
++ struct semaphore lock; /* Sync between interrupt and PENDET handler */
++ bool busy; /* Busy flag */
++ u16 asr; /* Address selection register */
++ int irq; /* Touch screen IRQ # */
++ uint vendor; /* PMIC vendor */
++ uint rev; /* PMIC revision */
++ bool suspended; /* Device suspended status */
++ bool disabled; /* Device disabled status */
++ u16 x; /* X coordinate */
++ u16 y; /* Y coordinate */
++ bool pendown; /* PEN position */
++} ;
++
++
++/* Global Pointer to Touch screen device */
++static struct mrstouch_dev *mrstouchdevp;
++
++/* Utility to read PMIC ID */
++static int mrstouch_pmic_id(uint *vendor, uint *rev)
++{
++ int err;
++ struct ipc_pmic_reg_data ipcbuf;
++
++ ipcbuf.ioc = 1;
++ ipcbuf.num_entries = 1;
++ ipcbuf.pmic_reg_data[0].register_address = PMIC_REG_ID1;
++
++ err = ipc_pmic_register_read(&ipcbuf);
++ if (err)
++ return -1;
++
++ *vendor = (ipcbuf.pmic_reg_data[0].value) & 0x7;
++ *rev = (ipcbuf.pmic_reg_data[0].value >> 3) & 0x7;
++
++ return 0;
++}
++
++/*
++ * Parse ADC channels to find end of the channel configured by other ADC user
++ * NEC and MAXIM require 4 channels and FreeScale needs 18 channels
++ */
++static int mrstouch_chan_parse(void)
++{
++ int err, i, j, chan, found;
++ struct ipc_pmic_reg_data ipcbuf;
++
++ ipcbuf.ioc = 1;
++
++ found = -1;
++ ipcbuf.num_entries = 4;
++
++ for (i = 0; i < MRSTOUCH_MAX_CHANNELS; i++) {
++ if (found >= 0)
++ break;
++
++ for (j = 0; j <= 3; j++)
++ ipcbuf.pmic_reg_data[j].register_address = PMICADDR0+i;
++
++ err = ipc_pmic_register_read(&ipcbuf);
++ if (err)
++ return -1;
++
++ for (j = 0; j < ipcbuf.num_entries; j++) {
++ chan = ipcbuf.pmic_reg_data[j].value;
++ if (chan == END_OF_CHANNEL) {
++ found = i;
++ break;
++ }
++ }
++ }
++
++ if (found < 0)
++ return 0;
++
++ if (mrstouchdevp->vendor == PMIC_VENDOR_FS) {
++ if (found && found > (MRSTOUCH_MAX_CHANNELS - 18))
++ return -1;
++ } else {
++ if (found && found > (MRSTOUCH_MAX_CHANNELS - 4))
++ return -1;
++ }
++ return found;
++}
++
++/* Utility to enable/disable pendet.
++ * pendet set to true enables PENDET interrupt
++ * pendet set to false disables PENDET interrupt
++ * Also clears RND mask bit
++*/
++static void pendet_enable(bool pendet)
++{
++ u8 adccntrl1 = 0;
++ u8 pendet_enabled = 0;
++ int retry = 0;
++ struct ipc_pmic_reg_data ipcbuf;
++
++ ipcbuf.ioc = 1;
++
++ ipcbuf.num_entries = 1;
++ ipcbuf.pmic_reg_data[0].register_address = PMIC_REG_ADCCNTL1;
++ ipc_pmic_register_read(&ipcbuf);
++ adccntrl1 = ipcbuf.pmic_reg_data[0].value;
++
++ if (pendet)
++ adccntrl1 |= 0x20; /* Enable pendet */
++ else
++ adccntrl1 &= 0xDF; /* Disable pendet */
++
++ ipcbuf.num_entries = 2;
++ ipcbuf.pmic_reg_data[0].register_address = PMIC_REG_MADCINT;
++ ipcbuf.pmic_reg_data[0].value = 0x0;
++ ipcbuf.pmic_reg_data[1].register_address = PMIC_REG_ADCCNTL1;
++ ipcbuf.pmic_reg_data[1].value = adccntrl1;
++ ipc_pmic_register_write(&ipcbuf, 1);
++
++ if (!pendet)
++ return;
++
++
++ /*
++ * Sometimes even after ipc_pmic_register_write success
++ * the PMIC register value is not updated. Retry few iterations
++ * to enable pendet.
++ */
++ ipcbuf.num_entries = 1;
++ ipcbuf.pmic_reg_data[0].register_address = PMIC_REG_ADCCNTL1;
++ ipc_pmic_register_read(&ipcbuf);
++ pendet_enabled = (ipcbuf.pmic_reg_data[0].value >> 5) & 0x01;
++
++ retry = 0;
++ while (!pendet_enabled) {
++ retry++;
++ msleep(10);
++ ipcbuf.pmic_reg_data[0].register_address = PMIC_REG_ADCCNTL1;
++ ipcbuf.pmic_reg_data[0].value = adccntrl1;
++ ipc_pmic_register_write(&ipcbuf, 1);
++
++ ipcbuf.pmic_reg_data[0].register_address = PMIC_REG_ADCCNTL1;
++ ipc_pmic_register_read(&ipcbuf);
++ pendet_enabled = (ipcbuf.pmic_reg_data[0].value >> 5) & 0x01;
++ if (retry >= 10) {
++ printk(KERN_ERR "Touch screen disabled\n");
++ break;
++ }
++ }
++}
++
++
++/* To read PMIC ADC touch screen result
++ * Reads ADC storage registers for higher 7 and lower 3 bits
++ * converts the two readings to single value and turns off gain bit
++ */
++static int mrstouch_ts_chan_read(u16 offset, u16 chan, u16 *vp, u16 *vm)
++{
++ int err, count;
++ u16 result;
++ struct ipc_pmic_reg_data ipcbuf;
++
++ ipcbuf.ioc = 1;
++ ipcbuf.num_entries = 4;
++
++ result = PMIC_REG_ADCSNS0H + offset;
++
++ if (chan == MRST_TS_CHAN12)
++ result += 4;
++
++ for (count = 0; count <= 3; count++)
++ ipcbuf.pmic_reg_data[count].register_address = result++;
++
++ err = ipc_pmic_register_read(&ipcbuf);
++ if (err)
++ return -1;
++
++ *vp = ipcbuf.pmic_reg_data[0].value << 3; /* Higher 7 bits */
++ *vp |= ipcbuf.pmic_reg_data[1].value & 0x7; /* Lower 3 bits */
++ *vp &= 0x3FF;
++
++ *vm = ipcbuf.pmic_reg_data[2].value << 3; /* Higher 7 bits */
++ *vm |= ipcbuf.pmic_reg_data[3].value & 0x7; /* Lower 3 bits */
++ *vm &= 0x3FF;
++
++ return 0;
++}
++
++/* To configure touch screen channels
++ * Writes touch screen channels to ADC address selection registers
++ */
++static int mrstouch_ts_chan_set(uint offset)
++{
++ int err, count;
++ u16 chan;
++ struct ipc_pmic_reg_data ipcbuf;
++
++ ipcbuf.ioc = 1;
++ ipcbuf.num_entries = 5;
++
++ chan = PMICADDR0 + offset;
++ for (count = 0; count <= 3; count++) {
++ ipcbuf.pmic_reg_data[count].register_address = chan++;
++ ipcbuf.pmic_reg_data[count].value = MRST_TS_CHAN10 + count;
++ }
++ ipcbuf.pmic_reg_data[count].register_address = chan;
++ ipcbuf.pmic_reg_data[count].value = END_OF_CHANNEL;
++
++ err = ipc_pmic_register_write(&ipcbuf, 1);
++ if (err)
++ return -1;
++
++ return 0;
++}
++
++/* Initialize ADC */
++static int mrstouch_adc_init(struct mrstouch_dev *tsdev)
++{
++ int err, start;
++ struct ipc_pmic_mod_reg_data ipcbuf;
++
++ err = mrstouch_pmic_id(&tsdev->vendor, &tsdev->rev);
++ if (err) {
++ printk(KERN_ERR "Error in reading PMIC Id");
++ return err;
++ }
++
++ start = mrstouch_chan_parse();
++ if (start == -1) {
++ printk(KERN_ERR "Error in parse channels");
++ return start;
++ }
++
++ tsdev->asr = start;
++
++ mrstouch_debug("Channel offset(%d): 0x%X\n", tsdev->asr, tsdev->vendor);
++
++ /* ADC power on, start, enable PENDET and set loop delay
++ * ADC loop delay is set to 4.5 ms approximately
++ * Loop delay more than this results in jitter in adc readings
++ * Setting loop delay to 0 (continuous loop) in MAXIM stops PENDET
++ * interrupt generation sometimes.
++ */
++ ipcbuf.ioc = 1;
++ ipcbuf.num_entries = 2;
++ ipcbuf.pmic_mod_reg_data[0].register_address = PMIC_REG_ADCCNTL1;
++ ipcbuf.pmic_mod_reg_data[0].bit_map = 0xE7;
++
++ ipcbuf.pmic_mod_reg_data[1].register_address = PMIC_REG_MADCINT;
++ ipcbuf.pmic_mod_reg_data[1].bit_map = 0x03;
++
++ if (tsdev->vendor == PMIC_VENDOR_FS) {
++ ipcbuf.pmic_mod_reg_data[0].value = 0xE0 | ADC_LOOP_DELAY0;
++ ipcbuf.pmic_mod_reg_data[1].value = 0x5;
++ } else {
++ /* NEC and MAXIm not consistent with loop delay 0 */
++ ipcbuf.pmic_mod_reg_data[0].value = 0xE0 | ADC_LOOP_DELAY1;
++ ipcbuf.pmic_mod_reg_data[1].value = 0x0;
++
++ /* configure touch screen channels */
++ err = mrstouch_ts_chan_set(tsdev->asr);
++ if (err)
++ return err;
++ }
++
++ err = ipc_pmic_register_read_modify(&ipcbuf);
++
++ return err;
++}
++
++/* Reports x,y coordinates to event subsystem */
++static void mrstouch_report_xy(u16 x, u16 y, u16 z)
++{
++ int xdiff, ydiff;
++
++ if (mrstouchdevp->pendown && z <= TOUCH_PRESSURE) {
++ /* Pen removed, report button release */
++ mrstouch_debug("BTN REL(%d)", z);
++ input_report_key(mrstouchdevp->input, BTN_TOUCH, 0);
++ mrstouchdevp->pendown = false;
++ }
++
++ xdiff = abs(x - mrstouchdevp->x);
++ ydiff = abs(y - mrstouchdevp->y);
++
++ /*
++ if x and y values changes for XYMOVE_CNT readings it is considered
++ as stylus is moving. This is required to differentiate between stylus
++ movement and jitter
++ */
++ if (x < MIN_X || x > MAX_X || y < MIN_Y || y > MAX_Y) {
++ /* Spurious values, release button if touched and return */
++ if (mrstouchdevp->pendown) {
++ mrstouch_debug("BTN REL(%d)", z);
++ input_report_key(mrstouchdevp->input, BTN_TOUCH, 0);
++ mrstouchdevp->pendown = false;
++ }
++ return;
++ } else if (xdiff >= XMOVE_LIMIT || ydiff >= YMOVE_LIMIT) {
++ mrstouchdevp->x = x;
++ mrstouchdevp->y = y;
++
++ input_report_abs(mrstouchdevp->input, ABS_X, x);
++ input_report_abs(mrstouchdevp->input, ABS_Y, y);
++ input_sync(mrstouchdevp->input);
++ }
++
++
++ if (!mrstouchdevp->pendown && z > TOUCH_PRESSURE) {
++ /* Pen touched, report button touch */
++ mrstouch_debug("BTN TCH(%d, %d, %d)", x, y, z);
++ input_report_key(mrstouchdevp->input, BTN_TOUCH, 1);
++ mrstouchdevp->pendown = true;
++ }
++}
++
++
++/* Utility to start ADC, used by freescale handler */
++static int pendet_mask(void)
++{
++ int err = 0;
++ struct ipc_pmic_mod_reg_data ipcbuf;
++
++ ipcbuf.ioc = 1;
++ ipcbuf.num_entries = 1;
++ ipcbuf.pmic_mod_reg_data[1].register_address = PMIC_REG_MADCINT;
++ ipcbuf.pmic_mod_reg_data[1].bit_map = 0x02;
++ ipcbuf.pmic_mod_reg_data[1].value = 0x01;
++
++ err = ipc_pmic_register_read_modify(&ipcbuf);
++
++ return err;
++}
++
++/* Utility to stop ADC, used by freescale handler */
++static int pendet_umask(void)
++{
++ int err = 0;
++ struct ipc_pmic_mod_reg_data ipcbuf;
++
++ ipcbuf.ioc = 1;
++ ipcbuf.num_entries = 1;
++ ipcbuf.pmic_mod_reg_data[1].register_address = PMIC_REG_MADCINT;
++ ipcbuf.pmic_mod_reg_data[1].bit_map = 0x02;
++ ipcbuf.pmic_mod_reg_data[1].value = 0x0;
++
++ err = ipc_pmic_register_read_modify(&ipcbuf);
++
++ return err;
++}
++
++/* Utility to read ADC, used by freescale handler */
++static int mrstouch_pmic_fs_adc_read(struct mrstouch_dev *tsdev)
++{
++ int err;
++ u16 x, y, z, result;
++ struct ipc_pmic_reg_data ipcbuf;
++
++ result = PMIC_REG_ADCSNS0H + tsdev->asr;
++
++ ipcbuf.ioc = 1;
++ ipcbuf.num_entries = 4;
++ ipcbuf.pmic_reg_data[0].register_address = result + 4;
++ ipcbuf.pmic_reg_data[1].register_address = result + 5;
++ ipcbuf.pmic_reg_data[2].register_address = result + 16;
++ ipcbuf.pmic_reg_data[3].register_address = result + 17;
++
++ err = ipc_pmic_register_read(&ipcbuf);
++ if (err)
++ goto ipc_error;
++
++ x = ipcbuf.pmic_reg_data[0].value << 3; /* Higher 7 bits */
++ x |= ipcbuf.pmic_reg_data[1].value & 0x7; /* Lower 3 bits */
++ x &= 0x3FF;
++
++ y = ipcbuf.pmic_reg_data[2].value << 3; /* Higher 7 bits */
++ y |= ipcbuf.pmic_reg_data[3].value & 0x7; /* Lower 3 bits */
++ y &= 0x3FF;
++
++ /* Read Z value */
++ ipcbuf.num_entries = 2;
++ ipcbuf.pmic_reg_data[0].register_address = result + 28;
++ ipcbuf.pmic_reg_data[1].register_address = result + 29;
++
++ err = ipc_pmic_register_read(&ipcbuf);
++ if (err)
++ goto ipc_error;
++
++ z = ipcbuf.pmic_reg_data[0].value << 3; /* Higher 7 bits */
++ z |= ipcbuf.pmic_reg_data[1].value & 0x7; /* Lower 3 bits */
++ z &= 0x3FF;
++
++#if defined(MRSTOUCH_PRINT_XYZP)
++ mrstouch_debug("X: %d, Y: %d, Z: %d", x, y, z);
++#endif
++
++ if (z >= TOUCH_PRESSURE_FS) {
++ mrstouch_report_xy(x, y, TOUCH_PRESSURE - 1); /* Pen Removed */
++ return TOUCH_PRESSURE - 1;
++ } else {
++ mrstouch_report_xy(x, y, TOUCH_PRESSURE + 1); /* Pen Touched */
++ return TOUCH_PRESSURE + 1;
++ }
++
++ return 0;
++
++ipc_error:
++ printk(KERN_ERR "IPC Error: %s", __func__);
++ return -1;
++}
++
++/* To handle free scale pmic pendet interrupt */
++static int pmic0_pendet(void *data)
++{
++ int err, count;
++ u16 chan;
++ unsigned int touched;
++ struct ipc_pmic_reg_data ipcbuf;
++ struct mrstouch_dev *tsdev = (struct mrstouch_dev *)data;
++
++ chan = PMICADDR0 + tsdev->asr;
++
++ ipcbuf.ioc = 1;
++ /* Set X BIAS */
++ ipcbuf.num_entries = 5;
++ for (count = 0; count <= 3; count++) {
++ ipcbuf.pmic_reg_data[count].register_address = chan++;
++ ipcbuf.pmic_reg_data[count].value = 0x2A;
++ }
++ ipcbuf.pmic_reg_data[count].register_address = chan++; /* Dummy */
++ ipcbuf.pmic_reg_data[count].value = 0;
++
++ err = ipc_pmic_register_write(&ipcbuf, 1);
++ if (err)
++ goto ipc_error;
++
++ msleep(WAIT_ADC_COMPLETION);
++
++ /* Set Y BIAS */
++ ipcbuf.num_entries = 5;
++ for (count = 0; count <= 3; count++) {
++ ipcbuf.pmic_reg_data[count].register_address = chan++;
++ ipcbuf.pmic_reg_data[count].value = 0x4A;
++ }
++ ipcbuf.pmic_reg_data[count].register_address = chan++; /* Dummy */
++ ipcbuf.pmic_reg_data[count].value = 0;
++
++ err = ipc_pmic_register_write(&ipcbuf, 1);
++ if (err)
++ goto ipc_error;
++
++ msleep(WAIT_ADC_COMPLETION);
++
++ /* Set Z BIAS */
++ chan += 2;
++ ipcbuf.num_entries = 4;
++ for (count = 0; count <= 3; count++) {
++ ipcbuf.pmic_reg_data[count].register_address = chan++;
++ ipcbuf.pmic_reg_data[count].value = 0x8A;
++ }
++
++ err = ipc_pmic_register_write(&ipcbuf, 1);
++ if (err)
++ goto ipc_error;
++
++ msleep(WAIT_ADC_COMPLETION);
++
++ /*Read touch screen channels till pen removed
++ * Freescale reports constant value of z for all points
++ * z is high when screen is not touched and low when touched
++ * Map high z value to not touched and low z value to pen touched
++ */
++ touched = mrstouch_pmic_fs_adc_read(tsdev);
++ while (touched > TOUCH_PRESSURE) {
++ touched = mrstouch_pmic_fs_adc_read(tsdev);
++ msleep(WAIT_ADC_COMPLETION);
++ }
++
++ /* Clear all TS channels */
++ chan = PMICADDR0 + tsdev->asr;
++ ipcbuf.ioc = 1;
++ ipcbuf.num_entries = 5;
++ for (count = 0; count <= 4; count++) {
++ ipcbuf.pmic_reg_data[count].register_address = chan++;
++ ipcbuf.pmic_reg_data[count].value = 0x0;
++ }
++ err = ipc_pmic_register_write(&ipcbuf, 1);
++ if (err)
++ goto ipc_error;
++
++ for (count = 0; count <= 4; count++) {
++ ipcbuf.pmic_reg_data[count].register_address = chan++;
++ ipcbuf.pmic_reg_data[count].value = 0x0;
++ }
++ err = ipc_pmic_register_write(&ipcbuf, 1);
++ if (err)
++ goto ipc_error;
++
++ chan += 2;
++ for (count = 0; count <= 4; count++) {
++ ipcbuf.pmic_reg_data[count].register_address = chan++;
++ ipcbuf.pmic_reg_data[count].value = 0x0;
++ }
++ err = ipc_pmic_register_write(&ipcbuf, 1);
++ if (err)
++ goto ipc_error;
++
++ return 0;
++
++ipc_error:
++ printk(KERN_ERR "IPC Error: %s", __func__);
++ return -1;
++}
++
++
++/* To enable X, Y and Z bias values
++ * Enables YPYM for X channels and XPXM for Y channels
++ */
++static int mrstouch_ts_bias_set(uint offset, uint bias)
++{
++ int err, count;
++ u16 chan, start;
++ struct ipc_pmic_reg_data ipcbuf;
++
++ chan = PMICADDR0 + offset;
++ start = MRST_TS_CHAN10;
++
++ ipcbuf.ioc = 1;
++ ipcbuf.num_entries = 4;
++
++ for (count = 0; count <= 3; count++) {
++ ipcbuf.pmic_reg_data[count].register_address = chan++;
++ ipcbuf.pmic_reg_data[count].value = bias | (start + count);
++ }
++
++ err = ipc_pmic_register_write(&ipcbuf, 1);
++ if (err)
++ return -1;
++
++ return 0;
++}
++
++/* To read touch screen channel values */
++static int mrstouch_adc_read(struct mrstouch_dev *tsdev)
++{
++ int err;
++ u16 xp, xm, yp, ym, zp, zm;
++
++ /* configure Y bias for X channels */
++ err = mrstouch_ts_bias_set(tsdev->asr, YBIAS);
++ if (err)
++ goto ipc_error;
++
++ msleep(WAIT_ADC_COMPLETION);
++
++ /* read x+ and x- channels */
++ err = mrstouch_ts_chan_read(tsdev->asr, MRST_TS_CHAN10, &xp, &xm);
++ if (err)
++ goto ipc_error;
++
++ /* configure x bias for y channels */
++ err = mrstouch_ts_bias_set(tsdev->asr, XBIAS);
++ if (err)
++ goto ipc_error;
++
++ msleep(WAIT_ADC_COMPLETION);
++
++ /* read y+ and y- channels */
++ err = mrstouch_ts_chan_read(tsdev->asr, MRST_TS_CHAN12, &yp, &ym);
++ if (err)
++ goto ipc_error;
++
++ /* configure z bias for x and y channels */
++ err = mrstouch_ts_bias_set(tsdev->asr, ZBIAS);
++ if (err)
++ goto ipc_error;
++
++ msleep(WAIT_ADC_COMPLETION);
++
++ /* read z+ and z- channels */
++ err = mrstouch_ts_chan_read(tsdev->asr, MRST_TS_CHAN10, &zp, &zm);
++ if (err)
++ goto ipc_error;
++
++#if defined(MRSTOUCH_PRINT_XYZP)
++ printk(KERN_INFO "X+: %d, Y+: %d, Z+: %d\n", xp, yp, zp);
++#endif
++
++#if defined(MRSTOUCH_PRINT_XYZM)
++ printk(KERN_INFO "X-: %d, Y-: %d, Z-: %d\n", xm, ym, zm);
++#endif
++
++ mrstouch_report_xy(xp, yp, zp); /* report x and y to eventX */
++
++ return zp;
++
++ipc_error:
++ printk(KERN_ERR "IPC Error: %s", __func__);
++ return -1;
++}
++
++/* PENDET interrupt handler function for NEC and MAXIM */
++static void pmic12_pendet(void *data)
++{
++ unsigned int touched;
++ struct mrstouch_dev *tsdev = (struct mrstouch_dev *)data;
++
++ /* read touch screen channels till pen removed */
++ touched = mrstouch_adc_read(tsdev);
++ while (touched > TOUCH_PRESSURE) {
++ msleep(WAIT_ADC_COMPLETION);
++ touched = mrstouch_adc_read(tsdev);
++ }
++}
++
++/* Handler to process PENDET interrupt */
++int mrstouch_pendet(void *data)
++{
++ struct mrstouch_dev *tsdev = (struct mrstouch_dev *)data;
++ while (1) {
++ /* Wait for PENDET interrupt */
++ if (down_interruptible(&tsdev->lock)) {
++ msleep(WAIT_ADC_COMPLETION);
++ continue;
++ }
++
++ if (tsdev->busy)
++ return 0;
++
++ tsdev->busy = true;
++
++ if (mrstouchdevp->vendor == PMIC_VENDOR_NEC ||
++ mrstouchdevp->vendor == PMIC_VENDOR_MAXIM) {
++ /* PENDET must be disabled in NEC before reading ADC */
++ pendet_enable(false); /* Disbale PENDET */
++ pmic12_pendet(mrstouchdevp);
++ pendet_enable(true); /*Enable PENDET */
++ } else if (mrstouchdevp->vendor == PMIC_VENDOR_FS) {
++ pendet_umask(); /* Stop ADC */
++ pmic0_pendet(mrstouchdevp);
++ pendet_mask(); /* Stop ADC */
++ } else
++ printk(KERN_ERR "Unknown PMIC, Not supported\n");
++
++ tsdev->busy = false;
++
++ }
++ return 0;
++}
++
++/* PENDET interrupt handler */
++static irqreturn_t pendet_intr_handler(int irq, void *handle)
++{
++ struct mrstouch_dev *tsdev = (struct mrstouch_dev *)handle;
++
++ up(&tsdev->lock);
++ return IRQ_HANDLED;
++}
++
++/* Intializes input device and registers with input subsystem */
++static int ts_input_dev_init(struct mrstouch_dev *tsdev, struct spi_device *spi)
++{
++ int err = 0;
++
++ mrstouch_debug("%s", __func__);
++
++ tsdev->input = input_allocate_device();
++ if (!tsdev->input) {
++ mrstouch_error("%s", "Input dev allocation failed");
++ return -1;
++ }
++
++ tsdev->input->name = "mrst_touchscreen";
++ snprintf(tsdev->phys, sizeof(tsdev->phys),
++ "%s/input0", dev_name(&spi->dev));
++ tsdev->input->phys = tsdev->phys;
++ tsdev->input->dev.parent = &spi->dev;
++
++ tsdev->input->id.vendor = tsdev->vendor;
++ tsdev->input->id.version = tsdev->rev;
++
++ tsdev->input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
++ tsdev->input->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
++
++ input_set_abs_params(tsdev->input, ABS_X, MIN_X, MIN_Y, 0, 0);
++ input_set_abs_params(tsdev->input, ABS_Y, MIN_X, MIN_Y, 0, 0);
++
++ err = input_register_device(tsdev->input);
++ if (err) {
++ mrstouch_error("%s", "Input dev registration failed");
++ input_free_device(tsdev->input);
++ return -1;
++ }
++
++ mrstouch_debug("%s", "mrstouch initialized");
++
++ return 0;
++
++}
++
++/* Probe function for touch screen driver */
++static int __devinit mrstouch_probe(struct spi_device *mrstouch_spi)
++{
++ int err;
++ unsigned int myirq;
++ struct mrstouch_dev *tsdev;
++
++ mrstouch_debug("%s(%p)", __func__, mrstouch_spi);
++
++ mrstouchdevp = NULL;
++ myirq = mrstouch_spi->irq;
++
++ if (!mrstouch_spi->irq) {
++ mrstouch_error("%s(%d)", "No IRQ", myirq);
++ return -1;
++ }
++
++ tsdev = kzalloc(sizeof(struct mrstouch_dev), GFP_KERNEL);
++ if (!tsdev) {
++ mrstouch_error("%s", "ERROR: Memory failure");
++ return -ENOMEM;
++ }
++
++ tsdev->irq = myirq;
++ mrstouchdevp = tsdev;
++
++ if (mrstouch_adc_init(tsdev) != 0) {
++ mrstouch_error("%s", "ADC init failed");
++ goto mrstouch_err_free_mem;
++ }
++
++ dev_set_drvdata(&mrstouch_spi->dev, tsdev);
++ tsdev->spi = mrstouch_spi;
++
++ err = ts_input_dev_init(tsdev, mrstouch_spi);
++ if (err != 0) {
++ mrstouch_error("%s", "ts_input_dev_init failed");
++ goto mrstouch_err_free_mem;
++ }
++
++ sema_init(&tsdev->lock, 1);
++ if (down_interruptible(&tsdev->lock)) {
++ mrstouch_error("%s", "tsdev->lock Error");
++ goto mrstouch_err_free_mem;
++ }
++
++ mrstouch_debug("Requesting IRQ-%d", myirq);
++ err = request_irq(myirq, pendet_intr_handler,
++ 0, "mrstouch", tsdev);
++ if (err) {
++ mrstouch_error("IRQ Request Failed - %d", err);
++ goto mrstouch_err_free_mem;
++ }
++
++ tsdev->pendet_thrd = kthread_run(mrstouch_pendet,
++ (void *)tsdev, "pendet handler");
++ if (IS_ERR(tsdev->pendet_thrd)) {
++ dev_err(&tsdev->spi->dev, "kthread_run failed \n");
++ goto mrstouch_err_free_mem;
++ }
++
++ mrstouch_debug("%s", "Driver initialized");
++
++ return 0;
++
++ mrstouch_err_free_mem:
++ kfree(tsdev);
++ return -1;
++}
++
++static int mrstouch_suspend(struct spi_device *spi, pm_message_t msg)
++{
++ mrstouch_debug("%s", __func__);
++ mrstouchdevp->suspended = 1;
++ return 0;
++}
++
++static int mrstouch_resume(struct spi_device *spi)
++{
++ mrstouch_debug("%s", __func__);
++ mrstouchdevp->suspended = 0;
++ return 0;
++}
++
++static int mrstouch_remove(struct spi_device *spi)
++{
++ mrstouch_debug("%s", __func__);
++ free_irq(mrstouchdevp->irq, mrstouchdevp);
++ input_unregister_device(mrstouchdevp->input);
++ input_free_device(mrstouchdevp->input);
++ kfree(mrstouchdevp);
++ if (mrstouchdevp->pendet_thrd)
++ kthread_stop(mrstouchdevp->pendet_thrd);
++ return 0;
++}
++
++static struct spi_driver mrstouch_driver = {
++ .driver = {
++ .name = "pmic_touch",
++ .bus = &spi_bus_type,
++ .owner = THIS_MODULE,
++ },
++ .probe = mrstouch_probe,
++ .suspend = mrstouch_suspend,
++ .resume = mrstouch_resume,
++ .remove = mrstouch_remove,
++};
++
++static int __init mrstouch_module_init(void)
++{
++ int err;
++
++ mrstouch_debug("%s", __func__);
++ err = spi_register_driver(&mrstouch_driver);
++ if (err) {
++ mrstouch_debug("%s(%d)", "SPI PENDET failed", err);
++ return -1;
++ }
++
++ return 0;
++}
++
++static void __exit mrstouch_module_exit(void)
++{
++ mrstouch_debug("%s", __func__);
++ spi_unregister_driver(&mrstouch_driver);
++ return;
++}
++
++module_init(mrstouch_module_init);
++module_exit(mrstouch_module_exit);
+--
+1.6.2.5
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-usb-otg-and-still-image-driver.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-usb-otg-and-still-image-driver.patch
new file mode 100644
index 0000000..40eecf6
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-moorestown-usb-otg-and-still-image-driver.patch
@@ -0,0 +1,8395 @@
+From f12c49e8bf1ac056946bc3098c6c361d51891916 Mon Sep 17 00:00:00 2001
+From: Henry Yuan <hang.yuan@intel.com>
+Date: Thu, 6 May 2010 19:30:00 +0800
+Subject: [PATCH] Moorestown USB-OTG drivers full patch 0.2 for MeeGo
+
+This is a consolidated full patch against K2.6.33. It
+includes USB-OTG client controller driver, transceiver
+driver, still image gadget driver and fixing for sighting
+3469616: OTG driver hangs in suspend function.
+
+OTG host, client functions and role switch per cable
+plugged are tested.
+Known issue: HNP/SRP have problem.
+
+Kernel config:
+CONFIG_USB_LANGWELL_OTG = y
+CONFIG_USB_OTG_WHITELIST = n
+CONFIG_USB_GADGET = y
+CONFIG_USB_GADGET_LANGWELL = y
+
+CONFIG_USB_STILL_IMAGE = y
+or select other gadget driver as needed.
+
+Signed-off-by: Henry Yuan <hang.yuan@intel.com>
+Patch-mainline: 2.6.34
+---
+ drivers/usb/gadget/Kconfig | 8 +
+ drivers/usb/gadget/Makefile | 2 +
+ drivers/usb/gadget/f_ecm.c | 22 +
+ drivers/usb/gadget/f_subset.c | 22 +
+ drivers/usb/gadget/langwell_udc.c | 582 ++++--
+ drivers/usb/gadget/langwell_udc.h | 13 +-
+ drivers/usb/gadget/still_image.c | 4566 +++++++++++++++++++++++++++++++++++++
+ drivers/usb/otg/Kconfig | 14 +
+ drivers/usb/otg/Makefile | 1 +
+ drivers/usb/otg/langwell_otg.c | 2260 ++++++++++++++++++
+ include/linux/usb/langwell_otg.h | 201 ++
+ include/linux/usb/langwell_udc.h | 13 +
+ 12 files changed, 7516 insertions(+), 188 deletions(-)
+ create mode 100644 drivers/usb/gadget/still_image.c
+ create mode 100644 drivers/usb/otg/langwell_otg.c
+ create mode 100644 include/linux/usb/langwell_otg.h
+
+diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
+index ee41120..94cc94f 100644
+--- a/drivers/usb/gadget/Kconfig
++++ b/drivers/usb/gadget/Kconfig
+@@ -853,6 +853,14 @@ config USB_G_MULTI_CDC
+
+ If unsure, say "y".
+
++config USB_STILL_IMAGE
++ tristate "Lite Still Image Gadget"
++ help
++ The Lite Still Image Gadget implements object transfer based on
++ spec PIMA 15740:2000.
++
++ Say "y" to link the driver statically, or "m" to build a dynamically
++ linked module called "g_still_image".
+
+ # put drivers that need isochronous transfer support (for audio
+ # or video class gadget drivers), or specific hardware, here.
+diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
+index 2e2c047..7ef974e 100644
+--- a/drivers/usb/gadget/Makefile
++++ b/drivers/usb/gadget/Makefile
+@@ -43,6 +43,7 @@ g_mass_storage-objs := mass_storage.o
+ g_printer-objs := printer.o
+ g_cdc-objs := cdc2.o
+ g_multi-objs := multi.o
++g_still_image-objs := still_image.o
+
+ obj-$(CONFIG_USB_ZERO) += g_zero.o
+ obj-$(CONFIG_USB_AUDIO) += g_audio.o
+@@ -55,4 +56,5 @@ obj-$(CONFIG_USB_G_PRINTER) += g_printer.o
+ obj-$(CONFIG_USB_MIDI_GADGET) += g_midi.o
+ obj-$(CONFIG_USB_CDC_COMPOSITE) += g_cdc.o
+ obj-$(CONFIG_USB_G_MULTI) += g_multi.o
++obj-$(CONFIG_USB_STILL_IMAGE) += g_still_image.o
+
+diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
+index ecf5bdd..d004328 100644
+--- a/drivers/usb/gadget/f_ecm.c
++++ b/drivers/usb/gadget/f_ecm.c
+@@ -753,6 +753,26 @@ ecm_unbind(struct usb_configuration *c, struct usb_function *f)
+ kfree(ecm);
+ }
+
++static void
++ecm_suspend(struct usb_function *f)
++{
++ struct f_ecm *ecm = func_to_ecm(f);
++ struct eth_dev *dev = ecm->port.ioport;
++
++ if (dev)
++ gether_disconnect(&ecm->port);
++}
++
++static void
++ecm_resume(struct usb_function *f)
++{
++ struct f_ecm *ecm = func_to_ecm(f);
++ struct eth_dev *dev = ecm->port.ioport;
++
++ if (!dev)
++ gether_connect(&ecm->port);
++}
++
+ /**
+ * ecm_bind_config - add CDC Ethernet network link to a configuration
+ * @c: the configuration to support the network link
+@@ -821,6 +841,8 @@ int __init ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+ ecm->port.func.get_alt = ecm_get_alt;
+ ecm->port.func.setup = ecm_setup;
+ ecm->port.func.disable = ecm_disable;
++ ecm->port.func.suspend = ecm_suspend;
++ ecm->port.func.resume = ecm_resume;
+
+ status = usb_add_function(c, &ecm->port.func);
+ if (status) {
+diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c
+index a9c98fd..893816d 100644
+--- a/drivers/usb/gadget/f_subset.c
++++ b/drivers/usb/gadget/f_subset.c
+@@ -353,6 +353,26 @@ geth_unbind(struct usb_configuration *c, struct usb_function *f)
+ kfree(func_to_geth(f));
+ }
+
++static void
++geth_suspend(struct usb_function *f)
++{
++ struct f_gether *geth = func_to_geth(f);
++ struct eth_dev *dev = geth->port.ioport;
++
++ if (dev)
++ gether_disconnect(&geth->port);
++}
++
++static void
++geth_resume(struct usb_function *f)
++{
++ struct f_gether *geth = func_to_geth(f);
++ struct eth_dev *dev = geth->port.ioport;
++
++ if (!dev)
++ gether_connect(&geth->port);
++}
++
+ /**
+ * geth_bind_config - add CDC Subset network link to a configuration
+ * @c: the configuration to support the network link
+@@ -411,6 +431,8 @@ int __init geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+ geth->port.func.unbind = geth_unbind;
+ geth->port.func.set_alt = geth_set_alt;
+ geth->port.func.disable = geth_disable;
++ geth->port.func.resume = geth_resume;
++ geth->port.func.suspend = geth_suspend;
+
+ status = usb_add_function(c, &geth->port.func);
+ if (status) {
+diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c
+index a391351..eb0e185 100644
+--- a/drivers/usb/gadget/langwell_udc.c
++++ b/drivers/usb/gadget/langwell_udc.c
+@@ -54,7 +54,7 @@
+
+
+ #define DRIVER_DESC "Intel Langwell USB Device Controller driver"
+-#define DRIVER_VERSION "16 May 2009"
++#define DRIVER_VERSION "Apr 30, 2010"
+
+ static const char driver_name[] = "langwell_udc";
+ static const char driver_desc[] = DRIVER_DESC;
+@@ -73,7 +73,6 @@ langwell_ep0_desc = {
+ .wMaxPacketSize = EP0_MAX_PKT_SIZE,
+ };
+
+-
+ /*-------------------------------------------------------------------------*/
+ /* debugging */
+
+@@ -114,104 +113,76 @@ static inline void print_all_registers(struct langwell_udc *dev)
+ int i;
+
+ /* Capability Registers */
+- printk(KERN_DEBUG "Capability Registers (offset: "
+- "0x%04x, length: 0x%08x)\n",
+- CAP_REG_OFFSET,
+- (u32)sizeof(struct langwell_cap_regs));
+- printk(KERN_DEBUG "caplength=0x%02x\n",
+- readb(&dev->cap_regs->caplength));
+- printk(KERN_DEBUG "hciversion=0x%04x\n",
+- readw(&dev->cap_regs->hciversion));
+- printk(KERN_DEBUG "hcsparams=0x%08x\n",
+- readl(&dev->cap_regs->hcsparams));
+- printk(KERN_DEBUG "hccparams=0x%08x\n",
+- readl(&dev->cap_regs->hccparams));
+- printk(KERN_DEBUG "dciversion=0x%04x\n",
+- readw(&dev->cap_regs->dciversion));
+- printk(KERN_DEBUG "dccparams=0x%08x\n",
+- readl(&dev->cap_regs->dccparams));
++ DBG(dev, "Capability Registers (offset: 0x%04x, length: 0x%08x)\n",
++ CAP_REG_OFFSET, (u32)sizeof(struct langwell_cap_regs));
++ DBG(dev, "caplength=0x%02x\n", readb(&dev->cap_regs->caplength));
++ DBG(dev, "hciversion=0x%04x\n", readw(&dev->cap_regs->hciversion));
++ DBG(dev, "hcsparams=0x%08x\n", readl(&dev->cap_regs->hcsparams));
++ DBG(dev, "hccparams=0x%08x\n", readl(&dev->cap_regs->hccparams));
++ DBG(dev, "dciversion=0x%04x\n", readw(&dev->cap_regs->dciversion));
++ DBG(dev, "dccparams=0x%08x\n", readl(&dev->cap_regs->dccparams));
+
+ /* Operational Registers */
+- printk(KERN_DEBUG "Operational Registers (offset: "
+- "0x%04x, length: 0x%08x)\n",
+- OP_REG_OFFSET,
+- (u32)sizeof(struct langwell_op_regs));
+- printk(KERN_DEBUG "extsts=0x%08x\n",
+- readl(&dev->op_regs->extsts));
+- printk(KERN_DEBUG "extintr=0x%08x\n",
+- readl(&dev->op_regs->extintr));
+- printk(KERN_DEBUG "usbcmd=0x%08x\n",
+- readl(&dev->op_regs->usbcmd));
+- printk(KERN_DEBUG "usbsts=0x%08x\n",
+- readl(&dev->op_regs->usbsts));
+- printk(KERN_DEBUG "usbintr=0x%08x\n",
+- readl(&dev->op_regs->usbintr));
+- printk(KERN_DEBUG "frindex=0x%08x\n",
+- readl(&dev->op_regs->frindex));
+- printk(KERN_DEBUG "ctrldssegment=0x%08x\n",
++ DBG(dev, "Operational Registers (offset: 0x%04x, length: 0x%08x)\n",
++ OP_REG_OFFSET, (u32)sizeof(struct langwell_op_regs));
++ DBG(dev, "extsts=0x%08x\n", readl(&dev->op_regs->extsts));
++ DBG(dev, "extintr=0x%08x\n", readl(&dev->op_regs->extintr));
++ DBG(dev, "usbcmd=0x%08x\n", readl(&dev->op_regs->usbcmd));
++ DBG(dev, "usbsts=0x%08x\n", readl(&dev->op_regs->usbsts));
++ DBG(dev, "usbintr=0x%08x\n", readl(&dev->op_regs->usbintr));
++ DBG(dev, "frindex=0x%08x\n", readl(&dev->op_regs->frindex));
++ DBG(dev, "ctrldssegment=0x%08x\n",
+ readl(&dev->op_regs->ctrldssegment));
+- printk(KERN_DEBUG "deviceaddr=0x%08x\n",
+- readl(&dev->op_regs->deviceaddr));
+- printk(KERN_DEBUG "endpointlistaddr=0x%08x\n",
++ DBG(dev, "deviceaddr=0x%08x\n", readl(&dev->op_regs->deviceaddr));
++ DBG(dev, "endpointlistaddr=0x%08x\n",
+ readl(&dev->op_regs->endpointlistaddr));
+- printk(KERN_DEBUG "ttctrl=0x%08x\n",
+- readl(&dev->op_regs->ttctrl));
+- printk(KERN_DEBUG "burstsize=0x%08x\n",
+- readl(&dev->op_regs->burstsize));
+- printk(KERN_DEBUG "txfilltuning=0x%08x\n",
+- readl(&dev->op_regs->txfilltuning));
+- printk(KERN_DEBUG "txttfilltuning=0x%08x\n",
++ DBG(dev, "ttctrl=0x%08x\n", readl(&dev->op_regs->ttctrl));
++ DBG(dev, "burstsize=0x%08x\n", readl(&dev->op_regs->burstsize));
++ DBG(dev, "txfilltuning=0x%08x\n", readl(&dev->op_regs->txfilltuning));
++ DBG(dev, "txttfilltuning=0x%08x\n",
+ readl(&dev->op_regs->txttfilltuning));
+- printk(KERN_DEBUG "ic_usb=0x%08x\n",
+- readl(&dev->op_regs->ic_usb));
+- printk(KERN_DEBUG "ulpi_viewport=0x%08x\n",
++ DBG(dev, "ic_usb=0x%08x\n", readl(&dev->op_regs->ic_usb));
++ DBG(dev, "ulpi_viewport=0x%08x\n",
+ readl(&dev->op_regs->ulpi_viewport));
+- printk(KERN_DEBUG "configflag=0x%08x\n",
+- readl(&dev->op_regs->configflag));
+- printk(KERN_DEBUG "portsc1=0x%08x\n",
+- readl(&dev->op_regs->portsc1));
+- printk(KERN_DEBUG "devlc=0x%08x\n",
+- readl(&dev->op_regs->devlc));
+- printk(KERN_DEBUG "otgsc=0x%08x\n",
+- readl(&dev->op_regs->otgsc));
+- printk(KERN_DEBUG "usbmode=0x%08x\n",
+- readl(&dev->op_regs->usbmode));
+- printk(KERN_DEBUG "endptnak=0x%08x\n",
+- readl(&dev->op_regs->endptnak));
+- printk(KERN_DEBUG "endptnaken=0x%08x\n",
+- readl(&dev->op_regs->endptnaken));
+- printk(KERN_DEBUG "endptsetupstat=0x%08x\n",
++ DBG(dev, "configflag=0x%08x\n", readl(&dev->op_regs->configflag));
++ DBG(dev, "portsc1=0x%08x\n", readl(&dev->op_regs->portsc1));
++ DBG(dev, "devlc=0x%08x\n", readl(&dev->op_regs->devlc));
++ DBG(dev, "otgsc=0x%08x\n", readl(&dev->op_regs->otgsc));
++ DBG(dev, "usbmode=0x%08x\n", readl(&dev->op_regs->usbmode));
++ DBG(dev, "endptnak=0x%08x\n", readl(&dev->op_regs->endptnak));
++ DBG(dev, "endptnaken=0x%08x\n", readl(&dev->op_regs->endptnaken));
++ DBG(dev, "endptsetupstat=0x%08x\n",
+ readl(&dev->op_regs->endptsetupstat));
+- printk(KERN_DEBUG "endptprime=0x%08x\n",
+- readl(&dev->op_regs->endptprime));
+- printk(KERN_DEBUG "endptflush=0x%08x\n",
+- readl(&dev->op_regs->endptflush));
+- printk(KERN_DEBUG "endptstat=0x%08x\n",
+- readl(&dev->op_regs->endptstat));
+- printk(KERN_DEBUG "endptcomplete=0x%08x\n",
++ DBG(dev, "endptprime=0x%08x\n", readl(&dev->op_regs->endptprime));
++ DBG(dev, "endptflush=0x%08x\n", readl(&dev->op_regs->endptflush));
++ DBG(dev, "endptstat=0x%08x\n", readl(&dev->op_regs->endptstat));
++ DBG(dev, "endptcomplete=0x%08x\n",
+ readl(&dev->op_regs->endptcomplete));
+
+ for (i = 0; i < dev->ep_max / 2; i++) {
+- printk(KERN_DEBUG "endptctrl[%d]=0x%08x\n",
++ DBG(dev, "endptctrl[%d]=0x%08x\n",
+ i, readl(&dev->op_regs->endptctrl[i]));
+ }
+ }
++#else
++
++#define print_all_registers(dev) do { } while (0)
++
+ #endif /* VERBOSE */
+
+
+ /*-------------------------------------------------------------------------*/
+
+-#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
++#define is_in(ep) (((ep)->ep_num == 0) ? ((ep)->dev->ep0_dir == \
++ USB_DIR_IN) : (usb_endpoint_dir_in((ep)->desc)))
+
+-#define is_in(ep) (((ep)->ep_num == 0) ? ((ep)->dev->ep0_dir == \
+- USB_DIR_IN) : ((ep)->desc->bEndpointAddress \
+- & USB_DIR_IN) == USB_DIR_IN)
++#define DIR_STRING(ep) (is_in(ep) ? "in" : "out")
+
+
+ #ifdef DEBUG
+-static char *type_string(u8 bmAttributes)
++static char *type_string(const struct usb_endpoint_descriptor *desc)
+ {
+- switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
++ switch (usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_BULK:
+ return "bulk";
+ case USB_ENDPOINT_XFER_ISOC:
+@@ -274,11 +245,13 @@ static void ep0_reset(struct langwell_udc *dev)
+ ep->dqh->dqh_ios = 1;
+ ep->dqh->dqh_mpl = EP0_MAX_PKT_SIZE;
+
+- /* FIXME: enable ep0-in HW zero length termination select */
++ /* enable ep0-in HW zero length termination select */
+ if (is_in(ep))
+ ep->dqh->dqh_zlt = 0;
+ ep->dqh->dqh_mult = 0;
+
++ ep->dqh->dtd_next = DTD_TERM;
++
+ /* configure ep0 control registers */
+ ep_reset(&dev->ep[0], 0, i, USB_ENDPOINT_XFER_CONTROL);
+ }
+@@ -300,7 +273,7 @@ static int langwell_ep_enable(struct usb_ep *_ep,
+ struct langwell_ep *ep;
+ u16 max = 0;
+ unsigned long flags;
+- int retval = 0;
++ int i, retval = 0;
+ unsigned char zlt, ios = 0, mult = 0;
+
+ ep = container_of(_ep, struct langwell_ep, ep);
+@@ -326,7 +299,7 @@ static int langwell_ep_enable(struct usb_ep *_ep,
+ * sanity check type, direction, address, and then
+ * initialize the endpoint capabilities fields in dQH
+ */
+- switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
++ switch (usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ ios = 1;
+ break;
+@@ -386,28 +359,31 @@ static int langwell_ep_enable(struct usb_ep *_ep,
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+- /* configure endpoint capabilities in dQH */
+- ep->dqh->dqh_ios = ios;
+- ep->dqh->dqh_mpl = cpu_to_le16(max);
+- ep->dqh->dqh_zlt = zlt;
+- ep->dqh->dqh_mult = mult;
+-
+ ep->ep.maxpacket = max;
+ ep->desc = desc;
+ ep->stopped = 0;
+- ep->ep_num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
++ ep->ep_num = usb_endpoint_num(desc);
+
+ /* ep_type */
+- ep->ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
++ ep->ep_type = usb_endpoint_type(desc);
+
+ /* configure endpoint control registers */
+ ep_reset(ep, ep->ep_num, is_in(ep), ep->ep_type);
+
++ /* configure endpoint capabilities in dQH */
++ i = ep->ep_num * 2 + is_in(ep);
++ ep->dqh = &dev->ep_dqh[i];
++ ep->dqh->dqh_ios = ios;
++ ep->dqh->dqh_mpl = cpu_to_le16(max);
++ ep->dqh->dqh_zlt = zlt;
++ ep->dqh->dqh_mult = mult;
++ ep->dqh->dtd_next = DTD_TERM;
++
+ DBG(dev, "enabled %s (ep%d%s-%s), max %04x\n",
+ _ep->name,
+ ep->ep_num,
+- DIR_STRING(desc->bEndpointAddress),
+- type_string(desc->bmAttributes),
++ DIR_STRING(ep),
++ type_string(desc),
+ max);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+@@ -617,7 +593,7 @@ static int queue_dtd(struct langwell_ep *ep, struct langwell_request *req)
+ VDBG(dev, "%s\n", ep->name);
+ else
+ /* ep0 */
+- VDBG(dev, "%s-%s\n", ep->name, is_in(ep) ? "in" : "out");
++ VDBG(dev, "%s-%s\n", ep->name, DIR_STRING(ep));
+
+ VDBG(dev, "ep_dqh[%d] addr: 0x%08x\n", i, (u32)&(dev->ep_dqh[i]));
+
+@@ -667,6 +643,9 @@ static int queue_dtd(struct langwell_ep *ep, struct langwell_request *req)
+ dqh->dtd_status &= dtd_status;
+ VDBG(dev, "dqh->dtd_status = 0x%x\n", dqh->dtd_status);
+
++ /* ensure that updates to the dQH will occure before priming */
++ wmb();
++
+ /* write 1 to endptprime register to PRIME endpoint */
+ bit_mask = is_in(ep) ? (1 << (ep->ep_num + 16)) : (1 << ep->ep_num);
+ VDBG(dev, "endprime bit_mask = 0x%08x\n", bit_mask);
+@@ -805,7 +784,7 @@ static int langwell_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+ req->ep = ep;
+ VDBG(dev, "---> %s()\n", __func__);
+
+- if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
++ if (usb_endpoint_xfer_isoc(ep->desc)) {
+ if (req->req.length > ep->ep.maxpacket)
+ return -EMSGSIZE;
+ is_iso = 1;
+@@ -844,7 +823,7 @@ static int langwell_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+
+ DBG(dev, "%s queue req %p, len %u, buf %p, dma 0x%08x\n",
+ _ep->name,
+- _req, _req->length, _req->buf, _req->dma);
++ _req, _req->length, _req->buf, (int)_req->dma);
+
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+@@ -1024,8 +1003,7 @@ static int langwell_ep_set_halt(struct usb_ep *_ep, int value)
+ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+- if (ep->desc && (ep->desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+- == USB_ENDPOINT_XFER_ISOC)
++ if (usb_endpoint_xfer_isoc(ep->desc))
+ return -EOPNOTSUPP;
+
+ spin_lock_irqsave(&dev->lock, flags);
+@@ -1094,7 +1072,7 @@ static void langwell_ep_fifo_flush(struct usb_ep *_ep)
+ return;
+ }
+
+- VDBG(dev, "%s-%s fifo flush\n", _ep->name, is_in(ep) ? "in" : "out");
++ VDBG(dev, "%s-%s fifo flush\n", _ep->name, DIR_STRING(ep));
+
+ /* flush endpoint buffer */
+ if (ep->ep_num == 0)
+@@ -1181,6 +1159,7 @@ static int langwell_wakeup(struct usb_gadget *_gadget)
+ {
+ struct langwell_udc *dev;
+ u32 portsc1, devlc;
++ u8 devlc_byte2;
+ unsigned long flags;
+
+ if (!_gadget)
+@@ -1189,9 +1168,11 @@ static int langwell_wakeup(struct usb_gadget *_gadget)
+ dev = container_of(_gadget, struct langwell_udc, gadget);
+ VDBG(dev, "---> %s()\n", __func__);
+
+- /* Remote Wakeup feature not enabled by host */
+- if (!dev->remote_wakeup)
++ /* remote wakeup feature not enabled by host */
++ if (!dev->remote_wakeup) {
++ INFO(dev, "remote wakeup is disabled\n");
+ return -ENOTSUPP;
++ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+@@ -1201,23 +1182,25 @@ static int langwell_wakeup(struct usb_gadget *_gadget)
+ return 0;
+ }
+
+- /* LPM L1 to L0, remote wakeup */
+- if (dev->lpm && dev->lpm_state == LPM_L1) {
+- portsc1 |= PORTS_SLP;
+- writel(portsc1, &dev->op_regs->portsc1);
+- }
+-
+- /* force port resume */
+- if (dev->usb_state == USB_STATE_SUSPENDED) {
+- portsc1 |= PORTS_FPR;
+- writel(portsc1, &dev->op_regs->portsc1);
+- }
++ /* LPM L1 to L0 or legacy remote wakeup */
++ if (dev->lpm && dev->lpm_state == LPM_L1)
++ INFO(dev, "LPM L1 to L0 remote wakeup\n");
++ else
++ INFO(dev, "device remote wakeup\n");
+
+ /* exit PHY low power suspend */
+ devlc = readl(&dev->op_regs->devlc);
+ VDBG(dev, "devlc = 0x%08x\n", devlc);
+ devlc &= ~LPM_PHCD;
+- writel(devlc, &dev->op_regs->devlc);
++ /* FIXME: workaround for Langwell A1/A2/A3 sighting */
++ devlc_byte2 = (devlc >> 16) & 0xff;
++ writeb(devlc_byte2, (u8 *)&dev->op_regs->devlc + 2);
++ devlc = readl(&dev->op_regs->devlc);
++ VDBG(dev, "exit PHY low power suspend, devlc = 0x%08x\n", devlc);
++
++ /* force port resume */
++ portsc1 |= PORTS_FPR;
++ writel(portsc1, &dev->op_regs->portsc1);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+@@ -1346,6 +1329,7 @@ static const struct usb_gadget_ops langwell_ops = {
+ static int langwell_udc_reset(struct langwell_udc *dev)
+ {
+ u32 usbcmd, usbmode, devlc, endpointlistaddr;
++ u8 devlc_byte0, devlc_byte2;
+ unsigned long timeout;
+
+ if (!dev)
+@@ -1390,9 +1374,16 @@ static int langwell_udc_reset(struct langwell_udc *dev)
+ /* if support USB LPM, ACK all LPM token */
+ if (dev->lpm) {
+ devlc = readl(&dev->op_regs->devlc);
++ VDBG(dev, "devlc = 0x%08x\n", devlc);
++ /* FIXME: workaround for Langwell A1/A2/A3 sighting */
+ devlc &= ~LPM_STL; /* don't STALL LPM token */
+ devlc &= ~LPM_NYT_ACK; /* ACK LPM token */
+- writel(devlc, &dev->op_regs->devlc);
++ devlc_byte0 = devlc & 0xff;
++ devlc_byte2 = (devlc >> 16) & 0xff;
++ writeb(devlc_byte0, (u8 *)&dev->op_regs->devlc);
++ writeb(devlc_byte2, (u8 *)&dev->op_regs->devlc + 2);
++ devlc = readl(&dev->op_regs->devlc);
++ VDBG(dev, "ACK LPM token, devlc = 0x%08x\n", devlc);
+ }
+
+ /* fill endpointlistaddr register */
+@@ -1449,8 +1440,6 @@ static int eps_reinit(struct langwell_udc *dev)
+
+ INIT_LIST_HEAD(&ep->queue);
+ list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
+-
+- ep->dqh = &dev->ep_dqh[i];
+ }
+
+ VDBG(dev, "<--- %s()\n", __func__);
+@@ -1539,21 +1528,6 @@ static void stop_activity(struct langwell_udc *dev,
+
+ /*-------------------------------------------------------------------------*/
+
+-/* device "function" sysfs attribute file */
+-static ssize_t show_function(struct device *_dev,
+- struct device_attribute *attr, char *buf)
+-{
+- struct langwell_udc *dev = the_controller;
+-
+- if (!dev->driver || !dev->driver->function
+- || strlen(dev->driver->function) > PAGE_SIZE)
+- return 0;
+-
+- return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
+-}
+-static DEVICE_ATTR(function, S_IRUGO, show_function, NULL);
+-
+-
+ /* device "langwell_udc" sysfs attribute file */
+ static ssize_t show_langwell_udc(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+@@ -1659,13 +1633,15 @@ static ssize_t show_langwell_udc(struct device *_dev,
+ "Over-current Change: %s\n"
+ "Port Enable/Disable Change: %s\n"
+ "Port Enabled/Disabled: %s\n"
+- "Current Connect Status: %s\n\n",
++ "Current Connect Status: %s\n"
++ "LPM Suspend Status: %s\n\n",
+ (tmp_reg & PORTS_PR) ? "Reset" : "Not Reset",
+ (tmp_reg & PORTS_SUSP) ? "Suspend " : "Not Suspend",
+ (tmp_reg & PORTS_OCC) ? "Detected" : "No",
+ (tmp_reg & PORTS_PEC) ? "Changed" : "Not Changed",
+ (tmp_reg & PORTS_PE) ? "Enable" : "Not Correct",
+- (tmp_reg & PORTS_CCS) ? "Attached" : "Not Attached");
++ (tmp_reg & PORTS_CCS) ? "Attached" : "Not Attached",
++ (tmp_reg & PORTS_SLP) ? "LPM L1" : "LPM L0");
+ size -= t;
+ next += t;
+
+@@ -1676,7 +1652,7 @@ static ssize_t show_langwell_udc(struct device *_dev,
+ "Serial Transceiver : %d\n"
+ "Port Speed: %s\n"
+ "Port Force Full Speed Connenct: %s\n"
+- "PHY Low Power Suspend Clock Disable: %s\n"
++ "PHY Low Power Suspend Clock: %s\n"
+ "BmAttributes: %d\n\n",
+ LPM_PTS(tmp_reg),
+ (tmp_reg & LPM_STS) ? 1 : 0,
+@@ -1797,6 +1773,40 @@ static ssize_t show_langwell_udc(struct device *_dev,
+ static DEVICE_ATTR(langwell_udc, S_IRUGO, show_langwell_udc, NULL);
+
+
++/* device "remote_wakeup" sysfs attribute file */
++static ssize_t store_remote_wakeup(struct device *_dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct langwell_udc *dev = the_controller;
++#if defined(CONFIG_USB_DEBUG)
++ unsigned long flags;
++#endif
++ ssize_t rc = count;
++
++ if (count > 2)
++ return -EINVAL;
++
++ if (count > 0 && buf[count-1] == '\n')
++ ((char *) buf)[count-1] = 0;
++
++ if (buf[0] != '1')
++ return -EINVAL;
++
++#if defined(CONFIG_USB_DEBUG)
++ /* force remote wakeup enabled in case gadget driver doesn't support */
++ spin_lock_irqsave(&dev->lock, flags);
++ dev->remote_wakeup = 1;
++ dev->dev_status |= (1 << USB_DEVICE_REMOTE_WAKEUP);
++ spin_unlock_irqrestore(&dev->lock, flags);
++#endif
++
++ langwell_wakeup(&dev->gadget);
++
++ return rc;
++}
++static DEVICE_ATTR(remote_wakeup, S_IWUSR, NULL, store_remote_wakeup);
++
++
+ /*-------------------------------------------------------------------------*/
+
+ /*
+@@ -1818,6 +1828,9 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+
+ DBG(dev, "---> %s()\n", __func__);
+
++ if (unlikely(!driver || !driver->bind))
++ return -EINVAL;
++
+ if (dev->driver)
+ return -EBUSY;
+
+@@ -1839,34 +1852,24 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+ return retval;
+ }
+
+- retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
+- if (retval)
+- goto err_unbind;
+-
+ dev->usb_state = USB_STATE_ATTACHED;
+ dev->ep0_state = WAIT_FOR_SETUP;
+ dev->ep0_dir = USB_DIR_OUT;
+
++ /* bind OTG transceiver */
++ if (dev->transceiver)
++ (void)otg_set_peripheral(dev->transceiver, &dev->gadget);
++
+ /* enable interrupt and set controller to run state */
+ if (dev->got_irq)
+ langwell_udc_start(dev);
+
+ VDBG(dev, "After langwell_udc_start(), print all registers:\n");
+-#ifdef VERBOSE
+ print_all_registers(dev);
+-#endif
+
+ INFO(dev, "register driver: %s\n", driver->driver.name);
+- VDBG(dev, "<--- %s()\n", __func__);
+- return 0;
+-
+-err_unbind:
+- driver->unbind(&dev->gadget);
+- dev->gadget.dev.driver = NULL;
+- dev->driver = NULL;
+-
+ DBG(dev, "<--- %s()\n", __func__);
+- return retval;
++ return 0;
+ }
+ EXPORT_SYMBOL(usb_gadget_register_driver);
+
+@@ -1876,15 +1879,27 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+ {
+ struct langwell_udc *dev = the_controller;
+ unsigned long flags;
++ u32 devlc;
++ u8 devlc_byte2;
+
+ if (!dev)
+ return -ENODEV;
+
+ DBG(dev, "---> %s()\n", __func__);
+
+- if (unlikely(!driver || !driver->bind || !driver->unbind))
++ if (unlikely(!driver || !driver->unbind || !driver->disconnect))
+ return -EINVAL;
+
++ /* exit PHY low power suspend */
++ devlc = readl(&dev->op_regs->devlc);
++ VDBG(dev, "devlc = 0x%08x\n", devlc);
++ devlc &= ~LPM_PHCD;
++ /* FIXME: workaround for Langwell A1/A2/A3 sighting */
++ devlc_byte2 = (devlc >> 16) & 0xff;
++ writeb(devlc_byte2, (u8 *)&dev->op_regs->devlc + 2);
++ devlc = readl(&dev->op_regs->devlc);
++ VDBG(dev, "exit PHY low power suspend, devlc = 0x%08x\n", devlc);
++
+ /* unbind OTG transceiver */
+ if (dev->transceiver)
+ (void)otg_set_peripheral(dev->transceiver, 0);
+@@ -1908,8 +1923,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+ dev->gadget.dev.driver = NULL;
+ dev->driver = NULL;
+
+- device_remove_file(&dev->pdev->dev, &dev_attr_function);
+-
+ INFO(dev, "unregistered driver '%s'\n", driver->driver.name);
+ DBG(dev, "<--- %s()\n", __func__);
+ return 0;
+@@ -1917,6 +1930,55 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+ EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+
++/* gets the maximum power consumption */
++int langwell_udc_maxpower(int *mA)
++{
++ struct langwell_udc *dev = the_controller;
++ u32 usbmode, portsc1, usbcmd;
++
++ /* fatal error */
++ if (!dev) {
++ *mA = 0;
++ return -EOTGFAIL;
++ }
++
++ DBG(dev, "---> %s()\n", __func__);
++
++ /* contrller is not in device mode */
++ usbmode = readl(&dev->op_regs->usbmode);
++ if (MODE_CM(usbmode) != MODE_DEVICE) {
++ *mA = 0;
++ return -EOTGNODEVICE;
++ }
++
++ /* can't get maximum power */
++ usbcmd = readl(&dev->op_regs->usbcmd);
++ if (!(usbcmd & CMD_RUNSTOP)) {
++ *mA = 0;
++ return -EOTGCHARGER;
++ }
++
++ /* disconnect to USB host */
++ portsc1 = readl(&dev->op_regs->portsc1);
++ if (!(portsc1 & PORTS_CCS)) {
++ *mA = 0;
++ return -EOTGDISCONN;
++ }
++
++ /* set max power capability */
++ *mA = CONFIG_USB_GADGET_VBUS_DRAW;
++
++ if ((*mA < 8) || (*mA > 500)) {
++ *mA = 0;
++ return -EOTGINVAL;
++ }
++
++ DBG(dev, "<--- %s()\n", __func__);
++ return 0;
++}
++EXPORT_SYMBOL(langwell_udc_maxpower);
++
++
+ /*-------------------------------------------------------------------------*/
+
+ /*
+@@ -2113,8 +2175,7 @@ static void get_status(struct langwell_udc *dev, u8 request_type, u16 value,
+
+ if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
+ /* get device status */
+- status_data = 1 << USB_DEVICE_SELF_POWERED;
+- status_data |= dev->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
++ status_data = dev->dev_status;
+ } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
+ /* get interface status */
+ status_data = 0;
+@@ -2129,6 +2190,8 @@ static void get_status(struct langwell_udc *dev, u8 request_type, u16 value,
+ status_data = ep_is_stall(epn) << USB_ENDPOINT_HALT;
+ }
+
++ DBG(dev, "get status data: 0x%04x\n", status_data);
++
+ dev->ep0_dir = USB_DIR_IN;
+
+ /* borrow the per device status_req */
+@@ -2247,22 +2310,37 @@ static void handle_setup_packet(struct langwell_udc *dev,
+ } else if ((setup->bRequestType & (USB_RECIP_MASK
+ | USB_TYPE_MASK)) == (USB_RECIP_DEVICE
+ | USB_TYPE_STANDARD)) {
+- if (!gadget_is_otg(&dev->gadget))
++ rc = 0;
++ switch (wValue) {
++ case USB_DEVICE_REMOTE_WAKEUP:
++ if (setup->bRequest == USB_REQ_SET_FEATURE) {
++ dev->remote_wakeup = 1;
++ dev->dev_status |= (1 << wValue);
++ } else {
++ dev->remote_wakeup = 0;
++ dev->dev_status &= ~(1 << wValue);
++ }
+ break;
+- else if (setup->bRequest == USB_DEVICE_B_HNP_ENABLE) {
++ case USB_DEVICE_B_HNP_ENABLE:
+ dev->gadget.b_hnp_enable = 1;
+ #ifdef OTG_TRANSCEIVER
+ if (!dev->lotg->otg.default_a)
+ dev->lotg->hsm.b_hnp_enable = 1;
+ #endif
+- } else if (setup->bRequest == USB_DEVICE_A_HNP_SUPPORT)
++ dev->dev_status |= (1 << wValue);
++ break;
++ case USB_DEVICE_A_HNP_SUPPORT:
+ dev->gadget.a_hnp_support = 1;
+- else if (setup->bRequest ==
+- USB_DEVICE_A_ALT_HNP_SUPPORT)
++ dev->dev_status |= (1 << wValue);
++ break;
++ case USB_DEVICE_A_ALT_HNP_SUPPORT:
+ dev->gadget.a_alt_hnp_support = 1;
+- else
++ dev->dev_status |= (1 << wValue);
+ break;
+- rc = 0;
++ default:
++ rc = -EOPNOTSUPP;
++ break;
++ }
+ } else
+ break;
+
+@@ -2387,7 +2465,7 @@ static int process_ep_req(struct langwell_udc *dev, int index,
+ } else {
+ /* transfers completed with errors */
+ if (dtd_status & DTD_STS_ACTIVE) {
+- DBG(dev, "request not completed\n");
++ DBG(dev, "dTD status ACTIVE dQH[%d]\n", index);
+ retval = 1;
+ return retval;
+ } else if (dtd_status & DTD_STS_HALTED) {
+@@ -2586,18 +2664,14 @@ static void handle_port_change(struct langwell_udc *dev)
+ /* LPM L0 to L1 */
+ if (dev->lpm && dev->lpm_state == LPM_L0)
+ if (portsc1 & PORTS_SUSP && portsc1 & PORTS_SLP) {
+- INFO(dev, "LPM L0 to L1\n");
+- dev->lpm_state = LPM_L1;
++ INFO(dev, "LPM L0 to L1\n");
++ dev->lpm_state = LPM_L1;
+ }
+
+ /* LPM L1 to L0, force resume or remote wakeup finished */
+ if (dev->lpm && dev->lpm_state == LPM_L1)
+ if (!(portsc1 & PORTS_SUSP)) {
+- if (portsc1 & PORTS_SLP)
+- INFO(dev, "LPM L1 to L0, force resume\n");
+- else
+- INFO(dev, "LPM L1 to L0, remote wakeup\n");
+-
++ INFO(dev, "LPM L1 to L0\n");
+ dev->lpm_state = LPM_L0;
+ }
+
+@@ -2634,7 +2708,10 @@ static void handle_usb_reset(struct langwell_udc *dev)
+
+ dev->ep0_dir = USB_DIR_OUT;
+ dev->ep0_state = WAIT_FOR_SETUP;
+- dev->remote_wakeup = 0; /* default to 0 on reset */
++
++ /* remote wakeup reset to 0 when the device is reset */
++ dev->remote_wakeup = 0;
++ dev->dev_status = 1 << USB_DEVICE_SELF_POWERED;
+ dev->gadget.b_hnp_enable = 0;
+ dev->gadget.a_hnp_support = 0;
+ dev->gadget.a_alt_hnp_support = 0;
+@@ -2699,6 +2776,7 @@ static void handle_usb_reset(struct langwell_udc *dev)
+ static void handle_bus_suspend(struct langwell_udc *dev)
+ {
+ u32 devlc;
++ u8 devlc_byte2;
+ DBG(dev, "---> %s()\n", __func__);
+
+ dev->resume_state = dev->usb_state;
+@@ -2706,7 +2784,8 @@ static void handle_bus_suspend(struct langwell_udc *dev)
+
+ #ifdef OTG_TRANSCEIVER
+ if (dev->lotg->otg.default_a) {
+- if (dev->lotg->hsm.b_bus_suspend_vld == 1) {
++ /* ignore host LPM capability checking during enumeration */
++ if (dev->lotg->hsm.b_bus_suspend_vld == 2) {
+ dev->lotg->hsm.b_bus_suspend = 1;
+ /* notify transceiver the state changes */
+ if (spin_trylock(&dev->lotg->wq_lock)) {
+@@ -2741,7 +2820,11 @@ static void handle_bus_suspend(struct langwell_udc *dev)
+ devlc = readl(&dev->op_regs->devlc);
+ VDBG(dev, "devlc = 0x%08x\n", devlc);
+ devlc |= LPM_PHCD;
+- writel(devlc, &dev->op_regs->devlc);
++ /* FIXME: workaround for Langwell A1/A2/A3 sighting */
++ devlc_byte2 = (devlc >> 16) & 0xff;
++ writeb(devlc_byte2, (u8 *)&dev->op_regs->devlc + 2);
++ devlc = readl(&dev->op_regs->devlc);
++ VDBG(dev, "enter PHY low power suspend, devlc = 0x%08x\n", devlc);
+
+ DBG(dev, "<--- %s()\n", __func__);
+ }
+@@ -2750,6 +2833,7 @@ static void handle_bus_suspend(struct langwell_udc *dev)
+ static void handle_bus_resume(struct langwell_udc *dev)
+ {
+ u32 devlc;
++ u8 devlc_byte2;
+ DBG(dev, "---> %s()\n", __func__);
+
+ dev->usb_state = dev->resume_state;
+@@ -2759,7 +2843,11 @@ static void handle_bus_resume(struct langwell_udc *dev)
+ devlc = readl(&dev->op_regs->devlc);
+ VDBG(dev, "devlc = 0x%08x\n", devlc);
+ devlc &= ~LPM_PHCD;
+- writel(devlc, &dev->op_regs->devlc);
++ /* FIXME: workaround for Langwell A1/A2/A3 sighting */
++ devlc_byte2 = (devlc >> 16) & 0xff;
++ writeb(devlc_byte2, (u8 *)&dev->op_regs->devlc + 2);
++ devlc = readl(&dev->op_regs->devlc);
++ VDBG(dev, "exit PHY low power suspend, devlc = 0x%08x\n", devlc);
+
+ #ifdef OTG_TRANSCEIVER
+ if (dev->lotg->otg.default_a == 0)
+@@ -2898,6 +2986,50 @@ static void gadget_release(struct device *_dev)
+ }
+
+
++/* enable SRAM caching if SRAM detected */
++static void sram_init(struct langwell_udc *dev)
++{
++ struct pci_dev *pdev = dev->pdev;
++
++ DBG(dev, "---> %s()\n", __func__);
++
++ dev->sram_addr = pci_resource_start(pdev, 1);
++ dev->sram_size = pci_resource_len(pdev, 1);
++ INFO(dev, "Found private SRAM at %x size:%x\n",
++ dev->sram_addr, dev->sram_size);
++ dev->got_sram = 1;
++
++ if (pci_request_region(pdev, 1, kobject_name(&pdev->dev.kobj))) {
++ WARNING(dev, "SRAM request failed\n");
++ dev->got_sram = 0;
++ } else if (!dma_declare_coherent_memory(&pdev->dev, dev->sram_addr,
++ dev->sram_addr, dev->sram_size, DMA_MEMORY_MAP)) {
++ WARNING(dev, "SRAM DMA declare failed\n");
++ pci_release_region(pdev, 1);
++ dev->got_sram = 0;
++ }
++
++ DBG(dev, "<--- %s()\n", __func__);
++}
++
++
++/* release SRAM caching */
++static void sram_deinit(struct langwell_udc *dev)
++{
++ struct pci_dev *pdev = dev->pdev;
++
++ DBG(dev, "---> %s()\n", __func__);
++
++ dma_release_declared_memory(&pdev->dev);
++ pci_release_region(pdev, 1);
++
++ dev->got_sram = 0;
++
++ INFO(dev, "release SRAM caching\n");
++ DBG(dev, "<--- %s()\n", __func__);
++}
++
++
+ /* tear down the binding between this driver and the pci device */
+ static void langwell_udc_remove(struct pci_dev *pdev)
+ {
+@@ -2910,19 +3042,25 @@ static void langwell_udc_remove(struct pci_dev *pdev)
+
+ dev->done = &done;
+
+- /* free memory allocated in probe */
++#ifndef OTG_TRANSCEIVER
++ /* free dTD dma_pool and dQH */
+ if (dev->dtd_pool)
+ dma_pool_destroy(dev->dtd_pool);
+
++ if (dev->ep_dqh)
++ dma_free_coherent(&pdev->dev, dev->ep_dqh_size,
++ dev->ep_dqh, dev->ep_dqh_dma);
++
++ /* release SRAM caching */
++ if (dev->has_sram && dev->got_sram)
++ sram_deinit(dev);
++#endif
++
+ if (dev->status_req) {
+ kfree(dev->status_req->req.buf);
+ kfree(dev->status_req);
+ }
+
+- if (dev->ep_dqh)
+- dma_free_coherent(&pdev->dev, dev->ep_dqh_size,
+- dev->ep_dqh, dev->ep_dqh_dma);
+-
+ kfree(dev->ep);
+
+ /* diable IRQ handler */
+@@ -2954,6 +3092,7 @@ static void langwell_udc_remove(struct pci_dev *pdev)
+
+ device_unregister(&dev->gadget.dev);
+ device_remove_file(&pdev->dev, &dev_attr_langwell_udc);
++ device_remove_file(&pdev->dev, &dev_attr_remote_wakeup);
+
+ #ifndef OTG_TRANSCEIVER
+ pci_set_drvdata(pdev, NULL);
+@@ -2976,9 +3115,9 @@ static int langwell_udc_probe(struct pci_dev *pdev,
+ struct langwell_udc *dev;
+ #ifndef OTG_TRANSCEIVER
+ unsigned long resource, len;
++ size_t size;
+ #endif
+ void __iomem *base = NULL;
+- size_t size;
+ int retval;
+
+ if (the_controller) {
+@@ -3049,7 +3188,15 @@ static int langwell_udc_probe(struct pci_dev *pdev,
+ goto error;
+ }
+
++ dev->has_sram = 1;
++ dev->got_sram = 0;
++ VDBG(dev, "dev->has_sram: %d\n", dev->has_sram);
++
+ #ifndef OTG_TRANSCEIVER
++ /* enable SRAM caching if detected */
++ if (dev->has_sram && !dev->got_sram)
++ sram_init(dev);
++
+ INFO(dev, "irq %d, io mem: 0x%08lx, len: 0x%08lx, pci mem 0x%p\n",
+ pdev->irq, resource, len, base);
+ /* enables bus-mastering for device dev */
+@@ -3094,6 +3241,7 @@ static int langwell_udc_probe(struct pci_dev *pdev,
+ goto error;
+ }
+
++#ifndef OTG_TRANSCEIVER
+ /* allocate device dQH memory */
+ size = dev->ep_max * sizeof(struct langwell_dqh);
+ VDBG(dev, "orig size = %d\n", size);
+@@ -3112,6 +3260,7 @@ static int langwell_udc_probe(struct pci_dev *pdev,
+ }
+ dev->ep_dqh_size = size;
+ VDBG(dev, "ep_dqh_size = %d\n", dev->ep_dqh_size);
++#endif
+
+ /* initialize ep0 status request structure */
+ dev->status_req = kzalloc(sizeof(struct langwell_request), GFP_KERNEL);
+@@ -3129,7 +3278,10 @@ static int langwell_udc_probe(struct pci_dev *pdev,
+ dev->resume_state = USB_STATE_NOTATTACHED;
+ dev->usb_state = USB_STATE_POWERED;
+ dev->ep0_dir = USB_DIR_OUT;
+- dev->remote_wakeup = 0; /* default to 0 on reset */
++
++ /* remote wakeup reset to 0 when the device is reset */
++ dev->remote_wakeup = 0;
++ dev->dev_status = 1 << USB_DEVICE_SELF_POWERED;
+
+ #ifndef OTG_TRANSCEIVER
+ /* reset device controller */
+@@ -3159,7 +3311,6 @@ static int langwell_udc_probe(struct pci_dev *pdev,
+ #ifndef OTG_TRANSCEIVER
+ /* reset ep0 dQH and endptctrl */
+ ep0_reset(dev);
+-#endif
+
+ /* create dTD dma_pool resource */
+ dev->dtd_pool = dma_pool_create("langwell_dtd",
+@@ -3172,6 +3323,7 @@ static int langwell_udc_probe(struct pci_dev *pdev,
+ retval = -ENOMEM;
+ goto error;
+ }
++#endif
+
+ /* done */
+ INFO(dev, "%s\n", driver_desc);
+@@ -3183,9 +3335,7 @@ static int langwell_udc_probe(struct pci_dev *pdev,
+ INFO(dev, "Support USB LPM: %s\n", dev->lpm ? "Yes" : "No");
+
+ VDBG(dev, "After langwell_udc_probe(), print all registers:\n");
+-#ifdef VERBOSE
+ print_all_registers(dev);
+-#endif
+
+ the_controller = dev;
+
+@@ -3197,9 +3347,15 @@ static int langwell_udc_probe(struct pci_dev *pdev,
+ if (retval)
+ goto error;
+
++ retval = device_create_file(&pdev->dev, &dev_attr_remote_wakeup);
++ if (retval)
++ goto error_attr1;
++
+ VDBG(dev, "<--- %s()\n", __func__);
+ return 0;
+
++error_attr1:
++ device_remove_file(&pdev->dev, &dev_attr_langwell_udc);
+ error:
+ if (dev) {
+ DBG(dev, "<--- %s()\n", __func__);
+@@ -3215,6 +3371,7 @@ static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state)
+ {
+ struct langwell_udc *dev = the_controller;
+ u32 devlc;
++ u8 devlc_byte2;
+
+ DBG(dev, "---> %s()\n", __func__);
+
+@@ -3226,10 +3383,21 @@ static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state)
+ free_irq(pdev->irq, dev);
+ dev->got_irq = 0;
+
+-
+ /* save PCI state */
+ pci_save_state(pdev);
+
++ /* free dTD dma_pool and dQH */
++ if (dev->dtd_pool)
++ dma_pool_destroy(dev->dtd_pool);
++
++ if (dev->ep_dqh)
++ dma_free_coherent(&pdev->dev, dev->ep_dqh_size,
++ dev->ep_dqh, dev->ep_dqh_dma);
++
++ /* release SRAM caching */
++ if (dev->has_sram && dev->got_sram)
++ sram_deinit(dev);
++
+ /* set device power state */
+ pci_set_power_state(pdev, PCI_D3hot);
+
+@@ -3237,7 +3405,11 @@ static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state)
+ devlc = readl(&dev->op_regs->devlc);
+ VDBG(dev, "devlc = 0x%08x\n", devlc);
+ devlc |= LPM_PHCD;
+- writel(devlc, &dev->op_regs->devlc);
++ /* FIXME: workaround for Langwell A1/A2/A3 sighting */
++ devlc_byte2 = (devlc >> 16) & 0xff;
++ writeb(devlc_byte2, (u8 *)&dev->op_regs->devlc + 2);
++ devlc = readl(&dev->op_regs->devlc);
++ VDBG(dev, "enter PHY low power suspend, devlc = 0x%08x\n", devlc);
+
+ DBG(dev, "<--- %s()\n", __func__);
+ return 0;
+@@ -3249,6 +3421,8 @@ static int langwell_udc_resume(struct pci_dev *pdev)
+ {
+ struct langwell_udc *dev = the_controller;
+ u32 devlc;
++ u8 devlc_byte2;
++ size_t size;
+
+ DBG(dev, "---> %s()\n", __func__);
+
+@@ -3256,19 +3430,55 @@ static int langwell_udc_resume(struct pci_dev *pdev)
+ devlc = readl(&dev->op_regs->devlc);
+ VDBG(dev, "devlc = 0x%08x\n", devlc);
+ devlc &= ~LPM_PHCD;
+- writel(devlc, &dev->op_regs->devlc);
++ /* FIXME: workaround for Langwell A1/A2/A3 sighting */
++ devlc_byte2 = (devlc >> 16) & 0xff;
++ writeb(devlc_byte2, (u8 *)&dev->op_regs->devlc + 2);
++ devlc = readl(&dev->op_regs->devlc);
++ VDBG(dev, "exit PHY low power suspend, devlc = 0x%08x\n", devlc);
+
+ /* set device D0 power state */
+ pci_set_power_state(pdev, PCI_D0);
+
++ /* enable SRAM caching if detected */
++ if (dev->has_sram && !dev->got_sram)
++ sram_init(dev);
++
++ /* allocate device dQH memory */
++ size = dev->ep_max * sizeof(struct langwell_dqh);
++ VDBG(dev, "orig size = %d\n", size);
++ if (size < DQH_ALIGNMENT)
++ size = DQH_ALIGNMENT;
++ else if ((size % DQH_ALIGNMENT) != 0) {
++ size += DQH_ALIGNMENT + 1;
++ size &= ~(DQH_ALIGNMENT - 1);
++ }
++ dev->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
++ &dev->ep_dqh_dma, GFP_KERNEL);
++ if (!dev->ep_dqh) {
++ ERROR(dev, "allocate dQH memory failed\n");
++ return -ENOMEM;
++ }
++ dev->ep_dqh_size = size;
++ VDBG(dev, "ep_dqh_size = %d\n", dev->ep_dqh_size);
++
++ /* create dTD dma_pool resource */
++ dev->dtd_pool = dma_pool_create("langwell_dtd",
++ &dev->pdev->dev,
++ sizeof(struct langwell_dtd),
++ DTD_ALIGNMENT,
++ DMA_BOUNDARY);
++
++ if (!dev->dtd_pool)
++ return -ENOMEM;
++
+ /* restore PCI state */
+ pci_restore_state(pdev);
+
+ /* enable IRQ handler */
+- if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED, driver_name, dev)
+- != 0) {
++ if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED,
++ driver_name, dev) != 0) {
+ ERROR(dev, "request interrupt %d failed\n", pdev->irq);
+- return -1;
++ return -EBUSY;
+ }
+ dev->got_irq = 1;
+
+diff --git a/drivers/usb/gadget/langwell_udc.h b/drivers/usb/gadget/langwell_udc.h
+index 9719934..323c574 100644
+--- a/drivers/usb/gadget/langwell_udc.h
++++ b/drivers/usb/gadget/langwell_udc.h
+@@ -174,7 +174,7 @@ enum lpm_state {
+ struct langwell_udc {
+ /* each pci device provides one gadget, several endpoints */
+ struct usb_gadget gadget;
+- spinlock_t lock; /* device lock */
++ spinlock_t lock; /* device lock */
+ struct langwell_ep *ep;
+ struct usb_gadget_driver *driver;
+ struct otg_transceiver *transceiver;
+@@ -199,7 +199,9 @@ struct langwell_udc {
+ vbus_active:1,
+ suspended:1,
+ stopped:1,
+- lpm:1; /* LPM capability */
++ lpm:1, /* LPM capability */
++ has_sram:1, /* SRAM caching */
++ got_sram:1;
+
+ /* pci state used to access those endpoints */
+ struct pci_dev *pdev;
+@@ -224,5 +226,12 @@ struct langwell_udc {
+
+ /* make sure release() is done */
+ struct completion *done;
++
++ /* for private SRAM caching */
++ unsigned int sram_addr;
++ unsigned int sram_size;
++
++ /* device status data for get_status request */
++ u16 dev_status;
+ };
+
+diff --git a/drivers/usb/gadget/still_image.c b/drivers/usb/gadget/still_image.c
+new file mode 100644
+index 0000000..94c17ce
+--- /dev/null
++++ b/drivers/usb/gadget/still_image.c
+@@ -0,0 +1,4566 @@
++/*
++ * still_image.c -- Lite USB Still Image Capture Gadget, for USB development
++ * Copyright (C) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++
++/*
++ * This code is partly based on:
++ * File-backed USB Storage Gadget driver, Copyright (C) 2003-2008 Alan Stern
++ *
++ *
++ * Refer to the USB Device Class Definition for Still Image Capture Device:
++ * http://www.usb.org/developers/devclass_docs/usb_still_img10.zip
++ *
++ *
++ * Supported PIMA 15740/PTP operations:
++ * - GetDeviceInfo
++ * - OpenSession
++ * - CloseSession
++ * - GetStorageIDs
++ * - GetStorageInfo
++ * - GetNumObjects
++ * - GetObjectHandles
++ * - GetObjectInfo
++ * - GetObject
++ * - DeleteObject
++ * - SendObjectInfo
++ * - SendObject
++ * - CopyObject
++ * - MoveObject
++ *
++ * Supported object formats:
++ * - EXIF/JPEG, JFIF
++ * - PNG
++ * - TIFF, TIFF/IT, TIFF/EP
++ * - BMP
++ * - GIF
++ * - Unknown image object
++ * - Undefined non-image object
++ *
++ * Supported PIMA 15740/PTP events:
++ * - N/A
++ *
++ * Storage filesystem type:
++ * - Generic hierarchical
++ *
++ *
++ * Module options:
++ * folder=foldername Default NULL, name of the backing folder
++ * vendor=0xVVVV Default 0x8087 (Intel), USB Vendor ID
++ * product=0xPPPP Default 0x811e, USB Product ID
++ * release=0xRRRR Override the USB release number (bcdDevice)
++ * buflen=N Default N=16384, buffer size used (will be
++ * rounded down to a multiple of
++ * PAGE_CACHE_SIZE)
++ *
++ * Sysfs attribute file:
++ * folder read/write the name of the backing folder
++ *
++ */
++
++
++#define VERBOSE_DEBUG
++
++#include <linux/blkdev.h>
++#include <linux/completion.h>
++#include <linux/dcache.h>
++#include <linux/delay.h>
++#include <linux/device.h>
++#include <linux/fcntl.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/vfs.h>
++#include <linux/namei.h>
++#include <linux/kref.h>
++#include <linux/kthread.h>
++#include <linux/limits.h>
++#include <linux/rwsem.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/string.h>
++#include <linux/freezer.h>
++#include <linux/utsname.h>
++#include <linux/sort.h>
++
++#include <linux/usb/ch9.h>
++#include <linux/usb/gadget.h>
++
++#include "gadget_chips.h"
++
++#include "usbstring.c"
++#include "config.c"
++#include "epautoconf.c"
++
++
++/*-------------------------------------------------------------------------*/
++
++#define DRIVER_DESC "Still Image Gadget"
++#define DRIVER_NAME "g_still_image"
++#define DRIVER_VERSION "Apr 30, 2010"
++
++
++static const char longname[] = DRIVER_DESC;
++static const char shortname[] = DRIVER_NAME;
++
++
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_AUTHOR("Xiaochen Shen <xiaochen.shen@intel.com>; "
++ "Hang Yuan <hang.yuan@intel.com>");
++MODULE_VERSION(DRIVER_VERSION);
++MODULE_LICENSE("GPL");
++
++
++/*
++ * Intel Corporation donates this product ID.
++ *
++ * DO NOT REUSE THESE IDs with any other driver
++ * instead: allocate your own, using normal USB-IF procedures.
++ */
++#define DRIVER_VENDOR_ID 0x8087
++#define DRIVER_PRODUCT_ID 0x811e
++
++
++/*-------------------------------------------------------------------------*/
++
++#define MDBG(fmt, args...) \
++ pr_debug(DRIVER_NAME ": " fmt, ## args)
++#define MINFO(fmt, args...) \
++ pr_info(DRIVER_NAME ": " fmt, ## args)
++
++#ifdef DEBUG
++#define DBG(d, fmt, args...) \
++ dev_dbg(&(d)->gadget->dev, fmt, ## args)
++#else
++#define DBG(dev, fmt, args...) \
++ do { } while (0)
++#endif /* DEBUG */
++
++
++#ifndef DEBUG
++#undef VERBOSE_DEBUG
++#endif /* !DEBUG */
++
++#ifdef VERBOSE_DEBUG
++#define VDBG DBG
++#else
++#define VDBG(sti, fmt, args...) \
++ do { } while (0)
++#endif /* VERBOSE_DEBUG */
++
++#define ERROR(d, fmt, args...) \
++ dev_err(&(d)->gadget->dev, fmt, ## args)
++#define WARNING(d, fmt, args...) \
++ dev_warn(&(d)->gadget->dev, fmt, ## args)
++#define INFO(d, fmt, args...) \
++ dev_info(&(d)->gadget->dev, fmt, ## args)
++
++
++/*-------------------------------------------------------------------------*/
++
++/* encapsulate the module parameter settings */
++
++static struct {
++ char *folder;
++ unsigned short vendor;
++ unsigned short product;
++ unsigned short release;
++ unsigned int buflen;
++} mod_data = { /* default values */
++ .vendor = DRIVER_VENDOR_ID,
++ .product = DRIVER_PRODUCT_ID,
++ .release = 0xffff, /* use controller chip type */
++ .buflen = 16384,
++};
++
++
++module_param_named(folder, mod_data.folder, charp, S_IRUGO);
++MODULE_PARM_DESC(folder, "name of the backing folder");
++
++module_param_named(vendor, mod_data.vendor, ushort, S_IRUGO);
++MODULE_PARM_DESC(vendor, "USB Vendor ID");
++
++module_param_named(product, mod_data.product, ushort, S_IRUGO);
++MODULE_PARM_DESC(product, "USB Product ID");
++
++module_param_named(release, mod_data.release, ushort, S_IRUGO);
++MODULE_PARM_DESC(release, "USB release number");
++
++module_param_named(buflen, mod_data.buflen, uint, S_IRUGO);
++MODULE_PARM_DESC(buflen, "I/O buffer size");
++
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * DESCRIPTORS ... most are static, but strings and (full) configuration
++ * descriptors are built on demand. Also the (static) config and interface
++ * descriptors are adjusted during sti_bind().
++ */
++#define STRING_MANUFACTURER 1
++#define STRING_PRODUCT 2
++#define STRING_SERIAL 3
++#define STRING_CONFIG 4
++#define STRING_INTERFACE 5
++
++
++/* only one configuration */
++#define CONFIG_VALUE 1
++
++static struct usb_device_descriptor
++device_desc = {
++ .bLength = sizeof device_desc,
++ .bDescriptorType = USB_DT_DEVICE,
++
++ .bcdUSB = cpu_to_le16(0x0200),
++ .bDeviceClass = USB_CLASS_PER_INTERFACE,
++
++ /* the next three values can be overridden by module parameters */
++ .idVendor = cpu_to_le16(DRIVER_VENDOR_ID),
++ .idProduct = cpu_to_le16(DRIVER_PRODUCT_ID),
++ .bcdDevice = cpu_to_le16(0xffff),
++
++ .iManufacturer = STRING_MANUFACTURER,
++ .iProduct = STRING_PRODUCT,
++ .iSerialNumber = STRING_SERIAL,
++ .bNumConfigurations = 1,
++};
++
++static struct usb_config_descriptor
++config_desc = {
++ .bLength = sizeof config_desc,
++ .bDescriptorType = USB_DT_CONFIG,
++
++ /* wTotalLength computed by usb_gadget_config_buf() */
++ .bNumInterfaces = 1,
++ .bConfigurationValue = CONFIG_VALUE,
++ .iConfiguration = STRING_CONFIG,
++ .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
++ .bMaxPower = CONFIG_USB_GADGET_VBUS_DRAW / 2,
++};
++
++static struct usb_otg_descriptor
++otg_desc = {
++ .bLength = sizeof(otg_desc),
++ .bDescriptorType = USB_DT_OTG,
++
++ .bmAttributes = USB_OTG_SRP,
++};
++
++
++/* one interface */
++static struct usb_interface_descriptor
++intf_desc = {
++ .bLength = sizeof intf_desc,
++ .bDescriptorType = USB_DT_INTERFACE,
++
++ .bNumEndpoints = 3, /* adjusted during sti_bind() */
++ .bInterfaceClass = USB_CLASS_STILL_IMAGE,
++ .bInterfaceSubClass = 0x01, /* Still Image Capture device */
++ .bInterfaceProtocol = 0x01, /* Bulk-only protocol */
++ .iInterface = STRING_INTERFACE,
++};
++
++
++/* two full-speed endpoint descriptors: bulk-in, bulk-out */
++
++static struct usb_endpoint_descriptor
++fs_bulk_in_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++
++ .bEndpointAddress = USB_DIR_IN,
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++ /* wMaxPacketSize set by autoconfiguration */
++};
++
++static struct usb_endpoint_descriptor
++fs_bulk_out_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++
++ .bEndpointAddress = USB_DIR_OUT,
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++ /* wMaxPacketSize set by autoconfiguration */
++};
++
++static struct usb_endpoint_descriptor
++fs_intr_in_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++
++ .bEndpointAddress = USB_DIR_IN,
++ .bmAttributes = USB_ENDPOINT_XFER_INT,
++ .wMaxPacketSize = cpu_to_le16(2),
++ .bInterval = 32, /* frames -> 32 ms */
++};
++
++static const struct usb_descriptor_header *fs_function[] = {
++ (struct usb_descriptor_header *) &otg_desc,
++ (struct usb_descriptor_header *) &intf_desc,
++ (struct usb_descriptor_header *) &fs_bulk_in_desc,
++ (struct usb_descriptor_header *) &fs_bulk_out_desc,
++ (struct usb_descriptor_header *) &fs_intr_in_desc,
++ NULL,
++};
++
++#define FS_FUNCTION_PRE_EP_ENTRIES 2
++
++
++/*
++ * USB 2.0 devices need to expose both high speed and full speed
++ * descriptors, unless they only run at full speed.
++ *
++ * That means alternate endpoint descriptors (bigger packets)
++ * and a "device qualifier" ... plus more construction options
++ * for the config descriptor.
++ */
++static struct usb_qualifier_descriptor
++dev_qualifier = {
++ .bLength = sizeof dev_qualifier,
++ .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
++
++ .bcdUSB = cpu_to_le16(0x0200),
++ .bDeviceClass = USB_CLASS_PER_INTERFACE,
++
++ .bNumConfigurations = 1,
++};
++
++static struct usb_endpoint_descriptor
++hs_bulk_in_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++
++ /* bEndpointAddress copied from fs_bulk_in_desc during sti_bind() */
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++ .wMaxPacketSize = cpu_to_le16(512),
++};
++
++static struct usb_endpoint_descriptor
++hs_bulk_out_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++
++ /* bEndpointAddress copied from fs_bulk_out_desc during sti_bind() */
++ .bmAttributes = USB_ENDPOINT_XFER_BULK,
++ .wMaxPacketSize = cpu_to_le16(512),
++ .bInterval = 1, /* NAK every 1 uframe */
++};
++
++static struct usb_endpoint_descriptor
++hs_intr_in_desc = {
++ .bLength = USB_DT_ENDPOINT_SIZE,
++ .bDescriptorType = USB_DT_ENDPOINT,
++
++ /* bEndpointAddress copied from fs_intr_in_desc during sti_bind() */
++ .bmAttributes = USB_ENDPOINT_XFER_INT,
++ .wMaxPacketSize = cpu_to_le16(2),
++ .bInterval = 9, /* 2**(9-1) = 256 uframes -> 32 ms */
++};
++
++static const struct usb_descriptor_header *hs_function[] = {
++ (struct usb_descriptor_header *) &otg_desc,
++ (struct usb_descriptor_header *) &intf_desc,
++ (struct usb_descriptor_header *) &hs_bulk_in_desc,
++ (struct usb_descriptor_header *) &hs_bulk_out_desc,
++ (struct usb_descriptor_header *) &hs_intr_in_desc,
++ NULL,
++};
++
++#define HS_FUNCTION_PRE_EP_ENTRIES 2
++
++
++/* maxpacket and other transfer characteristics vary by speed. */
++static struct usb_endpoint_descriptor *
++ep_desc(struct usb_gadget *g, struct usb_endpoint_descriptor *fs,
++ struct usb_endpoint_descriptor *hs)
++{
++ if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
++ return hs;
++
++ return fs;
++}
++
++static char manufacturer[64];
++static char serial[13];
++
++/* static strings, in UTF-8 (for simplicity we use only ASCII characters) */
++static struct usb_string strings[] = {
++ {STRING_MANUFACTURER, manufacturer},
++ {STRING_PRODUCT, longname},
++ {STRING_SERIAL, serial},
++ {STRING_CONFIG, "Self-powered"},
++ {STRING_INTERFACE, "Still Image"},
++ {}
++};
++
++static struct usb_gadget_strings stringtab = {
++ .language = 0x0409, /* en-us */
++ .strings = strings,
++};
++
++
++/*-------------------------------------------------------------------------*/
++
++/* protocol, driver and device data structures */
++
++/* big enough to hold the biggest descriptor */
++#define EP0_BUFSIZE 256
++
++#define DELAYED_STATUS (EP0_BUFSIZE + 999)
++
++/* number of buffers we will use. 2 is enough for double-buffering */
++#define NUM_BUFFERS 2
++
++/* PIMA 15740, operation and response datasets have at most 5 parameters */
++#define PARAM_NUM_MAX 5
++
++/* PIMA 15740 generic container head length */
++#define PIMA15740_CONTAINER_LEN 12
++
++/* storage id, description */
++#define STORAGE_ID 0x00010001
++#define STORAGE_DESCRIPTION "Built-in Storage"
++
++/* Still Image class-specific requests */
++#define STI_CANCEL_REQUEST 0x64
++#define STI_GET_EXTENDED_EVENT_DATA 0x65
++#define STI_DEVICE_RESET_REQUEST 0x66
++#define STI_GET_DEVICE_STATUS 0x67
++
++#define STI_CANCEL_REQUEST_LENGTH 0x0006
++#define STI_CANCEL_REQUEST_CODE 0x4001
++
++/* supported PIMA 15740 operations */
++#define PIMA15740_OP_GET_DEVICE_INFO 0x1001
++#define PIMA15740_OP_OPEN_SESSION 0x1002
++#define PIMA15740_OP_CLOSE_SESSION 0x1003
++#define PIMA15740_OP_GET_STORAGE_IDS 0x1004
++#define PIMA15740_OP_GET_STORAGE_INFO 0x1005
++#define PIMA15740_OP_GET_NUM_OBJECTS 0x1006
++#define PIMA15740_OP_GET_OBJECT_HANDLES 0x1007
++#define PIMA15740_OP_GET_OBJECT_INFO 0x1008
++#define PIMA15740_OP_GET_OBJECT 0x1009
++#define PIMA15740_OP_DELETE_OBJECT 0x100b
++#define PIMA15740_OP_SEND_OBJECT_INFO 0x100c
++#define PIMA15740_OP_SEND_OBJECT 0x100d
++#define PIMA15740_OP_MOVE_OBJECT 0x1019
++#define PIMA15740_OP_COPY_OBJECT 0x101a
++
++/* PIMA 15740 responses definition */
++#define PIMA15740_RES_UNDEFINED 0x2000
++#define PIMA15740_RES_OK 0x2001
++#define PIMA15740_RES_GENERAL_ERROR 0x2002
++#define PIMA15740_RES_SESSION_NOT_OPEN 0x2003
++#define PIMA15740_RES_INVALID_TRANS_ID 0x2004
++#define PIMA15740_RES_OPERATION_NOT_SUPPORTED 0x2005
++#define PIMA15740_RES_PARAMETER_NOT_SUPPORTED 0x2006
++#define PIMA15740_RES_INCOMPLETE_TRANSFER 0x2007
++#define PIMA15740_RES_INVALID_STORAGE_ID 0x2008
++#define PIMA15740_RES_INVALID_OBJECT_HANDLE 0x2009
++#define PIMA15740_RES_DEVICE_PROP_NOT_SUPPORTED 0x200a
++#define PIMA15740_RES_INVALID_OBJECT_FORMAT 0x200b
++#define PIMA15740_RES_STORE_FULL 0x200c
++#define PIMA15740_RES_OBJECT_WRITE_PROTECTED 0x200d
++#define PIMA15740_RES_STORE_READ_ONLY 0x200e
++#define PIMA15740_RES_ACCESS_DENIED 0x200f
++#define PIMA15740_RES_NO_THUMBNAIL_PRESENT 0x2010
++#define PIMA15740_RES_SELF_TEST_FAILED 0x2011
++#define PIMA15740_RES_PARTIAL_DELETION 0x2012
++#define PIMA15740_RES_STORE_NOT_AVAILABLE 0x2013
++#define PIMA15740_RES_SPEC_BY_FORMAT_UNSUP 0x2014
++#define PIMA15740_RES_NO_VALID_OBJECT_INFO 0x2015
++#define PIMA15740_RES_INVALID_CODE_FORMAT 0x2016
++#define PIMA15740_RES_UNKNOWN_VENDOR_CODE 0x2017
++#define PIMA15740_RES_CAPTURE_ALREADY_TERM 0x2018
++#define PIMA15740_RES_DEVICE_BUSY 0x2019
++#define PIMA15740_RES_INVALID_PARENT_OBJECT 0x201a
++#define PIMA15740_RES_INVALID_DEV_PROP_FORMAT 0x201b
++#define PIMA15740_RES_INVALID_DEV_PROP_VALUE 0x201c
++#define PIMA15740_RES_INVALID_PARAMETER 0x201d
++#define PIMA15740_RES_SESSION_ALREADY_OPEN 0x201e
++#define PIMA15740_RES_TRANSACTION_CANCELLED 0x201f
++#define PIMA15740_RES_SPEC_OF_DESTINATION_UNSUP 0x2020
++
++/* PIMA 15740 functional mode */
++#define PIMA15740_STANDARD_MODE 0x0000
++#define PIMA15740_SLEEP_STATE_MODE 0x0001
++
++/* PIMA 15740 storage type */
++#define PIMA15740_STOR_UNDEFINED 0x0000
++#define PIMA15740_STOR_FIXED_ROM 0x0001
++#define PIMA15740_STOR_REMOVABLE_ROM 0x0002
++#define PIMA15740_STOR_FIXED_RAM 0x0003
++#define PIMA15740_STOR_REMOVABLE_RAM 0x0004
++
++/* PIMA 15740 filesystem type */
++#define PIMA15740_FS_UNDEFINED 0x0000
++#define PIMA15740_FS_GENERIC_FLAT 0x0001
++#define PIMA15740_FS_HIERARCHICAL 0x0002
++#define PIMA15740_FS_DCF 0x0003
++
++/* PIMA 15740 access capability */
++#define PIMA15740_ACCESS_CAP_RW 0x0000
++#define PIMA15740_ACCESS_CAP_RO_WO_DELITION 0x0001
++#define PIMA15740_ACCESS_CAP_RO_W_DELITION 0x0002
++
++/* PIMA 15740 object format codes */
++#define PIMA15740_FMT_A_UNDEFINED 0x3000
++#define PIMA15740_FMT_A_ASSOCIATION 0x3001
++#define PIMA15740_FMT_I_UNDEFINED 0x3800
++#define PIMA15740_FMT_I_EXIF_JPEG 0x3801
++#define PIMA15740_FMT_I_TIFF_EP 0x3802
++#define PIMA15740_FMT_I_FLASHPIX 0x3803
++#define PIMA15740_FMT_I_BMP 0x3804
++#define PIMA15740_FMT_I_CIFF 0x3805
++#define PIMA15740_FMT_I_GIF 0x3807
++#define PIMA15740_FMT_I_JFIF 0x3808
++#define PIMA15740_FMT_I_PCD 0x3809
++#define PIMA15740_FMT_I_PICT 0x380a
++#define PIMA15740_FMT_I_PNG 0x380b
++#define PIMA15740_FMT_I_TIFF 0x380d
++#define PIMA15740_FMT_I_TIFF_IT 0x380e
++#define PIMA15740_FMT_I_JP2 0x380f
++#define PIMA15740_FMT_I_JPX 0x3810
++
++/* PIMA 15740 object protection status */
++#define PIMA15740_OBJECT_NO_PROTECTION 0x0000
++#define PIMA15740_OBJECT_READ_ONLY 0x0001
++
++/* PIMA 15740 object association type */
++#define PIMA15740_AS_UNDEFINED 0x0000
++#define PIMA15740_AS_GENERIC_FOLDER 0x0001
++
++
++static const char storage_desc[] = STORAGE_DESCRIPTION;
++static const char device_version[] = DRIVER_VERSION;
++
++
++/*-------------------------------------------------------------------------*/
++
++/* PIMA 15740 data structure */
++
++enum pima15740_container_type {
++ TYPE_UNDEFINED = 0,
++ TYPE_COMMAND_BLOCK = 1,
++ TYPE_DATA_BLOCK = 2,
++ TYPE_RESPONSE_BLOCK = 3,
++ TYPE_EVENT_BLOCK = 4,
++};
++
++/* PIMA15740 generic container structure, little endian */
++struct pima15740_container {
++ __le32 container_len;
++ __le16 container_type;
++ __le16 code;
++ __le32 transaction_id;
++} __attribute__ ((packed));
++
++/* data stage of Get Extended Event Data */
++struct sti_ext_event {
++ u16 event_code;
++ u32 transaction_id;
++ u16 param_num;
++} __attribute__ ((packed));
++
++/* data stage of Get Device Status Data */
++struct sti_dev_status {
++ u16 wlength;
++ u16 code;
++} __attribute__ ((packed));
++
++
++/* DeviceInfo Dataset */
++struct pima15740_device_info {
++ u16 standard_version;
++ u32 vendor_extension_id;
++ u16 vendor_extension_version;
++ u8 vendor_extension_desc_len;
++ u8 vendor_extension_desc[0];
++ u16 functional_mode;
++ u32 operations_supported_count;
++ u16 operations_supported[14];
++ u32 events_supported_count;
++ u16 events_supported[0];
++ u32 device_properties_count;
++ u16 device_properties_supported[0];
++ u32 capture_formats_count;
++ u16 capture_formats[0];
++ u32 image_formats_count;
++ u16 image_formats[10];
++ u8 manufacturer_len;
++ u8 manufacturer[sizeof(manufacturer) * 2];
++ u8 model_len;
++ u8 model[sizeof(longname) * 2];
++ u8 device_version_len;
++ u8 device_version[sizeof(device_version) * 2];
++ u8 serial_number_len;
++ u8 serial_number[sizeof(serial) * 2];
++} __attribute__ ((packed));
++
++static struct pima15740_device_info sti_device_info = {
++ .standard_version = 100,
++ .vendor_extension_id = 0,
++ .vendor_extension_version = 0,
++ .vendor_extension_desc_len = 0,
++ .functional_mode = PIMA15740_STANDARD_MODE,
++ .operations_supported_count = 14,
++ .operations_supported = {
++ cpu_to_le16(PIMA15740_OP_GET_DEVICE_INFO),
++ cpu_to_le16(PIMA15740_OP_OPEN_SESSION),
++ cpu_to_le16(PIMA15740_OP_CLOSE_SESSION),
++ cpu_to_le16(PIMA15740_OP_GET_STORAGE_IDS),
++ cpu_to_le16(PIMA15740_OP_GET_STORAGE_INFO),
++ cpu_to_le16(PIMA15740_OP_GET_NUM_OBJECTS),
++ cpu_to_le16(PIMA15740_OP_GET_OBJECT_HANDLES),
++ cpu_to_le16(PIMA15740_OP_GET_OBJECT_INFO),
++ cpu_to_le16(PIMA15740_OP_GET_OBJECT),
++ cpu_to_le16(PIMA15740_OP_DELETE_OBJECT),
++ cpu_to_le16(PIMA15740_OP_SEND_OBJECT_INFO),
++ cpu_to_le16(PIMA15740_OP_SEND_OBJECT),
++ cpu_to_le16(PIMA15740_OP_COPY_OBJECT),
++ cpu_to_le16(PIMA15740_OP_MOVE_OBJECT)
++ },
++ .events_supported_count = 0,
++ .device_properties_count = 0,
++ .capture_formats_count = 0,
++ .image_formats_count = 10,
++ .image_formats = {
++ cpu_to_le16(PIMA15740_FMT_I_EXIF_JPEG),
++ cpu_to_le16(PIMA15740_FMT_I_JFIF),
++ cpu_to_le16(PIMA15740_FMT_I_PNG),
++ cpu_to_le16(PIMA15740_FMT_I_TIFF),
++ cpu_to_le16(PIMA15740_FMT_I_TIFF_EP),
++ cpu_to_le16(PIMA15740_FMT_I_TIFF_IT),
++ cpu_to_le16(PIMA15740_FMT_I_BMP),
++ cpu_to_le16(PIMA15740_FMT_I_GIF),
++ cpu_to_le16(PIMA15740_FMT_I_UNDEFINED),
++ cpu_to_le16(PIMA15740_FMT_A_UNDEFINED)
++ },
++ /* others will be filled in sti_bind() */
++};
++
++
++/* StorageInfo Dataset */
++struct pima15740_storage_info {
++ u16 storage_type;
++ u16 filesystem_type;
++ u16 access_capability;
++ u64 max_capacity;
++ u64 free_space_in_bytes;
++ u32 free_space_in_images;
++ u8 storage_desc_len;
++ u8 storage_desc[sizeof(storage_desc) * 2];
++ u8 volume_label_len;
++ u8 volume_label[0];
++} __attribute__ ((packed));
++
++static struct pima15740_storage_info sti_storage_info = {
++ .storage_type = cpu_to_le16(PIMA15740_STOR_FIXED_RAM),
++ .filesystem_type = cpu_to_le16(PIMA15740_FS_HIERARCHICAL),
++ .access_capability = cpu_to_le16(PIMA15740_ACCESS_CAP_RW),
++ .storage_desc_len = sizeof(storage_desc),
++ .volume_label_len = 0,
++ /* others will be filled later */
++};
++
++
++/* ObjectInfo Dataset */
++struct pima15740_object_info {
++ u32 storage_id;
++ u16 object_format;
++ u16 protection_status;
++ u32 object_compressed_size;
++ u16 thumb_format;
++ u32 thumb_compressed_size;
++ u32 thumb_pix_width;
++ u32 thumb_pix_height;
++ u32 image_pix_width;
++ u32 image_pix_height;
++ u32 image_bit_depth;
++ u32 parent_object;
++ u16 association_type;
++ u32 association_desc;
++ u32 sequence_number;
++ /* filename, capture date, modification date, keywords */
++ u8 obj_strings[]; /* size will be fixed later */
++} __attribute__ ((packed));
++
++/* object list element with object info data */
++struct sti_object {
++ struct list_head list;
++ u32 obj_handle;
++ u32 parent_object;
++ u32 storage_id;
++ int is_dir;
++ int send_valid;
++ size_t obj_info_size;
++ char filename[NAME_MAX];
++ char full_path[PATH_MAX];
++ struct pima15740_object_info obj_info;
++};
++
++
++/*-------------------------------------------------------------------------*/
++
++/* device data structure */
++
++enum sti_buffer_state {
++ BUF_STATE_EMPTY = 0,
++ BUF_STATE_FULL,
++ BUF_STATE_BUSY
++};
++
++struct sti_buffhd {
++ void *buf;
++ enum sti_buffer_state state;
++ struct sti_buffhd *next;
++ unsigned int bulk_out_intended_length;
++ struct usb_request *inreq;
++ int inreq_busy;
++ struct usb_request *outreq;
++ int outreq_busy;
++};
++
++enum sti_state {
++ STI_STATE_COMMAND_PHASE = -10, /* this one isn't used anywhere */
++ STI_STATE_DATA_PHASE,
++ STI_STATE_STATUS_PHASE,
++
++ STI_STATE_IDLE = 0,
++ STI_STATE_ABORT_BULK_OUT,
++ STI_STATE_CANCEL,
++ STI_STATE_RESET,
++ STI_STATE_INTERFACE_CHANGE,
++ STI_STATE_CONFIG_CHANGE,
++ STI_STATE_DISCONNECT,
++ STI_STATE_EXIT,
++ STI_STATE_TERMINATED
++};
++
++enum data_direction {
++ DATA_DIR_UNKNOWN = 0,
++ DATA_DIR_FROM_HOST,
++ DATA_DIR_TO_HOST,
++ DATA_DIR_NONE
++};
++
++struct sti_dev {
++ /* lock protects: device, req, endpoints states */
++ spinlock_t lock;
++
++ /* filesem protects: backing folder in use */
++ struct rw_semaphore filesem;
++
++ struct usb_gadget *gadget;
++
++ /* reference counting: wait until released */
++ struct kref ref;
++
++ /* handy copy of gadget->ep0 */
++ struct usb_ep *ep0;
++
++ /* for control responses */
++ struct usb_request *ep0req;
++ unsigned int ep0_req_tag;
++ const char *ep0req_name;
++
++ /* for interrupt responses */
++ struct usb_request *intreq;
++ int intreq_busy;
++ struct sti_buffhd *intr_buffhd;
++
++ /* for exception handling */
++ enum sti_state state;
++ unsigned int exception_req_tag;
++
++ unsigned int bulk_out_maxpacket;
++ u8 config, new_config;
++
++ unsigned int running:1;
++ unsigned int bulk_in_enabled:1;
++ unsigned int bulk_out_enabled:1;
++ unsigned int intr_in_enabled:1;
++ unsigned int registered:1;
++ unsigned int session_open:1;
++
++ unsigned long atomic_bitflags;
++#define REGISTERED 0
++#define CLEAR_BULK_HALTS 1
++#define SUSPENDED 2
++
++ struct usb_ep *bulk_in;
++ struct usb_ep *bulk_out;
++ struct usb_ep *intr_in;
++
++ struct sti_buffhd *next_buffhd_to_fill;
++ struct sti_buffhd *next_buffhd_to_drain;
++ struct sti_buffhd buffhds[NUM_BUFFERS];
++
++ int thread_wakeup_needed;
++ struct completion thread_notifier;
++ struct task_struct *thread_task;
++
++ __le32 container_len;
++ __le16 container_type;
++ __le16 code;
++ __le32 transaction_id;
++
++ __le16 response_code;
++
++ u32 ops_params[PARAM_NUM_MAX];
++ u32 session_id;
++ u32 storage_id;
++ u32 object_num;
++ u32 sub_object_num;
++
++ char root_path[PATH_MAX];
++ struct file *root_filp;
++ struct list_head obj_list;
++ struct list_head tmp_obj_list;
++
++ struct sti_ext_event ext_event_data;
++ struct sti_dev_status status_data;
++
++ struct device dev;
++};
++
++
++/*-------------------------------------------------------------------------*/
++
++#define backing_folder_is_open(sti) ((sti)->root_filp != NULL)
++
++
++typedef void (*sti_routine_t)(struct sti_dev *);
++
++static int exception_in_progress(struct sti_dev *sti)
++{
++ return (sti->state > STI_STATE_IDLE);
++}
++
++/* make bulk-out requests be divisible by the maxpacket size */
++static void set_bulk_out_req_length(struct sti_dev *sti,
++ struct sti_buffhd *bh, unsigned int length)
++{
++ unsigned int rem;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ bh->bulk_out_intended_length = length;
++ rem = length % sti->bulk_out_maxpacket;
++ if (rem > 0)
++ length += sti->bulk_out_maxpacket - rem;
++ bh->outreq->length = length;
++
++ VDBG(sti, "<--- %s()\n", __func__);
++}
++
++
++/* global variables */
++static struct sti_dev *the_sti;
++static struct usb_gadget_driver sti_driver;
++
++static void close_backing_folder(struct sti_dev *sti);
++
++
++/*-------------------------------------------------------------------------*/
++
++#ifdef VERBOSE_DEBUG
++
++static void dump_msg(struct sti_dev *sti, const char *label,
++ const u8 *buf, unsigned int length)
++{
++ if (length < 512) {
++ DBG(sti, "%s, length %u:\n", label, length);
++ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
++ 16, 1, buf, length, 0);
++ }
++}
++
++static void dump_cb(struct sti_dev *sti)
++{
++ print_hex_dump(KERN_DEBUG, "PIMA15740 Command Block: ",
++ DUMP_PREFIX_NONE, 16, 1, &sti->container_len,
++ PIMA15740_CONTAINER_LEN, 0);
++}
++
++static void dump_device_info(struct sti_dev *sti)
++{
++ int i;
++
++ VDBG(sti, "DeviceInfo Dataset:\n");
++ VDBG(sti, "\tstandard_version: %u\n",
++ sti_device_info.standard_version);
++ VDBG(sti, "\tvendor_extension_id: %u\n",
++ sti_device_info.vendor_extension_id);
++ VDBG(sti, "\tvendor_extension_version: %u\n",
++ sti_device_info.vendor_extension_version);
++ VDBG(sti, "\tvendor_extension_desc_len: %u\n",
++ sti_device_info.vendor_extension_desc_len);
++ VDBG(sti, "\tfunctional_mode: 0x%04x\n",
++ sti_device_info.functional_mode);
++ VDBG(sti, "\toperations_supported_count: %u\n",
++ sti_device_info.operations_supported_count);
++ VDBG(sti, "\toperations_supported:\n");
++ for (i = 0; i < sti_device_info.operations_supported_count; i++)
++ VDBG(sti, "\t\t0x%04x\n",
++ sti_device_info.operations_supported[i]);
++ VDBG(sti, "\tevents_supported_count: %u\n",
++ sti_device_info.events_supported_count);
++ VDBG(sti, "\tdevice_properties_count: %u\n",
++ sti_device_info.device_properties_count);
++ VDBG(sti, "\tcapture_formats_count: %u\n",
++ sti_device_info.capture_formats_count);
++ VDBG(sti, "\timage_formats_count: %u\n",
++ sti_device_info.image_formats_count);
++ VDBG(sti, "\tmanufacturer_len: %u\n",
++ sti_device_info.manufacturer_len);
++ VDBG(sti, "\tmanufacturer: %s\n", manufacturer);
++ VDBG(sti, "\tmodel_len: %u\n",
++ sti_device_info.model_len);
++ VDBG(sti, "\tmodel: %s\n", longname);
++ VDBG(sti, "\tdevice_version_len: %u\n",
++ sti_device_info.device_version_len);
++ VDBG(sti, "\tdevice_version: %s\n", device_version);
++ VDBG(sti, "\tserial_number_len: %u\n",
++ sti_device_info.serial_number_len);
++ VDBG(sti, "\tserial_number: %s\n", serial);
++}
++
++static void dump_storage_info(struct sti_dev *sti)
++{
++ VDBG(sti, "StorageInfo Dataset:\n");
++ VDBG(sti, "\tstorage_type: 0x%04x\n", sti_storage_info.storage_type);
++ VDBG(sti, "\tfilesystem_type: 0x%04x\n",
++ sti_storage_info.filesystem_type);
++ VDBG(sti, "\taccess_capability: 0x%04x\n",
++ sti_storage_info.access_capability);
++ VDBG(sti, "\tmax_capacity: %llu\n", sti_storage_info.max_capacity);
++ VDBG(sti, "\tfree_space_in_bytes: %llu\n",
++ sti_storage_info.free_space_in_bytes);
++ VDBG(sti, "\tfree_space_in_images: %u\n",
++ sti_storage_info.free_space_in_images);
++ VDBG(sti, "\tstorage_desc_len: %u\n",
++ sti_storage_info.storage_desc_len);
++ VDBG(sti, "\tstorage_desc: %s\n", storage_desc);
++ VDBG(sti, "\tvolume_label_len: %u\n",
++ sti_storage_info.volume_label_len);
++}
++
++static void dump_object_info(struct sti_dev *sti, struct sti_object *obj)
++{
++ u8 filename_len;
++
++ VDBG(sti, "ObjectInfo Dataset:\n");
++ VDBG(sti, "\tstorage_id: 0x%08x\n", obj->obj_info.storage_id);
++ VDBG(sti, "\tobject_format: 0x%04x\n", obj->obj_info.object_format);
++ VDBG(sti, "\tprotection_status: 0x%04x\n",
++ obj->obj_info.protection_status);
++ VDBG(sti, "\tobject_compressed_size: %u\n",
++ obj->obj_info.object_compressed_size);
++ VDBG(sti, "\tthumb_format: %u\n", obj->obj_info.thumb_format);
++ VDBG(sti, "\tthumb_compressed_size: %u\n",
++ obj->obj_info.thumb_compressed_size);
++ VDBG(sti, "\tthumb_pix_width: %u\n",
++ obj->obj_info.thumb_pix_width);
++ VDBG(sti, "\tthumb_pix_height: %u\n",
++ obj->obj_info.thumb_pix_height);
++ VDBG(sti, "\timage_pix_width: %u\n",
++ obj->obj_info.image_pix_width);
++ VDBG(sti, "\timage_pix_height: %u\n",
++ obj->obj_info.image_pix_height);
++ VDBG(sti, "\timage_bit_depth: %u\n",
++ obj->obj_info.image_bit_depth);
++ VDBG(sti, "\tparent_object: 0x%08x\n",
++ obj->obj_info.parent_object);
++ VDBG(sti, "\tassociation_type: 0x%04x\n",
++ obj->obj_info.association_type);
++ VDBG(sti, "\tassociation_desc: 0x%08x\n",
++ obj->obj_info.association_desc);
++ VDBG(sti, "\tsequence_number: 0x%08x\n",
++ obj->obj_info.sequence_number);
++ VDBG(sti, "\tfilename_len: %u\n", obj->obj_info.obj_strings[0]);
++ filename_len = obj->obj_info.obj_strings[0];
++ VDBG(sti, "\tfilename: %s\n", obj->filename);
++ VDBG(sti, "\tcapture_date_len: %u\n",
++ obj->obj_info.obj_strings[filename_len * 2 + 1]);
++ VDBG(sti, "\tmodification_date_len: %u\n",
++ obj->obj_info.obj_strings[filename_len * 2 + 2]);
++ VDBG(sti, "\tkeywords_len: %u\n",
++ obj->obj_info.obj_strings[filename_len * 2 + 3]);
++}
++
++#else
++
++static void dump_msg(struct sti_dev *sti, const char *label,
++ const u8 *buf, unsigned int length)
++{}
++
++static void dump_cb(struct sti_dev *sti)
++{}
++
++static void dump_device_info(struct sti_dev *sti)
++{}
++
++static void dump_storage_info(struct sti_dev *sti)
++{}
++
++static void dump_object_info(struct sti_dev *sti, struct sti_object *obj)
++{}
++
++#endif /* VERBOSE_DEBUG */
++
++
++/*-------------------------------------------------------------------------*/
++
++
++
++/*
++ * Config descriptors must agree with the code that sets configurations
++ * and with code managing interfaces and their altsettings. They must
++ * also handle different speeds and other-speed requests.
++ */
++static int populate_config_buf(struct usb_gadget *gadget,
++ u8 *buf, u8 type, unsigned index)
++{
++ enum usb_device_speed speed = gadget->speed;
++ int len;
++ const struct usb_descriptor_header **function;
++
++ if (index > 0)
++ return -EINVAL;
++
++ if (gadget_is_dualspeed(gadget) && type == USB_DT_OTHER_SPEED_CONFIG)
++ speed = (USB_SPEED_FULL + USB_SPEED_HIGH) - speed;
++ if (gadget_is_dualspeed(gadget) && speed == USB_SPEED_HIGH)
++ function = hs_function;
++ else
++ function = fs_function;
++
++ /* for now, don't advertise srp-only devices */
++ if (!gadget_is_otg(gadget))
++ function++;
++
++ len = usb_gadget_config_buf(&config_desc, buf, EP0_BUFSIZE, function);
++ ((struct usb_config_descriptor *) buf)->bDescriptorType = type;
++
++ return len;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++/* these routines may be called in process context or in_irq */
++
++/* caller must hold sti->lock */
++static void wakeup_thread(struct sti_dev *sti)
++{
++ VDBG(sti, "---> %s()\n", __func__);
++
++ /* tell the main thread that something has happened */
++ sti->thread_wakeup_needed = 1;
++ if (sti->thread_task)
++ wake_up_process(sti->thread_task);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++}
++
++
++static void raise_exception(struct sti_dev *sti, enum sti_state new_state)
++{
++ unsigned long flags;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ /*
++ * Do nothing if a higher-priority exception is already in progress.
++ * If a lower-or-equal priority exception is in progress, preempt it
++ * and notify the main thread by sending it a signal.
++ */
++ spin_lock_irqsave(&sti->lock, flags);
++ if (sti->state <= new_state) {
++ sti->exception_req_tag = sti->ep0_req_tag;
++ sti->state = new_state;
++ if (sti->thread_task)
++ send_sig_info(SIGUSR1, SEND_SIG_FORCED,
++ sti->thread_task);
++ }
++ spin_unlock_irqrestore(&sti->lock, flags);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++/*
++ * The disconnect callback and ep0 routines. These always run in_irq,
++ * except that ep0_queue() is called in the main thread to acknowledge
++ * completion of various requests: set config, set interface, and
++ * Bulk-only device reset.
++ */
++
++static void sti_disconnect(struct usb_gadget *gadget)
++{
++ struct sti_dev *sti = get_gadget_data(gadget);
++ VDBG(sti, "---> %s()\n", __func__);
++
++ DBG(sti, "disconnect or port reset\n");
++ raise_exception(sti, STI_STATE_DISCONNECT);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++}
++
++static int ep0_queue(struct sti_dev *sti)
++{
++ int rc;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ rc = usb_ep_queue(sti->ep0, sti->ep0req, GFP_ATOMIC);
++ if (rc != 0 && rc != -ESHUTDOWN) {
++ /* we can't do much more than wait for a reset */
++ WARNING(sti, "error in submission: %s --> %d\n",
++ sti->ep0->name, rc);
++ }
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++static void ep0_complete(struct usb_ep *ep, struct usb_request *req)
++{
++ struct sti_dev *sti = ep->driver_data;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ if (req->actual > 0)
++ dump_msg(sti, sti->ep0req_name, req->buf, req->actual);
++
++ if (req->status || req->actual != req->length)
++ VDBG(sti, "%s --> %d, %u/%u\n", __func__,
++ req->status, req->actual, req->length);
++
++ /* request was cancelled */
++ if (req->status == -ECONNRESET)
++ usb_ep_fifo_flush(ep);
++
++ if (req->status == 0 && req->context)
++ ((sti_routine_t) (req->context))(sti);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++/* endpoint completion handlers, always run in_irq */
++
++static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
++{
++ struct sti_dev *sti = ep->driver_data;
++ struct sti_buffhd *bh = req->context;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ if (req->status || req->actual != req->length)
++ VDBG(sti, "%s --> %d, %u/%u\n", __func__,
++ req->status, req->actual, req->length);
++ /* request was cancelled */
++ if (req->status == -ECONNRESET)
++ usb_ep_fifo_flush(ep);
++
++ /* hold the lock while we update the request and buffer states */
++ smp_wmb();
++ spin_lock(&sti->lock);
++ bh->inreq_busy = 0;
++ bh->state = BUF_STATE_EMPTY;
++ wakeup_thread(sti);
++ spin_unlock(&sti->lock);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++}
++
++static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
++{
++ struct sti_dev *sti = ep->driver_data;
++ struct sti_buffhd *bh = req->context;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ dump_msg(sti, "bulk-out", req->buf, req->actual);
++ if (req->status || req->actual != bh->bulk_out_intended_length)
++ VDBG(sti, "%s --> %d, %u/%u\n", __func__,
++ req->status, req->actual,
++ bh->bulk_out_intended_length);
++
++ /* request was cancelled */
++ if (req->status == -ECONNRESET)
++ usb_ep_fifo_flush(ep);
++
++ /* hold the lock while we update the request and buffer states */
++ smp_wmb();
++ spin_lock(&sti->lock);
++ bh->outreq_busy = 0;
++ bh->state = BUF_STATE_FULL;
++ wakeup_thread(sti);
++ spin_unlock(&sti->lock);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static int sti_set_halt(struct sti_dev *sti, struct usb_ep *ep)
++{
++ const char *name;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ if (ep == sti->bulk_in)
++ name = "bulk-in";
++ else if (ep == sti->bulk_out)
++ name = "bulk-out";
++ else
++ name = ep->name;
++
++ DBG(sti, "%s set halt\n", name);
++ VDBG(sti, "<--- %s()\n", __func__);
++
++ return usb_ep_set_halt(ep);
++}
++
++
++static void received_cancel_request(struct sti_dev *sti)
++{
++ struct usb_request *req = sti->ep0req;
++ u16 cancel_code;
++ u32 trans_id;
++ int rc;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ /* error in command transfer */
++ if (req->status || req->length != req->actual) {
++ /* wait for reset */
++ sti_set_halt(sti, sti->ep0);
++ return;
++ }
++
++ VDBG(sti, "receive cancel request\n");
++
++ if (!req->buf)
++ return;
++
++ cancel_code = get_unaligned_le16(req->buf);
++ if (cancel_code != cpu_to_le16(STI_CANCEL_REQUEST_CODE)) {
++ VDBG(sti, "invalid cancel_code: 0x%04x\n", cancel_code);
++ goto out;
++ }
++
++ trans_id = get_unaligned_le32(req->buf + 2);
++ if (trans_id != sti->transaction_id) {
++ VDBG(sti, "invalid trans_id:0x%04x\n", trans_id);
++ goto out;
++ }
++
++ /* stall bulk endpoints */
++ sti_set_halt(sti, sti->bulk_out);
++
++ rc = sti_set_halt(sti, sti->bulk_in);
++ if (rc == -EAGAIN)
++ VDBG(sti, "delayed bulk-in endpoint halt\n");
++
++ sti->response_code = PIMA15740_RES_DEVICE_BUSY;
++out:
++ raise_exception(sti, STI_STATE_CANCEL);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++}
++
++
++/* ep0 class-specific request handlers, always run in_irq */
++static int class_setup_req(struct sti_dev *sti,
++ const struct usb_ctrlrequest *ctrl)
++{
++ struct usb_request *req = sti->ep0req;
++ int value = -EOPNOTSUPP;
++ u16 w_index = le16_to_cpu(ctrl->wIndex);
++ u16 w_value = le16_to_cpu(ctrl->wValue);
++ u16 w_length = le16_to_cpu(ctrl->wLength);
++ VDBG(sti, "---> %s()\n", __func__);
++
++ if (!sti->config)
++ return value;
++
++ /* handle class-specific requests */
++ switch (ctrl->bRequest) {
++
++ case STI_CANCEL_REQUEST:
++ if (ctrl->bRequestType != (USB_DIR_OUT |
++ USB_TYPE_CLASS | USB_RECIP_INTERFACE))
++ break;
++ if (w_index != 0 || w_value != 0 || w_length != 6) {
++ value = -EDOM;
++ break;
++ }
++
++ DBG(sti, "cancel request\n");
++
++ value = w_length;
++ sti->ep0req->context = received_cancel_request;
++ break;
++
++ case STI_GET_EXTENDED_EVENT_DATA:
++ /* asynchronous events by interrupt endpoint */
++ if (ctrl->bRequestType != (USB_DIR_IN |
++ USB_TYPE_CLASS | USB_RECIP_INTERFACE))
++ break;
++ if (w_index != 0 || w_value != 0) {
++ value = -EDOM;
++ break;
++ }
++
++ DBG(sti, "get extended event data\n");
++
++ sti->ext_event_data.event_code = PIMA15740_RES_OK;
++ sti->ext_event_data.transaction_id = sti->transaction_id;
++ sti->ext_event_data.param_num = 0;
++
++ value = min_t(unsigned, w_length,
++ sizeof(struct sti_ext_event));
++ memcpy(req->buf, &sti->ext_event_data, value);
++ break;
++
++ case STI_DEVICE_RESET_REQUEST:
++ if (ctrl->bRequestType != (USB_DIR_OUT |
++ USB_TYPE_CLASS | USB_RECIP_INTERFACE))
++ break;
++ if (w_index != 0 || w_value != 0 || w_length != 0) {
++ value = -EDOM;
++ break;
++ }
++
++ /* Raise an exception to stop the current operation
++ * and reinitialize our state. */
++ DBG(sti, "device reset request\n");
++
++ sti->response_code = PIMA15740_RES_OK;
++ sti->session_open = 1;
++
++ raise_exception(sti, STI_STATE_RESET);
++ value = DELAYED_STATUS;
++ break;
++
++ case STI_GET_DEVICE_STATUS:
++ if (ctrl->bRequestType != (USB_DIR_IN |
++ USB_TYPE_CLASS | USB_RECIP_INTERFACE))
++ break;
++ if (w_index != 0 || w_value != 0) {
++ value = -EDOM;
++ break;
++ }
++
++ DBG(sti, "get device status\n");
++ sti->status_data.wlength = 4;
++ sti->status_data.code = sti->response_code;
++
++ value = min_t(unsigned, w_length,
++ sizeof(struct sti_dev_status));
++ memcpy(req->buf, &sti->status_data, value);
++ break;
++
++ default:
++ DBG(sti, "unknown class-specific control req "
++ "%02x.%02x v%04x i%04x l%u\n",
++ ctrl->bRequestType, ctrl->bRequest,
++ le16_to_cpu(ctrl->wValue), w_index, w_length);
++ break;
++ }
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return value;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++/* ep0 standard request handlers, always run in_irq */
++
++static int standard_setup_req(struct sti_dev *sti,
++ const struct usb_ctrlrequest *ctrl)
++{
++ struct usb_request *req = sti->ep0req;
++ int value = -EOPNOTSUPP;
++ u16 w_index = le16_to_cpu(ctrl->wIndex);
++ u16 w_value = le16_to_cpu(ctrl->wValue);
++ VDBG(sti, "---> %s()\n", __func__);
++
++ /* usually this just stores reply data in the pre-allocated ep0 buffer,
++ * but config change events will also reconfigure hardware */
++ switch (ctrl->bRequest) {
++
++ case USB_REQ_GET_DESCRIPTOR:
++ if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
++ USB_RECIP_DEVICE))
++ break;
++ switch (w_value >> 8) {
++
++ case USB_DT_DEVICE:
++ VDBG(sti, "get device descriptor\n");
++ value = sizeof device_desc;
++ memcpy(req->buf, &device_desc, value);
++ break;
++ case USB_DT_DEVICE_QUALIFIER:
++ VDBG(sti, "get device qualifier\n");
++ if (!gadget_is_dualspeed(sti->gadget))
++ break;
++ value = sizeof dev_qualifier;
++ memcpy(req->buf, &dev_qualifier, value);
++ break;
++
++ case USB_DT_OTHER_SPEED_CONFIG:
++ VDBG(sti, "get other-speed config descriptor\n");
++ if (!gadget_is_dualspeed(sti->gadget))
++ break;
++ goto get_config;
++ case USB_DT_CONFIG:
++ VDBG(sti, "get configuration descriptor\n");
++get_config:
++ value = populate_config_buf(sti->gadget,
++ req->buf,
++ w_value >> 8,
++ w_value & 0xff);
++ break;
++
++ case USB_DT_STRING:
++ VDBG(sti, "get string descriptor\n");
++
++ /* wIndex == language code */
++ value = usb_gadget_get_string(&stringtab,
++ w_value & 0xff, req->buf);
++ break;
++ }
++ break;
++
++ /* one config, two speeds */
++ case USB_REQ_SET_CONFIGURATION:
++ if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
++ USB_RECIP_DEVICE))
++ break;
++ VDBG(sti, "set configuration\n");
++ if (w_value == CONFIG_VALUE || w_value == 0) {
++ sti->new_config = w_value;
++
++ /* Raise an exception to wipe out previous transaction
++ * state (queued bufs, etc) and set the new config. */
++ raise_exception(sti, STI_STATE_CONFIG_CHANGE);
++ value = DELAYED_STATUS;
++ }
++ break;
++
++ case USB_REQ_GET_CONFIGURATION:
++ if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
++ USB_RECIP_DEVICE))
++ break;
++ VDBG(sti, "get configuration\n");
++ *(u8 *) req->buf = sti->config;
++ value = 1;
++ break;
++
++ case USB_REQ_SET_INTERFACE:
++ if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
++ USB_RECIP_INTERFACE))
++ break;
++ if (sti->config && w_index == 0) {
++
++ /* Raise an exception to wipe out previous transaction
++ * state (queued bufs, etc) and install the new
++ * interface altsetting. */
++ raise_exception(sti, STI_STATE_INTERFACE_CHANGE);
++ value = DELAYED_STATUS;
++ }
++ break;
++
++ case USB_REQ_GET_INTERFACE:
++ if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
++ USB_RECIP_INTERFACE))
++ break;
++ if (!sti->config)
++ break;
++ if (w_index != 0) {
++ value = -EDOM;
++ break;
++ }
++ VDBG(sti, "get interface\n");
++ *(u8 *) req->buf = 0;
++ value = 1;
++ break;
++
++ default:
++ VDBG(sti, "unknown control req %02x.%02x v%04x i%04x l%u\n",
++ ctrl->bRequestType, ctrl->bRequest,
++ w_value, w_index, le16_to_cpu(ctrl->wLength));
++ }
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return value;
++}
++
++static int sti_setup(struct usb_gadget *gadget,
++ const struct usb_ctrlrequest *ctrl)
++{
++ struct sti_dev *sti = get_gadget_data(gadget);
++ int rc;
++ int w_length = le16_to_cpu(ctrl->wLength);
++ VDBG(sti, "---> %s()\n", __func__);
++
++ /* record arrival of a new request */
++ ++sti->ep0_req_tag;
++ sti->ep0req->context = NULL;
++ sti->ep0req->length = 0;
++ dump_msg(sti, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl));
++
++ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
++ rc = class_setup_req(sti, ctrl);
++ else
++ rc = standard_setup_req(sti, ctrl);
++
++ /* respond with data/status or defer until later */
++ if (rc >= 0 && rc != DELAYED_STATUS) {
++ rc = min(rc, w_length);
++ sti->ep0req->length = rc;
++ sti->ep0req->zero = rc < w_length;
++ sti->ep0req_name = (ctrl->bRequestType & USB_DIR_IN ?
++ "ep0-in" : "ep0-out");
++ rc = ep0_queue(sti);
++ }
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ /* device either stalls (rc < 0) or reports success */
++ return rc;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++/* all the following routines run in process context */
++
++/* use this for bulk or interrupt transfers, not ep0 */
++static void start_transfer(struct sti_dev *sti, struct usb_ep *ep,
++ struct usb_request *req, int *pbusy,
++ enum sti_buffer_state *state)
++{
++ int rc;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ if (ep == sti->bulk_in)
++ dump_msg(sti, "bulk-in", req->buf, req->length);
++ else if (ep == sti->intr_in)
++ dump_msg(sti, "intr-in", req->buf, req->length);
++
++ spin_lock_irq(&sti->lock);
++ *pbusy = 1;
++ *state = BUF_STATE_BUSY;
++ spin_unlock_irq(&sti->lock);
++
++ rc = usb_ep_queue(ep, req, GFP_KERNEL);
++ VDBG(sti, "start_transfer, rc: %d\n", rc);
++ if (rc != 0) {
++ *pbusy = 0;
++ *state = BUF_STATE_EMPTY;
++ if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
++ req->length == 0))
++ WARNING(sti, "error in submission: %s --> %d\n",
++ ep->name, rc);
++ }
++
++ VDBG(sti, "<--- %s()\n", __func__);
++}
++
++
++static int sleep_thread(struct sti_dev *sti)
++{
++ int rc = 0;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ /* wait until a signal arrives or we are woken up */
++ for (;;) {
++ try_to_freeze();
++ set_current_state(TASK_INTERRUPTIBLE);
++ if (signal_pending(current)) {
++ rc = -EINTR;
++ break;
++ }
++ if (sti->thread_wakeup_needed)
++ break;
++
++ schedule();
++ }
++
++ __set_current_state(TASK_RUNNING);
++ sti->thread_wakeup_needed = 0;
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static int fill_data_container(struct sti_buffhd *bh,
++ struct sti_dev *sti, unsigned int size)
++{
++ struct pima15740_container *rb;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ rb = bh->buf;
++
++ rb->container_len = size;
++ rb->container_type = TYPE_DATA_BLOCK;
++ rb->code = sti->code;
++ rb->transaction_id = sti->transaction_id;
++
++ bh->inreq->zero = 0;
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return 0;
++}
++
++
++static int send_response(struct sti_dev *sti, unsigned int code)
++{
++ struct sti_buffhd *bh;
++ struct pima15740_container *rb;
++ int rc = 0;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ /* wait for the next buffer to become available */
++ bh = sti->next_buffhd_to_fill;
++ while (bh->state != BUF_STATE_EMPTY) {
++ rc = sleep_thread(sti);
++ if (rc)
++ return rc;
++ }
++
++ rb = bh->buf;
++
++ rb->container_len = PIMA15740_CONTAINER_LEN;
++ rb->container_type = TYPE_RESPONSE_BLOCK;
++ rb->code = code;
++ rb->transaction_id = sti->transaction_id;
++
++ bh->inreq->length = PIMA15740_CONTAINER_LEN;
++ bh->state = BUF_STATE_FULL;
++ bh->inreq->zero = 0;
++
++ start_transfer(sti, sti->bulk_in, bh->inreq,
++ &bh->inreq_busy, &bh->state);
++
++ sti->next_buffhd_to_fill = bh->next;
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++
++static int send_params_response(struct sti_dev *sti, unsigned int code,
++ u32 p1, u32 p2, u32 p3, unsigned p_num)
++{
++ struct sti_buffhd *bh;
++ struct pima15740_container *rb;
++ int rc = 0;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ /* wait for the next buffer to become available */
++ bh = sti->next_buffhd_to_fill;
++ while (bh->state != BUF_STATE_EMPTY) {
++ rc = sleep_thread(sti);
++ if (rc)
++ return rc;
++ }
++
++ rb = bh->buf;
++
++ rb->container_len = PIMA15740_CONTAINER_LEN + p_num * 4;
++ rb->container_type = TYPE_RESPONSE_BLOCK;
++ rb->code = code;
++ rb->transaction_id = sti->transaction_id;
++
++ switch (p_num) {
++ case 3:
++ memcpy((u8 *)rb + PIMA15740_CONTAINER_LEN, &p1, 4);
++ memcpy((u8 *)rb + PIMA15740_CONTAINER_LEN + 4, &p2, 4);
++ memcpy((u8 *)rb + PIMA15740_CONTAINER_LEN + 8, &p3, 4);
++ break;
++ case 2:
++ memcpy((u8 *)rb + PIMA15740_CONTAINER_LEN, &p1, 4);
++ memcpy((u8 *)rb + PIMA15740_CONTAINER_LEN + 4, &p2, 4);
++ break;
++ case 1:
++ memcpy((u8 *)rb + PIMA15740_CONTAINER_LEN, &p1, 4);
++ break;
++ default:
++ break;
++ }
++
++ bh->inreq->length = PIMA15740_CONTAINER_LEN + p_num * 4;
++ bh->state = BUF_STATE_FULL;
++ bh->inreq->zero = 0;
++
++ start_transfer(sti, sti->bulk_in, bh->inreq,
++ &bh->inreq_busy, &bh->state);
++
++ sti->next_buffhd_to_fill = bh->next;
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++/* ISO-8859-1 to UTF-16LE */
++static unsigned short str_to_uni16(const char *src, char *dest)
++{
++ unsigned int i;
++
++ for (i = 0; i < strlen(src); i++) {
++ dest[i * 2] = src[i];
++ dest[i * 2 + 1] = '\0';
++ }
++
++ /* null-terminated string */
++ dest[i * 2] = dest[i * 2 + 1] = '\0';
++
++ return (i + 1) * 2;
++}
++
++/* UTF-16LE to ISO-8859-1 */
++static void uni16_to_str(const char *src, char *dest, unsigned short len)
++{
++ unsigned int i;
++
++ for (i = 0; i < len; i++)
++ dest[i] = src[i * 2];
++}
++
++
++static int do_get_device_info(struct sti_dev *sti, struct sti_buffhd *bh)
++{
++ size_t size;
++ int rc = 0;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ /* dump DeviceInfo Dataset */
++ dump_device_info(sti);
++
++ size = sizeof sti_device_info;
++ fill_data_container(bh, sti, PIMA15740_CONTAINER_LEN + size);
++
++ memcpy(bh->buf + PIMA15740_CONTAINER_LEN, &sti_device_info, size);
++
++ bh->inreq->length = PIMA15740_CONTAINER_LEN + size;
++ bh->state = BUF_STATE_FULL;
++ start_transfer(sti, sti->bulk_in, bh->inreq,
++ &bh->inreq_busy, &bh->state);
++ sti->next_buffhd_to_fill = bh->next;
++
++ /* send response */
++ rc = send_response(sti, PIMA15740_RES_OK);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++
++static int filldir_all(void *__buf, const char *name, int len,
++ loff_t pos, u64 ino, unsigned int d_type)
++{
++ struct sti_dev *sti = __buf;
++ struct sti_object *obj;
++ char *ext;
++ u8 filename_len;
++ char filename_utf16le[NAME_MAX * 2];
++ size_t obj_size;
++ u16 object_format = PIMA15740_FMT_A_UNDEFINED;
++ int rc = 0;
++ VDBG(sti, "---> %s()\n", __func__);
++ VDBG(sti, "name: %s, len: %d, pos: %lu, ino: %llu, d_type: %u\n",
++ name, len, (unsigned long)pos, ino, d_type);
++
++ /* ignore "." and ".." directories */
++ if (!strcmp(name, ".") || !strcmp(name, ".."))
++ goto out;
++
++ if (d_type != DT_DIR && d_type != DT_REG)
++ goto out;
++
++ /* filename strings length */
++ filename_len = len + 1;
++ VDBG(sti, "filename_len: %u\n", filename_len);
++
++ /* sti_object size */
++ obj_size = sizeof(struct sti_object) + 2 * filename_len + 4;
++ VDBG(sti, "obj_size: %u\n", obj_size);
++ /* obj_size > sizeof(struct sti_object) */
++ obj = kzalloc(obj_size, GFP_KERNEL);
++ if (!obj) {
++ rc = -ENOMEM;
++ goto out;
++ }
++
++ /* fill part of sti_object info */
++ obj->storage_id = STORAGE_ID;
++ obj->send_valid = 0;
++
++ /* ObjectInfo Dataset size */
++ obj->obj_info_size = sizeof(struct pima15740_object_info)
++ + 2 * filename_len + 4;
++ VDBG(sti, "obj_info_size: %u\n", obj->obj_info_size);
++
++ /* filename */
++ memset(obj->filename, 0, sizeof(obj->filename));
++ strncpy(obj->filename, name, len);
++
++ /* fill ObjectInfo Dataset */
++ obj->obj_info.storage_id = cpu_to_le32(STORAGE_ID);
++
++ if (d_type == DT_DIR) { /* association */
++ object_format = PIMA15740_FMT_A_ASSOCIATION;
++ obj->obj_info.association_type =
++ cpu_to_le16(PIMA15740_AS_GENERIC_FOLDER);
++ obj->is_dir = 1;
++ } else if (d_type == DT_REG) { /* regular file */
++ ext = strrchr(obj->filename, '.');
++ if (ext) {
++ /* image object */
++ if (!strcasecmp(ext, ".jpg") ||
++ !strcasecmp(ext, ".jpeg") ||
++ !strcasecmp(ext, ".jpe"))
++ object_format = PIMA15740_FMT_I_EXIF_JPEG;
++ else if (!strcasecmp(ext, ".jfif"))
++ object_format = PIMA15740_FMT_I_JFIF;
++ else if (!strcasecmp(ext, ".tif") ||
++ !strcasecmp(ext, ".tiff"))
++ object_format = PIMA15740_FMT_I_TIFF;
++ else if (!strcasecmp(ext, ".png"))
++ object_format = PIMA15740_FMT_I_PNG;
++ else if (!strcasecmp(ext, ".bmp"))
++ object_format = PIMA15740_FMT_I_BMP;
++ else if (!strcasecmp(ext, ".gif"))
++ object_format = PIMA15740_FMT_I_GIF;
++ else /* undefined non-image object */
++ object_format = PIMA15740_FMT_A_UNDEFINED;
++ } else /* file without extension */
++ object_format = PIMA15740_FMT_A_UNDEFINED;
++ obj->obj_info.association_type =
++ cpu_to_le16(PIMA15740_AS_UNDEFINED);
++ obj->is_dir = 0;
++ }
++ obj->obj_info.object_format = cpu_to_le16(object_format);
++
++ /* protection_status, object_compressed_size will be filled later */
++ obj->obj_info.thumb_format = cpu_to_le16(0);
++ obj->obj_info.thumb_compressed_size = cpu_to_le32(0);
++ obj->obj_info.thumb_pix_width = cpu_to_le32(0);
++ obj->obj_info.thumb_pix_height = cpu_to_le32(0);
++ obj->obj_info.image_pix_width = cpu_to_le32(0);
++ obj->obj_info.image_pix_height = cpu_to_le32(0);
++ obj->obj_info.image_bit_depth = cpu_to_le32(0);
++
++ obj->obj_info.association_desc = cpu_to_le32(0);
++ obj->obj_info.sequence_number = cpu_to_le32(0);
++
++ /* filename_utf16le: UTF-16LE unicode string */
++ obj->obj_info.obj_strings[0] = filename_len;
++ memset(filename_utf16le, 0, sizeof(filename_utf16le));
++ str_to_uni16(obj->filename, filename_utf16le);
++ memcpy(obj->obj_info.obj_strings + 1, filename_utf16le,
++ filename_len * 2);
++
++ /* capture date */
++ obj->obj_info.obj_strings[filename_len * 2 + 1] = 0;
++
++ /* modification date */
++ obj->obj_info.obj_strings[filename_len * 2 + 2] = 0;
++
++ /* keywords */
++ obj->obj_info.obj_strings[filename_len * 2 + 3] = 0;
++
++ /* increase object number */
++ sti->sub_object_num++;
++
++ /* add to temp object list */
++ list_add_tail(&obj->list, &sti->tmp_obj_list);
++out:
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++
++/* alphabetic sort function */
++static int alnumsort(const void *a, const void *b)
++{
++ const struct sti_object *oa = *(const struct sti_object **)a;
++ const struct sti_object *ob = *(const struct sti_object **)b;
++ return strcmp(oa->filename, ob->filename);
++}
++
++
++/* descend through the hierarchical folder recursively */
++static int list_objects(struct sti_dev *sti, const char *folder_name,
++ struct sti_object *folder_obj, bool recursive)
++{
++ struct file *filp;
++ struct dentry *dentry;
++ struct sti_object *obj = NULL;
++ struct sti_object *tmp_obj;
++ struct sti_object **pobj, **temp_pobj = NULL;
++ struct kstat stat;
++ u32 parent_object;
++ int i, rc = 0;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ /* root directory */
++ if (!strcmp(folder_name, sti->root_path)) {
++ filp = sti->root_filp;
++ parent_object = 0;
++ VDBG(sti, "root directory\n");
++ } else { /* subdirectory */
++ filp = filp_open(folder_name, O_RDONLY | O_DIRECTORY, 0);
++ if (IS_ERR(filp)) {
++ ERROR(sti, "unable to open folder: %s\n",
++ folder_name);
++ return PTR_ERR(filp);
++ }
++ VDBG(sti, "folder_name: %s\n", folder_name);
++ parent_object = folder_obj->obj_handle;
++ }
++ dentry = filp->f_dentry;
++
++ sti->sub_object_num = 0;
++ filp->f_pos = 0;
++ rc = vfs_readdir(filp, filldir_all, sti);
++ if (rc)
++ ERROR(sti, "vfs_readdir %s error: %d\n",
++ folder_name, rc);
++ VDBG(sti, "%d objects in folder %s\n",
++ sti->sub_object_num, folder_name);
++
++ /* no file in the directory */
++ if (!sti->sub_object_num)
++ goto out;
++
++ /* pre-allocated objects array */
++ pobj = kzalloc((sti->sub_object_num + 1) * sizeof(struct sti_object *),
++ GFP_KERNEL);
++ if (!pobj) {
++ rc = -ENOMEM;
++ goto out;
++ }
++
++ temp_pobj = pobj;
++
++ i = 0;
++ list_for_each_entry_safe(obj, tmp_obj, &sti->tmp_obj_list, list) {
++ pobj[i] = obj;
++ /* remove from temp object list */
++ list_del_init(&obj->list);
++ i++;
++ }
++ VDBG(sti, "i = %d\n", i);
++ pobj[i] = NULL;
++
++ /* sort the objects array */
++ sort(pobj, sti->sub_object_num, sizeof(struct sti_object *),
++ alnumsort, NULL);
++
++ while (*pobj) {
++ /* increase total object number */
++ sti->object_num++;
++
++ /* fill object handle */
++ (*pobj)->obj_handle = sti->object_num;
++
++ /* fill parent object */
++ (*pobj)->parent_object = cpu_to_le32(parent_object);
++ (*pobj)->obj_info.parent_object = cpu_to_le32(parent_object);
++
++ /* object full path */
++ memset((*pobj)->full_path, 0, sizeof((*pobj)->full_path));
++ snprintf((*pobj)->full_path, sizeof((*pobj)->full_path),
++ "%s/%s", folder_name, (*pobj)->filename);
++
++ VDBG(sti, "full_path: %s, obj_handle: 0x%08x, "
++ "parent_object: 0x%08x\n",
++ (*pobj)->full_path, (*pobj)->obj_handle,
++ parent_object);
++
++ /* get file statistics info */
++ rc = vfs_stat((char __user *)(*pobj)->full_path, &stat);
++ if (rc) {
++ ERROR(sti, "vfs_stat error: %d\n", rc);
++ goto out;
++ }
++
++ /* fill remained ObjectInfo Dataset */
++ if (stat.mode & S_IWUSR)
++ (*pobj)->obj_info.protection_status =
++ cpu_to_le16(PIMA15740_OBJECT_NO_PROTECTION);
++ else
++ (*pobj)->obj_info.protection_status =
++ cpu_to_le16(PIMA15740_OBJECT_READ_ONLY);
++
++ (*pobj)->obj_info.object_compressed_size =
++ cpu_to_le32((u32)stat.size);
++
++ /* add to object list */
++ list_add_tail(&(*pobj)->list, &sti->obj_list);
++
++ if ((*pobj)->is_dir && recursive)
++ list_objects(sti, (*pobj)->full_path, *pobj, true);
++
++ pobj++;
++ }
++
++out:
++ /* free pre-allocated objects array */
++ kfree(temp_pobj);
++
++ if (strcmp(folder_name, sti->root_path))
++ filp_close(filp, current->files);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++
++static int do_open_session(struct sti_dev *sti)
++{
++ struct sti_object *obj;
++ u8 filename_len;
++ int rc = 0;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ if (sti->session_open) {
++ sti->response_code = PIMA15740_RES_SESSION_ALREADY_OPEN;
++ goto out;
++ }
++
++ sti->session_id = sti->ops_params[0];
++ VDBG(sti, "session_id: 0x%08x\n", sti->session_id);
++ if (sti->session_id) {
++ sti->response_code = PIMA15740_RES_OK;
++ sti->session_open = 1;
++ } else {
++ sti->response_code = PIMA15740_RES_INVALID_PARAMETER;
++ sti->session_open = 0;
++ goto out;
++ }
++
++ /* reset total object number */
++ sti->object_num = 0;
++
++ /* root object init */
++ filename_len = strlen(sti->root_filp->f_dentry->d_name.name) + 1;
++ VDBG(sti, "root object: %s\n", sti->root_path);
++ VDBG(sti, "filename_len: %u\n", filename_len);
++ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
++ if (!obj) {
++ sti->response_code = PIMA15740_RES_DEVICE_BUSY;
++ goto out;
++ }
++
++ spin_lock_irq(&sti->lock);
++
++ obj->obj_handle = 0;
++ obj->parent_object = 0;
++ obj->storage_id = STORAGE_ID;
++ obj->is_dir = 1;
++ obj->send_valid = 0;
++ obj->obj_info_size = sizeof(struct pima15740_object_info);
++
++ /* root object filename */
++ memset(obj->filename, 0, sizeof(obj->filename));
++ strncpy(obj->filename, sti->root_filp->f_dentry->d_name.name,
++ sizeof(obj->filename));
++ VDBG(sti, "root object filename: %s\n", obj->filename);
++
++ /* root object full path */
++ memset(obj->full_path, 0, sizeof(obj->full_path));
++ strncpy(obj->full_path, sti->root_path, sizeof(obj->full_path));
++ VDBG(sti, "root object full path: %s\n", obj->full_path);
++
++ /* add to object list */
++ list_add_tail(&obj->list, &sti->obj_list);
++
++ spin_unlock_irq(&sti->lock);
++out:
++ /* send response */
++ rc = send_response(sti, sti->response_code);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++
++static int do_close_session(struct sti_dev *sti)
++{
++ struct sti_object *obj, *tmp_obj;
++ int rc = 0;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ if (sti->session_open) {
++ sti->response_code = PIMA15740_RES_OK;
++ sti->session_open = 0;
++ } else {
++ sti->response_code = PIMA15740_RES_SESSION_NOT_OPEN;
++ goto out;
++ }
++
++ spin_lock_irq(&sti->lock);
++
++ /* release object list */
++ list_for_each_entry_safe(obj, tmp_obj, &sti->obj_list, list) {
++ list_del_init(&obj->list);
++ kfree(obj);
++ }
++
++ spin_unlock_irq(&sti->lock);
++
++ DBG(sti, "release object list\n");
++out:
++ /* send response */
++ rc = send_response(sti, sti->response_code);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++
++static int do_get_storage_ids(struct sti_dev *sti, struct sti_buffhd *bh)
++{
++ size_t size;
++ u32 i;
++ int rc = 0;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ if (!sti->session_open) {
++ sti->response_code = PIMA15740_RES_SESSION_NOT_OPEN;
++ goto out;
++ }
++
++ sti->storage_id = cpu_to_le32(STORAGE_ID);
++ DBG(sti, "storage_id: 0x%08x\n", sti->storage_id);
++
++ /* 4 bytes array number and 4 bytes storage id */
++ size = 8;
++ fill_data_container(bh, sti, PIMA15740_CONTAINER_LEN + size);
++
++ /* support one storage id */
++ i = 1;
++ memcpy(bh->buf + PIMA15740_CONTAINER_LEN, &i, 4);
++ memcpy(bh->buf + PIMA15740_CONTAINER_LEN + 4, &sti->storage_id, 4);
++
++ bh->inreq->length = PIMA15740_CONTAINER_LEN + size;
++ bh->state = BUF_STATE_FULL;
++ start_transfer(sti, sti->bulk_in, bh->inreq,
++ &bh->inreq_busy, &bh->state);
++ sti->next_buffhd_to_fill = bh->next;
++
++ sti->response_code = PIMA15740_RES_OK;
++out:
++ /* send response */
++ rc = send_response(sti, sti->response_code);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++
++static int do_get_storage_info(struct sti_dev *sti, struct sti_buffhd *bh)
++{
++ size_t size;
++ u32 storage_id;
++ u64 sbytes_max, sbytes_free;
++ struct kstatfs sbuf;
++ int rc = 0;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ if (!sti->session_open) {
++ sti->response_code = PIMA15740_RES_SESSION_NOT_OPEN;
++ goto out;
++ }
++
++ /* storage id */
++ storage_id = sti->ops_params[0];
++ if (storage_id != sti->storage_id) {
++ WARNING(sti, "invalid storage id: 0x%08x\n", storage_id);
++ sti->response_code = PIMA15740_RES_INVALID_STORAGE_ID;
++ goto out;
++ }
++
++ /* get filesystem statistics info */
++ rc = vfs_statfs(sti->root_filp->f_dentry, &sbuf);
++ if (rc) {
++ sti->response_code = PIMA15740_RES_ACCESS_DENIED;
++ goto out;
++ }
++
++ /* fill remained items in StorageInfo Dataset */
++ sbytes_max = (u64) sbuf.f_bsize * sbuf.f_blocks;
++ sbytes_free = (u64) sbuf.f_bsize * sbuf.f_bfree;
++ sti_storage_info.max_capacity = cpu_to_le64(sbytes_max);
++ sti_storage_info.free_space_in_bytes = cpu_to_le64(sbytes_free);
++ sti_storage_info.free_space_in_images = cpu_to_le32((u32)~0);
++ str_to_uni16(storage_desc, sti_storage_info.storage_desc);
++
++ /* dump StorageInfo Dataset */
++ dump_storage_info(sti);
++
++ memcpy(bh->buf + PIMA15740_CONTAINER_LEN, &sti_storage_info,
++ sizeof(sti_storage_info));
++
++ size = PIMA15740_CONTAINER_LEN + sizeof(sti_storage_info);
++ fill_data_container(bh, sti, size);
++
++ bh->inreq->length = size;
++ bh->state = BUF_STATE_FULL;
++ start_transfer(sti, sti->bulk_in, bh->inreq,
++ &bh->inreq_busy, &bh->state);
++ sti->next_buffhd_to_fill = bh->next;
++
++ sti->response_code = PIMA15740_RES_OK;
++out:
++ /* send response */
++ rc = send_response(sti, sti->response_code);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++
++static int do_get_num_objects(struct sti_dev *sti, struct sti_buffhd *bh)
++{
++ int i;
++ int rc = 0;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ if (!sti->session_open) {
++ sti->response_code = PIMA15740_RES_SESSION_NOT_OPEN;
++ goto out;
++ }
++
++ for (i = 0; i < PARAM_NUM_MAX; i++)
++ VDBG(sti, "parameter[%u]: 0x%08x\n",
++ i + 1, sti->ops_params[i]);
++
++ if (!backing_folder_is_open(sti)) {
++ ERROR(sti, "backing folder is not open\n");
++ sti->response_code = PIMA15740_RES_STORE_NOT_AVAILABLE;
++ goto out;
++ }
++
++ DBG(sti, "total object number: %u\n", sti->object_num);
++
++ sti->response_code = PIMA15740_RES_OK;
++out:
++ /* send response */
++ rc = send_params_response(sti, sti->response_code,
++ sti->object_num, 0, 0,
++ 1);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++
++static int do_get_object_handles(struct sti_dev *sti, struct sti_buffhd *bh)
++{
++ size_t size;
++ u32 storage_id, obj_handle;
++ u32 new_obj_num, old_obj_num, tmp_obj_num;
++ char *cur_path = NULL;
++ struct sti_object *obj;
++ int i, rc = 0;
++
++ VDBG(sti, "---> %s()\n", __func__);
++
++ if (!sti->session_open) {
++ sti->response_code = PIMA15740_RES_SESSION_NOT_OPEN;
++ goto out;
++ }
++
++ for (i = 0; i < PARAM_NUM_MAX; i++)
++ VDBG(sti, "parameter[%u]: 0x%08x\n",
++ i + 1, sti->ops_params[i]);
++
++ if (!backing_folder_is_open(sti)) {
++ ERROR(sti, "backing folder is not open\n");
++ sti->response_code = PIMA15740_RES_STORE_NOT_AVAILABLE;
++ goto out;
++ }
++
++ storage_id = sti->ops_params[0];
++ obj_handle = sti->ops_params[2];
++ old_obj_num = sti->object_num;
++
++ if (storage_id == 0xffffffff) {
++ /* list all objects recursive */
++ rc = list_objects(sti, sti->root_path, NULL, true);
++ new_obj_num = sti->object_num;
++ } else {
++ /* list objects of current folder */
++ list_for_each_entry(obj, &sti->obj_list, list) {
++ if (obj->obj_handle == obj_handle)
++ break;
++ }
++
++ if (obj_handle == 0xffffffff)
++ cur_path = sti->root_path;
++ else
++ cur_path = obj->full_path;
++ VDBG(sti, "current path: %s\n", cur_path);
++
++ if (cur_path)
++ rc = list_objects(sti, cur_path, obj, false);
++ else {
++ sti->response_code = PIMA15740_RES_DEVICE_BUSY;
++ goto out;
++ }
++
++ new_obj_num = sti->sub_object_num;
++ }
++
++ if (rc) {
++ sti->response_code = PIMA15740_RES_DEVICE_BUSY;
++ goto out;
++ }
++
++ /* 4 bytes array number plus object handles size */
++ size = 4 + new_obj_num * 4;
++ VDBG(sti, "object number: %u, payload size: %u\n",
++ new_obj_num, size);
++ fill_data_container(bh, sti, PIMA15740_CONTAINER_LEN + size);
++
++ /* fill object handles array */
++ memcpy(bh->buf + PIMA15740_CONTAINER_LEN, &new_obj_num, 4);
++ for (i = 1; i <= new_obj_num; i++) {
++ tmp_obj_num = old_obj_num + i;
++ memcpy(bh->buf + PIMA15740_CONTAINER_LEN + i * 4,
++ &tmp_obj_num, 4);
++ }
++
++ bh->inreq->length = PIMA15740_CONTAINER_LEN + size;
++ bh->state = BUF_STATE_FULL;
++ start_transfer(sti, sti->bulk_in, bh->inreq,
++ &bh->inreq_busy, &bh->state);
++ sti->next_buffhd_to_fill = bh->next;
++
++ sti->response_code = PIMA15740_RES_OK;
++out:
++ /* send response */
++ rc = send_response(sti, sti->response_code);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++
++static int do_get_object_info(struct sti_dev *sti, struct sti_buffhd *bh)
++{
++ size_t size = 0;
++ u32 obj_handle;
++ struct sti_object *obj;
++ int rc = 0;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ if (!sti->session_open) {
++ sti->response_code = PIMA15740_RES_SESSION_NOT_OPEN;
++ goto out;
++ }
++
++ obj_handle = sti->ops_params[0];
++ if (obj_handle == 0 || obj_handle > sti->object_num) {
++ WARNING(sti, "invalid object handle: 0x%08x\n", obj_handle);
++ sti->response_code = PIMA15740_RES_INVALID_OBJECT_HANDLE;
++ goto out;
++ }
++
++ spin_lock_irq(&sti->lock);
++
++ /* find the object */
++ list_for_each_entry(obj, &sti->obj_list, list) {
++ if (obj->obj_handle == obj_handle)
++ break;
++ }
++
++ memcpy(bh->buf + PIMA15740_CONTAINER_LEN, &obj->obj_info,
++ obj->obj_info_size);
++ size = PIMA15740_CONTAINER_LEN + obj->obj_info_size;
++ fill_data_container(bh, sti, size);
++
++ bh->inreq->length = size;
++ bh->state = BUF_STATE_FULL;
++
++ spin_unlock_irq(&sti->lock);
++
++ start_transfer(sti, sti->bulk_in, bh->inreq,
++ &bh->inreq_busy, &bh->state);
++ sti->next_buffhd_to_fill = bh->next;
++
++ DBG(sti, "get object info: %s\n", obj->full_path);
++ VDBG(sti, "obj_handle: 0x%08x\n", obj->obj_handle);
++
++ /* dump ObjectInfo Dataset */
++ dump_object_info(sti, obj);
++
++ sti->response_code = PIMA15740_RES_OK;
++out:
++ /* send response */
++ rc = send_response(sti, sti->response_code);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++
++static int do_get_object(struct sti_dev *sti, struct sti_buffhd *bh)
++{
++ u32 obj_handle;
++ loff_t file_size, file_offset, file_offset_tmp;
++ unsigned int amount_left, amount;
++ ssize_t nread;
++ struct sti_object *obj;
++ struct file *filp = NULL;
++ struct inode *inode = NULL;
++ char __user *buf;
++ int rc = 0;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ if (!sti->session_open) {
++ sti->response_code = PIMA15740_RES_SESSION_NOT_OPEN;
++ goto out1;
++ }
++
++ obj_handle = sti->ops_params[0];
++ if (obj_handle == 0 || obj_handle > sti->object_num) {
++ WARNING(sti, "invalid object handle: 0x%08x\n", obj_handle);
++ sti->response_code = PIMA15740_RES_INVALID_OBJECT_HANDLE;
++ goto out1;
++ }
++
++ spin_lock_irq(&sti->lock);
++
++ /* find the object */
++ list_for_each_entry(obj, &sti->obj_list, list) {
++ if (obj->obj_handle == obj_handle)
++ break;
++ }
++
++ spin_unlock_irq(&sti->lock);
++
++ /* open object file */
++ filp = filp_open(obj->full_path, O_RDONLY | O_LARGEFILE, 0);
++ if (IS_ERR(filp)) {
++ ERROR(sti, "unable to open file: %s. Err = %d\n",
++ obj->full_path, (int) PTR_ERR(filp));
++ sti->response_code = PIMA15740_RES_STORE_NOT_AVAILABLE;
++ goto out1;
++ }
++
++ /* figure out the size and read the remaining amount */
++ inode = filp->f_dentry->d_inode;
++ file_size = i_size_read(inode->i_mapping->host);
++ VDBG(sti, "object file size: %llu\n", (unsigned long long) file_size);
++ if (unlikely(file_size == 0)) {
++ sti->response_code = PIMA15740_RES_STORE_NOT_AVAILABLE;
++ goto out2;
++ }
++
++ DBG(sti, "get object: %s\n", obj->full_path);
++
++ file_offset = 0;
++ amount_left = file_size;
++
++ while (amount_left > 0) {
++ bh = sti->next_buffhd_to_fill;
++ while (bh->state != BUF_STATE_EMPTY) {
++ rc = sleep_thread(sti);
++ if (rc) {
++ filp_close(filp, current->files);
++ return rc;
++ }
++ }
++
++ /* don't read more than the buffer size */
++ if (file_offset == 0) {
++ fill_data_container(bh, sti,
++ file_size + PIMA15740_CONTAINER_LEN);
++ buf = (char __user *) bh->buf +
++ PIMA15740_CONTAINER_LEN;
++ amount = min((unsigned int) amount_left,
++ mod_data.buflen - PIMA15740_CONTAINER_LEN);
++ } else {
++ buf = (char __user *) bh->buf;
++ amount = min((unsigned int) amount_left,
++ mod_data.buflen);
++ }
++
++ /* no more left to read */
++ if (amount == 0)
++ break;
++
++ /* perform the read */
++ file_offset_tmp = file_offset;
++ nread = vfs_read(filp, buf, amount, &file_offset_tmp);
++ VDBG(sti, "file read %u @ %llu -> %d\n", amount,
++ (unsigned long long) file_offset,
++ (int) nread);
++
++ if (signal_pending(current)) {
++ filp_close(filp, current->files);
++ return -EINTR;
++ }
++
++ if (nread < 0) {
++ WARNING(sti, "error in file read: %d\n",
++ (int) nread);
++ nread = 0;
++ } else if (nread < amount) {
++ WARNING(sti, "partial file read: %d/%u\n",
++ (int) nread, amount);
++ /* round down to a block */
++ nread -= (nread & 511);
++ }
++
++ /*
++ * PIMA 15740 generic container head resides in
++ * first data block payload
++ */
++ if (file_offset == 0)
++ bh->inreq->length = nread + PIMA15740_CONTAINER_LEN;
++ else
++ bh->inreq->length = nread;
++ bh->state = BUF_STATE_FULL;
++ bh->inreq->zero = 0;
++
++ file_offset += nread;
++ amount_left -= nread;
++
++ /* send this buffer and go read some more */
++ start_transfer(sti, sti->bulk_in, bh->inreq,
++ &bh->inreq_busy, &bh->state);
++ sti->next_buffhd_to_fill = bh->next;
++ }
++
++ sti->response_code = PIMA15740_RES_OK;
++out2:
++ filp_close(filp, current->files);
++out1:
++ /* send response */
++ rc = send_response(sti, sti->response_code);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++
++static int do_delete_object(struct sti_dev *sti, struct sti_buffhd *bh)
++{
++ u32 obj_handle;
++ struct sti_object *obj, *tmp_obj;
++ struct nameidata nd;
++ int i;
++ int rc = 0;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ if (!sti->session_open) {
++ sti->response_code = PIMA15740_RES_SESSION_NOT_OPEN;
++ goto out;
++ }
++
++ for (i = 0; i < PARAM_NUM_MAX; i++)
++ VDBG(sti, "parameter[%u]: 0x%08x\n",
++ i + 1, sti->ops_params[i]);
++
++ obj_handle = sti->ops_params[0];
++ if (obj_handle == 0 || obj_handle > sti->object_num) {
++ WARNING(sti, "invalid object handle: 0x%08x\n", obj_handle);
++ sti->response_code = PIMA15740_RES_INVALID_OBJECT_HANDLE;
++ goto out;
++ }
++
++ spin_lock_irq(&sti->lock);
++
++ /* find the object */
++ list_for_each_entry_safe(obj, tmp_obj, &sti->obj_list, list) {
++ if (obj->obj_handle == obj_handle) {
++ list_del_init(&obj->list);
++ kfree(obj);
++ break;
++ }
++ }
++
++ spin_unlock_irq(&sti->lock);
++
++ /* lookup the object file */
++ rc = path_lookup(obj->full_path, 0, &nd);
++ if (rc) {
++ ERROR(sti, "invalid object file path: %s\n", obj->full_path);
++ sti->response_code = PIMA15740_RES_STORE_NOT_AVAILABLE;
++ goto out;
++ }
++
++ /* unlink the file */
++ rc = vfs_unlink(nd.path.dentry->d_parent->d_inode, nd.path.dentry);
++ if (rc) {
++ ERROR(sti, "can't delete object\n");
++ sti->response_code = PIMA15740_RES_DEVICE_BUSY;
++ goto out;
++ }
++
++ DBG(sti, "delete object: %s\n", obj->full_path);
++
++ sti->response_code = PIMA15740_RES_OK;
++out:
++ /* send response */
++ rc = send_response(sti, sti->response_code);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++
++static int do_send_object_info(struct sti_dev *sti, struct sti_buffhd *bh)
++{
++ u8 filename_len;
++ u32 storage_id;
++ u32 parent_object = 0xffffffff;
++ unsigned int offset;
++ struct sti_object *obj, *parent_obj;
++ size_t obj_size;
++ int i;
++ int rc = 0;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ if (!sti->session_open) {
++ sti->response_code = PIMA15740_RES_SESSION_NOT_OPEN;
++ goto out2;
++ }
++
++ for (i = 0; i < PARAM_NUM_MAX; i++)
++ VDBG(sti, "parameter[%u]: 0x%08x\n",
++ i + 1, sti->ops_params[i]);
++
++ /* destination storage id */
++ storage_id = sti->ops_params[0];
++ if (storage_id != STORAGE_ID) {
++ WARNING(sti, "invalid storage id: 0x%08x\n", storage_id);
++ sti->response_code = PIMA15740_RES_INVALID_STORAGE_ID;
++ goto out2;
++ }
++
++ /* parent object handle where object should be placed */
++ parent_object = sti->ops_params[1];
++
++ /* if root directory, parent object is 0xffffffff */
++ if (parent_object == 0 || (parent_object > sti->object_num
++ && parent_object != 0xffffffff)) {
++ WARNING(sti, "invalid parent handle: 0x%08x\n",
++ parent_object);
++ sti->response_code = PIMA15740_RES_INVALID_PARENT_OBJECT;
++ goto out2;
++ }
++
++ /* queue a request to read ObjectInfo Dataset */
++ set_bulk_out_req_length(sti, bh, 512);
++ bh->outreq->short_not_ok = 1;
++ start_transfer(sti, sti->bulk_out, bh->outreq,
++ &bh->outreq_busy, &bh->state);
++
++ /* wait for the ObjectInfo Dataset to arrive */
++ while (bh->state != BUF_STATE_FULL) {
++ rc = sleep_thread(sti);
++ if (rc)
++ goto out1;
++ }
++
++ /* filename strings length */
++ offset = offsetof(struct pima15740_object_info, obj_strings[0]);
++ filename_len = *(u8 *)(bh->outreq->buf + PIMA15740_CONTAINER_LEN
++ + offset);
++ VDBG(sti, "filename_len: %u\n", filename_len);
++
++ /* sti_object size */
++ obj_size = sizeof(*obj) + 2 * filename_len + 4;
++ VDBG(sti, "obj_size: %u\n", obj_size);
++ obj = kzalloc(obj_size, GFP_KERNEL);
++ if (!obj) {
++ sti->response_code = PIMA15740_RES_STORE_NOT_AVAILABLE;
++ goto out2;
++ }
++
++ spin_lock_irq(&sti->lock);
++
++ /* increase total object number */
++ sti->object_num++;
++
++ /* fill sti_object info */
++ obj->obj_handle = sti->object_num;
++ VDBG(sti, "obj_handle: 0x%08x\n", obj->obj_handle);
++
++ if (parent_object == 0xffffffff)
++ obj->parent_object = 0;
++ else
++ obj->parent_object = parent_object;
++ VDBG(sti, "parent_object: 0x%08x\n", obj->parent_object);
++
++ obj->storage_id = storage_id;
++
++ /* mark object ready to send */
++ obj->send_valid = 1;
++
++ /* ObjectInfo Dataset size */
++ obj->obj_info_size = sizeof(struct pima15740_object_info)
++ + 2 * filename_len + 4;
++ VDBG(sti, "obj_info_size: %u\n", obj->obj_info_size);
++
++ /* filename */
++ offset = offsetof(struct pima15740_object_info, obj_strings[1]);
++ uni16_to_str(bh->outreq->buf + PIMA15740_CONTAINER_LEN + offset,
++ obj->filename, filename_len);
++
++ /* object full path */
++ memset(obj->full_path, 0, sizeof(obj->full_path));
++ if (parent_object == 0xffffffff) {
++ snprintf(obj->full_path, sizeof(obj->full_path), "%s/%s",
++ sti->root_path, obj->filename);
++ } else {
++ /* find the parent object */
++ list_for_each_entry(parent_obj, &sti->obj_list, list) {
++ if (parent_obj->obj_handle == parent_object)
++ break;
++ }
++ snprintf(obj->full_path, sizeof(obj->full_path), "%s/%s",
++ parent_obj->full_path, obj->filename);
++ }
++ VDBG(sti, "full_path: %s\n", obj->full_path);
++
++ /* fetch ObjectInfo Dataset from buffer */
++ memcpy(&obj->obj_info, bh->outreq->buf + PIMA15740_CONTAINER_LEN,
++ obj->obj_info_size);
++
++ /* root directory, modify parent object */
++ if (parent_object == 0xffffffff)
++ obj->obj_info.parent_object = cpu_to_le32(0);
++ else
++ obj->obj_info.parent_object = parent_object;
++
++ obj->obj_info.storage_id = storage_id;
++
++ /* capture date */
++ obj->obj_info.obj_strings[filename_len * 2 + 1] = 0;
++
++ /* modification date */
++ obj->obj_info.obj_strings[filename_len * 2 + 2] = 0;
++
++ /* keywords */
++ obj->obj_info.obj_strings[filename_len * 2 + 3] = 0;
++
++ bh->state = BUF_STATE_EMPTY;
++
++ /* add to object list */
++ list_add_tail(&obj->list, &sti->obj_list);
++
++ spin_unlock_irq(&sti->lock);
++
++ DBG(sti, "send object info: %s\n", obj->filename);
++
++ /* dump ObjectInfo Dataset */
++ dump_object_info(sti, obj);
++out2:
++ /* send response */
++ rc = send_params_response(sti, PIMA15740_RES_OK,
++ sti->storage_id, parent_object, sti->object_num,
++ 3);
++out1:
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++
++static int do_send_object(struct sti_dev *sti, struct sti_buffhd *bh)
++{
++ int rc = -EINVAL;
++ int get_some_more;
++ u32 amount_left_to_req, amount_left_to_write;
++ loff_t file_size, file_offset, file_offset_tmp,
++ usb_offset;
++ unsigned int amount;
++ ssize_t nwritten;
++ struct sti_object *obj;
++ struct file *filp = NULL;
++ char __user *buf;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ spin_lock_irq(&sti->lock);
++
++ /* find the object */
++ list_for_each_entry(obj, &sti->obj_list, list) {
++ if (obj->send_valid)
++ break;
++ }
++
++ /* mark object already sent */
++ obj->send_valid = 0;
++
++ spin_unlock_irq(&sti->lock);
++
++ /* open object file */
++ filp = filp_open(obj->full_path, O_CREAT | O_RDWR | O_LARGEFILE, 0666);
++ if (IS_ERR(filp)) {
++ ERROR(sti, "unable to open file: %s. Err = %d\n",
++ obj->full_path, (int) PTR_ERR(filp));
++ sti->response_code = PIMA15740_RES_STORE_NOT_AVAILABLE;
++ goto out1;
++ }
++
++ file_size = obj->obj_info.object_compressed_size;
++ VDBG(sti, "object file size: %llu\n",
++ (unsigned long long) file_size);
++ if (unlikely(file_size == 0)) {
++ sti->response_code = PIMA15740_RES_STORE_NOT_AVAILABLE;
++ goto out2;
++ }
++
++ DBG(sti, "send object: %s\n", obj->full_path);
++
++ /* carry out the file writes */
++ get_some_more = 1;
++ file_offset = usb_offset = 0;
++
++ amount_left_to_req = file_size + PIMA15740_CONTAINER_LEN;
++ amount_left_to_write = file_size;
++ VDBG(sti, "in total: amount_left_to_req: %u\n",
++ amount_left_to_req);
++ VDBG(sti, "in total: amount_left_to_write: %u\n",
++ amount_left_to_write);
++
++ while (amount_left_to_write > 0) {
++ bh = sti->next_buffhd_to_fill;
++ if (bh->state == BUF_STATE_EMPTY && get_some_more) {
++ amount = min(amount_left_to_req, mod_data.buflen);
++ amount = min((loff_t) amount, file_size
++ + PIMA15740_CONTAINER_LEN - usb_offset);
++ VDBG(sti, "usb amount: %u\n", amount);
++
++ /* no left data request to transfer */
++ if (amount == 0) {
++ get_some_more = 0;
++ continue;
++ }
++
++ /* get the next buffer */
++ usb_offset += amount;
++ amount_left_to_req -= amount;
++
++ if (amount_left_to_req == 0)
++ get_some_more = 0;
++
++ /* amount is always divisible by bulk-out
++ maxpacket size */
++ bh->outreq->length = bh->bulk_out_intended_length =
++ amount;
++ bh->outreq->short_not_ok = 1;
++ start_transfer(sti, sti->bulk_out, bh->outreq,
++ &bh->outreq_busy, &bh->state);
++ sti->next_buffhd_to_fill = bh->next;
++ continue;
++ }
++
++ /* write the received data to the backing folder */
++ bh = sti->next_buffhd_to_drain;
++
++ /* host stopped early */
++ if (bh->state == BUF_STATE_EMPTY && !get_some_more) {
++ WARNING(sti, "host stops early, bh->state: %d\n",
++ bh->state);
++ sti->response_code = PIMA15740_RES_INCOMPLETE_TRANSFER;
++ goto out2;
++ }
++
++ if (bh->state == BUF_STATE_FULL) {
++ smp_rmb();
++ sti->next_buffhd_to_drain = bh->next;
++ bh->state = BUF_STATE_EMPTY;
++
++ /* something go wrong with the transfer */
++ if (bh->outreq->status != 0) {
++ sti->response_code =
++ PIMA15740_RES_INCOMPLETE_TRANSFER;
++ goto out2;
++ }
++
++ /*
++ * PIMA 15740 generic container head resides in
++ * first data block payload
++ */
++ if (file_offset == 0) {
++ buf = (char __user *) bh->buf +
++ PIMA15740_CONTAINER_LEN;
++ amount = bh->outreq->actual -
++ PIMA15740_CONTAINER_LEN;
++ } else {
++ buf = (char __user *) bh->buf;
++ amount = bh->outreq->actual;
++ }
++ amount = min((loff_t) amount,
++ file_size - file_offset);
++
++ /* across page boundary, recalculate the length */
++ if (amount == 0) {
++ INFO(sti, "extra bulk out zlp packets\n");
++ usb_offset -= bh->outreq->length;
++ amount_left_to_req += bh->outreq->length;
++ continue;
++ }
++
++ /* perform the write */
++ file_offset_tmp = file_offset;
++ nwritten = vfs_write(filp, (char __user *) buf,
++ amount, &file_offset_tmp);
++ VDBG(sti, "file write %u @ %llu -> %d\n", amount,
++ (unsigned long long) file_offset,
++ (int) nwritten);
++
++ if (signal_pending(current)) {
++ filp_close(filp, current->files);
++ return -EINTR;
++ }
++
++ if (nwritten < 0) {
++ VDBG(sti, "error in file write: %d\n",
++ (int) nwritten);
++ nwritten = 0;
++ } else if (nwritten < amount) {
++ VDBG(sti, "partial file write: %d/%u\n",
++ (int) nwritten, amount);
++ /* round down to a block */
++ nwritten -= (nwritten & 511);
++ }
++
++ file_offset += nwritten;
++ amount_left_to_write -= nwritten;
++
++ VDBG(sti, "file_offset: %llu, "
++ "amount_left_to_write: %u\n",
++ (unsigned long long) file_offset,
++ amount_left_to_write);
++
++ /* error occurred */
++ if (nwritten < amount) {
++ sti->response_code =
++ PIMA15740_RES_INCOMPLETE_TRANSFER;
++ goto out2;
++ }
++ continue;
++ }
++
++ /* wait for something to happen */
++ rc = sleep_thread(sti);
++ if (rc) {
++ filp_close(filp, current->files);
++ return rc;
++ }
++ }
++
++ /* fsync object file */
++ vfs_fsync(filp, filp->f_path.dentry, 1);
++
++ sti->response_code = PIMA15740_RES_OK;
++out2:
++ filp_close(filp, current->files);
++out1:
++ /* send response */
++ rc = send_response(sti, sti->response_code);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++
++static int do_copy_object(struct sti_dev *sti, struct sti_buffhd *bh)
++{
++ int rc = 0, i;
++ size_t size = 0;
++ unsigned int old_obj_handle, new_obj_parent_handle;
++ unsigned int new_storage_id, amount, amount_left;
++ struct sti_object *old_obj = NULL, *new_obj_parent = NULL;
++ struct sti_object *new_obj, *tmp_obj;
++ char *new_obj_fname;
++ struct file *old_fp, *new_fp;
++ struct inode *inode = NULL;
++ char __user *buf;
++ loff_t file_size, file_offset, file_offset_tmp;
++ ssize_t nread, nwritten;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ if (!sti->session_open) {
++ sti->response_code = PIMA15740_RES_SESSION_NOT_OPEN;
++ goto out1;
++ }
++
++ old_obj_handle = sti->ops_params[0];
++ new_storage_id = sti->ops_params[1];
++ new_obj_parent_handle = sti->ops_params[2];
++
++ if ((old_obj_handle == 0) || (old_obj_handle > sti->object_num)) {
++ WARNING(sti, "invalid object handle: %u\n", old_obj_handle);
++ sti->response_code = PIMA15740_RES_INVALID_OBJECT_HANDLE;
++ goto out1;
++ }
++
++ if (new_storage_id != sti->storage_id) {
++ WARNING(sti, "invalid storage id: %u\n", new_storage_id);
++ sti->response_code = PIMA15740_RES_INVALID_STORAGE_ID;
++ goto out1;
++ }
++
++ if (new_obj_parent_handle > sti->object_num
++ && new_obj_parent_handle != 0xffffffff) {
++ WARNING(sti, "invalid parent object handle: %u\n",
++ new_obj_parent_handle);
++ sti->response_code = PIMA15740_RES_INVALID_PARENT_OBJECT;
++ goto out1;
++ }
++
++ spin_lock_irq(&sti->lock);
++
++ /* find the old object to be copied */
++ i = 0;
++ list_for_each_entry(tmp_obj, &sti->obj_list, list) {
++ if (tmp_obj->obj_handle == old_obj_handle) {
++ i++;
++ old_obj = tmp_obj;
++ }
++
++ if (tmp_obj->obj_handle == new_obj_parent_handle) {
++ i++;
++ new_obj_parent = tmp_obj;
++ }
++
++ if (i == 2)
++ break;
++ }
++
++ spin_unlock_irq(&sti->lock);
++
++ if (i != 2 || !old_obj || !new_obj_parent) {
++ WARNING(sti, "invalid objects %u or %u\n",
++ old_obj_handle, new_obj_parent_handle);
++ sti->response_code = PIMA15740_RES_INVALID_PARENT_OBJECT;
++ goto out1;
++ }
++
++ size = strlen(new_obj_parent->full_path) +
++ strlen(old_obj->filename) + 2;
++ new_obj_fname = kzalloc(size, GFP_KERNEL);
++ if (!new_obj_fname) {
++ sti->response_code = PIMA15740_RES_DEVICE_BUSY;
++ rc = -EINVAL;
++ goto out1;
++ }
++ strncpy(new_obj_fname, new_obj_parent->full_path, size);
++ strncat(new_obj_fname, "/", size);
++ strncat(new_obj_fname, old_obj->filename, size);
++
++ VDBG(sti, "copy object: from [%s] to [%s]\n",
++ old_obj->full_path, new_obj_fname);
++
++ old_fp = filp_open(old_obj->full_path, O_RDONLY | O_LARGEFILE, 0);
++ if (IS_ERR(old_fp)) {
++ ERROR(sti, "unable to open file: %s. Err = %d\n",
++ old_obj->full_path, (int) PTR_ERR(old_fp));
++ sti->response_code = PIMA15740_RES_DEVICE_BUSY;
++ rc = -EINVAL;
++ goto out2;
++ }
++
++ new_fp = filp_open(new_obj_fname, O_CREAT | O_RDWR | O_LARGEFILE, 0666);
++ if (IS_ERR(new_fp)) {
++ ERROR(sti, "unable to create file: %s. Err = %d\n",
++ new_obj_fname, (int) PTR_ERR(new_fp));
++ sti->response_code = PIMA15740_RES_DEVICE_BUSY;
++ rc = -EINVAL;
++ goto out3;
++ }
++
++ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
++ if (!buf) {
++ sti->response_code = PIMA15740_RES_OPERATION_NOT_SUPPORTED;
++ rc = -EINVAL;
++ goto out4;
++ }
++
++ inode = old_fp->f_dentry->d_inode;
++ file_size = i_size_read(inode->i_mapping->host);
++ VDBG(sti, "object file size: %llu\n", (unsigned long long) file_size);
++
++ if (unlikely(file_size == 0)) {
++ sti->response_code = PIMA15740_RES_STORE_NOT_AVAILABLE;
++ rc = -EIO;
++ goto out5;
++ }
++
++ file_offset = 0;
++ amount_left = file_size;
++
++ while (amount_left > 0) {
++ amount = min(amount_left, (unsigned int) PAGE_SIZE);
++ if (amount == 0)
++ break;
++
++ file_offset_tmp = file_offset;
++ nread = vfs_read(old_fp, buf, amount, &file_offset_tmp);
++
++ if (signal_pending(current)) {
++ rc = -EINTR;
++ goto out5;
++ }
++
++ if (nread < 0) {
++ DBG(sti, "error in file read: %d\n",
++ (int) nread);
++ nread = 0;
++ } else if (nread < amount) {
++ DBG(sti, "partial file read: %d/%u\n",
++ (int) nread, amount);
++ /* round down to a block */
++ nread -= (nread & 511);
++ }
++
++ amount = min(amount, (unsigned int) nread);
++ file_offset_tmp = file_offset;
++ nwritten = vfs_write(new_fp, buf, amount, &file_offset_tmp);
++
++ if (signal_pending(current)) {
++ rc = -EINTR;
++ goto out5;
++ }
++
++ if (nwritten < 0) {
++ VDBG(sti, "error in file write: %d\n",
++ (int) nwritten);
++ nwritten = 0;
++ } else if (nwritten < amount) {
++ VDBG(sti, "partial file write: %d/%u\n",
++ (int) nwritten, amount);
++ /* round down to a block */
++ nwritten -= (nwritten & 511);
++ }
++
++ amount = min(amount, (unsigned int) nwritten);
++ file_offset += amount;
++ amount_left -= amount;
++ }
++
++ size = sizeof(*old_obj);
++ new_obj = kzalloc(size, GFP_KERNEL);
++ if (!new_obj) {
++ rc = -ENOMEM;
++ goto out5;
++ }
++
++ spin_lock_irq(&sti->lock);
++
++ sti->object_num++;
++
++ /* change obj_handle */
++ new_obj->obj_handle = sti->object_num;
++
++ /* change parent object */
++ if (new_obj_parent_handle == 0xffffffff)
++ new_obj->parent_object = 0;
++ else
++ new_obj->parent_object = new_obj_parent_handle;
++
++ new_obj->storage_id = old_obj->storage_id;
++ new_obj->is_dir = old_obj->is_dir;
++ new_obj->send_valid = old_obj->send_valid;
++ new_obj->obj_info_size = old_obj->obj_info_size;
++ strncpy(new_obj->filename, old_obj->filename,
++ sizeof(new_obj->filename));
++
++ /* change full path name */
++ strncpy(new_obj->full_path, new_obj_fname, sizeof(new_obj->full_path));
++
++ /* copy object_info */
++ memcpy(&new_obj->obj_info, &old_obj->obj_info, old_obj->obj_info_size);
++
++ /* fill parent_object in object_info */
++ new_obj->obj_info.parent_object = new_obj->parent_object;
++
++ /* add to object list */
++ list_add_tail(&new_obj->list, &sti->obj_list);
++
++ spin_unlock_irq(&sti->lock);
++
++ sti->response_code = PIMA15740_RES_OK;
++out5:
++ kfree(buf);
++out4:
++ filp_close(new_fp, current->files);
++out3:
++ filp_close(old_fp, current->files);
++out2:
++ kfree(new_obj_fname);
++out1:
++ /* send response */
++ rc = send_params_response(sti, sti->response_code,
++ sti->object_num, 0, 0,
++ 1);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++
++static int do_move_object(struct sti_dev *sti, struct sti_buffhd *bh)
++{
++ int i, rc = 0;
++ size_t size = 0;
++ unsigned int old_obj_handle, new_obj_parent_handle;
++ unsigned int new_storage_id;
++ char *new_obj_fname;
++ struct file *old_fp, *new_fp;
++ struct inode *old_dir, *new_dir;
++ struct dentry *old_dentry, *new_dentry;
++ struct sti_object *old_obj = NULL;
++ struct sti_object *new_obj = NULL;
++ struct sti_object *new_obj_parent = NULL;
++ struct sti_object *tmp_obj = NULL;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ if (!sti->session_open) {
++ sti->response_code = PIMA15740_RES_SESSION_NOT_OPEN;
++ goto out1;
++ }
++
++ old_obj_handle = sti->ops_params[0];
++ new_storage_id = sti->ops_params[1];
++ new_obj_parent_handle = sti->ops_params[2];
++
++ if ((old_obj_handle == 0) || (old_obj_handle > sti->object_num)) {
++ WARNING(sti, "invalid object handle: %u\n", old_obj_handle);
++ sti->response_code = PIMA15740_RES_INVALID_OBJECT_HANDLE;
++ goto out1;
++ }
++
++ if (new_storage_id != sti->storage_id) {
++ WARNING(sti, "invalid storage id: %u\n", new_storage_id);
++ sti->response_code = PIMA15740_RES_INVALID_STORAGE_ID;
++ goto out1;
++ }
++
++ if (new_obj_parent_handle > sti->object_num
++ && new_obj_parent_handle != 0xffffffff) {
++ WARNING(sti, "invalid parent object handle: %u\n",
++ new_obj_parent_handle);
++ sti->response_code = PIMA15740_RES_INVALID_PARENT_OBJECT;
++ goto out1;
++ }
++
++ spin_lock_irq(&sti->lock);
++
++ /* find the old object to be moved */
++ i = 0;
++ list_for_each_entry(tmp_obj, &sti->obj_list, list) {
++ if (tmp_obj->obj_handle == old_obj_handle) {
++ i++;
++ old_obj = tmp_obj;
++ }
++
++ if (tmp_obj->obj_handle == new_obj_parent_handle) {
++ i++;
++ new_obj_parent = tmp_obj;
++ }
++
++ if (i == 2)
++ break;
++ }
++
++ spin_unlock_irq(&sti->lock);
++
++ if (i != 2 || !old_obj || !new_obj_parent) {
++ WARNING(sti, "invalid objects %u or %u\n",
++ old_obj_handle, new_obj_parent_handle);
++ sti->response_code = PIMA15740_RES_INVALID_PARENT_OBJECT;
++ goto out1;
++ }
++
++ size = strlen(new_obj_parent->full_path) +
++ strlen(old_obj->filename) + 2;
++ new_obj_fname = kzalloc(size, GFP_KERNEL);
++ if (!new_obj_fname) {
++ sti->response_code = PIMA15740_RES_DEVICE_BUSY;
++ rc = -EINVAL;
++ goto out1;
++ }
++ strncpy(new_obj_fname, new_obj_parent->full_path, size);
++ strncat(new_obj_fname, "/", size);
++ strncat(new_obj_fname, old_obj->filename, size);
++
++ VDBG(sti, "move object: from [%s] to [%s]\n",
++ old_obj->full_path, new_obj_fname);
++
++ old_fp = filp_open(old_obj->full_path, O_RDONLY | O_LARGEFILE, 0);
++ if (IS_ERR(old_fp)) {
++ ERROR(sti, "unable to open file: %s. Err = %d\n",
++ old_obj->full_path, (int) PTR_ERR(old_fp));
++ sti->response_code = PIMA15740_RES_DEVICE_BUSY;
++ rc = -EINVAL;
++ goto out2;
++ }
++
++ new_fp = filp_open(new_obj_fname, O_CREAT | O_RDWR | O_LARGEFILE, 0666);
++ if (IS_ERR(new_fp)) {
++ ERROR(sti, "unable to create file: %s. Err = %d\n",
++ new_obj_fname, (int) PTR_ERR(new_fp));
++ sti->response_code = PIMA15740_RES_DEVICE_BUSY;
++ rc = -EINVAL;
++ goto out3;
++ }
++
++ old_dir = old_fp->f_dentry->d_parent->d_inode;
++ new_dir = new_fp->f_dentry->d_parent->d_inode;
++ old_dentry = old_fp->f_dentry;
++ new_dentry = new_fp->f_dentry;
++
++ rc = vfs_rename(old_dir, old_dentry, new_dir, new_dentry);
++
++ if (rc) {
++ sti->response_code = PIMA15740_RES_OPERATION_NOT_SUPPORTED;
++ goto out4;
++ } else
++ sti->response_code = PIMA15740_RES_OK;
++
++ size = sizeof(*old_obj);
++ new_obj = kzalloc(size, GFP_KERNEL);
++ if (!new_obj) {
++ rc = -ENOMEM;
++ goto out4;
++ }
++
++ spin_lock_irq(&sti->lock);
++
++ /* change parent object */
++ if (new_obj_parent_handle == 0xffffffff)
++ new_obj->parent_object = 0;
++ else
++ new_obj->parent_object = new_obj_parent_handle;
++
++ new_obj->obj_handle = old_obj->obj_handle;
++ new_obj->storage_id = old_obj->storage_id;
++ new_obj->is_dir = old_obj->is_dir;
++ new_obj->send_valid = old_obj->send_valid;
++ new_obj->obj_info_size = old_obj->obj_info_size;
++ strncpy(new_obj->filename, old_obj->filename,
++ sizeof(new_obj->filename));
++
++ /* change full path name */
++ strncpy(new_obj->full_path, new_obj_fname, sizeof(new_obj->full_path));
++
++ /* copy object_info */
++ memcpy(&new_obj->obj_info, &old_obj->obj_info, old_obj->obj_info_size);
++
++ /* fill parent_object in object_info */
++ new_obj->obj_info.parent_object = new_obj->parent_object;
++
++ /* add to object list */
++ list_add_tail(&new_obj->list, &sti->obj_list);
++
++ /* remove from object list */
++ list_del_init(&old_obj->list);
++
++ spin_unlock_irq(&sti->lock);
++
++ kfree(old_obj);
++out4:
++ filp_close(new_fp, current->files);
++out3:
++ filp_close(old_fp, current->files);
++out2:
++ kfree(new_obj_fname);
++out1:
++ /* send response */
++ rc = send_response(sti, sti->response_code);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++
++/* TODO: PIMA 15740 Event handling via interrupt endpoint */
++static int send_status(struct sti_dev *sti)
++{
++ VDBG(sti, "---> %s()\n", __func__);
++ VDBG(sti, "<--- %s()\n", __func__);
++ return 0;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++/* handle supported PIMA 15740 operations */
++static int do_still_image_command(struct sti_dev *sti)
++{
++ struct sti_buffhd *bh;
++ int rc = -EINVAL;
++ int reply = -EINVAL;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ dump_cb(sti);
++
++ if (!backing_folder_is_open(sti)) {
++ ERROR(sti, "backing folder is not open\n");
++ return rc;
++ }
++
++ /* wait for the next buffer to become available for data or status */
++ bh = sti->next_buffhd_to_drain = sti->next_buffhd_to_fill;
++ while (bh->state != BUF_STATE_EMPTY) {
++ rc = sleep_thread(sti);
++ if (rc)
++ return rc;
++ }
++
++ down_read(&sti->filesem);
++ switch (sti->code) {
++
++ case PIMA15740_OP_GET_DEVICE_INFO:
++ DBG(sti, "PIMA15740 OPS: get device info\n");
++ reply = do_get_device_info(sti, bh);
++ break;
++
++ case PIMA15740_OP_OPEN_SESSION:
++ DBG(sti, "PIMA15740 OPS: open session\n");
++ reply = do_open_session(sti);
++ break;
++
++ case PIMA15740_OP_CLOSE_SESSION:
++ DBG(sti, "PIMA15740 OPS: close session\n");
++ reply = do_close_session(sti);
++ break;
++
++ case PIMA15740_OP_GET_STORAGE_IDS:
++ DBG(sti, "PIMA15740 OPS: get storage ids\n");
++ reply = do_get_storage_ids(sti, bh);
++ break;
++
++ case PIMA15740_OP_GET_STORAGE_INFO:
++ DBG(sti, "PIMA15740 OPS: get storage info\n");
++ reply = do_get_storage_info(sti, bh);
++ break;
++
++ case PIMA15740_OP_GET_NUM_OBJECTS:
++ DBG(sti, "PIMA15740 OPS: get num objects\n");
++ reply = do_get_num_objects(sti, bh);
++ break;
++
++ case PIMA15740_OP_GET_OBJECT_HANDLES:
++ DBG(sti, "PIMA15740 OPS: get object handles\n");
++ reply = do_get_object_handles(sti, bh);
++ break;
++
++ case PIMA15740_OP_GET_OBJECT_INFO:
++ DBG(sti, "PIMA15740 OPS: get object info\n");
++ reply = do_get_object_info(sti, bh);
++ break;
++
++ case PIMA15740_OP_GET_OBJECT:
++ DBG(sti, "PIMA15740 OPS: get object\n");
++ reply = do_get_object(sti, bh);
++ break;
++
++ case PIMA15740_OP_DELETE_OBJECT:
++ DBG(sti, "PIMA15740 OPS: delete object\n");
++ reply = do_delete_object(sti, bh);
++ break;
++
++ case PIMA15740_OP_SEND_OBJECT_INFO:
++ DBG(sti, "PIMA15740 OPS: send object info\n");
++ reply = do_send_object_info(sti, bh);
++ break;
++
++ case PIMA15740_OP_SEND_OBJECT:
++ DBG(sti, "PIMA15740 OPS: send object\n");
++ reply = do_send_object(sti, bh);
++ break;
++
++ case PIMA15740_OP_COPY_OBJECT:
++ DBG(sti, "PIMA15740 OPS: copy object\n");
++ reply = do_copy_object(sti, bh);
++ break;
++
++ case PIMA15740_OP_MOVE_OBJECT:
++ DBG(sti, "PIMA15740 OPS: move object\n");
++ reply = do_move_object(sti, bh);
++ break;
++
++ default:
++ WARNING(sti, "unknown PIMA15740 OPS 0x%04x\n", sti->code);
++ break;
++ }
++ up_read(&sti->filesem);
++
++ if (reply == -EINTR || signal_pending(current))
++ rc = -EINTR;
++
++ if (reply == -EINVAL)
++ rc = 0;
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++/* received PIMA 15740 Command Blocks */
++static int received_cb(struct sti_dev *sti, struct sti_buffhd *bh)
++{
++ struct usb_request *req = bh->outreq;
++ struct pima15740_container *cb = req->buf;
++ unsigned short n;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ /* this is not a real packet */
++ if (req->status)
++ return -EINVAL;
++
++ /* save the command for later */
++ sti->container_len = cb->container_len;
++ sti->container_type = cb->container_type;
++ sti->code = cb->code;
++ sti->transaction_id = cb->transaction_id;
++
++ /* get Command Block Parameters 1..N */
++ n = sti->container_len - PIMA15740_CONTAINER_LEN;
++ if (n != 0)
++ memcpy(sti->ops_params, cb + 1, n);
++
++ VDBG(sti, "Command Block: len=%u, type=0x%04x, "
++ "code=0x%04x, trans_id=0x%08x\n",
++ sti->container_len, sti->container_type,
++ sti->code, sti->transaction_id);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return 0;
++}
++
++
++static int get_next_command(struct sti_dev *sti)
++{
++ struct sti_buffhd *bh;
++ int rc = 0;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ /* wait for the next buffer to become available */
++ bh = sti->next_buffhd_to_fill;
++ while (bh->state != BUF_STATE_EMPTY) {
++ rc = sleep_thread(sti);
++ if (rc)
++ return rc;
++ }
++
++ /* queue a request to read a Bulk-only Command Block */
++ set_bulk_out_req_length(sti, bh, 512);
++ bh->outreq->short_not_ok = 1;
++ start_transfer(sti, sti->bulk_out, bh->outreq,
++ &bh->outreq_busy, &bh->state);
++
++ /* we will drain the buffer in software, which means we
++ * can reuse it for the next filling. No need to advance
++ * next_buffhd_to_fill. */
++
++ /* wait for the Command Block to arrive */
++ while (bh->state != BUF_STATE_FULL) {
++ rc = sleep_thread(sti);
++ if (rc)
++ return rc;
++ }
++ smp_rmb();
++ rc = received_cb(sti, bh);
++ bh->state = BUF_STATE_EMPTY;
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static int enable_endpoint(struct sti_dev *sti, struct usb_ep *ep,
++ const struct usb_endpoint_descriptor *d)
++{
++ int rc;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ ep->driver_data = sti;
++ rc = usb_ep_enable(ep, d);
++ if (rc)
++ ERROR(sti, "can't enable %s, result %d\n", ep->name, rc);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++static int alloc_request(struct sti_dev *sti, struct usb_ep *ep,
++ struct usb_request **preq)
++{
++ VDBG(sti, "---> %s()\n", __func__);
++
++ *preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
++ if (*preq)
++ return 0;
++
++ ERROR(sti, "can't allocate request for %s\n", ep->name);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return -ENOMEM;
++}
++
++/*
++ * Reset interface setting and re-init endpoint state (toggle etc).
++ * Call with altsetting < 0 to disable the interface. The only other
++ * available altsetting is 0, which enables the interface.
++ */
++static int do_set_interface(struct sti_dev *sti, int altsetting)
++{
++ int rc = 0;
++ int i;
++ const struct usb_endpoint_descriptor *d;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ if (sti->running)
++ DBG(sti, "reset interface\n");
++
++reset:
++ /* deallocate the requests */
++ for (i = 0; i < NUM_BUFFERS; ++i) {
++ struct sti_buffhd *bh = &sti->buffhds[i];
++
++ if (bh->inreq) {
++ usb_ep_free_request(sti->bulk_in, bh->inreq);
++ bh->inreq = NULL;
++ }
++ if (bh->outreq) {
++ usb_ep_free_request(sti->bulk_out, bh->outreq);
++ bh->outreq = NULL;
++ }
++ }
++ if (sti->intreq) {
++ usb_ep_free_request(sti->intr_in, sti->intreq);
++ sti->intreq = NULL;
++ }
++
++ /* disable the endpoints */
++ if (sti->bulk_in_enabled) {
++ usb_ep_disable(sti->bulk_in);
++ sti->bulk_in_enabled = 0;
++ }
++ if (sti->bulk_out_enabled) {
++ usb_ep_disable(sti->bulk_out);
++ sti->bulk_out_enabled = 0;
++ }
++ if (sti->intr_in_enabled) {
++ usb_ep_disable(sti->intr_in);
++ sti->intr_in_enabled = 0;
++ }
++
++ sti->running = 0;
++ if (altsetting < 0 || rc != 0)
++ return rc;
++
++ DBG(sti, "set interface %d\n", altsetting);
++
++ /* enable the endpoints */
++ d = ep_desc(sti->gadget, &fs_bulk_in_desc, &hs_bulk_in_desc);
++ rc = enable_endpoint(sti, sti->bulk_in, d);
++ if (rc)
++ goto reset;
++ sti->bulk_in_enabled = 1;
++
++ d = ep_desc(sti->gadget, &fs_bulk_out_desc, &hs_bulk_out_desc);
++ rc = enable_endpoint(sti, sti->bulk_out, d);
++ if (rc)
++ goto reset;
++ sti->bulk_out_enabled = 1;
++ sti->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
++ clear_bit(CLEAR_BULK_HALTS, &sti->atomic_bitflags);
++
++ d = ep_desc(sti->gadget, &fs_intr_in_desc, &hs_intr_in_desc);
++ rc = enable_endpoint(sti, sti->intr_in, d);
++ if (rc)
++ goto reset;
++ sti->intr_in_enabled = 1;
++
++ /* allocate the requests */
++ for (i = 0; i < NUM_BUFFERS; ++i) {
++ struct sti_buffhd *bh = &sti->buffhds[i];
++
++ rc = alloc_request(sti, sti->bulk_in, &bh->inreq);
++ if (rc)
++ goto reset;
++
++ rc = alloc_request(sti, sti->bulk_out, &bh->outreq);
++ if (rc)
++ goto reset;
++
++ bh->inreq->buf = bh->outreq->buf = bh->buf;
++ bh->inreq->context = bh->outreq->context = bh;
++ bh->inreq->complete = bulk_in_complete;
++ bh->outreq->complete = bulk_out_complete;
++ }
++
++ rc = alloc_request(sti, sti->intr_in, &sti->intreq);
++ if (rc)
++ goto reset;
++
++ sti->running = 1;
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++
++/*
++ * Change our operational configuration. This code must agree with the code
++ * that returns config descriptors, and with interface altsetting code.
++ *
++ * It's also responsible for power management interactions. Some
++ * configurations might not work with our current power sources.
++ * For now we just assume the gadget is always self-powered.
++ */
++static int do_set_config(struct sti_dev *sti, u8 new_config)
++{
++ int rc = 0;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ /* disable the single interface */
++ if (sti->config != 0) {
++ DBG(sti, "reset config\n");
++ sti->config = 0;
++ rc = do_set_interface(sti, -1);
++ }
++
++ /* enable the interface */
++ if (new_config != 0) {
++ sti->config = new_config;
++ rc = do_set_interface(sti, 0);
++ if (rc)
++ sti->config = 0; /* reset on errors */
++ else {
++ char *speed;
++
++ switch (sti->gadget->speed) {
++ case USB_SPEED_LOW:
++ speed = "low";
++ break;
++ case USB_SPEED_FULL:
++ speed = "full";
++ break;
++ case USB_SPEED_HIGH:
++ speed = "high";
++ break;
++ default:
++ speed = "?";
++ break;
++ }
++ INFO(sti, "%s speed config #%d\n",
++ speed, sti->config);
++ }
++ }
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static void handle_exception(struct sti_dev *sti)
++{
++ siginfo_t info;
++ int sig;
++ int i;
++ int num_active;
++ struct sti_buffhd *bh;
++ enum sti_state old_state;
++ u8 new_config;
++ unsigned int exception_req_tag;
++ int rc;
++
++ VDBG(sti, "---> %s()\n", __func__);
++
++ /* Clear the existing signals. Anything but SIGUSR1 is converted
++ * into a high-priority EXIT exception. */
++ for (;;) {
++ sig = dequeue_signal_lock(current, &current->blocked, &info);
++ if (!sig)
++ break;
++
++ if (sig != SIGUSR1) {
++ if (sti->state < STI_STATE_EXIT)
++ DBG(sti, "main thread exiting on signal\n");
++ raise_exception(sti, STI_STATE_EXIT);
++ }
++ }
++
++ /* cancel all the pending transfers */
++ if (sti->intreq_busy)
++ usb_ep_dequeue(sti->intr_in, sti->intreq);
++
++ for (i = 0; i < NUM_BUFFERS; ++i) {
++ bh = &sti->buffhds[i];
++ if (bh->inreq_busy)
++ usb_ep_dequeue(sti->bulk_in, bh->inreq);
++ if (bh->outreq_busy)
++ usb_ep_dequeue(sti->bulk_out, bh->outreq);
++ }
++
++ /* wait until everything is idle */
++ for (;;) {
++ num_active = sti->intreq_busy;
++ for (i = 0; i < NUM_BUFFERS; ++i) {
++ bh = &sti->buffhds[i];
++ num_active += bh->inreq_busy + bh->outreq_busy;
++ }
++
++ if (num_active == 0)
++ break;
++
++ if (sleep_thread(sti))
++ return;
++ }
++
++ /* clear out the controller's fifos */
++ if (sti->bulk_in_enabled)
++ usb_ep_fifo_flush(sti->bulk_in);
++ if (sti->bulk_out_enabled)
++ usb_ep_fifo_flush(sti->bulk_out);
++ if (sti->intr_in_enabled)
++ usb_ep_fifo_flush(sti->intr_in);
++
++ /*
++ * Reset the I/O buffer states and pointers, the device
++ * state, and the exception. Then invoke the handler.
++ */
++ spin_lock_irq(&sti->lock);
++
++ for (i = 0; i < NUM_BUFFERS; ++i) {
++ bh = &sti->buffhds[i];
++ bh->state = BUF_STATE_EMPTY;
++ }
++ sti->next_buffhd_to_fill = sti->next_buffhd_to_drain =
++ &sti->buffhds[0];
++
++ exception_req_tag = sti->exception_req_tag;
++ new_config = sti->new_config;
++ old_state = sti->state;
++
++ if (old_state == STI_STATE_ABORT_BULK_OUT)
++ sti->state = STI_STATE_STATUS_PHASE;
++ else
++ sti->state = STI_STATE_IDLE;
++ spin_unlock_irq(&sti->lock);
++
++ /* carry out any extra actions required for the exception */
++ switch (old_state) {
++ default:
++ break;
++
++ case STI_STATE_CANCEL:
++ if (usb_ep_clear_halt(sti->bulk_out) ||
++ usb_ep_clear_halt(sti->bulk_in))
++ sti->response_code = PIMA15740_RES_DEVICE_BUSY;
++ else
++ sti->response_code = PIMA15740_RES_OK;
++ break;
++
++ case STI_STATE_ABORT_BULK_OUT:
++ send_status(sti);
++ spin_lock_irq(&sti->lock);
++ if (sti->state == STI_STATE_STATUS_PHASE)
++ sti->state = STI_STATE_IDLE;
++ spin_unlock_irq(&sti->lock);
++ break;
++
++ case STI_STATE_RESET:
++ /* in case we were forced against our will to halt a
++ * bulk endpoint, clear the halt now */
++ if (test_and_clear_bit(CLEAR_BULK_HALTS,
++ &sti->atomic_bitflags)) {
++ usb_ep_clear_halt(sti->bulk_in);
++ usb_ep_clear_halt(sti->bulk_out);
++ }
++
++ if (sti->ep0_req_tag == exception_req_tag)
++ /* complete the status stage */
++ ep0_queue(sti);
++ break;
++
++ case STI_STATE_INTERFACE_CHANGE:
++ rc = do_set_interface(sti, 0);
++ if (sti->ep0_req_tag != exception_req_tag)
++ break;
++ if (rc != 0) /* STALL on errors */
++ sti_set_halt(sti, sti->ep0);
++ else /* complete the status stage */
++ ep0_queue(sti);
++ break;
++
++ case STI_STATE_CONFIG_CHANGE:
++ rc = do_set_config(sti, new_config);
++ if (sti->ep0_req_tag != exception_req_tag)
++ break;
++ if (rc != 0) /* STALL on errors */
++ sti_set_halt(sti, sti->ep0);
++ else /* complete the status stage */
++ ep0_queue(sti);
++ break;
++
++ case STI_STATE_DISCONNECT:
++ do_set_config(sti, 0); /* unconfigured state */
++ break;
++
++ case STI_STATE_EXIT:
++ case STI_STATE_TERMINATED:
++ do_set_config(sti, 0); /* free resources */
++ spin_lock_irq(&sti->lock);
++ sti->state = STI_STATE_TERMINATED; /* stop the thread */
++ spin_unlock_irq(&sti->lock);
++ break;
++ }
++
++ VDBG(sti, "<--- %s()\n", __func__);
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static int sti_main_thread(void *sti_)
++{
++ struct sti_dev *sti = sti_;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ /*
++ * allow the thread to be killed by a signal, but set the signal mask
++ * to block everything but INT, TERM, KILL, and USR1
++ */
++ allow_signal(SIGINT);
++ allow_signal(SIGTERM);
++ allow_signal(SIGKILL);
++ allow_signal(SIGUSR1);
++
++ /* allow the thread to be frozen */
++ set_freezable();
++
++ /*
++ * arrange for userspace references to be interpreted as kernel
++ * pointers. That way we can pass a kernel pointer to a routine
++ * that expects a __user pointer and it will work okay.
++ */
++ set_fs(get_ds());
++
++ /* the main loop */
++ while (sti->state != STI_STATE_TERMINATED) {
++ if (exception_in_progress(sti) || signal_pending(current)) {
++ handle_exception(sti);
++ continue;
++ }
++
++ if (!sti->running) {
++ sleep_thread(sti);
++ continue;
++ }
++
++ if (get_next_command(sti))
++ continue;
++
++ spin_lock_irq(&sti->lock);
++ if (!exception_in_progress(sti))
++ sti->state = STI_STATE_DATA_PHASE;
++ spin_unlock_irq(&sti->lock);
++
++ if (do_still_image_command(sti))
++ continue;
++
++ spin_lock_irq(&sti->lock);
++ if (!exception_in_progress(sti))
++ sti->state = STI_STATE_STATUS_PHASE;
++ spin_unlock_irq(&sti->lock);
++
++ if (send_status(sti))
++ continue;
++
++ spin_lock_irq(&sti->lock);
++ if (!exception_in_progress(sti))
++ sti->state = STI_STATE_IDLE;
++ spin_unlock_irq(&sti->lock);
++ }
++
++ spin_lock_irq(&sti->lock);
++ sti->thread_task = NULL;
++ spin_unlock_irq(&sti->lock);
++
++ /* in case we are exiting because of a signal, unregister the
++ * gadget driver */
++ if (test_and_clear_bit(REGISTERED, &sti->atomic_bitflags))
++ usb_gadget_unregister_driver(&sti_driver);
++
++ /* let the unbind and cleanup routines know the thread has exited */
++ complete_and_exit(&sti->thread_notifier, 0);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static int open_backing_folder(struct sti_dev *sti, const char *folder_name)
++{
++ struct file *filp = NULL;
++ int rc = -EINVAL;
++ struct inode *inode = NULL;
++ size_t len;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ /* remove the trailing path sign */
++ len = strlen(folder_name);
++ if (len > 1 && folder_name[len-1] == '/')
++ ((char *) folder_name)[len-1] = 0;
++
++ memset(sti->root_path, 0, sizeof(sti->root_path));
++ strncpy(sti->root_path, folder_name, sizeof(sti->root_path));
++
++ filp = filp_open(sti->root_path, O_RDONLY | O_DIRECTORY, 0);
++ if (IS_ERR(filp)) {
++ ERROR(sti, "unable to open backing folder: %s\n",
++ sti->root_path);
++ return PTR_ERR(filp);
++ }
++
++ if (filp->f_path.dentry)
++ inode = filp->f_dentry->d_inode;
++
++ if (!inode || !S_ISDIR(inode->i_mode)) {
++ ERROR(sti, "%s is not a directory\n", sti->root_path);
++ goto out;
++ }
++
++ get_file(filp);
++
++ sti->root_filp = filp;
++
++ INFO(sti, "open backing folder: %s\n", folder_name);
++ rc = 0;
++out:
++ filp_close(filp, current->files);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return rc;
++}
++
++static void close_backing_folder(struct sti_dev *sti)
++{
++ VDBG(sti, "---> %s()\n", __func__);
++
++ if (sti->root_filp) {
++ INFO(sti, "close backing folder\n");
++ fput(sti->root_filp);
++ sti->root_filp = NULL;
++ }
++
++ VDBG(sti, "<--- %s()\n", __func__);
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++/* sysfs attribute files */
++static ssize_t show_folder(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct sti_dev *sti = dev_get_drvdata(dev);
++ char *p;
++ ssize_t rc;
++
++ down_read(&sti->filesem);
++ if (backing_folder_is_open(sti)) {
++ /* get the complete pathname */
++ p = d_path(&sti->root_filp->f_path, buf, PAGE_SIZE - 1);
++ if (IS_ERR(p))
++ rc = PTR_ERR(p);
++ else {
++ rc = strlen(p);
++ memmove(buf, p, rc);
++
++ /* add a newline */
++ buf[rc] = '\n';
++ buf[++rc] = 0;
++ }
++ } else { /* no file */
++ *buf = 0;
++ rc = 0;
++ }
++ up_read(&sti->filesem);
++
++ return rc;
++}
++
++
++static ssize_t store_folder(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct sti_dev *sti = dev_get_drvdata(dev);
++ int rc = 0;
++
++ /* remove a trailing newline */
++ if (count > 0 && buf[count-1] == '\n')
++ ((char *) buf)[count-1] = 0;
++
++ /* eject current medium */
++ down_write(&sti->filesem);
++ if (backing_folder_is_open(sti))
++ close_backing_folder(sti);
++
++ /* load new medium */
++ if (count > 0 && buf[0])
++ rc = open_backing_folder(sti, buf);
++
++ up_write(&sti->filesem);
++
++ return (rc < 0 ? rc : count);
++}
++
++/* the write permissions and store_xxx pointers are set in sti_bind() */
++static DEVICE_ATTR(folder, 0444, show_folder, NULL);
++
++
++/*-------------------------------------------------------------------------*/
++
++static void sti_release(struct kref *ref)
++{
++ struct sti_dev *sti = container_of(ref, struct sti_dev, ref);
++
++ while (!list_empty(&sti->obj_list)) {
++ struct sti_object *obj = NULL;
++ obj = list_entry(sti->obj_list.next, struct sti_object, list);
++ list_del_init(&obj->list);
++ kfree(obj);
++ }
++
++ while (!list_empty(&sti->tmp_obj_list)) {
++ struct sti_object *obj = NULL;
++ obj = list_entry(sti->tmp_obj_list.next, struct sti_object,
++ list);
++ list_del_init(&obj->list);
++ kfree(obj);
++ }
++
++ kfree(sti);
++}
++
++static void gadget_release(struct device *dev)
++{
++ struct sti_dev *sti = dev_get_drvdata(dev);
++ VDBG(sti, "---> %s()\n", __func__);
++ VDBG(sti, "<--- %s()\n", __func__);
++
++ kref_put(&sti->ref, sti_release);
++}
++
++
++static void /* __init_or_exit */ sti_unbind(struct usb_gadget *gadget)
++{
++ struct sti_dev *sti = get_gadget_data(gadget);
++ int i;
++ struct usb_request *req = sti->ep0req;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ DBG(sti, "unbind\n");
++ clear_bit(REGISTERED, &sti->atomic_bitflags);
++
++ /* unregister the sysfs attribute files */
++ if (sti->registered) {
++ device_remove_file(&sti->dev, &dev_attr_folder);
++ close_backing_folder(sti);
++ device_unregister(&sti->dev);
++ sti->registered = 0;
++ }
++
++ /* if the thread isn't already dead, tell it to exit now */
++ if (sti->state != STI_STATE_TERMINATED) {
++ raise_exception(sti, STI_STATE_EXIT);
++ wait_for_completion(&sti->thread_notifier);
++
++ /* the cleanup routine waits for this completion also */
++ complete(&sti->thread_notifier);
++ }
++
++ /* free the data buffers */
++ for (i = 0; i < NUM_BUFFERS; ++i)
++ kfree(sti->buffhds[i].buf);
++
++ /* free the request and buffer for endpoint 0 */
++ if (req) {
++ kfree(req->buf);
++ usb_ep_free_request(sti->ep0, req);
++ }
++
++ set_gadget_data(gadget, NULL);
++
++ VDBG(sti, "<--- %s()\n", __func__);
++}
++
++
++static int __init check_parameters(struct sti_dev *sti)
++{
++ int gcnum;
++ VDBG(sti, "---> %s()\n", __func__);
++
++ /* parameter wasn't set */
++ if (mod_data.release == 0xffff) {
++ gcnum = usb_gadget_controller_number(sti->gadget);
++ if (gcnum >= 0)
++ mod_data.release = 0x0300 + gcnum;
++ else {
++ WARNING(sti, "controller '%s' not recognized\n",
++ sti->gadget->name);
++ mod_data.release = 0x0399;
++ }
++ }
++
++ mod_data.buflen &= PAGE_CACHE_MASK;
++ if (mod_data.buflen <= 0) {
++ ERROR(sti, "invalid buflen\n");
++ return -ETOOSMALL;
++ }
++
++ VDBG(sti, "<--- %s()\n", __func__);
++ return 0;
++}
++
++
++static int __init sti_bind(struct usb_gadget *gadget)
++{
++ struct sti_dev *sti = the_sti;
++ int rc;
++ int i;
++ struct usb_ep *ep;
++ struct usb_request *req;
++
++ sti->gadget = gadget;
++ set_gadget_data(gadget, sti);
++ sti->ep0 = gadget->ep0;
++ sti->ep0->driver_data = sti;
++
++ rc = check_parameters(sti);
++ if (rc)
++ goto out;
++
++ /* enable store_xxx attributes */
++ dev_attr_folder.attr.mode = 0644;
++ dev_attr_folder.store = store_folder;
++
++ sti->dev.release = gadget_release;
++ sti->dev.parent = &gadget->dev;
++ sti->dev.driver = &sti_driver.driver;
++ dev_set_drvdata(&sti->dev, sti);
++ dev_set_name(&sti->dev, "%s", sti_driver.driver.name);
++
++ rc = device_register(&sti->dev);
++ if (rc) {
++ INFO(sti, "failed to register sti: %d\n", rc);
++ goto out;
++ }
++
++ rc = device_create_file(&sti->dev, &dev_attr_folder);
++ if (rc) {
++ device_unregister(&sti->dev);
++ goto out;
++ }
++
++ sti->registered = 1;
++ kref_get(&sti->ref);
++
++ /* initialize object list */
++ INIT_LIST_HEAD(&sti->obj_list);
++ INIT_LIST_HEAD(&sti->tmp_obj_list);
++
++ if (mod_data.folder && *mod_data.folder)
++ rc = open_backing_folder(sti, mod_data.folder);
++ if (rc)
++ goto out;
++
++ /* find all the endpoints we will use */
++ usb_ep_autoconfig_reset(gadget);
++ ep = usb_ep_autoconfig(gadget, &fs_bulk_in_desc);
++ if (!ep)
++ goto autoconf_fail;
++
++ /* claim bulk-in endpoint */
++ ep->driver_data = sti;
++ sti->bulk_in = ep;
++
++ ep = usb_ep_autoconfig(gadget, &fs_bulk_out_desc);
++ if (!ep)
++ goto autoconf_fail;
++
++ /* claim bulk-out endpoint */
++ ep->driver_data = sti;
++ sti->bulk_out = ep;
++
++ ep = usb_ep_autoconfig(gadget, &fs_intr_in_desc);
++ if (!ep)
++ goto autoconf_fail;
++
++ /* claim intr-in endpoint */
++ ep->driver_data = sti;
++ sti->intr_in = ep;
++
++ /* fix up the descriptors */
++ device_desc.bMaxPacketSize0 = sti->ep0->maxpacket;
++ device_desc.idVendor = cpu_to_le16(mod_data.vendor);
++ device_desc.idProduct = cpu_to_le16(mod_data.product);
++ device_desc.bcdDevice = cpu_to_le16(mod_data.release);
++
++ fs_function[3 + FS_FUNCTION_PRE_EP_ENTRIES] = NULL;
++
++ if (gadget_is_dualspeed(gadget)) {
++ hs_function[3 + HS_FUNCTION_PRE_EP_ENTRIES] = NULL;
++
++ /* assume ep0 uses the same maxpacket value for both speeds */
++ dev_qualifier.bMaxPacketSize0 = sti->ep0->maxpacket;
++
++ /* assume endpoint addresses are the same for both speeds */
++ hs_bulk_in_desc.bEndpointAddress =
++ fs_bulk_in_desc.bEndpointAddress;
++ hs_bulk_out_desc.bEndpointAddress =
++ fs_bulk_out_desc.bEndpointAddress;
++ hs_intr_in_desc.bEndpointAddress =
++ fs_intr_in_desc.bEndpointAddress;
++ }
++
++ if (gadget_is_otg(gadget))
++ otg_desc.bmAttributes |= USB_OTG_HNP;
++
++ rc = -ENOMEM;
++
++ /* allocate the request and buffer for endpoint 0 */
++ sti->ep0req = req = usb_ep_alloc_request(sti->ep0, GFP_KERNEL);
++ if (!req)
++ goto autoconf_fail;
++
++ req->buf = kmalloc(EP0_BUFSIZE, GFP_KERNEL);
++ if (!req->buf)
++ goto autoconf_fail;
++
++ req->complete = ep0_complete;
++
++ /* allocate the data buffers */
++ for (i = 0; i < NUM_BUFFERS; ++i) {
++ struct sti_buffhd *bh = &sti->buffhds[i];
++
++ /*
++ * Allocate for the bulk-in endpoint. We assume that
++ * the buffer will also work with the bulk-out (and
++ * interrupt-in) endpoint.
++ */
++ bh->buf = kmalloc(mod_data.buflen, GFP_KERNEL);
++ if (!bh->buf)
++ goto autoconf_fail;
++
++ bh->next = bh + 1;
++ }
++ sti->buffhds[NUM_BUFFERS - 1].next = &sti->buffhds[0];
++
++ /* this should reflect the actual gadget power source */
++ usb_gadget_set_selfpowered(gadget);
++
++ snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
++ init_utsname()->sysname, init_utsname()->release,
++ gadget->name);
++
++ DBG(sti, "manufacturer: %s\n", manufacturer);
++
++ /*
++ * on a real device, serial[] would be loaded from permanent
++ * storage. We just encode it from the driver version string.
++ */
++ for (i = 0; i < sizeof(serial) - 2; i += 2) {
++ unsigned char c = DRIVER_VERSION[i / 2];
++
++ if (!c)
++ break;
++
++ snprintf(&serial[i], sizeof(&serial[i]), "%02X", c);
++ }
++
++ /* fill remained device info */
++ sti_device_info.manufacturer_len = sizeof(manufacturer);
++ str_to_uni16(manufacturer, sti_device_info.manufacturer);
++
++ sti_device_info.model_len = sizeof(longname);
++ str_to_uni16(longname, sti_device_info.model);
++
++ sti_device_info.device_version_len = sizeof(device_version);
++ str_to_uni16(device_version, sti_device_info.device_version);
++
++ sti_device_info.serial_number_len = sizeof(serial);
++ str_to_uni16(serial, sti_device_info.serial_number);
++
++ /* create main kernel thread */
++ sti->thread_task = kthread_create(sti_main_thread, sti,
++ "still-image-gadget");
++
++ if (IS_ERR(sti->thread_task)) {
++ rc = PTR_ERR(sti->thread_task);
++ goto autoconf_fail;
++ }
++
++ INFO(sti, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
++ INFO(sti, "VendorID=x%04x, ProductID=x%04x, Release=x%04x\n",
++ mod_data.vendor, mod_data.product, mod_data.release);
++ INFO(sti, "I/O thread pid: %d, buflen=%u\n",
++ task_pid_nr(sti->thread_task), mod_data.buflen);
++
++ set_bit(REGISTERED, &sti->atomic_bitflags);
++
++ /* tell the thread to start working */
++ wake_up_process(sti->thread_task);
++
++ DBG(sti, "bind\n");
++ return 0;
++
++autoconf_fail:
++ ERROR(sti, "unable to autoconfigure all endpoints\n");
++ rc = -ENOTSUPP;
++out:
++ /* the thread is dead */
++ sti->state = STI_STATE_TERMINATED;
++
++ sti_unbind(gadget);
++ complete(&sti->thread_notifier);
++
++ VDBG(sti, "<---> %s()\n", __func__);
++ return rc;
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static void sti_suspend(struct usb_gadget *gadget)
++{
++ struct sti_dev *sti = get_gadget_data(gadget);
++
++ DBG(sti, "suspend\n");
++ set_bit(SUSPENDED, &sti->atomic_bitflags);
++}
++
++
++static void sti_resume(struct usb_gadget *gadget)
++{
++ struct sti_dev *sti = get_gadget_data(gadget);
++
++ DBG(sti, "resume\n");
++ clear_bit(SUSPENDED, &sti->atomic_bitflags);
++}
++
++
++/*-------------------------------------------------------------------------*/
++
++static struct usb_gadget_driver sti_driver = {
++#ifdef CONFIG_USB_GADGET_DUALSPEED
++ .speed = USB_SPEED_HIGH,
++#else
++ .speed = USB_SPEED_FULL,
++#endif
++ .function = (char *) longname,
++ .bind = sti_bind,
++ .unbind = sti_unbind,
++ .disconnect = sti_disconnect,
++ .setup = sti_setup,
++ .suspend = sti_suspend,
++ .resume = sti_resume,
++
++ .driver = {
++ .name = (char *) shortname,
++ .owner = THIS_MODULE,
++ /* .release = ... */
++ /* .suspend = ... */
++ /* .resume = ... */
++ },
++};
++
++
++static int __init sti_alloc(void)
++{
++ struct sti_dev *sti;
++
++ sti = kzalloc(sizeof *sti, GFP_KERNEL);
++ if (!sti)
++ return -ENOMEM;
++
++ spin_lock_init(&sti->lock);
++ init_rwsem(&sti->filesem);
++ kref_init(&sti->ref);
++ init_completion(&sti->thread_notifier);
++
++ the_sti = sti;
++
++ return 0;
++}
++
++
++static int __init sti_init(void)
++{
++ int rc;
++ struct sti_dev *sti;
++
++ rc = sti_alloc();
++ if (rc)
++ return rc;
++
++ sti = the_sti;
++
++ rc = usb_gadget_register_driver(&sti_driver);
++ if (rc)
++ kref_put(&sti->ref, sti_release);
++
++ return rc;
++}
++module_init(sti_init);
++
++
++static void __exit sti_cleanup(void)
++{
++ struct sti_dev *sti = the_sti;
++
++ /* unregister the driver if the thread hasn't already done */
++ if (test_and_clear_bit(REGISTERED, &sti->atomic_bitflags))
++ usb_gadget_unregister_driver(&sti_driver);
++
++ /* wait for the thread to finish up */
++ wait_for_completion(&sti->thread_notifier);
++
++ kref_put(&sti->ref, sti_release);
++}
++module_exit(sti_cleanup);
+diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
+index 3d2d3e5..69ff37b 100644
+--- a/drivers/usb/otg/Kconfig
++++ b/drivers/usb/otg/Kconfig
+@@ -69,4 +69,18 @@ config NOP_USB_XCEIV
+ built-in with usb ip or which are autonomous and doesn't require any
+ phy programming such as ISP1x04 etc.
+
++config USB_LANGWELL_OTG
++ tristate "Intel Langwell USB OTG dual-role support"
++ depends on USB && X86_MRST
++ select USB_OTG
++ select USB_OTG_UTILS
++ help
++ Say Y here if you want to build Intel Langwell USB OTG
++ transciever driver in kernel. This driver implements role
++ switch between EHCI host driver and Langwell USB OTG
++ client driver.
++
++ To compile this driver as a module, choose M here: the
++ module will be called langwell_otg.
++
+ endif # USB || OTG
+diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
+index aeb49a8..b6609db 100644
+--- a/drivers/usb/otg/Makefile
++++ b/drivers/usb/otg/Makefile
+@@ -9,6 +9,7 @@ obj-$(CONFIG_USB_OTG_UTILS) += otg.o
+ obj-$(CONFIG_USB_GPIO_VBUS) += gpio_vbus.o
+ obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o
+ obj-$(CONFIG_TWL4030_USB) += twl4030-usb.o
++obj-$(CONFIG_USB_LANGWELL_OTG) += langwell_otg.o
+ obj-$(CONFIG_NOP_USB_XCEIV) += nop-usb-xceiv.o
+ obj-$(CONFIG_USB_ULPI) += ulpi.o
+
+diff --git a/drivers/usb/otg/langwell_otg.c b/drivers/usb/otg/langwell_otg.c
+new file mode 100644
+index 0000000..46ae881
+--- /dev/null
++++ b/drivers/usb/otg/langwell_otg.c
+@@ -0,0 +1,2260 @@
++/*
++ * Intel Langwell USB OTG transceiver driver
++ * Copyright (C) 2008 - 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++/* This driver helps to switch Langwell OTG controller function between host
++ * and peripheral. It works with EHCI driver and Langwell client controller
++ * driver together.
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <linux/errno.h>
++#include <linux/interrupt.h>
++#include <linux/kernel.h>
++#include <linux/device.h>
++#include <linux/moduleparam.h>
++#include <linux/usb/ch9.h>
++#include <linux/usb/gadget.h>
++#include <linux/usb.h>
++#include <linux/usb/otg.h>
++#include <linux/notifier.h>
++#include <asm/ipc_defs.h>
++#include <linux/delay.h>
++#include "../core/hcd.h"
++
++#include <linux/usb/langwell_otg.h>
++
++#define DRIVER_DESC "Intel Langwell USB OTG transceiver driver"
++#define DRIVER_VERSION "March 19, 2010"
++
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_AUTHOR("Henry Yuan <hang.yuan@intel.com>, Hao Wu <hao.wu@intel.com>");
++MODULE_VERSION(DRIVER_VERSION);
++MODULE_LICENSE("GPL");
++
++static const char driver_name[] = "langwell_otg";
++
++static int langwell_otg_probe(struct pci_dev *pdev,
++ const struct pci_device_id *id);
++static void langwell_otg_remove(struct pci_dev *pdev);
++static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message);
++static int langwell_otg_resume(struct pci_dev *pdev);
++
++static int langwell_otg_set_host(struct otg_transceiver *otg,
++ struct usb_bus *host);
++static int langwell_otg_set_peripheral(struct otg_transceiver *otg,
++ struct usb_gadget *gadget);
++static int langwell_otg_start_srp(struct otg_transceiver *otg);
++
++static const struct pci_device_id pci_ids[] = {{
++ .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
++ .class_mask = ~0,
++ .vendor = 0x8086,
++ .device = 0x0811,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++}, { /* end: all zeroes */ }
++};
++
++static struct pci_driver otg_pci_driver = {
++ .name = (char *) driver_name,
++ .id_table = pci_ids,
++
++ .probe = langwell_otg_probe,
++ .remove = langwell_otg_remove,
++
++ .suspend = langwell_otg_suspend,
++ .resume = langwell_otg_resume,
++};
++
++static const char *state_string(enum usb_otg_state state)
++{
++ switch (state) {
++ case OTG_STATE_A_IDLE:
++ return "a_idle";
++ case OTG_STATE_A_WAIT_VRISE:
++ return "a_wait_vrise";
++ case OTG_STATE_A_WAIT_BCON:
++ return "a_wait_bcon";
++ case OTG_STATE_A_HOST:
++ return "a_host";
++ case OTG_STATE_A_SUSPEND:
++ return "a_suspend";
++ case OTG_STATE_A_PERIPHERAL:
++ return "a_peripheral";
++ case OTG_STATE_A_WAIT_VFALL:
++ return "a_wait_vfall";
++ case OTG_STATE_A_VBUS_ERR:
++ return "a_vbus_err";
++ case OTG_STATE_B_IDLE:
++ return "b_idle";
++ case OTG_STATE_B_SRP_INIT:
++ return "b_srp_init";
++ case OTG_STATE_B_PERIPHERAL:
++ return "b_peripheral";
++ case OTG_STATE_B_WAIT_ACON:
++ return "b_wait_acon";
++ case OTG_STATE_B_HOST:
++ return "b_host";
++ default:
++ return "UNDEFINED";
++ }
++}
++
++/* HSM timers */
++static inline struct langwell_otg_timer *otg_timer_initializer
++(void (*function)(unsigned long), unsigned long expires, unsigned long data)
++{
++ struct langwell_otg_timer *timer;
++ timer = kmalloc(sizeof(struct langwell_otg_timer), GFP_KERNEL);
++ timer->function = function;
++ timer->expires = expires;
++ timer->data = data;
++ return timer;
++}
++
++static struct langwell_otg_timer *a_wait_vrise_tmr, *a_aidl_bdis_tmr,
++ *b_se0_srp_tmr, *b_srp_init_tmr;
++
++static struct list_head active_timers;
++
++static struct langwell_otg *the_transceiver;
++
++/* host/client notify transceiver when event affects HNP state */
++void langwell_update_transceiver()
++{
++ struct langwell_otg *langwell = the_transceiver;
++
++ otg_dbg("transceiver is updated\n");
++
++ if (!langwell->qwork)
++ return ;
++
++ queue_work(langwell->qwork, &langwell->work);
++}
++EXPORT_SYMBOL(langwell_update_transceiver);
++
++static int langwell_otg_set_host(struct otg_transceiver *otg,
++ struct usb_bus *host)
++{
++ otg->host = host;
++
++ return 0;
++}
++
++static int langwell_otg_set_peripheral(struct otg_transceiver *otg,
++ struct usb_gadget *gadget)
++{
++ otg->gadget = gadget;
++
++ return 0;
++}
++
++static int langwell_otg_set_power(struct otg_transceiver *otg,
++ unsigned mA)
++{
++ return 0;
++}
++
++/* A-device drives vbus, controlled through PMIC CHRGCNTL register*/
++static void langwell_otg_drv_vbus(int on)
++{
++ struct ipc_pmic_reg_data pmic_data = {0};
++ struct ipc_pmic_reg_data data = {0};
++
++ data.pmic_reg_data[0].register_address = 0xd2;
++ data.ioc = 0;
++ data.num_entries = 1;
++
++ if (ipc_pmic_register_read(&data)) {
++ otg_dbg("Failed to read PMIC register 0x00.\n");
++ return;
++ }
++
++ if (data.pmic_reg_data[0].value & 0x20)
++ otg_dbg("battery attached(%x)\n", data.pmic_reg_data[0].value);
++ else {
++ otg_dbg("no battery detected\n");
++ return;
++ }
++
++ pmic_data.ioc = 0;
++ pmic_data.pmic_reg_data[0].register_address = 0xd4;
++ pmic_data.num_entries = 1;
++ if (on)
++ pmic_data.pmic_reg_data[0].value = 0x20;
++ else
++ pmic_data.pmic_reg_data[0].value = 0xc0;
++
++ if (ipc_pmic_register_write(&pmic_data, TRUE))
++ otg_dbg("Failed to write PMIC.\n");
++}
++
++/* charge vbus or discharge vbus through a resistor to ground */
++static void langwell_otg_chrg_vbus(int on)
++{
++
++ u32 val;
++
++ val = readl(the_transceiver->regs + CI_OTGSC);
++
++ if (on)
++ writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VC,
++ the_transceiver->regs + CI_OTGSC);
++ else
++ writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VD,
++ the_transceiver->regs + CI_OTGSC);
++
++}
++
++/* Start SRP */
++static int langwell_otg_start_srp(struct otg_transceiver *otg)
++{
++ u32 val;
++
++ otg_dbg("Start SRP ->\n");
++
++ val = readl(the_transceiver->regs + CI_OTGSC);
++
++ writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HADP,
++ the_transceiver->regs + CI_OTGSC);
++
++ /* Check if the data plus is finished or not */
++ msleep(8);
++ val = readl(the_transceiver->regs + CI_OTGSC);
++ if (val & (OTGSC_HADP | OTGSC_DP))
++ otg_dbg("DataLine SRP Error\n");
++
++ /* Disable interrupt - b_sess_vld */
++ val = readl(the_transceiver->regs + CI_OTGSC);
++ val &= (~(OTGSC_BSVIE | OTGSC_BSEIE));
++ writel(val, the_transceiver->regs + CI_OTGSC);
++
++ /* Start VBus SRP */
++ langwell_otg_drv_vbus(1);
++ msleep(15);
++ langwell_otg_drv_vbus(0);
++
++ /* Enable interrupt - b_sess_vld*/
++ val = readl(the_transceiver->regs + CI_OTGSC);
++ val |= (OTGSC_BSVIE | OTGSC_BSEIE);
++ writel(val, the_transceiver->regs + CI_OTGSC);
++
++ otg_dbg("Start SRP <-\n");
++ return 0;
++}
++
++/* stop SOF via bus_suspend */
++static void langwell_otg_loc_sof(int on)
++{
++ struct usb_hcd *hcd;
++ int err;
++
++ otg_dbg("loc_sof -> %d\n", on);
++
++ hcd = bus_to_hcd(the_transceiver->otg.host);
++ if (on)
++ err = hcd->driver->bus_resume(hcd);
++ else
++ err = hcd->driver->bus_suspend(hcd);
++
++ if (err)
++ otg_dbg("Failed to resume/suspend bus - %d\n", err);
++}
++
++static int langwell_otg_check_otgsc(void)
++{
++ struct langwell_otg *langwell;
++ u32 val_otgsc, val_usbcfg;
++
++ langwell = the_transceiver;
++
++ val_otgsc = readl(langwell->regs + CI_OTGSC);
++ val_usbcfg = readl(langwell->usbcfg);
++
++ otg_dbg("check sync OTGSC and USBCFG\n");
++ otg_dbg("OTGSC = %08x, USBCFG = %08x\n", val_otgsc, val_usbcfg);
++ otg_dbg("OTGSC_AVV = %d\n", !!(val_otgsc & OTGSC_AVV));
++ otg_dbg("USBCFG.VBUSVAL = %d\n", !!(val_usbcfg & USBCFG_VBUSVAL));
++ otg_dbg("OTGSC_ASV = %d\n", !!(val_otgsc & OTGSC_ASV));
++ otg_dbg("USBCFG.AVALID = %d\n", !!(val_usbcfg & USBCFG_AVALID));
++ otg_dbg("OTGSC_BSV = %d\n", !!(val_otgsc & OTGSC_BSV));
++ otg_dbg("USBCFG.BVALID = %d\n", !!(val_usbcfg & USBCFG_BVALID));
++ otg_dbg("OTGSC_BSE = %d\n", !!(val_otgsc & OTGSC_BSE));
++ otg_dbg("USBCFG.SESEND = %d\n", !!(val_usbcfg & USBCFG_SESEND));
++
++ /* Check USBCFG VBusValid/AValid/BValid/SessEnd */
++ if (!!(val_otgsc & OTGSC_AVV) ^ !!(val_usbcfg & USBCFG_VBUSVAL)) {
++ otg_dbg("OTGSC AVV and USBCFG VBUSVAL are not sync.\n");
++ return -1;
++ } else if (!!(val_otgsc & OTGSC_ASV) ^ !!(val_usbcfg & USBCFG_AVALID)) {
++ otg_dbg("OTGSC ASV and USBCFG AVALID are not sync.\n");
++ return -1;
++ } else if (!!(val_otgsc & OTGSC_BSV) ^ !!(val_usbcfg & USBCFG_BVALID)) {
++ otg_dbg("OTGSC BSV and USBCFG BVALID are not sync.\n");
++ return -1;
++ } else if (!!(val_otgsc & OTGSC_BSE) ^ !!(val_usbcfg & USBCFG_SESEND)) {
++ otg_dbg("OTGSC BSE and USBCFG SESSEN are not sync.\n");
++ return -1;
++ }
++
++ otg_dbg("OTGSC and USBCFG are synced\n");
++
++ return 0;
++}
++
++static void langwell_otg_phy_low_power(int on)
++{
++ u8 val, phcd;
++ int retval;
++
++ otg_dbg("phy low power mode-> %d start\n", on);
++
++ phcd = 0x40;
++
++ val = readb(the_transceiver->regs + CI_HOSTPC1 + 2);
++
++ if (on) {
++ /* Due to hardware issue, after set PHCD, sync will failed
++ * between USBCFG and OTGSC, so before set PHCD, check if
++ * sync is in process now. If the answer is "yes", then do
++ * not touch PHCD bit */
++ retval = langwell_otg_check_otgsc();
++ if (retval) {
++ otg_dbg("Skip PHCD programming..\n");
++ return ;
++ }
++
++ writeb(val | phcd, the_transceiver->regs + CI_HOSTPC1 + 2);
++ } else
++ writeb(val & ~phcd, the_transceiver->regs + CI_HOSTPC1 + 2);
++
++ otg_dbg("phy low power mode<- %d done\n", on);
++}
++
++/* After drv vbus, add 2 ms delay to set PHCD */
++static void langwell_otg_phy_low_power_wait(int on)
++{
++ otg_dbg("2 ms delay before set PHY low power mode\n");
++
++ mdelay(2);
++ langwell_otg_phy_low_power(on);
++}
++
++/* Enable/Disable OTG interrupts */
++static void langwell_otg_intr(int on)
++{
++ u32 val;
++
++ otg_dbg("interrupt -> %d\n", on);
++
++ val = readl(the_transceiver->regs + CI_OTGSC);
++
++ /* OTGSC_INT_MASK doesn't contains 1msInt */
++ if (on) {
++ val = val | (OTGSC_INT_MASK);
++ writel(val, the_transceiver->regs + CI_OTGSC);
++ } else {
++ val = val & ~(OTGSC_INT_MASK);
++ writel(val, the_transceiver->regs + CI_OTGSC);
++ }
++}
++
++/* set HAAR: Hardware Assist Auto-Reset */
++static void langwell_otg_HAAR(int on)
++{
++ u32 val;
++
++ otg_dbg("HAAR -> %d\n", on);
++
++ val = readl(the_transceiver->regs + CI_OTGSC);
++ if (on)
++ writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HAAR,
++ the_transceiver->regs + CI_OTGSC);
++ else
++ writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HAAR,
++ the_transceiver->regs + CI_OTGSC);
++}
++
++/* set HABA: Hardware Assist B-Disconnect to A-Connect */
++static void langwell_otg_HABA(int on)
++{
++ u32 val;
++
++ otg_dbg("HABA -> %d\n", on);
++
++ val = readl(the_transceiver->regs + CI_OTGSC);
++ if (on)
++ writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HABA,
++ the_transceiver->regs + CI_OTGSC);
++ else
++ writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HABA,
++ the_transceiver->regs + CI_OTGSC);
++}
++
++static int langwell_otg_check_se0_srp(int on)
++{
++ u32 val;
++
++ int delay_time = TB_SE0_SRP * 10; /* step is 100us */
++
++ otg_dbg("check_se0_srp -> \n");
++
++ do {
++ udelay(100);
++ if (!delay_time--)
++ break;
++ val = readl(the_transceiver->regs + CI_PORTSC1);
++ val &= PORTSC_LS;
++ } while (!val);
++
++ otg_dbg("check_se0_srp <- \n");
++ return val;
++}
++
++/* The timeout callback function to set time out bit */
++static void set_tmout(unsigned long indicator)
++{
++ *(int *)indicator = 1;
++}
++
++void langwell_otg_nsf_msg(unsigned long indicator)
++{
++ switch (indicator) {
++ case 2:
++ case 4:
++ case 6:
++ case 7:
++ printk(KERN_ERR "OTG:NSF-%lu - deivce not responding\n",
++ indicator);
++ break;
++ case 3:
++ printk(KERN_ERR "OTG:NSF-%lu - deivce not supported\n",
++ indicator);
++ break;
++ default:
++ printk(KERN_ERR "Do not have this kind of NSF\n");
++ break;
++ }
++}
++
++/* Initialize timers */
++static void langwell_otg_init_timers(struct otg_hsm *hsm)
++{
++ /* HSM used timers */
++ a_wait_vrise_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_VRISE,
++ (unsigned long)&hsm->a_wait_vrise_tmout);
++ a_aidl_bdis_tmr = otg_timer_initializer(&set_tmout, TA_AIDL_BDIS,
++ (unsigned long)&hsm->a_aidl_bdis_tmout);
++ b_se0_srp_tmr = otg_timer_initializer(&set_tmout, TB_SE0_SRP,
++ (unsigned long)&hsm->b_se0_srp);
++ b_srp_init_tmr = otg_timer_initializer(&set_tmout, TB_SRP_INIT,
++ (unsigned long)&hsm->b_srp_init_tmout);
++}
++
++/* Free timers */
++static void langwell_otg_free_timers(void)
++{
++ kfree(a_wait_vrise_tmr);
++ kfree(a_aidl_bdis_tmr);
++ kfree(b_se0_srp_tmr);
++ kfree(b_srp_init_tmr);
++}
++
++/* The timeout callback function to set time out bit */
++static void langwell_otg_timer_fn(unsigned long indicator)
++{
++ struct langwell_otg *langwell;
++
++ langwell = the_transceiver;
++
++ *(int *)indicator = 1;
++
++ otg_dbg("kernel timer - timeout\n");
++
++ queue_work(langwell->qwork, &langwell->work);
++}
++
++/* kernel timer used instead of HW based interrupt */
++static void langwell_otg_add_ktimer(enum langwell_otg_timer_type timers)
++{
++ struct langwell_otg *langwell;
++ unsigned long j = jiffies;
++ unsigned long data, time;
++
++ langwell = the_transceiver;
++
++ switch (timers) {
++ case TA_WAIT_VRISE_TMR:
++ langwell->hsm.a_wait_vrise_tmout = 0;
++ data = (unsigned long)&langwell->hsm.a_wait_vrise_tmout;
++ time = TA_WAIT_VRISE;
++ break;
++ case TA_WAIT_BCON_TMR:
++ langwell->hsm.a_wait_bcon_tmout = 0;
++ data = (unsigned long)&langwell->hsm.a_wait_bcon_tmout;
++ time = TA_WAIT_BCON;
++ break;
++ case TA_AIDL_BDIS_TMR:
++ langwell->hsm.a_aidl_bdis_tmout = 0;
++ data = (unsigned long)&langwell->hsm.a_aidl_bdis_tmout;
++ time = TA_AIDL_BDIS;
++ break;
++ case TB_ASE0_BRST_TMR:
++ langwell->hsm.b_ase0_brst_tmout = 0;
++ data = (unsigned long)&langwell->hsm.b_ase0_brst_tmout;
++ time = TB_ASE0_BRST;
++ break;
++ case TB_SRP_INIT_TMR:
++ langwell->hsm.b_srp_init_tmout = 0;
++ data = (unsigned long)&langwell->hsm.b_srp_init_tmout;
++ time = TB_SRP_INIT;
++ break;
++ case TB_SRP_FAIL_TMR:
++ langwell->hsm.b_srp_fail_tmout = 0;
++ data = (unsigned long)&langwell->hsm.b_srp_fail_tmout;
++ time = TB_SRP_FAIL;
++ break;
++ case TB_BUS_SUSPEND_TMR:
++ langwell->hsm.b_bus_suspend_tmout = 0;
++ data = (unsigned long)&langwell->hsm.b_bus_suspend_tmout;
++ time = TB_BUS_SUSPEND;
++ break;
++ default:
++ otg_dbg("OTG: unkown timer, can not enable such timer\n");
++ return;
++ }
++
++ langwell->hsm_timer.data = data;
++ langwell->hsm_timer.function = langwell_otg_timer_fn;
++ langwell->hsm_timer.expires = j + time * HZ / 1000; /* milliseconds */
++
++ add_timer(&langwell->hsm_timer);
++
++ otg_dbg("OTG: add timer successfully\n");
++}
++
++/* Add timer to timer list */
++static void langwell_otg_add_timer(void *gtimer)
++{
++ struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer;
++ struct langwell_otg_timer *tmp_timer;
++ u32 val32;
++
++ /* Check if the timer is already in the active list,
++ * if so update timer count
++ */
++ list_for_each_entry(tmp_timer, &active_timers, list)
++ if (tmp_timer == timer) {
++ timer->count = timer->expires;
++ return;
++ }
++ timer->count = timer->expires;
++
++ if (list_empty(&active_timers)) {
++ val32 = readl(the_transceiver->regs + CI_OTGSC);
++ writel(val32 | OTGSC_1MSE, the_transceiver->regs + CI_OTGSC);
++ }
++
++ list_add_tail(&timer->list, &active_timers);
++}
++
++/* Remove timer from the timer list; clear timeout status */
++static void langwell_otg_del_timer(void *gtimer)
++{
++ struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer;
++ struct langwell_otg_timer *tmp_timer, *del_tmp;
++ u32 val32;
++
++ list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list)
++ if (tmp_timer == timer)
++ list_del(&timer->list);
++
++ if (list_empty(&active_timers)) {
++ val32 = readl(the_transceiver->regs + CI_OTGSC);
++ writel(val32 & ~OTGSC_1MSE, the_transceiver->regs + CI_OTGSC);
++ }
++}
++
++/* Reduce timer count by 1, and find timeout conditions.*/
++static int langwell_otg_tick_timer(u32 *int_sts)
++{
++ struct langwell_otg_timer *tmp_timer, *del_tmp;
++ int expired = 0;
++
++ list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list) {
++ tmp_timer->count--;
++ /* check if timer expires */
++ if (!tmp_timer->count) {
++ list_del(&tmp_timer->list);
++ tmp_timer->function(tmp_timer->data);
++ expired = 1;
++ }
++ }
++
++ if (list_empty(&active_timers)) {
++ otg_dbg("tick timer: disable 1ms int\n");
++ *int_sts = *int_sts & ~OTGSC_1MSE;
++ }
++ return expired;
++}
++
++static void reset_otg(void)
++{
++ u32 val;
++ int delay_time = 1000;
++
++ otg_dbg("reseting OTG controller ...\n");
++ val = readl(the_transceiver->regs + CI_USBCMD);
++ writel(val | USBCMD_RST, the_transceiver->regs + CI_USBCMD);
++ do {
++ udelay(100);
++ if (!delay_time--)
++ otg_dbg("reset timeout\n");
++ val = readl(the_transceiver->regs + CI_USBCMD);
++ val &= USBCMD_RST;
++ } while (val != 0);
++ otg_dbg("reset done.\n");
++}
++
++static void set_host_mode(void)
++{
++ u32 val;
++
++ reset_otg();
++ val = readl(the_transceiver->regs + CI_USBMODE);
++ val = (val & (~USBMODE_CM)) | USBMODE_HOST;
++ writel(val, the_transceiver->regs + CI_USBMODE);
++}
++
++static void set_client_mode(void)
++{
++ u32 val;
++
++ reset_otg();
++ val = readl(the_transceiver->regs + CI_USBMODE);
++ val = (val & (~USBMODE_CM)) | USBMODE_DEVICE;
++ writel(val, the_transceiver->regs + CI_USBMODE);
++}
++
++static void init_hsm(void)
++{
++ struct langwell_otg *langwell = the_transceiver;
++ u32 val32;
++
++ /* read OTGSC after reset */
++ val32 = readl(langwell->regs + CI_OTGSC);
++ otg_dbg("%s: OTGSC init value = 0x%x\n", __func__, val32);
++
++ /* set init state */
++ if (val32 & OTGSC_ID) {
++ langwell->hsm.id = 1;
++ langwell->otg.default_a = 0;
++ set_client_mode();
++ langwell->otg.state = OTG_STATE_B_IDLE;
++ langwell_otg_drv_vbus(0);
++ } else {
++ langwell->hsm.id = 0;
++ langwell->otg.default_a = 1;
++ set_host_mode();
++ langwell->otg.state = OTG_STATE_A_IDLE;
++ }
++
++ /* set session indicator */
++ if (val32 & OTGSC_BSE)
++ langwell->hsm.b_sess_end = 1;
++ if (val32 & OTGSC_BSV)
++ langwell->hsm.b_sess_vld = 1;
++ if (val32 & OTGSC_ASV)
++ langwell->hsm.a_sess_vld = 1;
++ if (val32 & OTGSC_AVV)
++ langwell->hsm.a_vbus_vld = 1;
++
++ /* defautly power the bus */
++ langwell->hsm.a_bus_req = 1;
++ langwell->hsm.a_bus_drop = 0;
++ /* defautly don't request bus as B device */
++ langwell->hsm.b_bus_req = 0;
++ /* no system error */
++ langwell->hsm.a_clr_err = 0;
++
++ langwell_otg_phy_low_power_wait(1);
++}
++
++static void update_hsm(void)
++{
++ struct langwell_otg *langwell = the_transceiver;
++ u32 val32;
++
++ /* read OTGSC */
++ val32 = readl(langwell->regs + CI_OTGSC);
++ otg_dbg("%s: OTGSC current value = 0x%x\n", __func__, val32);
++
++ langwell->hsm.id = !!(val32 & OTGSC_ID);
++ langwell->hsm.b_sess_end = !!(val32 & OTGSC_BSE);
++ langwell->hsm.b_sess_vld = !!(val32 & OTGSC_BSV);
++ langwell->hsm.a_sess_vld = !!(val32 & OTGSC_ASV);
++ langwell->hsm.a_vbus_vld = !!(val32 & OTGSC_AVV);
++}
++
++static irqreturn_t otg_dummy_irq(int irq, void *_dev)
++{
++ void __iomem *reg_base = _dev;
++ u32 val;
++ u32 int_mask = 0;
++
++ val = readl(reg_base + CI_USBMODE);
++ if ((val & USBMODE_CM) != USBMODE_DEVICE)
++ return IRQ_NONE;
++
++ val = readl(reg_base + CI_USBSTS);
++ int_mask = val & INTR_DUMMY_MASK;
++
++ if (int_mask == 0)
++ return IRQ_NONE;
++
++ /* clear hsm.b_conn here since host driver can't detect it
++ * otg_dummy_irq called means B-disconnect happened.
++ */
++ if (the_transceiver->hsm.b_conn) {
++ the_transceiver->hsm.b_conn = 0;
++ if (spin_trylock(&the_transceiver->wq_lock)) {
++ queue_work(the_transceiver->qwork,
++ &the_transceiver->work);
++ spin_unlock(&the_transceiver->wq_lock);
++ }
++ }
++ /* Clear interrupts */
++ writel(int_mask, reg_base + CI_USBSTS);
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t otg_irq(int irq, void *_dev)
++{
++ struct langwell_otg *langwell = _dev;
++ u32 int_sts, int_en;
++ u32 int_mask = 0;
++ int flag = 0;
++
++ int_sts = readl(langwell->regs + CI_OTGSC);
++ int_en = (int_sts & OTGSC_INTEN_MASK) >> 8;
++ int_mask = int_sts & int_en;
++ if (int_mask == 0)
++ return IRQ_NONE;
++
++ if (int_mask & OTGSC_IDIS) {
++ otg_dbg("%s: id change int\n", __func__);
++ langwell->hsm.id = (int_sts & OTGSC_ID) ? 1 : 0;
++ flag = 1;
++ }
++ if (int_mask & OTGSC_DPIS) {
++ otg_dbg("%s: data pulse int\n", __func__);
++ langwell->hsm.a_srp_det = (int_sts & OTGSC_DPS) ? 1 : 0;
++ flag = 1;
++ }
++ if (int_mask & OTGSC_BSEIS) {
++ otg_dbg("%s: b session end int\n", __func__);
++ langwell->hsm.b_sess_end = (int_sts & OTGSC_BSE) ? 1 : 0;
++ flag = 1;
++ }
++ if (int_mask & OTGSC_BSVIS) {
++ otg_dbg("%s: b session valid int\n", __func__);
++ langwell->hsm.b_sess_vld = (int_sts & OTGSC_BSV) ? 1 : 0;
++ flag = 1;
++ }
++ if (int_mask & OTGSC_ASVIS) {
++ otg_dbg("%s: a session valid int\n", __func__);
++ langwell->hsm.a_sess_vld = (int_sts & OTGSC_ASV) ? 1 : 0;
++ flag = 1;
++ }
++ if (int_mask & OTGSC_AVVIS) {
++ otg_dbg("%s: a vbus valid int\n", __func__);
++ langwell->hsm.a_vbus_vld = (int_sts & OTGSC_AVV) ? 1 : 0;
++ flag = 1;
++ }
++
++ if (int_mask & OTGSC_1MSS) {
++ /* need to schedule otg_work if any timer is expired */
++ if (langwell_otg_tick_timer(&int_sts))
++ flag = 1;
++ }
++
++ writel((int_sts & ~OTGSC_INTSTS_MASK) | int_mask,
++ langwell->regs + CI_OTGSC);
++ if (flag)
++ queue_work(langwell->qwork, &langwell->work);
++
++ return IRQ_HANDLED;
++}
++
++static void langwell_otg_work(struct work_struct *work)
++{
++ struct langwell_otg *langwell = container_of(work,
++ struct langwell_otg, work);
++ int retval;
++
++ otg_dbg("%s: old state = %s\n", __func__,
++ state_string(langwell->otg.state));
++
++ switch (langwell->otg.state) {
++ case OTG_STATE_UNDEFINED:
++ case OTG_STATE_B_IDLE:
++ if (!langwell->hsm.id) {
++ langwell_otg_del_timer(b_srp_init_tmr);
++ del_timer_sync(&langwell->hsm_timer);
++ langwell->otg.default_a = 1;
++ langwell->hsm.a_srp_det = 0;
++ langwell_otg_chrg_vbus(0);
++ set_host_mode();
++ langwell_otg_phy_low_power(1);
++ langwell->otg.state = OTG_STATE_A_IDLE;
++ queue_work(langwell->qwork, &langwell->work);
++ } else if (langwell->hsm.b_srp_init_tmout) {
++ langwell->hsm.b_srp_init_tmout = 0;
++ printk(KERN_WARNING "USB OTG: SRP init timeout\n");
++ } else if (langwell->hsm.b_srp_fail_tmout) {
++ langwell->hsm.b_srp_fail_tmout = 0;
++ langwell->hsm.b_bus_req = 0;
++ langwell_otg_nsf_msg(6);
++ } else if (langwell->hsm.b_sess_vld) {
++ langwell_otg_del_timer(b_srp_init_tmr);
++ del_timer_sync(&langwell->hsm_timer);
++ langwell->hsm.b_sess_end = 0;
++ langwell->hsm.a_bus_suspend = 0;
++ langwell_otg_chrg_vbus(0);
++ if (langwell->client_ops) {
++ langwell->client_ops->resume(langwell->pdev);
++ langwell->otg.state = OTG_STATE_B_PERIPHERAL;
++ } else
++ otg_dbg("client driver not loaded.\n");
++
++ } else if (langwell->hsm.b_bus_req &&
++ (langwell->hsm.b_sess_end)) {
++ del_timer_sync(&langwell->hsm_timer);
++ /* workaround for b_se0_srp detection */
++ retval = langwell_otg_check_se0_srp(0);
++ if (retval) {
++ langwell->hsm.b_bus_req = 0;
++ otg_dbg("LS is not SE0, try again later\n");
++ } else {
++ /* clear the PHCD before start srp */
++ langwell_otg_phy_low_power(0);
++
++ /* Start SRP */
++ langwell_otg_add_timer(b_srp_init_tmr);
++ langwell_otg_start_srp(&langwell->otg);
++ langwell_otg_del_timer(b_srp_init_tmr);
++ langwell_otg_add_ktimer(TB_SRP_FAIL_TMR);
++
++ /* reset PHY low power mode here */
++ langwell_otg_phy_low_power_wait(1);
++ }
++ }
++ break;
++ case OTG_STATE_B_SRP_INIT:
++ if (!langwell->hsm.id) {
++ langwell->otg.default_a = 1;
++ langwell->hsm.a_srp_det = 0;
++ langwell_otg_drv_vbus(0);
++ langwell_otg_chrg_vbus(0);
++ set_host_mode();
++ langwell_otg_phy_low_power(1);
++ langwell->otg.state = OTG_STATE_A_IDLE;
++ queue_work(langwell->qwork, &langwell->work);
++ } else if (langwell->hsm.b_sess_vld) {
++ langwell_otg_chrg_vbus(0);
++ if (langwell->client_ops) {
++ langwell->client_ops->resume(langwell->pdev);
++ langwell->otg.state = OTG_STATE_B_PERIPHERAL;
++ } else
++ otg_dbg("client driver not loaded.\n");
++ }
++ break;
++ case OTG_STATE_B_PERIPHERAL:
++ if (!langwell->hsm.id) {
++ langwell->otg.default_a = 1;
++ langwell->hsm.a_srp_det = 0;
++
++ langwell_otg_chrg_vbus(0);
++
++ if (langwell->client_ops) {
++ langwell->client_ops->suspend(langwell->pdev,
++ PMSG_FREEZE);
++ } else
++ otg_dbg("client driver has been removed.\n");
++
++ set_host_mode();
++ langwell_otg_phy_low_power(1);
++ langwell->otg.state = OTG_STATE_A_IDLE;
++ queue_work(langwell->qwork, &langwell->work);
++ } else if (!langwell->hsm.b_sess_vld) {
++ langwell->hsm.b_hnp_enable = 0;
++
++ if (langwell->client_ops) {
++ langwell->client_ops->suspend(langwell->pdev,
++ PMSG_FREEZE);
++ } else
++ otg_dbg("client driver has been removed.\n");
++
++ langwell->otg.state = OTG_STATE_B_IDLE;
++ } else if (langwell->hsm.b_bus_req && langwell->hsm.b_hnp_enable
++ && langwell->hsm.a_bus_suspend) {
++
++ if (langwell->client_ops) {
++ langwell->client_ops->suspend(langwell->pdev,
++ PMSG_FREEZE);
++ } else
++ otg_dbg("client driver has been removed.\n");
++
++ langwell_otg_HAAR(1);
++ langwell->hsm.a_conn = 0;
++
++ if (langwell->host_ops) {
++ langwell->host_ops->probe(langwell->pdev,
++ langwell->host_ops->id_table);
++ langwell->otg.state = OTG_STATE_B_WAIT_ACON;
++ } else
++ otg_dbg("host driver not loaded.\n");
++
++ langwell->hsm.a_bus_resume = 0;
++ langwell_otg_add_ktimer(TB_ASE0_BRST_TMR);
++ }
++ break;
++
++ case OTG_STATE_B_WAIT_ACON:
++ if (!langwell->hsm.id) {
++ /* delete hsm timer for a_wait_bcon_tmr */
++ del_timer_sync(&langwell->hsm_timer);
++
++ langwell->otg.default_a = 1;
++ langwell->hsm.a_srp_det = 0;
++
++ langwell_otg_chrg_vbus(0);
++
++ langwell_otg_HAAR(0);
++ if (langwell->host_ops)
++ langwell->host_ops->remove(langwell->pdev);
++ else
++ otg_dbg("host driver has been removed.\n");
++
++ set_host_mode();
++ langwell_otg_phy_low_power(1);
++ langwell->otg.state = OTG_STATE_A_IDLE;
++ queue_work(langwell->qwork, &langwell->work);
++ } else if (!langwell->hsm.b_sess_vld) {
++ /* delete hsm timer for a_wait_bcon_tmr */
++ del_timer_sync(&langwell->hsm_timer);
++
++ langwell->hsm.b_hnp_enable = 0;
++ langwell->hsm.b_bus_req = 0;
++ langwell_otg_chrg_vbus(0);
++ langwell_otg_HAAR(0);
++
++ if (langwell->host_ops)
++ langwell->host_ops->remove(langwell->pdev);
++ else
++ otg_dbg("host driver has been removed.\n");
++
++ set_client_mode();
++ langwell_otg_phy_low_power(1);
++ langwell->otg.state = OTG_STATE_B_IDLE;
++ } else if (langwell->hsm.a_conn) {
++ /* delete hsm timer for a_wait_bcon_tmr */
++ del_timer_sync(&langwell->hsm_timer);
++
++ langwell_otg_HAAR(0);
++ langwell->otg.state = OTG_STATE_B_HOST;
++ queue_work(langwell->qwork, &langwell->work);
++ } else if (langwell->hsm.a_bus_resume ||
++ langwell->hsm.b_ase0_brst_tmout) {
++ /* delete hsm timer for a_wait_bcon_tmr */
++ del_timer_sync(&langwell->hsm_timer);
++
++ langwell_otg_HAAR(0);
++ langwell_otg_nsf_msg(7);
++
++ if (langwell->host_ops)
++ langwell->host_ops->remove(langwell->pdev);
++ else
++ otg_dbg("host driver has been removed.\n");
++
++ langwell->hsm.a_bus_suspend = 0;
++ langwell->hsm.b_bus_req = 0;
++
++ if (langwell->client_ops)
++ langwell->client_ops->resume(langwell->pdev);
++ else
++ otg_dbg("client driver not loaded.\n");
++
++ langwell->otg.state = OTG_STATE_B_PERIPHERAL;
++ }
++ break;
++
++ case OTG_STATE_B_HOST:
++ if (!langwell->hsm.id) {
++ langwell->otg.default_a = 1;
++ langwell->hsm.a_srp_det = 0;
++
++ langwell_otg_chrg_vbus(0);
++ if (langwell->host_ops)
++ langwell->host_ops->remove(langwell->pdev);
++ else
++ otg_dbg("host driver has been removed.\n");
++
++ set_host_mode();
++ langwell_otg_phy_low_power(1);
++ langwell->otg.state = OTG_STATE_A_IDLE;
++ queue_work(langwell->qwork, &langwell->work);
++ } else if (!langwell->hsm.b_sess_vld) {
++ langwell->hsm.b_hnp_enable = 0;
++ langwell->hsm.b_bus_req = 0;
++ langwell_otg_chrg_vbus(0);
++ if (langwell->host_ops)
++ langwell->host_ops->remove(langwell->pdev);
++ else
++ otg_dbg("host driver has been removed.\n");
++
++ set_client_mode();
++ langwell_otg_phy_low_power(1);
++ langwell->otg.state = OTG_STATE_B_IDLE;
++ } else if ((!langwell->hsm.b_bus_req) ||
++ (!langwell->hsm.a_conn)) {
++ langwell->hsm.b_bus_req = 0;
++ langwell_otg_loc_sof(0);
++ if (langwell->host_ops)
++ langwell->host_ops->remove(langwell->pdev);
++ else
++ otg_dbg("host driver has been removed.\n");
++
++ langwell->hsm.a_bus_suspend = 0;
++
++ if (langwell->client_ops)
++ langwell->client_ops->resume(langwell->pdev);
++ else
++ otg_dbg("client driver not loaded.\n");
++
++ langwell->otg.state = OTG_STATE_B_PERIPHERAL;
++ }
++ break;
++
++ case OTG_STATE_A_IDLE:
++ langwell->otg.default_a = 1;
++ if (langwell->hsm.id) {
++ langwell->otg.default_a = 0;
++ langwell->hsm.b_bus_req = 0;
++ langwell->hsm.vbus_srp_up = 0;
++ langwell_otg_chrg_vbus(0);
++ set_client_mode();
++ langwell_otg_phy_low_power(1);
++ langwell->otg.state = OTG_STATE_B_IDLE;
++ queue_work(langwell->qwork, &langwell->work);
++ } else if (!langwell->hsm.a_bus_drop &&
++ (langwell->hsm.a_srp_det || langwell->hsm.a_bus_req)) {
++ langwell_otg_phy_low_power(0);
++ langwell_otg_drv_vbus(1);
++ langwell->hsm.a_srp_det = 1;
++ langwell->hsm.vbus_srp_up = 0;
++ langwell->hsm.a_wait_vrise_tmout = 0;
++ langwell_otg_add_timer(a_wait_vrise_tmr);
++ langwell->otg.state = OTG_STATE_A_WAIT_VRISE;
++ queue_work(langwell->qwork, &langwell->work);
++ } else if (!langwell->hsm.a_bus_drop &&
++ langwell->hsm.a_sess_vld) {
++ langwell->hsm.vbus_srp_up = 1;
++ } else if (!langwell->hsm.a_sess_vld &&
++ langwell->hsm.vbus_srp_up) {
++ msleep(10);
++ langwell_otg_phy_low_power(0);
++ langwell_otg_drv_vbus(1);
++ langwell->hsm.a_srp_det = 1;
++ langwell->hsm.vbus_srp_up = 0;
++ langwell->hsm.a_wait_vrise_tmout = 0;
++ langwell_otg_add_timer(a_wait_vrise_tmr);
++ langwell->otg.state = OTG_STATE_A_WAIT_VRISE;
++ queue_work(langwell->qwork, &langwell->work);
++ } else if (!langwell->hsm.a_sess_vld &&
++ !langwell->hsm.vbus_srp_up) {
++ langwell_otg_phy_low_power(1);
++ }
++ break;
++ case OTG_STATE_A_WAIT_VRISE:
++ if (langwell->hsm.id) {
++ langwell_otg_del_timer(a_wait_vrise_tmr);
++ langwell->hsm.b_bus_req = 0;
++ langwell->otg.default_a = 0;
++ langwell_otg_drv_vbus(0);
++ set_client_mode();
++ langwell_otg_phy_low_power_wait(1);
++ langwell->otg.state = OTG_STATE_B_IDLE;
++ } else if (langwell->hsm.a_vbus_vld) {
++ langwell_otg_del_timer(a_wait_vrise_tmr);
++ if (langwell->host_ops)
++ langwell->host_ops->probe(langwell->pdev,
++ langwell->host_ops->id_table);
++ else {
++ otg_dbg("host driver not loaded.\n");
++ break;
++ }
++ langwell->hsm.b_conn = 0;
++ /* Replace HW timer with kernel timer */
++ langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
++ langwell->otg.state = OTG_STATE_A_WAIT_BCON;
++ } else if (langwell->hsm.a_wait_vrise_tmout) {
++ if (langwell->hsm.a_vbus_vld) {
++ if (langwell->host_ops)
++ langwell->host_ops->probe(
++ langwell->pdev,
++ langwell->host_ops->id_table);
++ else {
++ otg_dbg("host driver not loaded.\n");
++ break;
++ }
++ langwell->hsm.b_conn = 0;
++ /* change to kernel timer */
++ langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
++ langwell->otg.state = OTG_STATE_A_WAIT_BCON;
++ } else {
++ langwell_otg_drv_vbus(0);
++ langwell_otg_phy_low_power_wait(1);
++ langwell->otg.state = OTG_STATE_A_VBUS_ERR;
++ }
++ }
++ break;
++ case OTG_STATE_A_WAIT_BCON:
++ if (langwell->hsm.id) {
++ /* delete hsm timer for a_wait_bcon_tmr */
++ del_timer_sync(&langwell->hsm_timer);
++
++ langwell->otg.default_a = 0;
++ langwell->hsm.b_bus_req = 0;
++ if (langwell->host_ops)
++ langwell->host_ops->remove(langwell->pdev);
++ else
++ otg_dbg("host driver has been removed.\n");
++ langwell_otg_drv_vbus(0);
++ set_client_mode();
++ langwell_otg_phy_low_power_wait(1);
++ langwell->otg.state = OTG_STATE_B_IDLE;
++ queue_work(langwell->qwork, &langwell->work);
++ } else if (!langwell->hsm.a_vbus_vld) {
++ /* delete hsm timer for a_wait_bcon_tmr */
++ del_timer_sync(&langwell->hsm_timer);
++
++ if (langwell->host_ops)
++ langwell->host_ops->remove(langwell->pdev);
++ else
++ otg_dbg("host driver has been removed.\n");
++ langwell_otg_drv_vbus(0);
++ langwell_otg_phy_low_power_wait(1);
++ langwell->otg.state = OTG_STATE_A_VBUS_ERR;
++ } else if (langwell->hsm.a_bus_drop ||
++ (langwell->hsm.a_wait_bcon_tmout &&
++ !langwell->hsm.a_bus_req)) {
++ /* delete hsm timer for a_wait_bcon_tmr */
++ del_timer_sync(&langwell->hsm_timer);
++
++ if (langwell->host_ops)
++ langwell->host_ops->remove(langwell->pdev);
++ else
++ otg_dbg("host driver has been removed.\n");
++ langwell_otg_drv_vbus(0);
++ langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
++ } else if (langwell->hsm.b_conn) {
++ /* delete hsm timer for a_wait_bcon_tmr */
++ del_timer_sync(&langwell->hsm_timer);
++
++ langwell->hsm.a_suspend_req = 0;
++ langwell->otg.state = OTG_STATE_A_HOST;
++ if (langwell->hsm.a_srp_det &&
++ !langwell->otg.host->b_hnp_enable) {
++ /* SRP capable peripheral-only device */
++ langwell->hsm.a_bus_req = 1;
++ langwell->hsm.a_srp_det = 0;
++ } else if (!langwell->hsm.a_bus_req &&
++ langwell->otg.host->b_hnp_enable) {
++ /* It is not safe enough to do a fast
++ * transistion from A_WAIT_BCON to
++ * A_SUSPEND */
++ msleep(10000);
++ if (langwell->hsm.a_bus_req)
++ break;
++
++ if (request_irq(langwell->pdev->irq,
++ otg_dummy_irq, IRQF_SHARED,
++ driver_name, langwell->regs) != 0) {
++ otg_dbg("request interrupt %d fail\n",
++ langwell->pdev->irq);
++ }
++
++ langwell_otg_HABA(1);
++ langwell->hsm.b_bus_resume = 0;
++ langwell->hsm.a_aidl_bdis_tmout = 0;
++ langwell_otg_add_timer(a_aidl_bdis_tmr);
++
++ langwell_otg_loc_sof(0);
++ /* clear PHCD to enable HW timer */
++ langwell_otg_phy_low_power(0);
++ langwell->otg.state = OTG_STATE_A_SUSPEND;
++ } else if (!langwell->hsm.a_bus_req &&
++ !langwell->otg.host->b_hnp_enable) {
++ struct pci_dev *pdev = langwell->pdev;
++ if (langwell->host_ops)
++ langwell->host_ops->remove(pdev);
++ else
++ otg_dbg("host driver removed.\n");
++ langwell_otg_drv_vbus(0);
++ langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
++ }
++ }
++ break;
++ case OTG_STATE_A_HOST:
++ if (langwell->hsm.id) {
++ langwell->otg.default_a = 0;
++ langwell->hsm.b_bus_req = 0;
++ if (langwell->host_ops)
++ langwell->host_ops->remove(langwell->pdev);
++ else
++ otg_dbg("host driver has been removed.\n");
++ langwell_otg_drv_vbus(0);
++ set_client_mode();
++ langwell_otg_phy_low_power_wait(1);
++ langwell->otg.state = OTG_STATE_B_IDLE;
++ queue_work(langwell->qwork, &langwell->work);
++ } else if (langwell->hsm.a_bus_drop ||
++ (!langwell->otg.host->b_hnp_enable &&
++ !langwell->hsm.a_bus_req)) {
++ if (langwell->host_ops)
++ langwell->host_ops->remove(langwell->pdev);
++ else
++ otg_dbg("host driver has been removed.\n");
++ langwell_otg_drv_vbus(0);
++ langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
++ } else if (!langwell->hsm.a_vbus_vld) {
++ if (langwell->host_ops)
++ langwell->host_ops->remove(langwell->pdev);
++ else
++ otg_dbg("host driver has been removed.\n");
++ langwell_otg_drv_vbus(0);
++ langwell_otg_phy_low_power_wait(1);
++ langwell->otg.state = OTG_STATE_A_VBUS_ERR;
++ } else if (langwell->otg.host->b_hnp_enable
++ && !langwell->hsm.a_bus_req) {
++ /* Set HABA to enable hardware assistance to signal
++ * A-connect after receiver B-disconnect. Hardware
++ * will then set client mode and enable URE, SLE and
++ * PCE after the assistance. otg_dummy_irq is used to
++ * clean these ints when client driver is not resumed.
++ */
++ if (request_irq(langwell->pdev->irq,
++ otg_dummy_irq, IRQF_SHARED, driver_name,
++ langwell->regs) != 0) {
++ otg_dbg("request interrupt %d failed\n",
++ langwell->pdev->irq);
++ }
++
++ /* set HABA */
++ langwell_otg_HABA(1);
++ langwell->hsm.b_bus_resume = 0;
++ langwell->hsm.a_aidl_bdis_tmout = 0;
++ langwell_otg_add_timer(a_aidl_bdis_tmr);
++ langwell_otg_loc_sof(0);
++ /* clear PHCD to enable HW timer */
++ langwell_otg_phy_low_power(0);
++ langwell->otg.state = OTG_STATE_A_SUSPEND;
++ } else if (!langwell->hsm.b_conn || !langwell->hsm.a_bus_req) {
++ langwell->hsm.a_wait_bcon_tmout = 0;
++ /* add kernel timer */
++ langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
++ langwell->otg.state = OTG_STATE_A_WAIT_BCON;
++ }
++ break;
++ case OTG_STATE_A_SUSPEND:
++ if (langwell->hsm.id) {
++ langwell_otg_del_timer(a_aidl_bdis_tmr);
++ langwell_otg_HABA(0);
++ free_irq(langwell->pdev->irq, langwell->regs);
++ langwell->otg.default_a = 0;
++ langwell->hsm.b_bus_req = 0;
++ if (langwell->host_ops)
++ langwell->host_ops->remove(langwell->pdev);
++ else
++ otg_dbg("host driver has been removed.\n");
++ langwell_otg_drv_vbus(0);
++ set_client_mode();
++ langwell_otg_phy_low_power(1);
++ langwell->otg.state = OTG_STATE_B_IDLE;
++ queue_work(langwell->qwork, &langwell->work);
++ } else if (langwell->hsm.a_bus_req ||
++ langwell->hsm.b_bus_resume) {
++ langwell_otg_del_timer(a_aidl_bdis_tmr);
++ langwell_otg_HABA(0);
++ free_irq(langwell->pdev->irq, langwell->regs);
++ langwell->hsm.a_suspend_req = 0;
++ langwell_otg_loc_sof(1);
++ langwell->otg.state = OTG_STATE_A_HOST;
++ } else if (langwell->hsm.a_aidl_bdis_tmout ||
++ langwell->hsm.a_bus_drop) {
++ langwell_otg_del_timer(a_aidl_bdis_tmr);
++ langwell_otg_HABA(0);
++ free_irq(langwell->pdev->irq, langwell->regs);
++ if (langwell->host_ops)
++ langwell->host_ops->remove(langwell->pdev);
++ else
++ otg_dbg("host driver has been removed.\n");
++ langwell_otg_drv_vbus(0);
++ langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
++ } else if (!langwell->hsm.b_conn &&
++ langwell->otg.host->b_hnp_enable) {
++ langwell_otg_del_timer(a_aidl_bdis_tmr);
++ langwell_otg_HABA(0);
++ free_irq(langwell->pdev->irq, langwell->regs);
++
++ if (langwell->host_ops)
++ langwell->host_ops->remove(langwell->pdev);
++ else
++ otg_dbg("host driver has been removed.\n");
++
++ langwell->hsm.b_bus_suspend = 0;
++ langwell->hsm.b_bus_suspend_vld = 0;
++
++ /* msleep(200); */
++ if (langwell->client_ops)
++ langwell->client_ops->resume(langwell->pdev);
++ else
++ otg_dbg("client driver not loaded.\n");
++
++ langwell_otg_add_ktimer(TB_BUS_SUSPEND_TMR);
++ langwell->otg.state = OTG_STATE_A_PERIPHERAL;
++ break;
++ } else if (!langwell->hsm.a_vbus_vld) {
++ langwell_otg_del_timer(a_aidl_bdis_tmr);
++ langwell_otg_HABA(0);
++ free_irq(langwell->pdev->irq, langwell->regs);
++ if (langwell->host_ops)
++ langwell->host_ops->remove(langwell->pdev);
++ else
++ otg_dbg("host driver has been removed.\n");
++ langwell_otg_drv_vbus(0);
++ langwell_otg_phy_low_power_wait(1);
++ langwell->otg.state = OTG_STATE_A_VBUS_ERR;
++ }
++ break;
++ case OTG_STATE_A_PERIPHERAL:
++ if (langwell->hsm.id) {
++ /* delete hsm timer for b_bus_suspend_tmr */
++ del_timer_sync(&langwell->hsm_timer);
++ langwell->otg.default_a = 0;
++ langwell->hsm.b_bus_req = 0;
++ if (langwell->client_ops)
++ langwell->client_ops->suspend(langwell->pdev,
++ PMSG_FREEZE);
++ else
++ otg_dbg("client driver has been removed.\n");
++ langwell_otg_drv_vbus(0);
++ set_client_mode();
++ langwell_otg_phy_low_power_wait(1);
++ langwell->otg.state = OTG_STATE_B_IDLE;
++ queue_work(langwell->qwork, &langwell->work);
++ } else if (!langwell->hsm.a_vbus_vld) {
++ /* delete hsm timer for b_bus_suspend_tmr */
++ del_timer_sync(&langwell->hsm_timer);
++ if (langwell->client_ops)
++ langwell->client_ops->suspend(langwell->pdev,
++ PMSG_FREEZE);
++ else
++ otg_dbg("client driver has been removed.\n");
++ langwell_otg_drv_vbus(0);
++ langwell_otg_phy_low_power_wait(1);
++ langwell->otg.state = OTG_STATE_A_VBUS_ERR;
++ } else if (langwell->hsm.a_bus_drop) {
++ /* delete hsm timer for b_bus_suspend_tmr */
++ del_timer_sync(&langwell->hsm_timer);
++ if (langwell->client_ops)
++ langwell->client_ops->suspend(langwell->pdev,
++ PMSG_FREEZE);
++ else
++ otg_dbg("client driver has been removed.\n");
++ langwell_otg_drv_vbus(0);
++ langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
++ } else if (langwell->hsm.b_bus_suspend) {
++ /* delete hsm timer for b_bus_suspend_tmr */
++ del_timer_sync(&langwell->hsm_timer);
++ if (langwell->client_ops)
++ langwell->client_ops->suspend(langwell->pdev,
++ PMSG_FREEZE);
++ else
++ otg_dbg("client driver has been removed.\n");
++ if (langwell->host_ops)
++ langwell->host_ops->probe(langwell->pdev,
++ langwell->host_ops->id_table);
++ else
++ otg_dbg("host driver not loaded.\n");
++ langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
++ langwell->otg.state = OTG_STATE_A_WAIT_BCON;
++ } else if (langwell->hsm.b_bus_suspend_tmout) {
++ u32 val;
++ val = readl(langwell->regs + CI_PORTSC1);
++ if (!(val & PORTSC_SUSP))
++ break;
++ if (langwell->client_ops)
++ langwell->client_ops->suspend(langwell->pdev,
++ PMSG_FREEZE);
++ else
++ otg_dbg("client driver has been removed.\n");
++ if (langwell->host_ops)
++ langwell->host_ops->probe(langwell->pdev,
++ langwell->host_ops->id_table);
++ else
++ otg_dbg("host driver not loaded.\n");
++ /* replaced with kernel timer */
++ langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
++ langwell->otg.state = OTG_STATE_A_WAIT_BCON;
++ }
++ break;
++ case OTG_STATE_A_VBUS_ERR:
++ if (langwell->hsm.id) {
++ langwell->otg.default_a = 0;
++ langwell->hsm.a_clr_err = 0;
++ langwell->hsm.a_srp_det = 0;
++ set_client_mode();
++ langwell_otg_phy_low_power(1);
++ langwell->otg.state = OTG_STATE_B_IDLE;
++ queue_work(langwell->qwork, &langwell->work);
++ } else if (langwell->hsm.a_clr_err) {
++ langwell->hsm.a_clr_err = 0;
++ langwell->hsm.a_srp_det = 0;
++ reset_otg();
++ init_hsm();
++ if (langwell->otg.state == OTG_STATE_A_IDLE)
++ queue_work(langwell->qwork, &langwell->work);
++ } else {
++ /* FIXME: Because FW will clear PHCD bit when any VBus
++ * event detected. Reset PHCD to 1 again */
++ langwell_otg_phy_low_power(1);
++ }
++ break;
++ case OTG_STATE_A_WAIT_VFALL:
++ if (langwell->hsm.id) {
++ langwell->otg.default_a = 0;
++ set_client_mode();
++ langwell_otg_phy_low_power(1);
++ langwell->otg.state = OTG_STATE_B_IDLE;
++ queue_work(langwell->qwork, &langwell->work);
++ } else if (langwell->hsm.a_bus_req) {
++ langwell_otg_drv_vbus(1);
++ langwell->hsm.a_wait_vrise_tmout = 0;
++ langwell_otg_add_timer(a_wait_vrise_tmr);
++ langwell->otg.state = OTG_STATE_A_WAIT_VRISE;
++ } else if (!langwell->hsm.a_sess_vld) {
++ langwell->hsm.a_srp_det = 0;
++ set_host_mode();
++ langwell_otg_phy_low_power(1);
++ langwell->otg.state = OTG_STATE_A_IDLE;
++ }
++ break;
++ default:
++ ;
++ }
++
++ otg_dbg("%s: new state = %s\n", __func__,
++ state_string(langwell->otg.state));
++}
++
++ static ssize_t
++show_registers(struct device *_dev, struct device_attribute *attr, char *buf)
++{
++ struct langwell_otg *langwell;
++ char *next;
++ unsigned size;
++ unsigned t;
++
++ langwell = the_transceiver;
++ next = buf;
++ size = PAGE_SIZE;
++
++ t = scnprintf(next, size,
++ "\n"
++ "USBCMD = 0x%08x \n"
++ "USBSTS = 0x%08x \n"
++ "USBINTR = 0x%08x \n"
++ "ASYNCLISTADDR = 0x%08x \n"
++ "PORTSC1 = 0x%08x \n"
++ "HOSTPC1 = 0x%08x \n"
++ "OTGSC = 0x%08x \n"
++ "USBMODE = 0x%08x \n",
++ readl(langwell->regs + 0x30),
++ readl(langwell->regs + 0x34),
++ readl(langwell->regs + 0x38),
++ readl(langwell->regs + 0x48),
++ readl(langwell->regs + 0x74),
++ readl(langwell->regs + 0xb4),
++ readl(langwell->regs + 0xf4),
++ readl(langwell->regs + 0xf8)
++ );
++ size -= t;
++ next += t;
++
++ return PAGE_SIZE - size;
++}
++static DEVICE_ATTR(registers, S_IRUGO, show_registers, NULL);
++
++static ssize_t
++show_hsm(struct device *_dev, struct device_attribute *attr, char *buf)
++{
++ struct langwell_otg *langwell;
++ char *next;
++ unsigned size;
++ unsigned t;
++ enum usb_otg_state state;
++
++ langwell = the_transceiver;
++ next = buf;
++ size = PAGE_SIZE;
++ state = langwell->otg.state;
++
++ /* Add a_set_b_hnp_en */
++ if (state == OTG_STATE_A_HOST || state == OTG_STATE_A_SUSPEND)
++ langwell->hsm.a_set_b_hnp_en = langwell->otg.host->b_hnp_enable;
++ else
++ langwell->hsm.a_set_b_hnp_en = 0;
++
++ t = scnprintf(next, size,
++ "\n"
++ "current state = %s\n"
++ "a_bus_resume = \t%d\n"
++ "a_bus_suspend = \t%d\n"
++ "a_conn = \t%d\n"
++ "a_sess_vld = \t%d\n"
++ "a_srp_det = \t%d\n"
++ "a_vbus_vld = \t%d\n"
++ "b_bus_resume = \t%d\n"
++ "b_bus_suspend = \t%d\n"
++ "b_conn = \t%d\n"
++ "b_se0_srp = \t%d\n"
++ "b_sess_end = \t%d\n"
++ "b_sess_vld = \t%d\n"
++ "id = \t%d\n"
++ "a_set_b_hnp_en = \t%d\n"
++ "b_srp_done = \t%d\n"
++ "b_hnp_enable = \t%d\n"
++ "a_wait_vrise_tmout = \t%d\n"
++ "a_wait_bcon_tmout = \t%d\n"
++ "a_aidl_bdis_tmout = \t%d\n"
++ "b_ase0_brst_tmout = \t%d\n"
++ "a_bus_drop = \t%d\n"
++ "a_bus_req = \t%d\n"
++ "a_clr_err = \t%d\n"
++ "a_suspend_req = \t%d\n"
++ "b_bus_req = \t%d\n"
++ "b_bus_suspend_tmout = \t%d\n"
++ "b_bus_suspend_vld = \t%d\n",
++ state_string(langwell->otg.state),
++ langwell->hsm.a_bus_resume,
++ langwell->hsm.a_bus_suspend,
++ langwell->hsm.a_conn,
++ langwell->hsm.a_sess_vld,
++ langwell->hsm.a_srp_det,
++ langwell->hsm.a_vbus_vld,
++ langwell->hsm.b_bus_resume,
++ langwell->hsm.b_bus_suspend,
++ langwell->hsm.b_conn,
++ langwell->hsm.b_se0_srp,
++ langwell->hsm.b_sess_end,
++ langwell->hsm.b_sess_vld,
++ langwell->hsm.id,
++ langwell->hsm.a_set_b_hnp_en,
++ langwell->hsm.b_srp_done,
++ langwell->hsm.b_hnp_enable,
++ langwell->hsm.a_wait_vrise_tmout,
++ langwell->hsm.a_wait_bcon_tmout,
++ langwell->hsm.a_aidl_bdis_tmout,
++ langwell->hsm.b_ase0_brst_tmout,
++ langwell->hsm.a_bus_drop,
++ langwell->hsm.a_bus_req,
++ langwell->hsm.a_clr_err,
++ langwell->hsm.a_suspend_req,
++ langwell->hsm.b_bus_req,
++ langwell->hsm.b_bus_suspend_tmout,
++ langwell->hsm.b_bus_suspend_vld
++ );
++ size -= t;
++ next += t;
++
++ return PAGE_SIZE - size;
++}
++static DEVICE_ATTR(hsm, S_IRUGO, show_hsm, NULL);
++
++static ssize_t
++get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ struct langwell_otg *langwell;
++ char *next;
++ unsigned size;
++ unsigned t;
++
++ langwell = the_transceiver;
++ next = buf;
++ size = PAGE_SIZE;
++
++ t = scnprintf(next, size, "%d", langwell->hsm.a_bus_req);
++ size -= t;
++ next += t;
++
++ return PAGE_SIZE - size;
++}
++
++static ssize_t
++set_a_bus_req(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct langwell_otg *langwell;
++ langwell = the_transceiver;
++ if (!langwell->otg.default_a)
++ return -1;
++ if (count > 2)
++ return -1;
++
++ if (buf[0] == '0') {
++ langwell->hsm.a_bus_req = 0;
++ otg_dbg("a_bus_req = 0\n");
++ } else if (buf[0] == '1') {
++ /* If a_bus_drop is TRUE, a_bus_req can't be set */
++ if (langwell->hsm.a_bus_drop)
++ return -1;
++ langwell->hsm.a_bus_req = 1;
++ otg_dbg("a_bus_req = 1\n");
++ }
++
++ langwell_update_transceiver();
++
++ return count;
++}
++static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUGO, get_a_bus_req, set_a_bus_req);
++
++static ssize_t
++get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ struct langwell_otg *langwell;
++ char *next;
++ unsigned size;
++ unsigned t;
++
++ langwell = the_transceiver;
++ next = buf;
++ size = PAGE_SIZE;
++
++ t = scnprintf(next, size, "%d", langwell->hsm.a_bus_drop);
++ size -= t;
++ next += t;
++
++ return PAGE_SIZE - size;
++}
++
++static ssize_t
++set_a_bus_drop(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct langwell_otg *langwell;
++ langwell = the_transceiver;
++ if (!langwell->otg.default_a)
++ return -1;
++ if (count > 2)
++ return -1;
++
++ if (buf[0] == '0') {
++ langwell->hsm.a_bus_drop = 0;
++ otg_dbg("a_bus_drop = 0\n");
++ } else if (buf[0] == '1') {
++ langwell->hsm.a_bus_drop = 1;
++ langwell->hsm.a_bus_req = 0;
++ otg_dbg("a_bus_drop = 1, then a_bus_req = 0\n");
++ }
++
++ langwell_update_transceiver();
++
++ return count;
++}
++static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUGO,
++ get_a_bus_drop, set_a_bus_drop);
++
++static ssize_t
++get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ struct langwell_otg *langwell;
++ char *next;
++ unsigned size;
++ unsigned t;
++
++ langwell = the_transceiver;
++ next = buf;
++ size = PAGE_SIZE;
++
++ t = scnprintf(next, size, "%d", langwell->hsm.b_bus_req);
++ size -= t;
++ next += t;
++
++ return PAGE_SIZE - size;
++}
++
++static ssize_t
++set_b_bus_req(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct langwell_otg *langwell;
++ langwell = the_transceiver;
++
++ if (langwell->otg.default_a)
++ return -1;
++
++ if (count > 2)
++ return -1;
++
++ if (buf[0] == '0') {
++ langwell->hsm.b_bus_req = 0;
++ otg_dbg("b_bus_req = 0\n");
++ } else if (buf[0] == '1') {
++ langwell->hsm.b_bus_req = 1;
++ otg_dbg("b_bus_req = 1\n");
++ }
++
++ langwell_update_transceiver();
++
++ return count;
++}
++static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUGO, get_b_bus_req, set_b_bus_req);
++
++static ssize_t
++set_a_clr_err(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct langwell_otg *langwell;
++ langwell = the_transceiver;
++
++ if (!langwell->otg.default_a)
++ return -1;
++ if (count > 2)
++ return -1;
++
++ if (buf[0] == '1') {
++ langwell->hsm.a_clr_err = 1;
++ otg_dbg("a_clr_err = 1\n");
++ }
++
++ langwell_update_transceiver();
++
++ return count;
++}
++static DEVICE_ATTR(a_clr_err, S_IWUGO, NULL, set_a_clr_err);
++
++static struct attribute *inputs_attrs[] = {
++ &dev_attr_a_bus_req.attr,
++ &dev_attr_a_bus_drop.attr,
++ &dev_attr_b_bus_req.attr,
++ &dev_attr_a_clr_err.attr,
++ NULL,
++};
++
++static struct attribute_group debug_dev_attr_group = {
++ .name = "inputs",
++ .attrs = inputs_attrs,
++};
++
++int langwell_register_host(struct pci_driver *host_driver)
++{
++ int ret = 0;
++
++ the_transceiver->host_ops = host_driver;
++ queue_work(the_transceiver->qwork, &the_transceiver->work);
++ otg_dbg("host controller driver is registered\n");
++
++ return ret;
++}
++EXPORT_SYMBOL(langwell_register_host);
++
++void langwell_unregister_host(struct pci_driver *host_driver)
++{
++ if (the_transceiver->host_ops)
++ the_transceiver->host_ops->remove(the_transceiver->pdev);
++ the_transceiver->host_ops = NULL;
++ the_transceiver->hsm.a_bus_drop = 1;
++ queue_work(the_transceiver->qwork, &the_transceiver->work);
++ otg_dbg("host controller driver is unregistered\n");
++}
++EXPORT_SYMBOL(langwell_unregister_host);
++
++int langwell_register_peripheral(struct pci_driver *client_driver)
++{
++ int ret = 0;
++
++ if (client_driver)
++ ret = client_driver->probe(the_transceiver->pdev,
++ client_driver->id_table);
++ if (!ret) {
++ the_transceiver->client_ops = client_driver;
++ queue_work(the_transceiver->qwork, &the_transceiver->work);
++ otg_dbg("client controller driver is registered\n");
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL(langwell_register_peripheral);
++
++void langwell_unregister_peripheral(struct pci_driver *client_driver)
++{
++ if (the_transceiver->client_ops)
++ the_transceiver->client_ops->remove(the_transceiver->pdev);
++ the_transceiver->client_ops = NULL;
++ the_transceiver->hsm.b_bus_req = 0;
++ queue_work(the_transceiver->qwork, &the_transceiver->work);
++ otg_dbg("client controller driver is unregistered\n");
++}
++EXPORT_SYMBOL(langwell_unregister_peripheral);
++
++static int langwell_otg_probe(struct pci_dev *pdev,
++ const struct pci_device_id *id)
++{
++ unsigned long resource, len;
++ void __iomem *base = NULL;
++ int retval;
++ u32 val32;
++ struct langwell_otg *langwell;
++ char qname[] = "langwell_otg_queue";
++
++ retval = 0;
++ otg_dbg("\notg controller is detected.\n");
++ if (pci_enable_device(pdev) < 0) {
++ retval = -ENODEV;
++ goto done;
++ }
++
++ langwell = kzalloc(sizeof *langwell, GFP_KERNEL);
++ if (langwell == NULL) {
++ retval = -ENOMEM;
++ goto done;
++ }
++ the_transceiver = langwell;
++
++ /* control register: BAR 0 */
++ resource = pci_resource_start(pdev, 0);
++ len = pci_resource_len(pdev, 0);
++ if (!request_mem_region(resource, len, driver_name)) {
++ retval = -EBUSY;
++ goto err;
++ }
++ langwell->region = 1;
++
++ base = ioremap_nocache(resource, len);
++ if (base == NULL) {
++ retval = -EFAULT;
++ goto err;
++ }
++ langwell->regs = base;
++
++ if (!request_mem_region(USBCFG_ADDR, USBCFG_LEN, driver_name)) {
++ retval = -EBUSY;
++ goto err;
++ }
++ langwell->cfg_region = 1;
++
++ /* For the SCCB.USBCFG register */
++ base = ioremap_nocache(USBCFG_ADDR, USBCFG_LEN);
++ if (base == NULL) {
++ retval = -EFAULT;
++ goto err;
++ }
++ langwell->usbcfg = base;
++
++ if (!pdev->irq) {
++ otg_dbg("No IRQ.\n");
++ retval = -ENODEV;
++ goto err;
++ }
++
++ langwell->qwork = create_singlethread_workqueue(qname);
++ if (!langwell->qwork) {
++ otg_dbg("cannot create workqueue %s\n", qname);
++ retval = -ENOMEM;
++ goto err;
++ }
++ INIT_WORK(&langwell->work, langwell_otg_work);
++
++ /* OTG common part */
++ langwell->pdev = pdev;
++ langwell->otg.dev = &pdev->dev;
++ langwell->otg.label = driver_name;
++ langwell->otg.set_host = langwell_otg_set_host;
++ langwell->otg.set_peripheral = langwell_otg_set_peripheral;
++ langwell->otg.set_power = langwell_otg_set_power;
++ langwell->otg.start_srp = langwell_otg_start_srp;
++ langwell->otg.state = OTG_STATE_UNDEFINED;
++ if (otg_set_transceiver(&langwell->otg)) {
++ otg_dbg("can't set transceiver\n");
++ retval = -EBUSY;
++ goto err;
++ }
++
++ reset_otg();
++ init_hsm();
++
++ spin_lock_init(&langwell->lock);
++ spin_lock_init(&langwell->wq_lock);
++ INIT_LIST_HEAD(&active_timers);
++ langwell_otg_init_timers(&langwell->hsm);
++ init_timer(&langwell->hsm_timer);
++
++ if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
++ driver_name, langwell) != 0) {
++ otg_dbg("request interrupt %d failed\n", pdev->irq);
++ retval = -EBUSY;
++ goto err;
++ }
++
++ /* enable OTGSC int */
++ val32 = OTGSC_DPIE | OTGSC_BSEIE | OTGSC_BSVIE |
++ OTGSC_ASVIE | OTGSC_AVVIE | OTGSC_IDIE | OTGSC_IDPU;
++ writel(val32, langwell->regs + CI_OTGSC);
++
++ retval = device_create_file(&pdev->dev, &dev_attr_registers);
++ if (retval < 0) {
++ otg_dbg("Can't register sysfs attribute: %d\n", retval);
++ goto err;
++ }
++
++ retval = device_create_file(&pdev->dev, &dev_attr_hsm);
++ if (retval < 0) {
++ otg_dbg("Can't hsm sysfs attribute: %d\n", retval);
++ goto err;
++ }
++
++ retval = sysfs_create_group(&pdev->dev.kobj, &debug_dev_attr_group);
++ if (retval < 0) {
++ otg_dbg("Can't register sysfs attr group: %d\n", retval);
++ goto err;
++ }
++
++ if (langwell->otg.state == OTG_STATE_A_IDLE)
++ queue_work(langwell->qwork, &langwell->work);
++
++ return 0;
++
++err:
++ if (the_transceiver)
++ langwell_otg_remove(pdev);
++done:
++ return retval;
++}
++
++static void langwell_otg_remove(struct pci_dev *pdev)
++{
++ struct langwell_otg *langwell;
++
++ langwell = the_transceiver;
++
++ if (langwell->qwork) {
++ flush_workqueue(langwell->qwork);
++ destroy_workqueue(langwell->qwork);
++ }
++ langwell_otg_free_timers();
++
++ /* disable OTGSC interrupt as OTGSC doesn't change in reset */
++ writel(0, langwell->regs + CI_OTGSC);
++
++ if (pdev->irq)
++ free_irq(pdev->irq, langwell);
++ if (langwell->usbcfg)
++ iounmap(langwell->usbcfg);
++ if (langwell->cfg_region)
++ release_mem_region(USBCFG_ADDR, USBCFG_LEN);
++ if (langwell->regs)
++ iounmap(langwell->regs);
++ if (langwell->region)
++ release_mem_region(pci_resource_start(pdev, 0),
++ pci_resource_len(pdev, 0));
++
++ otg_set_transceiver(NULL);
++ pci_disable_device(pdev);
++ sysfs_remove_group(&pdev->dev.kobj, &debug_dev_attr_group);
++ device_remove_file(&pdev->dev, &dev_attr_hsm);
++ device_remove_file(&pdev->dev, &dev_attr_registers);
++ kfree(langwell);
++ langwell = NULL;
++}
++
++static void transceiver_suspend(struct pci_dev *pdev)
++{
++ pci_save_state(pdev);
++ pci_set_power_state(pdev, PCI_D3hot);
++ langwell_otg_phy_low_power(1);
++}
++
++static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message)
++{
++ struct langwell_otg *langwell;
++ struct pci_driver *ops;
++ int ret = 0;
++
++ langwell = the_transceiver;
++
++ /* Disbale OTG interrupts */
++ langwell_otg_intr(0);
++
++ if (pdev->irq)
++ free_irq(pdev->irq, langwell);
++
++ /* Prevent more otg_work */
++ flush_workqueue(langwell->qwork);
++ destroy_workqueue(langwell->qwork);
++ langwell->qwork = NULL;
++
++ /* start actions */
++ switch (langwell->otg.state) {
++ case OTG_STATE_A_WAIT_VFALL:
++ langwell->otg.state = OTG_STATE_A_IDLE;
++ case OTG_STATE_A_IDLE:
++ case OTG_STATE_A_VBUS_ERR:
++ case OTG_STATE_B_IDLE:
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_A_WAIT_VRISE:
++ langwell_otg_del_timer(a_wait_vrise_tmr);
++ langwell->hsm.a_srp_det = 0;
++ langwell_otg_drv_vbus(0);
++ langwell->otg.state = OTG_STATE_A_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_A_WAIT_BCON:
++ del_timer_sync(&langwell->hsm_timer);
++ ops = langwell->host_ops;
++
++ switch (message.event) {
++ case PM_EVENT_SUSPEND:
++ if (ops && ops->driver.pm && ops->driver.pm->suspend)
++ ret = ops->driver.pm->suspend(&pdev->dev);
++ break;
++ case PM_EVENT_FREEZE:
++ if (ops && ops->driver.pm && ops->driver.pm->freeze)
++ ret = ops->driver.pm->freeze(&pdev->dev);
++ break;
++ case PM_EVENT_HIBERNATE:
++ if (ops && ops->driver.pm && ops->driver.pm->poweroff)
++ ret = ops->driver.pm->poweroff(&pdev->dev);
++ break;
++ default:
++ otg_dbg("not suspend/freeze/hibernate pm event\n");
++ ret = -EINVAL;
++ break;
++ }
++
++ if (ret) {
++ otg_dbg("pm suspend function error = %d\n", ret);
++ /* restart timer */
++ langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
++ goto error;
++ }
++
++ if (ops && ops->remove)
++ ops->remove(pdev);
++ else
++ otg_dbg("host driver has been removed.\n");
++
++ langwell->hsm.a_srp_det = 0;
++ langwell_otg_drv_vbus(0);
++ langwell->otg.state = OTG_STATE_A_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_A_HOST:
++ ops = langwell->host_ops;
++
++ switch (message.event) {
++ case PM_EVENT_SUSPEND:
++ if (ops && ops->driver.pm && ops->driver.pm->suspend)
++ ret = ops->driver.pm->suspend(&pdev->dev);
++ break;
++ case PM_EVENT_FREEZE:
++ if (ops && ops->driver.pm && ops->driver.pm->freeze)
++ ret = ops->driver.pm->freeze(&pdev->dev);
++ break;
++ case PM_EVENT_HIBERNATE:
++ if (ops && ops->driver.pm && ops->driver.pm->poweroff)
++ ret = ops->driver.pm->poweroff(&pdev->dev);
++ break;
++ default:
++ otg_dbg("not suspend/freeze/hibernate pm event\n");
++ ret = -EINVAL;
++ break;
++ }
++
++ if (ret) {
++ otg_dbg("pm suspend function error = %d\n", ret);
++ goto error;
++ }
++
++ if (ops && ops->remove)
++ ops->remove(pdev);
++ else
++ otg_dbg("host driver has been removed.\n");
++
++ langwell->hsm.a_srp_det = 0;
++ langwell_otg_drv_vbus(0);
++ langwell->otg.state = OTG_STATE_A_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_A_SUSPEND:
++ langwell_otg_del_timer(a_aidl_bdis_tmr);
++ langwell_otg_HABA(0);
++ if (langwell->host_ops && langwell->host_ops->remove)
++ langwell->host_ops->remove(pdev);
++ else
++ otg_dbg("host driver has been removed.\n");
++ langwell->hsm.a_srp_det = 0;
++ langwell_otg_drv_vbus(0);
++ langwell->otg.state = OTG_STATE_A_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_A_PERIPHERAL:
++ del_timer_sync(&langwell->hsm_timer);
++ if (langwell->client_ops && langwell->client_ops->suspend)
++ ret = langwell->client_ops->suspend(pdev, message);
++ else
++ otg_dbg("client driver has been removed.\n");
++
++ if (ret) {
++ otg_dbg("pm suspend function error = %d\n", ret);
++ goto error;
++ }
++
++ langwell_otg_drv_vbus(0);
++ langwell->hsm.a_srp_det = 0;
++ langwell->otg.state = OTG_STATE_A_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_B_HOST:
++ if (langwell->host_ops && langwell->host_ops->remove)
++ langwell->host_ops->remove(pdev);
++ else
++ otg_dbg("host driver has been removed.\n");
++ langwell->hsm.b_bus_req = 0;
++ langwell->otg.state = OTG_STATE_B_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_B_PERIPHERAL:
++ if (langwell->client_ops && langwell->client_ops->suspend)
++ ret = langwell->client_ops->suspend(pdev, message);
++ else
++ otg_dbg("client driver has been removed.\n");
++
++ if (ret) {
++ otg_dbg("pm suspend function error = %d\n", ret);
++ goto error;
++ }
++
++ langwell->otg.state = OTG_STATE_B_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ case OTG_STATE_B_WAIT_ACON:
++ /* delete hsm timer for b_ase0_brst_tmr */
++ del_timer_sync(&langwell->hsm_timer);
++
++ langwell_otg_HAAR(0);
++ if (langwell->host_ops && langwell->host_ops->remove)
++ langwell->host_ops->remove(pdev);
++ else
++ otg_dbg("host driver has been removed.\n");
++ langwell->hsm.b_bus_req = 0;
++ langwell->otg.state = OTG_STATE_B_IDLE;
++ transceiver_suspend(pdev);
++ break;
++ default:
++ otg_dbg("error state before suspend\n ");
++ break;
++ }
++
++ return ret;
++error:
++ langwell->qwork = create_singlethread_workqueue("langwell_otg_queue");
++ if (!langwell->qwork) {
++ otg_dbg("cannot create workqueue langwell_otg_queue\n");
++ return -ENOMEM;
++ }
++
++ if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
++ driver_name, the_transceiver) != 0) {
++ otg_dbg("request interrupt %d failed\n", pdev->irq);
++ return -EBUSY;
++ }
++
++ /* enable OTG interrupts */
++ langwell_otg_intr(1);
++
++ return ret;
++}
++
++static void transceiver_resume(struct pci_dev *pdev)
++{
++ pci_restore_state(pdev);
++ pci_set_power_state(pdev, PCI_D0);
++}
++
++static int langwell_otg_resume(struct pci_dev *pdev)
++{
++ struct langwell_otg *langwell;
++ int ret = 0;
++
++ langwell = the_transceiver;
++
++ transceiver_resume(pdev);
++
++ langwell->qwork = create_singlethread_workqueue("langwell_otg_queue");
++ if (!langwell->qwork) {
++ otg_dbg("cannot create workqueue langwell_otg_queue\n");
++ ret = -ENOMEM;
++ goto error;
++ }
++
++ if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
++ driver_name, the_transceiver) != 0) {
++ otg_dbg("request interrupt %d failed\n", pdev->irq);
++ ret = -EBUSY;
++ goto error;
++ }
++
++ /* enable OTG interrupts */
++ langwell_otg_intr(1);
++
++ update_hsm();
++
++ langwell_update_transceiver();
++
++ return ret;
++error:
++ langwell_otg_intr(0);
++ transceiver_suspend(pdev);
++ return ret;
++}
++
++static int __init langwell_otg_init(void)
++{
++ return pci_register_driver(&otg_pci_driver);
++}
++module_init(langwell_otg_init);
++
++static void __exit langwell_otg_cleanup(void)
++{
++ pci_unregister_driver(&otg_pci_driver);
++}
++module_exit(langwell_otg_cleanup);
+diff --git a/include/linux/usb/langwell_otg.h b/include/linux/usb/langwell_otg.h
+new file mode 100644
+index 0000000..cbb204b
+--- /dev/null
++++ b/include/linux/usb/langwell_otg.h
+@@ -0,0 +1,201 @@
++/*
++ * Intel Langwell USB OTG transceiver driver
++ * Copyright (C) 2008, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef __LANGWELL_OTG_H__
++#define __LANGWELL_OTG_H__
++
++/* notify transceiver driver about OTG events */
++extern void langwell_update_transceiver(void);
++/* HCD register bus driver */
++extern int langwell_register_host(struct pci_driver *host_driver);
++/* HCD unregister bus driver */
++extern void langwell_unregister_host(struct pci_driver *host_driver);
++/* DCD register bus driver */
++extern int langwell_register_peripheral(struct pci_driver *client_driver);
++/* DCD unregister bus driver */
++extern void langwell_unregister_peripheral(struct pci_driver *client_driver);
++/* No silent failure, output warning message */
++extern void langwell_otg_nsf_msg(unsigned long message);
++
++#define CI_USBCMD 0x30
++# define USBCMD_RST BIT(1)
++# define USBCMD_RS BIT(0)
++#define CI_USBSTS 0x34
++# define USBSTS_SLI BIT(8)
++# define USBSTS_URI BIT(6)
++# define USBSTS_PCI BIT(2)
++#define CI_PORTSC1 0x74
++# define PORTSC_PP BIT(12)
++# define PORTSC_LS (BIT(11) | BIT(10))
++# define PORTSC_SUSP BIT(7)
++# define PORTSC_CCS BIT(0)
++#define CI_HOSTPC1 0xb4
++# define HOSTPC1_PHCD BIT(22)
++#define CI_OTGSC 0xf4
++# define OTGSC_DPIE BIT(30)
++# define OTGSC_1MSE BIT(29)
++# define OTGSC_BSEIE BIT(28)
++# define OTGSC_BSVIE BIT(27)
++# define OTGSC_ASVIE BIT(26)
++# define OTGSC_AVVIE BIT(25)
++# define OTGSC_IDIE BIT(24)
++# define OTGSC_DPIS BIT(22)
++# define OTGSC_1MSS BIT(21)
++# define OTGSC_BSEIS BIT(20)
++# define OTGSC_BSVIS BIT(19)
++# define OTGSC_ASVIS BIT(18)
++# define OTGSC_AVVIS BIT(17)
++# define OTGSC_IDIS BIT(16)
++# define OTGSC_DPS BIT(14)
++# define OTGSC_1MST BIT(13)
++# define OTGSC_BSE BIT(12)
++# define OTGSC_BSV BIT(11)
++# define OTGSC_ASV BIT(10)
++# define OTGSC_AVV BIT(9)
++# define OTGSC_ID BIT(8)
++# define OTGSC_HABA BIT(7)
++# define OTGSC_HADP BIT(6)
++# define OTGSC_IDPU BIT(5)
++# define OTGSC_DP BIT(4)
++# define OTGSC_OT BIT(3)
++# define OTGSC_HAAR BIT(2)
++# define OTGSC_VC BIT(1)
++# define OTGSC_VD BIT(0)
++# define OTGSC_INTEN_MASK (0x7f << 24)
++# define OTGSC_INT_MASK (0x5f << 24)
++# define OTGSC_INTSTS_MASK (0x7f << 16)
++#define CI_USBMODE 0xf8
++# define USBMODE_CM (BIT(1) | BIT(0))
++# define USBMODE_IDLE 0
++# define USBMODE_DEVICE 0x2
++# define USBMODE_HOST 0x3
++#define USBCFG_ADDR 0xff10801c
++#define USBCFG_LEN 4
++# define USBCFG_VBUSVAL BIT(14)
++# define USBCFG_AVALID BIT(13)
++# define USBCFG_BVALID BIT(12)
++# define USBCFG_SESEND BIT(11)
++
++#define INTR_DUMMY_MASK (USBSTS_SLI | USBSTS_URI | USBSTS_PCI)
++
++struct otg_hsm {
++ /* Input */
++ int a_bus_resume;
++ int a_bus_suspend;
++ int a_conn;
++ int a_sess_vld;
++ int a_srp_det;
++ int a_vbus_vld;
++ int b_bus_resume;
++ int b_bus_suspend;
++ int b_conn;
++ int b_se0_srp;
++ int b_sess_end;
++ int b_sess_vld;
++ int id;
++
++ /* Internal variables */
++ int a_set_b_hnp_en;
++ int b_srp_done;
++ int b_hnp_enable;
++
++ /* Timeout indicator for timers */
++ int a_wait_vrise_tmout;
++ int a_wait_bcon_tmout;
++ int a_aidl_bdis_tmout;
++ int b_ase0_brst_tmout;
++ int b_bus_suspend_tmout;
++ int b_srp_init_tmout;
++ int b_srp_fail_tmout;
++
++ /* Informative variables */
++ int a_bus_drop;
++ int a_bus_req;
++ int a_clr_err;
++ int a_suspend_req;
++ int b_bus_req;
++
++ /* Output */
++ int drv_vbus;
++ int loc_conn;
++ int loc_sof;
++
++ /* Others */
++ int b_bus_suspend_vld;
++ int vbus_srp_up;
++};
++
++enum langwell_otg_timer_type {
++ TA_WAIT_VRISE_TMR,
++ TA_WAIT_BCON_TMR,
++ TA_AIDL_BDIS_TMR,
++ TB_ASE0_BRST_TMR,
++ TB_SE0_SRP_TMR,
++ TB_SRP_INIT_TMR,
++ TB_SRP_FAIL_TMR,
++ TB_BUS_SUSPEND_TMR
++};
++
++#define TA_WAIT_VRISE 100
++#define TA_WAIT_BCON 30000
++#define TA_AIDL_BDIS 15000
++#define TB_ASE0_BRST 5000
++#define TB_SE0_SRP 2
++#define TB_SRP_INIT 100
++#define TB_SRP_FAIL 5500
++#define TB_BUS_SUSPEND 500
++
++struct langwell_otg_timer {
++ unsigned long expires; /* Number of count increase to timeout */
++ unsigned long count; /* Tick counter */
++ void (*function)(unsigned long); /* Timeout function */
++ unsigned long data; /* Data passed to function */
++ struct list_head list;
++};
++
++struct langwell_otg {
++ struct otg_transceiver otg;
++ struct otg_hsm hsm;
++ void __iomem *regs;
++ void __iomem *usbcfg; /* SCCB USB config Reg */
++ unsigned region;
++ unsigned cfg_region;
++ struct pci_driver *host_ops;
++ struct pci_driver *client_ops;
++ struct pci_dev *pdev;
++ struct work_struct work;
++ struct workqueue_struct *qwork;
++ struct timer_list hsm_timer;
++ spinlock_t lock;
++ spinlock_t wq_lock;
++};
++
++static inline struct langwell_otg *otg_to_langwell(struct otg_transceiver *otg)
++{
++ return container_of(otg, struct langwell_otg, otg);
++}
++
++#ifdef DEBUG
++#define otg_dbg(fmt, args...) \
++ printk(KERN_DEBUG fmt , ## args)
++#else
++#define otg_dbg(fmt, args...) \
++ do { } while (0)
++#endif /* DEBUG */
++#endif /* __LANGWELL_OTG_H__ */
+diff --git a/include/linux/usb/langwell_udc.h b/include/linux/usb/langwell_udc.h
+index c949178..fe2c698 100644
+--- a/include/linux/usb/langwell_udc.h
++++ b/include/linux/usb/langwell_udc.h
+@@ -306,5 +306,18 @@ struct langwell_op_regs {
+ #define EPCTRL_RXS BIT(0) /* RX endpoint STALL */
+ } __attribute__ ((packed));
+
++
++/* export function declaration */
++
++/* gets the maximum power consumption */
++extern int langwell_udc_maxpower(int *mA);
++
++/* return errors of langwell_udc_maxpower() */
++#define EOTGFAIL 1
++#define EOTGNODEVICE 2
++#define EOTGCHARGER 3
++#define EOTGDISCONN 4
++#define EOTGINVAL 5
++
+ #endif /* __LANGWELL_UDC_H */
+
+--
+1.5.4.5
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-multi-touch-input-driver-for-event-devices.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-multi-touch-input-driver-for-event-devices.patch
new file mode 100644
index 0000000..483862a
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-multi-touch-input-driver-for-event-devices.patch
@@ -0,0 +1,398 @@
+From 6317c631cb1fd32f34da98a945747781d5a8906d Mon Sep 17 00:00:00 2001
+From: Priya Vijayan <priya.vijayan@intel.com>
+Date: Tue, 4 May 2010 14:21:37 -0700
+Subject: [PATCH] Add mtdev driver and configs
+
+Add multi-touch driver and configs for event devices.
+This module is from He Min <min.he@intel.com>
+Code modifications and configs from Priya Vijayan <priya.vijayan@intel.com>
+
+Patch-mainline: 2.6.34
+
+Signed-off-by: Priya Vijayan <priya.vijayan@intel.com>
+---
+ drivers/input/Kconfig | 9 ++
+ drivers/input/Makefile | 1 +
+ drivers/input/input.c | 1 +
+ drivers/input/mtdev.c | 307 ++++++++++++++++++++++++++++++++++++++++++++++++
+ include/linux/input.h | 1 +
+ 5 files changed, 319 insertions(+), 0 deletions(-)
+ create mode 100644 drivers/input/mtdev.c
+
+diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
+index 07c2cd4..0264508 100644
+--- a/drivers/input/Kconfig
++++ b/drivers/input/Kconfig
+@@ -135,6 +135,15 @@ config INPUT_EVDEV
+ To compile this driver as a module, choose M here: the
+ module will be called evdev.
+
++config INPUT_MTDEV
++ tristate "Multitouch interface"
++ help
++ Say Y here if you want to enable Multi-touch input driver for event devices
++ If unsure, say N.
++
++ To compile this driver as a module, choose M here:the
++ module will be called mtdev.
++
+ config INPUT_EVBUG
+ tristate "Event debugging"
+ help
+diff --git a/drivers/input/Makefile b/drivers/input/Makefile
+index 7ad212d..96a4d94 100644
+--- a/drivers/input/Makefile
++++ b/drivers/input/Makefile
+@@ -15,6 +15,7 @@ obj-$(CONFIG_INPUT_MOUSEDEV) += mousedev.o
+ obj-$(CONFIG_INPUT_JOYDEV) += joydev.o
+ obj-$(CONFIG_INPUT_EVDEV) += evdev.o
+ obj-$(CONFIG_INPUT_EVBUG) += evbug.o
++obj-$(CONFIG_INPUT_MTDEV) += mtdev.o
+
+ obj-$(CONFIG_INPUT_KEYBOARD) += keyboard/
+ obj-$(CONFIG_INPUT_MOUSE) += mouse/
+diff --git a/drivers/input/input.c b/drivers/input/input.c
+index 86cb2d2..b589dec 100644
+--- a/drivers/input/input.c
++++ b/drivers/input/input.c
+@@ -47,6 +47,7 @@ static unsigned int input_abs_bypass_init_data[] __initdata = {
+ ABS_MT_BLOB_ID,
+ ABS_MT_TRACKING_ID,
+ ABS_MT_PRESSURE,
++ ABS_MT_CONTACT_COUNT,
+ 0
+ };
+ static unsigned long input_abs_bypass[BITS_TO_LONGS(ABS_CNT)];
+diff --git a/drivers/input/mtdev.c b/drivers/input/mtdev.c
+new file mode 100644
+index 0000000..8b01220
+--- /dev/null
++++ b/drivers/input/mtdev.c
+@@ -0,0 +1,312 @@
++#include <linux/module.h>
++#include <linux/input.h>
++#include <linux/init.h>
++#include <linux/device.h>
++#include <linux/hid.h>
++#include <linux/wait.h>
++#include <linux/kthread.h>
++
++#define MTDEV_MAX_POINTERS 5
++
++#ifndef ABS_MT_PRESSURE
++#define ABS_MT_PRESSURE 0x3a
++#endif
++#ifndef ABS_MT_CONTACT_COUNT
++#define ABS_MT_CONTACT_COUNT 0x3b
++#endif
++
++struct mtdev_input_dev
++{
++ struct input_dev* input_dev;
++ int id;
++ bool ready;
++ int x;
++ int y;
++ int z;
++ int touch;
++};
++
++struct mtdev_dev
++{
++ int count;
++ int last_count;
++ wait_queue_head_t wq;
++ struct input_handle handle;
++ struct mtdev_input_dev devs[MTDEV_MAX_POINTERS];
++};
++
++//id==-1 means to find an empty slot
++static int find_match_id(struct mtdev_dev * mtdev,int id)
++{
++ int i=0;
++
++ for (i=0;i<MTDEV_MAX_POINTERS;i++)
++ {
++ if(mtdev->devs[i].id==id)
++ {
++ return i;
++ }
++ }
++ return -1;
++}
++
++
++static int mtdev_kt(void *data)
++{
++ struct mtdev_dev *mtdev=(struct mtdev_dev*)data;
++ int i=0;
++ int err=0;
++ printk("mtdev_kt entered\n");
++ if(!mtdev)
++ return -1;
++ //wait_event_interruptible(mtdev->wq,kthread_should_stop());
++ for(;i<MTDEV_MAX_POINTERS;i++)
++ {
++ struct input_dev *pdev=NULL;
++ pdev=mtdev->devs[i].input_dev;
++
++ err=input_register_device(pdev);
++ if(err)
++ {
++ printk("error in register mtdev %d\n",err);
++ return err;
++ }
++ else
++ printk("successfully registered input %d\n",i);
++ mtdev->devs[i].ready=true;
++
++ }
++ return 0;
++}
++
++static void mtdev_event(struct input_handle * handle,
++ unsigned int type, unsigned int code, int value)
++{
++ static int i=0;
++ //int err=0;
++ int j=0;
++ struct mtdev_dev *mtdev=handle->private;
++ //printk("mtdev_event %x %x %x\n",type,code,value);
++ if(!mtdev->devs[1].ready||!mtdev->devs[0].ready)
++ return;
++ if(type==EV_ABS)
++ {
++ switch(code)
++ {
++ case ABS_MT_CONTACT_COUNT:
++ if(value!=0)
++ {
++ //we start from the first point
++ i=0;
++ //printk("mtdev:contact count is %d\n",value);
++ }
++ else if(value>MTDEV_MAX_POINTERS)
++ {
++ value=MTDEV_MAX_POINTERS;
++ }
++
++ //found last release fingers and send release event
++ for(j=0;j<MTDEV_MAX_POINTERS;j++)
++ {
++ if(mtdev->devs[j].touch==0
++ &&mtdev->devs[j].id!=-1)
++ {
++ input_report_key(mtdev->devs[j].input_dev,BTN_TOUCH,0);
++ input_sync(mtdev->devs[j].input_dev);
++ printk("%d id %d released\n",j,mtdev->devs[j].id);
++ mtdev->devs[j].id=-1;
++ }
++ mtdev->devs[j].touch=0;
++ }
++ mtdev->count=value;
++
++ mtdev->last_count=value;
++
++ break;
++ case ABS_MT_TRACKING_ID:
++ {
++ i=find_match_id(mtdev,value);
++ if(i==-1||i>=MTDEV_MAX_POINTERS)
++ {
++ i=find_match_id(mtdev,-1);
++ if(i==-1||i>=MTDEV_MAX_POINTERS)
++ {
++ printk("no empty slot for id %d\n",value);
++ break;
++ }
++ else
++ {
++ //newly pressed
++ mtdev->devs[i].touch=2;
++ mtdev->devs[i].id=value;
++ printk("found slot %d for id %d\n",i,value);
++ break;
++ }
++ }
++ //printk("found slot %d for id%d\n",i,value);
++ //keep the point
++ mtdev->devs[i].touch=1;
++
++ }
++ break;
++ case ABS_MT_POSITION_X:
++ if(i<MTDEV_MAX_POINTERS&&i!=-1)
++ mtdev->devs[i].x=value;
++ //printk("mt x :%d\n",value);
++ break;
++ case ABS_MT_POSITION_Y:
++ if(i<MTDEV_MAX_POINTERS&&i!=-1)
++ mtdev->devs[i].y=value;
++ //printk("mt y :%d\n",value);
++ break;
++ case ABS_MT_PRESSURE:
++ if(i<MTDEV_MAX_POINTERS&&i!=-1)
++ mtdev->devs[i].z=value;
++ break;
++ default:
++ break;
++ }
++ }
++ else if(type == EV_SYN && code == SYN_MT_REPORT)
++ {
++ if(i<MTDEV_MAX_POINTERS&&i!=-1)
++ {
++ if(mtdev->devs[i].touch==2)
++ {
++ input_report_key(mtdev->devs[i].input_dev,BTN_TOUCH,1);
++
++ }
++ input_report_abs(mtdev->devs[i].input_dev,ABS_X,mtdev->devs[i].x);
++ input_report_abs(mtdev->devs[i].input_dev,ABS_Y,mtdev->devs[i].y);
++ input_report_abs(mtdev->devs[i].input_dev,ABS_PRESSURE,mtdev->devs[i].z);
++ input_sync(mtdev->devs[i].input_dev);
++ //printk("mtdev_event %d id %d (%d,%d,%d)\n",i,mtdev->devs[i].id,mtdev->devs[i].x,mtdev->devs[i].y,mtdev->devs[i].z);
++ //i++;
++ }
++ }
++
++}
++/*
++ * grab all the input of mt device, create new single touch input devices
++ *
++ */
++static int mtdev_connect(struct input_handler *handler, struct input_dev *dev,
++ const struct input_device_id *id)
++{
++ struct mtdev_dev* mtdev;
++ struct task_struct * task=NULL;
++ int i=0;
++ int err=0;
++ printk("mtdev_connect\n");
++ mtdev=kzalloc(sizeof(struct mtdev_dev),GFP_KERNEL);
++ if(!mtdev)
++ return -ENOMEM;
++ mtdev->handle.dev=input_get_device(dev);
++ mtdev->handle.name="mtdev";
++ mtdev->handle.handler=handler;
++ mtdev->handle.private=mtdev;
++ mtdev->count=0;
++ mtdev->last_count=0;
++ init_waitqueue_head(&mtdev->wq);
++ for(;i<MTDEV_MAX_POINTERS;i++)
++ {
++ //we just store the data here, and will register it
++ //when the first event comes
++ struct input_dev *pdev=NULL;
++ mtdev->devs[i].ready=false;
++ mtdev->devs[i].id=-1;
++ mtdev->devs[i].touch=-1;
++ mtdev->devs[i].input_dev=input_allocate_device();
++ if(!mtdev->devs[i].input_dev)
++ return -ENOMEM;
++ pdev=mtdev->devs[i].input_dev;
++ memcpy(pdev->evbit,dev->evbit,sizeof(pdev->evbit));
++ memcpy(pdev->keybit,dev->keybit,sizeof(pdev->keybit));
++ memcpy(pdev->absbit,dev->absbit,sizeof(pdev->absbit));
++
++ memcpy(pdev->abs,dev->abs,sizeof(pdev->abs));
++ memcpy(pdev->absmax,dev->absmax,sizeof(pdev->absmax));
++ memcpy(pdev->absmin,dev->absmin,sizeof(pdev->absmin));
++
++ pdev->name="mtdev virtual input";
++ }
++
++ //create a thread to create the new input devices
++ //because there's a mutex,which may cause dead lock
++ task=kthread_run(mtdev_kt,mtdev,"mtdev thread");
++ if(!task)
++ printk("error !!!!\n");
++ else
++ printk("kthread created OK\n");
++
++
++ err=input_grab_device(&mtdev->handle);
++ if(err)
++ {
++ printk("error in grab device %d\n",err);
++ return err;
++ }
++ else
++ printk("successfully grab device \n");
++
++ wake_up_all(&mtdev->wq);
++ return 0;
++}
++
++static void mtdev_disconnect(struct input_handle *handle)
++{
++ printk("mtdev_disconnect\n");
++ input_release_device(handle);
++}
++
++static const struct input_device_id mtdev_ids[] = {
++ {
++ .flags=INPUT_DEVICE_ID_MATCH_VENDOR|INPUT_DEVICE_ID_MATCH_PRODUCT,
++ .vendor=0x1f87,
++ .product=0x0002,
++ },
++ {
++ .flags=INPUT_DEVICE_ID_MATCH_VENDOR|INPUT_DEVICE_ID_MATCH_PRODUCT,
++ .vendor=0x1f87,
++ .product=0x0001,
++ },
++ {
++ .flags=INPUT_DEVICE_ID_MATCH_VENDOR|INPUT_DEVICE_ID_MATCH_PRODUCT,
++ .vendor=0x0483,
++ .product=0x3261,
++ },
++ {
++ .flags=INPUT_DEVICE_ID_MATCH_VENDOR|INPUT_DEVICE_ID_MATCH_PRODUCT,
++ .vendor=0x2087,
++ .product=0x0a01,
++ },
++ {},
++};
++
++MODULE_DEVICE_TABLE(input,mtdev_ids);
++
++static struct input_handler mtdev_handler = {
++ .event = mtdev_event,
++ .connect = mtdev_connect,
++ .disconnect = mtdev_disconnect,
++ .name = "mtdev",
++ .id_table = mtdev_ids,
++};
++
++
++static int __init mtdev_init(void)
++{
++ return input_register_handler(&mtdev_handler);
++}
++
++static void __exit mtdev_exit(void)
++{
++ input_unregister_handler(&mtdev_handler);
++}
++
++module_init(mtdev_init);
++module_exit(mtdev_exit);
++
++MODULE_AUTHOR("He Min <min.he@intel.com>");
++MODULE_DESCRIPTION("Multi-touch input driver event devices");
++MODULE_LICENSE("GPL");
+diff --git a/include/linux/input.h b/include/linux/input.h
+index 663208a..55bf8bc 100644
+--- a/include/linux/input.h
++++ b/include/linux/input.h
+@@ -662,6 +662,7 @@ struct input_absinfo {
+ #define ABS_MT_BLOB_ID 0x38 /* Group a set of packets as a blob */
+ #define ABS_MT_TRACKING_ID 0x39 /* Unique ID of initiated contact */
+ #define ABS_MT_PRESSURE 0x3a /* Pressure on contact area */
++#define ABS_MT_CONTACT_COUNT 0x3b /* Contact count */
+
+ #define ABS_MAX 0x3f
+ #define ABS_CNT (ABS_MAX+1)
+--
+1.6.2.2
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-1-7.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-1-7.patch
new file mode 100644
index 0000000..fa66163
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-1-7.patch
@@ -0,0 +1,48 @@
+>From 6f9df3bc6571d6545c552151f408d69265e15f92 Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Sun, 18 Apr 2010 10:25:19 -0700
+Subject: [PATCH 1/7] sched: add a comment to get_cpu_idle_time_us()
+Patch-mainline: in -mm tree as of 19 Apr 2010
+
+The exported function get_cpu_idle_time_us() has no comment
+describing it; add a kerneldoc comment
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+---
+ kernel/time/tick-sched.c | 14 ++++++++++++++
+ 1 files changed, 14 insertions(+), 0 deletions(-)
+
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index f992762..54dc155 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -179,6 +179,20 @@ static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
+ return now;
+ }
+
++/**
++ * get_cpu_idle_time_us - get the total idle time of a cpu
++ * @cpu: CPU number to query
++ * @last_update_time: variable to store update time in
++ *
++ * Return the cummulative idle time (since boot) for a given
++ * CPU, in microseconds. The idle time returned includes
++ * the iowait time (unlike what "top" and co report).
++ *
++ * This time is measured via accounting rather than sampling,
++ * and is as accurate as ktime_get() is.
++ *
++ * This function returns -1 if NOHZ is not enabled.
++ */
+ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
+ {
+ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+--
+1.6.2.5
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at http://vger.kernel.org/majordomo-info.html
+Please read the FAQ at http://www.tux.org/lkml/
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-2-7.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-2-7.patch
new file mode 100644
index 0000000..ba34c4c
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-2-7.patch
@@ -0,0 +1,78 @@
+>From 166b7526ccfea8b44626b6023ff5b0a8eb869bb3 Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Sun, 18 Apr 2010 10:33:02 -0700
+Subject: [PATCH 2/7] sched: introduce a function to update the idle statistics
+Patch-mainline: in -mm tree as of 19 Apr 2010
+
+Currently, two places update the idle statistics (and more to
+come later in this series).
+
+This patch creates a helper function for updating these statistics.
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+---
+ kernel/time/tick-sched.c | 29 +++++++++++++++++++----------
+ 1 files changed, 19 insertions(+), 10 deletions(-)
+
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 54dc155..ca2211d 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -150,14 +150,25 @@ static void tick_nohz_update_jiffies(ktime_t now)
+ touch_softlockup_watchdog();
+ }
+
+-static void tick_nohz_stop_idle(int cpu, ktime_t now)
++/*
++ * Updates the per cpu time idle statistics counters
++ */
++static void update_ts_time_stats(struct tick_sched *ts, ktime_t now)
+ {
+- struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+ ktime_t delta;
+
+- delta = ktime_sub(now, ts->idle_entrytime);
+ ts->idle_lastupdate = now;
+- ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
++ if (ts->idle_active) {
++ delta = ktime_sub(now, ts->idle_entrytime);
++ ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
++ }
++}
++
++static void tick_nohz_stop_idle(int cpu, ktime_t now)
++{
++ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
++
++ update_ts_time_stats(ts, now);
+ ts->idle_active = 0;
+
+ sched_clock_idle_wakeup_event(0);
+@@ -165,14 +176,12 @@ static void tick_nohz_stop_idle(int cpu, ktime_t now)
+
+ static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
+ {
+- ktime_t now, delta;
++ ktime_t now;
+
+ now = ktime_get();
+- if (ts->idle_active) {
+- delta = ktime_sub(now, ts->idle_entrytime);
+- ts->idle_lastupdate = now;
+- ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
+- }
++
++ update_ts_time_stats(ts, now);
++
+ ts->idle_entrytime = now;
+ ts->idle_active = 1;
+ sched_clock_idle_sleep_event();
+--
+1.6.2.5
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at http://vger.kernel.org/majordomo-info.html
+Please read the FAQ at http://www.tux.org/lkml/
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-3-7.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-3-7.patch
new file mode 100644
index 0000000..56c09f7
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-3-7.patch
@@ -0,0 +1,62 @@
+>From 60851b131900af03bf013afef69f3bcdbb04f1d6 Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Sun, 18 Apr 2010 10:41:30 -0700
+Subject: [PATCH 3/7] sched: update the idle statistics in get_cpu_idle_time_us()
+Patch-mainline: in -mm tree as of 19 Apr 2010
+
+Right now, get_cpu_idle_time_us() only reports the idle statistics
+upto the point the CPU entered last idle; not what is valid right now.
+
+This patch adds an update of the idle statistics to get_cpu_idle_time_us(),
+so that calling this function always returns statistics that are accurate
+at the point of the call.
+
+This includes resetting the start of the idle time for accounting purposes
+to avoid double accounting.
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+---
+ kernel/time/tick-sched.c | 7 ++++++-
+ 1 files changed, 6 insertions(+), 1 deletions(-)
+
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index ca2211d..7dbad2f 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -161,6 +161,7 @@ static void update_ts_time_stats(struct tick_sched *ts, ktime_t now)
+ if (ts->idle_active) {
+ delta = ktime_sub(now, ts->idle_entrytime);
+ ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
++ ts->idle_entrytime = now;
+ }
+ }
+
+@@ -205,14 +206,18 @@ static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
+ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
+ {
+ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
++ ktime_t now;
+
+ if (!tick_nohz_enabled)
+ return -1;
+
++ now = ktime_get();
++ update_ts_time_stats(ts, now);
++
+ if (ts->idle_active)
+ *last_update_time = ktime_to_us(ts->idle_lastupdate);
+ else
+- *last_update_time = ktime_to_us(ktime_get());
++ *last_update_time = ktime_to_us(now);
+
+ return ktime_to_us(ts->idle_sleeptime);
+ }
+--
+1.6.2.5
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at http://vger.kernel.org/majordomo-info.html
+Please read the FAQ at http://www.tux.org/lkml/
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-4-7.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-4-7.patch
new file mode 100644
index 0000000..201b53c
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-4-7.patch
@@ -0,0 +1,89 @@
+>From e75d6cd203e43ea4c5e9919f19e2882c066491b8 Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Sun, 18 Apr 2010 10:47:02 -0700
+Subject: [PATCH 4/7] sched: fold updating of the last update time into update_ts_time_stats()
+Patch-mainline: in -mm tree as of 19 Apr 2010
+
+This patch folds the updating of the last_update_time into the
+update_ts_time_stats() function, and updates the callers.
+
+This allows for further cleanups that are done in the next patch.
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+---
+ kernel/time/tick-sched.c | 22 +++++++++++-----------
+ 1 files changed, 11 insertions(+), 11 deletions(-)
+
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 7dbad2f..ac54543 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -153,7 +153,8 @@ static void tick_nohz_update_jiffies(ktime_t now)
+ /*
+ * Updates the per cpu time idle statistics counters
+ */
+-static void update_ts_time_stats(struct tick_sched *ts, ktime_t now)
++static void
++update_ts_time_stats(struct tick_sched *ts, ktime_t now, u64 *last_update_time)
+ {
+ ktime_t delta;
+
+@@ -163,13 +164,19 @@ static void update_ts_time_stats(struct tick_sched *ts, ktime_t now)
+ ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
+ ts->idle_entrytime = now;
+ }
++
++ if (ts->idle_active && last_update_time)
++ *last_update_time = ktime_to_us(ts->idle_lastupdate);
++ else
++ *last_update_time = ktime_to_us(now);
++
+ }
+
+ static void tick_nohz_stop_idle(int cpu, ktime_t now)
+ {
+ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+
+- update_ts_time_stats(ts, now);
++ update_ts_time_stats(ts, now, NULL);
+ ts->idle_active = 0;
+
+ sched_clock_idle_wakeup_event(0);
+@@ -181,7 +188,7 @@ static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
+
+ now = ktime_get();
+
+- update_ts_time_stats(ts, now);
++ update_ts_time_stats(ts, now, NULL);
+
+ ts->idle_entrytime = now;
+ ts->idle_active = 1;
+@@ -206,18 +213,11 @@ static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
+ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
+ {
+ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+- ktime_t now;
+
+ if (!tick_nohz_enabled)
+ return -1;
+
+- now = ktime_get();
+- update_ts_time_stats(ts, now);
+-
+- if (ts->idle_active)
+- *last_update_time = ktime_to_us(ts->idle_lastupdate);
+- else
+- *last_update_time = ktime_to_us(now);
++ update_ts_time_stats(ts, ktime_get(), last_update_time);
+
+ return ktime_to_us(ts->idle_sleeptime);
+ }
+--
+1.6.2.5
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at http://vger.kernel.org/majordomo-info.html
+Please read the FAQ at http://www.tux.org/lkml/
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-5-7.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-5-7.patch
new file mode 100644
index 0000000..d800e33
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-5-7.patch
@@ -0,0 +1,63 @@
+>From 526a9f347d5a953f37b67b4b2afb39d7b4d77a92 Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Sun, 18 Apr 2010 10:49:30 -0700
+Subject: [PATCH 5/7] sched: eliminate the ts->idle_lastupdate field
+Patch-mainline: in -mm tree as of 19 Apr 2010
+
+Now that the only user of ts->idle_lastupdate is update_ts_time_stats(),
+the entire field can be eliminated.
+
+In update_ts_time_stats(), idle_lastupdate is first set to "now",
+and a few lines later, the only user is an if() statement that
+assigns a variable either to "now" or to ts->idle_lastupdate,
+which has the value of "now" at that point.
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+---
+ include/linux/tick.h | 1 -
+ kernel/time/tick-sched.c | 5 +----
+ 2 files changed, 1 insertions(+), 5 deletions(-)
+
+diff --git a/include/linux/tick.h b/include/linux/tick.h
+index d2ae79e..0343eed 100644
+--- a/include/linux/tick.h
++++ b/include/linux/tick.h
+@@ -60,7 +60,6 @@ struct tick_sched {
+ ktime_t idle_waketime;
+ ktime_t idle_exittime;
+ ktime_t idle_sleeptime;
+- ktime_t idle_lastupdate;
+ ktime_t sleep_length;
+ unsigned long last_jiffies;
+ unsigned long next_jiffies;
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index ac54543..326f5f8 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -158,16 +158,13 @@ update_ts_time_stats(struct tick_sched *ts, ktime_t now, u64 *last_update_time)
+ {
+ ktime_t delta;
+
+- ts->idle_lastupdate = now;
+ if (ts->idle_active) {
+ delta = ktime_sub(now, ts->idle_entrytime);
+ ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
+ ts->idle_entrytime = now;
+ }
+
+- if (ts->idle_active && last_update_time)
+- *last_update_time = ktime_to_us(ts->idle_lastupdate);
+- else
++ if (last_update_time)
+ *last_update_time = ktime_to_us(now);
+
+ }
+--
+1.6.2.5
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at http://vger.kernel.org/majordomo-info.html
+Please read the FAQ at http://www.tux.org/lkml/
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-6-7.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-6-7.patch
new file mode 100644
index 0000000..fb30105
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-6-7.patch
@@ -0,0 +1,122 @@
+>From c4dd11703034f2ecbc3180603663fab14c292d7c Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Sun, 18 Apr 2010 10:57:43 -0700
+Subject: [PATCH 6/7] sched: introduce get_cpu_iowait_time_us()
+Patch-mainline: in -mm tree as of 19 Apr 2010
+
+For the ondemand cpufreq governor, it is desired that the iowait
+time is microaccounted in a similar way as idle time is.
+
+This patch introduces the infrastructure to account and expose
+this information via the get_cpu_iowait_time_us() function.
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+---
+ include/linux/tick.h | 4 ++++
+ kernel/time/tick-sched.c | 28 ++++++++++++++++++++++++++++
+ kernel/time/timer_list.c | 1 +
+ 3 files changed, 33 insertions(+), 0 deletions(-)
+
+diff --git a/include/linux/tick.h b/include/linux/tick.h
+index 0343eed..4aa3703 100644
+--- a/include/linux/tick.h
++++ b/include/linux/tick.h
+@@ -42,6 +42,7 @@ enum tick_nohz_mode {
+ * @idle_waketime: Time when the idle was interrupted
+ * @idle_exittime: Time when the idle state was left
+ * @idle_sleeptime: Sum of the time slept in idle with sched tick stopped
++ * @iowait_sleeptime: Sum of the time slept in idle with sched tick stopped, with IO outstanding
+ * @sleep_length: Duration of the current idle sleep
+ * @do_timer_lst: CPU was the last one doing do_timer before going idle
+ */
+@@ -60,6 +61,7 @@ struct tick_sched {
+ ktime_t idle_waketime;
+ ktime_t idle_exittime;
+ ktime_t idle_sleeptime;
++ ktime_t iowait_sleeptime;
+ ktime_t sleep_length;
+ unsigned long last_jiffies;
+ unsigned long next_jiffies;
+@@ -123,6 +125,7 @@ extern void tick_nohz_stop_sched_tick(int inidle);
+ extern void tick_nohz_restart_sched_tick(void);
+ extern ktime_t tick_nohz_get_sleep_length(void);
+ extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
++extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
+ # else
+ static inline void tick_nohz_stop_sched_tick(int inidle) { }
+ static inline void tick_nohz_restart_sched_tick(void) { }
+@@ -133,6 +136,7 @@ static inline ktime_t tick_nohz_get_sleep_length(void)
+ return len;
+ }
+ static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
++static inline u64 get_cpu_iowait(int cpu, u64 *unused) { return -1; }
+ # endif /* !NO_HZ */
+
+ #endif
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 326f5f8..a6104a8 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -161,6 +161,8 @@ update_ts_time_stats(struct tick_sched *ts, ktime_t now, u64 *last_update_time)
+ if (ts->idle_active) {
+ delta = ktime_sub(now, ts->idle_entrytime);
+ ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
++ if (nr_iowait_cpu() > 0)
++ ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
+ ts->idle_entrytime = now;
+ }
+
+@@ -220,6 +222,32 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
+ }
+ EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
+
++/*
++ * get_cpu_iowait_time_us - get the total iowait time of a cpu
++ * @cpu: CPU number to query
++ * @last_update_time: variable to store update time in
++ *
++ * Return the cummulative iowait time (since boot) for a given
++ * CPU, in microseconds.
++ *
++ * This time is measured via accounting rather than sampling,
++ * and is as accurate as ktime_get() is.
++ *
++ * This function returns -1 if NOHZ is not enabled.
++ */
++u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
++{
++ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
++
++ if (!tick_nohz_enabled)
++ return -1;
++
++ update_ts_time_stats(ts, ktime_get(), last_update_time);
++
++ return ktime_to_us(ts->iowait_sleeptime);
++}
++EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
++
+ /**
+ * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
+ *
+diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
+index 1a4a7dd..ab8f5e3 100644
+--- a/kernel/time/timer_list.c
++++ b/kernel/time/timer_list.c
+@@ -176,6 +176,7 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
+ P_ns(idle_waketime);
+ P_ns(idle_exittime);
+ P_ns(idle_sleeptime);
++ P_ns(iowait_sleeptime);
+ P(last_jiffies);
+ P(next_jiffies);
+ P_ns(idle_expires);
+--
+1.6.2.5
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at http://vger.kernel.org/majordomo-info.html
+Please read the FAQ at http://www.tux.org/lkml/
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-7-7.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-7-7.patch
new file mode 100644
index 0000000..32a2bdb
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-ondemand-fix-7-7.patch
@@ -0,0 +1,114 @@
+>From 27966bedabea83c4f3ae77507eceb746b1f6ebae Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@linux.intel.com>
+Date: Sun, 18 Apr 2010 11:15:56 -0700
+Subject: [PATCH 7/7] ondemand: Solve the big performance issue with ondemand during disk IO
+Patch-mainline: in -mm tree as of 19 Apr 2010
+
+The ondemand cpufreq governor uses CPU busy time (e.g. not-idle time) as
+a measure for scaling the CPU frequency up or down.
+If the CPU is busy, the CPU frequency scales up, if it's idle, the CPU
+frequency scales down. Effectively, it uses the CPU busy time as proxy
+variable for the more nebulous "how critical is performance right now"
+question.
+
+This algorithm falls flat on its face in the light of workloads where
+you're alternatingly disk and CPU bound, such as the ever popular
+"git grep", but also things like startup of programs and maildir using
+email clients... much to the chagarin of Andrew Morton.
+
+This patch changes the ondemand algorithm to count iowait time as busy,
+not idle, time. As shown in the breakdown cases above, iowait is performance
+critical often, and by counting iowait, the proxy variable becomes a more
+accurate representation of the "how critical is performance" question.
+
+The problem and fix are both verified with the "perf timechar" tool.
+
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+---
+ drivers/cpufreq/cpufreq_ondemand.c | 30 ++++++++++++++++++++++++++++--
+ 1 files changed, 28 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
+index bd444dc..ed472f8 100644
+--- a/drivers/cpufreq/cpufreq_ondemand.c
++++ b/drivers/cpufreq/cpufreq_ondemand.c
+@@ -73,6 +73,7 @@ enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
+
+ struct cpu_dbs_info_s {
+ cputime64_t prev_cpu_idle;
++ cputime64_t prev_cpu_iowait;
+ cputime64_t prev_cpu_wall;
+ cputime64_t prev_cpu_nice;
+ struct cpufreq_policy *cur_policy;
+@@ -148,6 +149,16 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
+ return idle_time;
+ }
+
++static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
++{
++ u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);
++
++ if (iowait_time == -1ULL)
++ return 0;
++
++ return iowait_time;
++}
++
+ /*
+ * Find right freq to be set now with powersave_bias on.
+ * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
+@@ -470,14 +481,15 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
+
+ for_each_cpu(j, policy->cpus) {
+ struct cpu_dbs_info_s *j_dbs_info;
+- cputime64_t cur_wall_time, cur_idle_time;
+- unsigned int idle_time, wall_time;
++ cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
++ unsigned int idle_time, wall_time, iowait_time;
+ unsigned int load, load_freq;
+ int freq_avg;
+
+ j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
+
+ cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
++ cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
+
+ wall_time = (unsigned int) cputime64_sub(cur_wall_time,
+ j_dbs_info->prev_cpu_wall);
+@@ -487,6 +499,10 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
+ j_dbs_info->prev_cpu_idle);
+ j_dbs_info->prev_cpu_idle = cur_idle_time;
+
++ iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
++ j_dbs_info->prev_cpu_iowait);
++ j_dbs_info->prev_cpu_iowait = cur_iowait_time;
++
+ if (dbs_tuners_ins.ignore_nice) {
+ cputime64_t cur_nice;
+ unsigned long cur_nice_jiffies;
+@@ -504,6 +520,16 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
+ idle_time += jiffies_to_usecs(cur_nice_jiffies);
+ }
+
++ /*
++ * For the purpose of ondemand, waiting for disk IO is an
++ * indication that you're performance critical, and not that
++ * the system is actually idle. So subtract the iowait time
++ * from the cpu idle time.
++ */
++
++ if (idle_time >= iowait_time)
++ idle_time -= iowait_time;
++
+ if (unlikely(!wall_time || wall_time < idle_time))
+ continue;
+
+--
+1.6.2.5
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at http://vger.kernel.org/majordomo-info.html
+Please read the FAQ at http://www.tux.org/lkml/
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-can.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-can.patch
new file mode 100644
index 0000000..1d7d8d2
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-can.patch
@@ -0,0 +1,10765 @@
+From: Masayuki Ohtake <masa-korg@dsn.okisemi.com>
+Subject: OKI Semiconductor PCH CAN driver
+
+This driver implements CAN controls for PCH.
+
+Signed-off-by: Masayuki Ohtake <masa-korg@dsn.okisemi.com>
+Acked-by: Wang Qi <qi.wang@intel.com>
+
+---
+ drivers/net/can/Kconfig | 5 ++
+ drivers/net/can/Makefile | 1
+ drivers/net/can/pch_can/Makefile | 7
+ drivers/net/can/pch_can/pch_can_hal.c | 4785
+ drivers/net/can/pch_can/pch_can_hal.h | 1407
+ drivers/net/can/pch_can/pch_can_main.c | 1681
+ drivers/net/can/pch_can/pch_can_main.h | 826
+ drivers/net/can/pch_can/pch_can_pci.c | 1134
+ drivers/net/can/pch_can/pch_can_pci.h | 105
+ drivers/net/can/pch_can/pch_can_utils.c | 397
+ drivers/net/can/pch_can/pch_can_utils.h | 127
+ drivers/net/can/pch_can/pch_common.h | 146
+ drivers/net/can/pch_can/pch_debug.h | 60
++++++++++++++++++++++++++++++++ 13 files changed, 10682 insertions(+)
+diff -urN linux-2.6.33-rc3/drivers/net/can/Kconfig topcliff-2.6.33-rc3/drivers/net/can/Kconfig
+--- linux-2.6.33-rc3/drivers/net/can/Kconfig 2010-01-06 09:02:46.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/can/Kconfig 2010-03-05 11:04:53.000000000 +0900
+@@ -79,4 +79,9 @@
+ a problem with CAN support and want to see more of what is going
+ on.
+
++config PCH_CAN
++ tristate "PCH CAN"
++ ---help---
++ for PCH CAN
++
+ endmenu
+diff -urN linux-2.6.33-rc3/drivers/net/can/Makefile topcliff-2.6.33-rc3/drivers/net/can/Makefile
+--- linux-2.6.33-rc3/drivers/net/can/Makefile 2010-01-06 09:02:46.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/can/Makefile 2010-03-05 23:15:46.000000000 +0900
+@@ -15,5 +15,6 @@
+ obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
+ obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
+ obj-$(CONFIG_CAN_BFIN) += bfin_can.o
++obj-$(CONFIG_PCH_CAN) += pch_can/
+
+ ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
+diff -urN linux-2.6.33-rc3/drivers/net/can/pch_can/Makefile topcliff-2.6.33-rc3/drivers/net/can/pch_can/Makefile
+--- linux-2.6.33-rc3/drivers/net/can/pch_can/Makefile 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/can/pch_can/Makefile 2010-03-05 23:32:20.000000000 +0900
+@@ -0,0 +1,7 @@
++ifeq ($(CONFIG_CAN_DEBUG),y)
++EXTRA_CFLAGS += -DDEBUG
++endif
++
++obj-$(CONFIG_PCH_CAN) += pch_can.o
++
++pch_can-objs := pch_can_utils.o pch_can_hal.o pch_can_pci.o pch_can_main.o
+diff -urN linux-2.6.33-rc3/drivers/net/can/pch_can/pch_can_hal.c topcliff-2.6.33-rc3/drivers/net/can/pch_can/pch_can_hal.c
+--- linux-2.6.33-rc3/drivers/net/can/pch_can/pch_can_hal.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/can/pch_can/pch_can_hal.c 2010-03-10 20:03:53.000000000 +0900
+@@ -0,0 +1,4785 @@
++/*!
++ * @file ioh_can_hal.c
++ * @brief Provides the function definition for CAN HAL APIs.
++ * @version 1.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * <hr>
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ *
++ */
++
++/* includes */
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/io.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++
++#include "pch_common.h"
++#include "pch_debug.h"
++#include "pch_can_main.h"
++#include "pch_can_hal.h"
++#include "pch_can_utils.h"
++#include "pch_can_pci.h"
++
++#define COUNTER_LIMIT (0xFFFF)
++
++/*! @ingroup Global
++ @var ioh_msg_obj_conf
++ @brief This variable is used to store the configuration
++ (receive /transmit) of the available message
++ objects.
++ @remarks This variable is used for storing the message object
++ configuration related information. It includes
++ the
++ information about which message object is used
++ as
++ Receiver and Transmitter.
++ @note The information is filled during the probe stage of
++ the device.
++
++ @see
++ - ioh_can_probe
++
++ <hr>
++*/
++unsigned int ioh_msg_obj_conf[MAX_MSG_OBJ] = {
++ 3, 3, 3, 3,
++ 3, 3, 3, 3,
++ 3, 3, 3, 3,
++ 3, 3, 3, 3,
++ 3, 3, 3, 3,
++ 3, 3, 3, 3,
++ 3, 3, 3, 3,
++ 3, 3, 3, 3
++};
++
++/*! @ingroup HALLayerAPI
++ @fn static int ioh_can_rx_enable_all(int handle)
++
++ <hr>
++*/
++static int ioh_can_rx_enable_all(int handle);
++
++/*! @ingroup HALLayerAPI
++ @fn static int ioh_can_tx_enable_all(int handle)
++
++ <hr>
++*/
++static int ioh_can_tx_enable_all(int handle);
++
++/*! @ingroup HALLayerFacilitators
++ @var restartMode
++ @brief The variable used to store the restart mode.
++
++ <hr>
++*/
++static enum ioh_can_auto_restart restartMode = CAN_MANUAL;
++
++/*! @ingroup InterfaceLayer
++ @struct can_hw
++ @brief This structure defines format for the storage of base
++ address.
++
++ <hr>
++*/
++struct can_hw {
++ unsigned char *io_base; /* Device registers */
++};
++
++/* Array to store the timing settings. */
++/*! @ingroup Global
++ @var can_rec_timing
++ @brief This variable is used to store the time
++ settings of the CAN device.
++ @remarks This variable is used for storing the timing
++ related information depending upon the
++ clock rate of the CAN device. It is used for
++ setting the baud rate of the CAN device.
++
++ @see
++ - ioh_can_pci_init
++ - ioh_can_set_baud_simple
++
++ <hr>
++*/
++struct ioh_can_timing can_rec_timing[] = {
++ /*<Baud rate> <BRP> <TS1> <TS2> <SJW> */
++ /* settings for 62.5MHz */
++ {0xa, 0x250, 0x7, 0x0, 0x0, 0x0, 0x0}, /**< 10 kbits/s */
++ {0x14, 0x8D, 0xB, 0x5, 0x0, 0x0, 0x0}, /**< 20 kbits/s */
++ {0x32, 0x5C, 0x7, 0x0, 0x0, 0x0, 0x0}, /**< 50 kbits/s */
++ {0x7d, 0x18, 0xC, 0x5, 0x0, 0x0, 0x0}, /**< 125 kbits/s */
++ {0xfa, 0x18, 0x7, 0x0, 0x0, 0x0, 0x0}, /**< 250 kbits/s */
++ {0x1f4, 0x8, 0x9, 0x2, 0x0, 0x0, 0x0}, /**< 500 kbits/s */
++ {0x320, 0x5, 0x8, 0x2, 0x0, 0x0, 0x0}, /**< 800 kbits/s */
++ {0x3e8, 0x2, 0xC, 0x6, 0x0, 0x0, 0x0}, /**< 1000 kbits/s */
++
++ /* settings for 24MHz */
++ {0xa, 0xCF, 0x7, 0x0, 0x0, 0x0, 0x0}, /**< 10 kbits/s */
++ {0x14, 0x57, 0x7, 0x0, 0x0, 0x0, 0x0}, /**< 20 kbits/s */
++ {0x32, 0xF, 0x7, 0x0, 0x0, 0x0, 0x0}, /**< 50 kbits/s */
++ {0x7d, 0xF, 0x8, 0x1, 0x0, 0x0, 0x0}, /**< 125 kbits/s */
++ {0xfa, 0x7, 0x8, 0x1, 0x0, 0x0, 0x0}, /**< 250 kbits/s */
++ {0x1f4, 0x3, 0x8, 0x1, 0x0, 0x0, 0x0}, /**< 500 kbits/s */
++ {0x320, 0x2, 0x7, 0x0, 0x0, 0x0, 0x0}, /**< 800 kbits/s */
++ {0x3e8, 0x1, 0x8, 0x1, 0x0, 0x0, 0x0}, /**< 1000 kbits/s */
++
++ /* settings for 50MHz */
++ {0xa, 0xFA, 0xC, 0x5, 0x1, 0x0, 0x0}, /**< 10 kbits/s */
++ {0x14, 0x7D, 0xC, 0x5, 0x1, 0x0, 0x0}, /**< 20 kbits/s */
++ {0x32, 0x32, 0xF, 0x2, 0x0, 0x0, 0x0}, /**< 50 kbits/s */
++ {0x7d, 0x19, 0xC, 0x1, 0x0, 0x0, 0x0}, /**< 125 kbits/s */
++ {0xfa, 0xA, 0xF, 0x2, 0x0, 0x0, 0x0}, /**< 250 kbits/s */
++ {0x1f4, 0x5, 0xF, 0x2, 0x0, 0x0, 0x0}, /**< 500 kbits/s */
++ {0x320, 0x5, 0x8, 0x2, 0x1, 0x0, 0x0}, /**< 800 kbits/s */
++ {0x3e8, 0x2, 0xF, 0x7, 0x0, 0x0, 0x0} /**< 1000 kbits/s */
++ /* Add the new clock settings here. */
++};
++
++/* The offset to the settings array depending on the clock rate entries.*/
++
++/*! @ingroup HALLayer
++ @def IOH_CAN_CLOCK_DEFAULT_OFFSET
++ @brief The default clock rate offset to the @ref
++ can_rec_timing array.
++ @see ioh_can_set_baud_simple
++
++ <hr>
++*/
++#define IOH_CAN_CLOCK_DEFAULT_OFFSET (0)
++/*! @ingroup HALLayer
++ @def IOH_CAN_CLOCK_62_5_OFFSET
++ @brief The offset to the @ref can_rec_timing array when the
++ clock
++ rate is 62.5MHz.
++ @see ioh_can_set_baud_simple
++
++ <hr>
++*/
++#define IOH_CAN_CLOCK_62_5_OFFSET (0)
++/*! @ingroup HALLayer
++ @def IOH_CAN_CLOCK_24_OFFSET
++ @brief The offset to the @ref can_rec_timing array when the
++ clock
++ rate is 24MHz.
++ @see ioh_can_set_baud_simple
++
++ <hr>
++*/
++#define IOH_CAN_CLOCK_24_OFFSET (8)
++
++/*! @ingroup HALLayer
++ @def IOH_CAN_CLOCK_50_OFFSET
++ @brief The offset to the @ref can_rec_timing array when the
++ clock
++ rate is 24MHz.
++ @see ioh_can_set_baud_simple
++
++ <hr>
++*/
++#define IOH_CAN_CLOCK_50_OFFSET (16)
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_create(u8 * io_base)
++ @brief Creates a device handle for the other APIs.
++ @remarks
++ This function creates a device handle that can
++ be used by
++ other HAL APIs for accessing the device
++ specific resources.
++ The main tasks performed by this function are :
++ - Allocates memory for the handle.
++ - Creates the handle and returns it to the
++ called function.
++
++ @param io_base [@ref IN] The remapped address
++ for handle creation.
++
++ @retval int <br>
++ - Handle to the device --> Handle creation
++ successful.
++ - @ref IOH_CAN_NULL --> Handle creation failed.
++
++ @see
++ - ioh_can_probe
++
++ <hr>
++*/
++int ioh_can_create(u8 *io_base)
++{
++ struct can_hw *can = NULL;
++ int retval = (int) IOH_CAN_NULL;
++
++ if (io_base == NULL) {
++ IOH_LOG(KERN_ERR, "ioh_can_create -> Invalid IO Base\n");
++ }
++
++ else {
++ /* Allocates memory for the handle. */
++ can = (struct can_hw *) CAN_MEM_ALLOC(sizeof(struct can_hw));
++
++ if (can == NULL) { /* Allocation failed */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_create -> CAN Memory allocation \
++ failed\n");
++ } else { /* Allocation successful */
++
++ can->io_base = io_base;
++ retval = (int) can;
++ IOH_DEBUG
++ ("ioh_can_create -> Handle Creation successful.\n");
++ }
++ }
++
++ IOH_DEBUG("ioh_can_create -> Return value: %x\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn void ioh_can_destroy(int handle)
++ @brief Destroys (frees) the device handle.
++ @remarks This function destroys the previously created handle of
++ the
++ device. It also de-allocates the memory
++ allocated during the
++ handle creation. The main tasks performed by
++ this function
++ are :
++ - Verifies whether the passed argument is valid.
++ - If valid frees the allocated memory whose
++ reference is
++ provided by the passed argument.
++
++ @param handle [@ref IN] The device handle to be
++ destroyed.
++
++ @retval None.
++
++ @see
++ - ioh_can_remove
++
++ <hr>
++*/
++void ioh_can_destroy(int handle)
++{
++ struct can_hw *can = (struct can_hw *) handle;
++
++ if (handle != (int) 0) {
++ /*Free the memory for the handle. */
++ CAN_MEM_FREE(can);
++ IOH_DEBUG("ioh_can_destroy -> Free successful.\n");
++ } else {
++ IOH_LOG(KERN_ERR, "ioh_can_destroy -> Invalid handle.\n");
++ }
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_set_run_mode(
++ int handle,enum ioh_can_run_mode mode)
++ @brief Set run/stop mode of the CAN device.
++ @remarks This API set the CAN device in RUN/STOP mode. It does
++ this only if the passed arguments are valid.
++ The main tasks performed by this function are :
++ - Validates whether the passed arguments are
++ valid. If invalid
++ error status code is returned.
++ - Depending on the mode to be set, suitably
++ sets/resets the
++ INIT bit of the CANCONT register.
++ --> RUM Mode : Resets the INIT bit.
++ --> STOP Mode: Sets the INIT bit.
++
++ @param handle [@ref IN] Handle to the device.
++ @param mode [@ref IN] The mode to be set.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation fails.
++
++ @see
++ - ioh_can_ioctl
++ - ioh_can_suspend
++ - ioh_can_resume
++
++ <hr>
++*/
++int ioh_can_set_run_mode(int handle, enum ioh_can_run_mode mode)
++{
++ int retval = IOH_CAN_SUCCESS;
++ u8 *can_baseaddress = NULL;
++
++ if (handle == (int) 0) { /* handle invalid */
++ IOH_LOG(KERN_ERR, "ioh_can_set_run_mode -> Invalid Handle \n");
++ retval = IOH_CAN_FAIL;
++ }
++
++ else {
++ /* Retrieving base address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ switch (mode) {
++ case IOH_CAN_RUN:
++ IOH_CAN_BIT_CLEAR((can_baseaddress + CAN_CONT_OFFSET),
++ CAN_CTRL_INIT);
++ IOH_DEBUG
++ ("ioh_can_set_run_mode -> Can set to RUN Mode.\n");
++ break;
++
++ case IOH_CAN_STOP:
++ IOH_CAN_BIT_SET((can_baseaddress + CAN_CONT_OFFSET),
++ CAN_CTRL_INIT);
++ IOH_DEBUG
++ ("ioh_can_set_run_mode -> Can set to STOP Mode.\n");
++ break;
++
++ default:
++ IOH_LOG(KERN_ERR,
++ "ioh_can_set_run_mode -> Invalid run mode.\n");
++ retval = IOH_CAN_FAIL;
++ break;
++ }
++ }
++
++ IOH_DEBUG("ioh_can_set_run_mode -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_get_run_mode(int handle,enum
++ ioh_can_run_mode *mode)
++ @brief This function gets current the run/stop mode of the CAN
++ device.
++ @remarks Retrieves the current mode(RUN/STOP) of the device
++ by checking the corresponding settings on the
++ hardware.
++ The main tasks performed by this function are :
++ - Validates whether the passed arguments are
++ valid. If invalid
++ returns the error status code.
++ - Depending on the INIT bit value of the CANCONT
++ register
++ the appropriate mode is copied to the
++ passed mode
++ reference variable passed as argument.
++
++ INIT bit value 0: RUN Mode.
++ INIT bit value 1: STOP mode.
++
++ @param handle [@ref IN] The handle to the device.
++ @param mode [@ref OUT] The current mode of the device.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_ioctl
++ - ioh_can_suspend
++
++ <hr>
++*/
++int ioh_can_get_run_mode(int handle, enum ioh_can_run_mode *mode)
++{
++ u32 reg_val;
++ struct can_hw *can;
++ int retval = IOH_CAN_SUCCESS;
++ u8 *can_baseaddress;
++
++ if ((handle == (int) 0) || (mode == NULL)) { /* if invalid
++ parameter. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_get_run_mode -> Invalid parameter.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Obtaining the remap address for access. */
++ can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ reg_val = IOH_READ_LONG(can_baseaddress + CAN_CONT_OFFSET);
++
++ /* Checking the Init bit of Can Control Register.
++ Init Bit 1 -> Stop
++ Init Bit 0 -> Run
++ */
++ if ((reg_val & CAN_CTRL_INIT) != 0) {
++ *mode = IOH_CAN_STOP;
++ IOH_DEBUG
++ ("ioh_can_get_run_mode -> Mode is IOH_CAN_STOP\n");
++ } else {
++ *mode = IOH_CAN_RUN;
++ IOH_DEBUG
++ ("ioh_can_get_run_mode -> Mode is IOH_CAN_RUN\n");
++ }
++ }
++
++ IOH_DEBUG("ioh_can_get_run_mode -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_set_arbiter_mode(
++ int handle,enum ioh_can_arbiter mode)
++ @brief This function sets the arbiter mode of the CAN device.
++ @remarks Sets the arbiter mode of the CAN device. The main tasks
++ performed by this function are :
++ - Validates whether the passed arguments and
++ mode are valid.
++ If invalid returns the error status code.
++ - Sets the arbiter mode.
++ @note Only Fixed Priority mode is supported.
++
++ @param handle [@ref IN] The handle to the device.
++ @param mode [@ref IN] The arbiter mode to be set.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++int ioh_can_set_arbiter_mode(int handle, enum ioh_can_arbiter mode)
++{
++ int retval = IOH_CAN_SUCCESS;
++
++ if (handle == (int) 0) { /* if invalid handle */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_set_arbiter_mode -> Invalid Handle\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* IOH CAN Controller supports only IOH_CAN_FIXED_PRIORITY
++ arbiter mode.
++ */
++ switch (mode) {
++ case IOH_CAN_FIXED_PRIORITY:
++ IOH_DEBUG("ioh_can_set_arbiter_mode -> FIXED "
++ "PRIORITY is set for Arbiter mode\n");
++ break;
++
++ case IOH_CAN_ROUND_ROBIN:
++ default:
++ IOH_DEBUG("ioh_can_set_arbiter_mode -> "
++ "Invalid arbiter mode\n");
++ retval = IOH_CAN_FAIL;
++ break;
++ }
++ }
++
++ IOH_DEBUG("ioh_can_set_arbiter_mode -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_get_arbiter_mode(
++ int handle,enum ioh_can_arbiter *mode
++ @brief This function gets the arbiter mode of the CAN device.
++ @remarks Gets the current arbiter mode of the CAN device. The
++ main tasks performed by this function are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid returns the error status code.
++ - Copies the current arbiter mode to the passed
++ mode reference variable.
++ @note Only Fixed Priority mode is supported.
++
++ @param handle [@ref IN] The handle to the device.
++ @param mode [@ref OUT] The current arbiter mode of the
++ device.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++int ioh_can_get_arbiter_mode(int handle, enum ioh_can_arbiter *mode)
++{
++ int retval = IOH_CAN_SUCCESS;
++
++ if ((handle == (int) 0) || (mode == NULL)) { /* if invalid
++ parameter. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_get_arbiter_mode -> Invalid parameter\n");
++ retval = IOH_CAN_FAIL;
++ }
++
++ else {
++ /* IOH CAN Controller supports only IOH_CAN_FIXED_PRIORITY
++ arbiter mode.
++ */
++ *mode = IOH_CAN_FIXED_PRIORITY;
++ IOH_DEBUG("ioh_can_get_arbiter_mode -> Arbiter Mode "
++ "is IOH_CAN_FIXED_PRIORITY\n");
++ }
++
++ IOH_DEBUG("ioh_can_get_arbiter_mode returns %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_set_restart_mode(int handle,enum
++ ioh_can_auto_restart mode)
++ @brief This function sets the restart mode of the CAN device.
++ @remarks Sets the restart mode of the CAN device. The main
++ tasks performed by this function are :
++ - Validates whether the passed arguments and
++ mode are valid.
++ If invalid returns the error status code.
++ - Updates the current restart mode variable
++ @ref restartMode
++ to given mode.
++
++ @param handle [@ref IN] The handle to the device.
++ @param mode [@ref IN] The restart mode to be set.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_ioctl
++ - ioh_can_open
++
++ <hr>
++*/
++int ioh_can_set_restart_mode(int handle,
++ enum ioh_can_auto_restart mode)
++{
++ int retval = IOH_CAN_SUCCESS;
++
++ if (handle == (int) 0) { /* if invalid handle. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_set_restart_mode -> Invalid Handle\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ switch (mode) {
++ case CAN_MANUAL:
++ restartMode = CAN_MANUAL;
++ IOH_DEBUG
++ ("ioh_can_set_restart_mode -> Value of variable "
++ " restartMode = 0. CAN_MANUAL mode set.\n");
++ break;
++
++ case CAN_AUTO:
++ restartMode = CAN_AUTO;
++ IOH_DEBUG
++ ("ioh_can_set_restart_mode -> Value of variable "
++ " restartMode = 1. CAN_AUTO mode set.\n");
++ break;
++
++ default:
++ IOH_DEBUG
++ ("ioh_can_set_restart_mode -> Invalid restart \
++ mode\n");
++ retval = IOH_CAN_FAIL;
++ break;
++ }
++ }
++
++ IOH_DEBUG("ioh_can_set_restart_mode returns %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_get_restart_mode(
++ int handle,enum ioh_can_auto_restart *mode)
++ @brief This function gets the restart mode of the CAN device.
++ @remarks Retrieves the currently set restart mode. The main tasks
++ performed by this function are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid returns the error status code.
++ - Copies the current restart mode from the
++ variable @ref restartMode
++ to the passed mode reference variable.
++
++ @param handle [@ref IN] The handle to the device.
++ @param mode [@ref OUT] Reference to current restart
++ mode.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_ioctl
++ - ioh_can_callback
++
++ <hr>
++*/
++int ioh_can_get_restart_mode(int handle,
++ enum ioh_can_auto_restart *mode)
++{
++ int retval = IOH_CAN_SUCCESS;
++
++ if ((handle == (int) 0) || (mode == NULL)) { /* if invalid
++ parameter. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_get_restart_mode -> Invalid parameter.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ if (CAN_AUTO == restartMode) { /* Auto restart mode */
++ *mode = CAN_AUTO;
++ IOH_DEBUG
++ ("ioh_can_get_restart_mode -> Mode CAN_AUTO. \n");
++ } else { /* Manual restart mode. */
++
++ *mode = CAN_MANUAL;
++ IOH_DEBUG
++ ("ioh_can_get_restart_mode -> Mode CAN_MANUAL. \n");
++ }
++ }
++
++ IOH_DEBUG("ioh_can_get_restart_mode returns: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_set_listen_mode(
++ int handle, enum ioh_can_listen_mode mode
++ @brief This function sets the listen/active mode of the CAN device.
++ @remarks Sets the listen mode of the CAN device. The main tasks
++ performed by this function are :
++ - Validates whether the passed arguments and
++ mode are valid.
++ If invalid returns the error status code.
++ - Depending on the obatined mode appropriately
++ set/reset the
++ Silent bit of the CANOPT register.
++ Active Mode: Reset Silent bit.
++ Silent Mode: Set Silent bit.
++
++ @param handle [@ref IN] The handle to the device.
++ @param mode [@ref OUT] The listen mode to be set.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_ioctl
++ - ioh_can_open
++
++ <hr>
++*/
++int ioh_can_set_listen_mode(int handle, enum ioh_can_listen_mode mode)
++{
++ int retval = IOH_CAN_SUCCESS;
++ u8 *can_baseaddress;
++
++ if (handle == (int) 0) { /* invalid handle. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_set_listen_mode -> Invalid Handle\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Obtaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ /* Setting for Bit3 of Can Extended function register for
++ appropriate mode.
++ Silent bit = 0 (Active mode)
++ Silent bit = 1 (Silent mode)
++ */
++ switch (mode) {
++ case IOH_CAN_LISTEN:
++ IOH_CAN_BIT_SET((can_baseaddress + CAN_CONT_OFFSET),
++ CAN_CTRL_OPT);
++ IOH_CAN_BIT_SET((can_baseaddress + CAN_OPT_OFFSET),
++ CAN_OPT_SILENT);
++ IOH_DEBUG
++ ("ioh_can_set_listen_mode -> IOH_CAN_LISTEN \
++ mode set.\n");
++ break;
++
++ case IOH_CAN_ACTIVE:
++ IOH_CAN_BIT_SET((can_baseaddress + CAN_CONT_OFFSET),
++ CAN_CTRL_OPT);
++ IOH_CAN_BIT_CLEAR((can_baseaddress + CAN_OPT_OFFSET),
++ CAN_OPT_SILENT);
++ IOH_DEBUG
++ ("ioh_can_set_listen_mode ->IOH_CAN_ACTIVE \
++ mode set.\n");
++ break;
++
++ default:
++ IOH_DEBUG
++ ("ioh_can_set_listen_mode ->Invalid listen mode\n");
++ retval = IOH_CAN_FAIL;
++ break;
++ }
++ }
++
++ IOH_DEBUG("ioh_can_set_listen_mode -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_get_listen_mode(
++ int handle,enum ioh_can_listen_mode *mode)
++ @brief This function gets the listen/active mode of the CAN device.
++ @remarks Retrieves the currently set listen mode of the CAN
++ device.
++ The main tasks performed by this function are :
++ - Validates whether the passed arguments are
++ valid.
++ - Depending on the Silent bit value of the
++ CANOPT register
++ appropriately copy the mode to mode reference
++ variable.
++ Silent bit 0: Active Mode.
++ Silent bit 1: Silent mode.
++
++ @param handle [@ref IN] The handle to the device.
++ @param mode [@ref OUT] Reference to the current listen
++ mode.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_ioctl
++ - ioh_can_suspend
++
++ <hr>
++*/
++int ioh_can_get_listen_mode(int handle, enum ioh_can_listen_mode *mode)
++{
++ u32 reg_val;
++ int retval = IOH_CAN_SUCCESS;
++ u8 *can_baseaddress;
++
++ if ((handle == (int) 0) || (mode == NULL)) { /* if invalid
++ parameters. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_get_listen_mode -> Invalid Parameter\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Attaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ reg_val = IOH_READ_LONG(can_baseaddress + CAN_OPT_OFFSET);
++
++ /* Checking for Bit3 of Can Extended function register
++ for silent mode
++ Silent bit = 0 (Active mode)
++ Silent bit = 1 (Silent mode)
++ */
++
++ if ((reg_val & CAN_OPT_SILENT) != 0) {
++ *mode = IOH_CAN_LISTEN;
++ IOH_DEBUG
++ ("ioh_can_get_listen_mode -> Mode is listen\n");
++ } else {
++ *mode = IOH_CAN_ACTIVE;
++ IOH_DEBUG
++ ("ioh_can_get_listen_mode -> Mode is active\n");
++ }
++ }
++
++ IOH_DEBUG("ioh_can_get_listen_mode -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_set_int_custom(
++ int handle, u32 interrupts
++ @brief This function sets which interrupts to enable.
++ @remarks Sets the specified interrupts. The maisn tasks
++ performed by this funation are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid return the error status code.
++ - Depending on the obtained value set/reset the
++ IE, SIE
++ and EIE bits of the CANCONT register.
++
++ @param handle [@ref IN] The handle to the device.
++ @param interrupts [@ref IN] The interrupts to be set.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_resume
++
++ <hr>
++*/
++int ioh_can_set_int_custom(int handle, u32 interrupts)
++{
++ int retval = IOH_CAN_SUCCESS;
++ u8 *can_baseaddress;
++
++ if (handle == (int) 0) { /* invalid handle. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_set_int_custom -> Invalid Handle.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Obtaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ /* Clearing the IE, SIE and EIE bits of Can control register. */
++ IOH_CAN_BIT_CLEAR((can_baseaddress + CAN_CONT_OFFSET),
++ CAN_CTRL_IE_SIE_EIE);
++
++ /* Appropriately setting them. */
++ IOH_CAN_BIT_SET((can_baseaddress + CAN_CONT_OFFSET),
++ ((interrupts & MSK_CTRL_IE_SIE_EIE) <<
++ BIT_SHIFT_ONE));
++ IOH_DEBUG("ioh_can_set_int_custom -> Interrupts set.\n");
++ }
++
++ IOH_DEBUG("ioh_can_set_int_custom -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_get_int_enables(
++ int handle,u32 *enables)
++ @brief This function retrieves interrupt enabled for the CAN
++ device.
++ @remarks Retrieves the currently enabled interrupts. The main
++ tasks performed by this function are ..
++ - Validates whether the passed arguments are
++ valid.
++ If invalid returns the error status code.
++ - Depending on the value of the IE, SIE and EIE
++ bits
++ of the CANCONT register, determine which
++ interrupts are
++ enabled and return information accordingly.
++ The interrupts
++ are enabled if the corresponding bit is set to
++ 1.
++
++ @param handle [@ref IN] The handle to the device.
++ @param enables [@ref OUT] The currently enabled
++ interrupts.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_suspend
++
++ <hr>
++*/
++int ioh_can_get_int_enables(int handle, u32 *enables)
++{
++ u32 reg_ctrl_val;
++ int retval = IOH_CAN_SUCCESS;
++ u8 *can_baseaddress;
++
++ if ((handle == (int) 0) || (enables == NULL)) { /* Invalid
++ parameters. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_get_int_enables -> Invalid Parameter.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Obtaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ /* Reading the Can control register. */
++ reg_ctrl_val = IOH_READ_LONG(can_baseaddress + CAN_CONT_OFFSET);
++
++ /* Obtaining the status of IE, SIE and EIE interrupt bits. */
++ *enables =
++ (((reg_ctrl_val & CAN_CTRL_IE_SIE_EIE) >> BIT_SHIFT_ONE));
++ IOH_DEBUG("ioh_can_get_int_enables -> Interrupt enabled "
++ "value: %d\n", *enables);
++ }
++
++ IOH_DEBUG("ioh_can_get_int_enables -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_set_int_enables(
++ int handle,enum ioh_can_interrupt interrupt)
++ @brief Sets interrupts.
++ @remarks This function enables the desired interrupts of the CAN
++ device.
++ The main tasks performed by this function are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid returns the error status code.
++ - Depending on the obtained enum constants the
++ EI, SIE and
++ EIE bits of the CANCONT register are set/reset.
++
++ @param handle [@ref IN] The handle to the device.
++ @param interrupt [@ref IN] The interrupt to be enabled.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_suspend
++ - ioh_can_open
++ - ioh_can_msg_tx
++
++ <hr>
++*/
++int ioh_can_set_int_enables(int handle,
++ enum ioh_can_interrupt interrupt_no)
++{
++ int retval = IOH_CAN_SUCCESS;
++ u8 *can_baseaddress;
++
++ if (handle == (int) 0) { /* invalid handle. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_set_int_enables -> Invalid Handle.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++
++ /* Obatining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ /*
++ Appropriately setting the IE, SIE and EIE bits of Can control
++ register.
++ */
++ switch (interrupt_no) {
++ case CAN_ENABLE:
++ IOH_CAN_BIT_SET((can_baseaddress + CAN_CONT_OFFSET),
++ CAN_CTRL_IE);
++ IOH_DEBUG
++ ("ioh_can_set_int_enables -> CAN_ENABLE (IE) \
++ interrupt set.\n");
++ break;
++
++ case CAN_DISABLE:
++ IOH_CAN_BIT_CLEAR((can_baseaddress + CAN_CONT_OFFSET),
++ CAN_CTRL_IE);
++ IOH_DEBUG
++ ("ioh_can_set_int_enables -> CAN_DIABLE (IE) \
++ interrupt reset.\n");
++ break;
++
++ case CAN_ALL:
++ IOH_CAN_BIT_SET((can_baseaddress + CAN_CONT_OFFSET),
++ CAN_CTRL_IE_SIE_EIE);
++ IOH_DEBUG
++ ("ioh_can_set_int_enables -> CAN_ALL (IE,SIE,EIE) \
++ interrupt set.\n");
++ break;
++
++ case CAN_NONE:
++ IOH_CAN_BIT_CLEAR((can_baseaddress + CAN_CONT_OFFSET),
++ CAN_CTRL_IE_SIE_EIE);
++ IOH_DEBUG
++ ("ioh_can_set_int_enables -> CAN_NONE (IE,SIE,EIE) \
++ interrupt reset.\n");
++ break;
++
++ default:
++ IOH_DEBUG
++ ("ioh_can_set_int_enables -> Invalid parameter \
++ interrupt.\n");
++ retval = IOH_CAN_FAIL;
++ break;
++ }
++ }
++
++ IOH_DEBUG("ioh_can_set_int_enables -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn static int ioh_can_rx_enable_all(int handle)
++ @brief This function enables all receive message objects of the CAN
++ device.
++ @remarks Enables all the receive message objects. The main tasks
++ performed by this function are :
++ - Validates whether the passed arguments are
++ valid. If invalid returns the error status
++ code.
++ - Enables all the message objects which are
++ configured as receive objects by invoking
++ @ref ioh_can_set_rx_enable.
++ - If enabling of at least one message object is
++ successful,
++ it returns success status code.
++
++
++ @param handle [@ref IN] The handle to the device.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_open
++ - ioh_can_callback
++
++ <hr>
++*/
++static int ioh_can_rx_enable_all(int handle)
++{
++ u32 counter = 0;
++ int retval = IOH_CAN_SUCCESS;
++ u32 i;
++
++ if (handle == (int) 0) { /* invalid handle. */
++ IOH_LOG(KERN_ERR, "ioh_can_rx_enable_all -> Invalid Handle.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Traversing to obtain the object configured as receivers. */
++ for (i = 0; i < (ioh_can_tx_buf_size + ioh_can_rx_buf_size);
++ i++) {
++ if (ioh_msg_obj_conf[i] == MSG_OBJ_RX) {
++ /* Here i is the index, however (i+1) is object
++ number. */
++ retval =
++ ioh_can_set_rx_enable(handle, (i + 1),
++ ENABLE);
++
++ if (IOH_CAN_FAIL == retval) {
++ IOH_DEBUG
++ ("ioh_can_rx_enable_all -> Cannot "
++ "Enable receive object%u\n",
++ i + 1);
++ counter++;
++ } else {
++ IOH_DEBUG("ioh_can_rx_enable_all -> "
++ "Enabled receive object %u \n",
++ i + 1);
++ }
++ }
++ }
++
++ /* If enabling of all the receive object failed. */
++ if (counter == ioh_can_rx_buf_size) {
++ retval = IOH_CAN_FAIL;
++ IOH_LOG(KERN_ERR, "ioh_can_rx_enable_all failed. \n");
++ }
++ }
++
++ IOH_DEBUG("ioh_can_rx_enable_all -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_rx_disable_all(int handle)
++ @brief This function disables all receive message objects of the
++ CAN device.
++ @remarks Disables all the receive message object. The main tasks
++ performed by this function are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid error status code is returned.
++ - Disables all the receive message objects by
++ invoking
++ @ref ioh_can_set_rx_enable.
++
++ @param handle [@ref IN] The handle to the device.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_suspend
++ - ioh_can_callback
++
++ <hr>
++*/
++int ioh_can_rx_disable_all(int handle)
++{
++ u32 counter = 0;
++ int retval = IOH_CAN_SUCCESS;
++ u32 i;
++
++ if (handle == (int) 0) { /* invalid handle. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_rx_disable_all -> Invalid Handle.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Traversing to obtain the object configured as receivers. */
++ for (i = 0; i < (ioh_can_rx_buf_size + ioh_can_tx_buf_size);
++ i++) {
++ if (ioh_msg_obj_conf[i] == MSG_OBJ_RX) {
++ /* Here i is the index, however (i+1) is the
++ object number. */
++ retval =
++ ioh_can_set_rx_enable(handle, (i + 1),
++ DISABLE);
++
++ if (IOH_CAN_FAIL == retval) {
++ IOH_DEBUG("ioh_can_rx_disable_all -> \
++ Disabling of Rx " \
++ "buffer %u failed.\n", (i + 1));
++ counter++;
++ } else {
++ IOH_DEBUG("ioh_can_rx_disable_all -> \
++ Disabled receive " \
++ "object %u \n", i + 1);
++ }
++ }
++ }
++
++ /* If disabling of all the receive object failed. */
++ if (counter == ioh_can_rx_buf_size) {
++ retval = IOH_CAN_FAIL;
++ IOH_LOG(KERN_ERR, "ioh_can_rx_disable_all failed. \n");
++ }
++ }
++
++ IOH_DEBUG("ioh_can_rx_disable_all -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn static int ioh_can_tx_enable_all(int handle)
++ @brief This function enables all transmit buffers of the CAN
++ device.
++ @remarks Enables all the transmit message object. The main tasks
++ performed by this function are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid returns the error status code.
++ - Enables all the message objects configured as
++ transmit
++ objects by invoking @ref ioh_can_set_tx_enable.
++ - If enabling of at least on message object is
++ successful,
++ it returns success status code.
++
++ @param handle [@ref IN] The handle to the device.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_open
++ - ioh_can_callback
++
++ <hr>
++*/
++static int ioh_can_tx_enable_all(int handle)
++{
++ u32 counter = 0;
++ int retval = IOH_CAN_SUCCESS;
++ u32 i;
++
++ if (handle == (int) 0) { /* invalid handle. */
++ IOH_LOG(KERN_ERR, "ioh_can_tx_enable_all -> Invalid Handle.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Traversing to obtain the object configured as transmit
++ object. */
++ for (i = 0; i < (ioh_can_tx_buf_size + ioh_can_rx_buf_size);
++ i++) {
++ if (ioh_msg_obj_conf[i] == MSG_OBJ_TX) {
++ /* Here i denotes the index, however (i+1) is
++ the object number. */
++ retval =
++ ioh_can_set_tx_enable(handle, (i + 1),
++ ENABLE);
++
++ if (IOH_CAN_FAIL == retval) {
++ counter++;
++ IOH_DEBUG("ioh_can_tx_enable_all -> \
++ Cannot Enable " \
++ "transmit object %u\n", (i + 1));
++ } else {
++ IOH_DEBUG("ioh_can_tx_enable_all -> \
++ Enabled transmit " \
++ "object %u\n", (i + 1));
++ }
++ }
++ }
++
++ /* If enabling of all transmit object failed. */
++ if (counter == ioh_can_rx_buf_size) {
++ IOH_LOG(KERN_ERR, "ioh_can_tx_enable_all failed.\n");
++ retval = IOH_CAN_FAIL;
++ }
++
++ }
++
++ IOH_DEBUG("ioh_can_tx_enable_all -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_tx_disable_all(int handle)
++ @brief This function enables all transmit buffers of the CAN device.
++ @remarks Disables all the transmit message objects. The main
++ tasks
++ performed by this function are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid error status code is returned.
++ - Disables all the message object configured as
++ transmit
++ objects by invoking @ref ioh_can_set_tx_enable.
++
++ @param handle [@ref IN] The handle to the device.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_suspend
++ - ioh_can_callback
++
++ <hr>
++*/
++int ioh_can_tx_disable_all(int handle)
++{
++ u32 counter = 0;
++ int retval = IOH_CAN_SUCCESS;
++ u32 i;
++
++ if (handle == (int) 0) { /* invalid handle. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_tx_disable_all -> Invalid Handle.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Traversing to obtain the object configured as transmit
++ object. */
++ for (i = 0; i < (ioh_can_tx_buf_size + ioh_can_tx_buf_size);
++ i++) {
++ if (ioh_msg_obj_conf[i] == MSG_OBJ_TX) {
++ /* Here i denotes the index, however (i+1) is
++ the object number. */
++
++ /*Disabling. */
++ retval =
++ ioh_can_set_tx_enable(handle, (i + 1),
++ DISABLE);
++
++ if (IOH_CAN_FAIL == retval) {
++ IOH_DEBUG("ioh_can_tx_disable_all -> \
++ Disabling Tx " \
++ "buffer %u failed.\n", (i + 1));
++ counter++;
++ } else {
++ IOH_DEBUG("ioh_can_tx_disable_all -> \
++ Disabled transmit " \
++ "object %u \n", (i + 1));
++ }
++ }
++ }
++
++ /* If disabling of all the transmit object failed. */
++ if (counter == ioh_can_tx_buf_size) {
++ IOH_LOG(KERN_ERR, "ioh_can_tx_disable_all -> \
++ ioh_can_tx_disable_all failed.\n");
++ retval = IOH_CAN_FAIL;
++ }
++ }
++
++ IOH_DEBUG("ioh_can_tx_disable_all -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_rx_init_filter(
++ int handle,u32 buff_num)
++ @brief This function sets the receive filter for a receive buffer
++ of the CAN device.
++ @remarks Initializes the filters for a specific receive message
++ object.
++ The main tasks performed by this function are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid returns error status code.
++ - Initializes the default filter settings for
++ the
++ obtained receive object by invoking @ref
++ ioh_can_set_rx_filter.
++ @note The default filter settings is done so that the receive
++ object can receive frames with any IDs.
++
++ @param handle [@ref IN] The handle to the device.
++ @param buff_num [@ref IN] The message object number.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_open
++
++ <hr>
++*/
++int ioh_can_rx_init_filter(int handle, u32 buff_num)
++{
++ int retval = IOH_CAN_SUCCESS;
++ struct ioh_can_rx_filter filter;
++
++ if (handle == (int) 0) { /* if invalid handle. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_rx_init_filter -> Invalid Handle.\n");
++ retval = IOH_CAN_FAIL;
++ }
++ /* if invalid buffer number. */
++ else if ((ioh_msg_obj_conf[buff_num - 1] != MSG_OBJ_RX) ||
++ (buff_num > (ioh_can_tx_buf_size + ioh_can_rx_buf_size))) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_rx_init_filter -> Invalid buffer no:%d\n",
++ buff_num);
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Set all Rx filters to allow all msgs. */
++ filter.amr.id = (u32) 0;
++ filter.amr.id_ext = (u32) 0;
++
++ filter.aidr.id = (u32) 0;
++ filter.aidr.id_ext = (u32) 0;
++
++ filter.num = buff_num;
++ filter.umask = 1;
++
++ retval = ioh_can_set_rx_filter(handle, &filter);
++ }
++
++ IOH_DEBUG("ioh_can_rx_init_filter -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_set_rx_enable(
++ int handle,u32 buff_num,u32 set)
++ @brief This function enables or disables a particular receive
++ buffer of the CAN device.
++ @remarks Enables/Disables a specific receive message object. The
++ main
++ tasks performed by this function are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid error status code is returned.
++ - Depending on whether the message object has to
++ enabled/disabled
++ the MsgVal and the RxIE bit of the message
++ object are
++ set/reset.
++ To Enable :Set MsgVal and RxIE bits.
++ To Disable: Reset MsgVal and RxIE bits.
++
++ @param handle [@ref IN] The handle to the device.
++ @param buff_num [@ref IN] Message object number.
++ @param set [@ref IN] Enable/Disable flag.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_ioctl
++ - ioh_can_rx_enable_all
++ - ioh_can_rx_disable_all
++
++ <hr>
++*/
++int ioh_can_set_rx_enable(int handle, u32 buff_num, u32 set)
++{
++ u32 counter;
++ int retval = IOH_CAN_SUCCESS;
++ u8 *can_baseaddress = NULL;
++ u32 if1_creq;
++
++ if (handle == (int) 0) { /* invalid handle. */
++ IOH_LOG(KERN_ERR, "ioh_can_set_rx_enable -> Invalid Handle.\n");
++ retval = IOH_CAN_FAIL;
++ }
++ /* if invalid buffer number. */
++ else if ((ioh_msg_obj_conf[buff_num - 1] != MSG_OBJ_RX) ||
++ (buff_num > (ioh_can_tx_buf_size + ioh_can_rx_buf_size))) {
++ IOH_LOG(KERN_ERR, "ioh_can_set_rx_enable -> Message object %u "
++ "not configured for receive.\n", buff_num);
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Obtaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ /*Reading the receive buffer data from RAM to Interface1
++ registers */
++ IOH_WRITE_LONG(CAN_CMASK_RX_TX_GET,
++ (can_baseaddress + CAN_IF1_CMASK_OFFSET));
++ IOH_WRITE_LONG(buff_num,
++ (can_baseaddress + CAN_IF1_CREQ_OFFSET));
++
++ counter = COUNTER_LIMIT;
++ while (counter) {
++ if1_creq = \
++ (IOH_READ_LONG(can_baseaddress + CAN_IF1_CREQ_OFFSET))
++ & CAN_IF_CREQ_BUSY;
++ if (if1_creq == 0)
++ break;
++
++ counter--;
++ }
++
++
++ if ((counter == 0)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_set_rx_enable -> Cannot read "
++ "the message buffer object %u.\n", buff_num);
++ retval = IOH_CAN_FAIL;
++ }
++
++ else { /*Reading successful */
++
++ /*Setting the IF1MASK1 register to access MsgVal and
++ RxIE bits */
++ IOH_WRITE_LONG((CAN_CMASK_RDWR | CAN_CMASK_ARB |
++ CAN_CMASK_CTRL),
++ (can_baseaddress +
++ CAN_IF1_CMASK_OFFSET));
++
++ if (set == ENABLE) {
++ /*Setting the MsgVal and RxIE bits */
++ IOH_CAN_BIT_SET((can_baseaddress +
++ CAN_IF1_MCONT_OFFSET),
++ CAN_IF_MCONT_RXIE);
++ IOH_CAN_BIT_SET((can_baseaddress +
++ CAN_IF1_ID2_OFFSET),
++ CAN_ID_MSGVAL);
++
++ IOH_DEBUG
++ ("ioh_can_set_rx_enable -> Enabled receive "
++ "message buffer %u.\n", buff_num);
++ } else if (set == DISABLE) {
++ /*Resetting the MsgVal and RxIE bits */
++ IOH_CAN_BIT_CLEAR((can_baseaddress +
++ CAN_IF1_MCONT_OFFSET),
++ CAN_IF_MCONT_RXIE);
++ IOH_CAN_BIT_CLEAR((can_baseaddress +
++ CAN_IF1_ID2_OFFSET),
++ CAN_ID_MSGVAL);
++
++ IOH_DEBUG
++ ("ioh_can_set_rx_enable -> Disabled receive "
++ "message buffer %u", buff_num);
++ }
++
++ /*Updating the changes to the message object. */
++ IOH_WRITE_LONG(buff_num,
++ (can_baseaddress + CAN_IF1_CREQ_OFFSET));
++
++ /*Confirming the write by checking the busy bit. */
++ counter = COUNTER_LIMIT;
++ while (counter) {
++ if1_creq = \
++ (IOH_READ_LONG(
++ can_baseaddress + \
++ CAN_IF1_CREQ_OFFSET)) \
++ & CAN_IF_CREQ_BUSY;
++ if (if1_creq == 0)
++ break;
++
++ counter--;
++ }
++
++
++ if ((counter == 0)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_set_rx_enable -> Write failed.\n");
++ retval = IOH_CAN_FAIL;
++ }
++ }
++ }
++
++ IOH_DEBUG("ioh_can_set_rx_enable -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_get_rx_enable(
++ int handle,u32 buff_num,u32 *enable)
++ @brief This function gets the enable state of a receive buffer.
++ @remarks Retrieves the current state(Enabled/disabled) of a
++ receive message object.
++ The main tasks performed by this function are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid error status code is returned.
++ - Depending on the value of the MsgVal and the
++ RxIE bits, the
++ enable/disable status is determined and passed
++ to the function
++ calling this function.
++ MsgVal and RxIE bits set :
++ The receive message object is enabled.
++ MsgVal and RxIE bits reset:
++ The receive message object is disabled.
++
++ @param handle [@ref IN] The handle to the device.
++ @param buff_num [@ref IN] The message object number.
++ @param enable [@ref OUT] The reference to the
++ enable/disable flag.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_ioctl
++ - ioh_can_suspend
++
++ <hr>
++*/
++int ioh_can_get_rx_enable(int handle, u32 buff_num, u32 *enable)
++{
++ u8 *can_baseaddress;
++ int retval = IOH_CAN_SUCCESS;
++ u32 counter;
++ u32 if1_creq;
++
++ if ((handle == (int) 0) || (enable == NULL)) { /* invalid parameter. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_get_rx_enable -> Invalid Parameter.\n");
++ retval = IOH_CAN_FAIL;
++ }
++ /* Invalid buffer number. */
++ else if ((ioh_msg_obj_conf[buff_num - 1] != MSG_OBJ_RX) ||
++ (buff_num > (ioh_can_tx_buf_size + ioh_can_rx_buf_size))) {
++ IOH_LOG(KERN_ERR, "ioh_can_get_rx_enable -> Message object %u "
++ "not configured for receive.\n", buff_num);
++ retval = IOH_CAN_FAIL;
++ }
++
++ else {
++ /* Obtaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ IOH_WRITE_LONG(CAN_CMASK_RX_TX_GET,
++ (can_baseaddress + CAN_IF1_CMASK_OFFSET));
++ IOH_WRITE_LONG(buff_num,
++ (can_baseaddress + CAN_IF1_CREQ_OFFSET));
++
++ counter = COUNTER_LIMIT;
++ while (counter) {
++ if1_creq = \
++ (IOH_READ_LONG((can_baseaddress + CAN_IF1_CREQ_OFFSET)))
++ & CAN_IF_CREQ_BUSY;
++ if (if1_creq == 0)
++ break;
++
++ counter--;
++ }
++
++ if (counter == 0) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_get_rx_enable -> Read Failed.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ if (((IOH_READ_LONG
++ (can_baseaddress +
++ CAN_IF1_ID2_OFFSET)) & CAN_ID_MSGVAL)
++ &&
++ ((IOH_READ_LONG
++ (can_baseaddress +
++ CAN_IF1_MCONT_OFFSET)) & CAN_IF_MCONT_RXIE)) {
++ *enable = ENABLE;
++
++ IOH_DEBUG
++ ("ioh_can_get_rx_enable -> Receive message "
++ "buffer %u is enabled.\n", buff_num);
++ } else {
++ *enable = DISABLE;
++
++ IOH_DEBUG
++ ("ioh_can_get_rx_enable -> Receive Message "
++ "buffer %u is disabled.\n", buff_num);
++ }
++ }
++
++ }
++
++ IOH_DEBUG("ioh_can_get_rx_enable -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_set_tx_enable(
++ int handle, u32 buff_num,u32 set)
++ @brief This function enables/disables a transmit message object.
++ @remarks Enables/Disables a specific transmit message object. The
++ main tasks performed by this function are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid error status code is returned.
++ - Depending whether to enable/disable the
++ transmit object the
++ MsgVal and TxIE bits are set/reset.
++ To Enable : Set the MsgVal and TxIE
++ bits.
++ To disable: Reset the MsgVal and
++ RxIE bits.
++
++ @param handle [@ref IN] The handle to the device.
++ @param buff_num [@ref IN] The message object number.
++ @param set [@ref IN] Enable/Disable flag.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_ioctl
++ - ioh_can_tx_enable_all
++ - ioh_can_tx_disable_all
++
++ <hr>
++*/
++int ioh_can_set_tx_enable(int handle, u32 buff_num, u32 set)
++{
++ u8 *can_baseaddress;
++ int retval = IOH_CAN_SUCCESS;
++ u32 counter;
++ u32 if1_creq;
++
++ if (handle == (int) 0) { /* invalid handle. */
++ IOH_LOG(KERN_ERR, "ioh_can_set_tx_enable -> Invalid Handle");
++ retval = IOH_CAN_FAIL;
++ }
++ /* invalid buffer number. */
++ else if ((ioh_msg_obj_conf[buff_num - 1] != MSG_OBJ_TX) ||
++ (buff_num > (ioh_can_rx_buf_size + ioh_can_tx_buf_size))) {
++ IOH_LOG(KERN_ERR, "ioh_can_set_tx_enable -> Message object %u "
++ "not configured for transmit.\n", buff_num);
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Obtaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ /*Reading the Message buffer from Message RAM to Interface2
++ registers. */
++ IOH_WRITE_LONG(CAN_CMASK_RX_TX_GET,
++ (can_baseaddress + CAN_IF1_CMASK_OFFSET));
++ IOH_WRITE_LONG(buff_num,
++ (can_baseaddress + CAN_IF1_CREQ_OFFSET));
++
++ counter = COUNTER_LIMIT;
++ while (counter) {
++ if1_creq = \
++ (IOH_READ_LONG(can_baseaddress + CAN_IF1_CREQ_OFFSET)) &
++ CAN_IF_CREQ_BUSY;
++ if (if1_creq == 0)
++ break;
++
++ counter--;
++ }
++
++ if ((counter == 0)) {
++ IOH_LOG(KERN_ERR, "ioh_can_set_tx_enable -> Reading "
++ "transmit buffer failed.\n");
++ retval = IOH_CAN_FAIL;
++ } else { /*Reading successful. */
++
++ /*Setting the IF2CMASK register for accessing the MsgVal
++ and TxIE bits */
++ IOH_WRITE_LONG((CAN_CMASK_RDWR | CAN_CMASK_ARB |
++ CAN_CMASK_CTRL),
++ (can_baseaddress +
++ CAN_IF1_CMASK_OFFSET));
++
++ if (set == ENABLE) {
++ /*Setting the MsgVal and TxIE bits */
++ IOH_CAN_BIT_SET((can_baseaddress +
++ CAN_IF1_MCONT_OFFSET),
++ CAN_IF_MCONT_TXIE);
++ IOH_CAN_BIT_SET((can_baseaddress +
++ CAN_IF1_ID2_OFFSET),
++ CAN_ID_MSGVAL);
++
++ IOH_DEBUG
++ ("ioh_can_set_tx_enable -> Enabled transmit "
++ "message buffer %u\n", buff_num);
++ } else if (set == DISABLE) {
++ /*Resetting the MsgVal and TxIE bits. */
++ IOH_CAN_BIT_CLEAR((can_baseaddress +
++ CAN_IF1_MCONT_OFFSET),
++ CAN_IF_MCONT_TXIE);
++ IOH_CAN_BIT_CLEAR((can_baseaddress +
++ CAN_IF1_ID2_OFFSET),
++ CAN_ID_MSGVAL);
++
++ IOH_DEBUG
++ ("ioh_can_set_tx_enable -> Disabled transmit "
++ "message buffer %u\n", buff_num);
++ }
++
++ /*Updating the changes to the message buffer. */
++ IOH_WRITE_LONG(buff_num,
++ (can_baseaddress + CAN_IF1_CREQ_OFFSET));
++
++ /*Confirming the update. */
++ counter = COUNTER_LIMIT;
++ while (counter) {
++ if1_creq = \
++ (IOH_READ_LONG(
++ can_baseaddress + CAN_IF1_CREQ_OFFSET))
++ & CAN_IF_CREQ_BUSY;
++ if (if1_creq == 0)
++ break;
++
++ counter--;
++ }
++
++ if ((counter == 0)) { /*Update failed. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_set_tx_enable -> Write failed.\n");
++ retval = IOH_CAN_FAIL;
++ }
++ }
++ }
++
++ IOH_DEBUG("ioh_can_set_tx_enable -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_get_tx_enable(
++ int handle,u32 buff_num,u32 *enable)
++ @brief This function gets the enable/disable status of a transmit
++ buffer.
++ @remarks Retrieves the current state(enabled/disabled) of a
++ transmit message object.
++ The main tasks performed by this function are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid error status code is returned.
++ - Depending on the value of the MsgVal and the
++ TxIE bits, the
++ enable/disable status of the message object is
++ determined and
++ passed to the function calling this function.
++ MsgVal and TxIE bits set -->
++ The transmit object is enabled.
++ MsgVal and TxIE bit reset-->
++ The transmit object is disabled.
++
++ @param handle [@ref IN] The handle to the device.
++ @param buff_num [@ref IN] The message object number.
++ @param enable [@ref OUT] The reference to the
++ enable/disable flag.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_ioctl
++ - ioh_can_suspend
++
++ <hr>
++*/
++int ioh_can_get_tx_enable(int handle, u32 buff_num, u32 *enable)
++{
++ u8 *can_baseaddress;
++ int retval = IOH_CAN_SUCCESS;
++ u32 counter;
++ u32 if1_creq;
++
++ if ((handle == (int) 0) || (enable == NULL)) { /* invalid
++ parameters. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_get_tx_enable -> Invalid Parameter.\n");
++ retval = IOH_CAN_FAIL;
++ }
++ /* invalid buffer number. */
++ else if ((ioh_msg_obj_conf[buff_num - 1] != MSG_OBJ_TX) ||
++ (buff_num > (ioh_can_rx_buf_size + ioh_can_tx_buf_size))) {
++ IOH_LOG(KERN_ERR, "ioh_can_get_tx_enable -> Invalid Message "
++ "object %u.\n", buff_num);
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Obtaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ IOH_WRITE_LONG(CAN_CMASK_RX_TX_GET,
++ (can_baseaddress + CAN_IF1_CMASK_OFFSET));
++ IOH_WRITE_LONG(buff_num,
++ (can_baseaddress + CAN_IF1_CREQ_OFFSET));
++
++ counter = COUNTER_LIMIT;
++ while (counter) {
++ if1_creq = \
++ (IOH_READ_LONG(can_baseaddress + CAN_IF1_CREQ_OFFSET)) &
++ CAN_IF_CREQ_BUSY;
++ if (if1_creq == 0)
++ break;
++
++ counter--;
++ }
++
++ if (counter == 0) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_get_tx_enable -> Read Failed.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ if (((IOH_READ_LONG
++ (can_baseaddress +
++ CAN_IF1_ID2_OFFSET)) & CAN_ID_MSGVAL)
++ &&
++ ((IOH_READ_LONG
++ (can_baseaddress +
++ CAN_IF1_MCONT_OFFSET)) & CAN_IF_MCONT_TXIE)) {
++ *enable = ENABLE;
++
++ IOH_DEBUG
++ ("ioh_can_get_rx_enable -> Transmit message "
++ "buffer %u is enabled.\n", buff_num);
++ } else {
++ *enable = DISABLE;
++
++ IOH_DEBUG
++ ("ioh_can_get_rx_enable -> Transmit message "
++ "buffer %u is disabled.\n", buff_num);
++ }
++ }
++ }
++
++ IOH_DEBUG("ioh_can_get_tx_enable -> Return value: %d.\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_int_pending(int handle)
++ @brief This function returns whether or not interrupts are pending
++ for the CAN device.
++ @remarks Retrieves the pending interrupts. The main tasks
++ performed by this function are :
++ - Validates whether the passed arguments are
++ valid. If
++ invalid error status code is returned.
++ - Reads the value of the CANINT register and
++ returns it to the function calling this
++ function.
++
++ @param handle [@ref IN] The handle to the device.
++
++ @retval int
++ - 0 --> No interrupts are pending
++ - >0 --> Interrupts are pending.
++ - @ref IOH_CAN_FAIL --> Operation failed.
++
++ @see
++ - ioh_can_handler
++
++ <hr>
++*/
++int ioh_can_int_pending(int handle)
++{
++ int retval = IOH_CAN_SUCCESS;
++ u8 *can_baseaddress;
++
++ if (handle == (int) 0) { /* invalid handle. */
++ IOH_LOG(KERN_ERR, "ioh_can_int_pending -> Invalid Handle.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Obtaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ retval = (IOH_READ_LONG(can_baseaddress + CAN_INT_OFFSET) &
++ MSK_ALL_SIXTEEN);
++ }
++
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_set_baud_simple(
++ int handle, enum ioh_can_baud baud)
++ @brief This function sets the baud rate of the CAN device.
++ @remarks Sets the Timing baud settings of the CAN device. The
++ main tasks performed by this function are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid error status code is returned.
++ - Depending on the baud rate passed and the
++ clock rate
++ the CANBITT and CANBRPE register values are
++ determined and written to these registers.
++
++ @param handle [@ref IN] The handle to the device.
++ @param baud [@ref IN] The baud settings to be done.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_ioctl
++ - ioh_can_open
++
++ <hr>
++*/
++int ioh_can_set_baud_simple(int handle, enum ioh_can_baud baud)
++{
++ u32 reg_val;
++ int retval = IOH_CAN_SUCCESS;
++ u8 *can_baseaddress;
++
++ if (handle == (int) 0) { /* invalid handle. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_set_baud_simple -> Invalid Handle.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Obtaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ u32 offset;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ /* Setting the CCE bit of Can control register for accessing the
++ Timing Register. */
++ IOH_CAN_BIT_SET((can_baseaddress + CAN_CONT_OFFSET),
++ CAN_CTRL_CCE);
++
++ /* Calculating the offset of the settings array for the current
++ clock. */
++ switch (ioh_can_clock) {
++ case 62500:
++ offset = IOH_CAN_CLOCK_62_5_OFFSET;
++ break;
++
++ case 24000:
++ offset = IOH_CAN_CLOCK_24_OFFSET;
++ break;
++
++ case 50000:
++ offset = IOH_CAN_CLOCK_50_OFFSET;
++ break;
++
++ /* The default section will not be invoked since
++ the clock frequency
++ has been validated at the module init procedure.
++ */
++ default:
++ offset = IOH_CAN_CLOCK_DEFAULT_OFFSET;
++ break;
++ }
++
++ /* Getting the appropriate register value. */
++ reg_val =
++ (((can_rec_timing[baud + offset].
++ cfg_bitrate & MSK_BITT_BRP) << BIT_BITT_BRP) |
++ (can_rec_timing[baud + offset].
++ cfg_tseg1 << BIT_BITT_TSEG1) | (can_rec_timing[baud +
++ offset].
++ cfg_tseg2 <<
++ BIT_BITT_TSEG2) |
++ (can_rec_timing[baud + offset].cfg_sjw << BIT_BITT_SJW));
++
++ /* Writing to Can Timing register. */
++ IOH_WRITE_LONG(reg_val, (can_baseaddress + CAN_BITT_OFFSET));
++ /* Writing to the CAN BRP register. */
++ IOH_WRITE_LONG(((can_rec_timing[baud + offset].
++ cfg_bitrate & MSK_BRPE_BRPE) >> BIT_BRPE_BRPE),
++ (can_baseaddress + CAN_BRPE_OFFSET));
++
++ /* Resetting the CCE bit of the Can control register. */
++ IOH_CAN_BIT_CLEAR((can_baseaddress + CAN_CONT_OFFSET),
++ CAN_CTRL_CCE);
++
++ IOH_DEBUG("ioh_can_set_baud_simple -> Timing Baud set.\n");
++ IOH_DEBUG("cfg_bitrate : %u\n",
++ can_rec_timing[baud + offset].cfg_bitrate);
++ IOH_DEBUG("cfg_tseg1 : %u\n",
++ can_rec_timing[baud + offset].cfg_tseg1);
++ IOH_DEBUG("cfg_tseg2 : %u\n",
++ can_rec_timing[baud + offset].cfg_tseg2);
++ IOH_DEBUG("cfg_sjw : %u\n",
++ can_rec_timing[baud + offset].cfg_sjw);
++ }
++
++ IOH_DEBUG("ioh_can_set_baud_simple -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_set_baud_custom(
++ int handle,struct ioh_can_timing *timing)
++ @brief This function sets a custom baud rate for the CAN device.
++ @remarks Sets the user specified Timing baud settings. The main
++ tasks
++ performed by this function are :
++ - Validates whether the passed arguments are
++ valid. If
++ invalid error status code is returned.
++ - Depending on the user provided settings the
++ CANBITT and CANBRPE
++ register values are determined and written to
++ these registers.
++
++ @param handle [@ref IN] The handle to the device.
++ @param timing [@ref IN] The reference to the timing
++ settings.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_ioctl
++ - ioh_can_resume
++
++ <hr>
++*/
++int ioh_can_set_baud_custom(int handle, struct ioh_can_timing *timing)
++{
++ u32 reg_val;
++ int retval = IOH_CAN_SUCCESS;
++ u8 *can_baseaddress;
++
++ if ((handle == (int) 0) || (timing == NULL)) { /* invalid parameter. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_set_baud_custom -> Invalid parameter.\n");
++ retval = IOH_CAN_FAIL;
++ }
++ /* max is MAX_BITRATE */
++ else if (timing->bitrate > MAX_BITRATE) {
++ IOH_LOG(KERN_ERR, "ioh_can_set_baud_custom -> Bit rate %x "
++ "is invalid.\n", timing->bitrate);
++ retval = IOH_CAN_FAIL;
++ }
++
++ else {
++ /* Obtaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ /* Setting the CCE bit of Can control register for accessing the
++ Can Timing register. */
++ IOH_CAN_BIT_SET((can_baseaddress + CAN_CONT_OFFSET),
++ CAN_CTRL_CCE);
++
++ /* Obtaining the appropriate register value. */
++ reg_val =
++ (((timing->
++ cfg_bitrate & MSK_BITT_BRP) << BIT_BITT_BRP) | (timing->
++ cfg_tseg1
++ <<
++ BIT_BITT_TSEG1)
++ | (timing->cfg_tseg2 << BIT_BITT_TSEG2) | (timing->
++ cfg_sjw <<
++ BIT_BITT_SJW));
++
++ /* Writing to the timing register. */
++ IOH_WRITE_LONG(reg_val, (can_baseaddress + CAN_BITT_OFFSET));
++ /* Writing to the BRP register. */
++ IOH_WRITE_LONG(((timing->
++ cfg_bitrate & MSK_BRPE_BRPE) >> BIT_BRPE_BRPE),
++ (can_baseaddress + CAN_BRPE_OFFSET));
++
++ /* Resetting the CCE bit. */
++ IOH_CAN_BIT_CLEAR((can_baseaddress + CAN_CONT_OFFSET),
++ CAN_CTRL_CCE);
++
++ IOH_DEBUG("ioh_can_set_baud_custom -> Timing baud set.\n");
++ IOH_DEBUG("cfg_bitrate : %u\n", timing->cfg_bitrate);
++ IOH_DEBUG("cfg_tseg1 : %u\n", timing->cfg_tseg1);
++ IOH_DEBUG("cfg_tseg2 : %u\n", timing->cfg_tseg2);
++ IOH_DEBUG("cfg_sjw : %u\n", timing->cfg_sjw);
++ }
++
++ IOH_DEBUG("ioh_can_set_baud_custom -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_get_baud(
++ int handle,struct ioh_can_timing *timing)
++ @brief This function gets the baud rate for the CAN device.
++ @remarks Retrieves the currently set timing baud settings. The
++ main tasks
++ performed by this function are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid error status code is returned.
++ - Depending on the CANBITT and CANBRPE register
++ values
++ the different fields of the timing structure is
++ filled
++ and returned to the function calling this
++ function.
++
++ @param handle [@ref IN] The handle to the device.
++ @param timing [@ref OUT] The reference to the timing baud
++ settings.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_ioctl
++ - ioh_can_suspend
++
++ <hr>
++*/
++int ioh_can_get_baud(int handle, struct ioh_can_timing *timing)
++{
++ u32 timing_bitt_reg;
++ u32 timing_brpe_reg;
++ int retval = IOH_CAN_SUCCESS;
++ u8 *can_baseaddress;
++
++ if ((handle == (int) 0) || (timing == NULL)) { /* invalid
++ parameters. */
++ IOH_LOG(KERN_ERR, "ioh_can_get_baud -> Invalid Parameter.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Obtaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ timing_bitt_reg =
++ IOH_READ_LONG(can_baseaddress + CAN_BITT_OFFSET);
++ timing_brpe_reg =
++ IOH_READ_LONG(can_baseaddress + CAN_BRPE_OFFSET);
++
++ /* Separating the individual part from the values read. */
++ timing->cfg_bitrate = ((timing_bitt_reg & MSK_BITT_BRP) |
++ ((timing_brpe_reg & MSK_BRPE_GET) <<
++ BIT_BRPE_BRPE));
++ timing->cfg_tseg1 =
++ (timing_bitt_reg & MSK_BITT_TSEG1) >> BIT_BITT_TSEG1;
++ timing->cfg_tseg2 =
++ (timing_bitt_reg & MSK_BITT_TSEG2) >> BIT_BITT_TSEG2;
++ timing->cfg_sjw =
++ (timing_bitt_reg & MSK_BITT_SJW) >> BIT_BITT_SJW;
++
++ IOH_DEBUG
++ ("ioh_can_get_baud -> The timing structure filled \
++ successfully.\n");
++ IOH_DEBUG("cfg_bitrate : %u\n", timing->cfg_bitrate);
++ IOH_DEBUG("cfg_tseg1 : %u\n", timing->cfg_tseg1);
++ IOH_DEBUG("cfg_tseg2 : %u\n", timing->cfg_tseg2);
++ IOH_DEBUG("cfg_sjw : %u\n", timing->cfg_sjw);
++ }
++
++ IOH_DEBUG("ioh_can_get_baud -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_set_rx_filter(
++ int handle,struct ioh_can_rx_filter *filter)
++ @brief This function sets the receive filter for a receive buffer
++ of the CAN device.
++ @remarks Sets the receive filter for a specific receive message
++ object.
++ The main tasks performed by this function are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid error status code is returned.
++ - Depending on the obtained filter settings, the
++ acceptance
++ filter settings of the receive message object is
++ updated.
++
++ @param handle [@ref IN] The handle to the device.
++ @param filter [@ref IN] the reference to the filter
++ settings
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_ioctl
++ - ioh_can_rx_init_filter
++ - ioh_can_resume
++
++ <hr>
++*/
++int ioh_can_set_rx_filter(int handle, struct ioh_can_rx_filter *filter)
++{
++ u32 reg1;
++ u32 reg2;
++ u32 counter;
++ u8 *can_baseaddress = NULL;
++ int retval = IOH_CAN_SUCCESS;
++ u32 if1_creq;
++
++ if ((handle == (int) 0) || (filter == NULL)) { /* invalid
++ parameters. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_set_rx_filter -> Invalid Parameter.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Obtaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ IOH_WRITE_LONG(CAN_CMASK_RX_TX_GET, \
++ (can_baseaddress + CAN_IF1_CMASK_OFFSET));
++ /*Setting the CMASK for reading */
++ IOH_WRITE_LONG(filter->num, \
++ (can_baseaddress + CAN_IF1_CREQ_OFFSET));
++ /*Setting CREQ to specified Msg Obj. */
++
++ /*Confirming the read completion. */
++ counter = COUNTER_LIMIT;
++ while (counter) {
++ if1_creq = \
++ (IOH_READ_LONG(can_baseaddress + CAN_IF1_CREQ_OFFSET)) &
++ CAN_IF_CREQ_BUSY;
++ if (if1_creq == 0)
++ break;
++
++ counter--;
++ }
++
++ if ((counter == 0)) { /*Read Unsuccessful. */
++ IOH_LOG(KERN_ERR, "ioh_can_set_rx_filter -> Reading "
++ "of message buffer %u failed.\n", filter->num);
++ retval = IOH_CAN_FAIL;
++ } else { /*read successful. */
++
++ IOH_CAN_BIT_CLEAR((can_baseaddress + \
++ CAN_IF1_ID2_OFFSET), \
++ MSK_ALL_THIRTEEN);
++ /*Clearing the bit 0- 12 of ID2 */
++ IOH_CAN_BIT_CLEAR((can_baseaddress + \
++ CAN_IF1_ID2_OFFSET), \
++ CAN_ID2_XTD);
++ /*Clearing XTD bit */
++
++ if ((filter->aidr.id_ext == 1)) { /*Extended ID */
++ reg1 = filter->aidr.id & MSK_ALL_SIXTEEN;
++ /*ID1 value. */
++ /*ID2 value with XTD bit set. */
++ reg2 =
++ (((filter->aidr.
++ id & (MSK_ALL_THIRTEEN <<
++ BIT_SHIFT_SIXTEEN))
++ >> BIT_SHIFT_SIXTEEN) | CAN_ID2_XTD);
++ } else { /*Standard ID */
++
++ reg1 = 0; /*ID1 value */
++ reg2 = ((filter->aidr.id & MSK_ALL_ELEVEN) << \
++ BIT_SHIFT_TWO); /*ID2 value */
++ }
++
++ IOH_WRITE_LONG(reg1, (can_baseaddress + \
++ CAN_IF1_ID1_OFFSET)); /*Writing ID1 */
++ IOH_CAN_BIT_SET((can_baseaddress + \
++ CAN_IF1_ID2_OFFSET), reg2); /*Writing ID2 */
++
++ if (filter->umask == 1) {
++ /*If mask has to be set. */
++ IOH_CAN_BIT_CLEAR((can_baseaddress + \
++ CAN_IF1_MASK2_OFFSET), MSK_ALL_THIRTEEN);
++ /*Clearing bit 0-12 */
++ IOH_CAN_BIT_CLEAR((can_baseaddress + \
++ CAN_IF1_MASK2_OFFSET), CAN_MASK2_MDIR_MXTD);
++ /*Clearing Mdir & MXtd */
++
++ if (filter->amr.id_ext == 1) {
++ /*Extended Mask */
++ reg1 = filter->amr.id & MSK_ALL_SIXTEEN;
++ /*Mask1 value */
++ /*Mask2 value with MXtd set */
++ reg2 =
++ (((filter->amr.
++ id & (MSK_ALL_THIRTEEN <<
++ BIT_SHIFT_SIXTEEN))
++ >> BIT_SHIFT_SIXTEEN) |
++ CAN_IF_MASK2_MXTD);
++ } else {
++ reg1 = 0; /*Mask1 value */
++ reg2 = ((filter->amr.id & \
++ MSK_ALL_ELEVEN) << BIT_SHIFT_TWO);
++ /*Mask2 Value */
++ }
++
++ IOH_WRITE_LONG(reg1, \
++ (can_baseaddress + CAN_IF1_MASK1_OFFSET));
++ /*Writing MASK1 */
++ IOH_CAN_BIT_SET((can_baseaddress + \
++ CAN_IF1_MASK2_OFFSET), reg2);
++ /*Writing MASK2 */
++ IOH_CAN_BIT_SET((can_baseaddress + \
++ CAN_IF1_MCONT_OFFSET), CAN_IF_MCONT_UMASK);
++ /*Setting Umask bit */
++ } else {
++ IOH_CAN_BIT_CLEAR((can_baseaddress + \
++ CAN_IF1_MCONT_OFFSET), CAN_IF_MCONT_UMASK);
++ /*Resetting Umask bit. */
++ }
++
++ /*Setting CMASK for writing */
++ IOH_WRITE_LONG((CAN_CMASK_RDWR | CAN_CMASK_MASK |
++ CAN_CMASK_ARB | CAN_CMASK_CTRL),
++ (can_baseaddress +
++ CAN_IF1_CMASK_OFFSET));
++ IOH_WRITE_LONG(filter->num, (can_baseaddress + \
++ CAN_IF1_CREQ_OFFSET));
++ /*Setting CREQ for specified Msg Obj. */
++
++ /*Confirming the write completion. */
++ counter = COUNTER_LIMIT;
++ while (counter) {
++ if1_creq = \
++ (IOH_READ_LONG
++ (can_baseaddress + CAN_IF1_CREQ_OFFSET))
++ & CAN_IF_CREQ_BUSY;
++ if (if1_creq == 0)
++ break;
++
++ counter--;
++ }
++
++ if (counter == 0) { /*Write failed */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_set_rx_filter -> \
++ Write failed.\n");
++ retval = IOH_CAN_FAIL;
++ } else { /*Write successful. */
++
++ IOH_DEBUG
++ ("ioh_can_set_rx_filter -> Filter set \
++ successful "
++ "for message object %u.\n", filter->num);
++ }
++ }
++ }
++
++ IOH_DEBUG("ioh_can_set_rx_filter -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_set_rx_buffer_link(
++ int handle,u32 buffer_num,u32 set)
++ @brief This function sets receive buffer linking of the CAN device.
++ @remarks Enables/disables the buffer link for specific receive
++ buffer.
++ The main tasks performed by this function are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid error status code is returned.
++ - Depending on whether to enable/disable buffer
++ link the EOB
++ bit of the message object is set/reset.
++ Enable buffer link : Reset the EOB
++ bit.
++ Disable buffer link: Set the EOB bit.
++
++ @param handle [@ref IN] The handle to the device.
++ @param buffer_num [@ref IN] The message object number.
++ @param set [@ref IN] The enable/disable flag.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_ioctl
++ - ioh_can_resume
++
++ <hr>
++*/
++int ioh_can_set_rx_buffer_link(int handle, u32 buffer_num, u32 set)
++{
++ u32 counter;
++ int retval = IOH_CAN_SUCCESS;
++ u8 *can_baseaddress;
++ u32 if1_creq;
++
++ if (handle == (int) 0) { /* if invalid handle. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_set_rx_buffer_link -> Invalid handle.\n");
++ retval = IOH_CAN_FAIL;
++ }
++ /* invalid buffer number. */
++ else if ((ioh_msg_obj_conf[buffer_num - 1] != MSG_OBJ_RX)
++ || (buffer_num > (ioh_can_rx_buf_size + \
++ ioh_can_tx_buf_size))) {
++ IOH_LOG(KERN_ERR, "ioh_can_set_rx_buffer_link -> Invalid "
++ "buffer number %u.\n", buffer_num);
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Obtaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ /* Reading the corresponding object. */
++ IOH_WRITE_LONG(CAN_CMASK_RX_TX_GET,
++ (can_baseaddress + CAN_IF1_CMASK_OFFSET));
++ IOH_WRITE_LONG(buffer_num,
++ (can_baseaddress + CAN_IF1_CREQ_OFFSET));
++
++ counter = COUNTER_LIMIT;
++ while (counter) {
++ if1_creq = \
++ IOH_READ_LONG((can_baseaddress + CAN_IF1_CREQ_OFFSET)) &
++ CAN_IF_CREQ_BUSY;
++ if (if1_creq == 0)
++ break;
++
++ counter--;
++ }
++
++ /* Confirming read. */
++ if ((counter == 0)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_set_rx_buffer_link -> Read failed\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ IOH_WRITE_LONG((CAN_CMASK_RDWR | CAN_CMASK_CTRL),
++ (can_baseaddress +
++ CAN_IF1_CMASK_OFFSET));
++
++ /*
++ Setting/Resetting the EOB bit for Buffer link
++ operation.
++ EOB bit = 1 -> Buffer link disabled.
++ EOB bit = 0 -> Buffer link enabled.
++ */
++ if (set == ENABLE) {
++ IOH_CAN_BIT_CLEAR((can_baseaddress +
++ CAN_IF1_MCONT_OFFSET),
++ CAN_IF_MCONT_EOB);
++ IOH_DEBUG
++ ("ioh_can_set_rx_buffer_link -> \
++ Buffer Link enabled.\n");
++ } else {
++ IOH_CAN_BIT_SET((can_baseaddress +
++ CAN_IF1_MCONT_OFFSET),
++ CAN_IF_MCONT_EOB);
++ IOH_DEBUG
++ ("ioh_can_set_rx_buffer_link -> \
++ Buffer Link disabled.\n");
++ }
++
++ IOH_WRITE_LONG(buffer_num,
++ (can_baseaddress + CAN_IF1_CREQ_OFFSET));
++
++ counter = COUNTER_LIMIT;
++ while (counter) {
++ if1_creq = \
++ IOH_READ_LONG(
++ can_baseaddress + CAN_IF1_CREQ_OFFSET)
++ & CAN_IF_CREQ_BUSY;
++ if (if1_creq == 0)
++ break;
++
++ counter--;
++ }
++
++ if (counter == 0) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_set_rx_buffer_link -> \
++ Write failed.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ IOH_DEBUG
++ ("ioh_can_set_rx_buffer_link -> \
++ Write successful.\n");
++ retval = IOH_CAN_SUCCESS;
++ }
++
++ }
++ }
++
++ IOH_DEBUG("ioh_can_set_rx_buffer_link -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_get_rx_buffer_link(
++ int handle,u32 buffer_num,u32 *link)
++ @brief This function gets receive buffer linking of the CAN device.
++ @remarks Retrieves the current buffer link state(
++ enabled/disabled) of a receive object.
++ The main tasks performed by this function are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid error status code is returned.
++ - Depending on the value of the EOB bit, the
++ enable/disable
++ status of the buffer link of the message object
++ is determined
++ and returned to the function calling this
++ function.
++ EOB bit set : Buffer link disabled.
++ EOB bit reset: Buffer link enabled.
++
++ @param handle [@ref IN] The handle to the device.
++ @param buffer_num [@ref IN] The message object number.
++ @param link [@ref OUT] The reference to the buffer link
++ state.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_ioctl
++ - ioh_can_suspend
++
++ <hr>
++*/
++int ioh_can_get_rx_buffer_link(int handle, u32 buffer_num,
++ u32 *link)
++{
++ u32 reg_val;
++ u32 counter;
++ int retval = IOH_CAN_SUCCESS;
++ u8 *can_baseaddress;
++ u32 if1_creq;
++
++ if ((handle == (int) 0) || (link == NULL)) { /* invalid
++ parameters. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_get_rx_buffer_link -> Invalid Parameter.\n");
++ retval = IOH_CAN_FAIL;
++ } else if ((ioh_msg_obj_conf[buffer_num - 1] != MSG_OBJ_RX) ||
++ (buffer_num > (ioh_can_rx_buf_size + ioh_can_tx_buf_size))) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_get_rx_buffer_link -> Invalid buffer "
++ "number %u.\n", buffer_num);
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Obtaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ /* Reading the corresponding message object. */
++ IOH_WRITE_LONG(CAN_CMASK_RX_TX_GET,
++ (can_baseaddress + CAN_IF1_CMASK_OFFSET));
++ IOH_WRITE_LONG(buffer_num,
++ (can_baseaddress + CAN_IF1_CREQ_OFFSET));
++
++ counter = COUNTER_LIMIT;
++ while (counter) {
++ if1_creq = \
++ (IOH_READ_LONG((can_baseaddress + CAN_IF1_CREQ_OFFSET))
++ & CAN_IF_CREQ_BUSY);
++ if (if1_creq == 0)
++ break;
++
++ counter--;
++ }
++
++ /* Confirming read. */
++ if ((counter == 0)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_get_rx_buffer_link -> Read Failed.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Checking for the EOB bit.
++ EOB bit = 1 -> Buffer link disabled.
++ EOB bit = 0 -> Buffer link enabled.
++ */
++ reg_val =
++ IOH_READ_LONG(can_baseaddress +
++ CAN_IF1_MCONT_OFFSET);
++ *link = (reg_val & CAN_IF_MCONT_EOB) ? DISABLE : ENABLE;
++
++ IOH_DEBUG("ioh_can_get_rx_buffer_link -> EOB bit = "
++ "%d\n",
++ (reg_val & CAN_IF_MCONT_EOB) >>
++ BIT_SHIFT_SEVEN);
++ IOH_DEBUG
++ ("ioh_can_get_rx_buffer_link -> Buffer Link = "
++ "%u (1 - Set ,0 -> not set)\n", *link);
++ }
++ }
++
++ IOH_DEBUG("ioh_can_get_rx_buffer_link -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_get_rx_filter(
++ int handle,struct ioh_can_rx_filter *filter)
++ @brief This function sets the receive filter for a receive buffer
++ of the CAN device.
++ @remarks Retrieves the current receive filter settings of a
++ receive object.
++ The main tasks performed by this function are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid error status code is returned.
++ - Depending on the settings of the acceptance
++ filter the
++ filter structure is filled and returned to the
++ function calling this function.
++
++ @param handle [@ref IN] The handle to the device.
++ @param filter [@ref OUT] the reference to the filter
++ settings.
++
++ @retval
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ success.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_ioctl
++ - ioh_can_suspend
++ <hr>
++*/
++int ioh_can_get_rx_filter(int handle, struct ioh_can_rx_filter *filter)
++{
++ u32 reg_val1;
++ u32 reg_val2;
++ u32 counter;
++ int retval = IOH_CAN_SUCCESS;
++ u8 *can_baseaddress;
++ u32 if1_creq;
++
++ if ((handle == (int) 0) || (filter == NULL)) { /* if invalid
++ parameters. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_get_rx_filter -> Invalid Parameter.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Obtaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ /*Preparing to read the specified Msg Obj. */
++ IOH_WRITE_LONG(CAN_CMASK_RX_TX_GET,
++ (can_baseaddress + CAN_IF1_CMASK_OFFSET));
++ IOH_WRITE_LONG(filter->num,
++ (can_baseaddress + CAN_IF1_CREQ_OFFSET));
++
++ /*Confirming the read completion. */
++ counter = COUNTER_LIMIT;
++ while (counter) {
++ if1_creq = \
++ (IOH_READ_LONG(can_baseaddress + CAN_IF1_CREQ_OFFSET)) &
++ CAN_IF_CREQ_BUSY;
++ if (if1_creq == 0)
++ break;
++
++ counter--;
++ }
++
++ if ((counter == 0)) { /*Read unsuccessful. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_get_rx_filter -> Reading of receive \
++ buffer %u failed.\n",
++ filter->num);
++ retval = IOH_CAN_FAIL;
++ } else { /*read successful. */
++
++ /*Checking for Umask */
++ reg_val1 =
++ IOH_READ_LONG((can_baseaddress +
++ CAN_IF1_MCONT_OFFSET));
++ filter->umask =
++ ((CAN_IF_MCONT_UMASK & reg_val1) >>
++ BIT_SHIFT_TWELVE);
++
++ if (1 == filter->umask) { /*If Umask is set */
++ /* Getting the Mask data. */
++
++ /* Reading MASK2 register. */
++ reg_val1 =
++ IOH_READ_LONG((can_baseaddress +
++ CAN_IF1_MASK2_OFFSET));
++
++ if ((CAN_IF_MASK2_MXTD & reg_val1) != 0) {
++ /* Extended Mask set.
++ Mask ID is 29 bits */
++ reg_val2 =
++ IOH_READ_LONG((can_baseaddress +
++ CAN_IF1_MASK1_OFFSET));
++
++ /* Extracting the 16 MSB bits of the
++ 29bit ID. */
++ reg_val2 = reg_val2 & MSK_ALL_SIXTEEN;
++					/* Extracting the remaining 13 bits */
++ reg_val1 = reg_val1 & MSK_ALL_THIRTEEN;
++
++					/* Combining them to a single 29bit ID. */
++ reg_val1 =
++ reg_val1 << BIT_SHIFT_SIXTEEN;
++ reg_val1 = reg_val1 | reg_val2;
++
++ filter->amr.id = reg_val1;
++ filter->amr.id_ext = 1;
++ } else { /*Standard Mask 11bit Mask ID */
++
++ /* Extracting the 13 bits of MASK2
++ register. */
++ reg_val1 = reg_val1 & MSK_ALL_THIRTEEN;
++
++ /* Modifying it to represent 11bit Mask
++ ID */
++ reg_val1 = reg_val1 >> BIT_SHIFT_TWO;
++
++ filter->amr.id = reg_val1;
++ filter->amr.id_ext = 0;
++ }
++ }
++
++ reg_val1 =
++ IOH_READ_LONG((can_baseaddress +
++ CAN_IF1_ID2_OFFSET));
++
++ if ((CAN_ID2_XTD & reg_val1) != 0) { /*Extended ID
++ 29bits */
++ reg_val2 =
++ IOH_READ_LONG((can_baseaddress +
++ CAN_IF1_ID1_OFFSET));
++
++ /* Extracting the 16 MSB bits of the 29bit
++ ID. */
++ reg_val2 = reg_val2 & MSK_ALL_SIXTEEN;
++ /* Extracting the remaining 13 bit. */
++ reg_val1 = reg_val1 & MSK_ALL_THIRTEEN;
++
++ /* Combining them to represent 29bit ID. */
++ reg_val1 = reg_val1 << BIT_SHIFT_SIXTEEN;
++ reg_val1 = reg_val1 | reg_val2;
++
++ filter->aidr.id = reg_val1;
++ filter->aidr.id_ext = 1;
++ } else { /*Standard Id 11bits. */
++
++ /* Extracting the 13 bits of ID2 register */
++ reg_val1 = reg_val1 & MSK_ALL_THIRTEEN;
++ /* Modifying it to represent the 11 bit ID */
++ reg_val1 = reg_val1 >> BIT_SHIFT_TWO;
++
++ filter->aidr.id = reg_val1;
++ filter->aidr.id_ext = 0;
++ }
++
++ IOH_DEBUG("ioh_can_get_rx_filter -> Successfully read "
++ "the filter of Msg Obj %u.\n", filter->num);
++ }
++ }
++
++ IOH_DEBUG("ioh_can_get_rx_filter -> Return value: %d.\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_open(
++ int handle,enum ioh_can_listen_mode listen,enum ioh_can_arbiter arbiter)
++ @brief This function opens the CAN device for read/write.
++ @remarks Opens and initializes the CAN hardware devices for use.
++ The main tasks performed by this function are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid error status code is returned.
++ - Configures the Receive and Transmit message
++ objects.
++ - Initializes the acceptance filter setting of
++ the
++ Receive message objects.
++ - Enables all the Receive and Transmit message
++ objects.
++ - Sets the baud rate to 500Kbps.
++ - Sets the restart mode to Auto.
++ - Sets the CAN device to RUN mode.
++
++ @param handle [@ref IN] The handle to the device.
++ @param listen [@ref IN] The listen mode to be set.
++ @param arbiter [@ref IN] The arbiter mode to be set.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_candev_open
++
++ <hr>
++*/
++int ioh_can_open(int handle, enum ioh_can_listen_mode listen,
++ enum ioh_can_arbiter arbiter)
++{
++ int retval;
++ s32 i;
++
++ if (handle == (int) 0) { /* invalid handle. */
++ IOH_LOG(KERN_ERR, "ioh_can_open -> Invalid handle.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ do {
++
++ /* Stopping the Can device. */
++ retval = ioh_can_set_run_mode(handle, IOH_CAN_STOP);
++ if (retval == IOH_CAN_FAIL) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_open -> ioh_can_set_run_mode "
++ "failed(returned %d).\n", retval);
++
++ break;
++ }
++ IOH_DEBUG
++ ("ioh_can_open -> ioh_can_set_run_mode invoked \
++ successfully.\n");
++
++ /* Clearing all the message object buffers. */
++ retval = ioh_can_clear_buffers(handle);
++ if (retval == IOH_CAN_FAIL) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_open -> ioh_can_clear_buffers "
++ "failed(returned %d).\n", retval);
++ break;
++ }
++ IOH_DEBUG
++ ("ioh_can_open -> ioh_can_clear_buffers invoked "
++ "successfully(returned %d).\n", retval);
++
++ /* Configuring the respective message object as either
++ receive/transmit object. */
++ ioh_can_config_rx_tx_buffers(handle);
++ IOH_DEBUG
++ ("ioh_can_open -> ioh_can_config_rx_tx_buffers "
++ "invoked successfully.\n");
++
++ /* Initializing filters for receive object. */
++ for (i = 0;
++ i < (ioh_can_tx_buf_size + ioh_can_rx_buf_size);
++ i++) {
++ if (ioh_msg_obj_conf[i] == MSG_OBJ_RX) {
++ /* Here i denotes the index, however
++ the object number is (i+1) */
++ retval =
++ ioh_can_rx_init_filter(handle,
++ (i + 1));
++
++ if (retval != IOH_CAN_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_rx_init_filter "
++ "failed for msg obj:%d\n",
++ (i + 1));
++ break;
++ }
++ }
++ }
++ if (retval != IOH_CAN_SUCCESS)
++ break;
++ IOH_DEBUG("ioh_can_open -> ioh_can_rx_init_filter "
++ "invoked successfully.\n");
++
++ /* Enabling all receive objects. */
++ retval = ioh_can_rx_enable_all(handle);
++
++ if (retval == IOH_CAN_FAIL) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_open -> ioh_can_rx_enable_all "
++ "failed(returned %d).\n", retval);
++ break;
++ }
++ IOH_DEBUG("ioh_can_open -> ioh_can_rx_enable_all "
++ "invoked successfully(returned %d).\n",
++ retval);
++
++ /* Enabling all transmit objects. */
++ retval = ioh_can_tx_enable_all(handle);
++
++ if (retval == IOH_CAN_FAIL) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_open -> ioh_can_tx_enable_all "
++ "failed(returned %d).\n", retval);
++ break;
++ }
++ IOH_DEBUG("ioh_can_open -> ioh_can_tx_enable_all "
++ "invoked successfully(returned %d).\n",
++ retval);
++
++ /* Setting the Baud Timing. */
++ retval =
++ ioh_can_set_baud_simple(handle, IOH_CAN_BAUD_500);
++ if (retval == IOH_CAN_FAIL) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_open -> ioh_can_set_baud_simple "
++ "failed(returned %d).\n", retval);
++ break;
++ }
++ IOH_DEBUG("ioh_can_open -> ioh_can_set_baud_simple "
++ "invoked successfully(returned %d).\n",
++ retval);
++
++ /* Setting the arbiter mode. */
++ retval = ioh_can_set_arbiter_mode(handle, arbiter);
++ if (retval == IOH_CAN_FAIL) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_open -> ioh_can_set_arbiter_mode "
++ "failed(returned %d).\n", retval);
++ break;
++ }
++ IOH_DEBUG
++ ("ioh_can_open -> ioh_can_set_arbiter_mode invoked "
++ "successfully(returned %d).\n", retval);
++
++ /* Setting the listen mode. */
++ retval = ioh_can_set_listen_mode(handle, listen);
++ if (retval == IOH_CAN_FAIL) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_open -> ioh_can_set_listen_mode "
++ "failed(returned %d).\n", retval);
++ break;
++ }
++ IOH_DEBUG("ioh_can_open -> ioh_can_set_listen_mode "
++ "invoked successfully(returned %d).\n",
++ retval);
++
++ /* Enabling the interrupts. */
++ retval = ioh_can_set_int_enables(handle, CAN_ALL);
++ if (retval == IOH_CAN_FAIL) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_open -> ioh_can_set_int_enables "
++ "failed(returned %d).\n", retval);
++ break;
++ }
++ IOH_DEBUG("ioh_can_open -> ioh_can_set_int_enables "
++ "invoked successfully(returned %d).\n",
++ retval);
++
++ /* Setting the restart mode. */
++ retval = ioh_can_set_restart_mode(handle, CAN_AUTO);
++ if (retval == IOH_CAN_FAIL) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_open -> ioh_can_set_restart_mode "
++ "failed(returned %d).\n", retval);
++ break;
++ }
++ IOH_DEBUG("ioh_can_open -> ioh_can_set_restart_mode "
++ "invoked successfully(returned %d).\n",
++ retval);
++
++ /* Setting the CAN to run mode. */
++ retval = ioh_can_set_run_mode(handle, IOH_CAN_RUN);
++ if (retval == IOH_CAN_FAIL) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_open -> ioh_can_set_run_mode "
++ "failed(returned %d).\n", retval);
++ break;
++ }
++ IOH_DEBUG("ioh_can_open -> ioh_can_set_set_run_mode "
++ "invoked successfully(returned %d).\n",
++ retval);
++
++ } while (false);
++ }
++
++ IOH_DEBUG("ioh_can_open returns %d.\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_release(int handle)
++ @brief This function releases (closes) the CAN device. Call to
++ close an opened CAN device.
++ @remarks De-initializes the CAN device. The main tasks performed
++ by this function are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid error status code is returned.
++ - Sets the CAN device to STOP mode.
++ - Disables the interrupts.
++ - Disables all the receive message objects.
++ - Disables all the transmit message objects.
++
++ @param handle [@ref IN] The handle to the device.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_candev_close
++
++ <hr>
++*/
++int ioh_can_release(int handle)
++{
++ int retval = IOH_CAN_SUCCESS;
++
++ if (handle == (int) 0) {
++ IOH_LOG(KERN_ERR, "ioh_can_release -> Invalid handle.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ do {
++			/* Stopping the CAN device. */
++ retval = ioh_can_set_run_mode(handle, IOH_CAN_STOP);
++
++ if (IOH_CAN_FAIL == retval) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_release -> ioh_can_set_run_mode "
++ "failed(returned %d).\n", retval);
++ break;
++ }
++ IOH_DEBUG
++ ("ioh_can_release -> ioh_can_set_run_mode invoked "
++ "successfully(returned %d).\n", retval);
++
++ /* Disabling the interrupts. */
++ retval = ioh_can_set_int_enables(handle, CAN_NONE);
++
++ if (IOH_CAN_FAIL == retval) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_release -> ioh_can_set_int_enables "
++ "failed(returned %d).\n", retval);
++ break;
++ }
++ IOH_DEBUG
++ ("ioh_can_release -> ioh_can_set_int_enables invoked "
++ "successfully(returned %d).\n", retval);
++
++ /* Disabling all the receive object. */
++ retval = ioh_can_rx_disable_all(handle);
++ if (IOH_CAN_FAIL == retval) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_release -> ioh_can_rx_disable_all "
++ "failed(returned %d).\n", retval);
++ break;
++ }
++ IOH_DEBUG
++ ("ioh_can_release -> ioh_can_rx_disable_all invoked "
++ "successfully(returned %d).\n", retval);
++
++ /* Disabling all the transmit object. */
++ retval = ioh_can_tx_disable_all(handle);
++ if (IOH_CAN_FAIL == retval) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_release -> ioh_can_tx_disable_all "
++ "failed(returned %d).\n", retval);
++ break;
++ }
++ IOH_DEBUG
++ ("ioh_can_release -> ioh_can_tx_disable_all invoked "
++ "successfully(returned %d).\n", retval);
++
++ } while (false);
++ }
++
++ IOH_DEBUG("ioh_can_release returns %d.\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn void ioh_can_int_clr(int handle,u32 mask)
++ @brief This function clears interrupt(s) from the CAN device.
++ @remarks Clears the interrupt. The main tasks performed by this
++ function are :
++ - If the interrupt is status interrupt, clear it
++ by reading the CANSTAT register.
++ - If the interrupt is message object interrupt
++ clear it by
++ determining whether it is a transmit/receive
++ message
++ object and appropriately clearing the IntPnd,
++ NewDat and
++ TxRqst bits.
++
++ @param handle [@ref IN] The handle to the device.
++ @param mask [@ref IN] The interrupts to be cleared.
++
++ @retval None.
++
++ @see
++ - ioh_can_callback
++
++ <hr>
++*/
++void ioh_can_int_clr(int handle, u32 mask)
++{
++ u32 counter;
++#ifdef DEBUG
++ u32 rtr;
++#endif
++ u8 *can_baseaddress;
++ u32 if2_creq;
++
++ if ((handle != (int) 0) && (mask != 0)) { /* if valid
++ parameters. */
++ /* Obtaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ /* Clearing status interrupt. */
++ if (mask == CAN_STATUS_INT) {
++ IOH_READ_LONG((can_baseaddress + CAN_STAT_OFFSET));
++ IOH_DEBUG
++ ("ioh_can_int_clr -> Status Interrupt cleared.\n");
++ } else if ((mask > 0) && (mask <= MAX_MSG_OBJ)) {
++ /*Clear interrupt for transmit object */
++ if (ioh_msg_obj_conf[mask - 1] == MSG_OBJ_TX) {
++#ifdef DEBUG
++ /* Checking if the transmission is for remote
++ frame. */
++ rtr =
++ ((IOH_READ_LONG
++ ((can_baseaddress +
++ CAN_IF2_ID2_OFFSET)) & CAN_ID2_DIR) ==
++ 0);
++
++ if (rtr == 1) {
++
++ IOH_DEBUG
++ ("ioh_can_int_clr -> Remote frame \
++ transmission "
++ "interrupt cleared for message \
++ object "
++ "%d.\n", mask);
++ } else {
++ IOH_DEBUG
++ ("ioh_can_int_clr -> \
++ Data frame transmission "
++ "interrupt cleared for message \
++ object "
++ "%d.\n", mask);
++ }
++#endif
++
++ /*Setting CMASK for clearing interrupts for
++ frame transmission. */
++ IOH_WRITE_LONG((CAN_CMASK_RDWR | CAN_CMASK_CTRL
++ | CAN_CMASK_ARB),
++ (can_baseaddress +
++ CAN_IF2_CMASK_OFFSET));
++
++ /* Resetting the ID registers. */
++ IOH_CAN_BIT_SET((can_baseaddress +
++ CAN_IF2_ID2_OFFSET),
++ (CAN_ID2_DIR |
++ (MSK_ALL_ELEVEN << 2)));
++ IOH_WRITE_LONG(0x0,
++ (can_baseaddress +
++ CAN_IF2_ID1_OFFSET));
++
++				/*Clearing NewDat, TxRqst & IntPnd */
++ IOH_CAN_BIT_CLEAR((can_baseaddress +
++ CAN_IF2_MCONT_OFFSET),
++ (CAN_IF_MCONT_NEWDAT |
++ CAN_IF_MCONT_INTPND |
++ CAN_IF_MCONT_TXRQXT));
++
++ IOH_WRITE_LONG(mask,
++ (can_baseaddress +
++ CAN_IF2_CREQ_OFFSET));
++
++ counter = COUNTER_LIMIT;
++ while (counter) {
++ if2_creq = \
++ (IOH_READ_LONG
++ (can_baseaddress
++ + CAN_IF2_CREQ_OFFSET)
++ & CAN_IF_CREQ_BUSY);
++ if (if2_creq == 0)
++ break;
++
++ counter--;
++ }
++
++
++ }
++ /*Clear interrupt for receive object */
++ else if (ioh_msg_obj_conf[mask - 1] == MSG_OBJ_RX) {
++#ifdef DEBUG
++ /* Checking if the reception is for remote
++ frame. */
++ rtr =
++ ((IOH_READ_LONG
++ ((can_baseaddress +
++ CAN_IF2_ID2_OFFSET)) & CAN_ID2_DIR) !=
++ 0);
++
++ if (rtr == 1) { /*if remote frame. */
++ IOH_DEBUG
++ ("ioh_can_int_clr -> Remote frame \
++ reception "
++ "interrupt cleared for message \
++ object "
++ "%d.\n", mask);
++ } else {
++ IOH_DEBUG
++ ("ioh_can_int_clr -> Data frame \
++ reception "
++ "interrupt cleared for message \
++ object "
++ "%d.\n", mask);
++ }
++#endif
++
++ /*Setting CMASK for clearing the reception
++ interrupts. */
++ IOH_WRITE_LONG((CAN_CMASK_RDWR | CAN_CMASK_CTRL
++ | CAN_CMASK_ARB),
++ (can_baseaddress +
++ CAN_IF2_CMASK_OFFSET));
++
++ /* Clearing the Dir bit. */
++ IOH_CAN_BIT_CLEAR((can_baseaddress +
++ CAN_IF2_ID2_OFFSET),
++ CAN_ID2_DIR);
++
++ /*Clearing NewDat & IntPnd */
++ IOH_CAN_BIT_CLEAR((can_baseaddress +
++ CAN_IF2_MCONT_OFFSET),
++ (CAN_IF_MCONT_NEWDAT |
++ CAN_IF_MCONT_INTPND));
++
++ IOH_WRITE_LONG(mask,
++ (can_baseaddress +
++ CAN_IF2_CREQ_OFFSET));
++
++ counter = COUNTER_LIMIT;
++ while (counter) {
++ if2_creq = \
++ IOH_READ_LONG
++ (can_baseaddress +
++ CAN_IF2_CREQ_OFFSET)
++ & CAN_IF_CREQ_BUSY;
++ if (if2_creq == 0)
++ break;
++
++ counter--;
++ }
++
++ }
++ }
++ }
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_msg_tx(
++ int handle,struct ioh_can_msg *msg)
++ @brief This function transmits a CAN message.
++ @remarks Sends a message object for transmission. The main
++ tasks performed by this function are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid error status code is returned.
++ - Transmits the obtained message object data by
++ appropriately
++ filling the currently available transmit message
++ object.
++
++ @param handle [@ref IN] The handle to the device.
++ @param msg [@ref IN] The message to be
++ transmitted.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++ - @ref IOH_CAN_NO_TX_BUFF --> No transmit buffer
++ object is available.
++
++ @see
++ - ioh_can_write
++
++ <hr>
++*/
++int ioh_can_msg_tx(int handle, struct ioh_can_msg *msg)
++{
++ u32 id1 = 0;
++ u32 id2 = 0;
++ u32 data_a1 = 0;
++ u32 data_a2 = 0;
++ u32 data_b1 = 0;
++ u32 data_b2 = 0;
++ u32 tx_disable_counter = 0;
++ u32 buffer_status = 0;
++ u32 tx_buffer_avail = 0;
++ u32 status;
++ u32 i;
++ u32 counter;
++ enum ioh_can_run_mode run_mode;
++ int retval = IOH_CAN_SUCCESS;
++ u8 *can_baseaddress;
++ u32 if1_creq;
++
++ if ((handle == (int) 0) || (msg == NULL)) { /* If invalid
++ parameters. */
++ IOH_LOG(KERN_ERR, "ioh_can_msg_tx -> Invalid Parameter.\n");
++ retval = IOH_CAN_FAIL;
++ }
++
++ else {
++		/* Obtaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ /*Getting the current CAN mode. */
++ (void)ioh_can_get_run_mode(handle, &run_mode);
++
++ /*If CAN is in STOP mode. */
++ if (run_mode != IOH_CAN_RUN) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_msg_tx -> CAN stopped on transmit \
++ attempt.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ unsigned long flags;
++ /* Attaining the lock. */
++ spin_lock_irqsave(&can_os->tx_spinlock, flags);
++
++ /*Getting the message object status. */
++ buffer_status = (u32) ioh_can_get_buffer_status(handle);
++
++ /*Getting the free transmit message object. */
++ for (i = 0;
++ i < (ioh_can_rx_buf_size + ioh_can_tx_buf_size);
++ i++) {
++ if ((ioh_msg_obj_conf[i] == MSG_OBJ_TX)) {
++ /* Checking whether the object is \
++ enabled. */
++ (void)ioh_can_get_tx_enable(handle,
++ (i + 1),
++ &status);
++
++ if ((ENABLE == status)) {
++ if ((((buffer_status >> i) & 1)
++ == 0)) {
++ tx_buffer_avail =
++ (i + 1);
++ break;
++ }
++ } else {
++ tx_disable_counter++;
++ }
++ }
++ }
++
++ /*If no transmit object available. */
++ if (tx_buffer_avail == 0) {
++ IOH_DEBUG
++ ("ioh_can_msg_tx -> tx_disable_counter "
++ "= %d.\n", tx_disable_counter);
++ /* If no object is enabled. */
++ if ((tx_disable_counter == \
++ ioh_can_tx_buf_size)){
++ retval = IOH_CAN_FAIL;
++ IOH_LOG(KERN_ERR,
++ "ioh_can_msg_tx -> \
++ All transmit buffers "
++ "are disabled.\n");
++ } else {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_msg_tx -> \
++ No transmit buffer free.\n");
++ retval = IOH_CAN_NO_TX_BUFF;
++ }
++ } else {
++ IOH_DEBUG
++ ("ioh_can_msg_tx -> \
++ Transmit buffer obtained.\n");
++
++ /*Reading the message object from the Message
++ RAM to the Interface register. */
++ IOH_WRITE_LONG(CAN_CMASK_RX_TX_GET,
++ (can_baseaddress +
++ CAN_IF1_CMASK_OFFSET));
++ IOH_WRITE_LONG(tx_buffer_avail,
++ (can_baseaddress +
++ CAN_IF1_CREQ_OFFSET));
++
++ /*Confirming the read. */
++ counter = COUNTER_LIMIT;
++ while (counter) {
++ if1_creq = \
++ (IOH_READ_LONG
++ (can_baseaddress +
++ CAN_IF1_CREQ_OFFSET)) &
++ CAN_IF_CREQ_BUSY;
++ if (if1_creq == 0)
++ break;
++
++ counter--;
++ }
++
++ /*If Read not successful. */
++ if (counter == 0) {
++ (void)ioh_can_set_tx_enable(handle,
++ tx_buffer_avail,
++ ENABLE);
++ retval = IOH_CAN_FAIL;
++ } else {
++ /*Setting the CMASK register. */
++ IOH_CAN_BIT_SET((can_baseaddress +
++ CAN_IF1_CMASK_OFFSET),
++ CAN_CMASK_ALL);
++
++ /*If ID extended is set. */
++ if (msg->ide == 1) {
++ /* Setting 29 bit ID with XTD
++ bit set. */
++ id1 =
++ (msg->id & MSK_ALL_SIXTEEN);
++ id2 =
++ ((msg->
++ id & (MSK_ALL_THIRTEEN <<
++ BIT_SHIFT_SIXTEEN))
++ >> BIT_SHIFT_SIXTEEN);
++
++ id2 |= CAN_ID2_XTD;
++ } else {
++ /* Setting 11bit ID with XTD bit
++ reset. */
++ id1 = 0;
++ id2 =
++ ((msg->
++ id & MSK_ALL_ELEVEN) <<
++ BIT_SHIFT_TWO);
++ }
++ IOH_CAN_BIT_CLEAR((can_baseaddress +
++ CAN_IF1_ID1_OFFSET),
++ MSK_ALL_SIXTEEN);
++ IOH_CAN_BIT_CLEAR((can_baseaddress +
++ CAN_IF1_ID2_OFFSET),
++ (MSK_ALL_THIRTEEN |
++ CAN_ID2_XTD));
++
++ IOH_CAN_BIT_SET((can_baseaddress +
++ CAN_IF1_ID1_OFFSET),
++ id1);
++ IOH_CAN_BIT_SET((can_baseaddress +
++ CAN_IF1_ID2_OFFSET),
++ id2);
++
++ /* If remote frame has to be
++ transmitted.. */
++ if (msg->rtr == 1) {
++ IOH_CAN_BIT_CLEAR(\
++ (can_baseaddress + CAN_IF1_ID2_OFFSET), CAN_ID2_DIR);
++ msg->dlc = 0;
++
++ IOH_DEBUG
++ ("ioh_can_msg_tx -> \
++ Transmitting a "
++ "remote frame.\n");
++ } else { /* Data frame
++ transmission. */
++
++ msg->dlc &= MSK_ALL_FOUR;
++
++ IOH_DEBUG
++ ("ioh_can_msg_tx -> \
++ Transmitting a "
++ "data frame.\n");
++ }
++
++ /*Writing the data and the DLC */
++ switch (msg->dlc) {
++ case 0:
++ break;
++
++ case 1:
++ data_a1 = msg->data[0];
++ break;
++ case 2:
++ data_a1 = msg->data[0];
++ data_a1 |=
++ (((u32) msg->
++ data[1]) <<
++ BIT_SHIFT_EIGHT);
++ break;
++ case 3:
++ data_a1 = msg->data[0];
++ data_a1 |=
++ (((u32) msg->
++ data[1]) <<
++ BIT_SHIFT_EIGHT);
++ data_a2 = msg->data[2];
++ break;
++ case 4:
++ data_a1 = msg->data[0];
++ data_a1 |=
++ (((u32) msg->
++ data[1]) <<
++ BIT_SHIFT_EIGHT);
++ data_a2 = msg->data[2];
++ data_a2 |=
++ (((u32) msg->
++ data[3]) <<
++ BIT_SHIFT_EIGHT);
++ break;
++ case 5:
++ data_a1 = msg->data[0];
++ data_a1 |=
++ (((u32) msg->
++ data[1]) <<
++ BIT_SHIFT_EIGHT);
++ data_a2 = msg->data[2];
++ data_a2 |=
++ (((u32) msg->
++ data[3]) <<
++ BIT_SHIFT_EIGHT);
++ data_b1 = msg->data[4];
++ break;
++ case 6:
++ data_a1 = msg->data[0];
++ data_a1 |=
++ (((u32) msg->
++ data[1]) <<
++ BIT_SHIFT_EIGHT);
++ data_a2 = msg->data[2];
++ data_a2 |=
++ (((u32) msg->
++ data[3]) <<
++ BIT_SHIFT_EIGHT);
++ data_b1 = msg->data[4];
++ data_b1 |=
++ (((u32) msg->
++ data[5]) <<
++ BIT_SHIFT_EIGHT);
++ break;
++ case 7:
++ data_a1 = msg->data[0];
++ data_a1 |=
++ (((u32) msg->
++ data[1]) <<
++ BIT_SHIFT_EIGHT);
++ data_a2 = msg->data[2];
++ data_a2 |=
++ (((u32) msg->
++ data[3]) <<
++ BIT_SHIFT_EIGHT);
++ data_b1 = msg->data[4];
++ data_b1 |=
++ (((u32) msg->
++ data[5]) <<
++ BIT_SHIFT_EIGHT);
++ data_b2 = msg->data[6];
++ break;
++ case 8:
++ default:
++ data_a1 = msg->data[0];
++ data_a1 |=
++ (((u32) msg->
++ data[1]) <<
++ BIT_SHIFT_EIGHT);
++ data_a2 = msg->data[2];
++ data_a2 |=
++ (((u32) msg->
++ data[3]) <<
++ BIT_SHIFT_EIGHT);
++ data_b1 = msg->data[4];
++ data_b1 |=
++ (((u32) msg->
++ data[5]) <<
++ BIT_SHIFT_EIGHT);
++ data_b2 = msg->data[6];
++ data_b2 |=
++ (((u32) msg->
++ data[7]) <<
++ BIT_SHIFT_EIGHT);
++ break;
++
++ }
++
++ /* Writing the DATA registers. */
++ IOH_WRITE_LONG(data_a1,
++ (can_baseaddress +
++ CAN_IF1_DATAA1_OFFSET));
++ IOH_WRITE_LONG(data_a2,
++ (can_baseaddress +
++ CAN_IF1_DATAA2_OFFSET));
++ IOH_WRITE_LONG(data_b1,
++ (can_baseaddress +
++ CAN_IF1_DATAB1_OFFSET));
++ IOH_WRITE_LONG(data_b2,
++ (can_baseaddress +
++ CAN_IF1_DATAB2_OFFSET));
++
++ /* Updating the size of the data. */
++ IOH_CAN_BIT_CLEAR((can_baseaddress +
++ CAN_IF1_MCONT_OFFSET),
++ MSK_ALL_FOUR);
++ IOH_CAN_BIT_SET((can_baseaddress +
++ CAN_IF1_MCONT_OFFSET),
++ msg->dlc);
++
++ /*Clearing IntPend, NewDat & TxRqst */
++ IOH_CAN_BIT_CLEAR((can_baseaddress + \
++ CAN_IF1_MCONT_OFFSET),\
++ (CAN_IF_MCONT_NEWDAT | \
++ CAN_IF_MCONT_INTPND | \
++ CAN_IF_MCONT_TXRQXT));
++
++ /*Setting NewDat, TxRqst bits */
++ IOH_CAN_BIT_SET((can_baseaddress +
++ CAN_IF1_MCONT_OFFSET),
++ (CAN_IF_MCONT_NEWDAT |
++ CAN_IF_MCONT_TXRQXT));
++
++ /*Writing the updation to the Message
++ object. */
++ IOH_WRITE_LONG(tx_buffer_avail,
++ (can_baseaddress +
++ CAN_IF1_CREQ_OFFSET));
++
++ /*Confirming the updation. */
++ counter = COUNTER_LIMIT;
++ while (counter) {
++ if1_creq = \
++ (IOH_READ_LONG
++ (can_baseaddress +
++ CAN_IF1_CREQ_OFFSET))
++ & CAN_IF_CREQ_BUSY;
++ if (if1_creq == 0)
++ break;
++
++ counter--;
++ }
++
++ if ((counter == 0)) {
++ retval = IOH_CAN_FAIL;
++ } else {
++ IOH_DEBUG
++ ("ioh_can_msg_tx -> \
++ Updation of transmit "
++ "buffer successful.\n");
++ IOH_DEBUG
++ ("ioh_can_msg_tx -> \
++ Message object enabled "
++ "for transmission.\n");
++
++ }
++
++ } /*if message read object successful */
++ } /* if transmit buffer available */
++
++ /* Releasing the lock. */
++ spin_unlock_irqrestore(&can_os->tx_spinlock, flags);
++ } /*if device in run mode */
++ } /* if parameters valid */
++
++ IOH_DEBUG("ioh_can_msg_tx -> Return value: %d.\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_clear_buffers(int handle)
++ @brief Clears the message objects.
++ @remarks This function clears all transmit and receive buffers of the
++ CAN device.
++
++ @param handle [@ref IN] The handle to the device.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_open
++
++ <hr>
++*/
++int ioh_can_clear_buffers(int handle)
++{
++ u32 i;
++ u32 rx_buff_num;
++ u32 tx_buff_num;
++ int retval = IOH_CAN_SUCCESS;
++ u8 *can_baseaddress;
++
++ if (handle == (int) 0) { /* if handle invalid. */
++ IOH_LOG(KERN_ERR, "ioh_can_clear_buffers -> Invalid Handle.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++		/* Obtaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ IOH_WRITE_LONG(CAN_CMASK_RX_TX_SET,
++ (can_baseaddress + CAN_IF1_CMASK_OFFSET));
++ IOH_WRITE_LONG(CAN_CMASK_RX_TX_SET,
++ (can_baseaddress + CAN_IF2_CMASK_OFFSET));
++
++ IOH_WRITE_LONG(MSK_ALL_SIXTEEN,
++ (can_baseaddress + CAN_IF1_MASK1_OFFSET));
++ IOH_WRITE_LONG(MSK_ALL_SIXTEEN,
++ (can_baseaddress + CAN_IF1_MASK2_OFFSET));
++ IOH_WRITE_LONG(MSK_ALL_SIXTEEN,
++ (can_baseaddress + CAN_IF2_MASK1_OFFSET));
++ IOH_WRITE_LONG(MSK_ALL_SIXTEEN,
++ (can_baseaddress + CAN_IF2_MASK2_OFFSET));
++
++ IOH_WRITE_LONG(0x0, (can_baseaddress + CAN_IF1_ID1_OFFSET));
++ IOH_WRITE_LONG(0x0, (can_baseaddress + CAN_IF1_ID2_OFFSET));
++ IOH_WRITE_LONG(0x0, (can_baseaddress + CAN_IF2_ID1_OFFSET));
++ IOH_WRITE_LONG(0x0, (can_baseaddress + CAN_IF2_ID2_OFFSET));
++
++ IOH_WRITE_LONG(0x0, (can_baseaddress + CAN_IF1_MCONT_OFFSET));
++ IOH_WRITE_LONG(0x0, (can_baseaddress + CAN_IF2_MCONT_OFFSET));
++
++ IOH_WRITE_LONG(0x0, (can_baseaddress + CAN_IF1_DATAA1_OFFSET));
++ IOH_WRITE_LONG(0x0, (can_baseaddress + CAN_IF1_DATAA2_OFFSET));
++ IOH_WRITE_LONG(0x0, (can_baseaddress + CAN_IF1_DATAB1_OFFSET));
++ IOH_WRITE_LONG(0x0, (can_baseaddress + CAN_IF1_DATAB2_OFFSET));
++ IOH_WRITE_LONG(0x0, (can_baseaddress + CAN_IF2_DATAA1_OFFSET));
++ IOH_WRITE_LONG(0x0, (can_baseaddress + CAN_IF2_DATAA2_OFFSET));
++ IOH_WRITE_LONG(0x0, (can_baseaddress + CAN_IF2_DATAB1_OFFSET));
++ IOH_WRITE_LONG(0x0, (can_baseaddress + CAN_IF2_DATAB2_OFFSET));
++
++ for (i = 1; i <= (MAX_MSG_OBJ / 2); i++) {
++ rx_buff_num = 2 * i;
++ tx_buff_num = (2 * i) - 1;
++
++ IOH_WRITE_LONG(rx_buff_num,
++ (can_baseaddress + CAN_IF1_CREQ_OFFSET));
++ IOH_WRITE_LONG(tx_buff_num,
++ (can_baseaddress + CAN_IF2_CREQ_OFFSET));
++
++ mdelay(10);
++
++ IOH_DEBUG
++ ("ioh_can_clear_buffers -> \
++ Cleared receive object %d "
++ "and tranmit object %d.\n", rx_buff_num,
++ tx_buff_num);
++ }
++ }
++
++ IOH_DEBUG("ioh_can_clear_buffers returns %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_get_buffer_status(int handle)
++ @brief This function gets the buffer status of the CAN device.
++ @remarks Retrieves the message object status. The main tasks
++ performed by this function are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid error status code is returned.
++ - Reads the values of the CANTREQ1 and CANTREQ2
++ registers
++ and returns it to the function calling this
++ function as
++ 32 bit value representing TxRqst bit of each
++ message object.
++
++ @param handle [@ref IN] The handle to the device.
++
++ @retval int
++ - 0 --> All transmit buffers available
++ - >0 --> Some transmit buffers are not available.
++ - @ref IOH_CAN_FAIL --> If the operation fails.
++
++ @see
++ ioh_can_msg_tx
++
++ <hr>
++*/
++int ioh_can_get_buffer_status(int handle)
++{
++ u32 reg_treq1;
++ u32 reg_treq2;
++ int retval = IOH_CAN_SUCCESS;
++ u8 *can_baseaddress;
++
++ if (handle == (int) 0) { /* if handle invalid. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_get_buffer_status -> Invalid Handle.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Obtaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ /* Reading the transmission request registers. */
++ reg_treq1 =
++ (IOH_READ_LONG(can_baseaddress + CAN_TREQ1_OFFSET) &
++ MSK_ALL_SIXTEEN);
++ reg_treq2 =
++ ((IOH_READ_LONG(can_baseaddress + CAN_TREQ2_OFFSET) &
++ MSK_ALL_SIXTEEN) << BIT_SHIFT_SIXTEEN);
++
++ retval = (reg_treq1 | reg_treq2);
++ }
++
++ IOH_DEBUG("ioh_can_get_buffer_status -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_rx_dequeue(
++ int handle,struct ioh_can_msg *msg,u32 buff_num)
++ @brief This function gets a pending message from the CAN device.
++ @remarks Retrieves the message from a received message object.
++
++ @param handle [@ref IN] The handle to the device.
++ @param msg [@ref OUT] Reference to the received
++ message object.
++ @param buff_num [@ref IN] The message object number.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_callback
++
++ <hr>
++*/
++int ioh_can_rx_dequeue(int handle, struct ioh_can_msg *msg,
++ u32 buff_num)
++{
++ s32 i;
++ u32 reg;
++ int retval = IOH_CAN_FAIL;
++ u8 *can_baseaddress;
++
++ if ((handle == (int) 0) || (msg == NULL)) { /* invalid
++ parameters. */
++ IOH_LOG(KERN_ERR, "ioh_can_rx_dequeue -> Invalid Parameter.\n");
++ }
++ /* invalid buffer number. */
++
++ else if ((ioh_msg_obj_conf[buff_num - 1] != MSG_OBJ_RX) ||
++ (buff_num > (ioh_can_rx_buf_size + ioh_can_tx_buf_size))) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_rx_dequeue -> Invalid Buffer number.\n");
++ }
++
++ else {
++ /* Obtaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ msg->ide = 0;
++ msg->id = 0;
++ msg->dlc = 0;
++ for (i = 0; i < IOH_CAN_MSG_DATA_LEN; i++)
++ msg->data[i] = 0;
++
++ /* Read the ID type. */
++ msg->ide =
++ ((IOH_READ_LONG(can_baseaddress + CAN_IF2_ID2_OFFSET)) &
++ CAN_ID2_XTD)
++ >> BIT_SHIFT_FOURTEEN;
++
++ /* Extracting the ID. */
++ if (msg->ide == 1) { /* Extended 29bit ID. */
++ msg->id =
++ (IOH_READ_LONG(can_baseaddress + CAN_IF2_ID1_OFFSET)
++ & MSK_ALL_SIXTEEN);
++ msg->id |=
++ (((IOH_READ_LONG
++ (can_baseaddress +
++ CAN_IF2_ID2_OFFSET)) & MSK_ALL_THIRTEEN) <<
++ BIT_SHIFT_SIXTEEN);
++ } else { /* Standard 11bit ID. */
++
++ msg->id =
++ (((IOH_READ_LONG
++ (can_baseaddress +
++ CAN_IF2_ID2_OFFSET)) & (MSK_ALL_ELEVEN <<
++ BIT_SHIFT_TWO)) >>
++ BIT_SHIFT_TWO);
++ }
++
++ /* Getting the size of the data and the Remote frame bit. */
++ if (msg->rtr == 1) {
++ msg->dlc = 0;
++
++ IOH_DEBUG("ioh_can_rx_dequeue -> Remote frame "
++ "read with message id: %x.\n", msg->id);
++ } else {
++ msg->dlc =
++ ((IOH_READ_LONG
++ (can_baseaddress + CAN_IF2_MCONT_OFFSET))
++ & MSK_ALL_FOUR);
++
++ IOH_DEBUG("ioh_can_rx_dequeue -> Data frame "
++ "read with message id: %x.\n", msg->id);
++ }
++
++ /* Reading back the data. */
++ switch (msg->dlc) {
++ case 0:
++ break;
++
++ case 1:
++ reg =
++ IOH_READ_LONG(can_baseaddress +
++ CAN_IF2_DATAA1_OFFSET);
++ msg->data[0] = reg & MSK_ALL_EIGHT;
++ break;
++
++ case 2:
++ reg =
++ IOH_READ_LONG(can_baseaddress +
++ CAN_IF2_DATAA1_OFFSET);
++ msg->data[0] = reg & MSK_ALL_EIGHT;
++ msg->data[1] =
++ ((reg & (MSK_ALL_EIGHT << BIT_SHIFT_EIGHT))
++ >> BIT_SHIFT_EIGHT);
++ break;
++
++ case 3:
++ reg =
++ IOH_READ_LONG(can_baseaddress +
++ CAN_IF2_DATAA1_OFFSET);
++ msg->data[0] = reg & MSK_ALL_EIGHT;
++ msg->data[1] =
++ ((reg & (MSK_ALL_EIGHT << BIT_SHIFT_EIGHT))
++ >> BIT_SHIFT_EIGHT);
++
++ reg =
++ IOH_READ_LONG(can_baseaddress +
++ CAN_IF2_DATAA2_OFFSET);
++ msg->data[2] = reg & MSK_ALL_EIGHT;
++ break;
++
++ case 4:
++ reg =
++ IOH_READ_LONG(can_baseaddress +
++ CAN_IF2_DATAA1_OFFSET);
++ msg->data[0] = reg & MSK_ALL_EIGHT;
++ msg->data[1] =
++ ((reg & (MSK_ALL_EIGHT << BIT_SHIFT_EIGHT))
++ >> BIT_SHIFT_EIGHT);
++
++ reg =
++ IOH_READ_LONG(can_baseaddress +
++ CAN_IF2_DATAA2_OFFSET);
++ msg->data[2] = reg & MSK_ALL_EIGHT;
++ msg->data[3] =
++ ((reg & (MSK_ALL_EIGHT << BIT_SHIFT_EIGHT))
++ >> BIT_SHIFT_EIGHT);
++ break;
++
++ case 5:
++ reg =
++ IOH_READ_LONG(can_baseaddress +
++ CAN_IF2_DATAA1_OFFSET);
++ msg->data[0] = reg & MSK_ALL_EIGHT;
++ msg->data[1] =
++ ((reg & (MSK_ALL_EIGHT << BIT_SHIFT_EIGHT))
++ >> BIT_SHIFT_EIGHT);
++
++ reg =
++ IOH_READ_LONG(can_baseaddress +
++ CAN_IF2_DATAA2_OFFSET);
++ msg->data[2] = reg & MSK_ALL_EIGHT;
++ msg->data[3] =
++ ((reg & (MSK_ALL_EIGHT << BIT_SHIFT_EIGHT))
++ >> BIT_SHIFT_EIGHT);
++
++ reg =
++ IOH_READ_LONG(can_baseaddress +
++ CAN_IF2_DATAB1_OFFSET);
++ msg->data[4] = reg & MSK_ALL_EIGHT;
++ break;
++
++ case 6:
++ reg =
++ IOH_READ_LONG(can_baseaddress +
++ CAN_IF2_DATAA1_OFFSET);
++ msg->data[0] = reg & MSK_ALL_EIGHT;
++ msg->data[1] =
++ ((reg & (MSK_ALL_EIGHT << BIT_SHIFT_EIGHT))
++ >> BIT_SHIFT_EIGHT);
++
++ reg =
++ IOH_READ_LONG(can_baseaddress +
++ CAN_IF2_DATAA2_OFFSET);
++ msg->data[2] = reg & MSK_ALL_EIGHT;
++ msg->data[3] =
++ ((reg & (MSK_ALL_EIGHT << BIT_SHIFT_EIGHT))
++ >> BIT_SHIFT_EIGHT);
++
++ reg =
++ IOH_READ_LONG(can_baseaddress +
++ CAN_IF2_DATAB1_OFFSET);
++ msg->data[4] = reg & MSK_ALL_EIGHT;
++ msg->data[5] =
++ ((reg & (MSK_ALL_EIGHT << BIT_SHIFT_EIGHT))
++ >> BIT_SHIFT_EIGHT);
++ break;
++
++ case 7:
++ reg =
++ IOH_READ_LONG(can_baseaddress +
++ CAN_IF2_DATAA1_OFFSET);
++ msg->data[0] = reg & MSK_ALL_EIGHT;
++ msg->data[1] =
++ ((reg & (MSK_ALL_EIGHT << BIT_SHIFT_EIGHT))
++ >> BIT_SHIFT_EIGHT);
++
++ reg =
++ IOH_READ_LONG(can_baseaddress +
++ CAN_IF2_DATAA2_OFFSET);
++ msg->data[2] = reg & MSK_ALL_EIGHT;
++ msg->data[3] =
++ ((reg & (MSK_ALL_EIGHT << BIT_SHIFT_EIGHT))
++ >> BIT_SHIFT_EIGHT);
++
++ reg =
++ IOH_READ_LONG(can_baseaddress +
++ CAN_IF2_DATAB1_OFFSET);
++ msg->data[4] = reg & MSK_ALL_EIGHT;
++ msg->data[5] =
++ ((reg & (MSK_ALL_EIGHT << BIT_SHIFT_EIGHT))
++ >> BIT_SHIFT_EIGHT);
++
++ reg =
++ IOH_READ_LONG(can_baseaddress +
++ CAN_IF2_DATAB2_OFFSET);
++ msg->data[6] = reg & MSK_ALL_EIGHT;
++ break;
++
++ case 8:
++ default:
++ reg =
++ IOH_READ_LONG(can_baseaddress +
++ CAN_IF2_DATAA1_OFFSET);
++ msg->data[0] = reg & MSK_ALL_EIGHT;
++ msg->data[1] =
++ ((reg & (MSK_ALL_EIGHT << BIT_SHIFT_EIGHT))
++ >> BIT_SHIFT_EIGHT);
++
++ reg =
++ IOH_READ_LONG(can_baseaddress +
++ CAN_IF2_DATAA2_OFFSET);
++ msg->data[2] = reg & MSK_ALL_EIGHT;
++ msg->data[3] =
++ ((reg & (MSK_ALL_EIGHT << BIT_SHIFT_EIGHT))
++ >> BIT_SHIFT_EIGHT);
++
++ reg =
++ IOH_READ_LONG(can_baseaddress +
++ CAN_IF2_DATAB1_OFFSET);
++ msg->data[4] = reg & MSK_ALL_EIGHT;
++ msg->data[5] =
++ ((reg & (MSK_ALL_EIGHT << BIT_SHIFT_EIGHT))
++ >> BIT_SHIFT_EIGHT);
++
++ reg =
++ IOH_READ_LONG(can_baseaddress +
++ CAN_IF2_DATAB2_OFFSET);
++ msg->data[6] = reg & MSK_ALL_EIGHT;
++ msg->data[7] =
++ ((reg & (MSK_ALL_EIGHT << BIT_SHIFT_EIGHT))
++ >> BIT_SHIFT_EIGHT);
++
++ break;
++ }
++ retval = IOH_CAN_SUCCESS;
++ }
++
++ IOH_DEBUG("ioh_can_rx_dequeue -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn void ioh_can_config_rx_tx_buffers(int handle)
++ @brief This function configures receive and transmit buffers.
++ @remarks Configures the receive and the transmit buffers.
++
++ @param handle [@ref IN] The handle to the device.
++
++ @retval None.
++
++ @see
++ - ioh_can_open
++ - ioh_can_resume
++
++ <hr>
++*/
++void ioh_can_config_rx_tx_buffers(int handle)
++{
++ u32 i;
++ u32 counter;
++ u8 *can_baseaddress;
++ u32 if1_creq;
++ u32 if2_creq;
++
++ if (handle != (int) 0) { /* if handle valid. */
++ /* Obtaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ /*For accssing MsgVal, ID and EOB bit */
++ IOH_WRITE_LONG((CAN_CMASK_RDWR | CAN_CMASK_ARB |
++ CAN_CMASK_CTRL),
++ (can_baseaddress + CAN_IF1_CMASK_OFFSET));
++ IOH_WRITE_LONG((CAN_CMASK_RDWR | CAN_CMASK_ARB |
++ CAN_CMASK_CTRL),
++ (can_baseaddress + CAN_IF2_CMASK_OFFSET));
++
++ IOH_WRITE_LONG(0x0, (can_baseaddress + CAN_IF1_ID1_OFFSET));
++ IOH_WRITE_LONG(0x0, (can_baseaddress + CAN_IF1_ID2_OFFSET));
++ /*Resetting DIR bit for reception */
++ IOH_WRITE_LONG(0x0, (can_baseaddress + CAN_IF2_ID1_OFFSET));
++ IOH_WRITE_LONG((CAN_ID2_DIR | (MSK_ALL_ELEVEN << 2)), \
++ (can_baseaddress + CAN_IF2_ID2_OFFSET));
++ /*Setting DIR bit for transmission */
++
++ IOH_WRITE_LONG(CAN_IF_MCONT_EOB, \
++ (can_baseaddress + CAN_IF1_MCONT_OFFSET));
++ /*Setting EOB bit for receiver */
++ IOH_WRITE_LONG(CAN_IF_MCONT_EOB, \
++ (can_baseaddress + CAN_IF2_MCONT_OFFSET));
++ /*Setting EOB bit for transmitter */
++
++ for (i = 0; i < (ioh_can_tx_buf_size + ioh_can_rx_buf_size);
++ i++) {
++ counter = COUNTER_LIMIT;
++ /* Configure the receive message objects */
++ if (ioh_msg_obj_conf[i] == MSG_OBJ_RX) {
++
++ IOH_WRITE_LONG((i + 1),
++ (can_baseaddress +
++ CAN_IF1_CREQ_OFFSET));
++
++ while (counter) {
++ if1_creq = \
++ (ioread32
++ (can_baseaddress +
++ CAN_IF1_CREQ_OFFSET))
++ & CAN_IF_CREQ_BUSY;
++ if (if1_creq == 0)
++ break;
++
++ counter--;
++ }
++
++ if ((counter == 0)) {
++ IOH_DEBUG
++ ("ioh_can_config_rx_tx_buffers -> \
++ Config failed "
++ "for receive message object %u\n",
++ (i + 1));
++ }
++ }
++ /* Configure the transmit message objects */
++ else if (ioh_msg_obj_conf[i] == MSG_OBJ_TX) {
++ IOH_WRITE_LONG((i + 1),
++ (can_baseaddress +
++ CAN_IF2_CREQ_OFFSET));
++
++ while (counter) {
++ if2_creq = \
++ (ioread32
++ (can_baseaddress +
++ CAN_IF2_CREQ_OFFSET))
++ & CAN_IF_CREQ_BUSY;
++ if (if2_creq == 0)
++ break;
++
++ counter--;
++ }
++
++
++
++ if ((counter == 0)) {
++ IOH_DEBUG
++ ("ioh_can_config_rx_tx_buffers -> \
++ Config failed "
++ "for transmit message object %u\n",
++ (i + 1));
++ }
++ }
++
++ }
++ }
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_get_error_stats(
++ int handle,struct ioh_can_error *error
++ @brief This function gets the error statics of the CAN device.
++ @remarks Retrieves the error status. The main tasks performed by
++ this function are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid error status code is returned.
++ - Depending on the value of the CANERRC register
++ the
++ error status structure is filled and returned to
++ the function
++ calling this function.
++
++ @param handle [@ref IN] The handle to the device.
++ @param error [@ref OUT] The reference to the error
++ status.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++int ioh_can_get_error_stats(int handle, struct ioh_can_error *error)
++{
++ u8 *can_baseaddress;
++ u32 reg_val;
++ int retval = IOH_CAN_SUCCESS;
++
++ if ((handle == (int) 0) || (error == NULL)) { /* invalid
++ parameters. */
++ IOH_LOG(KERN_ERR,
++ "ioh_can_get_error_stats -> Invalid Parameter.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Obtaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) handle;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ /* Reading the error count register. */
++ reg_val = IOH_READ_LONG(can_baseaddress + CAN_ERRC_OFFSET);
++
++ error->rxgte96 = 0;
++ error->txgte96 = 0;
++
++ error->rx_err_cnt =
++ (reg_val & MSK_ES_RXERRCNT) >> BIT_ES_RXERRCNT;
++ error->tx_err_cnt =
++ (reg_val & MSK_ES_TXERRCNT) >> BIT_ES_TXERRCNT;
++
++ /* receive error count > 96 */
++ if (error->rx_err_cnt >= ERROR_COUNT)
++ error->rxgte96 = 1;
++ /* transmit error count > 96. */
++ if (error->tx_err_cnt >= ERROR_COUNT)
++ error->txgte96 = 1;
++
++ /* Reading the Can status register. */
++ reg_val = IOH_READ_LONG(can_baseaddress + CAN_STAT_OFFSET);
++
++ /* EPass */
++ if ((reg_val & (1 << BIT_SHIFT_FIVE)) != 0)
++ error->error_stat = 1;
++ /* Buss Off */
++ else if ((reg_val & (1 << BIT_SHIFT_SEVEN)) != 0)
++ error->error_stat = 3;
++ else
++ error->error_stat = 0;
++
++ IOH_DEBUG("rxgte96 (Rx > 96) : %u\n", error->rxgte96);
++ IOH_DEBUG("txgte96 (Rx > 96) : %u\n", error->txgte96);
++ IOH_DEBUG("error_stat : %u\n", error->error_stat);
++ IOH_DEBUG("rx_err_cnt : %u\n", error->rx_err_cnt);
++ IOH_DEBUG("tx_err_cnt : %u\n", error->tx_err_cnt);
++ }
++
++ IOH_DEBUG("ioh_can_get_error_stats -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int ioh_can_reset(struct ioh_can_os *can_os)
++ @brief Performs soft reset.
++ @remarks Resets the CAN device. The main tasks
++ performed by this function are :
++ - Validates whether the passed arguments are
++ valid.
++ If invalid error status code is returned.
++ - The CAN device is reset by setting the SRST
++ bit of the
++ Soft Reset register.
++
++ @param can_os [@ref IN] Reference to the system
++ structure.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> If the operation is
++ successful.
++ - @ref IOH_CAN_FAIL --> If the operation
++ fails.
++
++ @see
++ - ioh_can_ioctl
++ - ioh_candev_open
++
++ <hr>
++
++*/
++int ioh_can_reset(struct ioh_can_os *can_os)
++{
++ int retval = IOH_CAN_SUCCESS;
++#ifndef FPGA
++ u8 *can_baseaddress;
++
++ if ((can_os == NULL)) {
++ IOH_LOG(KERN_ERR, "ioh_can_reset -> Invalid parameter.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Obtaining the remap address for access. */
++ struct can_hw *can = (struct can_hw *) can_os->can;
++ can_baseaddress = (u8 *) (can->io_base);
++
++ /* write to sw reset register */
++ IOH_WRITE_LONG(1, (can_baseaddress + CAN_SRST_OFFSET));
++ IOH_WRITE_LONG(0, (can_baseaddress + CAN_SRST_OFFSET));
++
++ IOH_DEBUG("ioh_can_reset -> Reset successful.\n");
++ }
++#endif
++
++ IOH_DEBUG("ioh_can_reset -> Return value: %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn irqreturn_t ioh_can_handler(
++ int irq, void *dev_id
++ @brief Handles the CAN interrupts.
++ @remarks Handles the interrupt. the main task perforemd by
++ this function is :
++ - Checks whether an interrupt is encontered.
++ If encountered
++ the callback function is invoked.
++
++ @param irq [@ref IN] The interrupt number.
++ @param dev_id [@ref IN] Reference to the device
++ structure.
++
++ @retval irqreturn_t
++ - IRQ_HANDLED --> CAN interrupt has been handled.
++ - IRQ_NONE --> No CAn interrupt source.
++
++ @see
++ - ioh_candev_open
++
++ <hr>
++*/
++irqreturn_t ioh_can_handler(int irq, void *dev_id)
++{
++ irqreturn_t retval = IRQ_NONE;
++
++ struct ioh_can_os *can_os = (struct ioh_can_os *) dev_id;
++ u32 int_stat;
++
++ IOH_DEBUG("ioh_can_handler -> Invoked.\n");
++
++ int_stat = ioh_can_int_pending(can_os->can);
++ IOH_DEBUG("ioh_can_handler -> ioh_can_int_pending "
++ "returned value: %x\n", int_stat);
++
++ if ((can_os != NULL) && (int_stat > 0)) {
++ can_os->int_stat = int_stat;
++ (*can_os->can_callback) (can_os);
++
++ IOH_DEBUG("ioh_can_handler -> Callback function "
++ "invoked successfully.\n");
++
++ retval = IRQ_HANDLED;
++ }
++
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn void ioh_can_log_message(u32 status)
++ @brief Logs the error messages.
++ @remarks Logs the error messages according to the error bits set.
++
++ @param status [@ref IN] Error status.
++
++ @retval None.
++
++ @see
++ - ioh_can_callback
++
++ <hr>
++*/
++void ioh_can_log_message(u32 status)
++{
++
++ switch ((status & MSK_ALL_THREE)) {
++
++ case 0:
++ IOH_DEBUG("ioh_can_log_message -> No Error\n");
++ break;
++ case 1:
++ IOH_LOG(KERN_ERR, "ioh_can_log_message -> Stuff Error\n");
++ break;
++ case 2:
++ IOH_LOG(KERN_ERR, "ioh_can_log_message -> Form Error.\n");
++ break;
++ case 3:
++ IOH_LOG(KERN_ERR, "ioh_can_log_message -> Ack Error\n");
++ break;
++ case 4:
++ IOH_LOG(KERN_ERR, "ioh_can_log_message -> Bit 1 Error\n");
++ break;
++ case 5:
++ IOH_LOG(KERN_ERR, "ioh_can_log_message -> Bit 0 Error.\n");
++ break;
++ case 6:
++ IOH_LOG(KERN_ERR, "ioh_can_log_message -> Crc Error\n");
++ break;
++ case 7:
++ IOH_LOG(KERN_ERR, "ioh_can_log_message -> Undefined Error\n");
++ break;
++ default:
++ break;
++ }
++}
++
++/*! @ingroup HALLayer
++ @fn void ioh_can_entcb(
++ void(*ioh_can_cb)(struct ioh_can_os *),struct ioh_can_os * p_can_os)
++ @brief Registers the call back function.
++ @remarks Registers the callback function for further use.
++
++ @param ioh_can_cb [@ref IN] Reference to the callback
++ function.
++ @param p_can_os [@ref IN] Reference to the device
++ structure.
++
++ @retval None.
++
++ @see
++ - ioh_candev_open
++
++ <hr>
++*/
++void ioh_can_entcb(void (*ioh_can_cb) (struct ioh_can_os *), \
++ struct ioh_can_os *p_can_os)
++{
++ if ((NULL != ioh_can_cb) && (NULL != p_can_os)) {
++ p_can_os->can_callback = ioh_can_cb;
++ IOH_DEBUG("ioh_can_entcb -> Callback function "
++ "set successful.\n");
++ } else {
++ IOH_LOG(KERN_ERR, "ioh_can_entcb -> Callback function set "
++ "unsuccessful.\n");
++ }
++
++}
++
++/*! @ingroup HALLayerAPI
++ @fn void ioh_can_callback(
++ struct ioh_can_os * can_os)
++ @brief The callback function.
++ @remarks The callback function to handle the interrupt.
++
++ @param can_os [@ref IN] Reference to the device
++ structure.
++
++ @retval None.
++
++ @see
++ - ioh_candev_open
++
++ <hr>
++*/
++void ioh_can_callback(struct ioh_can_os *can_os)
++{
++ u32 int_stat;
++ u32 reg;
++ u32 reg_stat;
++ u32 counter;
++ u8 *can_baseaddress;
++ struct ioh_can_msg receive_msg;
++ struct can_hw *can = (struct can_hw *) (can_os->can);
++ int retval = IOH_CAN_SUCCESS;
++ u32 if2_creq;
++
++ can_baseaddress = (u8 *) (can->io_base);
++
++ /* Get the interrupt status */
++ int_stat = can_os->int_stat;
++ can_os->int_stat = 0;
++
++ /*Checking for status interrupt */
++ if (CAN_STATUS_INT == int_stat) {
++ /* Reading of the CANSTAT register. */
++ reg_stat = IOH_READ_LONG((can_baseaddress + CAN_STAT_OFFSET));
++ reg_stat = reg_stat & MSK_ALL_EIGHT;
++ IOH_DEBUG("ioh_can_callback -> Status Register: %x.\n",
++ reg_stat);
++
++ /* If recovered from Bus-Off interrupt. */
++ if ((reg_stat == 0) && (can_os->bus_off_interrupt == 1)) {
++ can_os->bus_off_interrupt = 0;
++ (void)ioh_can_tx_enable_all(can_os->can);
++ (void)ioh_can_rx_enable_all(can_os->can);
++
++ IOH_LOG(KERN_ERR, "ioh_can_callback -> Bus off "
++ "stage recovered.\n");
++ } else {
++ /*Bus off interrupt. */
++ if (reg_stat & ((u32) 1 << BIT_SHIFT_SEVEN)) {
++ if (can_os->bus_off_interrupt == 0) {
++ enum ioh_can_auto_restart restart_mode \
++ = 0;
++
++ IOH_LOG(KERN_ERR,
++ "ioh_can_callback -> Bus off "
++ "interrupt.\n");
++
++ (void)ioh_can_tx_disable_all(can_os->
++ can);
++ (void)ioh_can_rx_disable_all(can_os->
++ can);
++
++ (void)ioh_can_get_restart_mode(can_os->
++ can,
++ &restart_mode);
++
++ if (CAN_AUTO == restart_mode) {
++ can_os->bus_off_interrupt = 1;
++
++ (void)
++ ioh_can_set_run_mode
++ (can_os->can, IOH_CAN_RUN);
++ IOH_DEBUG("ioh_can_callback -> "
++ "Device restarted.\n");
++ }
++ }
++ }
++ /*EWarn interrupt. */
++ if ((reg_stat & ((u32) 1 << BIT_SHIFT_SIX)) != 0) {
++ IOH_DEBUG
++ ("ioh_can_callback -> EWarn interrupt.\n");
++ }
++ /*EPass interrupt. */
++ if ((reg_stat & ((u32) 1 << BIT_SHIFT_FIVE)) != 0) {
++ IOH_DEBUG
++ ("ioh_can_callback -> EPass interrupt.\n");
++ }
++ /*RxOK interrupt. */
++ if ((reg_stat & ((u32) 1 << BIT_SHIFT_FOUR)) != 0) {
++ IOH_DEBUG
++ ("ioh_can_callback -> RxOK interrupt.\n");
++ reg_stat =
++ reg_stat & ~((u32) 1 << BIT_SHIFT_FOUR);
++ }
++ /*TxOK interrupt */
++ if ((reg_stat & ((u32) 1 << BIT_SHIFT_THREE)) != 0) {
++ IOH_DEBUG
++ ("ioh_can_callback -> TxOK interrupt.\n");
++ reg_stat =
++ reg_stat & ~((u32) 1 << BIT_SHIFT_THREE);
++ }
++ /*Error status */
++ ioh_can_log_message((reg_stat & MSK_ALL_THREE));
++ reg_stat = reg_stat & ~(MSK_ALL_THREE);
++
++ /*Clearing status register interrupt bits. */
++ IOH_WRITE_LONG(reg_stat,
++ (can_baseaddress + CAN_STAT_OFFSET));
++
++ int_stat = ioh_can_int_pending(can_os->can);
++ }
++ }
++
++ /*Message object interrupt. */
++ if ((int_stat > 0) && (int_stat <= MAX_MSG_OBJ)) {
++ /*Reading the messsage object from the Message RAM to the
++ interface registers. */
++ IOH_WRITE_LONG(CAN_CMASK_RX_TX_GET,
++ (can_baseaddress + CAN_IF2_CMASK_OFFSET));
++ IOH_WRITE_LONG((int_stat),
++ (can_baseaddress + CAN_IF2_CREQ_OFFSET));
++
++ /*Confirming the read. */
++ counter = COUNTER_LIMIT;
++ while (counter) {
++ if2_creq = \
++ (IOH_READ_LONG(can_baseaddress + CAN_IF2_CREQ_OFFSET)) &
++ CAN_IF_CREQ_BUSY;
++
++ if (if2_creq == 0)
++ break;
++
++ counter--;
++ }
++
++ if (counter > 0) { /* If read successful. */
++ /* Reading the MCONT register. */
++ reg =
++ IOH_READ_LONG((can_baseaddress +
++ CAN_IF2_MCONT_OFFSET));
++ reg &= MSK_ALL_SIXTEEN;
++
++ /* If MsgLost bit set. */
++ if ((reg & CAN_IF_MCONT_MSGLOST) != 0) {
++ IOH_CAN_BIT_CLEAR((can_baseaddress +
++ CAN_IF2_MCONT_OFFSET),
++ CAN_IF_MCONT_MSGLOST);
++
++ IOH_LOG(KERN_ERR,
++ "ioh_can_callback -> \
++ Message object %d has "
++ "been overwritten.\n", int_stat);
++ }
++
++ /* Read the direction bit for determination of remote
++ frame during reception. */
++ receive_msg.rtr =
++ ((IOH_READ_LONG
++ ((can_baseaddress +
++ CAN_IF2_ID2_OFFSET)) & CAN_ID2_DIR) != 0);
++
++ /* Clearing interrupts. */
++ ioh_can_int_clr(can_os->can, int_stat);
++ IOH_DEBUG
++ ("ioh_can_callback -> \
++ ioh_can_int_clr invoked successfully.\n");
++
++ /*Hanlde reception interrupt */
++ if (MSG_OBJ_RX == ioh_msg_obj_conf[int_stat - 1]) {
++ /*If new data arrived */
++ if ((reg & CAN_IF_MCONT_NEWDAT) != 0) {
++ /*Reading the message object content. */
++ retval =
++ ioh_can_rx_dequeue(can_os->can,
++ &receive_msg,
++ int_stat);
++
++ if ((IOH_CAN_SUCCESS == retval)) {
++ /*Inserting the message object
++ into the FIFO. */
++ retval =
++ write_can_fifo(can_os->
++ rx_fifo,
++ &receive_msg);
++
++ /*If insertion successful and
++ if the read call is waiting. */
++ if ((IOH_CAN_SUCCESS == retval)
++ && (can_os->
++ read_wait_flag == 1)) {
++ can_os->read_wait_flag =
++ 0;
++ wake_up_interruptible
++ (&can_os->
++ read_wait_queue);
++ }
++ }
++ IOH_DEBUG
++ ("ioh_can_callback -> \
++ Reception interrupt "
++ "handled for receive "
++ "message object %u.\n", int_stat);
++ }
++
++ }
++ /*Hanlde transmission interrupt */
++ else if (MSG_OBJ_TX == ioh_msg_obj_conf[int_stat - 1]) {
++ /*If the write system call is waiting. */
++ if (1 == can_os->write_wait_flag) {
++ can_os->write_wait_flag = 0;
++ wake_up_interruptible(&can_os->
++ write_wait_queue);
++ }
++
++ IOH_DEBUG
++ ("ioh_can_callback -> Transmission interrupt "
++ "handled for transmit "
++ "message object %u.\n", int_stat);
++ }
++ }
++ }
++}
+diff -urN linux-2.6.33-rc3/drivers/net/can/pch_can/pch_can_hal.h topcliff-2.6.33-rc3/drivers/net/can/pch_can/pch_can_hal.h
+--- linux-2.6.33-rc3/drivers/net/can/pch_can/pch_can_hal.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/can/pch_can/pch_can_hal.h 2010-03-10 08:57:34.000000000 +0900
+@@ -0,0 +1,1407 @@
++/*!
++ * @file ioh_can_hal.h
++ * @brief Provides the macro definitions used by the HAL Layer APIs.
++ * @version 1.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * <hr>
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ */
++
++#ifndef __IOH_CAN_HAL_H__
++#define __IOH_CAN_HAL_H__
++
++/*! @ingroup HALLayer
++ @def MAX_CAN_DEVICES
++ @brief The maximum number of devices supported by this driver.
++
++ <hr>
++*/
++#define MAX_CAN_DEVICES (1)
++
++/*! @ingroup HALLayer
++ @def MAX_BITRATE
++ @brief The maximum bitrate(Kbps) that can be programmed for the
++ CAN device.
++
++ <hr>
++*/
++#define MAX_BITRATE (0x3e8)
++
++/*! @ingroup HALLayer
++ @def NUM_NODES
++ @brief The maximum number of software FIFO nodes.
++
++ <hr>
++*/
++#define NUM_NODES (2000) /* Maximum number of
++ Software FIFO nodes. */
++
++/*! @ingroup HALLayer
++ @def MAX_MSG_OBJ
++ @brief The maximum number of message objects available.
++
++ <hr>
++*/
++#define MAX_MSG_OBJ (32)
++
++/*! @ingroup HALLayer
++ @def MSG_OBJ_RX
++ @brief The receive message object flag.
++
++ <hr>
++*/
++#define MSG_OBJ_RX (0)
++
++/*! @ingroup HALLayer
++ @def MSG_OBJ_TX
++ @brief The transmit message object flag.
++
++ <hr>
++*/
++#define MSG_OBJ_TX (1)
++
++/*! @ingroup HALLayer
++ @def ENABLE
++ @brief The enable flag.
++
++ <hr>
++*/
++#define ENABLE (1)
++
++/*! @ingroup HALLayer
++ @def DISABLE
++ @brief The disable flag.
++
++ <hr>
++*/
++#define DISABLE (0)
++
++/* bit position of important controller bits. */
++/*! @ingroup HALLayer
++ @def CAN_CTRL_INIT
++ @brief The INIT bit of CANCONT register.
++
++ <hr>
++*/
++#define CAN_CTRL_INIT (0x0001)
++
++/*! @ingroup HALLayer
++ @def CAN_CTRL_IE
++ @brief The IE bit of CANCONT register.
++
++ <hr>
++*/
++#define CAN_CTRL_IE (0x0002)
++
++/*! @ingroup HALLayer
++ @def CAN_CTRL_IE_SIE_EIE
++ @brief The IE + SIE + EIE bits of CANCONT register.
++
++ <hr>
++*/
++#define CAN_CTRL_IE_SIE_EIE (0x000e)
++
++/*! @ingroup HALLayer
++ @def CAN_CTRL_OPT
++ @brief The OPT bit of CANCONT register.
++
++ <hr>
++*/
++#define CAN_CTRL_OPT (0x0080)
++
++/*! @ingroup HALLayer
++ @def CAN_OPT_SILENT
++ @brief The Silent bit of CANOPT register.
++
++ <hr>
++*/
++#define CAN_OPT_SILENT (0x0008)
++
++/*! @ingroup HALLayer
++ @def CAN_CTRL_CCE
++ @brief The configuration change bit of CANCONT register.
++
++ <hr>
++*/
++#define CAN_CTRL_CCE (0x0040)
++
++/*! @ingroup HALLayer
++ @def CAN_CMASK_RX_TX_SET
++ @brief The CMASK value used for writing the TX/RX message
++ object to the message RAM.
++
++ <hr>
++*/
++#define CAN_CMASK_RX_TX_SET (0x00f3)
++
++/*! @ingroup HALLayer
++ @def CAN_CMASK_RX_TX_GET
++ @brief The CMASK value used for reading the TX/RX message
++ object from the message RAM.
++
++ <hr>
++*/
++#define CAN_CMASK_RX_TX_GET (0x0073)
++
++/*! @ingroup HALLayer
++ @def CAN_CMASK_ALL
++ @brief The value for setting all the bits
++ of the CMASK register.
++
++ <hr>
++*/
++#define CAN_CMASK_ALL (0xff)
++
++/*! @ingroup HALLayer
++ @def CAN_CMASK_RDWR
++ @brief The RD/WR bit of the CMASK register.
++
++ <hr>
++*/
++#define CAN_CMASK_RDWR (0x80)
++
++/*! @ingroup HALLayer
++ @def CAN_CMASK_ARB
++ @brief The ARB bit of the CMASK register.
++
++ <hr>
++*/
++#define CAN_CMASK_ARB (0x20)
++
++/*! @ingroup HALLayer
++ @def CAN_CMASK_CTRL
++ @brief The CTRL bit of the CMASK register.
++
++ <hr>
++*/
++#define CAN_CMASK_CTRL (0x10)
++
++/*! @ingroup HALLayer
++ @def CAN_CMASK_MASK
++ @brief The MASK bit of the CMASK register.
++
++ <hr>
++*/
++#define CAN_CMASK_MASK (0x40)
++
++/*! @ingroup HALLayer
++ @def CAN_CMASK_CLPNT
++ @brief The ClrintPnd bit of the CMASK register.
++
++ <hr>
++*/
++#define CAN_CMASK_CLPNT (0x08)
++
++/*! @ingroup HALLayer
++ @def CAN_CMASK_NEWINT
++ @brief The TxRqst/NewDat bit for the CMASK register.
++
++ <hr>
++*/
++#define CAN_CMASK_NEWINT (0x04)
++
++/*! @ingroup HALLayer
++ @def CAN_IF_MCONT_NEWDAT
++ @brief The NewDat bit of the MCONT register.
++
++ <hr>
++*/
++#define CAN_IF_MCONT_NEWDAT (0x8000)
++
++/*! @ingroup HALLayer
++ @def CAN_IF_MCONT_INTPND
++ @brief The IntPnd bit of the MCONT register.
++
++ <hr>
++*/
++#define CAN_IF_MCONT_INTPND (0x2000)
++
++/*! @ingroup HALLayer
++ @def CAN_IF_MCONT_UMASK
++ @brief The UMask bit of the MCONT register.
++
++ <hr>
++*/
++#define CAN_IF_MCONT_UMASK (0x1000)
++
++/*! @ingroup HALLayer
++ @def CAN_IF_MCONT_TXIE
++ @brief The TxIE bit of the MCONT register.
++
++ <hr>
++*/
++#define CAN_IF_MCONT_TXIE (0x0800)
++
++/*! @ingroup HALLayer
++ @def CAN_IF_MCONT_RXIE
++ @brief The RxIE bit of the MCONT register.
++
++ <hr>
++*/
++#define CAN_IF_MCONT_RXIE (0x0400)
++
++/*! @ingroup HALLayer
++ @def CAN_IF_MCONT_RMTEN
++ @brief The RmtEn bit of the MCONT register.
++
++ <hr>
++*/
++#define CAN_IF_MCONT_RMTEN (0x0200)
++
++/*! @ingroup HALLayer
++ @def CAN_IF_MCONT_TXRQXT
++ @brief The TxRqst bit of the MCONT register.
++
++ <hr>
++*/
++#define CAN_IF_MCONT_TXRQXT (0x0100)
++
++/*! @ingroup HALLayer
++ @def CAN_IF_MCONT_EOB
++ @brief The E0B bit of the MCONT register.
++
++ <hr>
++*/
++#define CAN_IF_MCONT_EOB (0x0080)
++
++/*! @ingroup HALLayer
++ @def CAN_IF_MCONT_MSGLOST
++ @brief The MsgLst bit of the MCONT register.
++
++ <hr>
++*/
++#define CAN_IF_MCONT_MSGLOST (0x4000)
++
++/*! @ingroup HALLayer
++ @def CAN_MASK2_MDIR_MXTD
++ @brief The MXtd and Mdir bit of the MASK2 register.
++
++ <hr>
++*/
++#define CAN_MASK2_MDIR_MXTD (0xc000)
++
++/*! @ingroup HALLayer
++ @def CAN_ID2_MSGVAL_XTD_DIR
++ @brief The MsgVal, Xtd and Dir bits of the ID2 register.
++
++ <hr>
++*/
++#define CAN_ID2_MSGVAL_XTD_DIR (0xe000)
++
++/*! @ingroup HALLayer
++ @def CAN_ID2_MSGVAL_DIR
++ @brief The MsgVal and Dir bits of the ID2 register.
++
++ <hr>
++*/
++#define CAN_ID2_MSGVAL_DIR (0xa000)
++
++/*! @ingroup HALLayer
++ @def CAN_ID2_DIR
++ @brief The Dir bit of the ID2 register.
++
++ <hr>
++*/
++#define CAN_ID2_DIR (0x2000)
++
++/*! @ingroup HALLayer
++ @def CAN_ID_MSGVAL
++ @brief The MsgVal bit of the ID2 register.
++
++ <hr>
++*/
++#define CAN_ID_MSGVAL (0x8000)
++
++/*! @ingroup HALLayer
++ @def CAN_IF_MASK2_MDIR
++ @brief The MDir bit of the MASK2 register.
++
++ <hr>
++*/
++#define CAN_IF_MASK2_MDIR ((u32)1 << 14)
++
++/*! @ingroup HALLayer
++ @def CAN_IF_MASK2_MXTD
++ @brief The MXtd bit of the MASK2 register.
++
++ <hr>
++*/
++#define CAN_IF_MASK2_MXTD ((u32)1 << 15)
++
++/*! @ingroup HALLayer
++ @def CAN_STATUS_INT
++ @brief The status interrupt value of the CAN device.
++
++ <hr>
++*/
++#define CAN_STATUS_INT (0x8000)
++
++/*! @ingroup HALLayer
++ @def CAN_IF_CREQ_BUSY
++ @brief The Busy flag bit of the CREQ register.
++
++ <hr>
++*/
++#define CAN_IF_CREQ_BUSY (0x8000)
++
++/*! @ingroup HALLayer
++ @def CAN_ID2_XTD
++ @brief The Xtd bit of ID2 register.
++
++ <hr>
++*/
++#define CAN_ID2_XTD (0x4000)
++
++/*! @ingroup HALLayer
++ @def CAN_SRST_BIT
++ @brief The SRST bit of the Soft reset register.
++
++ <hr>
++*/
++#define CAN_SRST_BIT (0x0001)
++
++/* CAN register offset */
++ /* CAN registers */
++/*! @ingroup HALLayer
++ @def CAN_CONT_OFFSET
++ @brief The offset of CAN control register
++
++ <hr>
++*/
++#define CAN_CONT_OFFSET (0x00) /*Can Control register */
++
++/*! @ingroup HALLayer
++ @def CAN_STAT_OFFSET
++ @brief The offset of CAN status register.
++
++ <hr>
++*/
++#define CAN_STAT_OFFSET (0x04)
++
++/*! @ingroup HALLayer
++ @def CAN_ERRC_OFFSET
++ @brief The offset of CAN Error counter register
++
++ <hr>
++*/
++#define CAN_ERRC_OFFSET (0x08)
++
++/*! @ingroup HALLayer
++ @def CAN_BITT_OFFSET
++ @brief The offset of CAN timing register.
++
++ <hr>
++*/
++#define CAN_BITT_OFFSET (0x0c)
++
++/*! @ingroup HALLayer
++ @def CAN_INT_OFFSET
++ @brief The offset of CAN interrupt register.
++
++ <hr>
++*/
++#define CAN_INT_OFFSET (0x010)
++
++/*! @ingroup HALLayer
++ @def CAN_OPT_OFFSET
++ @brief The offset of CAN Option register
++
++ <hr>
++*/
++#define CAN_OPT_OFFSET (0x14) /*Extended function register */
++
++/*! @ingroup HALLayer
++ @def CAN_BRPE_OFFSET
++ @brief The offset of BRPE register.
++
++ <hr>
++*/
++#define CAN_BRPE_OFFSET (0x18)
++
++ /* Message interface one (IF1) registers */
++
++/*! @ingroup HALLayer
++ @def CAN_IF1_CREQ_OFFSET
++ @brief The offset of IF1 Command request register.
++
++ <hr>
++*/
++#define CAN_IF1_CREQ_OFFSET (0x020)
++
++/*! @ingroup HALLayer
++ @def CAN_IF1_CMASK_OFFSET
++ @brief The offset of IF1 Command Mask register.
++
++ <hr>
++*/
++#define CAN_IF1_CMASK_OFFSET (0x024)
++
++/*! @ingroup HALLayer
++ @def CAN_IF1_ID1_OFFSET
++ @brief The offset of IF1 ID1 register.
++
++ <hr>
++*/
++#define CAN_IF1_ID1_OFFSET (0x030)
++
++/*! @ingroup HALLayer
++ @def CAN_IF1_ID2_OFFSET
++ @brief The offset of IF1 ID2 register.
++
++ <hr>
++*/
++#define CAN_IF1_ID2_OFFSET (0x034)
++
++/*! @ingroup HALLayer
++ @def CAN_IF1_MCONT_OFFSET
++ @brief The offset of IF1 Message control register.
++
++ <hr>
++*/
++#define CAN_IF1_MCONT_OFFSET (0x038)
++
++/*! @ingroup HALLayer
++ @def CAN_IF1_DATAA1_OFFSET
++ @brief The offset of IF1 DATAA1 register.
++
++ <hr>
++*/
++#define CAN_IF1_DATAA1_OFFSET (0x03C)
++
++/*! @ingroup HALLayer
++ @def CAN_IF1_DATAA2_OFFSET
++ @brief The offset of IF1 DATAA2 register.
++
++ <hr>
++*/
++#define CAN_IF1_DATAA2_OFFSET (0x040)
++
++/*! @ingroup HALLayer
++ @def CAN_IF1_DATAB1_OFFSET
++ @brief The offset of IF1 DATAB1 register.
++
++ <hr>
++*/
++#define CAN_IF1_DATAB1_OFFSET (0x044)
++
++/*! @ingroup HALLayer
++ @def CAN_IF1_DATAB2_OFFSET
++ @brief The offset of IF1 DATAB2 register.
++
++ <hr>
++*/
++#define CAN_IF1_DATAB2_OFFSET (0x048)
++
++/*! @ingroup HALLayer
++ @def CAN_IF1_MASK1_OFFSET
++ @brief The offset of IF1 MASK1 register.
++
++ <hr>
++*/
++#define CAN_IF1_MASK1_OFFSET (0x028)
++
++/*! @ingroup HALLayer
++ @def CAN_IF1_MASK2_OFFSET
++ @brief The offset of IF1 MASK2 register.
++
++ <hr>
++*/
++#define CAN_IF1_MASK2_OFFSET (0x02c)
++
++ /* Message interface two (IF2) registers. */
++
++/*! @ingroup HALLayer
++ @def CAN_IF2_CREQ_OFFSET
++ @brief The offset of IF2 Command request register.
++
++ <hr>
++*/
++#define CAN_IF2_CREQ_OFFSET (0x080)
++
++/*! @ingroup HALLayer
++ @def CAN_IF2_CMASK_OFFSET
++ @brief The offset of IF2 Command mask register.
++
++ <hr>
++*/
++#define CAN_IF2_CMASK_OFFSET (0x084)
++
++/*! @ingroup HALLayer
++ @def CAN_IF2_ID1_OFFSET
++ @brief The offset of IF2 ID1 register.
++
++ <hr>
++*/
++#define CAN_IF2_ID1_OFFSET (0x090)
++
++/*! @ingroup HALLayer
++ @def CAN_IF2_ID2_OFFSET
++ @brief The offset of IF2 ID2 register.
++
++ <hr>
++*/
++#define CAN_IF2_ID2_OFFSET (0x094)
++
++/*! @ingroup HALLayer
++ @def CAN_IF2_MCONT_OFFSET
++ @brief The offset of IF2 Message control register.
++
++ <hr>
++*/
++#define CAN_IF2_MCONT_OFFSET (0x098)
++
++/*! @ingroup HALLayer
++ @def CAN_IF2_DATAA1_OFFSET
++ @brief The offset of IF2 DATAA1 register.
++
++ <hr>
++*/
++#define CAN_IF2_DATAA1_OFFSET (0x09c)
++
++/*! @ingroup HALLayer
++ @def CAN_IF2_DATAA2_OFFSET
++ @brief The offset of IF2 DATAA2 register.
++
++ <hr>
++*/
++#define CAN_IF2_DATAA2_OFFSET (0x0a0)
++
++/*! @ingroup HALLayer
++ @def CAN_IF2_DATAB1_OFFSET
++ @brief The offset of IF2 DATAB1 register.
++
++ <hr>
++*/
++#define CAN_IF2_DATAB1_OFFSET (0x0a4)
++
++/*! @ingroup HALLayer
++ @def CAN_IF2_DATAB2_OFFSET
++ @brief The offset of IF2 DATAB2 register.
++
++ <hr>
++*/
++#define CAN_IF2_DATAB2_OFFSET (0x0a8)
++
++/*! @ingroup HALLayer
++ @def CAN_IF2_MASK1_OFFSET
++ @brief The offset of IF2 MASK1 register.
++
++ <hr>
++*/
++#define CAN_IF2_MASK1_OFFSET (0x088)
++
++/*! @ingroup HALLayer
++ @def CAN_IF2_MASK2_OFFSET
++ @brief The offset of IF2 MASK2 register.
++
++ <hr>
++*/
++#define CAN_IF2_MASK2_OFFSET (0x08c)
++
++ /* Transmission request registers */
++/*! @ingroup HALLayer
++ @def CAN_TREQ1_OFFSET
++ @brief The offset of the CAN Transmission request register1.
++
++ <hr>
++*/
++#define CAN_TREQ1_OFFSET (0x100)
++
++/*! @ingroup HALLayer
++ @def CAN_TREQ2_OFFSET
++ @brief The offset of the CAN Transmission request register2.
++
++ <hr>
++*/
++#define CAN_TREQ2_OFFSET (0x104)
++
++ /* Soft Reset register. */
++/*! @ingroup HALLayer
++ @def CAN_SRST_OFFSET
++ @brief The offset of the CAN Soft reset register.
++
++ <hr>
++*/
++#define CAN_SRST_OFFSET (0x1FC)
++
++ /* macros for shift operations. */
++/*! @ingroup HALLayer
++ @def BIT_SHIFT_ONE
++ @brief Bit shift by one.
++
++ <hr>
++*/
++#define BIT_SHIFT_ONE (1)
++
++/*! @ingroup HALLayer
++ @def BIT_SHIFT_TWO
++ @brief Bit shift by two.
++
++ <hr>
++*/
++#define BIT_SHIFT_TWO (2)
++
++/*! @ingroup HALLayer
++ @def BIT_SHIFT_THREE
++ @brief Bit shift by three.
++
++ <hr>
++*/
++#define BIT_SHIFT_THREE (3)
++
++/*! @ingroup HALLayer
++ @def BIT_SHIFT_FOUR
++ @brief Bit shift by four.
++
++ <hr>
++*/
++#define BIT_SHIFT_FOUR (4)
++
++/*! @ingroup HALLayer
++ @def BIT_SHIFT_FIVE
++ @brief Bit shift by five.
++
++ <hr>
++*/
++#define BIT_SHIFT_FIVE (5)
++
++/*! @ingroup HALLayer
++ @def BIT_SHIFT_SIX
++ @brief Bit shift by six.
++
++ <hr>
++*/
++#define BIT_SHIFT_SIX (6)
++
++/*! @ingroup HALLayer
++ @def BIT_SHIFT_SEVEN
++ @brief Bit shift by seven.
++
++ <hr>
++*/
++#define BIT_SHIFT_SEVEN (7)
++
++/*! @ingroup HALLayer
++ @def BIT_SHIFT_EIGHT
++ @brief Bit shift by eight.
++
++ <hr>
++*/
++#define BIT_SHIFT_EIGHT (8)
++
++/*! @ingroup HALLayer
++ @def BIT_SHIFT_TWELVE
++ @brief Bit shift by twelve.
++
++ <hr>
++*/
++#define BIT_SHIFT_TWELVE (12)
++
++/*! @ingroup HALLayer
++ @def BIT_SHIFT_THIRTEEN
++ @brief Bit shift by thirteen.
++
++ <hr>
++*/
++#define BIT_SHIFT_THIRTEEN (13)
++
++/*! @ingroup HALLayer
++ @def BIT_SHIFT_FOURTEEN
++ @brief Bit shift by fourteen.
++
++ <hr>
++*/
++#define BIT_SHIFT_FOURTEEN (14)
++
++/*! @ingroup HALLayer
++ @def BIT_SHIFT_FIFTEEN
++ @brief Bit shift by fifteen.
++
++ <hr>
++*/
++#define BIT_SHIFT_FIFTEEN (15)
++
++/*! @ingroup HALLayer
++ @def BIT_SHIFT_SIXTEEN
++ @brief Bit shift by sixteen.
++
++ <hr>
++*/
++#define BIT_SHIFT_SIXTEEN (16)
++
++/* bit position of certain controller bits. */
++
++/*! @ingroup HALLayer
++ @def BIT_BITT_BRP
++ @brief The Baud Pre-scaler start bit position of the CANBITT
++ register.
++
++ <hr>
++*/
++#define BIT_BITT_BRP (0)
++
++/*! @ingroup HALLayer
++ @def BIT_BITT_SJW
++ @brief The SJW start bit position of the CANBITT register.
++
++ <hr>
++*/
++#define BIT_BITT_SJW (6)
++
++/*! @ingroup HALLayer
++ @def BIT_BITT_TSEG1
++ @brief The start bit position of the time segment before a
++ sampling point
++ of the CANBITT register.
++
++ <hr>
++*/
++#define BIT_BITT_TSEG1 (8)
++
++/*! @ingroup HALLayer
++ @def BIT_BITT_TSEG2
++ @brief The start bit position of the time segment after a
++ sampling point
++ of the CANBITT register.
++
++ <hr>
++*/
++#define BIT_BITT_TSEG2 (12)
++
++/*! @ingroup HALLayer
++ @def BIT_IF1_MCONT_RXIE
++ @brief The RxIE bit position of the MCONT register.
++
++ <hr>
++*/
++#define BIT_IF1_MCONT_RXIE (10)
++
++/*! @ingroup HALLayer
++ @def BIT_IF2_MCONT_TXIE
++ @brief The TxIE bit position of the MCONT register.
++
++ <hr>
++*/
++#define BIT_IF2_MCONT_TXIE (11)
++
++/*! @ingroup HALLayer
++ @def BIT_BRPE_BRPE
++ @brief The shift value for extracting the extended baud
++	rate prescaler for the CANBRPE register.
++
++ <hr>
++*/
++#define BIT_BRPE_BRPE (6)
++
++/*! @ingroup HALLayer
++ @def BIT_ES_TXERRCNT
++ @brief The start bit position of the TX Error counter bits
++ of CANERRC register.
++
++ <hr>
++*/
++#define BIT_ES_TXERRCNT (0)
++
++/*! @ingroup HALLayer
++ @def BIT_ES_RXERRCNT
++ @brief The start bit position of the RX Error counter bits
++ of CANERRC register.
++
++ <hr>
++*/
++#define BIT_ES_RXERRCNT (8)
++
++/* macros used for masking. */
++
++/*! @ingroup HALLayer
++ @def MSK_BITT_BRP
++ @brief The mask value for extracting Baud Rate prescaler values
++ for CANBITT register
++ from the user provided value.
++
++ <hr>
++*/
++#define MSK_BITT_BRP (0x3f)
++
++/*! @ingroup HALLayer
++ @def MSK_BITT_SJW
++ @brief The mask value for SJW bits.
++
++ <hr>
++*/
++#define MSK_BITT_SJW (0xc0)
++
++/*! @ingroup HALLayer
++ @def MSK_BITT_TSEG1
++ @brief The mask value for time segment bits (before a sampling
++ point).
++
++ <hr>
++*/
++#define MSK_BITT_TSEG1 (0xf00)
++
++/*! @ingroup HALLayer
++ @def MSK_BITT_TSEG2
++ @brief The mask value for time segment bits (after a sampling
++ point)
++
++ <hr>
++*/
++#define MSK_BITT_TSEG2 (0x7000)
++
++/*! @ingroup HALLayer
++ @def MSK_BRPE_BRPE
++ @brief The mask value for extracting Baud Rate prescaler value
++ for
++ CANBRPE register from user provided value.
++
++ <hr>
++*/
++#define MSK_BRPE_BRPE (0x3c0)
++
++/*! @ingroup HALLayer
++ @def MSK_BRPE_GET
++ @brief The mask for Baud Rate prescaler bits of CANBRPE
++ register.
++
++ <hr>
++*/
++#define MSK_BRPE_GET (0x0f)
++
++/*! @ingroup HALLayer
++ @def MSK_CTRL_IE_SIE_EIE
++ @brief The mask value for IE, SIE and EIE bits of the CANCONT
++ register.
++
++ <hr>
++*/
++#define MSK_CTRL_IE_SIE_EIE (0x07)
++
++/*! @ingroup HALLayer
++ @def MSK_MCONT_TXIE
++ @brief The Mask value for TxIE bit of the MCONT register.
++
++ <hr>
++*/
++#define MSK_MCONT_TXIE (0x08)
++
++/*! @ingroup HALLayer
++ @def MSK_MCONT_RXIE
++ @brief The Mask value for RxIE bit of the MCONT register.
++
++ <hr>
++*/
++#define MSK_MCONT_RXIE (0x10)
++
++/*! @ingroup HALLayer
++ @def MSK_ALL_THREE
++ @brief The mask value for the first three bits of any register.
++
++ <hr>
++*/
++#define MSK_ALL_THREE (0x07)
++
++/*! @ingroup HALLayer
++ @def MSK_ALL_FOUR
++ @brief The mask value for the first four bits of any register.
++
++ <hr>
++*/
++#define MSK_ALL_FOUR (0x0f)
++
++/*! @ingroup HALLayer
++ @def MSK_ALL_EIGHT
++ @brief The mask value for the first eight bits of any register.
++
++ <hr>
++*/
++#define MSK_ALL_EIGHT (0xff)
++
++/*! @ingroup HALLayer
++ @def MSK_ALL_ELEVEN
++ @brief The mask value for the first eleven bits of any
++ register.
++
++ <hr>
++*/
++#define MSK_ALL_ELEVEN (0x7ff)
++
++/*! @ingroup HALLayer
++ @def MSK_ALL_THIRTEEN
++ @brief The mask value for the first thirteen bits of any
++ register.
++
++ <hr>
++*/
++#define MSK_ALL_THIRTEEN (0x1fff)
++
++/*! @ingroup HALLayer
++ @def MSK_ALL_SIXTEEN
++ @brief The mask value for the first sixteen bits of any
++ register.
++
++ <hr>
++*/
++#define MSK_ALL_SIXTEEN (0xffff)
++
++/* Error */
++
++/*! @ingroup HALLayer
++ @def MSK_ES_TXERRCNT
++ @brief The mask value for the TX Error counter bits of the
++ CANERRC
++ register.
++
++ <hr>
++*/
++#define MSK_ES_TXERRCNT ((u32)0xff << BIT_ES_TXERRCNT) /* Tx err count */
++
++/*! @ingroup HALLayer
++ @def MSK_ES_RXERRCNT
++ @brief The mask value for the RX Error counter bits of the
++ CANERRC
++ register.
++
++ <hr>
++*/
++#define MSK_ES_RXERRCNT ((u32)0x7f << BIT_ES_RXERRCNT) /* Rx err count */
++
++#define IOH_CAN_BIT_SET(reg, bitmask) \
++ IOH_WRITE_LONG((IOH_READ_LONG((reg)) | ((u32)(bitmask))), (reg))
++#define IOH_CAN_BIT_CLEAR(reg, bitmask) \
++ IOH_WRITE_LONG((IOH_READ_LONG((reg)) & ~((u32)(bitmask))), (reg))
++
++/*! @ingroup HALLayer
++ @def IOH_CAN_NO_TX_BUFF
++ @brief The flag value for denoting the unavailability of the
++ transmit message
++ object.
++
++ @see
++ - ioh_can_msg_tx
++ - ioh_can_write
++
++ <hr>
++*/
++#define IOH_CAN_NO_TX_BUFF (1)
++
++/*! @ingroup HALLayer
++ @def ERROR_COUNT
++ @brief The maximum error counter value.
++
++ @see
++ - ioh_can_get_error_stats
++
++ <hr>
++*/
++#define ERROR_COUNT (96)
++
++/*! @ingroup HALLayer
++ @struct ioh_can_os
++ @brief Defines the fields for maintaining the CAN device
++ specific information.
++ @remarks This structure is used by the driver for internal uses.
++ It stores the details of the CAN device at a
++ particular
++ instance and uses this information for
++ performing
++ operations on it.
++
++ <hr>
++*/
++
++struct ioh_can_os {
++ int can; /**< CAN: device handle */
++ unsigned int opened; /**< Linux opened device */
++ unsigned int can_num; /**< Linux: CAN Number */
++ unsigned long pci_remap; /**< Linux: MMap regs */
++ struct pci_dev *dev; /**< Linux: PCI Device */
++ unsigned int irq; /**< Linux: IRQ */
++ int block_mode; /**< Blocking / non-blocking */
++ int rx_fifo; /**< Rx FIFO */
++ wait_queue_head_t read_wait_queue; /**< Linux: Read wait queue */
++ wait_queue_head_t write_wait_queue; /**< Linux: Write wait queue */
++ unsigned int write_wait_flag; /**< Linux: Write wait flag */
++ unsigned int read_wait_flag; /**< Linux: Read wait flag */
++ spinlock_t open_spinlock; /**< Linux: Open lock variable */
++ unsigned int is_suspending; /**< Linux: Is suspending state */
++ struct inode *inode; /**< Linux: inode */
++ struct ioh_can_timing timing; /**< CAN: timing */
++ enum ioh_can_run_mode run_mode; /**< CAN: run mode */
++ enum ioh_can_listen_mode listen_mode; /**< CAN: listen mode */
++ enum ioh_can_arbiter arbiter_mode; /**< CAN: arbiter mode */
++ unsigned int tx_enable[MAX_MSG_OBJ]; /**< CAN: Tx buffer state */
++ unsigned int rx_enable[MAX_MSG_OBJ]; /**< CAN: Rx buffer state */
++ unsigned int rx_link[MAX_MSG_OBJ]; /**< CAN: Rx link set */
++ unsigned int int_enables; /**< CAN: ints enabled */
++ unsigned int int_stat; /**< CAN: int status */
++	unsigned int bus_off_interrupt;	/**< CAN: Bus off int flag	*/
++ struct ioh_can_rx_filter rx_filter[MAX_MSG_OBJ]; /**< CAN: Rx filters */
++ void (*can_callback) (struct ioh_can_os *); /**< CAN: callback function
++ pointer.*/
++ spinlock_t tx_spinlock; /**< CAN: transmission lock variable.*/
++};
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_create(u8 * io_base)
++
++ <hr>
++*/
++int ioh_can_create(u8 *io_base);
++
++/*! @ingroup HALlayerAPI
++ @fn void ioh_can_destroy(int handle
++
++ <hr>
++*/
++void ioh_can_destroy(int handle);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_set_run_mode(
++ int handle,enum ioh_can_run_mode mode)
++
++ <hr>
++*/
++int ioh_can_set_run_mode(int handle, enum ioh_can_run_mode mode);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_get_run_mode(
++ int handle,enum ioh_can_run_mode *mode)
++
++ <hr>
++*/
++int ioh_can_get_run_mode(int handle, enum ioh_can_run_mode *mode);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_set_listen_mode(
++ int handle,enum ioh_can_listen_mode mode)
++
++ <hr>
++*/
++int ioh_can_set_listen_mode(int handle,
++ enum ioh_can_listen_mode mode);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_get_listen_mode(
++ int handle,enum ioh_can_listen_mode *mode)
++
++ <hr>
++*/
++int ioh_can_get_listen_mode(int handle,
++ enum ioh_can_listen_mode *mode);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_set_arbiter_mode(
++ int handle,enum ioh_can_arbiter mode)
++
++ <hr>
++*/
++int ioh_can_set_arbiter_mode(int handle, enum ioh_can_arbiter mode);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_get_arbiter_mode(
++ int handle,enum ioh_can_arbiter *mode)
++
++ <hr>
++*/
++int ioh_can_get_arbiter_mode(int handle, enum ioh_can_arbiter *mode);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_set_restart_mode(
++ int handle,enum ioh_can_auto_restart mode)
++
++ <hr>
++*/
++int ioh_can_set_restart_mode(int handle,
++ enum ioh_can_auto_restart mode);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_get_restart_mode(
++ int handle,enum ioh_can_auto_restart *mode)
++
++ <hr>
++*/
++int ioh_can_get_restart_mode(int handle,
++ enum ioh_can_auto_restart *mode);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_set_baud_simple(
++ int handle,enum ioh_can_baud baud)
++
++ <hr>
++*/
++int ioh_can_set_baud_simple(int handle, enum ioh_can_baud baud);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_set_baud_custom(
++ int handle, struct ioh_can_timing *timing)
++
++ <hr>
++*/
++int ioh_can_set_baud_custom(int handle, struct ioh_can_timing *timing);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_get_baud(
++ int handle,struct ioh_can_timing *timing)
++
++ <hr>
++*/
++int ioh_can_get_baud(int handle, struct ioh_can_timing *timing);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_set_rx_filter(
++ int handle,struct ioh_can_rx_filter *filter
++
++ <hr>
++*/
++int ioh_can_set_rx_filter(int handle,
++ struct ioh_can_rx_filter *filter);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_get_rx_filter(
++ int handle,struct ioh_can_rx_filter *filter
++
++ <hr>
++*/
++int ioh_can_get_rx_filter(int handle,
++ struct ioh_can_rx_filter *filter);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_msg_tx(
++ int handle,struct ioh_can_msg *msg)
++
++ <hr>
++*/
++int ioh_can_msg_tx(int handle, struct ioh_can_msg *msg);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_open(
++int handle,enum ioh_can_listen_mode listen,enum ioh_can_arbiter arbiter)
++
++ <hr>
++*/
++int ioh_can_open(int handle, enum ioh_can_listen_mode listen,
++ enum ioh_can_arbiter arbiter);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_int_pending(int handle)
++
++ <hr>
++*/
++int ioh_can_int_pending(int handle);
++
++/*! @ingroup HALlayerAPI
++ @fn void ioh_can_int_clr(
++ int handle,u32 mask)
++
++ <hr>
++*/
++void ioh_can_int_clr(int handle, u32 mask);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_clear_buffers(
++ int handle)
++
++ <hr>
++*/
++int ioh_can_clear_buffers(int handle);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_rx_dequeue(
++ int handle,struct ioh_can_msg *msg,u32 buff_num)
++*/
++int ioh_can_rx_dequeue(int handle, struct ioh_can_msg *msg,
++ u32 buff_num);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_set_int_custom(
++ int handle,u32 interrupts)
++
++ <hr>
++*/
++int ioh_can_set_int_custom(int handle, u32 interrupts);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_set_int_enables(
++ int handle,enum ioh_can_interrupt interrupt)
++
++ <hr>
++*/
++int ioh_can_set_int_enables(int handle,
++ enum ioh_can_interrupt interrupt);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_get_int_enables(
++ int handle,u32 *enables)
++
++ <hr>
++*/
++int ioh_can_get_int_enables(int handle, u32 *enables);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_release(int handle)
++
++ <hr>
++*/
++int ioh_can_release(int handle);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_set_rx_buffer_link(
++ int handle,u32 buffer_num,u32 set)
++
++ <hr>
++*/
++int ioh_can_set_rx_buffer_link(int handle, u32 buffer_num,
++ u32 set);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_get_rx_buffer_link(
++ int handle,u32 buffer_num,u32 *link)
++
++ <hr>
++*/
++int ioh_can_get_rx_buffer_link(int handle, u32 buffer_num,
++ u32 *link);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_get_buffer_status(
++ int handle)
++
++ <hr>
++*/
++int ioh_can_get_buffer_status(int handle);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_rx_init_filter(
++ int handle,u32 buff_num)
++
++ <hr>
++*/
++int ioh_can_rx_init_filter(int handle, u32 buff_num);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_rx_disable_all(
++ int handle)
++
++ <hr>
++*/
++int ioh_can_rx_disable_all(int handle);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_tx_disable_all(
++ int handle)
++
++ <hr>
++*/
++int ioh_can_tx_disable_all(int handle);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_set_rx_enable(
++ int handle,u32 buff_num,u32 set)
++
++ <hr>
++*/
++int ioh_can_set_rx_enable(int handle, u32 buff_num, u32 set);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_get_rx_enable(
++ int handle,u32 buff_num,u32 *enable)
++
++ <hr>
++*/
++int ioh_can_get_rx_enable(int handle, u32 buff_num, u32 *enable);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_set_tx_enable(
++ int handle, u32 buff_num,u32 set)
++
++ <hr>
++*/
++int ioh_can_set_tx_enable(int handle, u32 buff_num, u32 set);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_get_tx_enable(
++ int handle,u32 buff_num,u32 *enable)
++
++ <hr>
++*/
++int ioh_can_get_tx_enable(int handle, u32 buff_num, u32 *enable);
++
++/*! @ingroup HALlayerAPI
++ @fn void ioh_can_config_rx_tx_buffers(
++ int handle)
++
++ <hr>
++*/
++void ioh_can_config_rx_tx_buffers(int handle);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_get_error_stats(
++ int handle,struct ioh_can_error *error)
++
++ <hr>
++*/
++int ioh_can_get_error_stats(int handle, struct ioh_can_error *error);
++
++/*! @ingroup HALlayerAPI
++ @fn irqreturn_t ioh_can_handler(
++ int irq, void *dev_id)
++
++ <hr>
++*/
++irqreturn_t ioh_can_handler(int irq, void *dev_id);
++
++/*! @ingroup HALlayerAPI
++ @fn void icp_can_log_message(u32 status)
++
++ <hr>
++*/
++void icp_can_log_message(u32 status);
++
++/*! @ingroup HALlayerAPI
++ @fn int ioh_can_reset(struct ioh_can_os *can_os)
++
++ <hr>
++*/
++int ioh_can_reset(struct ioh_can_os *can_os);
++
++/*! @ingroup HALlayerAPI
++ @fn void ioh_can_entcb(
++ void(*ioh_can_cb)(struct ioh_can_os *),struct ioh_can_os * p_can_os)
++
++ <hr>
++*/
++void ioh_can_entcb(void (*ioh_can_cb) (struct ioh_can_os *),
++ struct ioh_can_os *p_can_os);
++
++/*! @ingroup HALlayerAPI
++ @fn void ioh_can_callback(struct ioh_can_os * can_os)
++
++ <hr>
++*/
++void ioh_can_callback(struct ioh_can_os *can_os);
++#endif /* __IOH_CAN_HAL_H__ */
+diff -urN linux-2.6.33-rc3/drivers/net/can/pch_can/pch_can_main.c topcliff-2.6.33-rc3/drivers/net/can/pch_can/pch_can_main.c
+--- linux-2.6.33-rc3/drivers/net/can/pch_can/pch_can_main.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/can/pch_can/pch_can_main.c 2010-03-10 08:57:34.000000000 +0900
+@@ -0,0 +1,1681 @@
++/*!
++ * @file ioh_can_main.c
++ * @brief Provides the function definition for the CAN driver functions.
++ * @version 1.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * <hr>
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ */
++
++/* includes */
++#include <linux/module.h>
++#include <linux/cdev.h>
++#include <linux/fs.h>
++#include <linux/uaccess.h>
++#include <linux/spinlock.h>
++#include <linux/interrupt.h>
++#include <linux/wait.h>
++#include <linux/io.h>
++#include <linux/sched.h>
++
++#include "pch_common.h"
++#include "pch_debug.h"
++#include "pch_can_main.h"
++#include "pch_can_hal.h"
++#include "pch_can_utils.h"
++#include "pch_can_pci.h"
++
++/*** Function prototypes ***/
++static int ioh_candev_open(struct inode *inode, struct file *file);
++static int ioh_candev_close(struct inode *inode, struct file *file);
++static ssize_t ioh_can_read(struct file *filep, char __user * buf, size_t count,
++ loff_t *f_pos);
++static ssize_t ioh_can_write(struct file *filp, const char __user * buf,
++ size_t count, loff_t *f_pos);
++static int ioh_can_ioctl(struct inode *inode, struct file *filp,
++ unsigned int cmd, unsigned long arg);
++static void can_ioctl_buff_size(unsigned int ctl_code, size_t *in_size,
++ size_t *out_size);
++
++/*********/
++
++/*! @ingroup InterfaceLayerFacilitators
++ @struct file_ops
++ @brief Kernel structure for holding references to CAN driver
++ methods.
++			Used during the CAN driver registration.
++ @see
++ - ioh_can_probe
++
++ <hr>
++*/
++const struct file_operations file_ops = {
++ .owner = THIS_MODULE,
++ .read = ioh_can_read,
++ .write = ioh_can_write,
++ .ioctl = ioh_can_ioctl,
++ .open = ioh_candev_open,
++ .release = ioh_candev_close
++};
++
++#ifdef DEBUG
++/*! @ingroup Debug
++ @def IOCTL_CAN_DEBUG
++ @brief Included for debugging the CAN Driver API.
++ @remarks It provides an ioctl command through which,
++ the device registers can be read/write.
++ The argument for the command is the structure
++ shown below.
++
++ @note Can be used only when the DEBUG flag is set during
++ compiling.
++
++ <br>
++*/
++#define IOCTL_CAN_DEBUG (40)
++
++/*! @ingroup Debug
++ @struct debug
++ @brief Included for passing the debug commands to the
++ CAN Driver.
++ @remarks It provides a format for specifying the required
++ debug operation commands and associated
++ paramters
++ to the Driver.
++ @note Can be used only when the DEBUG flag is set during
++ compiling.
++
++ <br>
++*/
++struct debug {
++ unsigned char cmd; /**< (1: Read) (2: Write). */
++	unsigned short offset;	/**< Offset of the register.	*/
++ unsigned long value; /**< Read Value/Value to be written */
++};
++
++#endif
++
++/*! @ingroup InterfaceLayerAPI
++ @fn static int ioh_candev_open(
++ struct inode *inode, struct file *file)
++ @brief Implements the open functionalities of the CAN driver.
++ @remarks
++ This function is used as the open function of
++ the driver. The main
++ tasks performed by this function are :
++ - Confirms that at a time only one device is
++ using the CAN device.
++ - Initializes the CAN device to be used by the
++ driver.
++ - Initializes the driver data structures for
++ further use.
++ @note This function is invoked by the kernel subsystem when a
++ process
++ issues an open system call on the associated
++ device driver.
++
++ @param inode [@ref INOUT] Reference to the inode structure
++ of the device file.
++ @param file [@ref INOUT] Reference to the file structure
++ of the device file.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> Operation successful.
++ - EBUSY --> Device already opened.
++ - -ENOMEM --> request_irq error status code.
++ - -EBUSY --> request_irq error status code.
++ - -ENOSYS --> request_irq error status code.
++ - @ref IOH_CAN_FAIL --> ioh_can_open fails/
++ create_can_fifo fails.
++
++ <hr>
++
++*/
++static int ioh_candev_open(struct inode *inode, struct file *file)
++{
++ int retval = IOH_CAN_SUCCESS;
++ struct ioh_can_os *dev_can_os = \
++ (struct ioh_can_os *)&(can_os[iminor(inode)]);
++ unsigned long flags;
++
++ /* Attaining lock for open. */
++ spin_lock_irqsave(&(dev_can_os->open_spinlock), flags);
++
++ /* Check if CAN is already open */
++ if (0 == (dev_can_os->opened)) {
++ /* Resetting the hardware. */
++/* (void)ioh_can_reset(dev_can_os); */
++/* IOH_DEBUG("ioh_candev_open -> Function ioh_can_reset invoked " \
++ "successfully.\n");
++*/
++ /* Initializing the CAN hardware. */
++ retval = ioh_can_open(dev_can_os->can,
++ file->
++ f_flags & O_RDONLY ? IOH_CAN_LISTEN :
++ IOH_CAN_ACTIVE, IOH_CAN_FIXED_PRIORITY);
++
++ if (retval != IOH_CAN_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_candev_open -> ioh_can_open failed "
++ "(returned %d).\n", retval);
++ } else {
++ IOH_DEBUG
++ ("ioh_candev_open -> \
++ ioh_can_open invoked successfully "
++ "(returned %d).\n", retval);
++
++ dev_can_os->rx_fifo = create_can_fifo(NUM_NODES);
++
++ if (!(dev_can_os->rx_fifo)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_candev_open -> create_can_fifo "
++ "failed.\n");
++
++ (void)ioh_can_release(dev_can_os->can);
++ IOH_DEBUG
++ ("ioh_candev_open -> ioh_can_release invoked "
++ "successfully.\n");
++
++ retval = IOH_CAN_FAIL;
++ } else {
++ IOH_DEBUG
++ ("ioh_candev_open -> create_can_fifo invoked "
++ "successfully.\n");
++
++ /* Registering the callback function for
++ interrupt handling. */
++ ioh_can_entcb(ioh_can_callback, dev_can_os);
++ IOH_DEBUG
++ ("ioh_candev_open -> ioh_can_entcb invoked \
++ successfully.\n");
++
++				/* Registering the interrupt. */
++ retval =
++ request_irq(dev_can_os->irq,
++ ioh_can_handler, IRQF_SHARED,
++ "can", &can_os[iminor(inode)]
++ );
++
++ if (0 != retval) {
++ (void)ioh_can_release(dev_can_os->can);
++ delete_can_fifo(dev_can_os->rx_fifo);
++ dev_can_os->rx_fifo = 0;
++ dev_can_os->can_callback = NULL;
++
++ IOH_LOG(KERN_ERR,
++ "ioh_candev_open -> \
++ request_irq failed on irq %d"
++ "(returned %d).\n",
++ dev_can_os->irq, retval);
++ } else {
++ IOH_DEBUG
++ ("ioh_candev_open -> request_irq \
++ invoked "
++ "successfully(returned %d).\n",
++ retval);
++
++ /* Assuming that no bus off
++ interrupt. */
++ dev_can_os->bus_off_interrupt = 0;
++ dev_can_os->write_wait_flag = 0;
++
++ /* Setting the block mode. */
++ dev_can_os->block_mode = 1;
++
++ dev_can_os->inode = inode;
++ dev_can_os->opened = 1;
++
++ /* Storing the can structure for further
++ use. */
++ file->private_data = dev_can_os;
++
++ retval = IOH_CAN_SUCCESS;
++ }
++ }
++ }
++ } else {
++ retval = -EBUSY;
++ IOH_LOG(KERN_ERR,
++ "ioh_candev_open -> CAN device already open.\n");
++ }
++
++ spin_unlock_irqrestore(&(dev_can_os->open_spinlock), flags);
++
++ IOH_DEBUG("ioh_candev_open returns %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup InterfaceLayerAPI
++ @fn static int ioh_candev_close(struct inode *inode,
++ struct file *file)
++ @brief Implements the close functionalities of the CAN driver.
++ @remarks This function is used as the close function of the
++ driver. The main
++ tasks performed by this function are :
++ - De-initializes the CAN device hardware so that
++ the device can be closed.
++ - Releases the resources attained during the
++ opening of the device.
++ - Re-initializes the data structures so that it
++ can be used further.
++ @note This function is invoked by the Kernel subsystem when an
++ application issues a close system call on the
++ associated device file.
++
++ @param inode [@ref INOUT] Reference to the inode structure
++ of the
++ device file.
++ @param file [@ref INOUT] Reference to the file structure
++ of the
++ device file.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> The close
++ operation was successful.
++
++ <hr>
++*/
++static int ioh_candev_close(struct inode *inode, struct file *file)
++{
++ int retval;
++ struct ioh_can_os *can_os = (struct ioh_can_os *) file->private_data;
++
++ /* Attaining the lock for close. */
++ spin_lock(&(can_os->open_spinlock));
++
++ retval = ioh_can_release(can_os->can);
++ IOH_DEBUG("ioh_candev_close -> ioh_can_release invoked successfully"
++ "(returned %d).\n", retval);
++
++ (void)ioh_can_set_int_enables(can_os->can, CAN_DISABLE);
++
++ free_irq(can_os->irq, &(can_os[iminor(can_os->inode)]));
++ IOH_DEBUG("ioh_candev_close -> free_irq invoked successfully.\n");
++
++ /* Delete FIFO. */
++ delete_can_fifo(can_os->rx_fifo);
++ IOH_DEBUG
++ ("ioh_candev_close -> delete_can_fifo invoked successfully.\n");
++
++ /* Resetting the open flag. */
++ can_os->opened = 0;
++ file->private_data = NULL;
++ can_os->rx_fifo = 0;
++ can_os->can_callback = NULL;
++
++ /* Releasing the lock. */
++ spin_unlock(&(can_os->open_spinlock));
++
++ if (retval != IOH_CAN_SUCCESS) {
++ IOH_LOG(KERN_ERR, "ioh_candev_close -> Release failed "
++ "on CAN device %d.\n", iminor(inode));
++
++ }
++
++ IOH_DEBUG("ioh_candev_close returns %d.\n", IOH_CAN_SUCCESS);
++ return IOH_CAN_SUCCESS;
++}
++
++/*! @ingroup InterfaceLayerAPI
++ @fn static ssize_t ioh_can_read(
++ struct file *filp, char __user *buf,
++ size_t count,loff_t *f_pos)
++ @brief Implements the read functionalities of the CAN driver.
++ @remarks This function is used as the read function of the
++ driver. The main tasks of this function are :
++ - Reads the received message data from the
++ software FIFO if it
++ is available and copies it to the user process
++ for further
++ use.
++ - If the CAN device is in blocking mode and if
++ no data is
++ available, this function waits until the
++ message data is
++ available within the software FIFO.
++ @note This function is invoked by the Kernel subsystem when a
++ process
++ issues read system call on the associated device
++ file.
++
++ @param filp [@ref INOUT] Reference to the file structure
++ of the device file.
++ @param buf [@ref OUT] Reference to the
++ user level buffer for
++ updating the read data.
++ @param count [@ref IN] The size to be read.
++ @param f_pos [@ref INOUT] Not used.
++
++ @retval ssize_t
++ - Size of the message object
++ --> Read operation successful.
++ - -ENOMEM --> copy_to_user fails.
++ - -EAGAIN --> Device in suspended
++ mode/non blocking
++ read with software FIFO
++ empty.
++ - -EIO --> read_can_fifo fails/read
++ wait in blocking mode
++ fails.
++ - -EINVAL --> Parameter buf/count is not
++ valid.
++
++ <hr>
++*/
++static ssize_t ioh_can_read(struct file *filp, char __user * buf, size_t count,
++ loff_t *f_pos)
++{
++ ssize_t retval = IOH_CAN_SUCCESS; /* Return status value. */
++ struct ioh_can_msg msg; /* Msg variable for reading. */
++ struct ioh_can_os *can_os = (struct ioh_can_os *) filp->private_data;
++
++	/* If device suspended */
++ if ((can_os->is_suspending) == 1) {
++ IOH_LOG(KERN_ERR, "ioh_can_read -> Device suspended.\n");
++ retval = -EAGAIN;
++ }
++ /* If invalid parameters. */
++ else if ((buf == NULL) || (count < sizeof(struct ioh_can_msg))) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_read -> Invalid parameter for read.\n");
++ retval = -EINVAL;
++ }
++ /* If NON_BLOCK mode and FIFO(software) empty. */
++ else if (((can_os->block_mode == 0))
++ && (check_can_fifo_status(can_os->rx_fifo) ==
++ IOH_CAN_FIFO_EMPTY)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_read -> Buffer empty and device in non-block \
++ mode.\n");
++ retval = -EAGAIN;
++ }
++
++ /* If block mode and FIFO(software) empty. */
++ else if ((can_os->block_mode == 1)
++ && (check_can_fifo_status(can_os->rx_fifo) ==
++ IOH_CAN_FIFO_EMPTY)) {
++ IOH_DEBUG
++ ("ioh_can_read -> \
++ Waiting for FIFO to be filled with data.\n");
++ /* Preparing to wait. */
++ can_os->read_wait_flag = 1;
++ retval = wait_event_interruptible(can_os->read_wait_queue,
++ (check_can_fifo_status
++ (can_os->rx_fifo) !=
++ IOH_CAN_FIFO_EMPTY));
++
++ /* Wait fails. */
++ if (-ERESTARTSYS == retval) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_read -> \
++ Wait_event_interruptible failed on read.\n");
++ retval = -EIO;
++ }
++ }
++
++ if (IOH_CAN_SUCCESS == retval) {
++ retval = read_can_fifo(can_os->rx_fifo, &msg);
++
++ if (retval) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_read -> Read from FIFO failed.\n");
++ retval = -EIO;
++ } else {
++ IOH_DEBUG
++ ("ioh_can_read -> Read from FIFO successful.\n");
++ IOH_DEBUG("ioh_can_read -> The read message is: \n");
++ IOH_DEBUG("Msg ID : 0x%x\n", msg.id);
++ IOH_DEBUG("EXT ID : %x\n", msg.ide);
++ IOH_DEBUG("Msg Size : %hu\n", msg.dlc);
++ IOH_DEBUG("Rment : %hu\n", msg.rtr);
++ IOH_DEBUG("Dat Byt1 : 0x%x\n", msg.data[0]);
++ IOH_DEBUG("Dat Byt2 : 0x%x\n", msg.data[1]);
++ IOH_DEBUG("Dat Byt3 : 0x%x\n", msg.data[2]);
++ IOH_DEBUG("Dat Byt4 : 0x%x\n", msg.data[3]);
++ IOH_DEBUG("Dat Byt5 : 0x%x\n", msg.data[4]);
++ IOH_DEBUG("Dat Byt6 : 0x%x\n", msg.data[5]);
++ IOH_DEBUG("Dat Byt7 : 0x%x\n", msg.data[6]);
++ IOH_DEBUG("Dat Byt8 : 0x%x\n", msg.data[7]);
++
++ retval = copy_to_user(buf, &msg, sizeof(msg));
++
++ if (retval) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_read -> \
++ Copy to user failed for CAN.\n");
++ retval = -ENOMEM;
++ } else {
++ retval = sizeof(struct ioh_can_msg);
++ }
++ }
++ }
++
++ IOH_DEBUG("ioh_can_read -> Return Value: %d.\n", retval);
++ return retval;
++}
++
++/*! @ingroup InterfaceLayerAPI
++ @fn ssize_t ioh_can_write(
++ struct file *filp, const char __user *buf, size_t count,loff_t *f_pos)
++ @brief Implements the write functionalities of the CAN Driver.
++ @remarks This function is used as the write function of the
++ driver.
++ The main tasks performed by this function are :
++ - Obtains the data from the user process and
++ updates it into the
++ hardware buffers (if available) for
++ transmission as message objects.
++ - If the CAN device is in Blocking mode and if
++ no transmit message
++ object is available, then this function waits
++ until a transmit
++ buffer is available for transmission.
++ @note This function is invoked by the Kernel subsystem when a
++ process issues a write system call on the
++ associated device file.
++
++ @param filp [@ref INOUT] Reference to the file structure
++ of the device file
++ @param buf [@ref IN] Reference to the
++ user level buffer containing
++ data to be written(transmitted).
++ @param count [@ref IN] Size of the data to be
++ written.
++ @param f_pos [@ref INOUT] Not Used.
++
++ @retval ssize_t
++ - Size of the message object
++ --> Write is successful.
++ - -ENOMEM --> copy_from_user error status code
++ - -EAGAIN --> Device in suspended mode.
++ - -EIO --> Non-blocking write fails
++ - -EINVAL --> Size of CAN message not valid.
++ - @ref IOH_CAN_FAIL --> Transmit fails.
++
++ <hr>
++*/
++ssize_t ioh_can_write(struct file *filp, const char __user *buf, size_t count,
++ loff_t *f_pos)
++{
++ struct ioh_can_msg msg; /* The message object for writing. */
++ int err; /* error variable. */
++ struct ioh_can_os *can_os = (struct ioh_can_os *) filp->private_data;
++ ssize_t ret;
++
++ /* If device suspended. */
++ if ((can_os->is_suspending) == 1) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_write -> Device is in suspend mode.\n");
++ IOH_DEBUG("ioh_can_write returns %d\n", -EAGAIN);
++ return -EAGAIN;
++ }
++
++ /* if invalid count. */
++ if (count != sizeof(struct ioh_can_msg)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_write -> Write user buffer size invalid "
++ "for CAN.\n");
++ IOH_DEBUG("ioh_can_write returns %d\n", -EINVAL);
++ return -EINVAL;
++ }
++
++ err = copy_from_user(&msg, buf, count);
++
++ if (err) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_write -> Copy from user failed for CAN in "
++ "write operation.\n");
++ IOH_DEBUG("ioh_can_write returns %d\n", -ENOMEM);
++ return -ENOMEM;
++ }
++
++ /* The wait flag. */
++ can_os->write_wait_flag = 1;
++
++ /* Transmitting the message. */
++ err = ioh_can_msg_tx(can_os->can, &msg);
++
++ if ((err != IOH_CAN_SUCCESS)) {
++ /* Transmission failed due to unavailability of transmit object
++ and it is block mode. */
++ if ((IOH_CAN_NO_TX_BUFF == err) && (can_os->block_mode == 1)) {
++ IOH_DEBUG
++ ("ioh_can_write -> Waiting for transmit message "
++ "object.\n");
++
++ /* Prpearing to wait. */
++ err = wait_event_interruptible(can_os->write_wait_queue,
++ can_os->
++ write_wait_flag == 0);
++
++ if (-ERESTARTSYS == err) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_write -> \
++ Write wait failed.\n");
++ IOH_DEBUG("ioh_can_write returns %d\n",
++ -EAGAIN);
++ return -EAGAIN;
++ }
++
++ /* Transmitting again. */
++ err = ioh_can_msg_tx(can_os->can, &msg);
++
++ /* If again error. */
++ if (err != IOH_CAN_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_write -> Transmit failed "
++ "after 2 attempts.\n");
++ IOH_DEBUG("ioh_can_write returns %d\n",
++ IOH_CAN_FAIL);
++ return IOH_CAN_FAIL;
++ }
++ } else { /* If failed due to some other reasons. */
++
++ IOH_LOG(KERN_ERR,
++ "ioh_can_write -> Write from CAN device "
++ "failed %d.\n", -EIO);
++ IOH_DEBUG("ioh_can_write returns %d\n", -EIO);
++ return -EIO;
++ }
++ }
++ IOH_DEBUG
++ ("ioh_can_write -> Message send for transmission successfully.\n");
++ IOH_DEBUG("The transmitted Message is :\n");
++ IOH_DEBUG("Msg ID : 0x%x\n", msg.id);
++ IOH_DEBUG("EXT ID : %hu\n", msg.ide);
++ IOH_DEBUG("Msg Size : %hu\n", msg.dlc);
++ IOH_DEBUG("Rment : %hu\n", msg.rtr);
++ IOH_DEBUG("Dat Byt1 : 0x%x\n", msg.data[0]);
++ IOH_DEBUG("Dat Byt2 : 0x%x\n", msg.data[1]);
++ IOH_DEBUG("Dat Byt3 : 0x%x\n", msg.data[2]);
++ IOH_DEBUG("Dat Byt4 : 0x%x\n", msg.data[3]);
++ IOH_DEBUG("Dat Byt5 : 0x%x\n", msg.data[4]);
++ IOH_DEBUG("Dat Byt6 : 0x%x\n", msg.data[5]);
++ IOH_DEBUG("Dat Byt7 : 0x%x\n", msg.data[6]);
++ IOH_DEBUG("Dat Byt8 : 0x%x\n", msg.data[7]);
++
++ IOH_DEBUG("ioh_can_write -> Write from CAN device successful "
++ "( returns %d).", sizeof(struct ioh_can_msg));
++
++ ret = sizeof(struct ioh_can_msg);
++ return ret;
++}
++
++/*! @ingroup InterfaceLayerAPI
++ @fn static int ioh_can_ioctl(
++struct inode *inode, struct file *filp, unsigned int cmd,unsigned long arg)
++ @brief Implements the ioctl functionalities of the CAN driver.
++ @remarks This function is used as the ioctl function of the
++ Driver.
++ The main tasks performed by this function are :
++ - Checks for the validity of the obtained IOCTL
++ command.
++ - Performs the associated operation for the
++ valid IOCTL
++ command by invoking the corresponding HAL
++ APIs.
++ - Copies the data if required back to the user
++ process.
++ @note This function is invoked by the Kernel subsystem when
++ a process issues an ioctl system call on the
++ associated
++ device file.
++
++ @param inode [@ref INOUT] Reference to the inode
++ structure of the device
++ file.
++ @param filp [@ref INOUT] Reference to the file
++ structure of the device
++ file.
++ @param cmd [@ref IN] The ioctl command to be
++ executed.
++ @param arg [@ref INOUT] The argument reference
++ to be used.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> The operation was
++ successful.
++ - -ENOMEM --> copy_to_user/copy_from_user fails.
++ - -EAGAIN --> Memory allocation fails for input
++ /output buffer.
++ - @ref IOH_CAN_FAIL --> IOCTL fails.
++
++ <hr>
++*/
++static int ioh_can_ioctl
++ (struct inode *inode, struct file *filp, unsigned int cmd,
++ unsigned long arg) {
++ struct ioh_can_os *can_os;
++ int retval = IOH_CAN_SUCCESS;
++ void *in = NULL;
++ void *out = NULL;
++ size_t in_buff_size = 0;
++ size_t out_buff_size = 0;
++ can_os = (struct ioh_can_os *) filp->private_data;
++
++ can_ioctl_buff_size(cmd, &in_buff_size, &out_buff_size);
++
++ if (can_os->is_suspending == 1) {
++ IOH_LOG(KERN_ERR, "ioh_can_ioctl -> Device suspended.\n");
++ retval = -EAGAIN;
++ }
++
++ if ((in_buff_size != 0) && (retval == IOH_CAN_SUCCESS)) {
++ in = kmalloc(in_buff_size, GFP_KERNEL);
++
++ if (in != NULL) {
++ retval = copy_from_user(in, (void *)arg, in_buff_size);
++ if (retval != 0) {
++ IOH_LOG(KERN_ERR, "ioh_can_ioctl -> Copy from "
++ "user failed.\n");
++ retval = -ENOMEM;
++ }
++ } else {
++ IOH_LOG(KERN_ERR, "ioh_can_ioctl -> Memory allocation "
++ "failed for input argument.\n");
++ retval = -EAGAIN;
++ }
++ }
++
++ if ((out_buff_size != 0) && (IOH_CAN_SUCCESS == retval)) {
++ out = kmalloc(out_buff_size, GFP_KERNEL);
++
++ if (out == NULL) {
++ IOH_LOG(KERN_ERR, "ioh_can_ioctl -> Memory allocation "
++ "failed for output argument.\n");
++ retval = -EAGAIN;
++ }
++ }
++
++ if (IOH_CAN_SUCCESS == retval) {
++ switch (cmd) {
++ case IOCTL_CAN_RESET:
++ IOH_DEBUG("ioh_can_ioctl -> IOCTL_CAN_RESET\n");
++ retval = ioh_can_reset(can_os);
++ break;
++
++ case IOCTL_CAN_RUN:
++ IOH_DEBUG("ioh_can_ioctl -> Command received "
++ "IOCTL_CAN_RUN\n");
++ retval = ioh_can_set_run_mode(can_os->can, IOH_CAN_RUN);
++ break;
++
++ case IOCTL_CAN_RUN_GET:
++ IOH_DEBUG("ioh_can_ioctl -> Command received "
++ "IOCTL_CAN_RUN_GET\n");
++ retval =
++ ioh_can_get_run_mode(can_os->can,
++ (enum ioh_can_run_mode *) out);
++ break;
++
++ case IOCTL_CAN_STOP:
++ IOH_DEBUG("ioh_can_ioctl -> Command received "
++ "IOCTL_CAN_STOP:\n");
++ retval =
++ ioh_can_set_run_mode(can_os->can, IOH_CAN_STOP);
++ break;
++
++ case IOCTL_CAN_SIMPLE:
++ {
++ enum ioh_can_run_mode curr_mode;
++ IOH_DEBUG("ioh_can_ioctl -> Command "
++ "received IOCTL_CAN_SIMPLE\n");
++
++ (void)ioh_can_get_run_mode(can_os->can,
++ &curr_mode);
++ if (curr_mode == IOH_CAN_RUN) {
++ (void)ioh_can_set_run_mode(can_os->can,
++ IOH_CAN_STOP);
++ }
++ retval =
++ ioh_can_set_baud_simple(can_os->can,
++ *((enum ioh_can_baud *)
++ in));
++ if (curr_mode == IOH_CAN_RUN) {
++ (void)ioh_can_set_run_mode(can_os->can,
++ IOH_CAN_RUN);
++ }
++ }
++ break;
++
++ case IOCTL_CAN_CUSTOM:
++ {
++ enum ioh_can_run_mode curr_mode;
++
++ IOH_DEBUG("ioh_can_ioctl -> Command "
++ "received IOCTL_CAN_CUSTOM\n");
++
++ (void)ioh_can_get_run_mode(can_os->can,
++ &curr_mode);
++ if (curr_mode == IOH_CAN_RUN) {
++ (void)ioh_can_set_run_mode(can_os->can,
++ IOH_CAN_STOP);
++ }
++ retval =
++ ioh_can_set_baud_custom(can_os->can,
++ (struct ioh_can_timing *)
++ in);
++ if (curr_mode == IOH_CAN_RUN) {
++ (void)ioh_can_set_run_mode(can_os->can,
++ IOH_CAN_RUN);
++ }
++ }
++ break;
++
++ case IOCTL_CAN_TIMING_GET:
++ IOH_DEBUG("ioh_can_ioctl -> IOCTL_CAN_TIMING_GET\n");
++ retval =
++ ioh_can_get_baud(can_os->can,
++ (struct ioh_can_timing *) out);
++ break;
++
++ case IOCTL_CAN_FILTER:
++ {
++ unsigned int buff_num = 0;
++ int i = 0;
++ unsigned int status = 0;
++ struct ioh_can_rx_filter *rx_filter =
++ (struct ioh_can_rx_filter *) in;
++
++ IOH_DEBUG("ioh_can_ioctl -> Command received "
++ "IOCTL_CAN_FILTER\n");
++
++ for (i = 0;
++ i <
++ (ioh_can_tx_buf_size +
++ ioh_can_rx_buf_size); i++) {
++ if (ioh_msg_obj_conf[i] == 0) {
++ buff_num++;
++ if (buff_num ==
++ (rx_filter->num)) {
++ break;
++ }
++ }
++ }
++
++ if (i ==
++ (ioh_can_tx_buf_size +
++ ioh_can_rx_buf_size)) {
++ IOH_DEBUG("ioh_can_ioctl -> InValid "
++ "message buffer.");
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Here i is the index, however (i+1) is
++ object number. */
++ rx_filter->num = (i + 1);
++
++ (void)ioh_can_get_rx_enable(can_os->can,
++ rx_filter->
++ num,
++ &status);
++ if (status == ENABLE) {
++ (void)
++ ioh_can_set_rx_enable
++ (can_os->can,
++ rx_filter->num, DISABLE);
++ }
++ retval =
++ ioh_can_set_rx_filter(can_os->can,
++ rx_filter);
++ if (status == ENABLE) {
++ (void)
++ ioh_can_set_rx_enable
++ (can_os->can,
++ rx_filter->num, ENABLE);
++ }
++ }
++ }
++ break;
++
++ case IOCTL_CAN_FILTER_GET:
++ {
++ unsigned int buff_num = 0;
++ int i = 0;
++ unsigned int status = 0;
++ struct ioh_can_rx_filter *rx_filter =
++ (struct ioh_can_rx_filter *) out;
++
++ IOH_DEBUG("ioh_can_ioctl -> Command received "
++ "IOCTL_CAN_FILTER_GET\n");
++
++ rx_filter->num =
++ ((struct ioh_can_rx_filter *) in)->num;
++
++ for (i = 0;
++ i <
++ (ioh_can_tx_buf_size +
++ ioh_can_rx_buf_size); i++) {
++ if (ioh_msg_obj_conf[i] == 0) {
++ buff_num++;
++ if (buff_num == rx_filter->num)
++ break;
++ }
++ }
++
++ if (i ==
++ (ioh_can_tx_buf_size +
++ ioh_can_rx_buf_size)) {
++ IOH_DEBUG
++ ("ioh_can_ioctl -> InValid message "
++ "buffer %x.\n", rx_filter->num);
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Here i is the index, however (i+1) is
++ object number. */
++ rx_filter->num = (i + 1);
++
++ (void)ioh_can_get_rx_enable(can_os->can,
++ rx_filter->
++ num,
++ &status);
++
++ if (status == ENABLE) {
++ (void)
++ ioh_can_set_rx_enable
++ (can_os->can,
++ rx_filter->num, DISABLE);
++ }
++
++ retval =
++ ioh_can_get_rx_filter(can_os->can,
++ rx_filter);
++
++ if (status == ENABLE) {
++ (void)
++ ioh_can_set_rx_enable
++ (can_os->can,
++ rx_filter->num, ENABLE);
++ }
++
++ rx_filter->num = buff_num;
++
++ IOH_DEBUG("Rx Msg Obj : %u\n",
++ rx_filter->num);
++ IOH_DEBUG("ID : %u\n",
++ rx_filter->aidr.id);
++ IOH_DEBUG("Extended ID : %u\n",
++ rx_filter->aidr.id_ext);
++ IOH_DEBUG("Umask : %u\n",
++ rx_filter->umask);
++ IOH_DEBUG("Mask Id : %u\n",
++ rx_filter->amr.id);
++ IOH_DEBUG("Ext Mask : %u\n",
++ rx_filter->amr.id_ext);
++
++ }
++
++ }
++ break;
++
++ case IOCTL_CAN_BLOCK:
++ IOH_DEBUG
++ ("ioh_can_ioctl -> Command received \
++ IOCTL_CAN_BLOCK\n");
++ can_os->block_mode = 1;
++ retval = IOH_CAN_SUCCESS;
++ IOH_DEBUG
++ ("ioh_can_ioctl -> Block mode set successfully.\n");
++ break;
++
++ case IOCTL_CAN_NON_BLOCK:
++ IOH_DEBUG
++ ("ioh_can_ioctl -> Command received \
++ IOCTL_CAN_NON_BLOCK\n");
++ can_os->block_mode = 0;
++ retval = IOH_CAN_SUCCESS;
++ IOH_DEBUG
++ ("ioh_can_ioctl -> Non-Block mode set \
++ successfully.\n");
++ break;
++
++ case IOCTL_CAN_BLOCK_GET:
++ IOH_DEBUG
++ ("ioh_can_ioctl -> Command received \
++ IOCTL_CAN_BLOCK_GET\n");
++ *((unsigned int *)out) = can_os->block_mode;
++ retval = IOH_CAN_SUCCESS;
++ IOH_DEBUG("ioh_can_ioctl -> Mode: "
++ "%s\n",
++ (((unsigned int)(*(unsigned int *)out) ==
++ 1) ? "BLOCK" : "NON BLOCK"));
++ break;
++
++ case IOCTL_CAN_LISTEN:
++ {
++ enum ioh_can_run_mode curr_mode;
++
++ IOH_DEBUG
++ ("ioh_can_ioctl -> Command received \
++ IOCTL_CAN_LISTEN\n");
++
++ (void)ioh_can_get_run_mode(can_os->can,
++ &curr_mode);
++ if (curr_mode == IOH_CAN_RUN) {
++ (void)ioh_can_set_run_mode(can_os->can,
++ IOH_CAN_STOP);
++ }
++
++ retval =
++ ioh_can_set_listen_mode(can_os->can,
++ IOH_CAN_LISTEN);
++
++ if (curr_mode == IOH_CAN_RUN) {
++ (void)ioh_can_set_run_mode(can_os->can,
++ IOH_CAN_RUN);
++ }
++ }
++ break;
++
++ case IOCTL_CAN_ACTIVE:
++ {
++ enum ioh_can_run_mode curr_mode;
++
++ IOH_DEBUG("ioh_can_ioctl -> Command received "
++ "IOCTL_CAN_ACTIVE\n");
++
++ (void)ioh_can_get_run_mode(can_os->can,
++ &curr_mode);
++ if (curr_mode == IOH_CAN_RUN) {
++ (void)ioh_can_set_run_mode(can_os->can,
++ IOH_CAN_STOP);
++ }
++
++ retval =
++ ioh_can_set_listen_mode(can_os->can,
++ IOH_CAN_ACTIVE);
++
++ if (curr_mode == IOH_CAN_RUN) {
++ (void)ioh_can_set_run_mode(can_os->can,
++ IOH_CAN_RUN);
++ }
++ }
++ break;
++
++ case IOCTL_CAN_LISTEN_GET:
++ IOH_DEBUG("ioh_can_ioctl -> Command received "
++ "IOCTL_CAN_LISTEN_GET\n");
++ retval =
++ ioh_can_get_listen_mode(can_os->can,
++ (enum ioh_can_listen_mode *)
++ out);
++ break;
++
++ case IOCTL_CAN_ARBITER_ROUND_ROBIN:
++ IOH_DEBUG("ioh_can_ioctl -> Command received "
++ "IOCTL_CAN_ARBITER_ROUND_ROBIN\n");
++ retval =
++ ioh_can_set_arbiter_mode(can_os->can,
++ IOH_CAN_ROUND_ROBIN);
++ break;
++
++ case IOCTL_CAN_ARBITER_FIXED_PRIORITY:
++ IOH_DEBUG("ioh_can_ioctl -> Command received "
++ "IOCTL_CAN_ARBITER_FIXED_PRIORITY\n");
++ retval =
++ ioh_can_set_arbiter_mode(can_os->can,
++ IOH_CAN_FIXED_PRIORITY);
++ break;
++
++ case IOCTL_CAN_ARBITER_GET:
++ IOH_DEBUG("ioh_can_ioctl -> Command received "
++ "IOCTL_CAN_ARBITER_GET\n");
++ retval =
++ ioh_can_get_arbiter_mode(can_os->can,
++ (enum ioh_can_arbiter *) out);
++ break;
++
++ case IOCTL_CAN_ERROR_STATS_GET:
++ IOH_DEBUG("ioh_can_ioctl -> Command received "
++ "IOCTL_CAN_ERROR_STATS_GET\n");
++ retval =
++ ioh_can_get_error_stats(can_os->can,
++ (struct ioh_can_error *) out);
++ break;
++
++ case IOCTL_CAN_RESTART_MODE_AUTO:
++ IOH_DEBUG("ioh_can_ioctl -> Command received "
++ "IOCTL_CAN_RESTART_MODE_AUTO\n");
++ retval =
++ ioh_can_set_restart_mode(can_os->can, CAN_AUTO);
++ break;
++
++ case IOCTL_CAN_RESTART_MODE_MANUAL:
++ IOH_DEBUG("ioh_can_ioctl -> Command received "
++ "IOCTL_CAN_RESTART_MODE_MANUAL\n");
++ retval =
++ ioh_can_set_restart_mode(can_os->can, CAN_MANUAL);
++ break;
++
++ case IOCTL_CAN_RESTART_MODE_GET:
++ IOH_DEBUG("ioh_can_ioctl -> Command received "
++ "IOCTL_CAN_RESTART_MODE_GET\n");
++ retval =
++ ioh_can_get_restart_mode(can_os->can,
++ (enum ioh_can_auto_restart *)
++ out);
++ break;
++
++ case IOCTL_CAN_BUFFER_LINK_SET:
++ {
++ unsigned int buff_num = 0, msg_obj = 0;
++ int i = 0;
++ unsigned int status = 0;
++
++ IOH_DEBUG("ioh_can_ioctl -> Command received "
++ "IOCTL_CAN_BUFFER_LINK_SET\n");
++ for (i = 0;
++ (i <
++ (ioh_can_tx_buf_size +
++ ioh_can_rx_buf_size)); i++) {
++ if (ioh_msg_obj_conf[i] == 0) {
++ buff_num++;
++ if (buff_num ==
++ (*(unsigned int *)in)) {
++ break;
++ }
++ }
++ }
++
++ if (i ==
++ (ioh_can_tx_buf_size +
++ ioh_can_rx_buf_size)) {
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Here i is the index, however (i+1) is
++ object number. */
++ msg_obj = (i + 1);
++
++ (void)ioh_can_get_rx_enable(can_os->can,
++ msg_obj,
++ &status);
++
++ if (status == ENABLE) {
++ (void)
++ ioh_can_set_rx_enable
++ (can_os->can, msg_obj,
++ DISABLE);
++ }
++
++ IOH_DEBUG
++ ("ioctl -> Calling \
++ ioh_can_set_rx_buffer_link "
++ "for Rx buffer %d[%d]\n",
++ (*(int *)in), msg_obj);
++ retval =
++ ioh_can_set_rx_buffer_link(can_os->
++ can,
++ msg_obj,
++ ENABLE);
++
++ if (status == ENABLE) {
++ (void)
++ ioh_can_set_rx_enable
++ (can_os->can, msg_obj,
++ ENABLE);
++ }
++ }
++
++ }
++ break;
++
++ case IOCTL_CAN_BUFFER_LINK_CLEAR:
++ {
++ unsigned int buff_num = 0, msg_obj = 0;
++ int i = 0;
++ unsigned status = 0;
++
++ IOH_DEBUG("ioh_can_ioctl -> Command received "
++ "IOCTL_CAN_BUFFER_LINK_CLEAR\n");
++ for (i = 0;
++ (i <
++ (ioh_can_tx_buf_size +
++ ioh_can_rx_buf_size)); i++) {
++ if (ioh_msg_obj_conf[i] == 0) {
++ buff_num++;
++ if (buff_num ==
++ (*(unsigned int *)in)) {
++ break;
++ }
++ }
++ }
++
++ if (i ==
++ (ioh_can_tx_buf_size +
++ ioh_can_rx_buf_size)) {
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Here i is the index, however (i+1)
++ is object number. */
++ msg_obj = (i + 1);
++
++ (void)ioh_can_get_rx_enable(can_os->can,
++ msg_obj,
++ &status);
++
++ if (status == ENABLE) {
++ (void)
++ ioh_can_set_rx_enable
++ (can_os->can, msg_obj,
++ DISABLE);
++ }
++
++ IOH_DEBUG
++ ("ioctl -> Calling \
++ ioh_can_set_rx_buffer_link for "
++ "Rx buffer %d[%d]\n", (*(int *)in),
++ msg_obj);
++ retval =
++ ioh_can_set_rx_buffer_link(can_os->
++ can,
++ msg_obj,
++ DISABLE);
++
++ if (status == ENABLE) {
++ (void)
++ ioh_can_set_rx_enable
++ (can_os->can, msg_obj,
++ ENABLE);
++ }
++ }
++
++ }
++ break;
++
++ case IOCTL_CAN_BUFFER_LINK_GET:
++ {
++ unsigned int buff_num = 0, msg_obj = 0;
++ int i = 0;
++
++ IOH_DEBUG("ioh_can_ioctl -> Command received "
++ "IOCTL_CAN_BUFFER_LINK_GET\n");
++ for (i = 0;
++ i <
++ (ioh_can_tx_buf_size +
++ ioh_can_rx_buf_size); i++) {
++ if (ioh_msg_obj_conf[i] == 0) {
++ buff_num++;
++ if (buff_num ==
++ (*(unsigned int *)in)) {
++ break;
++ }
++ }
++ }
++
++ if (i ==
++ (ioh_can_tx_buf_size +
++ ioh_can_rx_buf_size)) {
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Here i is the index, however (i+1) is
++ object number. */
++ msg_obj = (i + 1);
++
++ IOH_DEBUG
++ ("ioctl -> Calling \
++ ioh_can_get_rx_buffer_link for "
++ "Rx buffer %d[%d]\n", (*(int *)in),
++ msg_obj);
++ retval =
++ ioh_can_get_rx_buffer_link(can_os->
++ can,
++ msg_obj,
++ (unsigned
++ int *)
++ out);
++
++ }
++ }
++ break;
++
++ case IOCTL_CAN_RX_ENABLE_SET:
++ {
++ unsigned int buff_num = 0, msg_obj = 0;
++ int i = 0;
++ unsigned int status = 0;
++
++ IOH_DEBUG("ioh_can_ioctl -> Command received "
++ "IOCTL_CAN_RX_ENABLE_SET\n");
++ for (i = 0;
++ i <
++ (ioh_can_tx_buf_size +
++ ioh_can_rx_buf_size); i++) {
++ if (ioh_msg_obj_conf[i] == 0) {
++ buff_num++;
++ if (buff_num == (*(int *)in))
++ break;
++ }
++ }
++
++ if (i ==
++ (ioh_can_tx_buf_size +
++ ioh_can_rx_buf_size)) {
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Here i is the index, however (i+1) is
++ object number. */
++ msg_obj = (i + 1);
++
++ (void)ioh_can_get_rx_enable(can_os->can,
++ msg_obj,
++ &status);
++ if (status == DISABLE) {
++ retval =
++ ioh_can_set_rx_enable
++ (can_os->can, msg_obj,
++ ENABLE);
++ }
++
++ }
++ }
++ break;
++
++ case IOCTL_CAN_RX_ENABLE_CLEAR:
++ {
++ unsigned int buff_num = 0, msg_obj = 0;
++ int i = 0;
++ unsigned int status = 0;
++
++ IOH_DEBUG("ioh_can_ioctl -> Command received "
++ "IOCTL_CAN_RX_ENABLE_CLEAR\n");
++ for (i = 0;
++ i <
++ (ioh_can_tx_buf_size +
++ ioh_can_rx_buf_size); i++) {
++ if (ioh_msg_obj_conf[i] == 0) {
++ buff_num++;
++ if (buff_num == (*(int *)in))
++ break;
++ }
++ }
++
++ if (i ==
++ (ioh_can_tx_buf_size +
++ ioh_can_rx_buf_size)) {
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Here i is the index, however (i+1) is
++ object number. */
++ msg_obj = (i + 1);
++
++ (void)ioh_can_get_rx_enable(can_os->can,
++ msg_obj,
++ &status);
++ if (status == ENABLE) {
++ retval =
++ ioh_can_set_rx_enable
++ (can_os->can, msg_obj,
++ DISABLE);
++ }
++
++ }
++ }
++ break;
++
++ case IOCTL_CAN_RX_ENABLE_GET:
++ {
++ unsigned int buff_num = 0, msg_obj = 0;
++ int i = 0;
++
++ IOH_DEBUG("ioh_can_ioctl -> Command received "
++ "IOCTL_CAN_RX_ENABLE_GET\n");
++ for (i = 0;
++ i <
++ (ioh_can_tx_buf_size +
++ ioh_can_rx_buf_size); i++) {
++ if (ioh_msg_obj_conf[i] == 0) {
++ buff_num++;
++ if (buff_num == (*(int *)in))
++ break;
++ }
++ }
++
++ if (i ==
++ (ioh_can_tx_buf_size +
++ ioh_can_rx_buf_size)) {
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Here i is the index, however (i+1) is
++ object number. */
++ msg_obj = (i + 1);
++
++ retval =
++ ioh_can_get_rx_enable(can_os->can,
++ msg_obj,
++ (unsigned int
++ *)out);
++
++ }
++ }
++ break;
++
++ case IOCTL_CAN_TX_ENABLE_SET:
++ {
++ unsigned int buff_num = 0, msg_obj = 0;
++ int i = 0;
++ unsigned int status = 0;
++
++ IOH_DEBUG("ioh_can_ioctl -> Command received "
++ "IOCTL_CAN_TX_ENABLE_SET\n");
++ for (i = 0;
++ i <
++ (ioh_can_tx_buf_size +
++ ioh_can_rx_buf_size); i++) {
++ if (ioh_msg_obj_conf[i] == 1) {
++ buff_num++;
++ if (buff_num == (*(int *)in))
++ break;
++ }
++ }
++
++ if (i ==
++ (ioh_can_tx_buf_size +
++ ioh_can_rx_buf_size)) {
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Here i is the index, however (i+1) is
++ object number. */
++ msg_obj = (i + 1);
++
++ (void)ioh_can_get_tx_enable(can_os->can,
++ msg_obj,
++ &status);
++ if (status == DISABLE) {
++ retval =
++ ioh_can_set_tx_enable
++ (can_os->can, msg_obj,
++ ENABLE);
++ }
++
++ }
++ }
++ break;
++
++ case IOCTL_CAN_TX_ENABLE_CLEAR:
++ {
++ unsigned int buff_num = 0, msg_obj = 0;
++ int i = 0;
++ unsigned int status = 0;
++
++ IOH_DEBUG("ioh_can_ioctl -> Command received "
++ "IOCTL_CAN_TX_ENABLE_CLEAR\n");
++ for (i = 0;
++ i <
++ (ioh_can_tx_buf_size +
++ ioh_can_rx_buf_size); i++) {
++ if (ioh_msg_obj_conf[i] == 1) {
++ buff_num++;
++ if (buff_num == (*(int *)in))
++ break;
++ }
++ }
++
++ if (i ==
++ (ioh_can_tx_buf_size +
++ ioh_can_rx_buf_size)) {
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Here i is the index, however (i+1) is
++ object number. */
++ msg_obj = (i + 1);
++
++ (void)ioh_can_get_tx_enable(can_os->can,
++ msg_obj,
++ &status);
++ if (status == ENABLE) {
++ retval =
++ ioh_can_set_tx_enable
++ (can_os->can, msg_obj,
++ DISABLE);
++ }
++
++ }
++ }
++ break;
++
++ case IOCTL_CAN_TX_ENABLE_GET:
++ {
++ unsigned int buff_num = 0, msg_obj = 0;
++ int i = 0;
++
++ IOH_DEBUG("ioh_can_ioctl -> Command received "
++ "IOCTL_CAN_TX_ENABLE_GET\n");
++ for (i = 0;
++ i <
++ (ioh_can_tx_buf_size +
++ ioh_can_rx_buf_size); i++) {
++ if (ioh_msg_obj_conf[i] == 1) {
++ buff_num++;
++ if (buff_num == (*(int *)in))
++ break;
++ }
++ }
++
++ if (i ==
++ (ioh_can_tx_buf_size +
++ ioh_can_rx_buf_size)) {
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Here i is the index, however (i+1) is
++ object number. */
++ msg_obj = (i + 1);
++
++ retval =
++ ioh_can_get_tx_enable(can_os->can,
++ msg_obj,
++ (unsigned int
++ *)out);
++
++ }
++ }
++ break;
++
++#ifdef DEBUG
++ /* Inlcuded for debugging. */
++ case IOCTL_CAN_DEBUG:
++ {
++ struct debug in;
++ retval =
++ copy_from_user((void *)&in, (void *)arg,
++ sizeof(struct debug));
++
++ if (in.cmd == 1) { /* Read operation. */
++ in.value =
++ (ioread32
++ ((void __iomem *)(can_os->
++ pci_remap +
++ in.offset))
++ & MSK_ALL_SIXTEEN);
++ IOH_DEBUG("Offset: 0x%x\nRead value: "
++ "0x%x\n",
++ (unsigned int)(in.offset),
++ (unsigned int)(in.
++ value &
++ MSK_ALL_SIXTEEN));
++
++ retval =
++ copy_to_user((void *)arg,
++ (void *)&in,
++ sizeof(struct debug));
++
++ }
++
++ else if (in.cmd == 2) {
++ (void)iowrite32(in.value,
++ (void *)(can_os->
++ pci_remap +
++ in.offset));
++ if (in.value ==
++ ((ioread32
++ ((void *)(can_os->pci_remap +
++ in.offset)))
++ & MSK_ALL_SIXTEEN)) {
++ retval = IOH_CAN_SUCCESS;
++ } else {
++ retval = IOH_CAN_FAIL;
++ }
++
++ }
++
++ else {
++ retval = IOH_CAN_FAIL;
++ }
++
++ }
++ break;
++#endif
++
++ default:
++ IOH_DEBUG("Unrecognizined IOCTL, skipping 0x%x. \n",
++ cmd);
++ retval = -EINVAL;
++ break;
++ }
++
++ if ((out_buff_size != 0) && (IOH_CAN_SUCCESS == retval)) {
++ retval = copy_to_user((void *)arg, out, out_buff_size);
++ if (retval != 0) {
++ IOH_LOG(KERN_ERR, "Copy to user failed for "
++ "for CAN in IOCTL operation.\n");
++ retval = -ENOMEM;
++ } else {
++ retval = IOH_CAN_SUCCESS;
++ }
++ }
++
++ }
++
++ if (in != NULL)
++ kfree(in);
++ if (out != NULL)
++ kfree(out);
++
++ IOH_DEBUG("ioh_can_ioctl returns %d.\n", retval);
++ return retval;
++}
++
++/*! @ingroup InterfaceLayerAPI
++ @fn static void can_ioctl_buff_size(
++ unsigned int ctl_code,size_t *in_size,size_t *out_size)
++ @brief Calculates the size of memory required for the IN and
++ OUT arguments of a
++ specific ioctl command.
++ @remarks This function is used to obtain the size of the memory
++ space required to store the data for a
++ particular IOCTL
++ command.
++ @note For an invalid IOCTL command this function returns IN
++ and
++ OUT size as 0.
++
++ @param ctl_code [@ref IN] The ioctl command.
++ @param in_size [@ref OUT] The size of the IN argument.
++ @param out_size [@ref OUT] The size of the OUT argument.
++
++ @retval None
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++static void can_ioctl_buff_size(unsigned int ctl_code, size_t *in_size,
++ size_t *out_size)
++{
++ *in_size = 0;
++ *out_size = 0;
++
++ switch (ctl_code) {
++
++ case IOCTL_CAN_SIMPLE:
++ *in_size = sizeof(enum ioh_can_baud);
++ break;
++
++ case IOCTL_CAN_CUSTOM:
++ *in_size = sizeof(struct ioh_can_timing);
++ break;
++
++ case IOCTL_CAN_FILTER:
++ *in_size = sizeof(struct ioh_can_rx_filter);
++ break;
++
++ case IOCTL_CAN_RUN_GET:
++ *out_size = sizeof(enum ioh_can_run_mode);
++ break;
++
++ case IOCTL_CAN_TIMING_GET:
++ *out_size = sizeof(struct ioh_can_timing);
++ break;
++
++ case IOCTL_CAN_FILTER_GET:
++ *out_size = sizeof(struct ioh_can_rx_filter);
++ *in_size = sizeof(struct ioh_can_rx_filter);
++ break;
++
++ case IOCTL_CAN_BLOCK_GET:
++ *out_size = sizeof(unsigned int);
++ break;
++
++ case IOCTL_CAN_LISTEN_GET:
++ *out_size = sizeof(enum ioh_can_listen_mode);
++ break;
++
++ case IOCTL_CAN_ARBITER_GET:
++ *out_size = sizeof(enum ioh_can_arbiter);
++ break;
++
++ case IOCTL_CAN_ERROR_STATS_GET:
++ *out_size = sizeof(struct ioh_can_error);
++ break;
++
++ case IOCTL_CAN_RESTART_MODE_GET:
++ *out_size = sizeof(enum ioh_can_auto_restart);
++ break;
++
++ case IOCTL_CAN_BUFFER_LINK_SET:
++ case IOCTL_CAN_BUFFER_LINK_CLEAR:
++ case IOCTL_CAN_RX_ENABLE_SET:
++ case IOCTL_CAN_RX_ENABLE_CLEAR:
++ case IOCTL_CAN_TX_ENABLE_SET:
++ case IOCTL_CAN_TX_ENABLE_CLEAR:
++ *in_size = sizeof(unsigned int);
++ break;
++
++ case IOCTL_CAN_BUFFER_LINK_GET:
++ case IOCTL_CAN_RX_ENABLE_GET:
++ case IOCTL_CAN_TX_ENABLE_GET:
++ *in_size = sizeof(unsigned int);
++ *out_size = sizeof(unsigned int);
++ break;
++
++ default:
++ break;
++ }
++
++ IOH_DEBUG("can_ioctl_buff_size -> In size: %u "
++ "Out Size: %u.\n", *in_size, *out_size);
++ IOH_DEBUG("can_ioctl_buff_size -> Invoked successfully.\n");
++}
+diff -urN linux-2.6.33-rc3/drivers/net/can/pch_can/pch_can_main.h topcliff-2.6.33-rc3/drivers/net/can/pch_can/pch_can_main.h
+--- linux-2.6.33-rc3/drivers/net/can/pch_can/pch_can_main.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/can/pch_can/pch_can_main.h 2010-03-10 08:57:34.000000000 +0900
+@@ -0,0 +1,826 @@
++/*!
++ * @file ioh_can_main.h
++ * @brief Provides the function declarations for CAN driver methods.
++ * @version 1.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * <hr>
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ */
++
++#ifndef __IOH_CAN_MAIN_H__
++#define __IOH_CAN_MAIN_H__
++
++/*! @defgroup CAN
++ @brief Contains the different utilities used for implementing
++ the CAN module.
++*/
++
++/*! @defgroup Debug
++ @ingroup CAN
++ @brief Group containing the functionalities for debugging
++ the features of the module.
++ @remarks This group defines the functionalities that can be
++ used for debugging the different features of the
++ CAN. The main functionalities are of displaying
++ the
++ debug messages. For normal execution these
++ functionalities are disabled.
++
++ <hr>
++*/
++
++/*! @defgroup Global
++ @ingroup CAN
++ @brief This group describes the global entities within
++ the module.
++ @remarks This group includes all the global data structures
++ used within the modules. These are mainly used
++ to
++ store the device related information, so that it
++ can
++ be used by other functions of the modules.
++
++ <hr>
++*/
++/*! @defgroup PCILayer
++ @ingroup CAN
++ @brief This group describes the PCI layer interface
++ functionalities.
++ @remarks This group contains the functions and data structures
++ that are used to interface the module with PCI
++ Layer
++ subsystem of the Kernel. Most of the data
++ structures
++ and functions used within this group are Kernel
++ provided
++ ones.
++
++ <hr>
++*/
++/*! @defgroup InterfaceLayer
++ @ingroup CAN
++ @brief This group describes the Driver interface
++ functionalities.
++ @remarks This group contains the data structures and functions
++ used
++ to interface the module driver with the kernel
++ subsystem.
++
++ <hr>
++*/
++/*! @defgroup HALLayer
++ @ingroup CAN
++ @brief This group describes the hardware specific
++ functionalities.
++ @remarks This group contains the functions and data structures
++ used
++ by the module to communicate with the hardware.
++ These
++ functions are device specific and designed
++ according to the
++ device specifications.
++
++ <hr>
++*/
++/*! @defgroup Utilities
++ @ingroup CAN
++ @brief This group describes the utility functionalities.
++ @remarks This group contains the functions and data structures
++ used
++ to assist the other functionalities in their
++ operations.
++
++ <hr>
++*/
++
++/*! @defgroup PCILayerAPI
++ @ingroup PCILayer
++ @brief This group contains the API(functions) used as the PCI
++ interface between the Kernel subsystem and the
++ module.
++
++ <hr>
++*/
++
++/*! @defgroup PCILayerFacilitators
++ @ingroup PCILayer
++ @brief This group contains the data structures used by the PCI
++ Layer APIs for their functionalities.
++
++ <hr>
++*/
++
++/*! @defgroup InterfaceLayerAPI
++ @ingroup InterfaceLayer
++ @brief This group contains the API(functions) used as the
++ Driver
++ interface between the Kernel subsystem and the
++ module.
++
++ <hr>
++*/
++/*! @defgroup InterfaceLayerFacilitators
++ @ingroup InterfaceLayer
++ @brief This group contains the data structures used by the
++ Driver
++ interface APIs for their functionalities.
++
++ <hr>
++*/
++
++/*! @defgroup HALLayerAPI
++ @ingroup HALLayer
++ @brief This group contains the APIs(functions) used to interact
++ with
++ the hardware. These APIs act as an interface
++ between the
++ hardware and the other driver functions.
++
++ <hr>
++*/
++
++/*! @defgroup UtilitiesAPI
++ @ingroup Utilities
++ @brief This group contains the APIs(functions) used by other
++ functions
++ in their operations.
++
++ <hr>
++*/
++
++/*! @ingroup InterfaceLayer
++ @def IOH_CAN_MSG_DATA_LEN
++ @brief The length in bytes of the data part of the
++ CAN message object.
++ @note The maximum length of data that a message object
++ contains is 8bytes.
++
++ @see
++ - ioh_can_msg
++
++ <hr>
++*/
++#define IOH_CAN_MSG_DATA_LEN (8) /* CAN Msg data length */
++
++/*! @ingroup InterfaceLayer
++ @struct ioh_can_msg
++ @brief The structure defining the format of the CAN message.
++ @remarks This structure is used by the driver/user to specify
++ the message details. It is used during reading
++ and
++ transmitting of CAN message objects.
++
++ @see
++ - ioh_can_read
++ - ioh_can_write
++ - ioh_can_msg_tx
++ - ioh_can_rx_dequeue
++
++ <hr>
++*/
++struct ioh_can_msg {
++ unsigned short ide; /**< Standard/extended msg */
++ unsigned int id; /**< 11 or 29 bit msg id */
++ unsigned short dlc; /**< Size of data */
++ unsigned char data[IOH_CAN_MSG_DATA_LEN]; /**< Message pay load */
++ unsigned short rtr; /**< RTR message */
++};
++
++/*! @ingroup InterfaceLayer
++ @struct ioh_can_timing
++ @brief This structure defines the fields that are
++ used to define the CAN timing.
++ @remarks This structure is used by the user application
++ to specify the baud timing parameters which are
++ used to calculate the clock rate timing of the
++ CAN device.
++
++ @see
++ - IOCTL_CAN_CUSTOM
++ - IOCTL_CAN_TIMING_GET
++ - ioh_can_set_baud_custom
++ - ioh_can_get_baud
++
++ <hr>
++*/
++struct ioh_can_timing {
++ unsigned int bitrate; /**< Bitrate (kbps) */
++ unsigned int cfg_bitrate; /**< Bitrate */
++ unsigned int cfg_tseg1; /**< Tseg1 */
++ unsigned int cfg_tseg2; /**< Tseg2 */
++ unsigned int cfg_sjw; /**< Sync jump width */
++ unsigned int smpl_mode; /**< Sampling mode */
++ unsigned int edge_mode; /**< Edge R / D */
++};
++
++/*! @ingroup InterfaceLayer
++ @struct ioh_can_error
++ @brief This structure defines the format for a
++ CAN error status.
++ @remarks This structure is used by the driver to
++ specify the CAN device error status to the
++ user.
++
++ @see
++ - IOCTL_CAN_ERROR_STATS_GET
++ - ioh_can_get_error_stats
++ <hr>
++*/
++struct ioh_can_error {
++ unsigned int rxgte96; /**< Rx err cnt >=96 */
++ unsigned int txgte96; /**< Tx err cnt >=96 */
++ unsigned int error_stat; /**< Error state of CAN node
++ 00=error active (normal)
++ 01=error passive
++ 1x=bus off */
++ unsigned int rx_err_cnt; /**< Rx counter */
++ unsigned int tx_err_cnt; /**< Tx counter */
++};
++
++/*! @ingroup InterfaceLayer
++ @struct ioh_can_acc_filter
++ @brief This structure defines the format for specifying
++ the Mask/ID.
++ @remarks This structure is used for specifying the type of
++ ID or Mask (standard/extended).
++
++ @see
++ - ioh_can_rx_filter
++
++ <hr>
++*/
++struct ioh_can_acc_filter {
++ unsigned int id; /**< The id/mask data. */
++ unsigned int id_ext; /**< Standard/extended ID */
++ unsigned int rtr; /**< RTR message */
++};
++
++/*! @ingroup InterfaceLayer
++ @struct ioh_can_rx_filter
++ @brief This structure describes the ACR and AMR filter
++ for an Rx buffer.
++ @remarks This structure is used by the driver/user for specifying
++ the acceptance filter details.
++
++ @see
++ - IOCTL_CAN_FILTER
++ - IOCTL_CAN_FILTER_GET
++ - ioh_can_rx_init_filter
++ - ioh_can_set_rx_filter
++
++ <hr>
++*/
++struct ioh_can_rx_filter {
++ unsigned int num; /**< Filter number */
++ unsigned int umask; /**< UMask value */
++ struct ioh_can_acc_filter amr; /**< Acceptance Mask Reg */
++ struct ioh_can_acc_filter aidr; /**< Acceptance Control Reg */
++};
++
++/*! @ingroup InterfaceLayer
++ @enum ioh_can_listen_mode
++ @brief Identifies the valid values for the Active/Listen
++ mode.
++ @remarks These enum constants are used to denote the
++ Active/Listen
++ mode of the CAN device. It is used both by the
++ user and
++ driver for specifying the corresponding mode.
++
++ @see
++ - IOCTL_CAN_LISTEN_GET
++ - ioh_can_set_listen_mode
++ - ioh_can_get_listen_mode
++
++ <hr>
++*/
++enum ioh_can_listen_mode {
++ IOH_CAN_ACTIVE = 0, /**< R/w to/from the CAN */
++ IOH_CAN_LISTEN /**< Only read from the CAN */
++};
++
++/*! @ingroup InterfaceLayer
++ @enum ioh_can_run_mode
++ @brief Identifies the valid values for the Run/Stop mode.
++ @remarks These enum constants are used by the driver and
++ user level application to specify the current
++ state(STOP/RUN) of the CAN device.
++
++ @see
++ - IOCTL_CAN_RUN_GET
++ - ioh_can_set_run_mode
++ - ioh_can_get_run_mode
++
++ <hr>
++*/
++enum ioh_can_run_mode {
++ IOH_CAN_STOP = 0, /**< CAN stopped */
++ IOH_CAN_RUN /**< CAN running */
++};
++
++/*! @ingroup InterfaceLayer
++ @enum ioh_can_arbiter
++ @brief Identifies the valid values for the arbitration mode.
++ @remarks These enum constants are used by the driver/user to
++ specify the arbitration/priority mode of the CAN
++ device.
++
++ @see
++ - IOCTL_CAN_ARBITER_GET
++ - ioh_can_set_arbiter_mode
++ - ioh_can_get_arbiter_mode
++
++ <hr>
++*/
++enum ioh_can_arbiter {
++ IOH_CAN_ROUND_ROBIN = 0, /**< Equal priority */
++ IOH_CAN_FIXED_PRIORITY /**< Buffer num priority */
++};
++
++/*! @ingroup InterfaceLayer
++ @enum ioh_can_auto_restart
++ @brief Identifies the valid values for the auto-restart mode.
++ @remarks These enum constants are used by the driver/user to
++ specify the restart mode of the CAN device.
++ @note If the restart mode is CAN_AUTO, the CAN device will
++ automatically recover from the BUS-OFF stage.
++ Else
++ the user would have to manually perform the
++ required
++ procedures.
++
++ @see
++ - IOCTL_CAN_RESTART_MODE_GET
++ - ioh_can_set_restart_mode
++ - ioh_can_get_restart_mode
++
++ <hr>
++*/
++enum ioh_can_auto_restart {
++ CAN_MANUAL = 0, /**< Manual restart */
++ CAN_AUTO /**< Automatic restart */
++};
++
++/*! @ingroup InterfaceLayer
++ @enum ioh_can_baud
++ @brief Identifies common baud rates.
++ @remarks These enum constants are used by the driver/user to
++ denote the standard baud rates used by the CAN
++ device.
++
++ @see
++ - IOCTL_CAN_SIMPLE
++ - ioh_can_set_baud_simple
++
++ <hr>
++*/
++enum ioh_can_baud {
++ IOH_CAN_BAUD_10 = 0, /**< 10 kbps */
++ IOH_CAN_BAUD_20, /**< 20 kbps */
++ IOH_CAN_BAUD_50, /**< 50 kbps */
++ IOH_CAN_BAUD_125, /**< 125 kbps */
++ IOH_CAN_BAUD_250, /**< 250 kbps */
++ IOH_CAN_BAUD_500, /**< 500 kbps */
++ IOH_CAN_BAUD_800, /**< 800 kbps */
++ IOH_CAN_BAUD_1000 /**< 1000 kbps */
++};
++
++/*! @ingroup InterfaceLayer
++ @enum ioh_can_interrupt
++ @brief Identifies interrupt enables/disables.
++ @remarks These enum constants are used by the driver
++ to enable/disable the different combination
++ of the interrupts supported by the CAN device.
++
++ @see
++ - ioh_can_set_int_enables
++
++ <hr>
++*/
++enum ioh_can_interrupt {
++ CAN_ENABLE, /**< Enable IE bit only */
++ CAN_DISABLE, /**< Disable IE bit only */
++ CAN_ALL, /**< All ints */
++ CAN_NONE /**< No ints */
++};
++
++/*! @ingroup InterfaceLayer
++ @def MAGIC
++ @brief The one byte constant used for the generation
++ of IOCTL commands.
++
++ <hr>
++*/
++#define MAGIC (0x88)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_RESET
++ @brief The IOCTL command for CAN reset.
++
++ @see
++ - ioh_can_ioctl
++ <hr>
++*/
++#define IOCTL_CAN_RESET _IO(MAGIC, 0)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_RUN
++ @brief The IOCTL command for setting the CAN in either RUN
++ mode.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_RUN _IO(MAGIC, 1)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_STOP
++ @brief The IOCTL command for setting the CAN in either STOP
++ mode.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_STOP _IO(MAGIC, 2)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_RUN_GET
++ @brief The IOCTL command to get the current mode (RUN/STOP) of
++ the CAN device.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_RUN_GET _IOR(MAGIC, 3, enum ioh_can_run_mode)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_FILTER
++ @brief The IOCTL command for setting the filter for a receive
++ buffer.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_FILTER _IOW(MAGIC, 4, struct ioh_can_rx_filter)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_FILTER_GET
++ @brief The IOCTL command for getting the current filter
++ settings
++ of a receive buffer.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_FILTER_GET _IOR(MAGIC, 5, struct ioh_can_rx_filter)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_CUSTOM
++ @brief The IOCTL command for setting the user specified time
++ settings for the CAN device.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_CUSTOM _IOW(MAGIC, 6, struct ioh_can_timing)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_SIMPLE
++ @brief The IOCTL command for setting the standard time settings
++ for the CAN device.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_SIMPLE _IOW(MAGIC, 7, enum ioh_can_baud)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_TIMING_GET
++ @brief The IOCTL command for getting the current CAN time
++ settings.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_TIMING_GET _IOR(MAGIC, 8, struct ioh_can_timing)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_BLOCK
++ @brief The IOCTL command for setting the block mode for the CAN
++ device.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_BLOCK _IO(MAGIC, 9)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_NON_BLOCK
++ @brief The IOCTL command for setting the non-block mode for the
++ CAN device.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_NON_BLOCK _IO(MAGIC, 10)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_BLOCK_GET
++ @brief The IOCTL command for getting the current block mode
++ settings
++ for the CAN device operations.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_BLOCK_GET _IOR(MAGIC, 11, u32)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_LISTEN
++ @brief The IOCTL command for setting the CAN device to listen
++ mode.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_LISTEN _IO(MAGIC, 12)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_ACTIVE
++ @brief The IOCTL command for setting the CAN device to active
++ mode.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_ACTIVE _IO(MAGIC, 13)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_LISTEN_GET
++ @brief The IOCTL command for getting the current listen mode of
++ the
++ CAN device.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_LISTEN_GET _IOR(MAGIC, 14, enum ioh_can_listen_mode)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_ARBITER_ROUND_ROBIN
++ @brief The IOCTL command to set the arbiter priority mode as
++ ROUND
++ ROBIN
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_ARBITER_ROUND_ROBIN _IO(MAGIC, 15)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_ARBITER_FIXED_PRIORITY
++ @brief The IOCTL command to set the arbiter priority mode as
++ FIXED
++ PRIORITY.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_ARBITER_FIXED_PRIORITY _IO(MAGIC, 16)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_ARBITER_GET
++ @brief The IOCTL command for getting the currently set arbiter
++ priority mode.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_ARBITER_GET _IOR(MAGIC, 17, enum ioh_can_arbiter)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_ERROR_STATS_GET
++ @brief The IOCTL command for getting the current error status
++ of the
++ CAN device.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_ERROR_STATS_GET _IOR(MAGIC, 18, struct ioh_can_error)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_BUFFER_LINK_CLEAR
++ @brief The IOCTL command to clear the link mode of a receive
++ buffer.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_BUFFER_LINK_CLEAR _IOW(MAGIC, 20, u32)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_BUFFER_LINK_GET
++ @brief The IOCTL command for getting the current link mode
++ settings
++ of a receive buffer.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_BUFFER_LINK_GET _IOWR(MAGIC, 21, u32)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_RX_ENABLE_SET
++ @brief The IOCTL command for enabling a receive buffer.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_RX_ENABLE_SET _IOW(MAGIC, 22, u32)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_RX_ENABLE_CLEAR
++ @brief The IOCTL command for disabling a receive buffer.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_RX_ENABLE_CLEAR _IOW(MAGIC, 23, u32)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_RX_ENABLE_GET
++ @brief The IOCTL command for getting the current enable status
++ of
++ a receive buffer.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_RX_ENABLE_GET _IOWR(MAGIC, 24, u32)
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_TX_ENABLE_SET
++ @brief The IOCTL command for enabling a transmit buffer.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_TX_ENABLE_SET _IOW(MAGIC, 25, u32)
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_TX_ENABLE_CLEAR
++ @brief The IOCTL command for disabling a transmit buffer.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_TX_ENABLE_CLEAR _IOW(MAGIC, 26, u32)
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_TX_ENABLE_GET
++ @brief The IOCTL command for getting the current enable status
++ of a transmit buffer.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_TX_ENABLE_GET _IOWR(MAGIC, 27, u32)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_RESTART_MODE_AUTO
++ @brief The IOCTL command to set the restart mode as AUTO.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_RESTART_MODE_AUTO _IO(MAGIC, 28)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_RESTART_MODE_MANUAL
++ @brief The IOCTL command to set the restart mode as MANUAL.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_RESTART_MODE_MANUAL _IO(MAGIC, 29)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_RESTART_MODE_GET
++ @brief The IOCTL command for getting the currently set restart
++ mode
++ of the CAN device.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_RESTART_MODE_GET _IOR(MAGIC, 30, enum ioh_can_auto_restart)
++
++/*! @ingroup InterfaceLayer
++ @def IOCTL_CAN_BUFFER_LINK_SET
++ @brief The IOCTL command to set the link mode of a receive
++ buffer.
++
++ @see
++ - ioh_can_ioctl
++
++ <hr>
++*/
++#define IOCTL_CAN_BUFFER_LINK_SET _IOW(MAGIC, 19, u32)
++
++/*! @ingroup InterfaceLayer
++ @def IOH_CAN_SUCCESS
++ @brief The value returned by certain functions on success.
++
++ <hr>
++*/
++#define IOH_CAN_SUCCESS (0) /* CAN success return value. */
++
++/*! @ingroup InterfaceLayer
++ @def IOH_CAN_FAIL
++ @brief The value returned by certain functions on failure.
++
++ <hr>
++*/
++#define IOH_CAN_FAIL (-1) /* CAN failure return value. */
++
++#endif /* ifndef __IOH_CAN_H__ */
+diff -urN linux-2.6.33-rc3/drivers/net/can/pch_can/pch_can_pci.c topcliff-2.6.33-rc3/drivers/net/can/pch_can/pch_can_pci.c
+--- linux-2.6.33-rc3/drivers/net/can/pch_can/pch_can_pci.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/can/pch_can/pch_can_pci.c 2010-03-12 14:10:54.000000000 +0900
+@@ -0,0 +1,1134 @@
++/*!
++ * @file ioh_can_pci.c
++ * @brief Provides the function definition for the PCI Layer APIs.
++ * @version 1.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * <hr>
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ */
++
++/* includes */
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/pci.h>
++#include <linux/cdev.h>
++#include <linux/fs.h>
++#include <linux/io.h>
++#include <linux/wait.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include "pch_common.h"
++#include "pch_debug.h"
++#include "pch_can_main.h"
++#include "pch_can_hal.h"
++#include "pch_can_pci.h"
++
++MODULE_DESCRIPTION("Controller Area Network Driver");
++MODULE_LICENSE("GPL");
++MODULE_VERSION("0.94");
++
++/*** Module parameter variables ***/
++
++/*! @ingroup Global
++ @var ioh_can_rx_buf_size
++ @brief The number of message objects that has to be configured
++ as receive
++ objects.
++ @note The value can be specified during the loading of the
++ module. The default value is 16.
++
++ <hr>
++ */
++unsigned int ioh_can_rx_buf_size = 16;
++
++/*! @ingroup Global
++ @var ioh_can_tx_buf_size
++ @brief The number of message objects that has to be configured
++ as transmit
++ objects.
++ @note The value can be specified during the loading of the
++ module. The default value is 16.
++
++ <hr>
++ */
++unsigned int ioh_can_tx_buf_size = 16;
++
++/*! @ingroup Global
++ @var ioh_can_clock
++ @brief The clock rate frequency in KHz.
++ @note The value can be specified during the loading of the
++ module. The default value is 62500KHz (62.5MHz).
++ @see
++ - ioh_can_pci_init
++
++ <hr>
++ */
++#ifndef FPGA_BOARD
++int ioh_can_clock = 50000;
++#else
++int ioh_can_clock = 62500;
++#endif
++
++/*! @ingroup Global
++ @var major
++ @brief The major number to be allocated to the device driver.
++ @note The value can be specified during the loading of the
++ module. If no value is specified the default value is
++ 0 and the major number would be allocated dynamically.
++ @see
++ - ioh_can_probe
++
++ <hr>
++*/
++static int major;
++/*******/
++
++/*** global variables ***/
++/*! @ingroup Global
++ @var MODULE_NAME
++ @brief The name of the module.
++ @note This variable denotes the module name which is displayed
++ along
++ with the debug messages during logging of debug
++ messages.
++
++ <hr>
++*/
++#define MODULE_NAME "pch_can"
++/*! @ingroup Global
++ @var can_os
++ @brief The global variable used to store the device
++ information.
++ @note This variable is used to store the device specific
++ information,
++ that can be used by other module functions in
++ their course of operations.
++
++ <hr>
++*/
++struct ioh_can_os can_os[MAX_CAN_DEVICES];
++
++/********/
++
++/*** static variables *****/
++
++/*! @ingroup Global
++ @var ioh_can_major
++ @brief The global variable used to store the major number of
++ the device driver.
++ @note This variable is used to store the major number of the
++ device driver. If the major number is allocated
++ dynamically
++ then the kernel provided major number is stored
++ in it. During
++ static allocation the unique major number is
++ calculated with
++ the user provided major number and stored in it.
++
++ @see
++ - ioh_can_probe
++ - ioh_can_remove
++
++ <hr>
++*/
++static dev_t ioh_can_major; /* Device variable used to calculate the major
++ number. */
++
++/*! @ingroup Global
++ @var ioh_can_dev
++ @brief The global variable used to store the device driver
++ information.
++ @note This variable is used to store the device driver
++ specific information.
++ It is used during the registration of the device
++ driver.
++
++ @see
++ - ioh_can_probe
++ - ioh_can_remove
++
++ <hr>
++*/
++static struct cdev ioh_can_dev; /* char device reg'd by the driver */
++
++/*******/
++
++/*** Defining as module parameter. ***/
++
++module_param_named(major, major, int, 444);
++module_param_named(ioh_can_rx_buf_size, ioh_can_rx_buf_size, int, 444);
++module_param_named(ioh_can_tx_buf_size, ioh_can_tx_buf_size, int, 444);
++module_param_named(ioh_can_clock, ioh_can_clock, int, 444);
++
++/*****/
++
++/*** Function prototypes. ***/
++
++static int ioh_can_probe(struct pci_dev *dev, const struct pci_device_id *id);
++static void ioh_can_remove(struct pci_dev *dev);
++static int ioh_can_suspend(struct pci_dev *dev, pm_message_t state);
++static int ioh_can_resume(struct pci_dev *dev);
++static void ioh_can_shutdown(struct pci_dev *dev);
++
++/*****/
++
++/*! @ingroup PCILayerFacilitators
++ @struct ioh_can_pcidev_id
++ @brief Instance of Linux Kernel structure pci_device_id for
++ specifying the Vendor
++ and deviceID of the supported PCI device.
++ @remarks
++ This structure is the instance of the linux
++ kernel provided structure
++ pci_device_id. It is used to register the
++ vendor and device ID
++ of the PCI device with the kernel subsystem.
++ This structure is used by the kernel for module
++ registration for the
++ appropriate device during the device
++ recognition (probing). In short
++ it describes the devices for which this module
++ can be used. It forms
++ a part of the large structure
++ @ref ioh_can_pcidev used for module registration.
++ @note This driver is used as a part of another structure
++ @ref ioh_can_pcidev
++ @see
++ - ioh_can_pcidev
++ <hr>
++*/
++static const struct pci_device_id ioh_can_pcidev_id[] __devinitdata = {
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOH1_CAN)},
++ {}
++};
++
++MODULE_DEVICE_TABLE(pci, ioh_can_pcidev_id);
++
++/*! @ingroup PCILayerFacilitators
++ @struct ioh_can_pcidev
++ @brief Instance of the Linux Kernel structure pci_driver for
++ specifying the PCI Driver features to the
++ kernel subsystem.
++ @remarks
++ This structure is the instance of the Linux
++ kernel provided structure
++ pci_driver. It is used to register the PCI
++ driver features with the kernel subsystem.
++ This structure specifies the PCI driver features
++ such as the Name
++ of the Module, Entry point, Exit point and Power
++ Management functions for the driver.
++ @note This structure is used only during the registration and
++ un-registration of the PCI driver.
++ @see
++ - ioh_can_pci_init
++ - ioh_can_pci_exit
++ <hr>
++*/
++static struct pci_driver ioh_can_pcidev = {
++ .name = "ioh_can",
++ .id_table = ioh_can_pcidev_id,
++ .probe = ioh_can_probe,
++ .remove = __devexit_p(ioh_can_remove),
++#ifdef CONFIG_PM
++ .suspend = ioh_can_suspend,
++ .resume = ioh_can_resume,
++#endif
++ .shutdown = ioh_can_shutdown
++};
++
++/*! @ingroup PCILayerAPI
++ @fn static int ioh_can_pci_init(void)
++ @brief This function registers the module as a PCI Driver.
++ @remarks
++ This function is invoked during the loading of
++ the module. The main
++ tasks performed by this function are.
++ - Evaluates the validity of the parameter passed
++ during loading.
++ - Validates whether the obtained clock frequency
++ is valid.
++ - Registers the module as PCI driver module.
++
++ @note This function is called during the loading of the
++ device.
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> The loading of the
++ module successful.
++ - @ref IOH_CAN_FAIL --> The loading
++ of the module failed due to invalid
++ module parameters or invalid clock frequency.
++ - -EEXIST --> pci_register_driver failed.
++ - -EINVAL --> pci_register_driver failed.
++ - -ENOMEM --> pci_register_driver failed.
++ <hr>
++*/
++static int __init ioh_can_pci_init(void)
++{
++ int retval;
++
++ IOH_DEBUG("ioh_can_pci_init -> Obtained parameters: \n"
++ "ioh_can_tx_buf_size = %d\n"
++ "ioh_can_rx_buf_size = %d\n"
++ "ioh_can_clock = %d\n"
++ "major = %d\n", ioh_can_tx_buf_size,
++ ioh_can_rx_buf_size, ioh_can_clock, major);
++
++ /* Checking if the total message objects to be used exceeds
++ the supported message object available and whether the obtained
++ clock frequency is greater than 0. */
++ if (((ioh_can_tx_buf_size + ioh_can_rx_buf_size) <= MAX_MSG_OBJ)
++ && (ioh_can_clock > 0)) {
++
++ /* Checking the validity of the clock variable. */
++ switch (ioh_can_clock) {
++ /* List of valid clock frequency
++ Add new clock frequency here
++ for validation.
++ */
++ case 62500:
++ case 24000:
++ case 50000:
++ retval = IOH_CAN_SUCCESS;
++ break;
++
++ default:
++ IOH_LOG(KERN_ERR,
++ "ioh_can_pci_init -> Invalid clock frequency "
++ "%u", ioh_can_clock);
++ retval = IOH_CAN_FAIL;
++ }
++
++ if (IOH_CAN_SUCCESS == retval) {
++
++ /* Register as a PCI driver. */
++ retval = pci_register_driver(&ioh_can_pcidev);
++
++ /* Registration unsuccessful. */
++ if (0 != retval) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_pci_init -> \
++ pci_register_driver failed(returned %d).\n",
++ retval);
++ }
++ /* Registration successful. */
++ else {
++ IOH_DEBUG
++ ("ioh_can_pci_init -> pci_register_driver \
++ successful(returned %d).\n",
++ retval);
++
++ IOH_DEBUG
++ ("ioh_can_pci_init invoked successfully.\n");
++ }
++ }
++ }
++ /* Message object exceeds the available message object. */
++ else {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_pci_init -> Invalid module parameters.\n\t"
++ "Expected Message objects to be configured <= %d"
++ " and clock frequency expected > 0\n", MAX_MSG_OBJ);
++
++ retval = IOH_CAN_FAIL;
++ }
++
++ IOH_DEBUG("ioh_can_pci_init returns %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup PCILayerAPI
++ @fn static int ioh_can_pci_exit(void)
++ @brief Un-registers the PCI Driver.
++ @note This function is called by the kernel subsystem during
++ unloading
++ of the module.
++ @remarks
++ This function is invoked when the module is
++ unloaded from the
++ kernel subsystem. The main tasks performed by
++ this function are.
++ - Un-registers the PCI Driver.
++
++ @retval None.
++
++ <hr>
++*/
++static void __exit ioh_can_pci_exit(void)
++{
++ /* Unregistering the registered PCI Driver. */
++ pci_unregister_driver(&ioh_can_pcidev);
++ IOH_DEBUG
++ ("ioh_can_pci_exit -> pci_unregister_driver invoked successfully.\n");
++
++ IOH_DEBUG("ioh_can_pci_exit invoked successfully. \n");
++}
++
++/*! @ingroup PCILayerAPI
++ @fn static int __devinit ioh_can_probe(
++ struct pci_dev *pdev, const struct pci_device_id *id)
++ @brief Implements the probe functionalities of the PCI Driver.
++ @remarks
++ This function is used as the probe function of
++ the PCI Driver.
++ The main tasks performed by this function are:
++ - Enables the PCI device and other associated
++ features such as interrupts.
++ - Attains the remapped section corresponding to
++ the device for user interaction.
++ - Registers a character device driver for
++ accessing the device.
++ - Initializes the driver specific data
++ structures for use by
++ other functions of the driver.
++ @note This function is called by the kernel subsystem only
++ when a supported PCI device has been detected.
++
++ @param pdev [@ref INOUT] Reference to the PCI
++ device descriptor.
++ @param id [@ref IN] Reference to the
++ supported PCI Device IDs
++ table.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> Probe has been
++ successful.
++ - @ref IOH_CAN_FAIL --> Probe operation
++ failed.
++ - -EIO --> pci_enable_device error
++ status code.
++ - -EINVAL --> pci_enable_device error
++ status code.
++ - -EBUSY --> pci_request_regions/
++ alloc_chrdev_region/
++ register_chrdev_region error status code.
++ - -ENOMEM --> pci_iomap fails/
++ alloc_chrdev_region/
++ register_chrdev_region/cdev_add error status
++ code.
++ - -ENODEV --> pci_iomap error status code.
++
++ @see
++ - ioh_can_pcidev
++
++ <hr>
++*/
++static int __devinit ioh_can_probe(struct pci_dev *pdev,
++ const struct pci_device_id *id)
++{
++ unsigned int can_num = 0; /* Variable to denote the CAN
++ structure index. */
++ int resources_allocated = false; /* Flag variable for denoting
++ resource allocation. */
++ int driver_registered = false; /* Flag variable to denote driver
++ registration. */
++ int retval = IOH_CAN_SUCCESS; /* Variable storing the return
++ status value. */
++
++ {
++ do {
++ /* Enable the PCI device */
++ retval = pci_enable_device(pdev);
++ if (0 != retval) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_probe -> pci_enable_device failed"
++ "(returned %d).\n", retval);
++ IOH_DEBUG
++ ("ioh_can_probe -> Couldn't enable PCI device "
++ "with Vendor ID:0x%x and Device ID:0x%x. "
++ "Exiting\n", pdev->vendor, pdev->device);
++
++ break;
++ }
++ IOH_DEBUG
++ ("ioh_can_probe -> Enable PCI device successful"
++ "(returned %d).\n", retval);
++
++ /* Request the PCI regions. */
++ retval = pci_request_regions(pdev, DRIVER_NAME);
++ if (0 != retval) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_probe -> pci_request_regions "
++ "failed(returned %d).\n", retval);
++ break;
++ }
++ IOH_DEBUG
++ ("ioh_can_probe -> pci_request_regions successful"
++ "(returned %d).\n", retval);
++ resources_allocated = true;
++
++ /* Remap the PCI user space regions to kernel space. */
++ /* Wipro 1/13/2010 Use Mem BAR */
++ can_os[can_num].pci_remap =
++ (unsigned long)pci_iomap(pdev, 1, 0);
++ if (0 == can_os[can_num].pci_remap) {
++ IOH_LOG(KERN_ERR,
++ "pci_iomap failed(returned 0).\n");
++ retval = -ENOMEM;
++ break;
++ }
++ IOH_DEBUG
++ ("ioh_can_probe -> \
++ pci_iomap successful. Remap address: "
++ "%lu\n", can_os[can_num].pci_remap);
++
++ /* If major number is not given as module parameter. */
++ if (major == 0) {
++ /* Registering the driver. */
++ retval = alloc_chrdev_region(
++ &ioh_can_major, 0, 1, DRIVER_NAME);
++ if (0 != retval) {
++
++ IOH_LOG(KERN_ERR,
++ "ioh_can_probe -> alloc_chrdev_region "
++ "failed(returned %d).\n",
++ retval);
++ break;
++ }
++ IOH_DEBUG
++ ("ioh_can_probe -> alloc_chrdev_region \
++ successful"
++ "(returned %d).\n", retval);
++ } else { /* If major number is provided as module
++ parameter. */
++
++ /* Attaining a device id. */
++ ioh_can_major = MKDEV(major, 0);
++
++ /* Registering the driver. */
++ retval = register_chrdev_region(
++ ioh_can_major, 1, DRIVER_NAME);
++ if (0 != retval) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_probe -> \
++ register_chrdev_region "
++ "failed(returned %d).\n",
++ retval);
++ break;
++ }
++ IOH_DEBUG
++ ("ioh_can_probe -> register_chrdev_region \
++ successful"
++ "(returned %d).\n", retval);
++ }
++
++ /* Initializing the cdev structure. */
++ cdev_init(&ioh_can_dev, &file_ops);
++ ioh_can_dev.owner = THIS_MODULE;
++ ioh_can_dev.ops = &file_ops;
++
++ /* Adding the device to the system. */
++ retval = cdev_add(&ioh_can_dev, ioh_can_major, 1);
++ if (0 != retval) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_probe -> cdev_add failed"
++ "(returned %d).\n", retval);
++ unregister_chrdev_region(ioh_can_major, 1);
++ IOH_DEBUG
++ ("ioh_can_probe -> unregister_chrdev_region \
++ invoked successfully.\n");
++ break;
++ }
++ IOH_DEBUG
++ ("ioh_can_probe -> cdev_add successful(returned %d).\n",
++ retval);
++ driver_registered = true;
++
++ /* Filling in the details of the CAN into the can
++ structure. */
++ can_os[can_num].can_num = can_num; /* Can number
++ (index to the structure) */
++ can_os[can_num].irq = pdev->irq; /* IRQ allocated
++ to this device. */
++
++ /* Creating the device handle denoting the remap base
++ address. */
++ can_os[can_num].can =
++ ioh_can_create((void *)can_os[can_num].pci_remap);
++
++ /* If handle creation fails. */
++ if ((int) NULL == can_os[can_num].can) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_probe -> ioh_can_create failed.\n");
++ retval = IOH_CAN_FAIL;
++ break;
++ }
++ IOH_DEBUG
++ ("ioh_can_probe -> ioh_can_create successful.\n");
++
++ can_os[can_num].dev = pdev; /* Reference to
++ pci_device structure. */
++ can_os[can_num].opened = 0; /* Open flag denoting
++ the device usage. */
++ can_os[can_num].is_suspending = 0; /* Flag denoting
++ the suspend stage. */
++
++ /* Initializing the wait queues. */
++ init_waitqueue_head(&(can_os[can_num].read_wait_queue));
++ init_waitqueue_head(&
++ (can_os[can_num].write_wait_queue));
++
++ /* Initailzing the lock variables. */
++ spin_lock_init(&(can_os[can_num].open_spinlock));
++ spin_lock_init(&(can_os[can_num].tx_spinlock));
++ /* OKISEMI 090721 add */
++
++ /* Storing the reference to the structure for
++ use in other PCI functions.
++ */
++/* pdev->dev.driver_data = (void *)&(can_os[can_num]); */
++ dev_set_drvdata(&pdev->dev, (void *)&(can_os[can_num]));
++
++ {
++ /* Initializing the message object array to
++ identify the
++ objects used as receive and transmit
++ object.
++ According to this logic, the initial
++ objects will be
++ configured as receive objects followed by
++ transmit objects.
++
++ For example.
++ 1) Receive Object(R) : 16
++ Transmit Object(T): 16
++ -----------------------------------------------------------------
++ |R|R|R|R|R|R|R|R|R|R|R|R|R|R|R|R|T|T|T|T|T|T|T|T|T|T|T|T|T|T|T|T|
++ -----------------------------------------------------------------
++
++ 2) Receive Object(R) : 6
++ Transmit Object(T): 12
++ -----------------------------------------------------------------
++ |R|R|R|R|R|R|T|T|T|T|T|T|T|T|T|T|T|T|N|N|N|N|N|N|N|N|N|N|N|N|N|N|
++ -----------------------------------------------------------------
++
++ 3) Receive Object(R) : 18
++ Transmit Object(T): 5
++ -----------------------------------------------------------------
++ |R|R|R|R|R|R|R|R|R|R|R|R|R|R|R|R|R|R|T|T|T|T|T|N|N|N|N|N|N|N|N|N|
++ -----------------------------------------------------------------
++
++ The above figure shows 32 message object
++ starting from message object 1
++ to object 32, being used according to the
++ user specified receive and
++ transmit message object number.
++
++ Here R -> used as receive object.
++ T -> used as transmit object.
++ N -> not used.
++ */
++
++ int index;
++
++ for (index = 0; index < ioh_can_rx_buf_size;
++ index++) {
++ ioh_msg_obj_conf[index] = MSG_OBJ_RX;
++ }
++
++ for (index = index;
++ index <
++ (ioh_can_rx_buf_size +
++ ioh_can_tx_buf_size); index++) {
++ ioh_msg_obj_conf[index] = MSG_OBJ_TX;
++ }
++
++ }
++
++ retval = IOH_CAN_SUCCESS;
++ IOH_DEBUG("ioh_can_probe successful.\n");
++ } while (false);
++
++ /* If any of the process fails. */
++ if (IOH_CAN_SUCCESS != retval) {
++ /* If the resources are allocated. */
++ if (true == resources_allocated) {
++ if (0 != can_os[can_num].pci_remap) {
++ pci_iounmap(pdev,
++ (void *)can_os[can_num].
++ pci_remap);
++ IOH_DEBUG("ioh_can_probe -> "
++ "pci_iounmap invoked successfully.\n");
++ can_os[can_num].pci_remap = 0;
++ }
++
++ pci_release_regions(pdev);
++ IOH_DEBUG("ioh_can_probe -> "
++ "pci_release_regions invoked successfully.\n");
++ }
++
++ /* If driver has been registered. */
++ if (true == driver_registered) {
++ cdev_del(&ioh_can_dev);
++ IOH_DEBUG("ioh_can_probe -> "
++ "cdev_del invoked successfully.\n");
++ unregister_chrdev_region(ioh_can_major, 1);
++ IOH_DEBUG("ioh_can_probe -> "
++ "unregister_chrdev_region \
++ invoked successfully.\n");
++ }
++
++ pci_disable_device(pdev);
++ IOH_DEBUG("ioh_can_probe -> "
++ "pci_disable_region invoked successfully.\n");
++
++ IOH_DEBUG("ioh_can_probe failed.\n");
++ }
++
++ }
++
++ /* If probe fails retval contains the status code of the failed API.
++ If it is successful it contains IOH_CAN_SUCCESS(0).
++ */
++ IOH_DEBUG("ioh_can_probe returns %d\n", retval);
++ return retval;
++}
++
++/*! @ingroup PCILayerAPI
++ @fn static void __devexit ioh_can_remove(
++ struct pci_dev *pdev)
++ @brief Implements the remove functionalities of the PCI Driver.
++ @remarks
++ This function is used as the remove function of
++ the PCI Driver.
++ The main tasks performed by this function
++ include :
++ - Un-maps the remapped user space from kernel
++ space.
++ - Releases all the resources attained during
++ probe.
++ - Un-registers the character device driver
++ registered during probe.
++ @note This function is called by the kernel subsystem when the
++ supported
++ PCI device is removed/unloaded.
++
++ @param pdev [@ref INOUT] Reference to the PCI
++ device descriptor.
++
++ @retval None.
++
++ @see
++ - ioh_can_pcidev
++
++ <hr>
++*/
++static void __devexit ioh_can_remove(struct pci_dev *pdev)
++{
++/* struct ioh_can_os *can_os = pdev->dev.driver_data; */
++ struct ioh_can_os *can_os = \
++ (struct ioh_can_os *) dev_get_drvdata(&pdev->dev);
++
++ /* Unmapping the remmaped user space from kernel space. */
++ pci_iounmap(pdev, (void *)can_os->pci_remap);
++ IOH_DEBUG("ioh_can_remove -> pci_iounmap invoked successfully.\n");
++
++ /* Releasing the driver specific resources. */
++ ioh_can_destroy(can_os->can);
++ IOH_DEBUG("ioh_can_remove -> ioh_can_destroy invoked successfully.\n");
++
++ /* Releasing the acquired resources. */
++ pci_release_regions(pdev);
++ IOH_DEBUG
++ ("ioh_can_remove -> pci_release_regions invoked successfully.\n");
++
++ /* Removing the device */
++ cdev_del(&ioh_can_dev);
++ IOH_DEBUG("ioh_can_remove -> cdev_del invoked successfully.\n");
++
++ /* Unregistering the driver. */
++ unregister_chrdev_region(ioh_can_major, 1);
++ IOH_DEBUG
++ ("ioh_can_remove -> unregister_chrdev_region invoked successfully.\n");
++
++ /* disabling the device. */
++ pci_disable_device(pdev);
++ IOH_DEBUG
++ ("ioh_can_remove -> pci_disable_device invoked successfully.\n");
++}
++
++#ifdef CONFIG_PM
++/*! @ingroup PCILayerAPI
++ @fn static int ioh_can_suspend(struct pci_dev *pdev,
++ pm_message_t state)
++ @brief Implements the suspend functionalities of the PCI
++ Driver.
++ @remarks
++ This function is used as the suspend function of
++ the PCI driver.
++ The main tasks performed by this function are :
++ - Manipulates the power management of the
++ supported device during
++ system suspension.
++ - Maintains the crucial device data, so that the
++ state of the system
++ can be maintained during resumption.
++ @note This function is called by the Kernel Power Management
++ subsystem during system suspend operation.
++
++ @param pdev [@ref INOUT] Reference to the PCI
++ Device descriptor.
++ @param state [@ref IN] The state of the
++ PCI Device.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> The suspend
++ process was successful.
++ - -ENOMEM --> pci_save_state error status code
++
++ @see
++ - ioh_can_pci_dev
++
++ <hr>
++*/
++static int ioh_can_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ int i; /* Counter variable. */
++ int retval; /* Return value. */
++/* struct ioh_can_os *can_os = pdev->dev.driver_data; */
++ struct ioh_can_os *can_os = \
++ (struct ioh_can_os *) dev_get_drvdata(&pdev->dev);
++
++ /* If the device is opened get the current run mode. */
++ if (1 == can_os->opened) {
++ /* Save the Run Mode. */
++ (void)ioh_can_get_run_mode(can_os->can, &(can_os->run_mode));
++ }
++
++ /* Stop the CAN controller */
++ (void)ioh_can_set_run_mode(can_os->can, IOH_CAN_STOP);
++
++ /* Indicate that we are aboutto/in suspend */
++ can_os->is_suspending = 1;
++
++ if (1 == can_os->opened) {
++ u32 buf_stat; /* Variable for reading the transmit buffer
++ status. */
++ u32 counter = 0xFFFFFF;
++
++ /*
++ Waiting for all transmission to complete.
++ This is done by checking the TXQST pending
++ register. The loop teriminates when no
++ transmission is pending.
++ */
++ while (counter) {
++ buf_stat = ioh_can_get_buffer_status(can_os->can);
++ if (buf_stat == 0)
++ break;
++
++ counter--;
++ }
++
++ if (counter > 0) {
++ IOH_DEBUG
++ ("ioh_can_suspend -> No transmission is pending.\n");
++ } else {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_suspend -> Transmission time "
++ "out.\n");
++ }
++
++ /* Free any waiting write threads */
++ can_os->write_wait_flag = 1;
++ wake_up_interruptible(&(can_os->write_wait_queue));
++
++ /* Save interrupt configuration and then disable them */
++ (void)ioh_can_get_int_enables(can_os->can,
++ &(can_os->int_enables));
++ (void)ioh_can_set_int_enables(can_os->can, CAN_DISABLE);
++
++ /* Save Tx buffer enable state */
++ for (i = 0; i < (ioh_can_tx_buf_size + ioh_can_rx_buf_size);
++ i++) {
++ if (ioh_msg_obj_conf[i] == MSG_OBJ_TX) {
++ /* Here i is the index, however (i+1) is object
++ number. */
++ (void)ioh_can_get_tx_enable(can_os->can,
++ (i + 1),
++ &(can_os->
++ tx_enable[i]));
++ }
++ }
++
++ /* Disable all Transmit buffers */
++ (void)ioh_can_tx_disable_all(can_os->can);
++
++ /* Save Rx buffer enable state */
++ for (i = 0; i < (ioh_can_tx_buf_size + ioh_can_rx_buf_size);
++ i++) {
++ if (ioh_msg_obj_conf[i] == MSG_OBJ_RX) {
++ /* Here i is the index, however (i+1) is object
++ number. */
++
++ (void)ioh_can_get_rx_enable(can_os->can,
++ (i + 1),
++ &(can_os->
++ rx_enable[i]));
++ (void)ioh_can_get_rx_buffer_link(can_os->can,
++ (i + 1),
++ &(can_os->
++ rx_link[i]));
++
++ /* Save Rx Filters */
++ can_os->rx_filter[i].num = (i + 1);
++ (void)ioh_can_get_rx_filter(can_os->can,
++ &(can_os->
++ rx_filter[i]));
++ }
++ }
++
++ /* Disable all Receive buffers */
++ (void)ioh_can_rx_disable_all(can_os->can);
++
++ /* Save Context */
++ (void)ioh_can_get_baud(can_os->can, &(can_os->timing));
++ /* Timing. */
++ (void)ioh_can_get_listen_mode(can_os->can, \
++ &(can_os->listen_mode));
++ /* Listen mode */
++ (void)ioh_can_get_arbiter_mode(can_os->can, \
++ &(can_os->arbiter_mode));
++ /* Arbiter mode */
++
++ }
++
++ retval = pci_save_state(pdev);
++
++ if (0 != retval) {
++ /* Indicate that we have not suspended */
++ can_os->is_suspending = 0;
++
++ IOH_LOG(KERN_ERR,
++ "ioh_can_suspend -> pci_save_state \
++ failed(returned %d).\n",
++ retval);
++ } else {
++ IOH_DEBUG
++ ("ioh_can_suspend -> pci_save_state successful(returned %d).\n",
++ retval);
++
++ (void)pci_enable_wake(pdev, PCI_D3hot, 0);
++ IOH_DEBUG
++ ("ioh_can_suspend -> pci_enable_wake invoked successfully.\n");
++
++ pci_disable_device(pdev);
++ IOH_DEBUG
++ ("ioh_can_suspend -> pci_disable_device invoked \
++ successfully.\n");
++
++ (void)pci_set_power_state(pdev, pci_choose_state(pdev, state));
++ IOH_DEBUG
++ ("ioh_can_suspend -> pci_set_power_state invoked \
++ successfully.\n");
++ }
++
++ IOH_DEBUG("ioh_can_suspend returns %d.\n", retval);
++ return retval;
++}
++
++/*! @ingroup PCILayerAPI
++ @fn static int ioh_can_resume(struct pci_dev *pdev)
++ @brief Implements the resume functionalities of the PCI Driver.
++ @remarks
++ This function is used as the resume function of
++ the PCI Driver.
++ The main tasks performed by this function
++ includes :
++ - Restores the regular power state of the device
++ to D0.
++ - Restores the same state of the device as that
++ was before
++ suspension.
++ @note This function is invoked by the Kernel Power Management
++ subsystem
++ during system resume operation.
++
++ @param pdev [@ref INOUT] Reference to the PCI
++ device descriptor.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> The resume
++ operation was successful.
++ - -EIO --> pci_enable_device error status code.
++ - -EINVAL --> pci_enable_device error status
++ code.
++ @see
++ - ioh_can_pci_dev
++
++ <hr>
++*/
++static int ioh_can_resume(struct pci_dev *pdev)
++{
++ int i; /* Counter variable. */
++ int retval; /* Return variable. */
++/* struct ioh_can_os *can_os = pdev->dev.driver_data; */
++ struct ioh_can_os *can_os = \
++ (struct ioh_can_os *) dev_get_drvdata(&pdev->dev);
++
++ (void)pci_set_power_state(pdev, PCI_D0);
++ IOH_DEBUG
++ ("ioh_can_resume -> pci_set_power_state invoked successfully.\n");
++
++ (void)pci_restore_state(pdev);
++ IOH_DEBUG
++ ("ioh_can_resume -> pci_restore_state invoked successfully.\n");
++
++ retval = pci_enable_device(pdev);
++ if (0 != retval) {
++ IOH_LOG(KERN_ERR,
++ "ioh_can_resume -> pci_enable_device failed(returned %d).\n",
++ retval);
++ }
++
++ else {
++ IOH_DEBUG
++ ("ioh_can_resume -> pci_enable_device invoked successfully"
++ "(returned %d)\n", retval);
++ (void)pci_enable_wake(pdev, PCI_D3hot, 0);
++
++ /* Disabling all interrupts. */
++ (void)ioh_can_set_int_enables(can_os->can, CAN_DISABLE);
++
++ /* Setting the CAN device in Stop Mode. */
++ (void)ioh_can_set_run_mode(can_os->can, IOH_CAN_STOP);
++
++ /* Configuring the transmit and receive buffers. */
++ ioh_can_config_rx_tx_buffers(can_os->can);
++ IOH_DEBUG
++ ("ioh_can_resume -> ioh_can_config_rx_tx_buffers invoked "
++ "successfully.\n");
++
++ if (1 == can_os->opened) {
++ /* Reset write file operation wait flag */
++ can_os->write_wait_flag = 0;
++
++ /* Restore the CAN state */
++ (void)ioh_can_set_baud_custom(\
++ can_os->can, &(can_os->timing));
++ /*Timing */
++ (void)ioh_can_set_listen_mode(\
++ can_os->can, can_os->listen_mode);
++ /*Listen/Active */
++ (void)ioh_can_set_arbiter_mode(\
++ can_os->can, can_os->arbiter_mode);
++ /*Arbiter mode */
++
++ /* Enabling the transmit buffer. */
++ for (i = 0;
++ i < (ioh_can_tx_buf_size + ioh_can_rx_buf_size);
++ i++) {
++ if (ioh_msg_obj_conf[i] == MSG_OBJ_TX) {
++ /* Here i is the index, however (i+1) is
++ object number. */
++ (void)ioh_can_set_tx_enable(can_os->can,
++ (i + 1),
++ can_os->
++ tx_enable
++ [i]);
++ }
++ }
++
++ /* Configuring the receive buffer and enabling them. */
++ for (i = 0;
++ i < (ioh_can_tx_buf_size + ioh_can_rx_buf_size);
++ i++) {
++ if (ioh_msg_obj_conf[i] == MSG_OBJ_RX) {
++ /* Here i is the index, however (i+1) is
++ object number. */
++
++ /* Restore buffer link */
++ (void)
++ ioh_can_set_rx_buffer_link(can_os->
++ can,
++ (i + 1),
++ can_os->
++ rx_link
++ [i]);
++
++ /* Restore Rx Filters */
++ can_os->rx_filter[i].num = (i + 1);
++ (void)ioh_can_set_rx_filter(can_os->can,
++ &(can_os->
++ rx_filter
++ [i]));
++
++ /* Restore buffer enables */
++ (void)ioh_can_set_rx_enable(can_os->can,
++ (i + 1),
++ can_os->
++ rx_enable
++ [i]);
++ }
++ }
++
++ /* Enable CAN Interrupts */
++ (void)ioh_can_set_int_custom(can_os->can,
++ can_os->int_enables);
++
++ /* Restore Run Mode */
++ (void)ioh_can_set_run_mode(can_os->can,
++ can_os->run_mode);
++ }
++ /*if opened */
++ can_os->is_suspending = 0;
++ } /*else */
++
++ IOH_DEBUG("ioh_can_resume returns %d\n", retval);
++ return retval;
++}
++#endif
++
++/*! @ingroup PCILayerAPI
++ @fn static void ioh_can_shutdown(
++ struct pci_dev * pdev)
++ @brief Implements the shutdown functionalities of the PCI
++ Driver.
++ @remarks
++ This function is used as the shutdown function
++ of the PCI Driver.
++ The main tasks performed by this function
++ include :
++ - Prepare the system to enter the shutdown
++ state.
++ @note This function is called by the kernel subsystem during
++ system shutdown.
++
++ @param pdev [@ref INOUT] Reference to the PCI
++ device descriptor.
++
++ @retval None.
++
++ <hr>
++*/
++static void ioh_can_shutdown(struct pci_dev *pdev)
++{
++ (void)ioh_can_suspend(pdev, PMSG_SUSPEND);
++}
++
++module_init(ioh_can_pci_init);
++module_exit(ioh_can_pci_exit);
+diff -urN linux-2.6.33-rc3/drivers/net/can/pch_can/pch_can_pci.h topcliff-2.6.33-rc3/drivers/net/can/pch_can/pch_can_pci.h
+--- linux-2.6.33-rc3/drivers/net/can/pch_can/pch_can_pci.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/can/pch_can/pch_can_pci.h 2010-03-10 08:57:34.000000000 +0900
+@@ -0,0 +1,105 @@
++/*!
++ * @file ioh_can_pci.h
++ * @brief Provides the macro definitions used by the PCI Layer APIs.
++ * @version 1.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * <hr>
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ */
++
++#ifndef __IOH_CAN_PCI_H__
++#define __IOH_CAN_PCI_H__
++
++/* The PCI vendor ID for the CAN device. */
++#ifdef PCI_VENDOR_ID_INTEL
++#undef PCI_VENDOR_ID_INTEL
++#endif
++
++/*! @ingroup PCILayer
++ @def PCI_VENDOR_ID_INTEL
++ @brief The Vendor ID of the supported device.
++ @remarks This ID is used during the registration of the
++ PCI Driver, so that whenever a PCI device with
++ specified Vendor ID is detected the
++ corresponding
++ registered functionalities are invoked.
++ @see
++ - ioh_can_pcidev_id
++
++ <hr>
++*/
++#ifndef FPGA_BOARD
++#define PCI_VENDOR_ID_INTEL (0x8086)
++#else
++#define PCI_VENDOR_ID_INTEL (0x10DB)
++#endif
++
++/* The PCI device ID for the CAN device . */
++/*! @ingroup PCILayer
++ @def PCI_DEVICE_ID_INTEL_IOH1_CAN
++ @brief The Device ID of the supported device.
++ @remarks This ID is used during the registration of the
++ PCI Driver, so that whenever a PCI device with
++ specified Device ID is detected the
++ corresponding
++ registered functionalities are invoked.
++ @see
++ - ioh_can_pcidev_id
++
++ <hr>
++*/
++#ifndef FPGA_BOARD
++#define PCI_DEVICE_ID_INTEL_IOH1_CAN (0x8818)
++#else
++#define PCI_DEVICE_ID_INTEL_IOH1_CAN (0x800A)
++#endif
++
++/* The driver name. */
++/*! @ingroup InterfaceLayer
++ @def DRIVER_NAME
++ @brief The Driver name
++ @see
++ - ioh_can_probe
++
++ <hr>
++*/
++#define DRIVER_NAME "can"
++
++/* external linkage */
++extern unsigned int ioh_msg_obj_conf[MAX_MSG_OBJ]; /* Array denoting the
++ usage of the respective message object. */
++extern struct ioh_can_timing can_rec_timing[]; /* Structure array for different
++ supported timing settings. */
++extern unsigned int ioh_can_rx_buf_size; /* The receive buffer size. */
++extern unsigned int ioh_can_tx_buf_size; /* The transmit buffer size. */
++extern int ioh_can_clock; /* The clock rate. */
++extern struct ioh_can_os can_os[MAX_CAN_DEVICES];
++ /* Structure to store the details of the CAN controller. */
++
++extern const struct file_operations file_ops; /* Structure to register the
++ character device driver. */
++#endif
+diff -urN linux-2.6.33-rc3/drivers/net/can/pch_can/pch_can_utils.c topcliff-2.6.33-rc3/drivers/net/can/pch_can/pch_can_utils.c
+--- linux-2.6.33-rc3/drivers/net/can/pch_can/pch_can_utils.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/can/pch_can/pch_can_utils.c 2010-03-10 08:57:34.000000000 +0900
+@@ -0,0 +1,397 @@
++/*!
++ * @file ioh_can_utils.c
++ * @brief Provides the function definition for FIFO utility functions.
++ * @version 1.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * <hr>
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ */
++
++/* includes */
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++
++#include "pch_common.h"
++#include "pch_debug.h"
++#include "pch_can_utils.h"
++
++/*! @ingroup Utilities
++ @struct can_fifo_item
++ @brief This structure describes the FIFO node items.
++ @remarks This structure is used to implement the
++ software FIFO. It describes a particular
++ node within the FIFO chain.
++
++ <hr>
++*/
++struct can_fifo_item {
++ struct ioh_can_msg msg; /**< The msg object */
++ struct can_fifo_item *next; /**< The next pointer. */
++};
++
++/*! @ingroup Utilities
++ @struct can_fifo
++ @brief This structure is used for maintaining the software
++ FIFO chain.
++ @remarks This structure maintains the reference to the FIFO
++ chain. It specifies which node within the FIFO
++ has to be read and written.
++
++ <hr>
++*/
++struct can_fifo {
++ struct can_fifo_item *head; /**< The node where insertion
++ can be done. */
++ struct can_fifo_item *tail; /**< The node where reading can
++ be done. */
++ unsigned int size; /**< The number of FIFO nodes. */
++};
++
++/*! @ingroup UtilitiesAPI
++ @fn int check_can_fifo_status(int handle)
++ @brief Checks the status (Empty/Full/Non-Empty) of the software
++ fifo.
++ @remarks Checks the status (Empty/Full/Non-Empty) of the software
++ fifo.
++ The main tasks performed by this API are :
++ - Checks the software FIFO status and returns
++ the values denoting
++ the status.
++ - The return values along with the specified
++ meaning is described
++ in the Return values Section.
++
++ @param handle [@ref IN] The handle to the device.
++
++ @retval int
++ - @ref IOH_CAN_FIFO_NOT_EMPTY --> FIFO is not empty.
++ - @ref IOH_CAN_FIFO_EMPTY --> FIFO is empty.
++ - @ref IOH_CAN_FIFO_FULL --> FIFO is full.
++
++ @see
++ - ioh_can_read
++
++ <hr>
++
++*/
++int check_can_fifo_status(int handle)
++{
++ int ret_val;
++ struct can_fifo *f = (struct can_fifo *) handle;
++
++ if (f->head == f->tail) {
++ ret_val = IOH_CAN_FIFO_EMPTY; /*FIFO empty */
++ IOH_DEBUG("check_can_fifo_status -> FIFO empty.\n");
++ } else if (f->head->next == f->tail) {
++ ret_val = IOH_CAN_FIFO_FULL; /*FIFO full */
++ IOH_DEBUG("check_can_fifo_status -> FIFO full.\n");
++ } else {
++ ret_val = IOH_CAN_FIFO_NOT_EMPTY; /* FIFO non empty. */
++ IOH_DEBUG("check_can_fifo_status -> FIFO non-empty.\n");
++ }
++
++ IOH_DEBUG("check_can_fifo_status returns %d\n", ret_val);
++ return ret_val;
++}
++
++/*! @ingroup UtilitiesAPI
++ @fn int create_can_fifo(unsigned int fifo_entries)
++ @brief Creates the CAN FIFO.
++ @remarks Creates the CAN FIFO depending on the argument value
++ passed.
++ The main tasks performed by this API are :
++ - Depending on obtained argument value allocates
++ memory for
++ the software FIFO nodes.
++ - If any of the memory allocation fails,
++ releases the allocated
++ memory.
++ - Updates the software FIFO related structures
++ with the
++ reference values so that the FIFO can
++ be accessed by other
++ APIs
++
++ @param fifo_entries [@ref IN] The number of FIFO nodes
++ to be created.
++
++ @retval int
++ - >0 --> Creation successful.
++ - @ref IOH_CAN_NULL --> Creation failed.
++
++ @see
++ - ioh_candev_open
++
++ <hr>
++
++*/
++int create_can_fifo(unsigned int fifo_entries)
++{
++ unsigned int i;
++ struct can_fifo_item *curr;
++ struct can_fifo *f;
++ int retval;
++
++ /* Allocating the Main start node. */
++ f = (struct can_fifo *) CAN_MEM_ALLOC(sizeof(struct can_fifo));
++
++ if (f == NULL) {
++ IOH_LOG(KERN_ERR,
++ "create_can_fifo -> msg queue allocation failed.\n");
++ retval = (int) IOH_CAN_NULL;
++ } else {
++ /* Allocating the first node. */
++ f->head =
++ (struct can_fifo_item *) CAN_MEM_ALLOC(\
++ sizeof(struct can_fifo_item));
++
++ if ((f->head == NULL)) { /* Failed. */
++ CAN_MEM_FREE(f);
++ retval = (int) IOH_CAN_NULL;
++ } else {
++ /* Initially empty. */
++ f->tail = f->head;
++ curr = f->head;
++
++ /* Rest of the nod ecreation Node creation. */
++ for (i = 1; i <= fifo_entries; i++) {
++ curr->next =
++ (struct can_fifo_item *)
++ CAN_MEM_ALLOC(sizeof(struct can_fifo_item));
++
++ /* If allocation failed. */
++ if ((curr->next == NULL)) {
++ IOH_LOG(KERN_ERR,
++ "create_can_fifo -> \
++ Allocation failed.\n");
++ i = (i - 1);
++ /* Freeing the already allocated
++ nodes. */
++ while (i > 0) {
++ curr = f->head;
++ f->head = curr->next;
++ CAN_MEM_FREE(curr);
++
++ i--;
++ }
++
++ /* Freeing the main start node. */
++ CAN_MEM_FREE(f);
++ f = NULL;
++ retval =
++ (int) IOH_CAN_NULL;
++ break;
++ }
++
++ curr = curr->next;
++ }
++
++ if (NULL != f) {
++ /*Making it circular. */
++ curr->next = f->head;
++ f->size = fifo_entries;
++
++ retval = (int) f;
++ IOH_DEBUG("create_can_fifo sucessful.\n");
++ }
++ }
++ }
++
++ IOH_DEBUG("create_can_fifo returns %u.\n", retval);
++ return retval;
++}
++
++/*! @ingroup UtilitiesAPI
++ @fn void delete_can_fifo(int handle)
++ @brief Deletes the software FIFO.
++ @remarks Deletes the previously created software FIFO nodes.
++ The main task performed by this API is :
++ - Releases all the memory allocated for FIFO
++ nodes.
++
++ @param handle [@ref IN] The handle to the device.
++
++ @retval None.
++
++ @see
++ - ioh_candev_close
++
++ <hr>
++*/
++void delete_can_fifo(int handle)
++{
++ unsigned int i;
++ struct can_fifo_item *curr = NULL;
++ struct can_fifo_item *next = NULL;
++ struct can_fifo *f = (struct can_fifo *) handle;
++
++ if (handle != (int) 0) { /* valid handle */
++ curr = f->head;
++
++ if (f->head != NULL) {
++ next = curr->next;
++
++ /* Freeing individual node. */
++ for (i = 0; i < f->size; i++) {
++ CAN_MEM_FREE(curr);
++ curr = next;
++ next = (struct can_fifo_item *) curr->next;
++ }
++ }
++
++ /* Free the START node. */
++ CAN_MEM_FREE(f);
++ } else {
++ IOH_LOG(KERN_ERR, "delete_can_fifo -> Invalid handle.\n");
++ }
++
++ IOH_DEBUG("delete_can_fifo successful.\n");
++}
++
++/*! @ingroup UtilitiesAPI
++ @fn int read_can_fifo(
++ int handle,struct ioh_can_msg *msg)
++ @brief Reads data from the software FIFO.
++ @remarks Reads data from the software FIFO when read system call
++ is issued. The main tasks performed by this API
++ are :
++ - Reads the node from the FIFO under the Read
++ reference locator.
++ - Advances the Read reference locator to the
++ next node to be
++ read.
++
++ @param handle [@ref IN] The handle to the device.
++ @param msg [@ref OUT] Reference to the read
++ message.
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> Operation successful.
++ - @ref IOH_CAN_FAIL --> Operation failed.
++
++ @see
++ - ioh_can_read
++
++ <hr>
++*/
++int read_can_fifo(int handle, struct ioh_can_msg *msg)
++{
++ int i;
++ int retval = IOH_CAN_SUCCESS;
++ struct can_fifo *f = (struct can_fifo *) handle;
++ struct ioh_can_msg msg_tmp;
++
++ if ((handle == (int) 0) || (msg == NULL)) { /* invalid parameters.*/
++ IOH_LOG(KERN_ERR, "read_can_fifo -> Invalid parameter.\n");
++ retval = IOH_CAN_FAIL;
++ } else if (f->head == f->tail) { /* Buffer Empty */
++ IOH_DEBUG("read_can_fifo -> FIFO empty.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Accessing the msg object in the FIFO. */
++ msg_tmp = f->tail->msg;
++
++ /* Filling in the msg object. */
++ msg->ide = msg_tmp.ide;
++ msg->id = msg_tmp.id;
++ msg->dlc = msg_tmp.dlc;
++ msg->rtr = msg_tmp.rtr;
++
++ for (i = 0; i < IOH_CAN_MSG_DATA_LEN; i++)
++ msg->data[i] = msg_tmp.data[i];
++
++ /* Proceeding the FIFO read pointer. */
++ f->tail = f->tail->next;
++ IOH_DEBUG("read_can_fifo successful.\n");
++ }
++
++ IOH_DEBUG("read_can_fifo returns %d.\n", retval);
++ return retval;
++}
++
++/*! @ingroup UtilitiesAPI
++ @fn int write_can_fifo(
++ int handle,struct ioh_can_msg *msg)
++ @brief Write data to the software FIFO.
++ @remarks Write data to the software FIFO when a message is
++ received.
++ The main tasks performed by this API are :
++ - Writes the obtained data to the FIFO node
++ under the Write
++ Reference locator.
++ - Advances the Write reference locator to the
++ next node to be written.
++
++ @param handle [@ref IN] The handle to the device.
++ @param msg [@ref IN] Reference to the data
++ to be written
++
++ @retval int
++ - @ref IOH_CAN_SUCCESS --> Operation successful.
++ - @ref IOH_CAN_FAIL --> Operation failed.
++
++ @see
++ - ioh_can_callback
++
++ <hr>
++
++*/
++int write_can_fifo(int handle, struct ioh_can_msg *msg)
++{
++ int i;
++ int retval = IOH_CAN_SUCCESS;
++ struct can_fifo *f = (struct can_fifo *) handle;
++ struct ioh_can_msg *msg_tmp;
++
++ if ((handle == (int) 0) || (msg == NULL)) { /* invalid parameters.*/
++ IOH_LOG(KERN_ERR, "write_can_fifo -> Invalid parameters.\n");
++ retval = IOH_CAN_FAIL;
++ } else if (f->head->next == f->tail) {
++ IOH_DEBUG("write_can_fifo -> FIFO Full.\n");
++ retval = IOH_CAN_FAIL;
++ } else {
++ /* Accessing the write node in the FIFO */
++ msg_tmp = &(f->head->msg);
++
++ /* Filling in the FIFO node. */
++ msg_tmp->ide = msg->ide;
++ msg_tmp->rtr = msg->rtr;
++ msg_tmp->id = msg->id;
++ msg_tmp->dlc = msg->dlc;
++
++ for (i = 0; i < IOH_CAN_MSG_DATA_LEN; i++)
++ msg_tmp->data[i] = msg->data[i];
++
++ /* Proceeding the write node. */
++ f->head = f->head->next;
++ IOH_DEBUG("write_can_fifo successful.\n");
++ }
++
++ IOH_DEBUG("write_can_fifo returns %d.\n", retval);
++ return retval;
++}
+diff -urN linux-2.6.33-rc3/drivers/net/can/pch_can/pch_can_utils.h topcliff-2.6.33-rc3/drivers/net/can/pch_can/pch_can_utils.h
+--- linux-2.6.33-rc3/drivers/net/can/pch_can/pch_can_utils.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/can/pch_can/pch_can_utils.h 2010-03-10 08:57:34.000000000 +0900
+@@ -0,0 +1,127 @@
++/*!
++ * @file ioh_can_utils.h
++ * @brief Provides the function prototype of the Utilis Layer APIs.
++ * @version 1.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * <hr>
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ */
++
++#ifndef __IOH_CAN_FIFO_H__
++#define __IOH_CAN_FIFO_H__
++
++#include "pch_can_main.h"
++
++#define CAN_MEM_ALLOC(size) (kmalloc((size), GFP_KERNEL))
++#define CAN_MEM_FREE(ptr) (kfree((ptr)))
++
++/*! @ingroup Utilities
++ @def IOH_CAN_FIFO_NOT_EMPTY
++ @brief Flag value denoting that the software fifo is not empty.
++ @see
++ - check_can_fifo_status
++ - ioh_can_read
++
++ <hr>
++*/
++#define IOH_CAN_FIFO_NOT_EMPTY (0)
++
++/*! @ingroup Utilities
++ @def IOH_CAN_FIFO_EMPTY
++ @brief Flag value denoting that the software fifo is empty.
++ @see
++ - check_can_fifo_status
++ - ioh_can_read
++
++ <hr>
++*/
++#define IOH_CAN_FIFO_EMPTY (1)
++
++/*! @ingroup Utilities
++ @def IOH_CAN_FIFO_FULL
++ @brief Flag value denoting that the software fifo is full.
++ @see
++ - check_can_fifo_status
++ - ioh_can_read
++
++ <hr>
++*/
++#define IOH_CAN_FIFO_FULL (2)
++
++/*! @ingroup Utilities
++ @def IOH_CAN_NULL
++ @brief Denoting NULL value.
++
++ @see
++ - ioh_can_create
++ - create_can_fifo
++ <hr>
++*/
++#define IOH_CAN_NULL (NULL)
++
++/*! @ingroup UtilitiesAPI
++ @fn int check_can_fifo_status(int handle)
++ @brief Checks the status (Empty/Full/Non-Empty) of the software
++ fifo.
++
++ <hr>
++*/
++int check_can_fifo_status(int handle);
++
++/*! @ingroup UtilitiesAPI
++ @fn int create_can_fifo(unsigned int num_nodes)
++ @brief Creates the CAN FIFO.
++
++ <hr>
++*/
++int create_can_fifo(unsigned int num_nodes);
++
++/*! @ingroup UtilitiesAPI
++ @fn void delete_can_fifo(int handle)
++ @brief Deletes the software FIFO.
++
++ <hr>
++*/
++void delete_can_fifo(int handle);
++
++/*! @ingroup UtilitiesAPI
++ @fn int read_can_fifo(int handle, struct ioh_can_msg *msg)
++ @brief Reads data from the software FIFO.
++
++ <hr>
++*/
++int read_can_fifo(int handle, struct ioh_can_msg *msg);
++
++/*! @ingroup UtilitiesAPI
++ @fn int write_can_fifo(int handle, struct ioh_can_msg *msg)
++ @brief Write data to the software FIFO.
++
++ <hr>
++*/
++int write_can_fifo(int handle, struct ioh_can_msg *msg);
++
++#endif /* __IOH_CAN_FIFO_H__ */
+diff -urN linux-2.6.33-rc3/drivers/net/can/pch_can/pch_common.h topcliff-2.6.33-rc3/drivers/net/can/pch_can/pch_common.h
+--- linux-2.6.33-rc3/drivers/net/can/pch_can/pch_common.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/can/pch_can/pch_common.h 2010-03-09 05:56:11.000000000 +0900
+@@ -0,0 +1,146 @@
++/*!
++ * @file ioh_common.h
++ * @brief Provides the macro definitions used by all files.
++ * @version 1.0.0.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ */
++
++#ifndef __IOH_COMMON_H__
++#define __IOH_COMMON_H__
++
++/*! @ingroup Global
++@def IOH_WRITE8
++@brief Macro for writing 8 bit data to an io/mem address
++*/
++#define IOH_WRITE8(val, addr) iowrite8((val), (void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_LOG
++@brief Macro for writing 16 bit data to an io/mem address
++*/
++#define IOH_WRITE16(val, addr) iowrite16((val), (void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_LOG
++@brief Macro for writing 32 bit data to an io/mem address
++*/
++#define IOH_WRITE32(val, addr) iowrite32((val), (void __iomem *)(addr))
++
++/*! @ingroup Global
++@def IOH_READ8
++@brief Macro for reading 8 bit data from an io/mem address
++*/
++#define IOH_READ8(addr) ioread8((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_READ16
++@brief Macro for reading 16 bit data from an io/mem address
++*/
++#define IOH_READ16(addr) ioread16((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_READ32
++@brief Macro for reading 32 bit data from an io/mem address
++*/
++#define IOH_READ32(addr) ioread32((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_WRITE32_F
++@brief Macro for writing 32 bit data to an io/mem address
++*/
++#define IOH_WRITE32_F(val, addr) do \
++ { IOH_WRITE32((val), (addr)); (void)IOH_READ32((addr)); } while (0);
++
++/*! @ingroup Global
++@def IOH_WRITE_BYTE
++@brief Macro for writing 1 byte data to an io/mem address
++*/
++#define IOH_WRITE_BYTE IOH_WRITE8
++/*! @ingroup Global
++@def IOH_WRITE_WORD
++@brief Macro for writing 1 word data to an io/mem address
++*/
++#define IOH_WRITE_WORD IOH_WRITE16
++/*! @ingroup Global
++@def IOH_WRITE_LONG
++@brief Macro for writing long data to an io/mem address
++*/
++#define IOH_WRITE_LONG IOH_WRITE32
++
++/*! @ingroup Global
++@def IOH_READ_BYTE
++@brief Macro for reading 1 byte data from an io/mem address
++*/
++#define IOH_READ_BYTE IOH_READ8
++/*! @ingroup Global
++@def IOH_READ_WORD
++@brief Macro for reading 1 word data from an io/mem address
++*/
++#define IOH_READ_WORD IOH_READ16
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief Macro for reading long data from an io/mem address
++*/
++#define IOH_READ_LONG IOH_READ32
++
++/* Bit Manipulation Macros */
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to set a specified bit(mask) at the
++ specified address
++*/
++#define IOH_SET_ADDR_BIT(addr, bitmask) IOH_WRITE_LONG((IOH_READ_LONG(addr) |\
++ (bitmask)), (addr))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to clear a specified bit(mask) at the specified address
++*/
++#define IOH_CLR_ADDR_BIT(addr, bitmask) IOH_WRITE_LONG((IOH_READ_LONG(addr) &\
++ ~(bitmask)), (addr))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to set a specified bitmask for a variable
++*/
++#define IOH_SET_BITMSK(var, bitmask) ((var) |= (bitmask))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to clear a specified bitmask for a variable
++*/
++#define IOH_CLR_BITMSK(var, bitmask) ((var) &= (~(bitmask)))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to set a specified bit for a variable
++*/
++#define IOH_SET_BIT(var, bit) ((var) |= (1<<(bit)))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to clear a specified bit for a variable
++*/
++#define IOH_CLR_BIT(var, bit) ((var) &= ~(1<<(bit)))
++
++#endif
+diff -urN linux-2.6.33-rc3/drivers/net/can/pch_can/pch_debug.h topcliff-2.6.33-rc3/drivers/net/can/pch_can/pch_debug.h
+--- linux-2.6.33-rc3/drivers/net/can/pch_can/pch_debug.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/can/pch_can/pch_debug.h 2010-03-09 05:37:47.000000000 +0900
+@@ -0,0 +1,60 @@
++/*!
++ * @file ioh_debug.h
++ * @brief Provides the macro definitions used for debugging.
++ * @version 1.0.0.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ */
++
++#ifndef __IOH_DEBUG_H__
++#define __IOH_DEBUG_H__
++
++#ifdef MODULE
++#define IOH_LOG(level, fmt, args...) printk(level "%s:" fmt "\n",\
++ THIS_MODULE->name, ##args)
++#else
++#define IOH_LOG(level, fmt, args...) printk(level "%s:" fmt "\n" ,\
++ __FILE__, ##args)
++#endif
++
++
++#ifdef DEBUG
++ #define IOH_DEBUG(fmt, args...) IOH_LOG(KERN_DEBUG, fmt, ##args)
++#else
++ #define IOH_DEBUG(fmt, args...)
++#endif
++
++#ifdef IOH_TRACE_ENABLED
++ #define IOH_TRACE IOH_DEBUG
++#else
++ #define IOH_TRACE(fmt, args...)
++#endif
++
++#define IOH_TRACE_ENTER IOH_TRACE("Enter %s", __func__)
++#define IOH_TRACE_EXIT IOH_TRACE("Exit %s", __func__)
++
++
++#endif
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-dma.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-dma.patch
new file mode 100644
index 0000000..e3fddb2
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-dma.patch
@@ -0,0 +1,4133 @@
+---
+ drivers/dma/Kconfig | 9
+ drivers/dma/Makefile | 1
+ drivers/dma/pch_dma/Makefile | 5
+ drivers/dma/pch_dma/pch_common.h | 146 ++++
+ drivers/dma/pch_dma/pch_debug.h | 60 +
+ drivers/dma/pch_dma/pch_dma_hal.c | 1203 +++++++++++++++++++++++++++++++++++++
+ drivers/dma/pch_dma/pch_dma_hal.h | 594 ++++++++++++++++++
+ drivers/dma/pch_dma/pch_dma_main.c | 1026 +++++++++++++++++++++++++++++++
+ drivers/dma/pch_dma/pch_dma_main.h | 264 ++++++++
+ drivers/dma/pch_dma/pch_dma_pci.c | 694 +++++++++++++++++++++
+ drivers/dma/pch_dma/pch_dma_pci.h | 74 ++
+ 11 files changed, 4076 insertions(+)
+
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -51,6 +51,15 @@ config LNW_DMA_DEBUG
+ help
+ Enable logging in the LNW DMA drivers
+
++config PCH_UART_DMA
++ tristate "PCH DMA Controller"
++ depends on PCI && SERIAL_8250_PCH_DMA
++ select DMA_ENGINE
++ default y
++ help
++ This value must equal to SERIAL_8250_PCH. This config PCH_UART_DMA is
++ referred by PCH UART.
++
+ config INTEL_IOATDMA
+ tristate "Intel I/OAT DMA support"
+ depends on PCI && X86
+--- a/drivers/dma/Makefile
++++ b/drivers/dma/Makefile
+@@ -14,4 +14,5 @@ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
+ obj-$(CONFIG_SH_DMAE) += shdma.o
+ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
+ obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
++obj-$(CONFIG_PCH_UART_DMA) += pch_dma/
+ obj-$(CONFIG_TIMB_DMA) += timb_dma.o
+--- /dev/null
++++ b/drivers/dma/pch_dma/Makefile
+@@ -0,0 +1,5 @@
++#enable for debug;this can be added in Kconfig
++#EXTRA_CFLAGS += -DDEBUG
++
++obj-$(CONFIG_PCH_UART_DMA) += pch_dma.o
++pch_dma-objs := pch_dma_pci.o pch_dma_hal.o pch_dma_main.o
+--- /dev/null
++++ b/drivers/dma/pch_dma/pch_common.h
+@@ -0,0 +1,146 @@
++/*!
++ * @file ioh_common.h
++ * @brief Provides the macro definitions used by all files.
++ * @version 1.0.0.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ */
++
++#ifndef __IOH_COMMON_H__
++#define __IOH_COMMON_H__
++
++/*! @ingroup Global
++@def IOH_WRITE8
++@brief Macro for writing 8 bit data to an io/mem address
++*/
++#define IOH_WRITE8(val, addr) iowrite8((val), (void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_LOG
++@brief Macro for writing 16 bit data to an io/mem address
++*/
++#define IOH_WRITE16(val, addr) iowrite16((val), (void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_LOG
++@brief Macro for writing 32 bit data to an io/mem address
++*/
++#define IOH_WRITE32(val, addr) iowrite32((val), (void __iomem *)(addr))
++
++/*! @ingroup Global
++@def IOH_READ8
++@brief Macro for reading 8 bit data from an io/mem address
++*/
++#define IOH_READ8(addr) ioread8((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_READ16
++@brief Macro for reading 16 bit data from an io/mem address
++*/
++#define IOH_READ16(addr) ioread16((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_READ32
++@brief Macro for reading 32 bit data from an io/mem address
++*/
++#define IOH_READ32(addr) ioread32((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_WRITE32_F
++@brief Macro for writing 32 bit data to an io/mem address
++*/
++#define IOH_WRITE32_F(val, addr) do \
++ { IOH_WRITE32((val), (addr)); (void)IOH_READ32((addr)); } while (0);
++
++/*! @ingroup Global
++@def IOH_WRITE_BYTE
++@brief Macro for writing 1 byte data to an io/mem address
++*/
++#define IOH_WRITE_BYTE IOH_WRITE8
++/*! @ingroup Global
++@def IOH_WRITE_WORD
++@brief Macro for writing 1 word data to an io/mem address
++*/
++#define IOH_WRITE_WORD IOH_WRITE16
++/*! @ingroup Global
++@def IOH_WRITE_LONG
++@brief Macro for writing long data to an io/mem address
++*/
++#define IOH_WRITE_LONG IOH_WRITE32
++
++/*! @ingroup Global
++@def IOH_READ_BYTE
++@brief Macro for reading 1 byte data from an io/mem address
++*/
++#define IOH_READ_BYTE IOH_READ8
++/*! @ingroup Global
++@def IOH_READ_WORD
++@brief Macro for reading 1 word data from an io/mem address
++*/
++#define IOH_READ_WORD IOH_READ16
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief Macro for reading long data from an io/mem address
++*/
++#define IOH_READ_LONG IOH_READ32
++
++/* Bit Manipulation Macros */
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to set a specified bit(mask) at the
++ specified address
++*/
++#define IOH_SET_ADDR_BIT(addr, bitmask) IOH_WRITE_LONG((IOH_READ_LONG(addr) |\
++ (bitmask)), (addr))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to clear a specified bit(mask) at the specified address
++*/
++#define IOH_CLR_ADDR_BIT(addr, bitmask) IOH_WRITE_LONG((IOH_READ_LONG(addr) &\
++ ~(bitmask)), (addr))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to set a specified bitmask for a variable
++*/
++#define IOH_SET_BITMSK(var, bitmask) ((var) |= (bitmask))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to clear a specified bitmask for a variable
++*/
++#define IOH_CLR_BITMSK(var, bitmask) ((var) &= (~(bitmask)))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to set a specified bit for a variable
++*/
++#define IOH_SET_BIT(var, bit) ((var) |= (1<<(bit)))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to clear a specified bit for a variable
++*/
++#define IOH_CLR_BIT(var, bit) ((var) &= ~(1<<(bit)))
++
++#endif
+--- /dev/null
++++ b/drivers/dma/pch_dma/pch_debug.h
+@@ -0,0 +1,60 @@
++/*!
++ * @file ioh_debug.h
++ * @brief Provides the macro definitions used for debugging.
++ * @version 1.0.0.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ */
++
++#ifndef __IOH_DEBUG_H__
++#define __IOH_DEBUG_H__
++
++#ifdef MODULE
++#define IOH_LOG(level, fmt, args...) printk(level "%s:" fmt "\n",\
++ THIS_MODULE->name, ##args)
++#else
++#define IOH_LOG(level, fmt, args...) printk(level "%s:" fmt "\n" ,\
++ __FILE__, ##args)
++#endif
++
++
++#ifdef DEBUG
++ #define IOH_DEBUG(fmt, args...) IOH_LOG(KERN_DEBUG, fmt, ##args)
++#else
++ #define IOH_DEBUG(fmt, args...)
++#endif
++
++#ifdef IOH_TRACE_ENABLED
++ #define IOH_TRACE IOH_DEBUG
++#else
++ #define IOH_TRACE(fmt, args...)
++#endif
++
++#define IOH_TRACE_ENTER IOH_TRACE("Enter %s", __func__)
++#define IOH_TRACE_EXIT IOH_TRACE("Exit %s", __func__)
++
++
++#endif
+--- /dev/null
++++ b/drivers/dma/pch_dma/pch_dma_hal.c
+@@ -0,0 +1,1203 @@
++/**
++ * @file ioh_dma_hal.c
++ *
++ * @brief
++ * This file defines the IOH_DMA_CONTROLLER HAL API functions.
++ *
++ * @version 0.90
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * <hr>
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 08/14/2009
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/io.h>
++#include <linux/string.h>
++#include <linux/module.h>
++
++#include "pch_common.h"
++#include "pch_debug.h"
++#include "pch_dma_hal.h"
++
++/*! @ingroup HALLayer
++ @def IOH_DMA_BIT_SET
++ @brief Macro for setting selected bits of a register.
++ @remarks This macro is used to set the selected bits
++ at a given 32 bit location. Normally it is
++ used to set the bits of given register.
++*/
++#define IOH_DMA_BIT_SET(reg, bitmask) \
++ IOH_WRITE_LONG(((IOH_READ_LONG((reg)) | bitmask)), (reg))
++
++/*! @ingroup HALLayer
++ @def IOH_DMA_BIT_CLEAR
++ @brief Macro for re-setting selected bits of a register.
++ @remarks This macro is used to reset the selected bits
++ at a given 32 bit location. Normally it is
++ used to reset the bits of given register.
++*/
++#define IOH_DMA_BIT_CLEAR(regAddr, bitMask) \
++ IOH_WRITE_LONG((IOH_READ_LONG((regAddr)) & (~(bitMask))), \
++ (regAddr))
++
++/*! @ingroup HALLayer
++ @def DEFAULT_CONTROL_REGISTER_VALUE
++ @brief Macro for setting selected bits of control register.
++ @remarks This macro is used to set the mode and direction
++ bit of the control register of a specific
++ channel without affecting the settings of other
++ channels.
++*/
++#define DEFAULT_CONTROL_REGISTER_VALUE (0x33333333)
++
++/*! @ingroup HALLayer
++ @def dma_clear_interrupt_status
++ @brief Macro for clearing the interrupt status of the
++ DMA.
++ @remarks This macro is used to clear the interrupt status
++ bits of the DMA during handling of interrupts.
++*/
++#define dma_clear_interrupt_status(addr, stat0, stat2) \
++do { \
++ IOH_WRITE_LONG((stat0), ((addr) + DMA_STS0_OFFSET)); \
++ IOH_WRITE_LONG((stat2), ((addr) + DMA_STS2_OFFSET)); \
++} while (0)
++
++/*! @ingroup HALLayer
++ @def dma_get_interrupt_status
++ @brief Macro for getting the interrupt status of a
++ specific channel
++ @remarks This macro is used to get the interrupt status
++ of the DMA during handling of interrupts.
++*/
++#define dma_get_interrupt_status(ch, stat0, stat2) \
++( \
++ ((ch) < 8) ? \
++ (((stat0) & (DMA_INTERRUPT_OCCUR << ch)) != 0) \
++ : \
++ (((stat2) & (DMA_INTERRUPT_OCCUR << (ch - 8))) != 0) \
++)
++
++/*! @ingroup HALLayer
++ @def dma_get_abort_status
++ @brief Macro for getting the abort status of a specific
++ channel.
++ @remarks This macro is used to get the abort status
++ of the DMA during handling of interrupts.
++*/
++#define dma_get_abort_status(ch, stat0, stat2) \
++( \
++ ((ch) < 8) ? \
++ (((stat0) & (DMA_ABORT_OCCUR << ch)) != 0) \
++ : \
++ (((stat2) & (DMA_ABORT_OCCUR << (ch - 8))) != 0) \
++)
++
++/* Global Varibles */
++/*! @ingroup Global
++ @var ioh_dma_channel_info
++ @brief Retains the specific channel information.
++*/
++struct ioh_dma_controller_info ioh_dma_channel_info[IOH_DMA_CHANNELS_MAX];
++
++/* Channel Allocation Table for DMA */
++/*! @ingroup Global
++ @var ioh_dma_channel_table
++ @brief Retains the specific channel allocation
++ information.
++*/
++struct ioh_dma_channel_alloc_table ioh_dma_channel_table[IOH_DMA_CHANNELS_MAX]
++= {
++ /* 4 channel DMA device0 (Reserved for GE.) */
++ {IOH_DMA_4CH0, IOH_DMA_TX_DATA_REQ0, PCI_DEVICE_ID_IOH_SPI, 0, 0, 0, 0},
++ {IOH_DMA_4CH0, IOH_DMA_RX_DATA_REQ0, PCI_DEVICE_ID_IOH_SPI, 1, 0, 0, 0},
++ {IOH_DMA_4CH0, 0, 0, 2, 0, 0, 0},
++ {IOH_DMA_4CH0, 0, 0, 3, 0, 0, 0},
++
++ /* 4 channel DMA device1 (Not reserved.) */
++ {IOH_DMA_4CH1, 0, 0, 0, 0, 0, 0},
++ {IOH_DMA_4CH1, 0, 0, 1, 0, 0, 0},
++ {IOH_DMA_4CH1, 0, 0, 2, 0, 0, 0},
++ {IOH_DMA_4CH1, 0, 0, 3, 0, 0, 0},
++
++ /* 4 channel DMA device2 (Not reserved.) */
++ {IOH_DMA_4CH2, 0, 0, 0, 0, 0, 0},
++ {IOH_DMA_4CH2, 0, 0, 1, 0, 0, 0},
++ {IOH_DMA_4CH2, 0, 0, 2, 0, 0, 0},
++ {IOH_DMA_4CH2, 0, 0, 3, 0, 0, 0},
++
++ /* 4 channel DMA device3 (Not reserved.) */
++ {IOH_DMA_4CH3, 0, 0, 0, 0, 0, 0},
++ {IOH_DMA_4CH3, 0, 0, 1, 0, 0, 0},
++ {IOH_DMA_4CH3, 0, 0, 2, 0, 0, 0},
++ {IOH_DMA_4CH3, 0, 0, 3, 0, 0, 0},
++
++ /* 4 channel DMA device4 (Not reserved.) */
++ {IOH_DMA_4CH4, 0, 0, 0, 0, 0, 0},
++ {IOH_DMA_4CH4, 0, 0, 1, 0, 0, 0},
++ {IOH_DMA_4CH4, 0, 0, 2, 0, 0, 0},
++ {IOH_DMA_4CH4, 0, 0, 3, 0, 0, 0},
++
++ /* 8 channel DMA device0 (Reserved for GE.) */
++ {IOH_DMA_8CH0, IOH_DMA_TX_DATA_REQ0, PCI_DEVICE_ID_IOH_UART0, 0, 0, 0,
++ 0},
++ {IOH_DMA_8CH0, IOH_DMA_RX_DATA_REQ0, PCI_DEVICE_ID_IOH_UART0, 1, 0, 0,
++ 0},
++ {IOH_DMA_8CH0, IOH_DMA_TX_DATA_REQ0, PCI_DEVICE_ID_IOH_UART1, 2, 0, 0,
++ 0},
++ {IOH_DMA_8CH0, IOH_DMA_RX_DATA_REQ0, PCI_DEVICE_ID_IOH_UART1, 3, 0, 0,
++ 0},
++ {IOH_DMA_8CH0, IOH_DMA_TX_DATA_REQ0, PCI_DEVICE_ID_IOH_UART2, 4, 0, 0,
++ 0},
++ {IOH_DMA_8CH0, IOH_DMA_RX_DATA_REQ0, PCI_DEVICE_ID_IOH_UART2, 5, 0, 0,
++ 0},
++ {IOH_DMA_8CH0, IOH_DMA_TX_DATA_REQ0, PCI_DEVICE_ID_IOH_UART3, 6, 0, 0,
++ 0},
++ {IOH_DMA_8CH0, IOH_DMA_RX_DATA_REQ0, PCI_DEVICE_ID_IOH_UART3, 7, 0, 0,
++ 0},
++
++ /* 8 channel DMA device1 */
++ {IOH_DMA_8CH1, 0, 0, 0, 0, 0, 0},
++ {IOH_DMA_8CH1, 0, 0, 1, 0, 0, 0},
++ {IOH_DMA_8CH1, 0, 0, 2, 0, 0, 0},
++ {IOH_DMA_8CH1, 0, 0, 3, 0, 0, 0},
++ {IOH_DMA_8CH1, 0, 0, 4, 0, 0, 0},
++ {IOH_DMA_8CH1, 0, 0, 5, 0, 0, 0},
++ {IOH_DMA_8CH1, 0, 0, 6, 0, 0, 0},
++ {IOH_DMA_8CH1, 0, 0, 7, 0, 0, 0},
++
++ /* 8 channel DMA device2 */
++ {IOH_DMA_8CH2, 0, 0, 0, 0, 0, 0},
++ {IOH_DMA_8CH2, 0, 0, 1, 0, 0, 0},
++ {IOH_DMA_8CH2, 0, 0, 2, 0, 0, 0},
++ {IOH_DMA_8CH2, 0, 0, 3, 0, 0, 0},
++ {IOH_DMA_8CH2, 0, 0, 4, 0, 0, 0},
++ {IOH_DMA_8CH2, 0, 0, 5, 0, 0, 0},
++ {IOH_DMA_8CH2, 0, 0, 6, 0, 0, 0},
++ {IOH_DMA_8CH2, 0, 0, 7, 0, 0, 0},
++
++ /* 8 channel DMA device3 (Doubts in allocating.) */
++ {IOH_DMA_8CH3, 0, 0, 0, 0, 0, 0},
++ {IOH_DMA_8CH3, 0, 0, 1, 0, 0, 0},
++ {IOH_DMA_8CH3, 0, 0, 2, 0, 0, 0},
++ {IOH_DMA_8CH3, 0, 0, 3, 0, 0, 0},
++ {IOH_DMA_8CH3, 0, 0, 4, 0, 0, 0},
++ {IOH_DMA_8CH3, 0, 0, 5, 0, 0, 0},
++ {IOH_DMA_8CH3, 0, 0, 6, 0, 0, 0},
++ {IOH_DMA_8CH3, 0, 0, 7, 0, 0, 0},
++
++ /* 12 channel DMA device0 */
++ {IOH_DMA_12CH0, 0, 0, 0, 0, 0, 0},
++ {IOH_DMA_12CH0, 0, 0, 1, 0, 0, 0},
++ {IOH_DMA_12CH0, 0, 0, 2, 0, 0, 0},
++ {IOH_DMA_12CH0, 0, 0, 3, 0, 0, 0},
++ {IOH_DMA_12CH0, 0, 0, 4, 0, 0, 0},
++ {IOH_DMA_12CH0, 0, 0, 5, 0, 0, 0},
++ {IOH_DMA_12CH0, 0, 0, 6, 0, 0, 0},
++ {IOH_DMA_12CH0, 0, 0, 7, 0, 0, 0},
++ {IOH_DMA_12CH0, 0, 0, 8, 0, 0, 0},
++ {IOH_DMA_12CH0, 0, 0, 9, 0, 0, 0},
++ {IOH_DMA_12CH0, 0, 0, 10, 0, 0, 0},
++ {IOH_DMA_12CH0, 0, 0, 11, 0, 0, 0}
++};
++
++/* Function Definitions */
++
++/*! @ingroup HALLayerAPI
++ @fn void __init dma_init(u32 base, u32 dev_type)
++ @brief Initializes local data structures for the DMAC device.
++ @remarks This function is called when a DMA device is detected.
++ It initializes the data structures associated
++ with the obtained device. The main tasks
++ performed by this function are:
++ - Waits until the status of a DMA channel
++ becomes idle and then disables it.
++ - Initializes the data structures that can
++ be used further.
++
++ @param base [@ref IN] The base address.
++ @param dev_type [@ref IN] The type of the device.
++
++ @return None.
++
++ @see
++ - ioh_dma_probe
++ */
++
++void __init dma_init(u32 base, u32 dev_type)
++{
++ int i;
++ u32 counter;
++ u16 DMAStatus;
++
++ for (i = 0; i < IOH_DMA_CHANNELS_MAX; i++) {
++ if (ioh_dma_channel_table[i].dma_dev_id == dev_type) {
++ counter = COUNTER_LIMIT;
++
++ ioh_dma_channel_table[i].ch_found = 1;
++ ioh_dma_channel_table[i].ch_alloced = 0;
++ ioh_dma_channel_table[i].base = base;
++
++ do {
++ get_dma_status(i, &DMAStatus);
++ } while ((counter--) && (DMAStatus != DMA_STATUS_IDLE));
++
++ (void)dma_disable_ch(i);
++ IOH_DEBUG("dma_init -> Channel %d disabled.\n", i);
++
++ (void)dma_enable_disable_interrupt
++ (i, IOH_DMA_INTERRUPT_DISABLE);
++ IOH_DEBUG
++ ("dma_init -> Interrupt disabled for channel %d.\n",
++ i);
++ }
++ }
++
++ IOH_DEBUG("Function dma_init invoked successfully.\n");
++}
++
++/*! @ingroup HALLayerAPI
++ @fn void dma_exit(u32 dev_type)
++ @brief De-initializes the DMA device.
++ @remarks The main tasks performed by this function are:
++ - Waits for a small interval for each channel
++ if the channel is not idle so that it can
++ complete its transfer.
++ - Disables the channel.
++ - Disables the concerned interrupt.
++
++ @param dev_type [@ref IN] The type of the device.
++
++ @return None
++
++ @see
++ - ioh_dma_remove
++ - ioh_dma_suspend
++*/
++void dma_exit(u32 dev_type)
++{
++ int i;
++ u32 counter;
++ u16 DMAStatus;
++
++ for (i = 0; i < IOH_DMA_CHANNELS_MAX; i++) {
++ if (ioh_dma_channel_table[i].dma_dev_id == dev_type &&
++ ioh_dma_channel_table[i].ch_found == 1) {
++ counter = COUNTER_LIMIT;
++ get_dma_status(i, &DMAStatus);
++
++ while ((counter > 0) &&
++ (DMAStatus != DMA_STATUS_IDLE)) {
++ counter--;
++ get_dma_status(i, &DMAStatus);
++ }
++
++ (void)dma_disable_ch(i);
++ IOH_DEBUG("dma_exit -> Channel %d disabled.\n", i);
++
++ (void)dma_enable_disable_interrupt
++ (i, IOH_DMA_INTERRUPT_DISABLE);
++ IOH_DEBUG("dma_exit -> Interrupt disabled for channel "
++ "%d.\n", i);
++ }
++ }
++
++ IOH_DEBUG("Function dma_exit invoked successfully.\n");
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int dma_set_mode(int channel,
++ struct ioh_dma_mode_param stModeParam)
++ @brief Sets the Mode of transfer for DMA.
++ @remarks Does the setting of direction of transfer, access size
++ type and transfer mode. This function does not
++ perform any register write. The main tasks
++ performed by this function are:
++ - Set the DMATransferDirection field of @ref
++ ioh_dma_channel_info with the direction of
++ transfer specified.
++ - Set the DMAAccessSize field of @ref
++ ioh_dma_channel_info with the Access Size Type
++ specified.
++ - Set the DMATransferMode field of @ref
++ ioh_dma_channel_info structure with the DMA mode
++ specified.
++
++ @param channel [@ref IN] The channel for which mode is to be set.
++ @param stModeParam [@ref IN] Structure which contains the
++ parameters for the setting of Mode.
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> On success.
++
++ @see
++ - ioh_set_dma_mode
++ */
++int dma_set_mode(int channel, struct ioh_dma_mode_param stModeParam)
++{
++ ioh_dma_channel_info[channel].DMAAccessSize = stModeParam.DMASizeType;
++ ioh_dma_channel_info[channel].DMATransferMode =
++ stModeParam.DMATransferMode;
++ ioh_dma_channel_info[channel].DMATransferDirection =
++ stModeParam.TransferDirection;
++
++ IOH_DEBUG("Function dma_set_mode returns %d.\n", IOH_DMA_SUCCESS);
++ return IOH_DMA_SUCCESS;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int dma_set_addr(int channel, u32 iaddr, u32 oaddr)
++ @brief Sets the Inside and Outside address in the case
++ of ONE SHOT MODE
++ @remarks This function updates Inside address and outside
++ address to be set in ONE SHOT mode. The main
++ tasks performed by this function are:
++ - Set the field in_addr of the @ref
++ ioh_dma_channel_info structure of the
++ corresponding channel to the value of the
++ argument iaddr.
++ - Set the field out_addr of the @ref
++ ioh_dma_channle_info structure of the
++ corresponding channel to the value of the
++ argument oaddr.
++
++ @param channel [@ref IN] Channel for which addresses is
++ to be set.
++ @param iaddr [@ref IN] Inside address to be set
++ @param oaddr [@ref IN] Outside address to be set
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> On Success.
++
++ @see
++ - ioh_set_dma_addr
++
++ */
++int dma_set_addr(int channel, u32 iaddr, u32 oaddr)
++{
++ ioh_dma_channel_info[channel].in_addr = iaddr;
++ ioh_dma_channel_info[channel].out_addr = oaddr;
++
++ IOH_DEBUG("Function dma_set_addr returns %d.\n", IOH_DMA_SUCCESS);
++ return IOH_DMA_SUCCESS;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int dma_enable_ch(int channel)
++ @brief Enables the DMA channel specified.
++ @remarks This function sets the entire DMA settings such as
++ the transfer direction, transfer mode and
++ enables the channel. The main tasks performed by
++ this function are:
++ - Sets the transfer direction.
++ - Sets the transfer mode.
++ - Enabling the channel.
++
++ @param channel [@ref IN] Channel number that
++ is to be enabled.
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> On success.
++
++ @see
++ - ioh_enable_dma
++ */
++int dma_enable_ch(int channel)
++{
++ u32 base_address;
++ u16 transfer_mode;
++ u32 ctl0;
++ u32 ctrl_val = DEFAULT_CONTROL_REGISTER_VALUE;
++ int ch;
++
++ /* Marking the channel as enabled. */
++ ioh_dma_channel_info[channel].bChEnabled = 1;
++
++ ch = ioh_dma_channel_table[channel].channel;
++ base_address = ioh_dma_channel_table[channel].base;
++
++ ctl0 = 0;
++
++ /* Setting of transfer direction. */
++ if (ioh_dma_channel_info[channel].DMATransferDirection ==
++ IOH_DMA_DIR_OUT_TO_IN) {
++ ctl0 |= IOH_DMA_DIR_OUT_TO_IN;
++ }
++
++ /* Setting the transfer mode features. */
++ transfer_mode = ioh_dma_channel_info[channel].DMATransferMode;
++
++ /* If scatter gather mode. */
++ if (transfer_mode == DMA_SCATTER_GATHER_MODE) {
++ u32 next_desc;
++
++ next_desc = ((u32) ioh_dma_channel_info[channel].pHeadOfList);
++ IOH_WRITE_LONG(next_desc, (base_address + (DMA_NX_AD_OFFSET +
++ (ch * 0x10))));
++
++ ctl0 |= DMA_SCATTER_GATHER_MODE;
++ }
++ /* If one shot mode. */
++ else {
++ u32 in_address = ioh_dma_channel_info[channel].in_addr;
++ u32 out_address = ioh_dma_channel_info[channel].out_addr;
++ u32 access_size = ioh_dma_channel_info[channel].DMAAccessSize;
++ u32 count = ioh_dma_channel_info[channel].DMATransferSize;
++
++ ctl0 |= DMA_ONE_SHOT_MODE;
++
++ count |= access_size;
++
++ IOH_WRITE_LONG(in_address,
++ (base_address +
++ (DMA_IN_AD_OFFSET + (ch * 0x10))));
++ IOH_WRITE_LONG(out_address,
++ (base_address +
++ (DMA_OUT_AD_OFFSET + (ch * 0x10))));
++ IOH_WRITE_LONG(count,
++ (base_address + (DMA_SZ_OFFSET + (ch * 0x10))));
++ }
++
++ /* Enabling the interrupts. */
++ (void)dma_enable_disable_interrupt(channel, IOH_DMA_INTERRUPT_ENABLE);
++
++ /* Updating Control register. */
++ if (ch < 8) {
++ /* Clearing the three bits corresponding
++ to the mode and transfer direction of
++ specific channel.
++ */
++ ctrl_val &= ~((MSK_ALL_THREE) << (ch * DMA_SHIFT_MODE_BITS));
++
++ /* Setting the transfer mode and direction. */
++ ctrl_val |= (ctl0 << (ch * DMA_SHIFT_MODE_BITS));
++
++ /* Updating to the register. */
++ IOH_WRITE_LONG(ctrl_val, (base_address + DMA_CTL0_OFFSET));
++
++ IOH_DEBUG("dma_enable -> Control register(0) value: "
++ "%x.\n",
++ IOH_READ_LONG((base_address + DMA_CTL0_OFFSET)));
++ } else {
++ /* Clearing the three bits corresponding
++ to the mode and transfer direction of
++ specific channel.
++ */
++ ctrl_val &=
++ ~((MSK_ALL_THREE) << ((ch - 8) * DMA_SHIFT_MODE_BITS));
++
++ /* Setting the transfer mode and direction. */
++ ctrl_val |= (ctl0 << ((ch - 8) * DMA_SHIFT_MODE_BITS));
++
++ /* Updating to the register. */
++ IOH_WRITE_LONG(ctrl_val, (base_address + DMA_CTL3_OFFSET));
++
++ IOH_DEBUG("dma_enable -> Control register(3) value: "
++ "%x.\n",
++ IOH_READ_LONG((base_address + DMA_CTL3_OFFSET)));
++ }
++
++ IOH_DEBUG("Function dma_enable_ch returns %d.\n", IOH_DMA_SUCCESS);
++ return IOH_DMA_SUCCESS;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int dma_disable_ch(int channel)
++ @brief Disables the DMA channel specified.
++ @remarks This function performs the necessary
++ register updation in-order to disable
++ the DMA channel.
++
++ @param channel [@ref IN] Channel to be disabled.
++
++ @return int
++ - @ref IOH_DMA_SUCCESS
++
++ @see
++ - ioh_disable_dma
++ */
++int dma_disable_ch(int channel)
++{
++ u32 base_address;
++ u16 ch;
++
++ ch = ioh_dma_channel_table[channel].channel;
++ base_address = ioh_dma_channel_table[channel].base;
++
++ if (channel < 8) {
++ /* Clearing the mode bits of the channel */
++ IOH_DMA_BIT_CLEAR((base_address + DMA_CTL0_OFFSET),
++ (DMA_MASK_MODE_BITS <<
++ (ch * DMA_SHIFT_MODE_BITS)));
++ } else {
++ /* Clearing the mode bits of the channel */
++ IOH_DMA_BIT_CLEAR((base_address + DMA_CTL3_OFFSET),
++ (DMA_MASK_MODE_BITS <<
++ ((ch - 8) * DMA_SHIFT_MODE_BITS)));
++ }
++
++ /* Updating the enable variable. */
++ ioh_dma_channel_info[channel].bChEnabled = (u16) 0;
++
++ IOH_DEBUG("Function dma_disable_ch returns " "%d.\n", IOH_DMA_SUCCESS);
++ return IOH_DMA_SUCCESS;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int dma_set_count (int channel, u32 count)
++	@brief	Sets the count value.
++ @remarks Updates the transfer size for ONE_SHOT_MODE
++ of DMA Transfer. The main tasks performed by
++ this function are:
++ - Set the DMATransferSize field of the
++ @ref ioh_dma_channel_info structure to the
++ value of the argument count.
++
++ @param channel [@ref IN] Channel number for
++ which value is to be set
++ @param count [@ref IN] Transfer Size value.
++
++ @return int
++ - @ref IOH_DMA_SUCCESS
++
++ @see
++ - ioh_set_dma_count
++ */
++int dma_set_count(int channel, u32 count)
++{
++ ioh_dma_channel_info[channel].DMATransferSize = count;
++
++ IOH_DEBUG("Function dma_set_count returns %d.\n", IOH_DMA_SUCCESS);
++ return IOH_DMA_SUCCESS;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int dma_add_desc(int channel,
++ struct ioh_dma_desc *start,
++ struct ioh_dma_desc *end)
++ @brief Adds descriptors to the existing list of descriptors.
++ @remarks This function accepts the descriptor list and appends
++ it to the existing list of descriptors. The main
++ tasks performed by this function are:
++ - Obtains the virtual address of the end of the
++ currently set descriptor list. If it is not
++ successful returns with an error.
++ - Appends the value of the argument start to the
++ nextDesc field of the descriptor pointed by the
++ pTailOfList field of the
++ @ref ioh_dma_channel_info structure with the
++ value of the argument start after appropriately
++ setting the last two bits to denote
++ Follow_Next_Descriptor_Without_Interrupt.
++ - Updates the value of the argument end to the
++ pTailOfList field of the @ref
++ ioh_dma_channel_info structure for the
++ corresponding channel.
++
++ @param channel [@ref IN] Channel number.
++ @param start [@ref IN] Reference to first
++ descriptor of list.
++ @param end [@ref IN] Reference to last
++ descriptor of list.
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> If appending of the
++ descriptor is successful.
++
++ @see
++ - ioh_add_dma_desc
++*/
++int dma_add_desc(int channel, struct ioh_dma_desc *start,
++ struct ioh_dma_desc *end)
++{
++ struct ioh_dma_desc *desc_addr;
++
++ desc_addr = ioh_dma_channel_info[channel].pTailOfList;
++
++ /* Obtaining the virtual address. */
++ desc_addr = (struct ioh_dma_desc *) phys_to_virt((u32) desc_addr);
++
++ /* If virtual address calculation successful. */
++ desc_addr->nextDesc = (u32) start;
++ ioh_dma_channel_info[channel].pTailOfList = end;
++
++ IOH_DEBUG("Function dma_add_desc returns %d.\n", IOH_DMA_SUCCESS);
++ return IOH_DMA_SUCCESS;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn void dma_set_callback (int channel, void (*ioh_dma_cbr)
++ ( int value,unsigned long data1),unsigned long data)
++ @brief To set callback function.
++ @remarks Sets the callback function to be called for a channel.
++ The main task performed by this function is:
++ - Updates the callback pointer for the channel
++ in the structure ioh_dma_channel_info with the
++ parameter passed.
++
++ @param channel [@ref IN] Channel number.
++ @param ioh_dma_cbr [@ref IN] Function pointer
++ to call back function.
++ @param data [@ref IN] The data to be passed to
++ the callback function during
++ invoking.
++
++ @return None.
++
++ @see
++ - ioh_dma_set_callback
++ */
++void dma_set_callback(int channel,
++ void (*ioh_dma_cbr) (int value, unsigned long data1),
++ unsigned long data)
++{
++ ioh_dma_channel_info[channel].call_back_func_ptr = ioh_dma_cbr;
++ ioh_dma_channel_info[channel].callback_data = data;
++
++ IOH_DEBUG("Function dma_set_callback invoked successfully.\n");
++}
++
++/*! @ingroup HALLayerAPI
++ @fn irqreturn_t dma_interrupt (int irq, void *dev_id)
++ @brief Interrupt handler.
++ @remarks Handles the interrupt for the DMA. The main tasks
++ performed by this function are:
++ - Checks each DMA channels whether a DMA
++ transmission end or DMA status interrupt has
++ occurred.
++ - If a transmission end interrupt has occurred,
++ then invoke the callback function with @ref
++ IOH_DMA_END, denoting that the DMA transmission
++ has end.
++ - If a DMA abort interrupt has occurred, then
++ invoke the callback function with @ref
++ IOH_DMA_ABORT, denoting that a DMA abort has
++ occurred.
++
++ @param irq [@ref IN] Interrupt Request number
++ @param dev_id [@ref IN] dev_id of device for which
++ interrupt is raised .
++
++ @return irqreturn_t
++ - IRQ_HANDLED --> If interrupt has been processed.
++ - IRQ_NONE --> If no interrupt has been processed.
++
++ */
++irqreturn_t dma_interrupt(int irq, void *dev_id)
++{
++ irqreturn_t retval = IRQ_NONE;
++ u32 status_reg0;
++ u32 status_reg2;
++ u32 base_address;
++ u32 dev_type;
++ u32 i;
++ u16 status;
++
++ base_address = ((struct ioh_dma_devices *) dev_id)->base_addr;
++ dev_type = ((struct ioh_dma_devices *) dev_id)->dev_typ;
++
++ /* Reading the status registers. */
++ status_reg0 = IOH_READ_LONG((base_address + DMA_STS0_OFFSET));
++ status_reg2 = IOH_READ_LONG((base_address + DMA_STS2_OFFSET));
++ IOH_DEBUG("dma_interrupt -> Status register STS0: %x STS2: "
++ "%x.\n", status_reg0, status_reg2);
++
++ /* Clearing the interrupts. */
++ dma_clear_interrupt_status(base_address, status_reg0, status_reg2);
++
++ /* Handling the interrupts. */
++ for (i = 0; i < IOH_DMA_CHANNELS_MAX; i++) {
++ if ((ioh_dma_channel_table[i].dma_dev_id == dev_type) &&
++ (ioh_dma_channel_table[i].ch_alloced == 1) &&
++ (ioh_dma_channel_info[i].bChEnabled == 1)
++ ) {
++ status =
++ dma_get_interrupt_status(ioh_dma_channel_table
++ [i].channel, status_reg0,
++ status_reg2);
++ IOH_DEBUG
++ ("dma_interrupt -> Interrupt status for ch: %d is "
++ "%x.\n", i, status);
++
++ if (status == 1) {
++ int value = IOH_DMA_END;
++
++ status =
++ dma_get_abort_status(ioh_dma_channel_table
++ [i].channel,
++ status_reg0,
++ status_reg2);
++
++ if (status == 1) {
++ value = IOH_DMA_ABORT;
++
++ IOH_DEBUG
++ ("dma_interrupt -> DMA Abort "
++ "interrupt from channel%d.\n", i);
++ }
++#ifdef DEBUG
++ else {
++ IOH_DEBUG
++ ("dma_interrupt -> DMA Completion "
++ "interrupt "
++ "from channel%d.\n", i);
++ }
++#endif
++ if (ioh_dma_channel_info[i].
++ call_back_func_ptr) {
++ u32 data =
++ ioh_dma_channel_info
++ [i].callback_data;
++ (ioh_dma_channel_info
++ [i].call_back_func_ptr) (value, data);
++ }
++
++ /* Determining whether the channel has been
++ disabled. */
++ {
++ u32 ctrl_val;
++ s32 ch =
++ ioh_dma_channel_table[i].channel;
++ u32 base_address =
++ ioh_dma_channel_table[i].base;
++
++ if (ch < 8) {
++ ctrl_val =
++ IOH_READ_LONG((base_address
++ + DMA_CTL0_OFFSET));
++
++ ctrl_val &=
++ ((0x3) <<
++ (ch * DMA_SHIFT_MODE_BITS));
++ } else {
++ ctrl_val =
++ IOH_READ_LONG((base_address
++ + DMA_CTL3_OFFSET));
++ ctrl_val &=
++ ((0x3) <<
++ ((ch - 8) *
++ DMA_SHIFT_MODE_BITS));
++ }
++
++ ioh_dma_channel_info[i].bChEnabled =
++ (ctrl_val != 0) ? 1 : 0;
++
++ } /* End */
++
++ retval = IRQ_HANDLED;
++ }
++ }
++ }
++
++ IOH_DEBUG("Function dma_interrupt returns %d.\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int dma_direct_start (int channel)
++ @brief To generate the DMA request which each Function-IP
++ transmits.
++ @remarks This function is used to initiate the DMA
++ transfer process. The main task performed by
++ this function is:
++ - Sets the value of DMAn Direct Start bit in the
++ Control register 2 to start DMA transfer on
++ channel n.
++
++ @param channel [@ref IN] Channel number for which DMA
++ transfer is to be started.
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> On Success.
++
++ @see
++ - ioh_dma_direct_start
++ */
++int dma_direct_start(int channel)
++{
++ int ch;
++ u32 base_address;
++
++ ch = ioh_dma_channel_table[channel].channel;
++ base_address = ioh_dma_channel_table[channel].base;
++
++ if (ch < 8) {
++ IOH_DMA_BIT_SET((base_address + DMA_CTL2_OFFSET),
++ (DMA_DIR_START << ch));
++ } else {
++ IOH_DMA_BIT_SET((base_address + DMA_CTL2_OFFSET),
++ (DMA_DIR_START << (ch + 6)));
++ }
++
++ IOH_DEBUG("dma_direct_start -> Direct2 RegValue: "
++ "%x.\n", IOH_READ_LONG((base_address + DMA_CTL2_OFFSET)));
++
++ IOH_DEBUG("Function dma_direct_start returns "
++ "%d.\n", IOH_DMA_SUCCESS);
++ return IOH_DMA_SUCCESS;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int dma_set_priority (int channel, int priority)
++ @brief Set the priority.
++ @remarks Sets the priority for a channel. The main task
++ performed by this function is:
++ - Set the value of DMAn Priority Level bits for
++ the channel in the Control register1.
++
++ @param channel [@ref IN] DMA channel number.
++ @param priority [@ref IN] Priority to be set for
++ the DMA channel.
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> On Success.
++
++ @see
++ - ioh_set_dma_priority
++ */
++int dma_set_priority(int channel, int priority)
++{
++ int ch;
++ u32 base_address;
++ u32 reg_val;
++
++ ch = ioh_dma_channel_table[channel].channel;
++ base_address = ioh_dma_channel_table[channel].base;
++
++ reg_val = IOH_READ_LONG((base_address + DMA_CTL1_OFFSET));
++
++ if (ch < 8) {
++ reg_val &=
++ ~(DMA_MASK_PRIORITY_BITS << (ch * DMA_SHIFT_PRIORITY_BITS));
++ reg_val |= (((u32) priority) << (ch * DMA_SHIFT_PRIORITY_BITS));
++ } else {
++ reg_val &=
++ ~(DMA_MASK_PRIORITY_BITS <<
++ (((ch - 8) * DMA_SHIFT_PRIORITY_BITS) + 2));
++ reg_val |=
++ (((u32) priority) <<
++ (((ch - 8) * DMA_SHIFT_PRIORITY_BITS) + 2));
++ }
++
++ IOH_WRITE_LONG(reg_val, (base_address + DMA_CTL1_OFFSET));
++
++ IOH_DEBUG("Function dma_set_priority returns "
++ "%d.\n", IOH_DMA_SUCCESS);
++ return IOH_DMA_SUCCESS;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int dma_enable_disable_interrupt (int channel, int bEnable)
++ @brief Enables or Disables Interrupts .
++ @remarks Writes the corresponding register to either
++ enable or disable interrupts. The main tasks
++ performed by this function are:
++ - If bEnable is DMA_INTERRUPT_ENABLE (1),
++ sets the DMAn Interrupt Enable bit in control
++ register2.
++ - If bEnable is DMA_INTERRUPT_DISABLE (0),
++ clears the DMAn Interrupt Enable bit in control
++ register2.
++
++ @param channel [@ref IN] Channel number
++ @param bEnable [@ref IN] Flag to indicate whether
++ to enable or disable interrupt.
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> On Success.
++
++ @see
++ - dma_init
++ - dma_exit
++ */
++int dma_enable_disable_interrupt(int channel, int bEnable)
++{
++ u32 base_address;
++ u16 ch;
++
++ ch = ioh_dma_channel_table[channel].channel;
++ base_address = ioh_dma_channel_table[channel].base;
++
++ if (ch < 8) {
++ if (IOH_DMA_INTERRUPT_ENABLE == bEnable) {
++ IOH_DMA_BIT_SET((base_address + DMA_CTL2_OFFSET),
++ (DMA_INTERRUPT_BIT << ch));
++ } else { /* if(bEnable == IOH_DMA_INTERRUPT_DISABLE) */
++
++ IOH_DMA_BIT_CLEAR((base_address + DMA_CTL2_OFFSET),
++ (DMA_INTERRUPT_BIT << ch));
++ }
++
++ } else {
++ if (IOH_DMA_INTERRUPT_ENABLE == bEnable) {
++ IOH_DMA_BIT_SET((base_address + DMA_CTL2_OFFSET),
++ (DMA_INTERRUPT_BIT << (ch + 8)));
++ } else { /* if(bEnable == IOH_DMA_INTERRUPT_DISABLE) */
++
++ IOH_DMA_BIT_CLEAR((base_address + DMA_CTL2_OFFSET),
++ (DMA_INTERRUPT_BIT << (ch + 8)));
++ }
++ }
++
++ IOH_DEBUG("dma_enable_disable_interrupt -> CTL2 Register Value: "
++ "%x.\n", IOH_READ_LONG((base_address + DMA_CTL2_OFFSET)));
++
++ IOH_DEBUG("Function dma_enable_disable_interrupt returns "
++ "%d.\n", IOH_DMA_SUCCESS);
++ return IOH_DMA_SUCCESS;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn void get_dma_status(int channel, u16 *pDMAStatus)
++ @brief Gets the Status of DMA.
++ @remarks Gets the status of the specified DMA Channel. The
++ main task performed by this function is:
++ - Reads the data in the DMAn (for channel .n.)
++ Status bit of Status register0 (4ch or 8ch) or
++ Status register2 (12ch) and copy the value into
++ pDMAStatus.
++
++ @param channel [@ref IN] Channel number.
++ @param pDMAStatus [@ref INOUT] Address of variable to
++ which
++ status information is copied.
++
++ @return None.
++
++ @see
++ - dma_exit
++ - dma_init
++ - ioh_set_dma_mode
++ - ioh_set_dma_addr
++ - ioh_set_dma_count
++ - ioh_set_dma_desc
++ - ioh_add_dma_desc
++ - ioh_enable_dma
++ - ioh_disable_dma
++ - ioh_set_dma_priority
++ - ioh_dma_direct_start
++
++ */
++
++void get_dma_status(int channel, u16 *pDMAStatus)
++{
++ u32 status_val;
++ u32 base_address;
++ u16 ch;
++
++ ch = ioh_dma_channel_table[channel].channel;
++ base_address = ioh_dma_channel_table[channel].base;
++
++ if (ch < 8) {
++ status_val = IOH_READ_LONG(base_address + DMA_STS0_OFFSET);
++ *pDMAStatus = (u16) ((status_val >> (DMA_SHIFT_STATUS_BITS +
++ (ch *
++ DMA_SIZE_STATUS_BITS))) &
++ (DMA_MASK_STATUS_BITS));
++ } else {
++ status_val = IOH_READ_LONG(base_address + DMA_STS2_OFFSET);
++ *pDMAStatus = (u16) ((status_val >> (DMA_SHIFT_STATUS_BITS +
++ ((ch -
++ 8) *
++ DMA_SIZE_STATUS_BITS))) &
++ (DMA_MASK_STATUS_BITS));
++ }
++
++ IOH_DEBUG("Function get_dma_status invoked successfully.\n");
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int dma_set_desc(int channel,
++ struct ioh_dma_desc *start,
++ struct ioh_dma_desc *end)
++ @brief Sets descriptors .
++ @remarks This functions sets the descriptor settings for
++ SCATTER GATHER mode. It does not perform any
++ register settings, instead retains the data for
++ further use. The main tasks performed by this
++ function are:
++ - Sets the pHeadOfList field of the @ref
++ ioh_dma_channel_info structure to the value of
++ the argument start.
++ - Set the pTailOfList field of the @ref
++ ioh_dma_channel_info structure to the value of
++ the argument end.
++
++ @param channel [@ref IN] Channel number.
++ @param start [@ref IN] Reference to first descriptor
++ of list.
++ @param end [@ref IN] Reference to last descriptor
++ of list.
++
++ @see
++ - ioh_set_dma_desc
++ */
++
++int dma_set_desc(int channel, struct ioh_dma_desc *start,
++ struct ioh_dma_desc *end)
++{
++ ioh_dma_channel_info[channel].pHeadOfList = start;
++ ioh_dma_channel_info[channel].pTailOfList = end;
++
++ IOH_DEBUG("Function dma_set_desc returns %d.\n", IOH_DMA_SUCCESS);
++ return IOH_DMA_SUCCESS;
++}
++
++/*! @ingroup InternalFunction
++ @fn void get_free_ch(int index)
++ @brief Get a free channel info entry and populate the entry.
++ @remarks Reset all the entries within the array
++ ioh_dma_channel_info[index]
++
++ @param index [@ref IN] Index in the
++ ioh_dma_channel_table
++
++ @return None
++
++ @see
++ - dma_request_ch
++ */
++void get_free_ch(int index)
++{
++ memset((void *)&ioh_dma_channel_info[index], 0,
++ sizeof(struct ioh_dma_controller_info));
++ IOH_DEBUG("Function get_free_ch invoked successfully.\n");
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int dma_request_ch(u32 req_dev_id, int dreq)
++ @brief Reserves a channel based on request.
++ @remarks This function is invoked when a kernel module requests
++ to reserve a DMA channel. The main tasks
++ performed by this function are:
++ - Checks the @ref ioh_dma_channel_table for a
++ matching entry corresponding to the dev_id of
++ the requesting device and dreq signal.
++ - If there is a matching entry, checks if this
++ channel is already allocated.
++ - If no invoke get_free_ch to reset the entries
++ for the corresponding channel and return the
++ entry index.
++ - If no matching entry is found return -EBUSY.
++
++ @param req_dev_id [@ref IN] Device id of the device
++ that requests DMA .
++ @param dreq [@ref IN] DMA request signal number.
++
++ @return int
++ - DMA channel number (>=0) --> On Success.
++				- -EBUSY	--> DMA channel cannot be allocated.
++
++ @see
++ - ioh_request_dma
++ */
++
++int dma_request_ch(u32 req_dev_id, int dreq)
++{
++ int retval;
++ int i;
++
++ for (i = 0; i < IOH_DMA_CHANNELS_MAX; i++) {
++ if ((ioh_dma_channel_table[i].req_device_id == req_dev_id) &&
++ (ioh_dma_channel_table[i].request_signal == dreq)) {
++ if ((1 == ioh_dma_channel_table[i].ch_found) &&
++ (0 == ioh_dma_channel_table[i].ch_alloced)) {
++ get_free_ch(i);
++ IOH_DEBUG
++ ("dma_request_ch -> Function get_free_ch "
++ "invoked successfully.\n");
++ ioh_dma_channel_table[i].ch_alloced = 1;
++ retval = i;
++
++ break;
++ }
++ }
++ }
++
++ if (IOH_DMA_CHANNELS_MAX == i) {
++ retval = -EBUSY;
++ IOH_LOG(KERN_ERR, "dma_request_ch -> Not able to allocate "
++ "channel.\n");
++ }
++
++ IOH_DEBUG("Function dma_request_ch returns %d.\n", retval);
++ return retval;
++}
++
++/*! @ingroup HALLayerAPI
++ @fn int dma_free_ch(int channel)
++ @brief Frees the requested channel.
++ @remarks This function is invoked when a kernel
++ module requests to free a DMA channel. The main
++ tasks performed by this function are:
++ - If the channel is already free return
++ IOH_DMA_SUCCESS.
++ - Else disable the channel by invoking
++ @ref dma_disable_ch API.
++ - Disable the channel interrupt by invoking
++ @ref dma_enable_disable_interrupt
++ - Mark the channel as free in the structures
++ @ref ioh_dma_channel_info and @ref
++ ioh_dma_channel_table and return @ref
++ IOH_DMA_SUCCESS.
++
++ @param channel [@ref IN] DMA channel number to be freed.
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> On success.
++
++ @see
++ - ioh_free_dma
++ */
++
++int dma_free_ch(int channel)
++{
++ int retval;
++
++ if (ioh_dma_channel_table[channel].ch_alloced == (u16) 0) {
++ IOH_DEBUG("dma_free_ch -> Channel is already free\n");
++ retval = IOH_DMA_SUCCESS;
++ } else {
++ /* To stop any active transfer on DMA, disable DMA */
++ (void)dma_disable_ch(channel);
++ IOH_DEBUG("dma_free_ch -> Function dma_disable_ch invoked "
++ "successfully.\n");
++
++ (void)dma_enable_disable_interrupt(channel,
++ IOH_DMA_INTERRUPT_DISABLE);
++ IOH_DEBUG
++ ("dma_free_ch -> Function dma_enable_disable_interrupt "
++ "invoked successfully.\n");
++
++ ioh_dma_channel_table[channel].ch_alloced = 0;
++
++ retval = IOH_DMA_SUCCESS;
++ }
++
++ IOH_DEBUG("Function dma_free_ch returns %d.\n", IOH_DMA_SUCCESS);
++ return retval;
++}
+--- /dev/null
++++ b/drivers/dma/pch_dma/pch_dma_hal.h
+@@ -0,0 +1,594 @@
++/**
++ * @file ioh_dma_hal.h
++ *
++ * @brief
++ * This file declares the structures & data types used by the HAL
++ * functions of IOH_DMA_CONTROLLER driver.
++ *
++ * @version 0.90
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * <hr>
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 08/14/2009
++ *
++ */
++
++#ifndef __IOH_DMA_HAL_H__
++#define __IOH_DMA_HAL_H__
++
++#include <linux/interrupt.h>
++#include "pch_dma_main.h"
++
++/*!
++ @defgroup DMA
++*/
++
++/*! @defgroup Global
++ @ingroup DMA
++ @brief This group contains all the global data
++ structures used by the DMA module.
++*/
++
++/*! @defgroup InternalFunction
++ @ingroup DMA
++ @brief This group contains all the function which
++ are used by other APIs for performing some
++ small tasks for facilitating the logic
++ of the driver.
++*/
++
++/*! @defgroup PCILayer
++ @ingroup DMA
++ @brief This group contains all the utilities
++ used to interface the DMA module with
++ the PCI subsystem of the Kernel.
++*/
++
++/*! @defgroup InterfaceLayer
++ @ingroup DMA
++ @brief This group contains all the utilities
++ used by the DMA module to interface with
++ the other modules.
++*/
++
++/*! @defgroup HALLayer
++ @ingroup DMA
++ @brief This group contains all the utilities
++		used by the DMA module to interact with the
++ hardware.
++*/
++
++/*! @defgroup PCILayerAPI
++ @ingroup PCILayer
++ @brief This group contains the APIs used to
++ interface the DMA module with the PCI
++ subsystem of the Kernel.
++*/
++
++/*! @defgroup PCILayerFacilitators
++ @ingroup PCILayer
++ @brief This group contains the data structures
++ used by the PCILayerAPIs for their
++ functioning.
++*/
++
++/*! @defgroup HALLayerAPI
++ @ingroup HALLayer
++ @brief This group contains the APIs used to
++ communicate with the hardware.
++*/
++
++/*! @defgroup HALLayerFacilitators
++ @ingroup HALLayer
++ @brief This group contains the data structures
++ used to communicate with the hardware.
++*/
++
++/*! @defgroup InterfaceLayerAPI
++ @ingroup InterfaceLayer
++ @brief This group contains the APIs used by the
++ DMA module to interface with other modules.
++*/
++
++/*! @defgroup InterfaceLayerFacilitators
++ @ingroup InterfaceLayer
++ @brief This group contains the data structures
++ used by the DMA module to interface with
++ other modules.
++*/
++
++/*** Device specific limitations and properties. ***/
++
++/*! @ingroup DMA
++ @def IOH_DMA_CHANNELS_MAX
++ @brief The maximum number of channels allowed
++ in any of the IOH device.
++*/
++#define IOH_DMA_CHANNELS_MAX (64)
++
++/*! @ingroup DMA
++ @def IOH_DMA_MAX_DEVS
++ @brief The no. of DMA devices allowable.
++
++ @see
++ - ioh_dma_devices
++*/
++#define IOH_DMA_MAX_DEVS (4)
++
++/*! @ingroup DMA
++ @def IOH_DMA_8BIT_SIZE_MAX
++ @brief The maximum number of transfer size in
++ bytes for a channel if access size is set
++ to 8BIT.
++*/
++#define IOH_DMA_8BIT_SIZE_MAX (2047)
++
++/*! @ingroup DMA
++ @def IOH_DMA_16BIT_SIZE_MAX
++ @brief The maximum number of transfer size in
++ bytes for a channel if access size is set
++ to 16BIT.
++*/
++#define IOH_DMA_16BIT_SIZE_MAX (4094)
++
++/*! @ingroup DMA
++ @def IOH_DMA_32BIT_SIZE_MAX
++ @brief The maximum number of transfer size in
++ bytes for a channel if access size is set
++ to 32BIT.
++*/
++#define IOH_DMA_32BIT_SIZE_MAX (4096)
++
++/********/
++
++/*** Device IDs of DMA requesting devices. ***/
++/*! @ingroup DMA
++ @def PCI_DEVICE_ID_IOH_UART0
++ @brief The deviceID of the IOH GE UART
++ device 0 which can use the DMA features.
++*/
++#define PCI_DEVICE_ID_IOH_UART0 (0x8811)
++
++/*! @ingroup DMA
++ @def PCI_DEVICE_ID_IOH_UART1
++ @brief The deviceID of the IOH GE UART
++ device 1 which can use the DMA features.
++*/
++#define PCI_DEVICE_ID_IOH_UART1 (0x8812)
++
++/*! @ingroup DMA
++ @def PCI_DEVICE_ID_IOH_UART2
++ @brief The deviceID of the IOH GE UART
++ device 2 which can use the DMA features.
++*/
++#define PCI_DEVICE_ID_IOH_UART2 (0x8813)
++
++/*! @ingroup DMA
++ @def PCI_DEVICE_ID_IOH_UART3
++ @brief The deviceID of the IOH GE UART
++ device 3 which can use the DMA features.
++*/
++#define PCI_DEVICE_ID_IOH_UART3 (0x8814)
++
++/*! @ingroup DMA
++ @def PCI_DEVICE_ID_IOH_SPI
++ @brief The deviceID of the IOH GE SPI
++ device which can use the DMA features.
++*/
++#define PCI_DEVICE_ID_IOH_SPI (0x8816)
++
++/*** Internal device IDs used for identifying the DMAC. ***/
++/*! @ingroup Global
++ @def IOH_DMA_4CH0
++ @brief The device ID for the first DMA device
++ with 4 channels.
++*/
++#define IOH_DMA_4CH0 (0x40)
++
++/*! @ingroup Global
++ @def IOH_DMA_4CH1
++ @brief The device ID for the second DMA device
++ with 4 channels.
++*/
++#define IOH_DMA_4CH1 (0x41)
++
++/*! @ingroup Global
++ @def IOH_DMA_4CH2
++ @brief The device ID for the third DMA device
++ with 4 channels.
++*/
++#define IOH_DMA_4CH2 (0x42)
++
++/*! @ingroup Global
++ @def IOH_DMA_4CH3
++ @brief The device ID for the fourth DMA device
++ with 4 channels.
++*/
++#define IOH_DMA_4CH3 (0x43)
++
++/*! @ingroup Global
++ @def IOH_DMA_4CH4
++ @brief The device ID for the fifth DMA device
++ with 4 channels.
++*/
++#define IOH_DMA_4CH4 (0x44)
++
++/*! @ingroup Global
++ @def IOH_DMA_8CH0
++ @brief The device ID for the first DMA device
++ with 8 channels.
++*/
++#define IOH_DMA_8CH0 (0x80)
++
++/*! @ingroup Global
++ @def IOH_DMA_8CH1
++ @brief The device ID for the second DMA device
++ with 8 channels.
++*/
++#define IOH_DMA_8CH1 (0x81)
++
++/*! @ingroup Global
++ @def IOH_DMA_8CH2
++ @brief The device ID for the third DMA device
++ with 8 channels.
++*/
++#define IOH_DMA_8CH2 (0x82)
++
++/*! @ingroup Global
++ @def IOH_DMA_8CH3
++ @brief The device ID for the fourth DMA device
++ with 8 channels.
++*/
++#define IOH_DMA_8CH3 (0x83)
++
++/*! @ingroup Global
++ @def IOH_DMA_12CH0
++ @brief The device ID for the first DMA device
++ with 12 channels.
++*/
++#define IOH_DMA_12CH0 (0xC0)
++
++/******/
++
++/*** DMA Controller Register Offsets. ***/
++
++/*! @ingroup HALLayer
++ @def DMA_CTL0_OFFSET
++ @brief DMA Control register 0 offset.
++*/
++#define DMA_CTL0_OFFSET (0x00UL)
++
++/*! @ingroup HALLayer
++ @def DMA_CTL1_OFFSET
++ @brief DMA Control register 1 offset.
++*/
++#define DMA_CTL1_OFFSET (0x04UL)
++
++/*! @ingroup HALLayer
++ @def DMA_CTL2_OFFSET
++ @brief DMA Control register 2 offset.
++*/
++#define DMA_CTL2_OFFSET (0x08UL)
++
++/*! @ingroup HALLayer
++ @def DMA_CTL3_OFFSET
++ @brief DMA Control register 3 offset.
++*/
++#define DMA_CTL3_OFFSET (0x0CUL)
++
++/*! @ingroup HALLayer
++ @def DMA_STS0_OFFSET
++ @brief DMA Status register 0 offset.
++*/
++#define DMA_STS0_OFFSET (0x10UL)
++
++/*! @ingroup HALLayer
++ @def DMA_STS1_OFFSET
++ @brief DMA Status register 1 offset.
++*/
++#define DMA_STS1_OFFSET (0x14UL)
++
++/*! @ingroup HALLayer
++ @def DMA_STS2_OFFSET
++ @brief DMA Status register 2 offset.
++*/
++#define DMA_STS2_OFFSET (0x18UL)
++
++/*! @ingroup HALLayer
++ @def DMA_IN_AD_OFFSET
++ @brief DMA IN Address register offset.
++*/
++#define DMA_IN_AD_OFFSET (0x20UL)
++
++/*! @ingroup HALLayer
++ @def DMA_OUT_AD_OFFSET
++ @brief DMA Out Address register offset.
++*/
++#define DMA_OUT_AD_OFFSET (0x24UL)
++
++/*! @ingroup HALLayer
++ @def DMA_SZ_OFFSET
++ @brief DMA Size register offset.
++*/
++#define DMA_SZ_OFFSET (0x28UL)
++
++/*! @ingroup HALLayer
++ @def DMA_NX_AD_OFFSET
++ @brief DMA Next Address register offset.
++*/
++#define DMA_NX_AD_OFFSET (0x2CUL)
++
++/**********/
++
++/*** Individual register bits. ***/
++
++/*! @ingroup HALLayer
++ @def DMA_SIZE_TYPE_BITS
++ @brief The DMA size bits.
++*/
++#define DMA_SIZE_TYPE_BITS (0x00003000UL)
++
++/*! @ingroup HALLayer
++ @def DMA_SET_OR_CLEAR_DIR_BIT
++ @brief Mask for direction bit.
++*/
++#define DMA_SET_OR_CLEAR_DIR_BIT (0x00000004UL)
++
++/*! @ingroup HALLayer
++ @def DMA_MASK_MODE_BITS
++ @brief Mask for mode bits.
++*/
++#define DMA_MASK_MODE_BITS (0x00000003UL)
++
++/*! @ingroup HALLayer
++ @def DMA_SHIFT_MODE_BITS
++ @brief DMA shift mode bits.
++*/
++#define DMA_SHIFT_MODE_BITS (4)
++
++/*! @ingroup HALLayer
++ @def DMA_MASK_PRIORITY_BITS
++ @brief Mask for priority bits.
++*/
++#define DMA_MASK_PRIORITY_BITS (0x3UL)
++
++/*! @ingroup HALLayer
++ @def DMA_SHIFT_PRIORITY_BITS
++ @brief Shift value for DMA priority bits.
++*/
++#define DMA_SHIFT_PRIORITY_BITS (4)
++
++/*! @ingroup HALLayer
++ @def DMA_SHIFT_SIZE_TYPE_BITS
++ @brief Shift value for the DMA size bit.
++*/
++#define DMA_SHIFT_SIZE_TYPE_BITS (12)
++
++/*! @ingroup HALLayer
++ @def DMA_DIR_START
++ @brief Direct Start Bit Setting values.
++*/
++#define DMA_DIR_START (0x00000100UL)
++
++/*! @ingroup HALLayer
++ @def DMA_INTERRUPT_BIT
++ @brief Interrupt Enable Bit setting values.
++*/
++#define DMA_INTERRUPT_BIT (0x00000001UL)
++
++/*! @ingroup HALLayer
++ @def DMA_ABORT_OCCUR
++ @brief Abort notify Bit Setting Values
++*/
++#define DMA_ABORT_OCCUR (0x00000100UL)
++
++/*! @ingroup HALLayer
++ @def DMA_INTERRUPT_OCCUR
++ @brief Interrupt notify Bit Setting Values
++*/
++#define DMA_INTERRUPT_OCCUR (0x00000001UL)
++
++/*! @ingroup HALLayer
++ @def DMA_MASK_STATUS_BITS
++ @brief Mask for status bits.
++*/
++#define DMA_MASK_STATUS_BITS (0x3UL)
++
++/*! @ingroup HALLayer
++ @def DMA_SIZE_STATUS_BITS
++ @brief The DMA size status bits.
++*/
++#define DMA_SIZE_STATUS_BITS (2)
++
++/*! @ingroup HALLayer
++ @def DMA_SHIFT_STATUS_BITS
++ @brief The shift value for DMA status bits.
++*/
++#define DMA_SHIFT_STATUS_BITS (16)
++
++/*********/
++
++/*** Status denoting macros. ***/
++
++/*! @ingroup HALLayer
++ @def DMA_STATUS_IDLE
++ @brief Constant used to denote the transfer status as IDLE.
++ @note This constant is used by DMA modules to make the
++ other module aware of the DMA status.
++*/
++#define DMA_STATUS_IDLE (0)
++
++/*! @ingroup HALLayer
++ @def DMA_STATUS_DESC_READ
++ @brief Constant used to denote the transfer status as
++ DESCRIPTOR_READ.
++ @note This constant is used by DMA modules to make the
++ other module aware of the DMA status.
++*/
++#define DMA_STATUS_DESC_READ (1)
++
++/*! @ingroup HALLayer
++ @def DMA_STATUS_WAIT
++ @brief Constant used to denote the transfer status as WAIT.
++ @note This constant is used by DMA modules to make the
++ other module aware of the DMA status.
++*/
++#define DMA_STATUS_WAIT (2)
++
++/*! @ingroup HALLayer
++ @def DMA_STATUS_ACCESS
++ @brief Constant used to denote the transfer status as ACCESS
++ @note This constant is used by DMA modules to make the
++ other module aware of the DMA status.
++*/
++#define DMA_STATUS_ACCESS (3)
++
++/*! @ingroup HALLayer
++ @def IOH_DMA_INTERRUPT_DISABLE
++ @brief Constant used to denote disable interrupt.
++*/
++#define IOH_DMA_INTERRUPT_DISABLE (0)
++
++/*! @ingroup HALLayer
++ @def IOH_DMA_INTERRUPT_ENABLE
++ @brief Constant used to denote enable interrupt.
++*/
++#define IOH_DMA_INTERRUPT_ENABLE (1)
++
++/************/
++
++/*** Other Macros. ***/
++
++/*! @ingroup HALLayer
++ @def COUNTER_LIMIT
++ @brief The counter limit.
++*/
++#define COUNTER_LIMIT (0xFFFF)
++
++/*! @ingroup HALLayer
++ @def MSK_ALL_THREE
++ @brief Value used for masking the 3 LSB bits.
++*/
++#define MSK_ALL_THREE (0x7)
++
++/*******/
++/*** Data structures for storing device-specific information. ***/
++
++/*! @ingroup HALLayerFacilitators
++ @struct __ioh_dma_devices
++ @brief Format for maintaining the device information.
++ @note This structure is used by the DMA module to retain
++ the information about the device.
++
++ @see
++ - ioh_dma_devices
++*/
++
++struct ioh_dma_devices {
++ u32 base_addr; /**< The remapped base address. */
++ u32 dev_typ; /**< The device type indicating number of DMA
++ channels */
++ void *dev; /**< The void pointer for storing any references
++ if required */
++};
++
++/*! @ingroup HALLayerFacilitators
++ @struct __ioh_dma_controller_info_t
++ @brief Format for storing the details of the
++ DMA channels.
++*/
++
++struct ioh_dma_controller_info {
++ u16 DMATransferMode; /**< DMA Transfer Mode */
++ u16 bChEnabled; /**< To know if channel is enabled or
++ not */
++ struct ioh_dma_desc *pHeadOfList; /**< Pointer to start
++ descriptor */
++ struct ioh_dma_desc *pTailOfList; /**< Pointer to last
++ descriptor */
++ void (*call_back_func_ptr) (int, unsigned long);/**< Address of the call
++ back function that is to be called when
++ an interrupt occurs */
++ u32 callback_data; /**< The data to passed to the callback
++ function during invocation */
++ u16 DMAAccessSize; /**< To store the access size (8bit,
++ 16bit or 32bit) */
++ u16 DMATransferSize; /**< To store the value of Transfer
++ Size */
++ u16 DMATransferDirection; /**< To store the Direction of Transfer
++ (IN to OUT or OUT to IN) */
++ u32 in_addr; /**< The in_address */
++ u32 out_addr; /**< The out_address */
++};
++
++/*! @ingroup HALLayerFacilitators
++ @struct ioh_dma_channel_alloc_table
++ @brief Format for storing the details of the
++ allocation details of the DMA channels.
++*/
++
++struct ioh_dma_channel_alloc_table {
++ u32 dma_dev_id; /**< The DMA device ID. */
++ enum ioh_channel_request_id request_signal; /**< The request type.*/
++ u32 req_device_id; /**< The device ID of the requested device */
++ u16 channel; /**< The channel number. */
++ u16 ch_found:1; /**< The flag variable for channel in use */
++ u16 ch_alloced:1; /**< The flag variable for channel allocate. */
++ u32 base; /**< The base address of the DMA device. */
++};
++
++ /*****/
++
++extern struct ioh_dma_channel_alloc_table
++ ioh_dma_channel_table[IOH_DMA_CHANNELS_MAX];
++extern struct ioh_dma_controller_info
++ ioh_dma_channel_info[IOH_DMA_CHANNELS_MAX];
++
++void dma_init(u32 base, u32 dev_type);
++int dma_free_ch(int channel);
++int dma_request_ch(u32 req_dev_id, int dreq);
++int dma_set_mode(int channel, struct ioh_dma_mode_param stModeParam);
++int dma_set_addr(int channel, u32 iaddr, u32 oaddr);
++int dma_enable_ch(int channel);
++int dma_disable_ch(int channel);
++int dma_set_count(int channel, u32 count);
++int dma_add_desc(int channel, struct ioh_dma_desc *start,
++ struct ioh_dma_desc *end);
++int dma_set_desc(int channel, struct ioh_dma_desc *start,
++ struct ioh_dma_desc *end);
++void dma_set_callback(int channel,
++ void (*ioh_dma_cbr) (int value, unsigned long data1),
++ unsigned long data);
++irqreturn_t dma_interrupt(int irq, void *dev_id);
++int dma_set_priority(int channel, int priority);
++int dma_direct_start(int channel);
++int dma_enable_disable_interrupt(int channel, int bEnable);
++void dma_get_abort_status(int channel, u16 *pAbortStatus);
++void dma_get_interrupt_status(int channel, u16 *pInterruptStatus);
++void get_dma_status(int channel, u16 *pDMAStatus);
++void get_free_ch(int index);
++void dma_exit(u32 dev_type);
++
++#endif
+--- /dev/null
++++ b/drivers/dma/pch_dma/pch_dma_main.c
+@@ -0,0 +1,1026 @@
++/**
++ * @file ioh_dma_main.c
++ *
++ * @brief
++ * This file defines the methods of IOH_DMA driver.
++ *
++ *
++ * @version 0.90
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * <hr>
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 08/14/2009
++ *
++ */
++
++#include <linux/spinlock.h>
++#include <linux/kernel.h>
++#include <linux/pci.h>
++
++#include "pch_debug.h"
++#include "pch_dma_hal.h"
++#include "pch_dma_pci.h"
++
++
++/*! @ingroup InterfaceLayerAPI
++ @fn int ioh_request_dma(struct pci_dev *pdev, int dreq)
++ @brief Used to request a DMA channel.
++ @remarks Requests to reserve a DMA channel that connects
++ to number 'dreq' (DMA request signal) of PCI
++ device 'pdev' to the appropriate DMA channel
++ allocated for it within the DMA Controller. This
++ function is called by functions from other
++ kernel modules. The tasks performed by this
++ function are:
++ - Verifies whether the obtained parameters are
++ valid,
++ if not suitable error status codes are returned
++ to the called function.
++ - If valid interacts with the HAL API and
++ returns the status code returned by the HAL API.
++
++ @note This function is accessible by other kernel modules.
++
++ @param dev [@ref IN] PCI device that requires the DMA
++ channel.
++ @param dreq [@ref IN] DMA request signal number.
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> On success.
++ - -EAGAIN --> Device is in suspend mode.
++ - -EINVAL --> pdev does not have a DMA request
++ type or number 'dreq' or 'pdev'
++ is NULL.
++*/
++int ioh_request_dma(struct pci_dev *pdev, int dreq)
++{
++ int retval;
++
++ /* Attaining the lock. */
++ spin_lock(&ioh_device_lock);
++
++ /* If device suspended. */
++ if (1 == ioh_device_suspended) {
++ IOH_LOG(KERN_ERR,
++ "ioh_request_dma -> Device is in suspend mode.\n");
++ retval = -EAGAIN;
++ }
++ /* Invalid device structure. */
++ else if (NULL == pdev) {
++ IOH_LOG(KERN_ERR,
++ "ioh_request_dma -> Obtained device structure "
++ "is NULL.\n");
++ retval = -EINVAL;
++ }
++ /* Invalid request signal. */
++ else if ((dreq < IOH_DMA_TX_DATA_REQ0) ||
++ (dreq > IOH_DMA_RX_DATA_REQ5)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_request_dma -> Invalid request signal.\n");
++ retval = -EINVAL;
++ } else {
++ /* Requesting for reserving a DMA channel. */
++ retval = dma_request_ch((u32) (pdev->device), dreq);
++ IOH_DEBUG("ioh_request_dma -> Function dma_request_ch returned "
++ "%d.\n", retval);
++ }
++
++ /* Releasing the lock. */
++ spin_unlock(&ioh_device_lock);
++
++ IOH_DEBUG("Function ioh_request_dma returns %d.\n", retval);
++ return retval;
++}
++EXPORT_SYMBOL(ioh_request_dma);
++
++/*! @ingroup InterfaceLayerAPI
++ @fn int ioh_free_dma(int channel)
++ @brief Used to free a DMA channel.
++ @remarks Frees the allocated DMA channel that is provided
++ as the argument to the function. This function
++ is called by the functions from other kernel
++ modules. The main tasks performed by this
++ function are:
++ - Verifies whether the obtained parameters are
++ valid, if not suitable error status codes are
++ returned to the called function.
++ - If valid interacts with the HAL API for
++ freeing the channel and returns the status code
++ returned by the HAL API.
++ @note This function is accessible by other kernel
++ modules.
++
++ @param channel [@ref IN] DMA channel number
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> On success.
++ - -EAGAIN --> Device is in suspend mode.
++ - -ENODEV --> Specified DMA channel does
++ not exist.
++*/
++int ioh_free_dma(int channel)
++{
++ int retval;
++
++ if (1 == ioh_device_suspended) {
++ IOH_LOG(KERN_ERR,
++ "ioh_free_dma -> Device is in suspend mode.\n");
++ retval = -EAGAIN;
++ } else if ((channel >= IOH_DMA_CHANNELS_MAX) || (channel < 0)) {
++ IOH_LOG(KERN_ERR, "ioh_free_dma -> Invalid Channel number: "
++ "%d.\n", channel);
++ retval = -ENODEV;
++ } else {
++ retval = dma_free_ch(channel);
++ IOH_DEBUG("ioh_free_dma -> Function dma_free_ch "
++ "returned %d.\n", retval);
++ }
++
++ IOH_DEBUG("Function ioh_free_dma returns %d.\n", retval);
++ return retval;
++}
++EXPORT_SYMBOL(ioh_free_dma);
++
++/*! @ingroup InterfaceLayerAPI
++ @fn int ioh_set_dma_mode(int channel,struct
++ ioh_dma_mode_param stModeParam)
++ @brief Used to set the mode of the DMA.
++ @remarks Sets the mode of DMA transfer - One shot mode
++ or Scatter/gather mode. In addition to this,
++ the function also sets the direction of DMA
++ transfer and DMA Size type. This function is
++ called by functions from other kernel modules.
++ The main tasks performed by this function are:
++ - Verifies whether the obtained parameters are
++ valid, if not suitable error status codes are
++ returned to the called function.
++ - If valid interacts with the HAL API to set the
++ required settings and returns the status code
++ returned by the HAL API.
++
++ @note This function is accessible by other kernel modules.
++
++ @param channel [@ref IN] DMA channel number
++ @param stModeParam [@ref IN] Contains info about
++ direction of DMA transfer, mode
++ and Size type
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> On success.
++ - -EAGAIN --> The device is in suspend
++ mode.
++ - -ENODEV --> Specified DMA channel does
++ not exist.
++ - -EINVAL --> Parameter passed is invalid.
++ - -EBUSY --> DMA channel is already
++ enabled.
++*/
++int ioh_set_dma_mode(int channel, struct ioh_dma_mode_param stModeParam)
++{
++ int retval;
++
++ /* Checking if device suspended. */
++ if (1 == ioh_device_suspended) {
++ IOH_LOG(KERN_ERR,
++ "ioh_set_dma_mode -> Device is in suspend mode.\n");
++ retval = -EAGAIN;
++ }
++ /* Checking for validity of channel number. */
++ else if ((channel >= IOH_DMA_CHANNELS_MAX) || (channel < 0)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_set_dma_mode -> Invalid Channel number : " "%d.\n",
++ channel);
++ retval = -ENODEV;
++ }
++ /* Checking whether channel not allocated. */
++ else if (ioh_dma_channel_table[channel].ch_alloced == (u16) 0) {
++ IOH_LOG(KERN_ERR,
++ "ioh_set_dma_mode -> Channel not allocated.\n");
++ retval = -EINVAL;
++ }
++ /* Checking if channel already enabled. */
++ else if (ioh_dma_channel_info[channel].bChEnabled == 1) {
++ IOH_LOG(KERN_ERR,
++ "ioh_set_dma_mode -> Channel already enabled.\n");
++ retval = -EBUSY;
++ }
++ /* Checking for validity of DMA Transfer MODE. */
++ else if ((stModeParam.DMATransferMode != (u16) DMA_ONE_SHOT_MODE) &&
++ (stModeParam.DMATransferMode !=
++ (u16) DMA_SCATTER_GATHER_MODE)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_set_dma_mode -> Invalid DMA Transfer mode.\n");
++ retval = -EINVAL;
++ }
++ /* Checking for validity of Transfer Direction. */
++ else if ((stModeParam.TransferDirection != (u16) IOH_DMA_DIR_OUT_TO_IN)
++ && (stModeParam.TransferDirection !=
++ (u16) IOH_DMA_DIR_IN_TO_OUT)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_set_dma_mode -> Invalid DMA Transfer Direction." \
++ "\n");
++ retval = -EINVAL;
++ }
++ /* Checking for validity of Transfer Size Type. */
++ else if ((stModeParam.DMASizeType != (u16) IOH_DMA_SIZE_TYPE_8BIT) &&
++ (stModeParam.DMASizeType != (u16) IOH_DMA_SIZE_TYPE_16BIT) &&
++ (stModeParam.DMASizeType != (u16) IOH_DMA_SIZE_TYPE_32BIT)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_set_dma_mode -> Invalid DMA Size Type.\n");
++ retval = -EINVAL;
++ } else {
++ /* Setting the required DMA mode. */
++ retval = dma_set_mode(channel, stModeParam);
++ IOH_DEBUG("ioh_set_dma_mode -> Function dma_set_mode "
++ "returned %d.\n", retval);
++ }
++
++ IOH_DEBUG("Function ioh_set_dma_mode returns %d.\n", retval);
++ return retval;
++}
++EXPORT_SYMBOL(ioh_set_dma_mode);
++
++/*! @ingroup InterfaceLayerAPI
++ @fn int ioh_set_dma_addr(int channel, unsigned int iaddr,
++ unsigned int oaddr)
++ @brief Used to set the in and out address of the DMA channel.
++ @remarks Sets the address of the inside bridge and the outside
++ bridge for the 'One Shot Mode' of DMA Transfer.
++ This function is invoked by functions from other
++ modules. The main tasks performed by this
++ function are:
++ - Verifies whether the obtained parameters are
++ valid, if not suitable error status codes are
++ returned to the called function.
++ - If valid interacts with the HAL API to set the
++ inside and outside address and returns the
++ status code returned by the HAL API.
++ @note This function is accessible by other kernel modules. The
++ following points has to be noted while passing
++ the in-address and out-address paramter.
++ - The address passed should be valid physical
++ address within the memory space.
++ - It should not be a configuration space or IO
++ space address.
++ - If the transfer is for large data, the address
++ should point to contagious alligned memory space
++ .
++
++ @param channel [@ref IN] DMA channel number .
++ @param iaddr [@ref IN] Address of inside bridge.
++ @param oaddr [@ref IN] Address of outside bridge.
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> On success.
++ - -EAGAIN --> The device is in suspend mode.
++ - -ENODEV --> Specified DMA channel does not exist.
++ - -EINVAL --> Parameter passed is invalid.
++ - -EBUSY --> DMA transfer in progress or channel is
++ already enabled.
++
++*/
++int ioh_set_dma_addr(int channel, unsigned int iaddr, unsigned int oaddr)
++{
++ int retval;
++
++ /* If the device is in suspend mode. */
++ if (1 == ioh_device_suspended) {
++ IOH_LOG(KERN_ERR,
++ "ioh_set_dma_addr -> Device is in suspend mode.\n");
++ retval = -EAGAIN;
++ }
++ /* Checking for validity of channel number */
++ else if ((channel >= IOH_DMA_CHANNELS_MAX) || (channel < 0)) {
++ IOH_LOG(KERN_ERR, "ioh_set_dma_addr -> Invalid Channel "
++ "number: %d.\n", channel);
++ retval = -ENODEV;
++ }
++ /* Checking whether channel is not allocated. */
++ else if (ioh_dma_channel_table[channel].ch_alloced == (u16) 0) {
++ IOH_LOG(KERN_ERR, "ioh_set_dma_addr -> Channel not "
++ "allocated.\n");
++ retval = -EINVAL;
++ }
++ /* Checking whether the channel is already enabled. */
++ else if (ioh_dma_channel_info[channel].bChEnabled == 1) {
++ IOH_LOG(KERN_ERR, "ioh_set_dma_addr -> Channel already "
++ "enabled.\n");
++ retval = -EBUSY;
++ }
++ /*Checking if addresses specified are NULL or not */
++ else if ((iaddr == 0) || (oaddr == 0)) {
++ IOH_LOG(KERN_ERR, "ioh_set_dma_addr -> Invalid address.\n");
++ retval = -EINVAL;
++ }
++ /* Checking if the mode of transfer is other than ONE_SHOT. */
++ else if (ioh_dma_channel_info[channel].DMATransferMode !=
++ (u16) DMA_ONE_SHOT_MODE) {
++ IOH_LOG(KERN_ERR,
++ "ioh_set_dma_addr -> Current Mode is "
++ "not DMA_ONE_SHOT_MODE.\n");
++ retval = -EINVAL;
++ } else {
++ /* setting the in and out address. */
++ retval = dma_set_addr(channel, iaddr, oaddr);
++ IOH_DEBUG("ioh_set_dma_addr -> Function dma_set_addr invoked "
++ "successfully returned %d.\n", retval);
++ }
++
++ IOH_DEBUG("Function ioh_set_dma_addr returns %d.\n", retval);
++ return retval;
++}
++EXPORT_SYMBOL(ioh_set_dma_addr);
++
++/*! @ingroup InterfaceLayerAPI
++ @fn int ioh_set_dma_count(int channel, unsigned int count)
++ @brief Used to set the DMA transfer count for a DMA channel.
++ @remarks Sets the value of DMA transfer count. This function
++ sets the count value only for the 'One Shot
++ Mode' of DMA Transfer. This function is invoked
++ by functions from other modules. The main tasks
++ performed by this function are:
++ - Verifies whether the obtained parameters are
++ valid, if not suitable error status codes are
++ returned to the called function.
++ - If valid interacts with the HAL API to set the
++ access count settings and returns the status
++ code returned by the HAL API.
++ @note This function is accessible by other kernel modules.
++
++ @param channel [@ref IN] DMA channel number.
++ @param count [@ref IN] The number of bytes to transfer.
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> On success.
++ - -EAGAIN --> The device is in suspend mode.
++ - -ENODEV --> Specified DMA channel does not
++ exist.
++ - -EBUSY --> DMA transfer in progress or channel
++ is already enabled.
++ - -EINVAL --> Parameter passed is invalid.
++
++ */
++int ioh_set_dma_count(int channel, unsigned int count)
++{
++ int retval = IOH_DMA_SUCCESS;
++
++ /* Checking if the device is in suspend mode. */
++ if (1 == ioh_device_suspended) {
++ IOH_LOG(KERN_ERR, "ioh_set_dma_count -> The device is in "
++ "suspend mode.");
++ retval = -EAGAIN;
++ }
++ /* Checking for validity of channel number. */
++ else if ((channel >= IOH_DMA_CHANNELS_MAX) || (channel < 0)) {
++ IOH_LOG(KERN_ERR, "ioh_set_dma_count -> Invalid Channel "
++ "number : %d.\n", channel);
++ retval = -ENODEV;
++ }
++ /* Checking whether channel is not allocated. */
++ else if (ioh_dma_channel_table[channel].ch_alloced == (u16) 0) {
++ IOH_LOG(KERN_ERR, "ioh_set_dma_count -> Channel is not "
++ "allocated.\n");
++ retval = -EINVAL;
++ }
++ /* Checking whether the channel is enabled. */
++ else if (ioh_dma_channel_info[channel].bChEnabled == 1) {
++ IOH_LOG(KERN_ERR, "ioh_set_dma_count -> Channel already "
++ "enabled.\n");
++ retval = -EBUSY;
++ }
++ /* Checking if the mode of transfer is other than ONE_SHOT. */
++ else if (ioh_dma_channel_info[channel].DMATransferMode !=
++ (u16) DMA_ONE_SHOT_MODE) {
++ IOH_LOG(KERN_ERR,
++ "ioh_set_dma_count -> Current Mode is "
++ "not DMA_ONE_SHOT_MODE.\n");
++ retval = -EINVAL;
++ }
++ /* Checking the limits of count value. */
++ else {
++ unsigned int max_count;
++
++ switch (ioh_dma_channel_info[channel].DMAAccessSize) {
++ case IOH_DMA_SIZE_TYPE_8BIT:
++ max_count = IOH_DMA_8BIT_COUNT_MAX;
++ break;
++
++ case IOH_DMA_SIZE_TYPE_16BIT:
++ max_count = IOH_DMA_16BIT_COUNT_MAX;
++ break;
++
++ case IOH_DMA_SIZE_TYPE_32BIT:
++ max_count = IOH_DMA_32BIT_COUNT_MAX;
++ break;
++
++ default:
++ IOH_LOG(KERN_ERR, "ioh_set_dma_count -> Invalid Access "
++ "Size.\n");
++ max_count = 0;
++ retval = -EINVAL;
++ break;
++ }
++
++ if ((retval == IOH_DMA_SUCCESS) && (count > max_count)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_set_dma_count -> Count (%d) exceeds "
++ "limit the maximum expected count (%d).\n",
++ count, max_count);
++ retval = -EINVAL;
++ }
++ }
++
++ if (IOH_DMA_SUCCESS == retval) {
++ /* Setting the count. */
++ retval = dma_set_count(channel, count);
++ IOH_DEBUG
++ ("ioh_set_dma_count -> Function dma_set_count returned "
++ "%d.\n", retval);
++ }
++
++ IOH_DEBUG("Function ioh_set_dma_count returns %d.\n", retval);
++ return retval;
++}
++EXPORT_SYMBOL(ioh_set_dma_count);
++
++/*! @ingroup InterfaceLayerAPI
++ @fn int ioh_set_dma_desc(int channel, struct ioh_dma_desc *start,
++ struct ioh_dma_desc *end)
++ @brief Used to set the DMA channel descriptors.
++ @remarks Sets the DMA descriptor for the 'Scatter/Gather mode'
++ of DMA transfer. This function is invoked by
++ functions from other kernel modules. The main
++ tasks performed by this function are:
++ - Verifies whether the obtained parameters are
++ valid, if not suitable error status codes are
++ returned to the called function.
++ - If valid interacts with the HAL API to set the
++ descriptor settings and returns the status code
++ returned by the HAL API.
++ @note This function is accessible by other kernel modules. The
++ following points have to be noted while passing
++ the "start" and "end" pointer of the descriptor.
++ - The address pointed by them should be physical
++ address with valid virtual address.
++ - The space should be alligned and accessible by
++ the DMA hardware.
++ - An easy way to perform this is to allocate the
++ descriptor memory using kmalloc.
++ - The last two bits of the physical address
++ should be suitably set so as to perform suitable
++ action after completion of each descriptor
++ action.
++ - The in-address and out-address within each
++ descriptor should be a valid memory space
++ physical address.
++
++ @param channel [@ref IN] DMA channel number
++ @param start [@ref IN] A pointer to the first descriptor.
++ @param end [@ref IN] A pointer to the last descriptor.
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> On success.
++ - -EAGAIN --> The device is in suspend.
++ - -EINVAL --> For invalid parameters.
++ - -ENODEV --> Specified DMA channel is not exist.
++ - -EBUSY --> If DMA transfer is in progress or
++ channel is already enabled.
++*/
++int ioh_set_dma_desc(int channel, struct ioh_dma_desc *start,
++ struct ioh_dma_desc *end)
++{
++ int retval;
++
++ /* Checking if the device is in suspend mode. */
++ if (1 == ioh_device_suspended) {
++ IOH_LOG(KERN_ERR, "ioh_set_dma_desc -> The device is in "
++ "suspend mode.\n");
++ retval = -EAGAIN;
++ }
++ /* Checking for validity of channel number */
++ else if ((channel >= IOH_DMA_CHANNELS_MAX) || (channel < 0)) {
++ IOH_LOG(KERN_ERR, "ioh_set_dma_desc -> Invalid Channel number "
++ ": %d.\n", channel);
++ retval = -ENODEV;
++ }
++ /* Checking whether channel is not allocated. */
++ else if (ioh_dma_channel_table[channel].ch_alloced == (u16) 0) {
++ IOH_LOG(KERN_ERR,
++ "ioh_set_dma_desc -> Channel not allocated.\n");
++ retval = -EINVAL;
++ }
++ /* Checking whether the channel is enabled. */
++ else if (ioh_dma_channel_info[channel].bChEnabled == 1) {
++ IOH_LOG(KERN_ERR,
++ "ioh_set_dma_desc -> Channel already enabled.\n");
++ retval = -EBUSY;
++ }
++ /* Checking if the mode is other than SCATTER_GATHER. */
++ else if (ioh_dma_channel_info[channel].DMATransferMode !=
++ (u16) DMA_SCATTER_GATHER_MODE) {
++ IOH_LOG(KERN_ERR,
++ "ioh_set_dma_desc -> Current mode id is not "
++ "SCATTER GATHER.\n");
++ retval = -EINVAL;
++ }
++ /* Checking whether start and end pointers are NULL or not */
++ else if ((start == NULL) || (end == NULL)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_set_dma_desc -> NULL pointer parameter.\n");
++ retval = -EINVAL;
++ } else {
++ /* Setting the descriptors. */
++ retval = dma_set_desc(channel, start, end);
++ IOH_DEBUG("ioh_set_dma_desc -> Function dma_set_desc "
++ "returned %d.\n", retval);
++ }
++
++ IOH_DEBUG("Function ioh_set_dma_desc returns %d.\n", retval);
++ return retval;
++}
++EXPORT_SYMBOL(ioh_set_dma_desc);
++
++/*! @ingroup InterfaceLayerAPI
++ @fn int ioh_add_dma_desc(int channel, struct ioh_dma_desc *start,
++ struct ioh_dma_desc *end)
++ @brief Used to append the DMA descriptors for a channel.
++ @remarks Used when a new chain of descriptors is to be appended
++ to the existing chain of descriptors. This
++ function is invoked by functions from other
++ modules. The main tasks performed by this
++ function are:
++ - Verifies whether the obtained parameters are
++ valid, if not suitable error status codes are
++ returned to the called function.
++ - If valid interacts with the HAL API to append
++ the descriptor settings and returns the status
++ code returned by the HAL API.
++ @note This function is accessible by other kernel modules.
++ The following points have to be noted while
++ passing the "start" and "end" pointer of the
++ descriptor.
++ - The address pointer by them should be physical
++ address with valid virtual address.
++ - The space should be alligned and accessible by
++ the DMA hardware.
++ - An easy way to perform this is to allocate the
++ descriptor memory using kmalloc.
++ - The last two bits of the physical address
++ should be suitably set so as to perform suitable
++ action after completion of each descriptor
++ action.
++ - The in-address and out-address within each
++ descriptor should be a valid memory space
++ physical address.
++
++ @param channel [@ref IN] DMA channel number
++ @param start [@ref IN] A pointer to the first descriptor.
++ @param end [@ref IN] A pointer to the last descriptor.
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> On success.
++ - -EAGAIN --> The device is in suspend mode.
++ - -ENODEV --> Specified DMA channel does not
++ exist.
++ - -EINVAL --> Invalid parameters passed.
++ - -EBUSY --> If DMA Transfer in progress or
++ channel is already enabled.
++ */
++int ioh_add_dma_desc(int channel, struct ioh_dma_desc *start,
++ struct ioh_dma_desc *end)
++{
++ int retval;
++
++ /* Checking whether the device is in suspend mode. */
++ if (1 == ioh_device_suspended) {
++ IOH_LOG(KERN_ERR,
++ "ioh_add_dma_desc -> The device is in suspend "
++ "mode.\n");
++ retval = -EAGAIN;
++ }
++ /* Checking for validity of channel number */
++ else if ((channel >= IOH_DMA_CHANNELS_MAX) || (channel < 0)) {
++ IOH_LOG(KERN_ERR, "ioh_add_dma_desc -> Invalid Channel "
++ "number : %d", channel);
++ retval = -ENODEV;
++ }
++ /* Checking whether channel is not allocated. */
++ else if (ioh_dma_channel_table[channel].ch_alloced == (u16) 0) {
++ IOH_LOG(KERN_ERR,
++ "ioh_add_dma_desc -> Channel not alloctaed.\n");
++ retval = -EINVAL;
++ }
++ /* Checking whether the channel is enabled. */
++ else if (ioh_dma_channel_info[channel].bChEnabled == 1) {
++ IOH_LOG(KERN_ERR,
++ "ioh_add_dma_desc -> Channel already enabled.\n");
++ retval = -EBUSY;
++ }
++ /* Checking whether the mode is other than SCATTER_GATHER. */
++ else if (ioh_dma_channel_info[channel].DMATransferMode !=
++ (u16) DMA_SCATTER_GATHER_MODE) {
++ IOH_LOG(KERN_ERR,
++ "ioh_add_dma_desc -> Current mode id is not "
++ "SCATTER_GATHER.\n");
++ retval = -EINVAL;
++ }
++ /* Checking if descriptor field of the channel is set earlier. */
++ else if ((ioh_dma_channel_info[channel].pHeadOfList == NULL) ||
++ (ioh_dma_channel_info[channel].pTailOfList == NULL)) {
++ IOH_LOG(KERN_ERR, "ioh_add_dma_desc -> Descriptor list not "
++ "set earlier.\n");
++ retval = -EINVAL;
++ }
++ /* Checking whether start and end pointers are NULL or not */
++ else if ((start == NULL) || (end == NULL)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_add_dma_desc -> NULL pointer parameter.\n");
++ retval = -EINVAL;
++ } else {
++ /* Appending the descriptors to the available list. */
++ retval = dma_add_desc(channel, start, end);
++ IOH_DEBUG
++ ("ioh_add_dma_desc -> Function dma_add_desc returned %d.\n",
++ retval);
++ }
++
++ IOH_DEBUG("Function ioh_add_dma_desc returns %d.\n", retval);
++ return retval;
++}
++EXPORT_SYMBOL(ioh_add_dma_desc);
++
++/*! @ingroup InterfaceLayerAPI
++ @fn int ioh_enable_dma(int channel)
++ @brief Used to enable a DMA channel.
++ @remarks Used when a DMA channel has to be enabled. This
++ function is invoked by functions from other
++ kernel modules. The main tasks performed by this
++ function are:
++ - Verifies whether the obtained parameters are
++ valid, if not suitable error status codes are
++ returned to the called function.
++ - If valid interacts with the HAL API to enable
++ the channel and returns the status code returned
++ by the HAL API.
++ @note This function is accessible by other kernel modules.
++
++ @param channel [@ref IN] DMA channel number .
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> On success.
++ - -EAGAIN --> Device is in suspend mode.
++ - -ENODEV --> Specified DMA channel does
++ not exist.
++ - -EINVAL --> Specified channel is not
++ allocated.
++ - -EBUSY --> DMA Transfer already in
++ progress or channel is
++ already enabled.
++ */
++int ioh_enable_dma(int channel)
++{
++ int retval;
++
++ /* Checking whether the device is in suspend mode. */
++ if (ioh_device_suspended == 1) {
++ IOH_LOG(KERN_ERR, "ioh_enable_dma -> Device is in suspend "
++ "mode.\n");
++ retval = -EAGAIN;
++ }
++ /* Checking for validity of channel number */
++ else if ((channel >= IOH_DMA_CHANNELS_MAX) || (channel < 0)) {
++ IOH_LOG(KERN_ERR, "ioh_enable_dma ->Invalid Channel number "
++ ": %d.\n", channel);
++ retval = -ENODEV;
++ }
++ /* Checking whether channel is allocated. */
++ else if (ioh_dma_channel_table[channel].ch_alloced == (u16) 0) {
++ IOH_LOG(KERN_ERR, "ioh_enable_dma -> Channel not allocated.\n");
++ retval = -EINVAL;
++ }
++ /* Checking whether the channel is already enabled. */
++ else if (ioh_dma_channel_info[channel].bChEnabled == 1) {
++ IOH_LOG(KERN_ERR,
++ "ioh_enable_dma -> Channel already enabled.\n");
++ retval = -EBUSY;
++ } else {
++ /* Enabling the channel. */
++ retval = dma_enable_ch(channel);
++ IOH_DEBUG("ioh_enable_dma -> Function dma_enable_ch returned "
++ "%d.\n", retval);
++ }
++
++ IOH_DEBUG("Function ioh_enable_dma returns %d.\n", retval);
++ return retval;
++}
++EXPORT_SYMBOL(ioh_enable_dma);
++
++/*! @ingroup InterfaceLayerAPI
++ @fn int ioh_disable_dma(int channel)
++ @brief Used to disable a DMA channel.
++ @remarks Used when a DMA channel has to be disabled. This
++ function is invoked by functions from other
++ kernel modules. The main tasks performed by this
++ function are:
++ - Verifies whether the obtained parameters are
++ valid, if not suitable error status codes are
++ returned to the called function.
++ - If valid interacts with the HAL API to disable
++ the channel and returns the status code returned
++ by the HAL API.
++ @note This function is accessible by other kernel modules.
++
++ @param channel [@ref IN] DMA channel number .
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> On success.
++ - -ENODEV --> Specified DMA channel does not
++ exist.
++ - -EINVAL --> Specified channel is not allocated.
++
++ */
++int ioh_disable_dma(int channel)
++{
++ int retval;
++ u16 statusInfo;
++
++ /* Checking whether the device is in suspend mode. */
++ if (ioh_device_suspended == 1) {
++ IOH_LOG(KERN_ERR, "ioh_disable_dma -> Device is in "
++ "suspend mode.\n");
++ retval = -EAGAIN;
++ }
++ /* Checking for validity of channel number. */
++ else if ((channel >= IOH_DMA_CHANNELS_MAX) || (channel < 0)) {
++ IOH_LOG(KERN_ERR, "ioh_disable_dma -> Invalid Channel "
++ "number : %d", channel);
++ retval = -ENODEV;
++ }
++ /* Checking whether channel is allocated. */
++ else if (ioh_dma_channel_table[channel].ch_alloced == (u16) 0) {
++ IOH_LOG(KERN_ERR, "ioh_disable_dma -> Channel not "
++ "allocated.\n");
++ retval = -EINVAL;
++ }
++ /* Check whether channel is already disabled. */
++ else if (ioh_dma_channel_info[channel].bChEnabled == (u16) 0) {
++ retval = IOH_DMA_SUCCESS;
++ } else {
++ u32 counter = COUNTER_LIMIT;
++
++ /* Wait for any DMA for certain interval transfer to end
++ before disabling the channel */
++ do {
++ get_dma_status(channel, &statusInfo);
++ } while ((counter--) && (statusInfo != (u16) DMA_STATUS_IDLE));
++
++ /* Disabling the channel. */
++ retval = dma_disable_ch(channel);
++ IOH_DEBUG("ioh_disable_dma -> Function dma_disable_ch "
++ "returned %d.\n", retval);
++
++ }
++
++ IOH_DEBUG("Function ioh_disable_dma returns " "%d.\n", retval);
++ return retval;
++}
++EXPORT_SYMBOL(ioh_disable_dma);
++
++/*! @ingroup InterfaceLayerAPI
++ @fn int ioh_dma_set_callback(int channel,
++ void (*ioh_dma_cbr)( int value,unsigned long data1),
++ unsigned long data)
++ @brief Used to set the callback function for particular DMA channel.
++ @remarks Sets the callback function to be called when an
++ interrupt occurs. This function is invoked by
++ functions from other kernel modules. The main
++ tasks performed by this function are:
++ - Verifies whether the obtained parameters are
++ valid, if not suitable error status codes are
++ returned to the called function.
++ - If valid interacts with the HAL API to set the
++ callback function settings and returns the
++ status code returned by the HAL API.
++ @note This function is accessible by other kernel modules.
++
++ @param channel [@ref IN] DMA channel number .
++ @param ioh_dma_cbr [@ref IN] Pointer to the call-back
++ function.
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> On success.
++ - -EAGAIN --> Device is in suspend mode.
++ - -EINVAL --> Parameter passed is invalid.
++ - -ENODEV --> Specified DMA channel does
++ not exist.
++ - -EBUSY --> If the channel is already
++ enabled.
++ */
++int ioh_dma_set_callback(int channel,
++ void (*ioh_dma_cbr) (int value, unsigned long data1),
++ unsigned long data)
++{
++ int retval;
++
++ /* Checking whether the device is in suspend mode. */
++ if (ioh_device_suspended == 1) {
++ IOH_LOG(KERN_ERR, "ioh_dma_set_callback -> The device is "
++ "in suspend mode.\n");
++ retval = -EAGAIN;
++ }
++ /* Checking for validity of channel number */
++ else if ((channel >= IOH_DMA_CHANNELS_MAX) || (channel < 0)) {
++ IOH_LOG(KERN_ERR, "ioh_dma_set_callback -> Invalid Channel "
++ "number : %d.\n", channel);
++ retval = -ENODEV;
++ }
++ /* Checking whether channel is not allocated. */
++ else if (ioh_dma_channel_table[channel].ch_alloced == (u16) 0) {
++ IOH_LOG(KERN_ERR,
++ "ioh_dma_set_callback -> Channel not allocated.\n");
++ retval = -EINVAL;
++ }
++ /* Checking whether the channel is already enabled. */
++ else if (ioh_dma_channel_info[channel].bChEnabled == 1) {
++ IOH_LOG(KERN_ERR,
++ "ioh_dma_set_callback -> Channel already enabled.\n");
++ retval = -EBUSY;
++ }
++ /* Checking whether function pointer is NULL or not */
++ else if (ioh_dma_cbr == NULL) {
++ IOH_LOG(KERN_ERR,
++ "ioh_dma_set_callback -> NULL pointer parameter.\n");
++ retval = -EINVAL;
++ } else {
++ /* Setting the callback. */
++ dma_set_callback(channel, ioh_dma_cbr, data);
++ IOH_DEBUG
++ ("ioh_dma_set_callback -> Function dma_set_callback invoked"
++ " successfully.\n");
++
++ retval = IOH_DMA_SUCCESS;
++ }
++
++ IOH_DEBUG("Function ioh_dma_set_callback " "returns %d.\n", retval);
++ return retval;
++}
++EXPORT_SYMBOL(ioh_dma_set_callback);
++
++/*! @ingroup InterfaceLayer
++ @fn int ioh_set_dma_priority (int channel, int priority)
++ @brief Sets the priority of the DMA channel.
++ @remarks Sets the priority that has to be assigned for a
++ particular channel. This function is invoked by
++ functions from other kernel modules. The main
++ tasks performed by this function are:
++ - Verifies whether the obtained parameters are
++ valid, if not, suitable error status codes are
++ returned to the called function.
++ - If valid, interacts with the HAL API to set
++ the DMA channel priority settings and returns
++ the status code returned by the HAL API.
++ @note This function is accessible by other kernel modules.
++
++ @param channel [@ref IN] DMA channel number.
++ @param priority [@ref IN] Priority to be set for the
++ DMA channel.
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> On success.
++ - -EAGAIN --> Device is in suspend mode.
++ - -EINVAL --> Parameter passed is invalid.
++ - -EBUSY --> If channel is in use.
++ - -ENODEV --> Specified DMA channel does not
++ exist.
++
++ */
++int ioh_set_dma_priority(int channel, int priority)
++{
++ int retval;
++
++ /* Checking whether the device is in suspend mode. */
++ if (ioh_device_suspended == 1) {
++ IOH_LOG(KERN_ERR, "ioh_set_dma_priority -> The device is "
++ "in suspend mode.\n");
++ retval = -EAGAIN;
++ }
++ /* Checking for validity of channel number */
++ else if ((channel >= IOH_DMA_CHANNELS_MAX) || (channel < 0)) {
++ IOH_LOG(KERN_ERR, "ioh_set_dma_priority -> Invalid Channel "
++ "number : %d", channel);
++ retval = -ENODEV;
++ }
++ /* Checking whether channel is not allocated. */
++ else if (ioh_dma_channel_table[channel].ch_alloced == (u16) 0) {
++ IOH_LOG(KERN_ERR, "ioh_set_dma_priority -> Channel not "
++ "allocated.\n");
++ retval = -EINVAL;
++ }
++ /* Checking whether the device is enabled. */
++ else if (ioh_dma_channel_info[channel].bChEnabled == 1) {
++ IOH_LOG(KERN_ERR, "ioh_set_dma_priority -> Channel already "
++ "enabled.\n");
++ retval = -EBUSY;
++ }
++ /* Check for validity of priority value */
++ else if ((priority > 3) || (priority < 0)) {
++ IOH_LOG(KERN_ERR, "ioh_set_dma_priority -> Invalid value "
++ "priority (%d)", priority);
++ retval = -EINVAL;
++ } else {
++ retval = dma_set_priority(channel, priority);
++ IOH_DEBUG("ioh_set_dma_priority -> Function dma_set_priority "
++ "returns %d.\n", retval);
++ }
++
++ IOH_DEBUG("Function ioh_set_dma_priority returns " "%d.\n", retval);
++ return retval;
++}
++EXPORT_SYMBOL(ioh_set_dma_priority);
++
++/*! @ingroup InterfaceLayerAPI
++ @fn int ioh_dma_direct_start (int channel)
++ @brief Used to initiate a DMA transfer.
++ @remarks Generates the DMA request to begin DMA transfer
++ on a particular channel. This function is
++ invoked by functions from other kernel modules.
++ The main tasks performed by this function are:
++ - Verifies whether the obtained parameters are
++ valid, if not suitable error status codes are
++ returned to the called function.
++ - If valid interacts with the HAL API to
++ initiate the DMA process and returns the status
++ code returned by the HAL API.
++ @note This function is accessible by other kernel modules.
++
++ @param channel DMA channel number.
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> On success.
++ - -EAGAIN --> Device is in suspend mode.
++ - -EBUSY --> Specified DMA channel is not idle.
++ - -ENODEV --> Specified DMA channel does not
++ exist.
++ - -EINVAL --> Specified channel is not allocated.
++
++ */
++int ioh_dma_direct_start(int channel)
++{
++ int retval = 0;
++
++ /* Checking whether the device is in suspend mode. */
++ if (ioh_device_suspended == 1) {
++ IOH_LOG(KERN_ERR, "ioh_dma_direct_start -> The device is in "
++ "suspend mode.\n");
++ retval = -EAGAIN;
++ }
++ /* Checking for validity of channel number */
++ else if ((channel >= IOH_DMA_CHANNELS_MAX) || (channel < 0)) {
++ IOH_LOG(KERN_ERR, "ioh_dma_direct_start -> Invalid Channel "
++ "number : %d.\n", channel);
++ retval = -ENODEV;
++ }
++ /* Checking whether channel is reserved or not */
++ else if (ioh_dma_channel_table[channel].ch_alloced == (u16) 0) {
++ IOH_LOG(KERN_ERR, "ioh_dma_direct_start -> Channel not "
++ "allocated.\n");
++ retval = -EINVAL;
++ }
++ /* Checking whether the device is not enabled. */
++ else if (ioh_dma_channel_info[channel].bChEnabled == 0) {
++ IOH_LOG(KERN_ERR, "ioh_dma_direct_start -> Channel not "
++ "enabled.\n");
++ retval = -EBUSY;
++ } else {
++ /* Initiating the DMA transfer */
++ retval = dma_direct_start(channel);
++ IOH_DEBUG("ioh_dma_direct_start -> Function dma_direct_start "
++ "returned %d.\n", retval);
++ }
++
++ IOH_DEBUG("Function ioh_dma_direct_start returns " "%d.\n", retval);
++ return retval;
++}
++EXPORT_SYMBOL(ioh_dma_direct_start);
+--- /dev/null
++++ b/drivers/dma/pch_dma/pch_dma_main.h
+@@ -0,0 +1,264 @@
++/**
++ * @file ioh_dma_main.h
++ *
++ * @brief
++ * This file declares the structures & data types used by the
++ * IOH_DMA driver.
++ *
++ * @version 0.90
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * <hr>
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 08/14/2009
++ *
++ */
++
++#ifndef __IOH_DMA_H__
++#define __IOH_DMA_H__
++
++/*! @ingroup InterfaceLayer
++ @def DMA_ONE_SHOT_MODE
++ @brief Constant used to denote the mode as ONE_SHOT.
++ @note This constant is used by other modules to make the
++ DMA module aware of the mode it requires.
++*/
++#define DMA_ONE_SHOT_MODE (0x2U)
++
++/*! @ingroup InterfaceLayer
++ @def DMA_SCATTER_GATHER_MODE
++ @brief Constant used to denote the mode as SCATTER_GATHER.
++ @note This constant is used by other modules to make the
++ DMA module aware of the mode it requires.
++*/
++#define DMA_SCATTER_GATHER_MODE (0x1U)
++
++/*! @ingroup InterfaceLayer
++ @def IOH_DMA_SIZE_TYPE_8BIT
++ @brief Constant used to denote the access size as 8BIT.
++ @note This constant is used by other modules to make the
++ DMA module aware of the access size it requires.
++*/
++#define IOH_DMA_SIZE_TYPE_8BIT ((0x3U << 12))
++
++/*! @ingroup InterfaceLayer
++ @def IOH_DMA_SIZE_TYPE_16BIT
++ @brief Constant used to denote the access size as 16BIT.
++ @note This constant is used by other modules to make the
++ DMA module aware of the access size it requires.
++*/
++#define IOH_DMA_SIZE_TYPE_16BIT ((0x2U << 12))
++
++/*! @ingroup InterfaceLayer
++ @def IOH_DMA_SIZE_TYPE_32BIT
++ @brief Constant used to denote the access size as 32BIT.
++ @note This constant is used by other modules to make the
++ DMA module aware of the access size it requires.
++*/
++#define IOH_DMA_SIZE_TYPE_32BIT (0x0U)
++
++/*! @ingroup InterfaceLayer
++ @def IOH_DMA_DIR_OUT_TO_IN
++ @brief Constant used to denote the transfer direction as
++ OUT_TO_IN.
++ @note This constant is used by other modules to make the
++ DMA module aware of the transfer direction it
++ requires.
++*/
++#define IOH_DMA_DIR_OUT_TO_IN (0x4)
++
++/*! @ingroup InterfaceLayer
++ @def IOH_DMA_DIR_IN_TO_OUT
++ @brief Constant used to denote the transfer direction as
++ IN_TO_OUT.
++ @note This constant is used by other modules to make the
++ DMA module aware of the transfer direction it
++ requires.
++*/
++#define IOH_DMA_DIR_IN_TO_OUT (0x0)
++
++/*! @ingroup InterfaceLayer
++ @def IOH_DMA_END
++ @brief Constant used to denote the transfer status as ACCESS
++ @note This constant is used by DMA modules to make the
++ other module aware that the DMA operation ended
++ normally.
++*/
++#define IOH_DMA_END (0)
++
++/*! @ingroup InterfaceLayer
++ @def IOH_DMA_ABORT
++ @brief Constant used to denote the transfer status as ACCESS
++ @note This constant is used by DMA modules to make the
++ other module aware that the DMA abort has
++ occurred.
++*/
++#define IOH_DMA_ABORT (-1)
++
++/* Bits to be set as the LSB 2 bits of the descriptor address. */
++/*! @ingroup InterfaceLayer
++ @def DMA_DESC_END_WITH_INTERRUPT
++ @brief Mask value for modifying the next descriptor
++ address, so that the descriptor end with
++ interrupt.
++*/
++#define DMA_DESC_END_WITH_INTERRUPT (0x00000001UL)
++
++/*! @ingroup InterfaceLayer
++ @def DMA_DESC_FOLLOW_WITH_INTERRUPT
++ @brief Mask value for modifying the next descriptor
++ address, so that the descriptor follow with
++ interrupt.
++
++*/
++#define DMA_DESC_FOLLOW_WITH_INTERRUPT (0x00000003UL)
++
++/*! @ingroup InterfaceLayer
++ @def DMA_DESC_END_WITHOUT_INTERRUPT
++ @brief Mask value for modifying the next descriptor
++ address, so that the descriptor end without
++ interrupt.
++*/
++#define DMA_DESC_END_WITHOUT_INTERRUPT (0x00000000UL)
++
++/*! @ingroup InterfaceLayer
++ @def DMA_DESC_FOLLOW_WITHOUT_INTERRUPT
++ @brief Mask value for modifying the next descriptor
++ address, so that the descriptor follow without
++ interrupt.
++
++*/
++#define DMA_DESC_FOLLOW_WITHOUT_INTERRUPT (0x00000002UL)
++
++/*! @ingroup InterfaceLayer
++ @def IOH_DMA_8BIT_COUNT_MAX
++	@brief	The maximum transfer count that can be set for
++			an 8Bit Access.
++
++*/
++#define IOH_DMA_8BIT_COUNT_MAX (0x3FF)
++
++/*! @ingroup InterfaceLayer
++ @def IOH_DMA_16BIT_COUNT_MAX
++	@brief	The maximum transfer count that can be set for
++ a 16Bit Access.
++
++*/
++#define IOH_DMA_16BIT_COUNT_MAX (0x3FF)
++
++/*! @ingroup InterfaceLayer
++ @def IOH_DMA_32BIT_COUNT_MAX
++	@brief	The maximum transfer count that can be set for
++ a 32Bit Access.
++
++*/
++#define IOH_DMA_32BIT_COUNT_MAX (0x7FF)
++
++/*! @ingroup DMA
++ @def IOH_DMA_SUCCESS
++ @brief The value indicating a success.
++*/
++#define IOH_DMA_SUCCESS (0)
++
++/*! @ingroup DMA
++ @def IOH_DMA_FAILURE
++ @brief The value indicating a failure.
++*/
++#define IOH_DMA_FAILURE (-1)
++
++/*! @ingroup InterfaceLayerFacilitators
++ @enum ioh_channel_request_id
++ @brief Constant used to denote the channel request type.
++ @note These constants are used by other modules to make the
++ DMA module aware of the channel type it
++ requires.
++*/
++enum ioh_channel_request_id {
++ IOH_DMA_TX_DATA_REQ0 = 1, /**< Transmission channel 0. */
++ IOH_DMA_RX_DATA_REQ0, /**< Reception channel 0. */
++ IOH_DMA_TX_DATA_REQ1, /**< Transmission channel 1. */
++ IOH_DMA_RX_DATA_REQ1, /**< Reception channel 1. */
++ IOH_DMA_TX_DATA_REQ2, /**< Transmission channel 2. */
++ IOH_DMA_RX_DATA_REQ2, /**< Reception channel 2. */
++ IOH_DMA_TX_DATA_REQ3, /**< Transmission channel 3. */
++ IOH_DMA_RX_DATA_REQ3, /**< Reception channel 3. */
++ IOH_DMA_TX_DATA_REQ4, /**< Transmission channel 4. */
++ IOH_DMA_RX_DATA_REQ4, /**< Reception channel 4. */
++ IOH_DMA_TX_DATA_REQ5, /**< Transmission channel 5. */
++ IOH_DMA_RX_DATA_REQ5 /**< Reception channel 5. */
++};
++
++/*! @ingroup InterfaceLayerFacilitators
++ @struct __ioh_dma_mode_param
++ @brief Format for specifying the mode characteristics of
++ a channel.
++ @note This structure is used by other modules to make the
++ DMA module aware of the channel mode
++ characteristics.
++*/
++
++struct ioh_dma_mode_param {
++ u16 TransferDirection; /**< Direction of Transfer(IN to OUT or OUT to
++ IN). */
++ u16 DMASizeType; /**< Type of DMA Transfer size (8bit, 16bit or
++ 32bit). */
++ u16 DMATransferMode; /**< Mode of Transfer (ONE_SHOT_MODE or
++ SCATTER_GATHER_MODE). */
++};
++
++/*! @ingroup InterfaceLayerFacilitators
++ @struct __ioh_dma_desc
++ @brief Format for specifying the descriptors.
++ @note This structure is used by other modules to make the
++ DMA module aware of the channel descriptors in
++ SCATTER_GATHER_MODE.
++*/
++
++struct ioh_dma_desc {
++ u32 insideAddress; /**< Inside address. */
++ u32 outsideAddress; /**< Outside address. */
++ u32 size; /**< Size. */
++ u32 nextDesc; /**< Next Descriptor address.*/
++};
++
++extern int ioh_request_dma(struct pci_dev *dev, int dreq);
++extern int ioh_free_dma(int channel);
++extern int ioh_set_dma_mode(int channel, struct ioh_dma_mode_param stModeParam);
++extern int ioh_set_dma_addr(int channel, unsigned int iaddr,
++ unsigned int oaddr);
++extern int ioh_set_dma_count(int channel, unsigned int count);
++extern int ioh_set_dma_desc(int channel, struct ioh_dma_desc *start,
++ struct ioh_dma_desc *end);
++extern int ioh_add_dma_desc(int channel, struct ioh_dma_desc *start,
++ struct ioh_dma_desc *end);
++extern int ioh_enable_dma(int channel);
++extern int ioh_disable_dma(int channel);
++extern int ioh_dma_set_callback(int channel,
++ void (*ioh_dma_cbr) (int value,
++ unsigned long data1),
++ unsigned long data);
++extern int ioh_set_dma_priority(int channel, int priority);
++extern int ioh_dma_direct_start(int channel);
++
++#endif
+--- /dev/null
++++ b/drivers/dma/pch_dma/pch_dma_pci.c
+@@ -0,0 +1,694 @@
++/**
++ * @file ioh_dma_pci.c
++ *
++ * @brief
++ * This file defines the methods of IOH_DMA_CONTROLLER driver.
++ *
++ * @version 0.90
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * <hr>
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 08/14/2009
++ *
++ */
++
++/* inclusion of system specific header files. */
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/kernel.h>
++#include <linux/spinlock.h>
++
++/* inclusion of module specific header files. */
++#include "pch_debug.h"
++#include "pch_dma_pci.h"
++#include "pch_dma_hal.h"
++
++MODULE_LICENSE("GPL");
++
++/* Global variables */
++
++/*! @ingroup Global
++ @var MODULE_NAME
++ @brief The module name variable.
++ @remarks This variable is used as the module name.
++*/
++#define MODULE_NAME "pch_dma"
++/*! @ingroup Global
++ @var ioh_device_suspended
++ @brief Device suspend flag.
++ @remarks This variable is used as a flag variable
++ for denoting the device suspend state.
++ @see
++ - ioh_dma_suspend
++ - ioh_dma_resume
++*/
++unsigned char ioh_device_suspended;
++
++/*! @ingroup Global
++ @var ioh_device_lock
++ @brief Device lock variable.
++ @remarks This variable is used as a lock variable
++ for accessing the DMA channel.
++ @see
++ - ioh_request_dma
++*/
++spinlock_t ioh_device_lock;
++
++/*! @ingroup Global
++ @var ioh_dma_devices
++ @brief Stores the details of the DMA devices.
++ @remarks This variable is the instance of the structure
++ struct ioh_dma_devices, which includes fields
++ for storing the details of the detected DMA
++ devices. This variable facilitates easy transfer
++ of information among the different functions of
++ the DMA module.
++*/
++struct ioh_dma_devices ioh_dma_devices[IOH_DMA_MAX_DEVS];
++
++/*! @ingroup PCILayerFacilitators
++ @struct ioh_dma_pcidev_id
++ @brief The structure for specifying the supported
++ device IDs to the PCI Kernel subsystem.
++ @remarks This structure is the instance of the
++ kernel provided structure pci_device_id,
++ which is used to store the PCI devices
++ Vendor and Device ID. This structure is
++ used during the registration of the DMA module
++ as PCI Driver. This structure makes the Kernel
++ aware of the PCI devices supported by this
++ module.
++
++ @see
++ - ioh_dma_controller_driver
++*/
++
++static const struct pci_device_id ioh_dma_pcidev_id[] __devinitdata = {
++ /* 4 Channel DMA device IDs */
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOH1_DMA4_0)},
++
++ /* 8 Channel DMA device IDs */
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOH1_DMA8_0)},
++
++ /* 12 Channel DMA device IDs */
++ {}
++};
++
++/* Function prototypes */
++static int __devinit ioh_dma_probe(struct pci_dev *pdev,
++ const struct pci_device_id *id);
++static void __devexit ioh_dma_remove(struct pci_dev *pdev);
++static int ioh_dma_suspend(struct pci_dev *pdev, pm_message_t state);
++static int ioh_dma_resume(struct pci_dev *pdev);
++static __init int ioh_dma_pci_init(void);
++static __exit void ioh_dma_pci_exit(void);
++static inline u32 get_dev_type(u32 devid);
++
++/*! @ingroup PCILayer
++ @def IOH_INVALID_DEVICE
++ @brief The return value of @ref get_dev_type for invalid
++ device type.
++
++ @see
++ - get_dev_type
++*/
++#define IOH_INVALID_DEVICE (0xFFFF)
++
++/*! @ingroup InternalFunction
++ @fn static inline u32 get_dev_type (u32 devid)
++ @brief Returns the IOH device type for given PCI device id.
++ @remarks This function returns the type of the detected DMA
++ device. The type specifies the number of DMA
++ channels contained within the detected device.
++ The tasks performed by this function include:
++ - Matches the PCI device ID passed to it with a
++ set of known device IDs.
++ - If a match is found it returns a constant
++ which indicates the device type (number of DMA
++ channels) within the device.
++ - If no match is found it returns @ref
++ IOH_INVALID_DEVICE.
++
++ @param devid [@ref IN] The device ID to be verified.
++
++ @return u32
++ - Values other than @ref IOH_INVALID_DEVICE
++ --> Detected device is valid and
++ supported.
++ - @ref IOH_INVALID_DEVICE --> Invalid device
++ detected.
++
++ @see
++ - ioh_dma_probe
++
++*/
++static inline u32 get_dev_type(u32 devid)
++{
++ u32 dev_type;
++
++ switch (devid) {
++ case PCI_DEVICE_ID_INTEL_IOH1_DMA4_0:
++ dev_type = IOH_DMA_4CH0;
++ break;
++
++ case PCI_DEVICE_ID_INTEL_IOH1_DMA8_0:
++ dev_type = IOH_DMA_8CH0;
++ break;
++
++ default:
++ IOH_LOG(KERN_ERR, "get_dev_type -> Unknown PCI "
++ "device 0x%x\n", devid);
++ dev_type = IOH_INVALID_DEVICE;
++ break;
++
++ }
++
++ IOH_DEBUG("Function get_dev_type returns %x.\n", dev_type);
++ return dev_type;
++}
++
++/*! @ingroup PCILayerAPI
++ @fn static int __devinit ioh_dma_probe(struct pci_dev* pdev,
++ const struct pci_device_id* id)
++ @brief Implements the probe function for the PCI driver.
++ @remarks This function acts as the probe function for
++ the PCI driver. The PCI core will be invoking
++ this function once it determines that this
++ driver is suitable for handling a particular
++ hardware. The main tasks performed by this
++ function are:
++ - Confirms whether the detected device is
++ supported by the driver.
++			- Enables the PCI device.
++ - Attains the device specific resources and
++ store it for further use.
++ - Enables the device and registers the handler
++ for handling the device interrupts.
++ - Initializes the device specific data
++ structures.
++
++ @param pdev [@ref INOUT] Reference to the pci_device
++ structure.
++ @param id [@ref IN] Reference to the pci_device_id
++ for which this device matches.
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> On success.
++ - -EIO --> pci_enable_device error status code.
++ - -EBUSY: --> pci_request_regions/request_irq
++ error status code.
++ - -EINVAL --> pci_enable_device/request_irq error
++ status code/invalid device ID.
++ - -ENOMEM --> request_irq/pci_iomap error status
++ code.
++ - -ENOSYS --> request_irq error status code.
++
++ @see
++ - ioh_dma_controller_driver
++ */
++static int __devinit ioh_dma_probe(struct pci_dev *pdev,
++ const struct pci_device_id *id)
++{
++ static unsigned int ioh_dma_dcount;
++ int retval;
++ u32 dev_type;
++ u32 base_addr = 0;
++ u8 device_enabled = 0;
++ u8 regions_requested = 0;
++ u8 irq_registered = 0;
++
++ do {
++ /* Getting the internally used device ID of the detected
++ device. */
++ dev_type = get_dev_type(id->device);
++ /* If invalid device. */
++ if ((IOH_INVALID_DEVICE == dev_type)) {
++ IOH_LOG(KERN_ERR, "ioh_dma_probe -> Invalid device ID "
++ "%x.\n", id->device);
++ retval = -EINVAL;
++ break;
++ }
++ IOH_DEBUG("ioh_dma_probe -> Valid device ID detected %x.\n",
++ id->device);
++
++ /* Enabling the detected device */
++ retval = pci_enable_device(pdev);
++ if (0 != retval) {
++ IOH_LOG(KERN_ERR,
++ "ioh_dma_probe -> Function pci_enable_device "
++ "failed, returned %d.\n", retval);
++ break;
++ }
++ device_enabled = 1;
++ IOH_DEBUG
++ ("ioh_dma_probe -> Function pci_enable_device invoked "
++ "successfully returned %d.\n", retval);
++
++ pci_set_master(pdev);
++ IOH_DEBUG("ioh_dma_probe -> Function pci_set_master invoked "
++ "successfully.\n");
++
++ /* Requesting the PCI device regions. */
++ retval = pci_request_regions(pdev, MODULE_NAME);
++ if (0 != retval) {
++ IOH_LOG(KERN_ERR,
++ "ioh_dma_probe -> Function pci_request_regions "
++ "failed, returned %d.\n", retval);
++ break;
++ }
++ regions_requested = 1;
++ IOH_DEBUG
++ ("ioh_dma_probe -> Function pci_request_regions invoked "
++ "successfully returned %d.\n", retval);
++
++ /* Remapping the device space to kernel space. */
++ /* Wipro 1/13/2010 Use Mem BAR */
++ base_addr = (u32) pci_iomap(pdev, 1, 0);
++ if (0 == base_addr) {
++ IOH_LOG(KERN_ERR,
++ "ioh_dma_probe -> Function pci_iomap failed "
++ "returned %x.\n", base_addr);
++ retval = -ENOMEM;
++ break;
++ }
++ IOH_DEBUG
++ ("ioh_dma_probe -> Function pci_iomap invoked successfully."
++ "\n");
++
++ /* Filling in the details within the device structure. */
++ ioh_dma_devices[ioh_dma_dcount].dev_typ = dev_type;
++ ioh_dma_devices[ioh_dma_dcount].base_addr = base_addr;
++ ioh_dma_devices[ioh_dma_dcount].dev = (void *)pdev;
++
++ /* Registering the interrupt handler. */
++ retval =
++ request_irq(pdev->irq, dma_interrupt, IRQF_SHARED,
++ MODULE_NAME, &ioh_dma_devices[ioh_dma_dcount]);
++ if (0 != retval) {
++ IOH_LOG(KERN_ERR,
++ "ioh_dma_probe -> Function request_irq failed, "
++ "returned %d.\n", retval);
++
++ break;
++ }
++ irq_registered = 1;
++ IOH_DEBUG
++ ("ioh_dma_probe -> Function request_irq invoked "
++ "successfully returned %d.\n", retval);
++
++ /* Initializing the DMA device. */
++ dma_init(base_addr, dev_type);
++ IOH_DEBUG
++ ("ioh_dma_probe -> Function dma_init invoked successfully."
++ "\n");
++
++		/* Storing the device structure reference for further use. */
++ pci_set_drvdata(pdev, &ioh_dma_devices[ioh_dma_dcount]);
++
++ /* Initializing the suspend flag and lock variable. */
++ if (0 == ioh_dma_dcount) { /* Initialize only once. */
++ ioh_device_suspended = 0;
++ spin_lock_init(&ioh_device_lock);
++ }
++
++ /* Incrementing the device structure index. */
++ ioh_dma_dcount++;
++
++ /* Probe successful. */
++ retval = IOH_DMA_SUCCESS;
++ IOH_DEBUG("ioh_dma_probe -> Probe successful.\n");
++
++ } while (0);
++
++ if (IOH_DMA_SUCCESS != retval) {
++ /* Un-registering the interrupt handler. */
++ if (1 == irq_registered) {
++ free_irq(pdev->irq, &ioh_dma_devices[ioh_dma_dcount]);
++ IOH_DEBUG("ioh_dma_probe -> Function free_irq invoked "
++ "successfully.\n");
++ }
++ /* Unmapping the remapped region. */
++ if (0 != base_addr) {
++ pci_iounmap(pdev, (void *)base_addr);
++ IOH_DEBUG
++ ("ioh_dma_probe -> Function pci_iounmap invoked "
++ "successfully.\n");
++ }
++ /* Releasing the requested regions. */
++ if (1 == regions_requested) {
++ pci_release_regions(pdev);
++ IOH_DEBUG
++ ("ioh_dma_probe -> Function pci_release_regions "
++ "invoked successfully.\n");
++ }
++ /* Disabling the device. */
++ if (1 == device_enabled) {
++ pci_disable_device(pdev);
++ IOH_DEBUG
++ ("ioh_dma_probe -> Function pci_disable_device "
++ "invoked successfully.\n");
++ }
++
++ IOH_DEBUG("ioh_dma_probe -> Probe failed.\n");
++ }
++
++ IOH_DEBUG("Function ioh_dma_probe returns %d.\n", retval);
++ return retval;
++}
++
++/*! @ingroup PCILayerAPI
++ @fn static void __devexit ioh_dma_remove(struct pci_dev* pdev)
++	@brief	Implements the remove function for the PCI driver.
++ @remarks This function is invoked by the PCI subsystem of the
++ Kernel when the DMA device is removed or the
++ module is unloaded.
++ It de-initializes and releases all the resources
++ attained during device detection. The main tasks
++ performed by this function are:
++ - De-initializes the DMA device.
++ - De-initializes the device specific data
++ structures.
++ - Releases all the resources attained during the
++ device detection phase.
++
++ @param pdev [@ref INOUT] Reference to the pci_device structure.
++
++ @return None.
++
++ @see
++ - ioh_dma_controller_driver
++ */
++static void __devexit ioh_dma_remove(struct pci_dev *pdev)
++{
++ struct ioh_dma_devices *dev;
++
++ /* Getting the driver data. */
++ dev = pci_get_drvdata(pdev);
++ /* Re-setting the driver data. */
++ pci_set_drvdata(pdev, NULL);
++
++ /* De-initializing the device. */
++ dma_exit(dev->dev_typ);
++ IOH_DEBUG("ioh_dma_remove -> Function dma_exit invoked "
++ "successfully.\n");
++
++ /* Un-registering the interrupt handler. */
++ free_irq(pdev->irq, dev);
++ IOH_DEBUG("ioh_dma_remove -> Function free_irq invoked "
++ "successfully.\n");
++
++ /* Un-mapping the remapped memory address. */
++ pci_iounmap(pdev, (void *)dev->base_addr);
++ dev->base_addr = 0;
++ IOH_DEBUG("ioh_dma_remove -> Function pci_iounmap invoked "
++ "successfully.\n");
++
++ /* Releasing the requested regions. */
++ pci_release_regions(pdev);
++ IOH_DEBUG("ioh_dma_remove -> Function pci_release_regions "
++ "invoked successfully.\n");
++
++ /* Disabling the device. */
++ pci_disable_device(pdev);
++ IOH_DEBUG("ioh_dma_remove -> Function pci_disable_device "
++ "invoked successfully.\n");
++
++ IOH_DEBUG("Function ioh_dma_remove invoked "
++ "successfully for device %x.\n", pdev->device);
++}
++
++#ifdef CONFIG_PM
++/*! @ingroup PCILayerAPI
++ @fn static int ioh_dma_suspend(struct pci_dev* pdev,
++ pm_message_t state)
++ @brief Implements the suspend function for the pci_driver.
++ @remarks This function is used as the suspend function of the PCI
++ Driver.
++ The PCI core will be invoking this function once
++ it receives a suspend event from the PM layer.
++ The main tasks performed by this functions are:
++ - Prepares the device so that it can enter the
++ suspend state by saving the current state.
++ - Disables all the DMA channels and the
++ associated interrupts.
++ - Changes the power state of the device to low
++ power state.
++
++ @param pdev [@ref INOUT] Reference to the pci_device structure.
++ @param state [@ref IN] The state of the device.
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> On success.
++ - -ENOMEM --> pci_save_state error status code.
++
++ @see
++ - ioh_dma_controller_driver
++ */
++static int ioh_dma_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ int retval;
++ struct ioh_dma_devices *dev;
++
++ /* Setting flag for denoting Suspension. */
++ ioh_device_suspended = 1;
++
++ /* Getting the driver data. */
++ dev = pci_get_drvdata(pdev);
++
++ /* Saving the current state of the device. */
++ retval = pci_save_state(pdev);
++ if (retval == 0) {
++ IOH_DEBUG("ioh_dma_suspend -> Function pci_save_state invoked "
++ "successfully (returned %d).\n", retval);
++
++ /* De-initializing the device for suspension. */
++ dma_exit(dev->dev_typ);
++ IOH_DEBUG("ioh_dma_suspend -> Function dma_exit invoked "
++ "successfully.\n");
++
++ /* Disabling the wake-up feature. */
++ pci_enable_wake(pdev, PCI_D3hot, 0);
++ IOH_DEBUG("ioh_dma_suspend -> Function pci_enable_wake "
++ "invoked successfully.\n");
++
++ /* Setting the device to new state. */
++ pci_set_power_state(pdev, pci_choose_state(pdev, state));
++ IOH_DEBUG("ioh_dma_suspend -> Function pci_set_power_state "
++ "invoked successfully.\n");
++
++ /* Disabling the device. */
++ pci_disable_device(pdev);
++ IOH_DEBUG("ioh_dma_suspend -> Function pci_disable_device "
++ "invoked successfully.\n");
++
++ retval = IOH_DMA_SUCCESS;
++ IOH_DEBUG("ioh_dma_suspend -> Suspension successful for "
++ "the device %x.\n", pdev->device);
++ } else {
++ IOH_LOG(KERN_ERR,
++ "ioh_dma_suspend -> Function pci_save_state failed"
++ "returned %d.\n", retval);
++
++ /* De-setting the flag on Suspend failure. */
++ ioh_device_suspended = 0;
++
++ IOH_DEBUG("ioh_dma_suspend -> Suspension un-successful for "
++ "the device %x.\n", pdev->device);
++ }
++
++ IOH_DEBUG("Function ioh_dma_suspend returns %d.\n", retval);
++ return retval;
++}
++
++/*! @ingroup PCILayerAPI
++ @fn static int ioh_dma_resume(struct pci_dev* pdev)
++ @brief Implements the resume function for the pci_driver.
++ @remarks This function is used as the resume function of the
++ PCI driver. The PCI core will be invoking this
++ function once it receives a resume event from
++ the PM layer. The main tasks performed by this
++ function are:
++ - Restores the power state of the device to
++ normal state.
++ - Enables the device so that it returns to its
++ normal state.
++
++ @param pdev [@ref INOUT] Pointer to the pci_device
++ structure.
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> On success.
++ - -EIO --> pci_enable_device error status code.
++ - -EINVAL --> pci_enable_device .
++
++ @see
++ - ioh_dma_controller_driver
++
++ */
++static int ioh_dma_resume(struct pci_dev *pdev)
++{
++ int retval;
++
++ /* Setting the device to normal power state. */
++ (void)pci_set_power_state(pdev, PCI_D0);
++ IOH_DEBUG("ioh_dma_resume -> Function pci_set_power_state invoked "
++ "successfully.\n");
++
++ /* Restoring the device state. */
++ (void)pci_restore_state(pdev);
++ IOH_DEBUG("ioh_dma_resume -> Function pci_restore_state invoked "
++ "successfully.\n");
++
++ /* Enabling the device. */
++ retval = pci_enable_device(pdev);
++
++ if (0 == retval) {
++ IOH_DEBUG("ioh_dma_resume -> Function pci_enable_device "
++ "invoked successfully returned %d.\n", retval);
++
++ pci_set_master(pdev);
++ IOH_DEBUG("ioh_dma_resume -> Function pci_set_master invoked "
++ "successfully.\n");
++
++ (void)pci_enable_wake(pdev, PCI_D3hot, 0);
++ IOH_DEBUG("ioh_dma_resume -> Function pci_enable_wake invoked "
++ "successfully.\n");
++
++ retval = IOH_DMA_SUCCESS;
++
++ /* De-setting the suspend flag to denote resumption
++ successful. */
++ ioh_device_suspended = 0;
++
++ IOH_DEBUG("ioh_dma_resume -> Resume successful for the "
++ "device %x.\n", pdev->device);
++ } else {
++ IOH_LOG(KERN_ERR,
++ "ioh_dma_resume -> Function pci_enable_device failed "
++ "returned %d.\n", retval);
++
++ IOH_DEBUG("ioh_dma_resume -> Resume failed for the device "
++ "%x.\n", pdev->device);
++ }
++
++ IOH_DEBUG("Function ioh_dma_resume returns %d.\n", retval);
++ return retval;
++}
++#endif
++
++/*! @ingroup PCILayerFacilitators
++ @struct ioh_dma_controller_driver
++ @brief Used for registering the PCI driver functionalities.
++ @remarks This is an instance of the structure pci_driver which
++ stores references to the PCI Driver
++ functionalities.
++ It is used during PCI driver registration for
++ interfacing the DMA module functionalities with
++ that of the Kernel subsystem.
++
++ @see
++ - ioh_dma_pci_init
++ - ioh_dma_pci_exit
++*/
++
++static struct pci_driver ioh_dma_controller_driver = {
++ .name = "ioh_dma", /**< Name of the module. */
++ .id_table = ioh_dma_pcidev_id, /**< The list of supported devices. */
++ .probe = ioh_dma_probe, /**< The probe function. */
++ .remove = __devexit_p(ioh_dma_remove), /**< The remove function. */
++#ifdef CONFIG_PM
++ .suspend = ioh_dma_suspend, /**< The suspend function. */
++ .resume = ioh_dma_resume /**< The resume function. */
++#endif
++};
++
++/*! @ingroup PCILayerAPI
++ @fn static __init int ioh_dma_pci_init(void)
++ @brief Module initialization routine.
++ @remarks This function is invoked when the module is
++ loaded. The main tasks performed by this
++ function are:
++ - Initializes the module.
++ - Initializes the local structures
++ and registers the module as PCI Driver
++ with the kernel subsystem.
++
++ @return int
++ - @ref IOH_DMA_SUCCESS --> On success.
++ - -EEXIST --> pci_register_driver error status
++ code.
++ - -EINVAL --> pci_register_driver error status
++ code.
++ - -ENOMEM --> pci_register_driver error status
++ code.
++
++ */
++static __init int ioh_dma_pci_init(void)
++{
++ int retval;
++
++ /* Registering the module as PCI Driver. */
++ retval = pci_register_driver(&ioh_dma_controller_driver);
++
++ if (0 == retval) {
++ IOH_DEBUG
++ ("ioh_dma_pci_init -> Function pci_register_driver invoked "
++ "successfully returned %d.\n", retval);
++
++ retval = IOH_DMA_SUCCESS;
++ } else {
++ IOH_LOG(KERN_ERR,
++ "ioh_dma_pci_init -> Function pci_register_driver "
++ "failed returned %d.\n", retval);
++ }
++
++ IOH_DEBUG("Function ioh_dma_pci_init returns %d.\n", retval);
++ return retval;
++}
++
++/*! @ingroup PCILayerAPI
++ @fn static __exit void ioh_dma_pci_exit(void)
++ @brief Module exit handler.
++ @remarks Kernel subsystem will be invoking this routine
++ once the module gets unloaded. The main tasks
++ performed by this function are:
++ - Un-registers the PCI driver.
++ - Unloads the module.
++
++ @return None.
++ */
++static __exit void ioh_dma_pci_exit(void)
++{
++ /* Un-registering the module as PCI Driver. */
++ pci_unregister_driver(&ioh_dma_controller_driver);
++ IOH_DEBUG("ioh_dma_pci_exit -> Function pci_unregister_driver "
++ "invoked successfully.\n");
++
++ IOH_DEBUG("Function ioh_dma_pci_exit invoked successfully.\n");
++}
++
++MODULE_DEVICE_TABLE(pci, ioh_dma_pcidev_id);
++module_init(ioh_dma_pci_init);
++module_exit(ioh_dma_pci_exit);
+--- /dev/null
++++ b/drivers/dma/pch_dma/pch_dma_pci.h
+@@ -0,0 +1,74 @@
++/**
++ * @file ioh_dma_pci.h
++ *
++ * @brief
++ * This file declares the constants & functions used by the
++ * IOH_DMA_CONTROLLER driver.
++ *
++ * @version 0.90
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * <hr>
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 08/14/2009
++ *
++ */
++
++#ifndef __IOH_DMA_PCI_H__
++#define __IOH_DMA_PCI_H__
++
++/*! @ingroup PCILayer
++ @def PCI_DEVICE_ID_INTEL_IOH1_DMA4_0
++ @brief The Device ID of one of the DMA devices
++ with 4 channels used for the GE devices.
++ @note This is used for registering the DMA module
++ with the PCI subsystem of the Kernel, so that
++ the module is loaded when the required device
++ is detected.
++
++ @see
++ - ioh_dma_pcidev_id
++ - get_dev_type
++*/
++#define PCI_DEVICE_ID_INTEL_IOH1_DMA4_0 (0x8815UL)
++
++/*! @ingroup PCILayer
++ @def PCI_DEVICE_ID_DMA8_0_CONTROLLER
++ @brief The Device ID of one of the DMA devices
++ with 8 channels used for the GE devices.
++ @note This is used for registering the DMA module
++ with the PCI subsystem of the Kernel, so that
++ the module is loaded when the required device
++ is detected.
++
++ @see
++ - ioh_dma_pcidev_id
++ - get_dev_type
++*/
++#define PCI_DEVICE_ID_INTEL_IOH1_DMA8_0 (0x8810UL)
++
++extern unsigned char ioh_device_suspended; /* The device suspend flag. */
++extern spinlock_t ioh_device_lock; /* The device lock variable. */
++
++#endif
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-gbe.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-gbe.patch
new file mode 100644
index 0000000..e1d51e7
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-gbe.patch
@@ -0,0 +1,8889 @@
+
+
+From: Masayuki Ohtake <masa-korg@dsn.okisemi.com>
+Subject: OKI Semiconductor PCH GbE driver
+
+This driver implements GbE controls for PCH.
+
+Signed-off-by: Masayuki Ohtake <masa-korg@dsn.okisemi.com>
+Acked-by: Wang Qi <qi.wang@intel.com>
+
+---
+ drivers/net/Kconfig | 5 ++
+ drivers/net/Makefile | 1
+ drivers/net/pch_gbe/Makefile | 6
+ drivers/net/pch_gbe/pch_gbe_api.c | 644
+ drivers/net/pch_gbe/pch_gbe_api.h | 252
+ drivers/net/pch_gbe/pch_gbe_defines.h | 367
+ drivers/net/pch_gbe/pch_gbe_ethtool.c | 1306
+ drivers/net/pch_gbe/pch_gbe.h | 230
+ drivers/net/pch_gbe/pch_gbe_hw.h | 259
+ drivers/net/pch_gbe/pch_gbe_mac.c | 522
+ drivers/net/pch_gbe/pch_gbe_mac.h | 121
+ drivers/net/pch_gbe/pch_gbe_main.c | 2973
+ drivers/net/pch_gbe/pch_gbe_nvm.c | 129
+ drivers/net/pch_gbe/pch_gbe_nvm.h | 85
+ drivers/net/pch_gbe/pch_gbe_osdep.h | 74
+ drivers/net/pch_gbe/pch_gbe_param.c | 594
+ drivers/net/pch_gbe/pch_gbe_pci_ids.h | 38
+ drivers/net/pch_gbe/pch_gbe_phy.c | 493
+ drivers/net/pch_gbe/pch_gbe_phy.h | 136
+ drivers/net/pch_gbe/pch_gbe_plat.c | 175
+ drivers/net/pch_gbe/pch_gbe_regs.h | 351
++++++++++++++++++++++++++++++++ 21 files changed, 8761 insertions(+)
+diff -urN linux-2.6.33-rc3/drivers/net/Kconfig topcliff-2.6.33-rc3/drivers/net/Kconfig
+--- linux-2.6.33-rc3/drivers/net/Kconfig 2010-01-06 09:02:46.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/Kconfig 2010-03-12 16:24:03.000000000 +0900
+@@ -1977,6 +1977,11 @@
+ If you say N, all options in this submenu will be skipped and disabled.
+
+ if NETDEV_1000
++config PCH_GBE
++ tristate "PCH Gigabit Ethernet"
++ ---help---
++ This is a gigabit ethernet driver for the PCH on-chip
++ network controller.
+
+ config ACENIC
+ tristate "Alteon AceNIC/3Com 3C985/NetGear GA620 Gigabit support"
+diff -urN linux-2.6.33-rc3/drivers/net/Makefile topcliff-2.6.33-rc3/drivers/net/Makefile
+--- linux-2.6.33-rc3/drivers/net/Makefile 2010-01-06 09:02:46.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/Makefile 2010-03-05 10:25:56.000000000 +0900
+@@ -287,3 +287,4 @@
+ obj-$(CONFIG_WIMAX) += wimax/
+
+ obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
++obj-$(CONFIG_PCH_GBE) += pch_gbe/
+diff -urN linux-2.6.33-rc3/drivers/net/pch_gbe/Makefile topcliff-2.6.33-rc3/drivers/net/pch_gbe/Makefile
+--- linux-2.6.33-rc3/drivers/net/pch_gbe/Makefile 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/pch_gbe/Makefile 2010-03-11 09:35:16.000000000 +0900
+@@ -0,0 +1,6 @@
++ifeq ($(CONFIG_PCH_GBE_DEBUG_CORE),y)
++EXTRA_CFLAGS += -DDEBUG
++endif
++
++obj-$(CONFIG_PCH_GBE) += pch_gbe.o
++pch_gbe-objs := pch_gbe_mac.o pch_gbe_phy.o pch_gbe_nvm.o pch_gbe_ethtool.o pch_gbe_plat.o pch_gbe_param.o pch_gbe_api.o pch_gbe_main.o
+diff -urN linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_api.c topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_api.c
+--- linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_api.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_api.c 2010-03-11 15:12:34.000000000 +0900
+@@ -0,0 +1,646 @@
++/*!
++ * @file ioh_gbe_api.c
++ * @brief Linux IOH Gigabit Ethernet Driver HAL API source file
++ *
++ * @version 0.90
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR CO., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 09/21/2009
++ * modified:
++ *
++ */
++
++#include "pch_gbe_osdep.h"
++#include "pch_gbe_defines.h"
++#include "pch_gbe_hw.h"
++#include "pch_gbe_mac.h"
++#include "pch_gbe_api.h"
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_set_mac_type(struct ioh_gbe_hw *hw)
++ * @brief Sets MAC type
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks This function sets the mac type of the adapter based on the
++ * device ID stored in the hw structure.
++ * MUST BE FIRST FUNCTION CALLED (explicitly or through
++ * ioh_gbe_hal_setup_init_funcs()).
++ */
++s32 ioh_gbe_hal_set_mac_type(struct ioh_gbe_hw *hw)
++{
++ struct ioh_gbe_mac_info *mac = &hw->mac;
++ s32 ret_val = IOH_GBE_SUCCESS;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_set_mac_type");
++
++ switch ((u16) hw->device_id) {
++ case PCI_DEVICE_ID_INTEL_IOH1_GBE:
++ mac->type = IOH_GBE_MAC_TYPE_IOH1;
++ break;
++ default:
++ /* Should never have loaded on this device */
++ mac->type = IOH_GBE_MAC_TYPE_UNDEFINED;
++ ret_val = -IOH_GBE_ERR_MAC_INIT;
++ break;
++ }
++ IOH_GBE_TESTOUT("mac->type:0x%x ret_val:0x%x\n", mac->type, ret_val);
++ return ret_val;
++}
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_setup_init_funcs(struct ioh_gbe_hw *hw)
++ * @brief Initializes function pointers
++ * @param hw [INOUT] pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks This function must be called by a driver in order to use the rest
++ * of the 'shared' code files. Called by drivers only.
++ */
++s32 ioh_gbe_hal_setup_init_funcs(struct ioh_gbe_hw *hw)
++{
++ s32 ret_val;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_setup_init_funcs");
++
++ /* Can't do much good without knowing the MAC type.
++ */
++ ret_val = ioh_gbe_hal_set_mac_type(hw);
++ if (ret_val) {
++ IOH_GBE_DBGOUT("ERROR: MAC type could not be set properly.\n");
++ goto out;
++ }
++
++ if (!hw->hw_addr) {
++ IOH_GBE_DBGOUT("ERROR: Registers not mapped\n");
++ ret_val = -IOH_GBE_ERR_CONFIG;
++ goto out;
++ }
++
++ /* Set up the init function pointers. These are functions within the
++ * adapter family file that sets up function pointers for the rest of
++ * the functions in that family.
++ */
++ switch (hw->mac.type) {
++ case IOH_GBE_MAC_TYPE_IOH1:
++ case IOH_GBE_MAC_TYPE_IOH2:
++ ioh_gbe_plat_init_function_pointers(hw);
++ break;
++ default:
++ IOH_GBE_DBGOUT("Hardware not supported\n");
++ ret_val = -IOH_GBE_ERR_CONFIG;
++ break;
++ }
++out:
++ IOH_GBE_TESTOUT("ret_val:0x%x\n", ret_val);
++ return ret_val;
++}
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn void ioh_gbe_hal_get_bus_info(struct ioh_gbe_hw *hw)
++ * @brief Obtain bus information for adapter
++ * @param hw [INOUT] pointer to the HW structure
++ * @return None
++ * @remarks This will obtain information about the HW bus for which the
++ * adaper is attached and stores it in the hw structure. This is a
++ * function pointer entry point called by drivers.
++ */
++void ioh_gbe_hal_get_bus_info(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_get_bus_info");
++
++ if (hw->func.get_bus_info != NULL)
++ hw->func.get_bus_info(hw);
++ else
++ IOH_GBE_ERR("Error: configuration\n");
++}
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn void ioh_gbe_hal_mc_addr_list_update(struct ioh_gbe_hw *hw,
++ * u8 *mc_addr_list, u32 mc_addr_count,
++ * u32 mar_used_count, u32 mar_count)
++ * @brief Update Multicast addresses
++ * @param hw [INOUT] Pointer to the HW structure
++ * @param mc_addr_list [IN]Array of multicast addresses to program
++ * @param mc_addr_count [IN]Number of multicast addresses to program
++ * @param mar_used_count [IN]The first MAC Address register free to program
++ * @param mar_count [IN]Total number of supported MAC Address Registers
++ * @return None
++ * @remarks
++ * Updates the MAC Address Registers and Multicast Table Array.
++ * The caller must have a packed mc_addr_list of multicast addresses.
++ * The parameter mar_count will usually be hw->mac.mar_entry_count
++ * unless there are workarounds that change this. Currently no func pointer
++ * exists and all implementations are handled in the generic version of this
++ * function.
++ */
++void
++ioh_gbe_hal_mc_addr_list_update(struct ioh_gbe_hw *hw,
++ u8 *mc_addr_list,
++ u32 mc_addr_count,
++ u32 mar_used_count, u32 mar_count)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_mc_addr_list_update");
++
++ if (hw->func.mc_addr_list_update != NULL) {
++ hw->func.mc_addr_list_update(hw,
++ mc_addr_list,
++ mc_addr_count,
++ mar_used_count, mar_count);
++ }
++}
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_force_mac_fc(struct ioh_gbe_hw *hw)
++ * @brief Force MAC flow control
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks
++ * Force the MAC's flow control settings. Currently no func pointer exists
++ * and all implementations are handled in the generic version of this
++ * function.
++ */
++s32 ioh_gbe_hal_force_mac_fc(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_force_mac_fc");
++
++ return ioh_gbe_mac_force_mac_fc(hw);
++}
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_reset_hw(struct ioh_gbe_hw *hw)
++ * @brief Reset hardware
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks
++ * This resets the hardware into a known state. This is a function pointer
++ * entry point called by drivers.
++ */
++s32 ioh_gbe_hal_reset_hw(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_reset_hw");
++
++ if (hw->func.reset_hw != NULL) {
++ hw->func.reset_hw(hw);
++ return IOH_GBE_SUCCESS;
++ } else {
++ IOH_GBE_ERR("Error: configuration\n");
++ return -IOH_GBE_ERR_CONFIG;
++ }
++}
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_init_hw(struct ioh_gbe_hw *hw)
++ * @brief Initialize hardware
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks
++ * This inits the hardware readying it for operation. This is a function
++ * pointer entry point called by drivers.
++ */
++s32 ioh_gbe_hal_init_hw(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_init_hw");
++
++ if (hw->func.init_hw != NULL) {
++ return hw->func.init_hw(hw);
++ } else {
++ IOH_GBE_ERR("Error: configuration\n");
++ return -IOH_GBE_ERR_CONFIG;
++ }
++}
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_setup_link(struct ioh_gbe_hw *hw)
++ * @brief Configures link and flow control
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks
++ * This configures link and flow control settings for the adapter. This
++ * is a function pointer entry point called by drivers. While modules can
++ * also call this, they probably call their own version of this function.
++ */
++s32 ioh_gbe_hal_setup_link(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_setup_link");
++
++ if (hw->func.setup_link != NULL) {
++ return hw->func.setup_link(hw);
++ } else {
++ IOH_GBE_ERR("Error: configuration\n");
++ return -IOH_GBE_ERR_CONFIG;
++ }
++}
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_setup_led(struct ioh_gbe_hw *hw)
++ * @brief Configures SW controllable LED
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks
++ * This prepares the SW controllable LED for use and saves the current state
++ * of the LED so it can be later restored. This is a function pointer entry
++ * point called by drivers.
++ */
++s32 ioh_gbe_hal_setup_led(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_setup_led");
++
++ if (hw->func.setup_led != NULL)
++ return hw->func.setup_led(hw);
++ else
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_cleanup_led(struct ioh_gbe_hw *hw)
++ * @brief Restores SW controllable LED
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks
++ * This restores the SW controllable LED to the value saved off by
++ * ioh_gbe_hal_setup_led.
++ * This is a function pointer entry point called by drivers.
++ */
++s32 ioh_gbe_hal_cleanup_led(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_cleanup_led");
++
++ if (hw->func.cleanup_led != NULL)
++ return hw->func.cleanup_led(hw);
++ else
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_led_on(struct ioh_gbe_hw *hw)
++ * @brief Turn on SW controllable LED
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks
++ * Turns the SW defined LED on. This is a function pointer entry point
++ * called by drivers.
++ */
++s32 ioh_gbe_hal_led_on(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_led_on");
++
++ if (hw->func.led_on != NULL)
++ return hw->func.led_on(hw);
++ else
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_led_off(struct ioh_gbe_hw *hw)
++ * @brief Turn off SW controllable LED
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks
++ * Turns the SW defined LED off. This is a function pointer entry point
++ * called by drivers.
++ */
++s32 ioh_gbe_hal_led_off(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_led_off");
++
++ if (hw->func.led_off != NULL)
++ return hw->func.led_off(hw);
++ else
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn void ioh_gbe_hal_mar_set(struct ioh_gbe_hw *hw, u8 *addr, u32 index)
++ * @brief Sets a MAC address register
++ * @param hw [INOUT] Pointer to the HW structure
++ * @param addr [IN] Address to set the RAR to
++ * @param index [IN] The RAR to set
++ * @return None
++ * @remarks
++ * Sets a MAC Address Register (RAR) to the specified address.
++ * Currently no func pointer exists and all implementations are
++ * handled in the generic version of this function.
++ */
++void ioh_gbe_hal_mar_set(struct ioh_gbe_hw *hw, u8 *addr, u32 index)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_mar_set");
++
++ ioh_gbe_mac_mar_set(hw, addr, index);
++}
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_read_phy_reg(struct ioh_gbe_hw *hw,
++ * u32 offset, u16 *data)
++ * @brief Reads PHY register
++ * @param hw [INOUT] Pointer to the HW structure
++ * @param offset [IN] The register to read
++ * @param data [IN] The buffer to store the 16-bit read.
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks
++ * Reads the PHY register and returns the value in data.
++ * This is a function pointer entry point called by drivers.
++ */
++s32 ioh_gbe_hal_read_phy_reg(struct ioh_gbe_hw *hw, u32 offset, u16 *data)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_read_phy_reg");
++
++ if (hw->func.read_phy_reg != NULL)
++ return hw->func.read_phy_reg(hw, offset, data);
++ else
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_write_phy_reg(struct ioh_gbe_hw *hw,
++ * u32 offset, u16 data)
++ * @brief Writes PHY register
++ * @param hw [INOUT] Pointer to the HW structure
++ * @param offset [IN] The register to write
++ * @param data [IN] The value to write.
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks
++ * Writes the PHY register at offset with the value in data.
++ * This is a function pointer entry point called by drivers.
++ */
++s32 ioh_gbe_hal_write_phy_reg(struct ioh_gbe_hw *hw, u32 offset, u16 data)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_write_phy_reg");
++
++ if (hw->func.write_phy_reg != NULL)
++ return hw->func.write_phy_reg(hw, offset, data);
++ else
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn void ioh_gbe_hal_phy_hw_reset(struct ioh_gbe_hw *hw)
++ * @brief Hard PHY reset
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return None
++ * @remarks
++ * Performs a hard PHY reset. This is a function pointer entry point called
++ * by drivers.
++ */
++void ioh_gbe_hal_phy_hw_reset(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_phy_hw_reset");
++
++ if (hw->func.reset_phy != NULL)
++ hw->func.reset_phy(hw);
++ else
++ IOH_GBE_ERR("Error: configuration\n");
++}
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn void ioh_gbe_hal_phy_sw_reset(struct ioh_gbe_hw *hw)
++ * @brief Soft PHY reset
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return None
++ * @remarks
++ * Performs a soft PHY reset on those that apply. This is a function pointer
++ * entry point called by drivers.
++ */
++void ioh_gbe_hal_phy_sw_reset(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_phy_sw_reset");
++
++ if (hw->func.sw_reset_phy != NULL)
++ hw->func.sw_reset_phy(hw);
++ else
++ IOH_GBE_ERR("Error: configuration\n");
++}
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_read_mac_addr(struct ioh_gbe_hw *hw)
++ * @brief Reads MAC address
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks
++ * Reads the MAC address out of the adapter and stores it in the HW structure.
++ * Currently no func pointer exists and all implementations are handled in the
++ * generic version of this function.
++ */
++s32 ioh_gbe_hal_read_mac_addr(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_read_mac_addr");
++
++ if (hw->func.read_mac_addr != NULL) {
++ return hw->func.read_mac_addr(hw);
++ } else {
++ IOH_GBE_ERR("Error: configuration\n");
++ return -IOH_GBE_ERR_CONFIG;
++ }
++}
++
++#ifdef CONFIG_PCH_PCIEQOS
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_validate_nvm_checksum(struct ioh_gbe_hw *hw)
++ * @brief Verifies NVM (EEPROM) checksum
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks
++ * Validates the NVM checksum is correct. This is a function pointer entry
++ * point called by drivers.
++ */
++s32 ioh_gbe_hal_validate_nvm_checksum(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_validate_nvm_checksum");
++
++ if (hw->func.validate_nvm != NULL) {
++ return hw->func.validate_nvm(hw);
++ } else {
++ IOH_GBE_ERR("Error: configuration\n");
++ return -IOH_GBE_ERR_CONFIG;
++ }
++}
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_read_nvm(struct ioh_gbe_hw *hw,
++ * u32 offset, u8 *data)
++ * @brief Reads NVM (EEPROM)
++ * @param hw [INOUT] Pointer to the HW structure
++ * @param offset [IN] The word offset to read
++ * @param data [IN] Pointer to the properly sized buffer for the data.
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks
++ * Reads 16-bit chunks of data from the NVM (EEPROM). This is a function
++ * pointer entry point called by drivers.
++ */
++s32 ioh_gbe_hal_read_nvm(struct ioh_gbe_hw *hw, u32 offset, u8 *data)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_read_nvm");
++
++ if (hw->func.read_nvm != NULL) {
++ return hw->func.read_nvm(hw, offset, data);
++ } else {
++ IOH_GBE_ERR("Error: configuration\n");
++ return -IOH_GBE_ERR_CONFIG;
++ }
++}
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_write_nvm(struct ioh_gbe_hw *hw,
++ * u32 offset, u8 *data)
++ * @brief Writes to NVM (EEPROM)
++ * @param hw [INOUT] Pointer to the HW structure
++ * @param offset [IN] The word offset to read
++ * @param data [IN] Pointer to the properly sized buffer for the data.
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks
++ * Writes 16-bit chunks of data to the NVM (EEPROM). This is a function
++ * pointer entry point called by drivers.
++ */
++s32 ioh_gbe_hal_write_nvm(struct ioh_gbe_hw *hw, u32 offset, u8 *data)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_write_nvm");
++
++ if (hw->func.write_nvm != NULL)
++ return hw->func.write_nvm(hw, offset, data);
++ else
++ return IOH_GBE_SUCCESS;
++}
++#endif /* CONFIG_PCH_PCIEQOS */
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn void ioh_gbe_hal_set_wol_event(struct ioh_gbe_hw *hw, u32 wu_evt)
++ * @brief Set wake-on-lan event
++ * @param hw [INOUT] Pointer to the HW structure
++ * @param wu_evt [IN] Wake up event
++ * @return None
++ */
++void ioh_gbe_hal_set_wol_event(struct ioh_gbe_hw *hw, u32 wu_evt)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_set_wol_event");
++
++ ioh_gbe_mac_set_wol_event(hw, wu_evt);
++}
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn void ioh_gbe_hal_power_up_phy(struct ioh_gbe_hw *hw)
++ * @brief Power up PHY
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return None
++ */
++void ioh_gbe_hal_power_up_phy(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_power_up_phy");
++
++ if (hw->func.power_up_phy != NULL)
++ hw->func.power_up_phy(hw);
++}
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn void ioh_gbe_hal_power_down_phy(struct ioh_gbe_hw *hw)
++ * @brief Power down PHY
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return None
++ */
++void ioh_gbe_hal_power_down_phy(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_power_down_phy");
++
++ if (hw->func.power_down_phy != NULL)
++ hw->func.power_down_phy(hw);
++}
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn u16 ioh_gbe_hal_ctrl_miim(struct ioh_gbe_hw *hw,
++ * u32 addr, u32 dir, u32 reg, u16 data)
++ * @brief Control MII Management IF
++ * @param hw [INOUT] Pointer to the HW structure
++ * @param addr [IN] Address of PHY
++ * @param dir [IN] Operation. (Write or Read)
++ * @param reg [IN] Access register of PHY
++ * @param data [IN] Write data
++ * @return None
++ */
++u16
++ioh_gbe_hal_ctrl_miim(struct ioh_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
++ u16 data)
++{
++ IOH_GBE_DBGOUT2("ioh_gbe_hal_ctrl_miim\n");
++
++ if (hw->func.ctrl_miim != NULL) {
++ return hw->func.ctrl_miim(hw, addr, dir, reg, data);
++ } else {
++ IOH_GBE_ERR("Error: configuration\n");
++ return IOH_GBE_SUCCESS;
++ }
++}
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn void ioh_gbe_hal_set_pause_packet(struct ioh_gbe_hw *hw)
++ * @brief Set pause packet
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return None
++ */
++void ioh_gbe_hal_set_pause_packet(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_hal_set_pause_packet");
++
++ if (hw->func.pause_packet != NULL)
++ hw->func.pause_packet(hw);
++ else
++ IOH_GBE_ERR("Error: configuration\n");
++}
+diff -urN linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_api.h topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_api.h
+--- linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_api.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_api.h 2010-03-11 15:11:51.000000000 +0900
+@@ -0,0 +1,252 @@
++/*!
++ * @file ioh_gbe_api.h
++ * @brief Linux IOH Gigabit Ethernet Driver HAL API header file
++ *
++ * @version 0.90
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR CO., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 09/21/2009
++ * modified:
++ *
++ */
++
++#ifndef _IOH_GBE_API_H_
++#define _IOH_GBE_API_H_
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_set_mac_type(struct ioh_gbe_hw *hw)
++ * @brief Sets MAC type
++ */
++s32 ioh_gbe_hal_set_mac_type(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_setup_init_funcs(struct ioh_gbe_hw *hw)
++ * @brief Initializes function pointers
++ */
++s32 ioh_gbe_hal_setup_init_funcs(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn void ioh_gbe_hal_get_bus_info(struct ioh_gbe_hw *hw)
++ * @brief Obtain bus information for adapter
++ */
++void ioh_gbe_hal_get_bus_info(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn void ioh_gbe_hal_mc_addr_list_update(struct ioh_gbe_hw *hw,
++ * u8 *mc_addr_list, u32 mc_addr_count,
++ * u32 mar_used_count, u32 mar_count)
++ * @brief Update Multicast addresses
++ */
++void ioh_gbe_hal_mc_addr_list_update(struct ioh_gbe_hw *hw,
++ u8 *mc_addr_list, u32 mc_addr_count,
++ u32 mar_used_count, u32 mar_count);
++
++/*
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_force_mac_fc(struct ioh_gbe_hw *hw)
++ * @brief Force MAC flow control
++ */
++s32 ioh_gbe_hal_force_mac_fc(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_reset_hw(struct ioh_gbe_hw *hw)
++ * @brief Reset hardware
++ */
++s32 ioh_gbe_hal_reset_hw(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_init_hw(struct ioh_gbe_hw *hw)
++ * @brief Initialize hardware
++ */
++s32 ioh_gbe_hal_init_hw(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_setup_link(struct ioh_gbe_hw *hw)
++ * @brief Configures link and flow control
++ */
++s32 ioh_gbe_hal_setup_link(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_setup_led(struct ioh_gbe_hw *hw)
++ * @brief Configures SW controllable LED
++ */
++s32 ioh_gbe_hal_setup_led(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_cleanup_led(struct ioh_gbe_hw *hw)
++ * @brief Restores SW controllable LED
++ */
++s32 ioh_gbe_hal_cleanup_led(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_led_on(struct ioh_gbe_hw *hw)
++ * @brief Turn on SW controllable LED
++ */
++s32 ioh_gbe_hal_led_on(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_led_off(struct ioh_gbe_hw *hw)
++ * @brief Turn off SW controllable LED
++ */
++s32 ioh_gbe_hal_led_off(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn void ioh_gbe_hal_mar_set(struct ioh_gbe_hw *hw, u8 *addr, u32 index)
++ * @brief Sets a MAC address register
++ */
++void ioh_gbe_hal_mar_set(struct ioh_gbe_hw *hw, u8 *addr, u32 index);
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_read_phy_reg(struct ioh_gbe_hw *hw,
++ * u32 offset, u16 *data)
++ * @brief Reads PHY register
++ */
++s32 ioh_gbe_hal_read_phy_reg(struct ioh_gbe_hw *hw, u32 offset, u16 *data);
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_write_phy_reg(struct ioh_gbe_hw *hw,
++ * u32 offset, u16 data)
++ * @brief Writes PHY register
++ */
++s32 ioh_gbe_hal_write_phy_reg(struct ioh_gbe_hw *hw, u32 offset, u16 data);
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn void ioh_gbe_hal_phy_hw_reset(struct ioh_gbe_hw *hw)
++ * @brief Hard PHY reset
++ */
++void ioh_gbe_hal_phy_hw_reset(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn void ioh_gbe_hal_phy_sw_reset(struct ioh_gbe_hw *hw)
++ * @brief Soft PHY reset
++ */
++void ioh_gbe_hal_phy_sw_reset(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_read_mac_addr(struct ioh_gbe_hw *hw)
++ * @brief Reads MAC address
++ */
++s32 ioh_gbe_hal_read_mac_addr(struct ioh_gbe_hw *hw);
++
++#ifdef CONFIG_PCH_PCIEQOS
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_validate_nvm_checksum(struct ioh_gbe_hw *hw)
++ * @brief Verifies NVM (EEPROM) checksum
++ */
++s32 ioh_gbe_hal_validate_nvm_checksum(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_read_nvm(struct ioh_gbe_hw *hw,
++ * u32 offset, u8 *data)
++ * @brief Reads NVM (EEPROM)
++ */
++s32 ioh_gbe_hal_read_nvm(struct ioh_gbe_hw *hw, u32 offset, u8 *data);
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn s32 ioh_gbe_hal_write_nvm(struct ioh_gbe_hw *hw,
++ * u32 offset, u8 *data)
++ * @brief Writes to NVM (EEPROM)
++ */
++s32 ioh_gbe_hal_write_nvm(struct ioh_gbe_hw *hw, u32 offset, u8 *data);
++#endif /* CONFIG_PCH_PCIEQOS */
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn void ioh_gbe_hal_set_wol_event(struct ioh_gbe_hw *hw, u32 wu_evt)
++ * @brief Set wake-on-lan event
++ */
++void ioh_gbe_hal_set_wol_event(struct ioh_gbe_hw *hw, u32 wu_evt);
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn void ioh_gbe_hal_power_up_phy(struct ioh_gbe_hw *hw)
++ * @brief Power up PHY
++ */
++void ioh_gbe_hal_power_up_phy(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn void ioh_gbe_hal_power_down_phy(struct ioh_gbe_hw *hw)
++ * @brief Power down PHY
++ */
++void ioh_gbe_hal_power_down_phy(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn u16 ioh_gbe_hal_ctrl_miim(struct ioh_gbe_hw *hw,
++ * u32 addr, u32 dir, u32 reg, u16 data)
++ * @brief Control MII Management IF
++ */
++u16 ioh_gbe_hal_ctrl_miim(struct ioh_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
++ u16 data);
++
++/*!
++ * @ingroup HAL API Layer
++ * @fn void ioh_gbe_hal_set_pause_packet(struct ioh_gbe_hw *hw)
++ * @brief Set pause packet
++ */
++void ioh_gbe_hal_set_pause_packet(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL API Layer
++ * @def IOH_GBE_HAL_MIIM_READ
++ * @brief Read operation is done through MII Management IF
++ */
++#define IOH_GBE_HAL_MIIM_READ ((u32)0x00000000)
++
++/*!
++ * @ingroup HAL API Layer
++ * @def IOH_GBE_HAL_MIIM_WRITE
++ * @brief Write operation is done through MII Management IF
++ */
++#define IOH_GBE_HAL_MIIM_WRITE ((u32)0x04000000)
++
++/* pch_gbe_plat.c */
++/*!
++ * @ingroup HAL internal functions
++ * @fn void ioh_gbe_plat_init_function_pointers(struct ioh_gbe_hw *hw)
++ * @brief Init func ptrs.
++ */
++void ioh_gbe_plat_init_function_pointers(struct ioh_gbe_hw *hw);
++
++#endif
+diff -urN linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_defines.h topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_defines.h
+--- linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_defines.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_defines.h 2010-03-09 09:27:26.000000000 +0900
+@@ -0,0 +1,367 @@
++/*!
++ * @file ioh_gbe_defines.h
++ * @brief Linux IOH Gigabit Ethernet Driver defines macro header file
++ *
++ * @version 0.90
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR CO., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 09/21/2009
++ * modified:
++ *
++ */
++#ifndef _IOH_GBE_DEFINES_H_
++#define _IOH_GBE_DEFINES_H_
++
++#include "pch_gbe_pci_ids.h" /* PCI vendor/device ID */
++
++/* DEBUG OPTION */
++/* #define DEBUG_TEST */
++/* #define NVM_MAC_FIX *//* MAC: 00 21 97 77 65 13 */
++/* #define NVM_DUMMY_READ */
++#define PHY_RESET_REG_INIT
++
++#ifdef DEBUG_TEST
++#define IOH_GBE_NETIF_MSG_DEFAULT 0x7fff /* ALL Enable */
++#else
++#define IOH_GBE_NETIF_MSG_DEFAULT 0x0000 /* All Disable */
++#endif
++/*-- Kind of Message --------------------------
++ NETIF_MSG_DRV = 0x0001,
++ NETIF_MSG_PROBE = 0x0002,
++ NETIF_MSG_LINK = 0x0004,
++ NETIF_MSG_TIMER = 0x0008,
++ NETIF_MSG_IFDOWN = 0x0010,
++ NETIF_MSG_IFUP = 0x0020,
++ NETIF_MSG_RX_ERR = 0x0040,
++ NETIF_MSG_TX_ERR = 0x0080,
++ NETIF_MSG_TX_QUEUED = 0x0100,
++ NETIF_MSG_INTR = 0x0200,
++ NETIF_MSG_TX_DONE = 0x0400,
++ NETIF_MSG_RX_STATUS = 0x0800,
++ NETIF_MSG_PKTDATA = 0x1000,
++ NETIF_MSG_HW = 0x2000,
++ NETIF_MSG_WOL = 0x4000,
++-----------------------------------------------*/
++
++#ifdef DEBUG_TEST
++#define IOH_GBE_ERR(args...) printk(KERN_ERR DRV_NAME": " args)
++#define IOH_GBE_DBGOUT(S) printk(KERN_INFO S)
++/* #define IOH_GBE_DBGOUT1(S, A...) printk(KERN_INFO S, A) */
++#define IOH_GBE_DBGOUT1(S, A...)
++/* #define IOH_GBE_DBGOUT2(S) IOH_GBE_DBGOUT(S) */
++#define IOH_GBE_DBGOUT2(S, A...)
++#define IOH_GBE_TESTOUT(S, A...) printk(KERN_INFO " TEST_OUT:"S, A)
++/* #define IOH_GBE_TESTOUT2(S, A...) printk(KERN_INFO " TEST_OUT:"S, A) */
++#define IOH_GBE_TESTOUT2(S, A...)
++#define IOH_GBE_DBGFUNC(F) (IOH_GBE_DBGOUT(F "\n"))
++#else
++#define IOH_GBE_ERR(args...) printk(KERN_ERR DRV_NAME": " args)
++#define IOH_GBE_DBGOUT(S)
++#define IOH_GBE_DBGOUT1(S, A...)
++#define IOH_GBE_DBGOUT2(S, A...)
++#define IOH_GBE_TESTOUT(S, A...)
++#define IOH_GBE_TESTOUT2(S, A...)
++#define IOH_GBE_DBGFUNC(F)
++#endif
++
++/* IF OPTION */
++#define IOH_GBE_MAC_IFOP_RGMII
++#define IOH_GBE_MAC_RGMII_CTRL_SETTING ( \
++ IOH_GBE_CHIP_TYPE_INTERNAL | \
++ IOH_GBE_RGMII_MODE_RGMII | \
++ IOH_GBE_CRS_SEL \
++ )
++
++/* TX/RX descriptor defines */
++#define IOH_GBE_DEFAULT_TXD 256
++#define IOH_GBE_MAX_TXD 4096
++#define IOH_GBE_MIN_TXD 8
++#define IOH_GBE_DEFAULT_RXD 256
++#define IOH_GBE_MAX_RXD 4096
++#define IOH_GBE_MIN_RXD 8
++/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
++#define IOH_GBE_TX_DESC_MULTIPLE 8
++#define IOH_GBE_RX_DESC_MULTIPLE 8
++
++/* Checksum Offload defines Enable/Disable */
++#define IOH_GBE_DEFAULT_RX_CSUM TRUE /* TRUEorFALSE */
++#define IOH_GBE_DEFAULT_TX_CSUM TRUE /* TRUEorFALSE */
++
++/* Copybreak default */
++#define IOH_GBE_COPYBREAK_DEFAULT 256
++#define IOH_GBE_PCI_BAR 1
++
++/* Device Driver information */
++#define DRV_NAME "ioh_gbe"
++#define DRV_STRING "IOH Network Driver"
++#define DRV_EXT "-NAPI"
++#define DRV_VERSION "0.91"DRV_EXT
++#define DRV_DESCRIPTION \
++ "OKI semiconductor sample Linux driver for IOH Gigabit ethernet"
++#define DRV_COPYRIGHT "Copyright(c) 2009 OKI semiconductor"
++#define FIRM_VERSION "N/A"
++
++#define IOH_GBE_MAC_REGS_LEN 76
++#define IOH_GBE_PHY_REGS_LEN 32
++#define IOH_GBE_REGS_LEN (IOH_GBE_MAC_REGS_LEN + IOH_GBE_PHY_REGS_LEN)
++
++/* #define IOH_GBE_DMA_ALIGN 48 */
++#define IOH_GBE_DMA_ALIGN (32) /*for 2.6.33-rc3 */
++#define IOH_GBE_ETH_ALEN 6
++
++/* Initialize the wake-on-LAN settings */
++#define IOH_GBE_WL_INIT_SETTING ( \
++ IOH_GBE_WLC_BR |\
++ IOH_GBE_WLC_MLT |\
++ IOH_GBE_WLC_IND |\
++ IOH_GBE_WLC_MP \
++ )
++
++/* This defines the bits that are set in the Interrupt Mask
++ * Set/Read Register. Each bit is documented below:
++ * o RXT0 = Receiver Timer Interrupt (ring 0)
++ * o TXDW = Transmit Descriptor Written Back
++ * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
++ * o RXSEQ = Receive Sequence Error
++ * o LSC = Link Status Change
++ */
++#define IOH_GBE_INT_ENABLE_MASK ( \
++ IOH_GBE_INT_RX_DMA_CMPLT | \
++ IOH_GBE_INT_RX_DSC_EMP | \
++ IOH_GBE_INT_WOL_DET | \
++ IOH_GBE_INT_TX_CMPLT \
++ )
++
++/* Ethertype field values */
++#define IOH_GBE_MAX_JUMBO_FRAME_SIZE (10318)
++#define IOH_GBE_FRAME_SIZE_2048 (2048)
++#define IOH_GBE_FRAME_SIZE_4096 (4096)
++#define IOH_GBE_FRAME_SIZE_8192 (8192)
++
++/* watchdog time */
++#define IOH_GBE_WATCHDOG_PERIOD (1 * HZ)
++
++#define IOH_GBE_TX_WEIGHT 64
++#define IOH_GBE_RX_WEIGHT 64
++#define IOH_GBE_RX_BUFFER_WRITE 16
++
++#define DSC_INIT16 0xC000
++
++/* MAC Address */
++/* Number of high/low register pairs in the MAC_ADR. The MAC_ADR (MAC Address
++ * Registers) holds the directed and multicast addresses that we monitor.
++ * Technically, we have 16 spots. However, we reserve one of these spots
++ * (MAC_ADR[15]) for our directed address used by controllers with
++ * manageability enabled, allowing us room for 15 multicast addresses.
++ */
++#define IOH_GBE_MAR_ENTRIES 16
++#define IOH_GBE_SHORT_PKT 64
++
++/* PHY param */
++#define IOH_GBE_PHY_RESET_DELAY_US 10
++/* NVM param */
++#define IOH_GBE_NVM_WORD_SIZE 3 /* 16bit word size */
++
++/* Error Codes */
++#define IOH_GBE_SUCCESS 0
++#define IOH_GBE_ERR_NVM 1
++#define IOH_GBE_ERR_PHY 2
++#define IOH_GBE_ERR_CONFIG 3
++#define IOH_GBE_ERR_PARAM 4
++#define IOH_GBE_ERR_MAC_INIT 5
++#define IOH_GBE_ERR_PHY_TYPE 6
++#define IOH_GBE_ERR_RESET 9
++#define IOH_GBE_ERR_MASTER_REQUESTS_PENDING 10
++#define IOH_GBE_ERR_HOST_INTERFACE_COMMAND 11
++#define IOH_GBE_BLK_PHY_RESET 12
++#define IOH_GBE_ERR_SWFW_SYNC 13
++#define IOH_GBE_NOT_IMPLEMENTED 14
++
++#define PHY_MAX_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
++/* PHY 1000 MII Register/Bit Definitions */
++/* PHY Registers defined by IEEE */
++#define PHY_CONTROL 0x00 /* Control Register */
++#define PHY_STATUS 0x01 /* Status Register */
++#define PHY_ID1 0x02 /* Phy Id Register (word 1) */
++#define PHY_ID2 0x03 /* Phy Id Register (word 2) */
++#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
++#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
++#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Register */
++#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */
++#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
++#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Register */
++#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Register */
++#define PHY_EXT_STATUS 0x0F /* Extended Status Register */
++#define PHY_PHYSP_CONTROL 0x10 /* PHY Specific Control Register */
++#define PHY_EXT_PHYSP_CONTROL 0x14 /* Extended PHY Specific Control Register */
++#define PHY_LED_CONTROL 0x18 /* LED Control Register */
++#define PHY_EXT_PHYSP_STATUS 0x1B /* Extended PHY Specific Status Register */
++
++/* PHY Control Register */
++#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
++#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
++#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
++#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
++#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
++#define MII_CR_POWER_DOWN 0x0800 /* Power down */
++#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
++#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
++#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
++#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
++#define MII_CR_SPEED_1000 0x0040
++#define MII_CR_SPEED_100 0x2000
++#define MII_CR_SPEED_10 0x0000
++
++/* PHY Status Register */
++#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
++#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
++#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
++#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
++#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
++#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
++#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
++#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
++#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
++#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
++#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
++#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
++#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
++#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
++#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
++
++/* Phy Id Register (word 2) */
++#define PHY_REVISION_MASK 0x000F
++
++/* Autoneg Advertisement Register */
++#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
++#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
++#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
++#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
++#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
++#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
++#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
++#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
++#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
++#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
++
++/* Link Partner Ability Register (Base Page) */
++#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
++#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */
++#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */
++#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */
++#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */
++#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */
++#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
++#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
++#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */
++#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */
++#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */
++
++/* Autoneg Expansion Register */
++#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
++#define NWAY_ER_PAGE_RXD 0x0002 /* LP 10T Half Dplx Capable? No: Page received */
++#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* Local device is next page able */
++#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is next page able */
++#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* Parallel detection fault */
++
++/* 1000BASE-T Control Register */
++#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
++#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
++#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
++#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */
++ /* 0=DTE device */
++#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
++ /* 0=Configure PHY as Slave */
++#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
++ /* 0=Automatic Master/Slave config */
++#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
++#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
++#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
++#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
++#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
++
++/* 1000BASE-T Status Register */
++#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */
++#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */
++#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
++#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
++#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
++#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
++#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */
++#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
++
++/* PHY Specific Control Register */
++#define PHYSP_CTRL_ASSERT_CRS_TX 0x0800
++
++/* LED Control Register */
++#define PHY_LED_CTRL_ON 0x4103
++#define PHY_LED_CTRL_OFF 0x4102
++#define PHY_LED_CTRL_CLEANUP 0x4100
++
++/* Extended PHY Specific Status Register */
++#define HWCFG_MODE_GMII_COPPER 0x000F /* GMII to Copper */
++#define HWCFG_MODE_RGMII_COPPER 0x000B /* RGMII/Modified MII to Copper */
++#define HWCFG_MODE_GMII_FIBER 0x0007 /* GMII to Fiber */
++#define HWCFG_MODE_RGMII_FIBER 0x0003 /* RGMII to Fiber */
++#define HWCFG_MODE_GMII_SGMII 0x000E /* GMII to SGMII */
++#define HWCFG_MODE_RGMII_SGMII 0x0006 /* RGMII to SGMII */
++#define HWCFG_MODE_TBI_COPPER 0x000D /* TBI to Copper */
++#define HWCFG_MODE_RTBI_COPPER 0x0009 /* RTBI to Copper */
++#define HWCFG_MODE_MASK 0x000F
++
++#define PHY_SPEED_10 10
++#define PHY_SPEED_100 100
++#define PHY_SPEED_1000 1000
++#define PHY_HALF_DUPLEX 1
++#define PHY_FULL_DUPLEX 2
++
++#define PHY_ADVERTISE_10_HALF 0x0001
++#define PHY_ADVERTISE_10_FULL 0x0002
++#define PHY_ADVERTISE_100_HALF 0x0004
++#define PHY_ADVERTISE_100_FULL 0x0008
++#define PHY_ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
++#define PHY_ADVERTISE_1000_FULL 0x0020
++
++/* 1000/H is not supported, nor spec-compliant. */
++#define IOH_GBE_ALL_SPEED_DUPLEX (PHY_ADVERTISE_10_HALF | \
++ PHY_ADVERTISE_10_FULL | \
++ PHY_ADVERTISE_100_HALF | \
++ PHY_ADVERTISE_100_FULL | \
++ PHY_ADVERTISE_1000_FULL)
++#define IOH_GBE_ALL_NOT_GIG (PHY_ADVERTISE_10_HALF | \
++ PHY_ADVERTISE_10_FULL | \
++ PHY_ADVERTISE_100_HALF | \
++ PHY_ADVERTISE_100_FULL)
++#define IOH_GBE_ALL_100_SPEED (PHY_ADVERTISE_100_HALF | \
++ PHY_ADVERTISE_100_FULL)
++#define IOH_GBE_ALL_10_SPEED (PHY_ADVERTISE_10_HALF | \
++ PHY_ADVERTISE_10_FULL)
++#define IOH_GBE_ALL_FULL_DUPLEX (PHY_ADVERTISE_10_FULL | \
++ PHY_ADVERTISE_100_FULL | \
++ PHY_ADVERTISE_1000_FULL)
++#define IOH_GBE_ALL_HALF_DUPLEX (PHY_ADVERTISE_10_HALF | \
++ PHY_ADVERTISE_100_HALF)
++
++#define AUTONEG_ADVERTISE_SPEED_DEFAULT IOH_GBE_ALL_SPEED_DUPLEX
++
++#endif
+diff -urN linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_ethtool.c topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_ethtool.c
+--- linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_ethtool.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_ethtool.c 2010-03-11 15:12:31.000000000 +0900
+@@ -0,0 +1,1306 @@
++/*!
++ * @file ioh_gbe_ethtool.c
++ * @brief Linux IOH Gigabit Ethernet Ethtool Driver source file
++ *
++ * @version 0.90
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR CO., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 09/21/2009
++ * modified:
++ *
++ */
++
++#include <linux/pci.h>
++#include <linux/netdevice.h>
++#include <linux/ethtool.h>
++#include <linux/mii.h>
++#include <linux/sched.h>
++#include <linux/uaccess.h>
++
++#include "pch_gbe_osdep.h"
++#include "pch_gbe_regs.h"
++#include "pch_gbe_defines.h"
++#include "pch_gbe_hw.h"
++#include "pch_gbe_api.h"
++#include "pch_gbe.h"
++
++
++/* ----------------------------------------------------------------------------
++ Function prototype
++---------------------------------------------------------------------------- */
++static int ioh_gbe_get_settings(struct net_device *netdev,
++ struct ethtool_cmd *ecmd);
++static int ioh_gbe_set_settings(struct net_device *netdev,
++ struct ethtool_cmd *ecmd);
++static void ioh_gbe_get_drvinfo(struct net_device *netdev,
++ struct ethtool_drvinfo *drvinfo);
++static int ioh_gbe_get_regs_len(struct net_device *netdev);
++static void ioh_gbe_get_regs(struct net_device *netdev,
++ struct ethtool_regs *regs, void *p);
++static void ioh_gbe_get_wol(struct net_device *netdev,
++ struct ethtool_wolinfo *wol);
++static int ioh_gbe_set_wol(struct net_device *netdev,
++ struct ethtool_wolinfo *wol);
++static u32 ioh_gbe_get_msglevel(struct net_device *netdev);
++static void ioh_gbe_set_msglevel(struct net_device *netdev, u32 data);
++static int ioh_gbe_nway_reset(struct net_device *netdev);
++#ifdef CONFIG_PCH_PCIEQOS
++static int ioh_gbe_get_eeprom_len(struct net_device *netdev);
++static int ioh_gbe_get_eeprom(struct net_device *netdev,
++ struct ethtool_eeprom *eeprom, u8 *bytes);
++static int ioh_gbe_set_eeprom(struct net_device *netdev,
++ struct ethtool_eeprom *eeprom, u8 *bytes);
++#endif
++static void ioh_gbe_get_ringparam(struct net_device *netdev,
++ struct ethtool_ringparam *ring);
++static int ioh_gbe_set_ringparam(struct net_device *netdev,
++ struct ethtool_ringparam *ring);
++static void ioh_gbe_get_pauseparam(struct net_device *netdev,
++ struct ethtool_pauseparam *pause);
++static int ioh_gbe_set_pauseparam(struct net_device *netdev,
++ struct ethtool_pauseparam *pause);
++static u32 ioh_gbe_get_rx_csum(struct net_device *netdev);
++static int ioh_gbe_set_rx_csum(struct net_device *netdev, u32 data);
++static u32 ioh_gbe_get_tx_csum(struct net_device *netdev);
++static int ioh_gbe_set_tx_csum(struct net_device *netdev, u32 data);
++static void ioh_gbe_diag_test(struct net_device *netdev,
++ struct ethtool_test *eth_test, u64 *data);
++static void ioh_gbe_get_strings(struct net_device *netdev,
++ u32 stringset, u8 *data);
++static void ioh_gbe_led_blink_callback(unsigned long data);
++static int ioh_gbe_phys_id(struct net_device *netdev, u32 data);
++static void ioh_gbe_get_ethtool_stats(struct net_device *netdev,
++ struct ethtool_stats *stats, u64 *data);
++static int ioh_gbe_reg_test(struct ioh_gbe_adapter *adapter, uint64_t *data);
++static bool reg_pattern_test(struct ioh_gbe_adapter *adapter, uint64_t *data,
++ int reg, uint32_t mask, uint32_t write);
++
++/* ----------------------------------------------------------------------------
++ Data
++---------------------------------------------------------------------------- */
++/*!
++ * @ingroup Ethtool driver Layer
++ * @struct ioh_gbe_stats
++ * @brief Stats item information
++ */
++struct ioh_gbe_stats {
++ signed char stat_string[ETH_GSTRING_LEN];
++ int sizeof_stat;
++ int stat_offset;
++};
++
++#define IOH_GBE_STAT1(m) (int)(sizeof(((struct ioh_gbe_adapter *)0)->m))
++#define IOH_GBE_STAT2(m) offsetof(struct ioh_gbe_adapter, m)
++
++/*!
++ * @ingroup Ethtool driver Layer
++ * @struct ioh_gbe_gstrings_stats
++ * @brief ethtool information status name list
++ */
++static const struct ioh_gbe_stats ioh_gbe_gstrings_stats[] = {
++ {"rx_packets", IOH_GBE_STAT1(stats.rx_packets),
++ IOH_GBE_STAT2(stats.rx_packets)},
++ {"tx_packets", IOH_GBE_STAT1(stats.tx_packets),
++ IOH_GBE_STAT2(stats.tx_packets)},
++ {"rx_bytes", IOH_GBE_STAT1(stats.rx_bytes),
++ IOH_GBE_STAT2(stats.rx_bytes)},
++ {"tx_bytes", IOH_GBE_STAT1(stats.tx_bytes),
++ IOH_GBE_STAT2(stats.tx_bytes)},
++ {"rx_errors", IOH_GBE_STAT1(stats.rx_errors),
++ IOH_GBE_STAT2(stats.rx_errors)},
++ {"tx_errors", IOH_GBE_STAT1(stats.tx_errors),
++ IOH_GBE_STAT2(stats.tx_errors)},
++ {"rx_dropped", IOH_GBE_STAT1(stats.rx_dropped),
++ IOH_GBE_STAT2(stats.rx_dropped)},
++ {"tx_dropped", IOH_GBE_STAT1(stats.tx_dropped),
++ IOH_GBE_STAT2(stats.tx_dropped)},
++ {"multicast", IOH_GBE_STAT1(stats.multicast),
++ IOH_GBE_STAT2(stats.multicast)},
++ {"collisions", IOH_GBE_STAT1(stats.collisions),
++ IOH_GBE_STAT2(stats.collisions)},
++ {"rx_crc_errors", IOH_GBE_STAT1(stats.rx_crc_errors),
++ IOH_GBE_STAT2(stats.rx_crc_errors)},
++ {"rx_frame_errors", IOH_GBE_STAT1(stats.rx_frame_errors),
++ IOH_GBE_STAT2(stats.rx_frame_errors)},
++ {"rx_buff_failed", IOH_GBE_STAT1(stats.rx_alloc_buff_failed),
++ IOH_GBE_STAT2(stats.rx_alloc_buff_failed)},
++ {"tx_length_errors", IOH_GBE_STAT1(stats.tx_length_errors),
++ IOH_GBE_STAT2(stats.tx_length_errors)},
++ {"tx_aborted_errors", IOH_GBE_STAT1(stats.tx_aborted_errors),
++ IOH_GBE_STAT2(stats.tx_aborted_errors)},
++ {"tx_carrier_errors", IOH_GBE_STAT1(stats.tx_carrier_errors),
++ IOH_GBE_STAT2(stats.tx_carrier_errors)},
++ {"tx_timeout_count", IOH_GBE_STAT1(stats.tx_timeout_count),
++ IOH_GBE_STAT2(stats.tx_timeout_count)},
++ {"tx_restart_count", IOH_GBE_STAT1(stats.tx_restart_count),
++ IOH_GBE_STAT2(stats.tx_restart_count)},
++ {"intr_rx_dsc_empty_count",
++ IOH_GBE_STAT1(stats.intr_rx_dsc_empty_count),
++ IOH_GBE_STAT2(stats.intr_rx_dsc_empty_count)},
++ {"intr_rx_frame_err_count",
++ IOH_GBE_STAT1(stats.intr_rx_frame_err_count),
++ IOH_GBE_STAT2(stats.intr_rx_frame_err_count)},
++ {"intr_rx_fifo_err_count", IOH_GBE_STAT1(stats.intr_rx_fifo_err_count),
++ IOH_GBE_STAT2(stats.intr_rx_fifo_err_count)},
++ {"intr_rx_dma_err_count", IOH_GBE_STAT1(stats.intr_rx_dma_err_count),
++ IOH_GBE_STAT2(stats.intr_rx_dma_err_count)},
++ {"intr_tx_fifo_err_count", IOH_GBE_STAT1(stats.intr_tx_fifo_err_count),
++ IOH_GBE_STAT2(stats.intr_tx_fifo_err_count)},
++ {"intr_tx_dma_err_count", IOH_GBE_STAT1(stats.intr_tx_dma_err_count),
++ IOH_GBE_STAT2(stats.intr_tx_dma_err_count)},
++ {"intr_tcpip_err_count", IOH_GBE_STAT1(stats.intr_tcpip_err_count),
++ IOH_GBE_STAT2(stats.intr_tcpip_err_count)}
++};
++
++#define IOH_GBE_QUEUE_STATS_LEN 0
++#define IOH_GBE_GLOBAL_STATS_LEN \
++((int)sizeof(ioh_gbe_gstrings_stats) / (int)sizeof(struct ioh_gbe_stats))
++
++/*!
++ * @ingroup Ethtool driver
++ * @def IOH_GBE_STATS_LEN
++ * @brief The size of status
++*/
++#define IOH_GBE_STATS_LEN (IOH_GBE_GLOBAL_STATS_LEN + IOH_GBE_QUEUE_STATS_LEN)
++
++/*!
++ * @ingroup Ethtool driver Layer
++ * @struct ioh_gbe_gstrings_test
++ * @brief self test item name list
++ */
++static const signed char ioh_gbe_gstrings_test[][ETH_GSTRING_LEN] = {
++ "Register test (offline)"
++};
++
++/*!
++ * @ingroup Ethtool driver
++ * @def IOH_GBE_TEST_LEN
++ * @brief The size of test packet
++*/
++#define IOH_GBE_TEST_LEN \
++ ((int)sizeof(ioh_gbe_gstrings_test) / ETH_GSTRING_LEN)
++
++/*!
++ * @ingroup Ethtool driver
++ * @def IOH_GBE_ID_INTERVAL
++ * @brief Toggle LED 4 times per second = 2 "blinks" per second
++*/
++#define IOH_GBE_ID_INTERVAL (HZ/4)
++
++/*!
++ * @ingroup Ethtool driver
++ * @def IOH_GBE_LED_ON
++ * @brief Bit defines for adapter->led_status
++*/
++#define IOH_GBE_LED_ON 0
++
++/* ----------------------------------------------------------------------------
++ Function
++---------------------------------------------------------------------------- */
++/*!
++ * @ingroup Ethtool driver
++ * @fn static int ioh_gbe_get_settings(struct net_device *netdev,
++ * struct ethtool_cmd *ecmd)
++ * @brief Get device-specific settings
++ * @param netdev [INOUT] Network interface device structure
++ * @param ecmd [INOUT] Ethtool command
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++static int
++ioh_gbe_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++ int ret;
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_get_settings");
++
++ ret = mii_ethtool_gset(&adapter->mii, ecmd);
++ ecmd->supported &= (u32) (~SUPPORTED_TP);
++ ecmd->supported &= (u32) (~SUPPORTED_1000baseT_Half);
++ ecmd->advertising &= (u32) (~ADVERTISED_TP);
++ ecmd->advertising &= (u32) (~ADVERTISED_1000baseT_Half);
++
++ if (netif_carrier_ok(adapter->netdev)) {
++ ;
++ } else {
++ ecmd->speed = 0xFFFF;
++ ecmd->duplex = 0xFF;
++ }
++ return ret;
++}
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static int ioh_gbe_set_settings(struct net_device *netdev,
++ * struct ethtool_cmd *ecmd)
++ * @brief Set device-specific settings
++ * @param netdev [INOUT] Network interface device structure
++ * @param ecmd [INOUT] Ethtool command
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++static int
++ioh_gbe_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ int ret;
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_set_settings");
++
++ while (test_and_set_bit(__IOH_GBE_RESETTING, &adapter->flags) != 0)
++ msleep(1);
++ ioh_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET);
++
++ if (ecmd->speed == 0xFFFF)
++ ecmd->speed = SPEED_1000;
++ if (ecmd->duplex == 0xFF)
++ ecmd->duplex = DUPLEX_FULL;
++ ret = mii_ethtool_sset(&adapter->mii, ecmd);
++ if (ret != 0) {
++ IOH_GBE_ERR("Error: mii_ethtool_sset\n");
++ clear_bit(__IOH_GBE_RESETTING, &adapter->flags);
++ return ret;
++ }
++ hw->mac.link_speed = ecmd->speed;
++ hw->mac.link_duplex = ecmd->duplex;
++ hw->phy.autoneg_advertised = ecmd->advertising;
++ hw->mac.autoneg = ecmd->autoneg;
++ ioh_gbe_hal_phy_sw_reset(hw);
++
++ /* reset the link */
++ if (netif_running(adapter->netdev) != 0) {
++ ioh_gbe_down(adapter);
++ ret = ioh_gbe_up(adapter);
++ } else {
++ ioh_gbe_reset(adapter);
++ }
++
++ clear_bit(__IOH_GBE_RESETTING, &adapter->flags);
++ return ret;
++}
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static void ioh_gbe_get_drvinfo(struct net_device *netdev,
++ * struct ethtool_drvinfo *drvinfo)
++ * @brief Report driver information
++ * @param netdev [INOUT] Network interface device structure
++ * @param drvinfo [INOUT] Driver information structure
++ * @return None
++ */
++static void
++ioh_gbe_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_get_drvinfo");
++
++ strcpy(drvinfo->driver, DRV_NAME);
++ strcpy(drvinfo->version, DRV_VERSION);
++ strcpy(drvinfo->fw_version, FIRM_VERSION);
++ strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
++
++ drvinfo->n_stats = IOH_GBE_STATS_LEN;
++ drvinfo->testinfo_len = IOH_GBE_TEST_LEN;
++ drvinfo->regdump_len = ioh_gbe_get_regs_len(netdev);
++#ifdef CONFIG_PCH_PCIEQOS
++ drvinfo->eedump_len = ioh_gbe_get_eeprom_len(netdev);
++#endif
++}
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static int ioh_gbe_get_regs_len(struct net_device *netdev)
++ * @brief Report the size of device registers
++ * @param netdev [INOUT] Network interface device structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++static int ioh_gbe_get_regs_len(struct net_device *netdev)
++{
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_get_regs_len");
++
++ return IOH_GBE_REGS_LEN * (int)sizeof(u32);
++}
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static void ioh_gbe_get_regs(struct net_device *netdev,
++ * struct ethtool_regs *regs, void *p)
++ * @brief Get device registers
++ * @param netdev [INOUT] Network interface device structure
++ * @param regs [INOUT] Ethtool register structure
++ * @param p [INOUT] Buffer pointer of read device register data
++ * @return None
++ */
++static void
++ioh_gbe_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ u32 *regs_buff = p;
++ u16 i, reg, tmp;
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_get_regs");
++
++ regs->version = hw->revision_id;
++ regs->version = 0x1000000 | (regs->version << 16) | hw->device_id;
++
++ memset(p, 0, IOH_GBE_REGS_LEN * (int)sizeof(u32));
++ /* 000: */
++ regs_buff[0] = IOH_GBE_READ_REG(hw, INT_ST);
++ regs_buff[1] = IOH_GBE_READ_REG(hw, INT_EN);
++ regs_buff[2] = IOH_GBE_READ_REG(hw, MODE);
++ regs_buff[3] = IOH_GBE_READ_REG(hw, RESET);
++ /* 010: */
++ regs_buff[4] = IOH_GBE_READ_REG(hw, TCPIP_ACC);
++ regs_buff[5] = IOH_GBE_READ_REG(hw, EX_LIST);
++ regs_buff[6] = IOH_GBE_READ_REG(hw, INT_ST_HOLD);
++ regs_buff[7] = IOH_GBE_READ_REG(hw, PHY_INT_CTRL);
++ /* 020: */
++ regs_buff[8] = IOH_GBE_READ_REG(hw, MAC_RX_EN);
++ regs_buff[9] = IOH_GBE_READ_REG(hw, RX_FCTRL);
++ regs_buff[10] = IOH_GBE_READ_REG(hw, PAUSE_REQ);
++ regs_buff[11] = IOH_GBE_READ_REG(hw, RX_MODE);
++ /* 030: */
++ regs_buff[12] = IOH_GBE_READ_REG(hw, TX_MODE);
++ regs_buff[13] = IOH_GBE_READ_REG(hw, RX_FIFO_ST);
++ regs_buff[14] = IOH_GBE_READ_REG(hw, TX_FIFO_ST);
++ regs_buff[15] = IOH_GBE_READ_REG(hw, TX_FID);
++ /* 040: */
++ regs_buff[16] = IOH_GBE_READ_REG(hw, TX_RESULT);
++ regs_buff[17] = IOH_GBE_READ_REG(hw, PAUSE_PKT1);
++ regs_buff[18] = IOH_GBE_READ_REG(hw, PAUSE_PKT2);
++ regs_buff[19] = IOH_GBE_READ_REG(hw, PAUSE_PKT3);
++ /* 050: */
++ regs_buff[20] = IOH_GBE_READ_REG(hw, PAUSE_PKT4);
++ regs_buff[21] = IOH_GBE_READ_REG(hw, PAUSE_PKT5);
++ regs_buff[22] = IOH_GBE_READ_REG(hw, MAC_ADR);
++ regs_buff[23] = IOH_GBE_READ_REG(hw, MAC_ADR1A);
++ /* 060: */
++ regs_buff[24] = IOH_GBE_READ_REG(hw, MAC_ADR1B);
++ regs_buff[25] = IOH_GBE_READ_REG(hw, MAC_ADR2A);
++ regs_buff[26] = IOH_GBE_READ_REG(hw, MAC_ADR2B);
++ regs_buff[27] = IOH_GBE_READ_REG(hw, MAC_ADR3A);
++ /* 070: */
++ regs_buff[28] = IOH_GBE_READ_REG(hw, MAC_ADR3B);
++ regs_buff[29] = IOH_GBE_READ_REG(hw, MAC_ADR4A);
++ regs_buff[30] = IOH_GBE_READ_REG(hw, MAC_ADR4B);
++ regs_buff[31] = IOH_GBE_READ_REG(hw, MAC_ADR5A);
++ /* 080: */
++ regs_buff[32] = IOH_GBE_READ_REG(hw, MAC_ADR5B);
++ regs_buff[33] = IOH_GBE_READ_REG(hw, MAC_ADR6A);
++ regs_buff[34] = IOH_GBE_READ_REG(hw, MAC_ADR6B);
++ regs_buff[35] = IOH_GBE_READ_REG(hw, MAC_ADR7A);
++ /* 090: */
++ regs_buff[36] = IOH_GBE_READ_REG(hw, MAC_ADR7B);
++ regs_buff[37] = IOH_GBE_READ_REG(hw, MAC_ADR8A);
++ regs_buff[38] = IOH_GBE_READ_REG(hw, MAC_ADR8B);
++ regs_buff[39] = IOH_GBE_READ_REG(hw, MAC_ADR9A);
++ /* 0a0: */
++ regs_buff[40] = IOH_GBE_READ_REG(hw, MAC_ADR9B);
++ regs_buff[41] = IOH_GBE_READ_REG(hw, MAC_ADR10A);
++ regs_buff[42] = IOH_GBE_READ_REG(hw, MAC_ADR10B);
++ regs_buff[43] = IOH_GBE_READ_REG(hw, MAC_ADR11A);
++ /* 0b0: */
++ regs_buff[44] = IOH_GBE_READ_REG(hw, MAC_ADR11B);
++ regs_buff[45] = IOH_GBE_READ_REG(hw, MAC_ADR12A);
++ regs_buff[46] = IOH_GBE_READ_REG(hw, MAC_ADR12B);
++ regs_buff[47] = IOH_GBE_READ_REG(hw, MAC_ADR13A);
++ /* 0c0: */
++ regs_buff[48] = IOH_GBE_READ_REG(hw, MAC_ADR13B);
++ regs_buff[49] = IOH_GBE_READ_REG(hw, MAC_ADR14A);
++ regs_buff[50] = IOH_GBE_READ_REG(hw, MAC_ADR14B);
++ regs_buff[51] = IOH_GBE_READ_REG(hw, MAC_ADR15A);
++ /* 0d0: */
++ regs_buff[52] = IOH_GBE_READ_REG(hw, MAC_ADR15B);
++ regs_buff[53] = IOH_GBE_READ_REG(hw, MAC_ADR16A);
++ regs_buff[54] = IOH_GBE_READ_REG(hw, MAC_ADR16B);
++ regs_buff[55] = IOH_GBE_READ_REG(hw, ADDR_MASK);
++ /* 0e0: */
++ regs_buff[56] = IOH_GBE_READ_REG(hw, MIIM);
++ regs_buff[57] = IOH_GBE_READ_REG(hw, RGMII_ST);
++ regs_buff[58] = IOH_GBE_READ_REG(hw, RGMII_CTRL);
++ regs_buff[59] = IOH_GBE_READ_REG(hw, DMA_CTRL);
++ /* 0f0: */
++ regs_buff[60] = IOH_GBE_READ_REG(hw, RX_DSC_BASE);
++ regs_buff[61] = IOH_GBE_READ_REG(hw, RX_DSC_SIZE);
++ regs_buff[62] = IOH_GBE_READ_REG(hw, RX_DSC_HW_P);
++ regs_buff[63] = IOH_GBE_READ_REG(hw, RX_DSC_HW_P_HLD);
++ /* 100: */
++ regs_buff[64] = IOH_GBE_READ_REG(hw, RX_DSC_SW_P);
++ regs_buff[65] = IOH_GBE_READ_REG(hw, TX_DSC_BASE);
++ regs_buff[66] = IOH_GBE_READ_REG(hw, TX_DSC_SIZE);
++ regs_buff[67] = IOH_GBE_READ_REG(hw, TX_DSC_HW_P);
++ /* 110: */
++ regs_buff[68] = IOH_GBE_READ_REG(hw, TX_DSC_HW_P_HLD);
++ regs_buff[69] = IOH_GBE_READ_REG(hw, TX_DSC_SW_P);
++ regs_buff[70] = IOH_GBE_READ_REG(hw, RX_DMA_ST);
++ regs_buff[71] = IOH_GBE_READ_REG(hw, TX_DMA_ST);
++ /* 120: */
++ regs_buff[72] = IOH_GBE_READ_REG(hw, WOL_ST);
++ regs_buff[73] = IOH_GBE_READ_REG(hw, WOL_CTRL);
++ regs_buff[74] = IOH_GBE_READ_REG(hw, WOL_ADDR_MASK);
++ regs_buff[75] = 0x00000000; /* Dummy read */
++
++ /* 130: */
++ /* PHY register */
++ for (i = IOH_GBE_MAC_REGS_LEN, reg = 0; reg < IOH_GBE_PHY_REGS_LEN;
++ i++, reg++) {
++ ioh_gbe_hal_read_phy_reg(&adapter->hw, reg, &tmp);
++ regs_buff[i] = tmp;
++ }
++
++}
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static void ioh_gbe_get_wol(struct net_device *netdev,
++ * struct ethtool_wolinfo *wol)
++ * @brief Report whether Wake-on-Lan is enabled
++ * @param netdev [INOUT] Network interface device structure
++ * @param wol [OUT] Wake-on-Lan information
++ * @return None
++ */
++static void
++ioh_gbe_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_get_wol");
++
++ wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
++ wol->wolopts = 0;
++
++ if ((adapter->wake_up_evt & IOH_GBE_WLC_IND) != 0)
++ wol->wolopts |= WAKE_UCAST;
++ if ((adapter->wake_up_evt & IOH_GBE_WLC_MLT) != 0)
++ wol->wolopts |= WAKE_MCAST;
++ if ((adapter->wake_up_evt & IOH_GBE_WLC_BR) != 0)
++ wol->wolopts |= WAKE_BCAST;
++ if ((adapter->wake_up_evt & IOH_GBE_WLC_MP) != 0)
++ wol->wolopts |= WAKE_MAGIC;
++ return;
++}
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static int ioh_gbe_set_wol(struct net_device *netdev,
++ * struct ethtool_wolinfo *wol)
++ * @brief Turn Wake-on-Lan on or off
++ * @param netdev [INOUT] Network interface device structure
++ * @param wol [IN] Pointer of wake-on-Lan information straucture
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++static int
++ioh_gbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_set_wol");
++
++ if ((wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) != 0)
++ return -EOPNOTSUPP;
++ /* these settings will always override what we currently have */
++ adapter->wake_up_evt = 0;
++
++ if ((wol->wolopts & WAKE_UCAST) != 0)
++ adapter->wake_up_evt |= IOH_GBE_WLC_IND;
++ if ((wol->wolopts & WAKE_MCAST) != 0)
++ adapter->wake_up_evt |= IOH_GBE_WLC_MLT;
++ if ((wol->wolopts & WAKE_BCAST) != 0)
++ adapter->wake_up_evt |= IOH_GBE_WLC_BR;
++ if ((wol->wolopts & WAKE_MAGIC) != 0)
++ adapter->wake_up_evt |= IOH_GBE_WLC_MP;
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static u32 ioh_gbe_get_msglevel(struct net_device *netdev)
++ * @brief Report driver message level
++ * @param netdev [INOUT] Network interface device structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++static u32 ioh_gbe_get_msglevel(struct net_device *netdev)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_get_msglevel");
++
++ return adapter->msg_enable;
++}
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static void ioh_gbe_set_msglevel(struct net_device *netdev,
++ * u32 data)
++ * @brief Set driver message level
++ * @param netdev [INOUT] Network interface device structure
++ * @param data [IN] Driver message level
++ * @return None
++ */
++static void ioh_gbe_set_msglevel(struct net_device *netdev, u32 data)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_set_msglevel");
++
++ adapter->msg_enable = data;
++}
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static int ioh_gbe_nway_reset(struct net_device *netdev)
++ * @brief Restart autonegotiation
++ * @param netdev [INOUT] Network interface device structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++static int ioh_gbe_nway_reset(struct net_device *netdev)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_nway_reset");
++
++ return mii_nway_restart(&adapter->mii);
++}
++
++#ifdef CONFIG_PCH_PCIEQOS
++/*!
++ * @ingroup Ethtool driver
++ * @fn static int ioh_gbe_get_eeprom_len(struct net_device *netdev)
++ * @brief Report the device EEPROM memory size
++ * @param netdev [INOUT] Network interface device structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++static int ioh_gbe_get_eeprom_len(struct net_device *netdev)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_get_eeprom_len");
++
++ return adapter->hw.nvm.word_size * 2;
++}
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static int ioh_gbe_get_eeprom(struct net_device *netdev,
++ * struct ethtool_eeprom *eeprom, u8 *bytes)
++ * @brief Read data from the device EEPROM
++ * @param netdev [INOUT] Network interface device structure
++ * @param eeprom [INOUT] EEPROM get information structur
++ * @param bytes [OUT] Pointer of read data
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++static int
++ioh_gbe_get_eeprom(struct net_device *netdev,
++ struct ethtool_eeprom *eeprom, u8 *bytes)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ int ret = IOH_GBE_SUCCESS;
++ u32 offset;
++ u8 i;
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_get_eeprom");
++
++ if (eeprom->len == 0)
++ return -EINVAL;
++ eeprom->magic = (hw->vendor_id);
++
++ for (i = 0, offset = eeprom->offset; i < (eeprom->len); i++, offset++) {
++ ret = ioh_gbe_hal_read_nvm(hw, offset, (bytes + i));
++ if (ret)
++ break;
++ }
++ return ret;
++}
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static int ioh_gbe_set_eeprom(struct net_device *netdev,
++ * struct ethtool_eeprom *eeprom, u8 *bytes)
++ * @brief Write data to the device EEPROM
++ * @param netdev [INOUT] Network interface device structure
++ * @param eeprom [INOUT] EEPROM get information structur
++ * @param bytes [IN] Pointer of write data
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++static int
++ioh_gbe_set_eeprom(struct net_device *netdev,
++ struct ethtool_eeprom *eeprom, u8 *bytes)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ int ret;
++ u32 offset;
++ u8 i;
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_set_eeprom");
++
++ if (eeprom->len == 0) {
++ IOH_GBE_ERR("EOPNOTSUPP\n");
++ return -EOPNOTSUPP;
++ }
++ if (eeprom->magic != (hw->vendor_id)) {
++ IOH_GBE_ERR("EFAULT\n");
++ IOH_GBE_TESTOUT("eeprom->magic : 0x%08x magic : %0x\n",
++ eeprom->magic, (hw->vendor_id));
++ return -EFAULT;
++ }
++
++ for (i = 0, offset = eeprom->offset; i < (eeprom->len); i++, offset++) {
++ ret = ioh_gbe_hal_write_nvm(hw, offset, (bytes + i));
++ if (ret)
++ return ret;
++ }
++ return IOH_GBE_SUCCESS;
++}
++#endif
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static void ioh_gbe_get_ringparam(struct net_device *netdev,
++ * struct ethtool_ringparam *ring)
++ * @brief Report ring sizes
++ * @param netdev [INOUT] Network interface device structure
++ * @param ring [OUT] Ring param structure
++ * @return None
++ */
++static void
++ioh_gbe_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++ struct ioh_gbe_tx_ring *txdr = adapter->tx_ring;
++ struct ioh_gbe_rx_ring *rxdr = adapter->rx_ring;
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_get_ringparam");
++
++ ring->rx_max_pending = IOH_GBE_MAX_RXD;
++ ring->tx_max_pending = IOH_GBE_MAX_TXD;
++ ring->rx_mini_max_pending = 0;
++ ring->rx_jumbo_max_pending = 0;
++ ring->rx_pending = rxdr->count;
++ ring->tx_pending = txdr->count;
++ ring->rx_mini_pending = 0;
++ ring->rx_jumbo_pending = 0;
++}
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static int ioh_gbe_set_ringparam(struct net_device *netdev,
++ * struct ethtool_ringparam *ring)
++ * @brief Set ring sizes
++ * @param netdev [INOUT] Network interface device structure
++ * @param ring [IN] Ring param structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++static int
++ioh_gbe_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++ struct ioh_gbe_tx_ring *txdr, *tx_old;
++ struct ioh_gbe_rx_ring *rxdr, *rx_old;
++ int tx_ring_size, rx_ring_size;
++ int err = IOH_GBE_SUCCESS;
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_set_ringparam");
++
++ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
++ return -EINVAL;
++ tx_ring_size = (int)sizeof(struct ioh_gbe_tx_ring);
++ rx_ring_size = (int)sizeof(struct ioh_gbe_rx_ring);
++
++ while ((test_and_set_bit(__IOH_GBE_RESETTING, &adapter->flags)) != 0)
++ msleep(1);
++ if ((netif_running(adapter->netdev)) != 0)
++ ioh_gbe_down(adapter);
++ tx_old = adapter->tx_ring;
++ rx_old = adapter->rx_ring;
++
++ txdr = kzalloc(tx_ring_size, GFP_KERNEL);
++ if (!txdr) {
++ err = -ENOMEM;
++ goto err_alloc_tx;
++ }
++ rxdr = kzalloc(rx_ring_size, GFP_KERNEL);
++ if (!rxdr) {
++ err = -ENOMEM;
++ goto err_alloc_rx;
++ }
++ adapter->tx_ring = txdr;
++ adapter->rx_ring = rxdr;
++
++ rxdr->count = max(ring->rx_pending, (u32) IOH_GBE_MIN_RXD);
++ rxdr->count = min(rxdr->count, (u32) IOH_GBE_MAX_RXD);
++ IOH_GBE_ROUNDUP(rxdr->count, IOH_GBE_RX_DESC_MULTIPLE);
++
++ txdr->count = max(ring->tx_pending, (u32) IOH_GBE_MIN_TXD);
++ txdr->count = min(txdr->count, (u32) IOH_GBE_MAX_TXD);
++ IOH_GBE_ROUNDUP(txdr->count, IOH_GBE_TX_DESC_MULTIPLE);
++
++ if ((netif_running(adapter->netdev)) != 0) {
++ /* Try to get new resources before deleting old */
++ err = ioh_gbe_setup_rx_resources(adapter, adapter->rx_ring);
++ if (err != 0)
++ goto err_setup_rx;
++ err = ioh_gbe_setup_tx_resources(adapter, adapter->tx_ring);
++ if (err != 0)
++ goto err_setup_tx;
++ /* save the new, restore the old in order to free it,
++ * then restore the new back again */
++ adapter->rx_ring = rx_old;
++ adapter->tx_ring = tx_old;
++ ioh_gbe_free_rx_resources(adapter, adapter->rx_ring);
++ ioh_gbe_free_tx_resources(adapter, adapter->tx_ring);
++ kfree(tx_old);
++ kfree(rx_old);
++ adapter->rx_ring = rxdr;
++ adapter->tx_ring = txdr;
++ err = ioh_gbe_up(adapter);
++ }
++
++ clear_bit(__IOH_GBE_RESETTING, &adapter->flags);
++ return err;
++
++err_setup_tx:
++ ioh_gbe_free_rx_resources(adapter, adapter->rx_ring);
++err_setup_rx:
++ adapter->rx_ring = rx_old;
++ adapter->tx_ring = tx_old;
++ kfree(rxdr);
++err_alloc_rx:
++ kfree(txdr);
++err_alloc_tx:
++ if (netif_running(adapter->netdev))
++ ioh_gbe_up(adapter);
++ clear_bit(__IOH_GBE_RESETTING, &adapter->flags);
++ return err;
++}
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static void ioh_gbe_get_pauseparam(struct net_device *netdev,
++ * struct ethtool_pauseparam *pause)
++ * @brief Report pause parameters
++ * @param netdev [INOUT] Network interface device structure
++ * @param pause [OUT] Pause parameters structure
++ * @return None
++ */
++static void
++ioh_gbe_get_pauseparam(struct net_device *netdev,
++ struct ethtool_pauseparam *pause)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++ struct ioh_gbe_hw *hw = &adapter->hw;
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_get_pauseparam");
++
++ pause->autoneg =
++ ((hw->mac.fc_autoneg != 0) ? AUTONEG_ENABLE : AUTONEG_DISABLE);
++
++ if (hw->mac.fc == ioh_gbe_fc_rx_pause) {
++ pause->rx_pause = 1;
++ } else if (hw->mac.fc == ioh_gbe_fc_tx_pause) {
++ pause->tx_pause = 1;
++ } else if (hw->mac.fc == ioh_gbe_fc_full) {
++ pause->rx_pause = 1;
++ pause->tx_pause = 1;
++ }
++}
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static int ioh_gbe_set_pauseparam(struct net_device *netdev,
++ * struct ethtool_pauseparam *pause)
++ * @brief Set pause paramters
++ * @param netdev [INOUT] Network interface device structure
++ * @param pause [IN] Pause parameters structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++static int
++ioh_gbe_set_pauseparam(struct net_device *netdev,
++ struct ethtool_pauseparam *pause)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ int ret = IOH_GBE_SUCCESS;
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_set_pauseparam");
++
++ hw->mac.fc_autoneg = pause->autoneg;
++
++ while ((test_and_set_bit(__IOH_GBE_RESETTING, &adapter->flags)) != 0)
++ msleep(1);
++ if ((pause->rx_pause) && (pause->tx_pause))
++ hw->mac.fc = ioh_gbe_fc_full;
++ else if ((pause->rx_pause) && (!pause->tx_pause))
++ hw->mac.fc = ioh_gbe_fc_rx_pause;
++ else if ((!pause->rx_pause) && (pause->tx_pause))
++ hw->mac.fc = ioh_gbe_fc_tx_pause;
++ else if ((!pause->rx_pause) && (!pause->tx_pause))
++ hw->mac.fc = ioh_gbe_fc_none;
++
++ if (hw->mac.fc_autoneg == AUTONEG_ENABLE) {
++ if ((netif_running(adapter->netdev)) != 0) {
++ ioh_gbe_down(adapter);
++ ret = ioh_gbe_up(adapter);
++ } else {
++ ioh_gbe_reset(adapter);
++ }
++ } else {
++ ret = ioh_gbe_hal_force_mac_fc(hw);
++ }
++ clear_bit(__IOH_GBE_RESETTING, &adapter->flags);
++ return ret;
++}
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static u32 ioh_gbe_get_rx_csum(struct net_device *netdev)
++ * @brief Report whether receive checksums are turned on or off
++ * @param netdev [INOUT] Network interface device structure
++ * @return TRUE(1): Checksum On
++ * @return FALSE(0): Checksum Off
++ */
++static u32 ioh_gbe_get_rx_csum(struct net_device *netdev)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_get_rx_csum");
++
++ return adapter->rx_csum;
++}
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static int ioh_gbe_set_rx_csum(struct net_device *netdev, u32 data)
++ * @brief Turn receive checksum on or off
++ * @param netdev [INOUT] Network interface device structure
++ * @param data [IN] Checksum On[TRUE] or Off[FALSE]
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++static int ioh_gbe_set_rx_csum(struct net_device *netdev, u32 data)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_set_rx_csum");
++
++ adapter->rx_csum = data;
++ if ((netif_running(netdev)) != 0)
++ ioh_gbe_reinit_locked(adapter);
++ else
++ ioh_gbe_reset(adapter);
++
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static u32 ioh_gbe_get_tx_csum(struct net_device *netdev)
++ * @brief Report whether transmit checksums are turned on or off
++ * @param netdev [INOUT] Network interface device structure
++ * @return TRUE(1): Checksum On
++ * @return FALSE(0): Checksum Off
++ */
++static u32 ioh_gbe_get_tx_csum(struct net_device *netdev)
++{
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_get_tx_csum");
++
++ return (netdev->features & NETIF_F_HW_CSUM) != 0;
++}
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static int ioh_gbe_set_tx_csum(struct net_device *netdev, u32 data)
++ * @brief Turn transmit checksums on or off
++ * @param netdev [INOUT] Network interface device structure
++ * @param data [IN] Checksum on[TRUE] or off[FALSE]
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++static int ioh_gbe_set_tx_csum(struct net_device *netdev, u32 data)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_set_tx_csum");
++
++ adapter->tx_csum = data;
++
++ if (data != 0)
++ netdev->features |= NETIF_F_HW_CSUM;
++ else
++ netdev->features &= ~NETIF_F_HW_CSUM;
++
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static void ioh_gbe_diag_test(struct net_device *netdev,
++ * struct ethtool_test *eth_test, u64 *data)
++ * @brief Run specified self-tests
++ * @param netdev [IN] Network interface device structure
++ * @param eth_test [IN] Ethtool test structure
++ * @param data [OUT] Data for test result.
++ * @return None
++ */
++static void
++ioh_gbe_diag_test(struct net_device *netdev,
++ struct ethtool_test *eth_test, u64 *data)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++ unsigned char if_running = netif_running(netdev);
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_diag_test");
++
++ set_bit(__IOH_GBE_TESTING, &adapter->flags);
++
++ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
++ /* Offline tests */
++ DPRINTK(HW, INFO, "offline testing starting\n");
++
++ if (if_running) {
++ /* indicate we're in test mode */
++ dev_close(netdev);
++ } else {
++ ioh_gbe_reset(adapter);
++ }
++ /* Register test */
++ if (ioh_gbe_reg_test(adapter, &data[0]))
++ eth_test->flags |= ETH_TEST_FL_FAILED;
++
++ ioh_gbe_reset(adapter);
++ clear_bit(__IOH_GBE_TESTING, &adapter->flags);
++ if (if_running)
++ dev_open(netdev);
++
++ } else {
++ /* Online tests */
++ DPRINTK(HW, INFO, "online testing starting\n");
++ data[0] = 0;
++ clear_bit(__IOH_GBE_TESTING, &adapter->flags);
++ }
++}
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static void ioh_gbe_get_strings(struct net_device *netdev,
++ * u32 stringset, u8 *data)
++ * @brief Return a set of strings that describe the requested objects
++ * @param netdev [INOUT] Network interface device structure
++ * @param stringset [IN] Select the stringset. [ETH_SS_TEST] [ETH_SS_STATS]
++ * @param data [OUT]Pointer of read string data.
++ * @return None
++ */
++static void
++ioh_gbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
++{
++ u8 *p = data;
++ int i;
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_get_strings");
++
++ switch (stringset) {
++ case (u32) ETH_SS_TEST:
++ memcpy(data, *ioh_gbe_gstrings_test,
++ (int)sizeof(ioh_gbe_gstrings_test));
++ break;
++ case (u32) ETH_SS_STATS:
++ for (i = 0; i < IOH_GBE_GLOBAL_STATS_LEN; i++) {
++ memcpy(p, ioh_gbe_gstrings_stats[i].stat_string,
++ ETH_GSTRING_LEN);
++ p += ETH_GSTRING_LEN;
++ }
++ break;
++ }
++}
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static void ioh_gbe_led_blink_callback(unsigned long data)
++ * @brief Callback function for blink led
++ * @param data [IN] Pointer Address of Board private structure
++ * @return None
++ */
++static void ioh_gbe_led_blink_callback(unsigned long data)
++{
++ struct ioh_gbe_adapter *adapter = (struct ioh_gbe_adapter *)data;
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_led_blink_callback");
++
++ if ((test_and_change_bit(IOH_GBE_LED_ON, &adapter->led_status)) != 0)
++ ioh_gbe_hal_led_off(&adapter->hw);
++ else
++ ioh_gbe_hal_led_on(&adapter->hw);
++ mod_timer(&adapter->blink_timer, jiffies + IOH_GBE_ID_INTERVAL);
++}
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static int ioh_gbe_phys_id(struct net_device *netdev, u32 data)
++ * @brief Identify the device
++ * @param netdev [INOUT] Network interface device structure
++ * @param data [IN] Sleep time[ms]
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++static int ioh_gbe_phys_id(struct net_device *netdev, u32 data)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_phys_id");
++
++ if (!data || data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ))
++ data = (u32) (MAX_SCHEDULE_TIMEOUT / HZ);
++ if (!adapter->blink_timer.function) {
++ init_timer(&adapter->blink_timer);
++ adapter->blink_timer.function = ioh_gbe_led_blink_callback;
++ adapter->blink_timer.data = (unsigned long)adapter;
++ }
++ ioh_gbe_hal_setup_led(&adapter->hw);
++ mod_timer(&adapter->blink_timer, jiffies);
++ msleep_interruptible(data * 1000);
++ del_timer_sync(&adapter->blink_timer);
++
++ ioh_gbe_hal_led_off(&adapter->hw);
++ clear_bit(IOH_GBE_LED_ON, &adapter->led_status);
++ ioh_gbe_hal_cleanup_led(&adapter->hw);
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup Ethtool driver
++ * @fn static void ioh_gbe_get_ethtool_stats(struct net_device *netdev,
++ * struct ethtool_stats *stats, u64 *data)
++ * @brief Return statistics about the device
++ * @param netdev [INOUT] Network interface device structure
++ * @param stats [INOUT] Ethtool statue structure
++ * @param data [OUT] Pointer of read status area
++ * @return None
++ */
++static void
++ioh_gbe_get_ethtool_stats(struct net_device *netdev,
++ struct ethtool_stats *stats, u64 *data)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++ int i;
++
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_get_ethtool_stats");
++
++ ioh_gbe_update_stats(adapter);
++ for (i = 0; i < IOH_GBE_GLOBAL_STATS_LEN; i++) {
++ signed char *p =
++ (signed char *)adapter +
++ ioh_gbe_gstrings_stats[i].stat_offset;
++ data[i] =
++ (ioh_gbe_gstrings_stats[i].sizeof_stat ==
++ (int)sizeof(u64)) ? *(u64 *) p:(*(u32 *) p);
++ }
++}
++
++/*!
++ * @ingroup Ethtool driver Layer
++ * @struct ioh_gbe_ethtool_ops
++ * @brief Store the pointers of ethtool interfaces to kernel
++ */
++static struct ethtool_ops ioh_gbe_ethtool_ops = {
++ .get_settings = ioh_gbe_get_settings,
++ .set_settings = ioh_gbe_set_settings,
++ .get_drvinfo = ioh_gbe_get_drvinfo,
++ .get_regs_len = ioh_gbe_get_regs_len,
++ .get_regs = ioh_gbe_get_regs,
++ .get_wol = ioh_gbe_get_wol,
++ .set_wol = ioh_gbe_set_wol,
++ .get_msglevel = ioh_gbe_get_msglevel,
++ .set_msglevel = ioh_gbe_set_msglevel,
++ .nway_reset = ioh_gbe_nway_reset,
++ .get_link = ethtool_op_get_link,
++#ifdef CONFIG_PCH_PCIEQOS
++ .get_eeprom_len = ioh_gbe_get_eeprom_len,
++ .get_eeprom = ioh_gbe_get_eeprom,
++ .set_eeprom = ioh_gbe_set_eeprom,
++#endif
++ .get_ringparam = ioh_gbe_get_ringparam,
++ .set_ringparam = ioh_gbe_set_ringparam,
++ .get_pauseparam = ioh_gbe_get_pauseparam,
++ .set_pauseparam = ioh_gbe_set_pauseparam,
++ .get_rx_csum = ioh_gbe_get_rx_csum,
++ .set_rx_csum = ioh_gbe_set_rx_csum,
++ .get_tx_csum = ioh_gbe_get_tx_csum,
++ .set_tx_csum = ioh_gbe_set_tx_csum,
++ .self_test = ioh_gbe_diag_test,
++ .get_strings = ioh_gbe_get_strings,
++ .phys_id = ioh_gbe_phys_id,
++ .get_ethtool_stats = ioh_gbe_get_ethtool_stats,
++};
++
++/*!
++ * @ingroup Ethtool driver internal functions
++ * @fn void ioh_gbe_set_ethtool_ops(struct net_device *netdev)
++ * @brief Set the Ethtool to network device data
++ * @param netdev [INOUT] Network interface device structure
++ * @return None
++ */
++void ioh_gbe_set_ethtool_ops(struct net_device *netdev)
++{
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_set_ethtool_ops");
++
++ SET_ETHTOOL_OPS(netdev, &ioh_gbe_ethtool_ops);
++}
++
++#define IOH_GBE_REG_PATTERN_TEST(reg, mask, write) \
++ do { \
++ if (reg_pattern_test(adapter, data, \
++ IOH_GBE_##reg, mask, write)) \
++ return 1; \
++ } while (0)
++
++/*!
++ * @ingroup Ethtool driver internal functions
++ * @fn static int ioh_gbe_reg_test(struct ioh_gbe_adapter *adapter, uint64_t *data)
++ * @brief Register test
++ * @param adapter [IN] Board private structure
++ * @param data [INOUT] Pointer to test result data
++ * @return IOH_GBE_SUCCESS
++ */
++static int ioh_gbe_reg_test(struct ioh_gbe_adapter *adapter, uint64_t *data)
++{
++ IOH_GBE_DBGFUNC("ethtool: ioh_gbe_reg_test");
++
++ ioh_gbe_reset(adapter);
++ IOH_GBE_REG_PATTERN_TEST(INT_EN, 0x11111F3F, 0x11111F3F);
++ IOH_GBE_REG_PATTERN_TEST(MODE, 0xC2000000, 0xC2000000);
++ IOH_GBE_REG_PATTERN_TEST(TCPIP_ACC, 0x0000000F, 0x0000000F);
++ IOH_GBE_REG_PATTERN_TEST(EX_LIST, 0xFFFFFFFF, 0xFFFFFFFF);
++ IOH_GBE_REG_PATTERN_TEST(PHY_INT_CTRL, 0x00010003, 0x00010003);
++ IOH_GBE_REG_PATTERN_TEST(MAC_RX_EN, 0x00000001, 0x00000001);
++ IOH_GBE_REG_PATTERN_TEST(RX_FCTRL, 0x80000000, 0x80000000);
++ IOH_GBE_REG_PATTERN_TEST(PAUSE_REQ, 0x80000000, 0x80000000);
++ IOH_GBE_REG_PATTERN_TEST(RX_MODE, 0xC000FE00, 0xC000FE00);
++ IOH_GBE_REG_PATTERN_TEST(TX_MODE, 0xF800FE00, 0xF800FE00);
++ IOH_GBE_REG_PATTERN_TEST(PAUSE_PKT1, 0xFFFFFFFF, 0xFFFFFFFF);
++ IOH_GBE_REG_PATTERN_TEST(PAUSE_PKT2, 0xFFFFFFFF, 0xFFFFFFFF);
++ IOH_GBE_REG_PATTERN_TEST(PAUSE_PKT3, 0xFFFFFFFF, 0xFFFFFFFF);
++ IOH_GBE_REG_PATTERN_TEST(PAUSE_PKT4, 0xFFFFFFFF, 0xFFFFFFFF);
++ IOH_GBE_REG_PATTERN_TEST(PAUSE_PKT5, 0xFFFFFFFF, 0xFFFFFFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR1A, 0xFFFFFFFF, 0xFFFFFFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR1B, 0x0000FFFF, 0x0000FFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR2A, 0xFFFFFFFF, 0xFFFFFFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR2B, 0x0000FFFF, 0x0000FFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR3A, 0xFFFFFFFF, 0xFFFFFFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR3B, 0x0000FFFF, 0x0000FFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR4A, 0xFFFFFFFF, 0xFFFFFFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR4B, 0x0000FFFF, 0x0000FFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR5A, 0xFFFFFFFF, 0xFFFFFFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR5B, 0x0000FFFF, 0x0000FFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR6A, 0xFFFFFFFF, 0xFFFFFFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR6B, 0x0000FFFF, 0x0000FFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR7A, 0xFFFFFFFF, 0xFFFFFFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR7B, 0x0000FFFF, 0x0000FFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR8A, 0xFFFFFFFF, 0xFFFFFFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR8B, 0x0000FFFF, 0x0000FFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR9A, 0xFFFFFFFF, 0xFFFFFFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR9B, 0x0000FFFF, 0x0000FFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR10A, 0xFFFFFFFF, 0xFFFFFFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR10B, 0x0000FFFF, 0x0000FFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR11A, 0xFFFFFFFF, 0xFFFFFFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR11B, 0x0000FFFF, 0x0000FFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR12A, 0xFFFFFFFF, 0xFFFFFFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR12B, 0x0000FFFF, 0x0000FFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR13A, 0xFFFFFFFF, 0xFFFFFFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR13B, 0x0000FFFF, 0x0000FFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR14A, 0xFFFFFFFF, 0xFFFFFFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR14B, 0x0000FFFF, 0x0000FFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR15A, 0xFFFFFFFF, 0xFFFFFFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR15B, 0x0000FFFF, 0x0000FFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR16A, 0xFFFFFFFF, 0xFFFFFFFF);
++ IOH_GBE_REG_PATTERN_TEST(MAC_ADR16B, 0x0000FFFF, 0x0000FFFF);
++ IOH_GBE_REG_PATTERN_TEST(ADDR_MASK, 0x0000FFFF, 0x0000FFFF);
++ IOH_GBE_REG_PATTERN_TEST(RGMII_CTRL, 0x0000001F, 0x0000001F);
++ IOH_GBE_REG_PATTERN_TEST(DMA_CTRL, 0x00000003, 0x00000003);
++ IOH_GBE_REG_PATTERN_TEST(RX_DSC_BASE, 0xFFFFFFF0, 0xFFFFFFF0);
++ IOH_GBE_REG_PATTERN_TEST(RX_DSC_SIZE, 0x0000FFF0, 0x0000FFF0);
++ IOH_GBE_REG_PATTERN_TEST(RX_DSC_HW_P, 0xFFFFFFF0, 0xFFFFFFF0);
++ IOH_GBE_REG_PATTERN_TEST(RX_DSC_SW_P, 0xFFFFFFF0, 0xFFFFFFF0);
++ IOH_GBE_REG_PATTERN_TEST(TX_DSC_BASE, 0xFFFFFFF0, 0xFFFFFFF0);
++ IOH_GBE_REG_PATTERN_TEST(TX_DSC_SIZE, 0x0000FFF0, 0x0000FFF0);
++ IOH_GBE_REG_PATTERN_TEST(TX_DSC_HW_P, 0xFFFFFFF0, 0xFFFFFFF0);
++ IOH_GBE_REG_PATTERN_TEST(TX_DSC_SW_P, 0xFFFFFFF0, 0xFFFFFFF0);
++ IOH_GBE_REG_PATTERN_TEST(WOL_ST, 0x0000000F, 0x0000000F);
++ IOH_GBE_REG_PATTERN_TEST(WOL_CTRL, 0x0001017F, 0x0001017F);
++ IOH_GBE_REG_PATTERN_TEST(WOL_ADDR_MASK, 0x0000FFFF, 0x0000FFFF);
++
++ *data = 0;
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup Ethtool driver internal functions
++ * @fn static bool reg_pattern_test(struct ioh_gbe_adapter *adapter,
++ * uint64_t *data, int reg,
++ * uint32_t mask, uint32_t write)
++ * @brief Register pattern test
++ * @param adapter [IN] Board private structure
++ * @param data [INOUT] Pointer to test result data
++ * @param reg [INOUT] Register address
++ * @param mask [INOUT] Mask pattern
++ * @param write [INOUT] Write data
++ * @return true : Successfully
++ * @return false : Failed
++ */
++static bool
++reg_pattern_test(struct ioh_gbe_adapter *adapter,
++ uint64_t *data, int reg, uint32_t mask, uint32_t write)
++{
++ static const uint32_t test[] = {
++ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
++ };
++ uint8_t __iomem *address = adapter->hw.hw_addr + reg;
++ uint32_t read;
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(test); i++) {
++ writel(write & test[i], address);
++ read = (readl(address) & mask);
++ if (read != (write & test[i] & mask)) {
++ DPRINTK(DRV, ERR, "pattern test reg %04X failed: "
++ "got 0x%08X expected 0x%08X\n",
++ reg, read, (write & test[i] & mask));
++ *data = reg;
++ return true;
++ }
++ }
++ return false;
++}
+diff -urN linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe.h topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe.h
+--- linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe.h 2010-03-09 09:27:26.000000000 +0900
+@@ -0,0 +1,230 @@
++/*!
++ * @file ioh_gbe.h
++ * @brief Linux IOH Gigabit Ethernet Driver main header file
++ *
++ * @version 0.90
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR CO., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 09/21/2009
++ * modified:
++ *
++ */
++
++#ifndef _IOH_GBE_H_
++#define _IOH_GBE_H_
++
++struct ioh_gbe_adapter;
++
++#define PFX "ioh_gbe: "
++#define DPRINTK(nlevel, klevel, fmt, args...) \
++ do { \
++ if (printk_ratelimit()) \
++ (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
++ printk(KERN_##klevel PFX "%s: %s: " fmt, \
++ adapter->netdev->name, __func__ , ## args)); \
++ } while (0)
++
++/* only works for sizes that are powers of 2 */
++#define IOH_GBE_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))
++
++/*!
++ * @ingroup Gigabit Ether driver Layer
++ * @struct ioh_gbe_buffer
++ * @brief Buffer information
++ * @remarks
++ * wrapper around a pointer to a socket buffer,
++ * so a DMA handle can be stored along with the buffer
++ */
++struct ioh_gbe_buffer {
++ struct sk_buff *skb; /**< pointer to a socket buffer */
++ struct sk_buff *kernel_skb;
++ /**< pointer to a socket buffer received from the kernel */
++ dma_addr_t dma; /**< DMA address */
++ unsigned long time_stamp; /**< time stamp */
++ u16 length; /**< data size */
++};
++
++/*!
++ * @ingroup Gigabit Ether driver Layer
++ * @struct ioh_gbe_tx_ring
++ * @brief tx ring information
++ */
++struct ioh_gbe_tx_ring {
++ void *desc; /**< pointer to the descriptor ring memory */
++ dma_addr_t dma; /**< physical address of the descriptor ring */
++ unsigned int size; /**< length of descriptor ring in bytes */
++ unsigned int count; /**< number of descriptors in the ring */
++ unsigned int next_to_use;
++ /**< next descriptor to associate a buffer with */
++ unsigned int next_to_clean;
++ /**< next descriptor to check for DD status bit */
++ struct ioh_gbe_buffer *buffer_info;
++ /**< array of buffer information structs */
++ spinlock_t tx_lock; /**< spinlock structs */
++};
++
++/*!
++ * @ingroup Gigabit Ether driver Layer
++ * @struct ioh_gbe_rx_ring
++ * @brief rx ring information
++ */
++struct ioh_gbe_rx_ring {
++ void *desc; /**< pointer to the descriptor ring memory */
++ dma_addr_t dma; /**< physical address of the descriptor ring */
++ unsigned int size; /**< length of descriptor ring in bytes */
++ unsigned int count; /**< number of descriptors in the ring */
++ unsigned int next_to_use;
++ /**< next descriptor to associate a buffer with */
++ unsigned int next_to_clean;
++ /**< next descriptor to check for DD status bit */
++ struct ioh_gbe_buffer *buffer_info;
++ /**< array of buffer information structs */
++};
++
++/*!
++ * @ingroup Gigabit Ether driver Layer
++ * @struct ioh_gbe_hw_stats
++ * @brief Statistics counters collected by the MAC
++ */
++struct ioh_gbe_hw_stats {
++ u64 rx_packets; /**< total packets received */
++ u64 tx_packets; /**< total packets transmitted */
++ u64 rx_bytes; /**< total bytes received */
++ u64 tx_bytes; /**< total bytes transmitted */
++ u64 rx_errors; /**< bad packets received */
++ u64 tx_errors; /**< packet transmit problems */
++ u64 rx_dropped; /**< no space in Linux buffers */
++ u64 tx_dropped; /**< no space available in Linux */
++ u64 multicast; /**< multicast packets received */
++ u64 collisions; /**< collisions */
++ u64 rx_crc_errors; /**< received packet with crc error */
++ u64 rx_frame_errors; /**< received frame alignment error */
++ u64 rx_alloc_buff_failed; /**< allocate failure of a receive buffer */
++ u64 tx_length_errors; /**< transmit length error */
++ u64 tx_aborted_errors; /**< transmit aborted error */
++ u64 tx_carrier_errors; /**< transmit carrier error */
++ u64 tx_timeout_count; /**< Number of transmit timeout */
++	u64 tx_restart_count;	/**< Number of transmit restart */
++ u64 intr_rx_dsc_empty_count;
++ /**< Interrupt count of receive descriptor empty */
++ u64 intr_rx_frame_err_count;
++ /**< Interrupt count of receive frame error */
++ u64 intr_rx_fifo_err_count;
++ /**< Interrupt count of receive FIFO error */
++ u64 intr_rx_dma_err_count;
++ /**< Interrupt count of receive DMA error */
++ u64 intr_tx_fifo_err_count;
++ /**< Interrupt count of transmit FIFO error */
++ u64 intr_tx_dma_err_count;
++ /**< Interrupt count of transmit DMA error */
++ u64 intr_tcpip_err_count;
++ /**< Interrupt count of TCP/IP Accelerator */
++};
++
++/*!
++ * @ingroup Gigabit Ether driver Layer
++ * @struct ioh_gbe_adapter
++ * @brief board specific private data structure
++ */
++struct ioh_gbe_adapter {
++ /* OS defined structs */
++ struct net_device *netdev; /**< Pointer of network device structure */
++ struct pci_dev *pdev; /**< Pointer of pci device structure */
++ struct net_device_stats net_stats; /**< Network status */
++ struct net_device *polling_netdev;
++ /**< Pointer of polling network device structure */
++ struct napi_struct napi; /**< NAPI structure */
++
++ /* structs defined in ioh_gbe_hw.h */
++ struct ioh_gbe_hw hw; /**< Pointer of hardware structure */
++ struct ioh_gbe_hw_stats stats; /**< Hardware status */
++ struct work_struct reset_task; /**< Reset task */
++ struct mii_if_info mii; /**< MII information structure */
++ struct timer_list watchdog_timer; /**< Watchdog timer list */
++
++ u32 bd_number; /**< The number of the found NIC cards */
++ u32 wake_up_evt; /**< Wake up event */
++ u32 *config_space; /**< Configuration space */
++ int msg_enable; /**< Driver message level */
++
++ spinlock_t stats_lock; /**< Spinlock structure for status */
++ spinlock_t tx_queue_lock; /**< Spinlock structure for transmit */
++ spinlock_t int_en_lock; /**< Spinlock structure for IRQ enable */
++ atomic_t irq_sem; /**< Semaphore for interrupt */
++
++ struct timer_list blink_timer; /**< LED blink timer list */
++ unsigned long led_status; /**< LED status */
++
++ /* TX,RX */
++ struct ioh_gbe_tx_ring *tx_ring;
++ /**< Pointer of Tx descriptor ring structure */
++ struct ioh_gbe_rx_ring *rx_ring;
++ /**< Pointer of Rx descriptor ring structure */
++ unsigned long rx_buffer_len; /**< Receive buffer length */
++ unsigned long tx_queue_len; /**< Transmit queue length */
++
++ unsigned char rx_csum;
++ /**< Receive TCP/IP checksum enable/disable */
++ unsigned char tx_csum;
++ /**< Transmit TCP/IP checksum enable/disable */
++
++ unsigned char have_msi; /**< PCI MSI mode flag */
++
++ /* to not mess up cache alignment, always add to the bottom */
++ unsigned long flags; /**< Driver status flag */
++};
++
++/*!
++ * @ingroup Gigabit Ether driver Layer
++ * @def ioh_gbe_state_t
++ * @brief Driver Status
++ */
++enum ioh_gbe_state_t {
++ __IOH_GBE_TESTING, /**< Testing */
++	__IOH_GBE_RESETTING,	/**< Resetting */
++};
++
++/* pch_gbe_main.c */
++int ioh_gbe_up(struct ioh_gbe_adapter *adapter);
++void ioh_gbe_down(struct ioh_gbe_adapter *adapter);
++void ioh_gbe_reinit_locked(struct ioh_gbe_adapter *adapter);
++void ioh_gbe_reset(struct ioh_gbe_adapter *adapter);
++int ioh_gbe_setup_tx_resources(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_tx_ring *txdr);
++int ioh_gbe_setup_rx_resources(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_rx_ring *rxdr);
++void ioh_gbe_free_tx_resources(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_tx_ring *tx_ring);
++void ioh_gbe_free_rx_resources(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_rx_ring *rx_ring);
++void ioh_gbe_update_stats(struct ioh_gbe_adapter *adapter);
++int ioh_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
++void ioh_gbe_mdio_write(struct net_device *netdev, int addr, int reg, int data);
++/* ioh_gbe_param.c */
++void ioh_gbe_check_options(struct ioh_gbe_adapter *adapter);
++
++/* ioh_gbe_ethtool.c */
++void ioh_gbe_set_ethtool_ops(struct net_device *netdev);
++
++
++#endif /* _IOH_GBE_H_ */
+diff -urN linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_hw.h topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_hw.h
+--- linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_hw.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_hw.h 2010-03-11 15:11:49.000000000 +0900
+@@ -0,0 +1,259 @@
++/*!
++ * @file ioh_gbe_hw.h
++ * @brief Linux IOH Gigabit Ethernet Driver Hardware layer header file
++ *
++ * @version 0.90
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR CO., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 09/21/2009
++ * modified:
++ *
++ */
++#ifndef _IOH_GBE_HW_H_
++#define _IOH_GBE_HW_H_
++
++struct ioh_gbe_hw;
++
++/* mac type values */
++#define IOH_GBE_MAC_TYPE_UNDEFINED 0
++#define IOH_GBE_MAC_TYPE_IOH1 1
++#define IOH_GBE_MAC_TYPE_IOH2 2
++
++/* bus type values */
++#define ioh_gbe_bus_type_unknown 0
++#define ioh_gbe_bus_type_pci 1
++#define ioh_gbe_bus_type_pcix 2
++#define ioh_gbe_bus_type_pci_express 3
++#define ioh_gbe_bus_type_reserved 4
++
++/* bus speed values */
++#define ioh_gbe_bus_speed_unknown 0
++#define ioh_gbe_bus_speed_33 1
++#define ioh_gbe_bus_speed_66 2
++#define ioh_gbe_bus_speed_100 3
++#define ioh_gbe_bus_speed_120 4
++#define ioh_gbe_bus_speed_133 5
++#define ioh_gbe_bus_speed_2500 6
++#define ioh_gbe_bus_speed_reserved 7
++
++/* bus width values */
++#define ioh_gbe_bus_width_unknown 0
++#define ioh_gbe_bus_width_pcie_x1 1
++#define ioh_gbe_bus_width_pcie_x2 2
++#define ioh_gbe_bus_width_pcie_x4 4
++#define ioh_gbe_bus_width_32 5
++#define ioh_gbe_bus_width_64 6
++#define ioh_gbe_bus_width_reserved 7
++
++/* flow control values */
++#define ioh_gbe_fc_none 0
++#define ioh_gbe_fc_rx_pause 1
++#define ioh_gbe_fc_tx_pause 2
++#define ioh_gbe_fc_full 3
++
++#define IOH_GBE_FC_DEFAULT ioh_gbe_fc_full
++
++/*!
++ * @ingroup Gigabit Ether driver Layer
++ * @struct ioh_gbe_rx_desc
++ * @brief Receive Descriptor
++ */
++struct ioh_gbe_rx_desc {
++ u32 buffer_addr; /** RX Frame Buffer Address */
++ u32 tcp_ip_status; /** TCP/IP Accelerator Status */
++ u16 rx_words_eob; /** RX word count and Byte position */
++ u16 gbec_status; /** GMAC Status */
++ u8 dma_status; /** DMA Status */
++ u8 reserved1; /** Reserved */
++ u16 reserved2; /** Reserved */
++};
++
++/*!
++ * @ingroup Gigabit Ether driver Layer
++ * @struct ioh_gbe_tx_desc
++ * @brief Transmit Descriptor
++ */
++struct ioh_gbe_tx_desc {
++ u32 buffer_addr; /** TX Frame Buffer Address */
++ u16 length; /** Data buffer length */
++ u16 reserved1; /** Reserved */
++ u16 tx_words_eob; /** TX word count and Byte position */
++ u16 tx_frame_ctrl; /** TX Frame Control */
++ u8 dma_status; /** DMA Status */
++ u8 reserved2; /** Reserved */
++ u16 gbec_status; /** GMAC Status */
++};
++
++/*!
++ * @ingroup Gigabit Ether driver Layer
++ * @struct ioh_gbe_functions
++ * @brief HAL API function pointer
++ */
++struct ioh_gbe_functions {
++ /* Function pointers for the MAC. */
++ s32(*cleanup_led) (struct ioh_gbe_hw *);
++ /** for ioh_gbe_hal_cleanup_led */
++
++ void (*get_bus_info) (struct ioh_gbe_hw *);
++ /** for ioh_gbe_hal_get_bus_info */
++
++ s32(*led_on) (struct ioh_gbe_hw *);
++ /** for ioh_gbe_hal_led_on */
++
++ s32(*led_off) (struct ioh_gbe_hw *);
++ /** for ioh_gbe_hal_led_off */
++
++ void (*mc_addr_list_update) (struct ioh_gbe_hw *, u8 *, u32, u32, u32);
++ /** for ioh_gbe_hal_mc_addr_list_update */
++
++ void (*reset_hw) (struct ioh_gbe_hw *);
++ /** for ioh_gbe_hal_reset_hw */
++
++ s32(*init_hw) (struct ioh_gbe_hw *);
++ /** for ioh_gbe_hal_init_hw */
++
++ s32(*setup_link) (struct ioh_gbe_hw *);
++ /** for ioh_gbe_hal_setup_link */
++
++ s32(*setup_physical_interface) (struct ioh_gbe_hw *);
++ /** for setup link of PHY */
++
++ s32(*setup_led) (struct ioh_gbe_hw *);
++ /** for ioh_gbe_hal_setup_led */
++
++ void (*pause_packet) (struct ioh_gbe_hw *);
++ /** for ioh_gbe_hal_set_pause_packet */
++
++ /* Function pointers for the PHY. */
++ s32(*read_phy_reg) (struct ioh_gbe_hw *, u32, u16 *);
++ /** for ioh_gbe_hal_read_phy_reg */
++
++ s32(*write_phy_reg) (struct ioh_gbe_hw *, u32, u16);
++ /** for ioh_gbe_hal_write_phy_reg */
++
++ void (*reset_phy) (struct ioh_gbe_hw *);
++ /** for ioh_gbe_hal_phy_hw_reset */
++
++ void (*sw_reset_phy) (struct ioh_gbe_hw *);
++ /** for ioh_gbe_hal_phy_sw_reset */
++
++ void (*power_up_phy) (struct ioh_gbe_hw *hw);
++ /** for ioh_gbe_hal_power_up_phy */
++
++ void (*power_down_phy) (struct ioh_gbe_hw *hw);
++ /** for ioh_gbe_hal_power_down_phy */
++#ifdef CONFIG_PCH_PCIEQOS
++ /* Function pointers for the NVM. */
++ s32(*validate_nvm) (struct ioh_gbe_hw *);
++ /** for ioh_gbe_hal_validate_nvm_checksum */
++
++ s32(*read_nvm) (struct ioh_gbe_hw *, u32, u8 *);
++ /** for ioh_gbe_hal_read_nvm */
++
++ s32(*write_nvm) (struct ioh_gbe_hw *, u32, u8 *);
++ /** for ioh_gbe_hal_write_nvm */
++#endif
++ s32(*read_mac_addr) (struct ioh_gbe_hw *);
++ /** for ioh_gbe_hal_read_mac_addr */
++
++ u16(*ctrl_miim) (struct ioh_gbe_hw *, u32, u32, u32, u16);
++ /** for ioh_gbe_hal_ctrl_miim */
++};
++
++/*!
++ * @ingroup Gigabit Ether driver Layer
++ * @struct ioh_gbe_mac_info
++ * @brief MAC information
++ */
++struct ioh_gbe_mac_info {
++ u8 addr[6]; /** Store the MAC address */
++ u8 type; /** Type of MAC */
++ u8 fc; /** Mode of flow control */
++ u8 fc_autoneg; /** Auto negotiation enable for flow control setting */
++ u8 tx_fc_enable; /** Enable flag of Transmit flow control */
++ u32 max_frame_size; /** Max transmit frame size */
++ u32 min_frame_size; /** Min transmit frame size */
++ u16 mar_entry_count; /** Entry count of MAC address registers */
++ u8 autoneg; /** Auto negotiation enable */
++ u16 link_speed; /** Link speed */
++ u16 link_duplex; /** Link duplex */
++};
++
++/*!
++ * @ingroup Gigabit Ether driver Layer
++ * @struct ioh_gbe_phy_info
++ * @brief PHY information
++ */
++struct ioh_gbe_phy_info {
++ u32 addr; /** PHY address */
++ u32 id; /** PHY's identifier */
++ u32 revision; /** PHY's revision */
++ u32 reset_delay_us; /** HW reset delay time[us] */
++ u16 autoneg_advertised; /** Autoneg advertised */
++};
++
++/*!
++ * @ingroup Gigabit Ether driver Layer
++ * @struct ioh_gbe_nvm_info
++ * @brief NVM information
++ */
++struct ioh_gbe_nvm_info {
++ u16 word_size;
++};
++
++/*!
++ * @ingroup Gigabit Ether driver Layer
++ * @struct ioh_gbe_bus_info
++ * @brief Bus information
++ */
++struct ioh_gbe_bus_info {
++ u8 type;
++ u8 speed;
++ u8 width;
++};
++
++/*!
++ * @ingroup Gigabit Ether driver Layer
++ * @struct ioh_gbe_hw
++ * @brief Hardware information
++ */
++struct ioh_gbe_hw {
++ void *back;
++
++ u8 *hw_addr;
++ spinlock_t miim_lock;
++
++ struct ioh_gbe_functions func;
++ struct ioh_gbe_mac_info mac;
++ struct ioh_gbe_phy_info phy;
++ struct ioh_gbe_nvm_info nvm;
++ struct ioh_gbe_bus_info bus;
++
++ u16 vendor_id;
++ u16 device_id;
++ u16 subsystem_vendor_id;
++ u16 subsystem_device_id;
++ u8 revision_id;
++};
++
++#endif
+diff -urN linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_mac.c topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_mac.c
+--- linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_mac.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_mac.c 2010-03-11 15:11:58.000000000 +0900
+@@ -0,0 +1,522 @@
++/*!
++ * @file ioh_gbe_mac.c
++ * @brief Linux IOH Gigabit Ethernet Driver HAL internal function (MAC) source file
++ *
++ * @version 0.90
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR CO., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 09/21/2009
++ * modified:
++ *
++ */
++#include <linux/ethtool.h>
++#include "pch_gbe_osdep.h"
++#include "pch_gbe_regs.h"
++#include "pch_gbe_defines.h"
++#include "pch_gbe_hw.h"
++#include "pch_gbe_mac.h"
++#include "pch_gbe_api.h"
++
++/* Pause packet value */
++#define IOH_GBE_PAUSE_PKT1_VALUE 0x00C28001
++#define IOH_GBE_PAUSE_PKT2_VALUE 0x00000100
++#define IOH_GBE_PAUSE_PKT4_VALUE 0x01000888
++#define IOH_GBE_PAUSE_PKT5_VALUE 0x0000FFFF
++
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_mac_reset_hw(struct ioh_gbe_hw *hw)
++ * @brief Reset hardware
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks
++ * This resets the hardware into a known state (Reset only MAC).
++ * This is a function pointer entry point called by the api module.
++ */
++void ioh_gbe_mac_reset_hw(struct ioh_gbe_hw *hw)
++{
++ u32 tmp = 0;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_mac_reset_hw");
++
++#ifndef CONFIG_PCH_PCIEQOS
++ /* Read the MAC address. and store to the private data */
++ ioh_gbe_mac_read_mac_addr(hw);
++#endif
++
++ IOH_GBE_WRITE_REG(hw, RESET, IOH_GBE_ALL_RST);
++#ifdef IOH_GBE_MAC_IFOP_RGMII
++ IOH_GBE_WRITE_REG(hw, MODE, IOH_GBE_MODE_GMII_ETHER);
++#endif
++ while ((IOH_GBE_READ_REG(hw, RESET)) != 0) {
++ udelay(1);
++ tmp++;
++ if (tmp == 5) {
++ IOH_GBE_ERR("MAC HW RESET\n");
++ break;
++ }
++ }
++#ifndef CONFIG_PCH_PCIEQOS
++ /* Setup the receive address */
++ ioh_gbe_mac_mar_set(hw, hw->mac.addr, 0);
++#endif
++ return;
++}
++
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_mac_init_rx_addrs(struct ioh_gbe_hw *hw,
++ * u16 mar_count)
++ * @brief Initialize receive address's
++ * @param hw [INOUT] Pointer to the HW structure
++ * @param mar_count [IN] Receive address registers
++ * @return None
++ * @remarks
++ * Setups the receive address registers by setting the base receive address
++ * register to the devices MAC address and clearing all the other receive
++ * address registers to 0.
++ * This is a function pointer entry point called by the api module.
++ */
++void ioh_gbe_mac_init_rx_addrs(struct ioh_gbe_hw *hw, u16 mar_count)
++{
++ u32 i;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_mac_init_rx_addrs");
++ IOH_GBE_DBGOUT("Programming MAC Address into MAC_ADDR[0]\n");
++ IOH_GBE_TESTOUT("Clearing MAC_ADDR[1-%u]\n", mar_count - 1);
++
++ /* Setup the receive address */
++ ioh_gbe_hal_mar_set(hw, hw->mac.addr, 0);
++
++ /* Zero out the other (mar_entry_count - 1) receive addresses */
++ for (i = 1; i < mar_count; i++) {
++ IOH_GBE_WRITE_REG_ARRAY(hw, MAC_ADR, (i << 1), 0);
++ IOH_GBE_WRITE_REG_ARRAY(hw, MAC_ADR, ((i << 1) + 1), 0);
++ }
++ IOH_GBE_WRITE_REG(hw, ADDR_MASK, 0xFFFE);
++ /* wait busy */
++ while ((IOH_GBE_READ_REG(hw, ADDR_MASK) & IOH_GBE_BUSY) != 0)
++ ;
++#ifdef DEBUG_TEST
++ {
++ unsigned char ti;
++ IOH_GBE_TESTOUT("ADDR_MASK reg(check index bit) : 0x%08x\n",
++ IOH_GBE_READ_REG(hw, ADDR_MASK));
++ for (ti = 0; ti < 16; ti++) {
++ IOH_GBE_TESTOUT("MAC_ADR%dAB reg : 0x%08x 0x%08x\n",
++ (ti + 1),
++ IOH_GBE_READ_REG(hw,
++ MAC_ADR1A +
++ (0x08 * ti)),
++ IOH_GBE_READ_REG(hw,
++ MAC_ADR1B +
++ (0x08 * ti)));
++ }
++ }
++#endif
++}
++
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_mac_mar_set(struct ioh_gbe_hw *hw, u8 *addr, u32 index)
++ * @brief Set MAC address register
++ * @param hw [INOUT] Pointer to the HW structure
++ * @param addr [IN] Pointer to the MAC address
++ * @param index [IN] MAC address array register
++ * @return None
++ */
++void ioh_gbe_mac_mar_set(struct ioh_gbe_hw *hw, u8 * addr, u32 index)
++{
++ u32 mar_low, mar_high, adrmask;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_mac_mar_set");
++ IOH_GBE_TESTOUT("index : 0x%x\n", index);
++
++ /* HW expects these in little endian so we reverse the byte order
++ * from network order (big endian) to little endian
++ */
++ mar_low = ((u32) addr[0] |
++ ((u32) addr[1] << 8) |
++ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
++
++ mar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
++ /* Stop the MAC Address of index. */
++ adrmask = IOH_GBE_READ_REG(hw, ADDR_MASK);
++ IOH_GBE_WRITE_REG(hw, ADDR_MASK, (adrmask | (0x0001 << index)));
++
++ IOH_GBE_TESTOUT("ADDR_MASK reg : 0x%08x\n", adrmask);
++ IOH_GBE_TESTOUT("ADDR_MASK reg(check index bit) : 0x%08x\n",
++ IOH_GBE_READ_REG(hw, ADDR_MASK));
++ /* wait busy */
++ while ((IOH_GBE_READ_REG(hw, ADDR_MASK) & IOH_GBE_BUSY) != 0)
++ ;
++ IOH_GBE_TESTOUT("ADDR_MASK reg(check BUSY bit:1) : 0x%08x\n",
++ IOH_GBE_READ_REG(hw, ADDR_MASK));
++
++ /* Set the MAC address to the MAC address 1A/1B register */
++ IOH_GBE_WRITE_REG_ARRAY(hw, MAC_ADR, (index << 1), mar_low);
++ IOH_GBE_WRITE_REG_ARRAY(hw, MAC_ADR, ((index << 1) + 1), mar_high);
++ /* Start the MAC address of index */
++ IOH_GBE_WRITE_REG(hw, ADDR_MASK, (adrmask & ~(0x0001 << index)));
++ IOH_GBE_TESTOUT("ADDR_MASK reg(check index bit:0) : 0x%08x\n",
++ IOH_GBE_READ_REG(hw, ADDR_MASK));
++ /* wait busy */
++ while ((IOH_GBE_READ_REG(hw, ADDR_MASK) & IOH_GBE_BUSY) != 0)
++ ;
++ IOH_GBE_TESTOUT("ADDR_MASK reg(check BUSY bit) : 0x%08x\n",
++ IOH_GBE_READ_REG(hw, ADDR_MASK));
++ IOH_GBE_DBGFUNC("ioh_gbe_mac_mar_set:End");
++}
++
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_mac_mc_addr_list_update(struct ioh_gbe_hw *hw,
++ * u8 *mc_addr_list, u32 mc_addr_count,
++ * u32 mar_used_count, u32 mar_total_num)
++ * @brief Update Multicast addresses
++ * @param hw [INOUT] Pointer to the HW structure
++ * @param mc_addr_list [IN] Array of multicast addresses to program
++ * @param mc_addr_count [IN] Number of multicast addresses to program
++ * @param mar_used_count [IN] The first MAC Address register free to program
++ * @param mar_total_num [IN] Total number of supported MAC Address Registers
++ * @return None
++ */
++void
++ioh_gbe_mac_mc_addr_list_update(struct ioh_gbe_hw *hw,
++ u8 *mc_addr_list, u32 mc_addr_count,
++ u32 mar_used_count, u32 mar_total_num)
++{
++ u32 i, adrmask;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_mac_mc_addr_list_update");
++
++#ifdef DEBUG_TEST
++ {
++ u32 ti, tj;
++ IOH_GBE_TESTOUT
++ ("mc_addr_count = %d mar_used_count = %d "
++ "mar_total_num = %d\n",
++ mc_addr_count, mar_used_count, mar_total_num);
++ for (ti = 0; ti < mc_addr_count; ti++) {
++ tj = ti * IOH_GBE_ETH_ALEN;
++ IOH_GBE_TESTOUT
++ ("mc_addr_list[%d] = 0x%02x %02x %02x "
++ "%02x %02x %02x \n",
++ ti, mc_addr_list[tj], mc_addr_list[tj + 1],
++ mc_addr_list[tj + 2], mc_addr_list[tj + 3],
++ mc_addr_list[tj + 4], mc_addr_list[tj + 5]);
++ }
++ }
++#endif
++ /* Load the first set of multicast addresses into the exact
++ * filters (RAR). If there are not enough to fill the RAR
++ * array, clear the filters.
++ */
++ for (i = mar_used_count; i < mar_total_num; i++) {
++ if (mc_addr_count != 0) {
++ ioh_gbe_mac_mar_set(hw, mc_addr_list, i);
++ mc_addr_count--;
++ mc_addr_list += IOH_GBE_ETH_ALEN;
++ } else {
++ /* Clear MAC address mask */
++ adrmask = IOH_GBE_READ_REG(hw, ADDR_MASK);
++ IOH_GBE_WRITE_REG(hw, ADDR_MASK,
++ (adrmask | (0x0001 << i)));
++ /* wait busy */
++ while ((IOH_GBE_READ_REG(hw, ADDR_MASK) & IOH_GBE_BUSY)
++ != 0) {
++ ;
++ }
++ /* Clear MAC address */
++ IOH_GBE_WRITE_REG_ARRAY(hw, MAC_ADR, i << 1, 0);
++ IOH_GBE_WRITE_REG_ARRAY(hw, MAC_ADR, (i << 1) + 1, 0);
++ }
++ }
++#ifdef DEBUG_TEST
++ {
++ unsigned char ti;
++ IOH_GBE_TESTOUT("ADDR_MASK reg(check index bit) : 0x%08x\n",
++ IOH_GBE_READ_REG(hw, ADDR_MASK));
++ for (ti = 0; ti < 16; ti++) {
++ IOH_GBE_TESTOUT("MAC_ADR%dAB reg : 0x%08x 0x%08x\n",
++ (ti + 1),
++ IOH_GBE_READ_REG(hw,
++ MAC_ADR1A +
++ (0x08 * ti)),
++ IOH_GBE_READ_REG(hw,
++ MAC_ADR1B +
++ (0x08 * ti)));
++ }
++ }
++#endif
++}
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_mac_setup_link(struct ioh_gbe_hw *hw)
++ * @brief Setup flow control and link settings
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++s32 ioh_gbe_mac_setup_link(struct ioh_gbe_hw *hw)
++{
++ struct ioh_gbe_functions *func = &hw->func;
++ s32 ret_val = IOH_GBE_SUCCESS;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_mac_setup_link");
++
++ /* Call the necessary media_type subroutine to configure the link. */
++ ret_val = func->setup_physical_interface(hw);
++
++ return ret_val;
++}
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_mac_force_mac_fc(struct ioh_gbe_hw *hw)
++ * @brief Force the MAC's flow control settings
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++s32 ioh_gbe_mac_force_mac_fc(struct ioh_gbe_hw *hw)
++{
++ struct ioh_gbe_mac_info *mac = &hw->mac;
++ u32 rx_fctrl;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_mac_force_mac_fc");
++ IOH_GBE_TESTOUT("mac->fc = %u\n", mac->fc);
++
++ rx_fctrl = IOH_GBE_READ_REG(hw, RX_FCTRL);
++
++ switch (mac->fc) {
++ case ioh_gbe_fc_none:
++ rx_fctrl &= ~IOH_GBE_FL_CTRL_EN;
++ mac->tx_fc_enable = FALSE;
++ break;
++ case ioh_gbe_fc_rx_pause:
++ rx_fctrl |= IOH_GBE_FL_CTRL_EN;
++ mac->tx_fc_enable = FALSE;
++ break;
++ case ioh_gbe_fc_tx_pause:
++ rx_fctrl &= ~IOH_GBE_FL_CTRL_EN;
++ mac->tx_fc_enable = TRUE;
++ break;
++ case ioh_gbe_fc_full:
++ rx_fctrl |= IOH_GBE_FL_CTRL_EN;
++ mac->tx_fc_enable = TRUE;
++ break;
++ default:
++ IOH_GBE_DBGOUT("Flow control param set incorrectly\n");
++ return -IOH_GBE_ERR_CONFIG;
++ }
++ if (mac->link_duplex == DUPLEX_HALF)
++ rx_fctrl &= ~IOH_GBE_FL_CTRL_EN;
++ IOH_GBE_WRITE_REG(hw, RX_FCTRL, rx_fctrl);
++ IOH_GBE_TESTOUT("RX_FCTRL reg : 0x%08x mac->tx_fc_enable : %d\n",
++ IOH_GBE_READ_REG(hw, RX_FCTRL), mac->tx_fc_enable);
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_mac_config_fc_after_link_up(struct ioh_gbe_hw *hw)
++ * @brief Configures flow control after link
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ */
++s32 ioh_gbe_mac_config_fc_after_link_up(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_mac_config_fc_after_link_up");
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_mac_set_wol_event(struct ioh_gbe_hw *hw, u32 wu_evt)
++ * @brief Set wake-on-lan event
++ * @param hw [INOUT] Pointer to the HW structure
++ * @param wu_evt [IN] Wake up event
++ * @return None
++ */
++void ioh_gbe_mac_set_wol_event(struct ioh_gbe_hw *hw, u32 wu_evt)
++{
++ u32 addr_mask;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_mac_set_wol_event");
++ IOH_GBE_TESTOUT("wu_evt : 0x%08x ADDR_MASK reg : 0x%08x\n",
++ wu_evt, IOH_GBE_READ_REG(hw, ADDR_MASK));
++
++ if (wu_evt != 0) {
++ /* Set Wake-On-Lan address mask */
++ addr_mask = IOH_GBE_READ_REG(hw, ADDR_MASK);
++ IOH_GBE_WRITE_REG(hw, WOL_ADDR_MASK, addr_mask);
++ /* wait busy */
++ while ((IOH_GBE_READ_REG(hw, WOL_ADDR_MASK) & IOH_GBE_WLA_BUSY)
++ != 0) {
++ ;
++ }
++ IOH_GBE_WRITE_REG(hw, WOL_ST, 0);
++ IOH_GBE_WRITE_REG(hw, WOL_CTRL,
++ (wu_evt | IOH_GBE_WLC_WOL_MODE));
++ IOH_GBE_WRITE_REG(hw, INT_EN, IOH_GBE_INT_ENABLE_MASK);
++ } else {
++ IOH_GBE_WRITE_REG(hw, WOL_CTRL, 0);
++ IOH_GBE_WRITE_REG(hw, WOL_ST, 0);
++ }
++
++#ifdef DEBUG_TEST
++ IOH_GBE_TESTOUT
++ ("WOL_ADDR_MASK reg : 0x%08x WOL_CTRL reg : 0x%08x "
++ "WOL_ST reg : 0x%08x\n",
++ IOH_GBE_READ_REG(hw, WOL_ADDR_MASK),
++ IOH_GBE_READ_REG(hw, WOL_CTRL),
++ IOH_GBE_READ_REG(hw, WOL_ST));
++#endif
++ return;
++}
++
++/*!
++ * @ingroup HAL internal function
++ * @fn u16 ioh_gbe_mac_ctrl_miim(struct ioh_gbe_hw *hw, u32 addr,
++ * u32 dir, u32 reg, u16 data)
++ * @brief Set wake-on-lan event
++ * @param hw [INOUT] Pointer to the HW structure
++ * @param addr [IN] Address of PHY
++ * @param	dir	[IN] Operation. (Write or Read)
++ * @param reg [IN] Access register of PHY
++ * @param data [IN] Write data.
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++u16
++ioh_gbe_mac_ctrl_miim(struct ioh_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
++ u16 data)
++{
++ u32 data_out = 0;
++ unsigned int i;
++ unsigned long flags;
++
++ IOH_GBE_DBGOUT2("ioh_gbe_mac_ctrl_miim\n");
++ spin_lock_irqsave(&hw->miim_lock, flags);
++
++ for (i = 100; i; --i) {
++ if ((IOH_GBE_READ_REG(hw, MIIM) & IOH_GBE_MIIM_OPER_READY) != 0)
++ break;
++ udelay(20);
++ }
++ if (i == 0) {
++ IOH_GBE_DBGOUT("ioh-sample-gbe.miim won't go Ready\n");
++ spin_unlock_irqrestore(&hw->miim_lock, flags);
++ return IOH_GBE_SUCCESS; /* No way to indicate timeout error */
++ }
++ IOH_GBE_WRITE_REG(hw, MIIM, ((reg << IOH_GBE_MIIM_REG_ADDR_SHIFT) |
++ (addr << IOH_GBE_MIIM_PHY_ADDR_SHIFT) |
++ dir | data));
++ for (i = 0; i < 100; i++) {
++ udelay(20);
++ data_out = IOH_GBE_READ_REG(hw, MIIM);
++ if ((data_out & IOH_GBE_MIIM_OPER_READY) != 0)
++ break;
++ }
++ spin_unlock_irqrestore(&hw->miim_lock, flags);
++
++ IOH_GBE_DBGOUT1("%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
++ dir == IOH_GBE_MIIM_OPER_READ ? "READ" : "WRITE",
++ addr, reg, data, data_out);
++ return (u16) data_out;
++}
++
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_mac_set_pause_packet(struct ioh_gbe_hw *hw)
++ * @brief Set pause packet
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return None
++ */
++void ioh_gbe_mac_set_pause_packet(struct ioh_gbe_hw *hw)
++{
++ unsigned long tmp2, tmp3;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_mac_set_pause_packet");
++
++ /* Set Pause packet */
++ tmp2 = hw->mac.addr[1];
++ tmp2 = (tmp2 << 8) | hw->mac.addr[0];
++ tmp2 = IOH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);
++
++ tmp3 = hw->mac.addr[5];
++ tmp3 = (tmp3 << 8) | hw->mac.addr[4];
++ tmp3 = (tmp3 << 8) | hw->mac.addr[3];
++ tmp3 = (tmp3 << 8) | hw->mac.addr[2];
++
++ IOH_GBE_WRITE_REG(hw, PAUSE_PKT1, IOH_GBE_PAUSE_PKT1_VALUE);
++ IOH_GBE_WRITE_REG(hw, PAUSE_PKT2, tmp2);
++ IOH_GBE_WRITE_REG(hw, PAUSE_PKT3, tmp3);
++ IOH_GBE_WRITE_REG(hw, PAUSE_PKT4, IOH_GBE_PAUSE_PKT4_VALUE);
++ IOH_GBE_WRITE_REG(hw, PAUSE_PKT5, IOH_GBE_PAUSE_PKT5_VALUE);
++
++ /* Transmit Pause Packet */
++ IOH_GBE_WRITE_REG(hw, PAUSE_REQ, IOH_GBE_PS_PKT_RQ);
++
++ IOH_GBE_TESTOUT
++ ("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
++ IOH_GBE_READ_REG(hw, PAUSE_PKT1), IOH_GBE_READ_REG(hw, PAUSE_PKT2),
++ IOH_GBE_READ_REG(hw, PAUSE_PKT3), IOH_GBE_READ_REG(hw, PAUSE_PKT4),
++ IOH_GBE_READ_REG(hw, PAUSE_PKT5));
++
++ return;
++}
++
++#ifndef CONFIG_PCH_PCIEQOS
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_mac_read_mac_addr(struct ioh_gbe_hw *hw)
++ * @brief Read MAC address
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS
++ */
++s32 ioh_gbe_mac_read_mac_addr(struct ioh_gbe_hw *hw)
++{
++ u32 adr1a, adr1b;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_mac_read_mac_addr");
++
++ adr1a = IOH_GBE_READ_REG(hw, MAC_ADR1A);
++ adr1b = IOH_GBE_READ_REG(hw, MAC_ADR1B);
++
++ hw->mac.addr[0] = (u8)(adr1a & 0xFF);
++ hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
++ hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
++ hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
++ hw->mac.addr[4] = (u8)(adr1b & 0xFF);
++ hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
++
++ IOH_GBE_TESTOUT("hw->mac.addr : 0x%02x %02x %02x %02x %02x %02x\n",
++ hw->mac.addr[0], hw->mac.addr[1], hw->mac.addr[2],
++ hw->mac.addr[3], hw->mac.addr[4], hw->mac.addr[5]);
++ return IOH_GBE_SUCCESS;
++}
++#endif /* CONFIG_PCH_PCIEQOS */
+diff -urN linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_mac.h topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_mac.h
+--- linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_mac.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_mac.h 2010-03-11 15:11:47.000000000 +0900
+@@ -0,0 +1,121 @@
++/*!
++ * @file ioh_gbe_mac.h
++ * @brief Linux IOH Gigabit Ethernet Driver HAL internal function (MAC) header file
++ *
++ * @version 0.90
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR CO., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 09/21/2009
++ * modified:
++ *
++ */
++#ifndef _IOH_GBE_MAC_H_
++#define _IOH_GBE_MAC_H_
++
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_mac_reset_hw(struct ioh_gbe_hw *hw)
++ * @brief Reset hardware
++ */
++void ioh_gbe_mac_reset_hw(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_mac_init_rx_addrs(struct ioh_gbe_hw *hw,
++ * u16 mar_count)
++ * @brief Initialize receive addresses
++ */
++void ioh_gbe_mac_init_rx_addrs(struct ioh_gbe_hw *hw, u16 mar_count);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_mac_mar_set(struct ioh_gbe_hw *hw, u8 *addr, u32 index)
++ * @brief Set MAC address register
++ */
++void ioh_gbe_mac_mar_set(struct ioh_gbe_hw *hw, u8 *addr, u32 index);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_mac_mc_addr_list_update(struct ioh_gbe_hw *hw,
++ * u8 *mc_addr_list, u32 mc_addr_count,
++ * u32 mar_used_count, u32 mar_total_num)
++ * @brief Update Multicast addresses
++ */
++void ioh_gbe_mac_mc_addr_list_update(struct ioh_gbe_hw *hw,
++ u8 *mc_addr_list, u32 mc_addr_count,
++ u32 mar_used_count, u32 mar_count);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_mac_setup_link(struct ioh_gbe_hw *hw)
++ * @brief Setup flow control and link settings
++ */
++s32 ioh_gbe_mac_setup_link(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_mac_force_mac_fc(struct ioh_gbe_hw *hw)
++ * @brief Force the MAC's flow control settings
++ */
++s32 ioh_gbe_mac_force_mac_fc(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_mac_config_fc_after_link_up(struct ioh_gbe_hw *hw)
++ * @brief Configures flow control after link
++ */
++s32 ioh_gbe_mac_config_fc_after_link_up(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_mac_set_wol_event(struct ioh_gbe_hw *hw, u32 wu_evt)
++ * @brief Set wake-on-lan event
++ */
++void ioh_gbe_mac_set_wol_event(struct ioh_gbe_hw *hw, u32 wu_evt);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn u16 ioh_gbe_mac_ctrl_miim(struct ioh_gbe_hw *hw, u32 addr,
++ * u32 dir, u32 reg, u16 data)
++ * @brief Control the MII Management interface (read/write PHY registers)
++ */
++u16 ioh_gbe_mac_ctrl_miim(struct ioh_gbe_hw *hw,
++ u32 addr, u32 dir, u32 reg, u16 data);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_mac_set_pause_packet(struct ioh_gbe_hw *hw)
++ * @brief Set pause packet
++ */
++void ioh_gbe_mac_set_pause_packet(struct ioh_gbe_hw *hw);
++
++#ifndef CONFIG_PCH_PCIEQOS
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_mac_read_mac_addr(struct ioh_gbe_hw *hw)
++ * @brief Read MAC address
++ */
++s32 ioh_gbe_mac_read_mac_addr(struct ioh_gbe_hw *hw);
++#endif /* CONFIG_PCH_PCIEQOS */
++
++#endif /* _IOH_GBE_MAC_H_ */
+diff -urN linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_main.c topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_main.c
+--- linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_main.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_main.c 2010-03-11 15:13:21.000000000 +0900
+@@ -0,0 +1,2973 @@
++/*!
++ * @file ioh_gbe_main.c
++ * @brief Linux IOH Gigabit Ethernet Driver main source file
++ *
++ * @version 0.90
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR CO., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 09/21/2009
++ * modified:
++ *
++ */
++
++#include <linux/pci.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/ethtool.h>
++#include <linux/mii.h>
++
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <net/ip.h>
++
++#include "pch_gbe_osdep.h"
++#include "pch_gbe_regs.h"
++#include "pch_gbe_defines.h"
++#include "pch_gbe_hw.h"
++#include "pch_gbe_api.h"
++#include "pch_gbe.h"
++
++/* ----------------------------------------------------------------------------
++ Function prototype
++---------------------------------------------------------------------------- */
++static int
++ioh_gbe_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id);
++static void ioh_gbe_remove(struct pci_dev *pdev);
++static int ioh_gbe_suspend(struct pci_dev *pdev, pm_message_t state);
++static int ioh_gbe_resume(struct pci_dev *pdev);
++static void ioh_gbe_shutdown(struct pci_dev *pdev);
++#ifdef CONFIG_NET_POLL_CONTROLLER
++static void ioh_gbe_netpoll(struct net_device *netdev);
++#endif
++static pci_ers_result_t
++ioh_gbe_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state);
++static pci_ers_result_t ioh_gbe_io_slot_reset(struct pci_dev *pdev);
++static void ioh_gbe_io_resume(struct pci_dev *pdev);
++
++static int ioh_gbe_init_module(void);
++static void ioh_gbe_exit_module(void);
++static int ioh_gbe_open(struct net_device *netdev);
++static int ioh_gbe_stop(struct net_device *netdev);
++static int ioh_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
++static struct net_device_stats *ioh_gbe_get_stats(struct net_device *netdev);
++static void ioh_gbe_set_multi(struct net_device *netdev);
++static int ioh_gbe_set_mac(struct net_device *netdev, void *p);
++static int ioh_gbe_change_mtu(struct net_device *netdev, int new_mtu);
++static int ioh_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
++static void ioh_gbe_tx_timeout(struct net_device *dev);
++
++static int ioh_gbe_sw_init(struct ioh_gbe_adapter *adapter);
++static int ioh_gbe_alloc_queues(struct ioh_gbe_adapter *adapter);
++static void ioh_gbe_init_stats(struct ioh_gbe_adapter *adapter);
++static int ioh_gbe_init_nvm(struct ioh_gbe_adapter *adapter);
++static int ioh_gbe_init_phy(struct ioh_gbe_adapter *adapter);
++static void ioh_gbe_reset_task(struct work_struct *work);
++static int ioh_gbe_request_irq(struct ioh_gbe_adapter *adapter);
++static void ioh_gbe_free_irq(struct ioh_gbe_adapter *adapter);
++static void ioh_gbe_irq_disable(struct ioh_gbe_adapter *adapter);
++static void ioh_gbe_irq_enable(struct ioh_gbe_adapter *adapter);
++static void ioh_gbe_clean_tx_ring(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_tx_ring *tx_ring);
++static void ioh_gbe_clean_rx_ring(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_rx_ring *rx_ring);
++static void
++ioh_gbe_unmap_and_free_tx_resource(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_buffer *buffer_info);
++static void
++ioh_gbe_unmap_and_free_rx_resource(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_buffer *buffer_info);
++
++static void ioh_gbe_setup_tctl(struct ioh_gbe_adapter *adapter);
++static void ioh_gbe_configure_tx(struct ioh_gbe_adapter *adapter);
++static void ioh_gbe_setup_rctl(struct ioh_gbe_adapter *adapter);
++static void ioh_gbe_configure_rx(struct ioh_gbe_adapter *adapter);
++static void ioh_gbe_set_rgmii_ctrl(struct ioh_gbe_adapter *adapter,
++ u16 speed, u16 duplex);
++static void ioh_gbe_set_mode(struct ioh_gbe_adapter *adapter,
++ u16 speed, u16 duplex);
++static void ioh_gbe_watchdog(unsigned long data);
++static irqreturn_t ioh_gbe_intr(int irq, void *data);
++static unsigned char ioh_gbe_clean_tx(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_tx_ring *tx_ring);
++static int ioh_gbe_napi_poll(struct napi_struct *napi, int budget);
++static unsigned char ioh_gbe_clean_rx(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_rx_ring *rx_ring,
++ int *work_done, int work_to_do);
++static void ioh_gbe_alloc_rx_buffers(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_rx_ring *rx_ring,
++ int cleaned_count);
++static void ioh_gbe_alloc_tx_buffers(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_tx_ring *tx_ring);
++static void ioh_gbe_tx_queue(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_tx_ring *tx_ring,
++ struct sk_buff *skb);
++
++/* ----------------------------------------------------------------------------
++ Data
++---------------------------------------------------------------------------- */
++/*!
++ * @ingroup PCI driver Layer
++ * @struct ioh_gbe_pcidev_id
++ * @brief PCI Device ID Table
++ * @remarks
++ * This is an instance of pci_device_id structure defined in linux/pci.h,
++ * and holds information of the PCI devices that are supported by this driver.
++ */
++static const struct pci_device_id ioh_gbe_pcidev_id[3] = {
++ {.vendor = PCI_VENDOR_ID_INTEL,
++ .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
++ .class_mask = (0xFFFF00)
++ },
++ /* required last entry */
++ {0}
++};
++
++/*!
++ * @ingroup PCI driver Layer
++ * @struct ioh_gbe_err_handler
++ * @brief Error handler Table
++ * @remarks
++ * This is an instance of pci_error_handlers structure defined in linux/pci.h,
++ * and holds information of the PCI devices that are supported by this driver.
++ */
++static struct pci_error_handlers ioh_gbe_err_handler = {
++ .error_detected = ioh_gbe_io_error_detected,
++ .slot_reset = ioh_gbe_io_slot_reset,
++ .resume = ioh_gbe_io_resume
++};
++
++/*!
++ * @ingroup PCI driver Layer
++ * @struct ioh_gbe_pcidev
++ * @brief Store the pointers of pci driver interfaces to kernel
++ */
++static struct pci_driver ioh_gbe_pcidev = {
++ .name = DRV_NAME,
++ .id_table = ioh_gbe_pcidev_id,
++ .probe = ioh_gbe_probe,
++ .remove = ioh_gbe_remove,
++ /* Power Management Hooks */
++#ifdef CONFIG_PM
++ .suspend = ioh_gbe_suspend,
++ .resume = ioh_gbe_resume,
++#endif
++ .shutdown = ioh_gbe_shutdown,
++ .err_handler = &ioh_gbe_err_handler
++};
++
++static int debug = IOH_GBE_NETIF_MSG_DEFAULT;
++static unsigned int copybreak __read_mostly = IOH_GBE_COPYBREAK_DEFAULT;
++
++/* ----------------------------------------------------------------------------
++ module
++---------------------------------------------------------------------------- */
++
++MODULE_DESCRIPTION(DRV_DESCRIPTION);
++MODULE_AUTHOR(DRV_COPYRIGHT);
++MODULE_LICENSE("GPL");
++MODULE_VERSION(DRV_VERSION);
++MODULE_DEVICE_TABLE(pci, ioh_gbe_pcidev_id);
++
++module_param(copybreak, uint, 0644);
++MODULE_PARM_DESC(copybreak,
++ "Maximum size of packet that is copied to a new buffer on receive");
++module_param(debug, int, 0);
++MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
++
++module_init(ioh_gbe_init_module);
++module_exit(ioh_gbe_exit_module);
++
++/* ----------------------------------------------------------------------------
++ Macro Function
++---------------------------------------------------------------------------- */
++#define IOH_GBE_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
++#define IOH_GBE_RX_DESC(R, i) IOH_GBE_GET_DESC(R, i, ioh_gbe_rx_desc)
++#define IOH_GBE_TX_DESC(R, i) IOH_GBE_GET_DESC(R, i, ioh_gbe_tx_desc)
++#define IOH_GBE_DESC_UNUSED(R) \
++ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
++ (R)->next_to_clean - (R)->next_to_use - 1)
++
++/* ----------------------------------------------------------------------------
++ Function
++---------------------------------------------------------------------------- */
++/* ----------------------------------------------------------------------------
++ PCI driver methods
++---------------------------------------------------------------------------- */
++
++static const struct net_device_ops ioh_gbe_netdev_ops = {
++ .ndo_open = ioh_gbe_open,
++ .ndo_stop = ioh_gbe_stop,
++ .ndo_start_xmit = ioh_gbe_xmit_frame,
++ .ndo_get_stats = ioh_gbe_get_stats,
++ .ndo_set_mac_address = ioh_gbe_set_mac,
++ .ndo_tx_timeout = ioh_gbe_tx_timeout,
++ .ndo_change_mtu = ioh_gbe_change_mtu,
++ .ndo_do_ioctl = ioh_gbe_ioctl,
++ .ndo_set_multicast_list = &ioh_gbe_set_multi,
++#ifdef CONFIG_NET_POLL_CONTROLLER
++ .ndo_poll_controller = ioh_gbe_netpoll,
++#endif
++};
++
++/*!
++ * @ingroup PCI driver method
++ * @fn static int ioh_gbe_probe(struct pci_dev *pdev,
++ * const struct pci_device_id *pci_id)
++ * @brief Device Initialization Routine
++ * @param pdev [INOUT] PCI device information struct
++ * @param pci_id [INOUT] Entry in ioh_gbe_pcidev_id
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks
++ * This function initializes an adapter identified by a pci_dev structure.
++ * The OS initialization, configuring of the adapter private structure,
++ * and a hardware reset occur.
++ */
++static int
++ioh_gbe_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
++{
++ struct net_device *netdev;
++ struct ioh_gbe_adapter *adapter;
++ unsigned long mmio_start;
++ unsigned long mmio_len;
++ static int cards_found;
++ int i, ret;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_probe");
++
++ cards_found = 0;
++ ret = pci_enable_device(pdev);
++ if (ret != 0)
++ return ret;
++ ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
++ if (ret != 0) {
++ ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
++ if (ret != 0) {
++ IOH_GBE_ERR
++ ("ERR: No usable DMA configuration, aborting\n");
++ goto err_disable_device;
++ }
++ }
++ ret = pci_request_regions(pdev, DRV_NAME);
++ if (ret != 0) {
++ IOH_GBE_ERR
++ ("ERR: Can't reserve PCI I/O and memory resources\n");
++ goto err_disable_device;
++ }
++ pci_set_master(pdev);
++
++ netdev = alloc_etherdev((int)sizeof(struct ioh_gbe_adapter));
++ if (!netdev) {
++ ret = -ENOMEM;
++ IOH_GBE_ERR
++ ("ERR: Can't allocates and sets up an Ethernet device\n");
++ goto err_release_pci;
++ }
++ SET_NETDEV_DEV(netdev, &pdev->dev);
++
++ pci_set_drvdata(pdev, netdev);
++ adapter = netdev_priv(netdev);
++ adapter->netdev = netdev;
++ adapter->pdev = pdev;
++ adapter->msg_enable = (1 << debug) - 1;
++ adapter->bd_number = cards_found;
++ adapter->hw.back = adapter;
++ mmio_start = pci_resource_start(pdev, IOH_GBE_PCI_BAR);
++ mmio_len = pci_resource_len(pdev, IOH_GBE_PCI_BAR);
++ adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
++ if (!adapter->hw.hw_addr) {
++ ret = -EIO;
++ DPRINTK(PROBE, ERR, "Can't ioremap\n");
++ goto err_free_netdev;
++ }
++
++ netdev->netdev_ops = &ioh_gbe_netdev_ops;
++
++ netdev->watchdog_timeo = IOH_GBE_WATCHDOG_PERIOD;
++ netif_napi_add(netdev, &adapter->napi,
++ ioh_gbe_napi_poll, IOH_GBE_RX_WEIGHT);
++ strncpy(netdev->name, pci_name(pdev), (int)sizeof(netdev->name) - 1);
++ netdev->mem_start = mmio_start;
++ netdev->mem_end = mmio_start + mmio_len;
++ netdev->features = NETIF_F_HW_CSUM;
++ ioh_gbe_set_ethtool_ops(netdev);
++
++ /* setup the private structure */
++ ret = ioh_gbe_sw_init(adapter);
++ if (ret != 0)
++ goto err_iounmap;
++
++ ioh_gbe_hal_reset_hw(&adapter->hw);
++ /* Initialize PHY */
++ ret = ioh_gbe_init_phy(adapter);
++ if (ret != 0) {
++ DPRINTK(PROBE, ERR, "PHY initialize error\n");
++ goto err_free_adapter;
++ }
++
++ ioh_gbe_hal_get_bus_info(&adapter->hw);
++
++ /* Initialize NVM */
++ ret = ioh_gbe_init_nvm(adapter);
++ if (ret != 0) {
++ DPRINTK(PROBE, ERR, "NVM initialize error\n");
++ goto err_free_adapter;
++ }
++
++#ifdef CONFIG_PCH_PCIEQOS
++ /* Read the MAC address and store it in the private data */
++ ret = ioh_gbe_hal_read_mac_addr(&adapter->hw);
++ if (ret != 0) {
++ DPRINTK(PROBE, ERR, "MAC address Read Error\n");
++ goto err_free_adapter;
++ }
++#endif
++ memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
++ if (!is_valid_ether_addr(netdev->dev_addr)) {
++ DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
++ ret = -EIO;
++ goto err_free_adapter;
++ }
++
++ init_timer(&adapter->watchdog_timer);
++ adapter->watchdog_timer.function = &ioh_gbe_watchdog;
++ adapter->watchdog_timer.data = (unsigned long)adapter;
++
++ INIT_WORK(&adapter->reset_task, ioh_gbe_reset_task);
++
++ ioh_gbe_check_options(adapter);
++
++ if (adapter->tx_csum != 0)
++ netdev->features |= NETIF_F_HW_CSUM;
++ else
++ netdev->features &= ~NETIF_F_HW_CSUM;
++
++ /* initialize the wol settings based on the eeprom settings */
++ adapter->wake_up_evt = IOH_GBE_WL_INIT_SETTING;
++
++ /* print bus type/speed/width info */
++ {
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
++ ((hw->bus.type == ioh_gbe_bus_type_pcix) ? "-X" :
++ (hw->bus.type == ioh_gbe_bus_type_pci_express) ? " Express" :
++ ""),
++ ((hw->bus.speed == ioh_gbe_bus_speed_2500) ? "2.5Gb/s" :
++ (hw->bus.speed == ioh_gbe_bus_speed_133) ? "133MHz" :
++ (hw->bus.speed == ioh_gbe_bus_speed_120) ? "120MHz" :
++ (hw->bus.speed == ioh_gbe_bus_speed_100) ? "100MHz" :
++ (hw->bus.speed == ioh_gbe_bus_speed_66) ? "66MHz" :
++ (hw->bus.speed == ioh_gbe_bus_speed_33) ? "33MHz" :
++ ""),
++ ((hw->bus.width == ioh_gbe_bus_width_64) ? "64-bit" :
++ (hw->bus.width == ioh_gbe_bus_width_32) ? "32-bit" :
++ (hw->bus.width == ioh_gbe_bus_width_pcie_x4) ? "Width x4" :
++ (hw->bus.width == ioh_gbe_bus_width_pcie_x2) ? "Width x2" :
++ (hw->bus.width == ioh_gbe_bus_width_pcie_x1) ? "Width x1" :
++ ""));
++ }
++ for (i = 0; i < 6; i++)
++ printk(KERN_INFO "%2.2x%c",
++ netdev->dev_addr[i], i == 5 ? '\n' : ':');
++
++ /* reset the hardware with the new settings */
++ ioh_gbe_reset(adapter);
++
++ strcpy(netdev->name, "eth%d");
++ ret = register_netdev(netdev);
++ if (ret != 0)
++ goto err_free_adapter;
++ /* tell the stack to leave us alone until ioh_gbe_open() is called */
++ netif_carrier_off(netdev);
++ netif_stop_queue(netdev);
++
++ DPRINTK(PROBE, INFO, "OKIsemi(R) IOH Network Connection\n");
++
++ cards_found++;
++ device_set_wakeup_enable(&pdev->dev, 1);
++ return IOH_GBE_SUCCESS;
++
++err_free_adapter:
++ ioh_gbe_hal_phy_hw_reset(&adapter->hw);
++ dev_put(adapter->polling_netdev);
++ kfree(adapter->tx_ring);
++ kfree(adapter->rx_ring);
++ kfree(adapter->polling_netdev);
++err_iounmap:
++ iounmap(adapter->hw.hw_addr);
++err_free_netdev:
++ free_netdev(netdev);
++err_release_pci:
++ pci_release_regions(pdev);
++err_disable_device:
++ pci_disable_device(pdev);
++ return ret;
++}
++
++/*!
++ * @ingroup PCI driver method
++ * @fn static void ioh_gbe_remove(struct pci_dev *pdev)
++ * @brief Device Removal Routine
++ * @param pdev [INOUT] PCI device information struct
++ * @return None
++ * @remarks
++ * This function is called by the PCI subsystem to alert the driver
++ * that it should release a PCI device. This could be caused by a
++ * Hot-Plug event, or because the driver is going to be removed from
++ * memory.
++ */
++static void ioh_gbe_remove(struct pci_dev *pdev)
++{
++ struct net_device *netdev = pci_get_drvdata(pdev);
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ flush_scheduled_work();
++ unregister_netdev(netdev);
++ dev_put(adapter->polling_netdev);
++
++ ioh_gbe_hal_phy_hw_reset(&adapter->hw);
++
++ kfree(adapter->tx_ring);
++ kfree(adapter->rx_ring);
++ kfree(adapter->polling_netdev);
++
++ iounmap(adapter->hw.hw_addr);
++ pci_release_regions(pdev);
++ free_netdev(netdev);
++ pci_disable_device(pdev);
++}
++
++/*!
++ * @ingroup PCI driver method
++ * @fn static int ioh_gbe_suspend(struct pci_dev *pdev, pm_message_t state)
++ * @brief Device Suspend Routine
++ * @param pdev [INOUT] PCI device information struct
++ * @param state [IN] Power status
++ * @return IOH_GBE_SUCCESS or a negative value on failure
++ */
++static int ioh_gbe_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ struct net_device *netdev = pci_get_drvdata(pdev);
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ u32 wufc = adapter->wake_up_evt;
++ int retval = IOH_GBE_SUCCESS;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ netif_device_detach(netdev);
++
++ if (netif_running(netdev) != 0)
++ ioh_gbe_down(adapter);
++#ifdef CONFIG_PM
++ /* Implement our own version of pci_save_state(pdev) because pci-
++ * express adapters have 256-byte config spaces. */
++ retval = pci_save_state(pdev);
++ if (retval != 0) {
++ DPRINTK(PROBE, DEBUG, "pci_save_state failed\n");
++ return retval;
++ }
++#endif
++
++ if (wufc != 0) {
++ ioh_gbe_set_multi(netdev);
++ ioh_gbe_setup_rctl(adapter);
++ ioh_gbe_configure_rx(adapter);
++ ioh_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
++ hw->mac.link_duplex);
++ ioh_gbe_set_mode(adapter, hw->mac.link_speed,
++ hw->mac.link_duplex);
++ ioh_gbe_hal_set_wol_event(hw, wufc);
++ pci_disable_device(pdev);
++ retval = pci_set_power_state(pdev, PCI_D0);
++ if (retval)
++ DPRINTK(PROBE, DEBUG, "pci_set_power_state failed\n");
++ retval = pci_enable_wake(pdev, PCI_D0, 1);
++ if (retval)
++ DPRINTK(PROBE, DEBUG, "pci_enable_wake failed\n");
++ } else {
++ ioh_gbe_hal_power_down_phy(hw);
++ ioh_gbe_hal_set_wol_event(hw, wufc);
++ pci_disable_device(pdev);
++ pci_enable_wake(pdev, PCI_D0, 0);
++ pci_set_power_state(pdev, pci_choose_state(pdev, state));
++ }
++ return retval;
++}
++
++#ifdef CONFIG_PM
++/*!
++ * @ingroup PCI driver method
++ * @fn static int ioh_gbe_resume(struct pci_dev *pdev)
++ * @brief Device Resume Routine
++ * @param pdev [INOUT] PCI device information struct
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return negative: Failed
++ */
++static int ioh_gbe_resume(struct pci_dev *pdev)
++{
++ struct net_device *netdev = pci_get_drvdata(pdev);
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ u32 err;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ pci_enable_wake(pdev, PCI_D0, 0);
++ pci_set_power_state(pdev, PCI_D0);
++ pci_restore_state(pdev);
++ err = pci_enable_device(pdev);
++ if (err != 0) {
++ DPRINTK(PROBE, ERR, "Cannot enable PCI device from suspend\n");
++ return err;
++ }
++ pci_set_master(pdev);
++ ioh_gbe_hal_power_up_phy(hw);
++ ioh_gbe_reset(adapter);
++ /* Clear wake on lan control and status */
++ ioh_gbe_hal_set_wol_event(hw, 0);
++
++ if (netif_running(netdev) != 0)
++ ioh_gbe_up(adapter);
++ netif_device_attach(netdev);
++
++ return IOH_GBE_SUCCESS;
++}
++#endif /* CONFIG_PM */
++
++/*!
++ * @ingroup PCI driver method
++ * @fn static void ioh_gbe_shutdown(struct pci_dev *pdev)
++ * @brief Device shutdown Routine
++ * @param pdev [INOUT] PCI device information struct
++ * @return None
++ * @remarks Delegates to ioh_gbe_suspend() with PMSG_SUSPEND.
++ */
++static void ioh_gbe_shutdown(struct pci_dev *pdev)
++{
++ struct net_device *netdev = pci_get_drvdata(pdev);
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++
++ DPRINTK(PROBE, DEBUG, "\n");
++ ioh_gbe_suspend(pdev, PMSG_SUSPEND);
++}
++
++/*!
++ * @ingroup PCI driver method
++ * @fn static pci_ers_result_t ioh_gbe_io_error_detected(
++ * struct pci_dev *pdev,
++ * pci_channel_state_t state)
++ * @brief Called when PCI error is detected
++ * @param pdev [INOUT] PCI device information struct
++ * @param state [IN] The current pci connection state
++ * @return PCI_ERS_RESULT_NEED_RESET
++ * @remarks
++ * This function is called after a PCI bus error affecting
++ * this device has been detected.
++ */
++static pci_ers_result_t
++ioh_gbe_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
++{
++ struct net_device *netdev = pci_get_drvdata(pdev);
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ netif_device_detach(netdev);
++
++ if (netif_running(netdev) != 0)
++ ioh_gbe_down(adapter);
++ pci_disable_device(pdev);
++
++ /* Request a slot reset. */
++ return PCI_ERS_RESULT_NEED_RESET;
++}
++
++/*!
++ * @ingroup PCI driver method
++ * @fn static pci_ers_result_t ioh_gbe_io_slot_reset(struct pci_dev *pdev)
++ * @brief Called after the pci bus has been reset.
++ * @param pdev [INOUT] PCI device information struct
++ * @return PCI_ERS_RESULT_DISCONNECT
++ * @return PCI_ERS_RESULT_RECOVERED
++ * @remarks
++ * Restart the card from scratch, as if from a cold-boot. Implementation
++ * resembles the first-half of the ioh_gbe_resume routine.
++ */
++static pci_ers_result_t ioh_gbe_io_slot_reset(struct pci_dev *pdev)
++{
++ struct net_device *netdev = pci_get_drvdata(pdev);
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++ struct ioh_gbe_hw *hw = &adapter->hw;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ if (pci_enable_device(pdev) != 0) {
++ DPRINTK(PROBE, ERR,
++ "Cannot re-enable PCI device after reset.\n");
++ return PCI_ERS_RESULT_DISCONNECT;
++ }
++ pci_set_master(pdev);
++ pci_enable_wake(pdev, PCI_D0, 0);
++ ioh_gbe_hal_power_up_phy(hw);
++ ioh_gbe_reset(adapter);
++ /* Clear wake up status */
++ ioh_gbe_hal_set_wol_event(hw, 0);
++
++ return PCI_ERS_RESULT_RECOVERED;
++}
++
++/*!
++ * @ingroup PCI driver method
++ * @fn static void ioh_gbe_io_resume(struct pci_dev *pdev)
++ * @brief Called when traffic can start flowing again.
++ * @param pdev [INOUT] PCI device information struct
++ * @return None
++ *
++ * @remarks
++ * This callback is called when the error recovery driver tells us that
++ * its OK to resume normal operation. Implementation resembles the
++ * second-half of the ioh_gbe_resume routine.
++ */
++static void ioh_gbe_io_resume(struct pci_dev *pdev)
++{
++ struct net_device *netdev = pci_get_drvdata(pdev);
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ if (netif_running(netdev) != 0) {
++ if (ioh_gbe_up(adapter) != 0) {
++ DPRINTK(PROBE, DEBUG,
++ "can't bring device back up after reset\n");
++ return;
++ }
++ }
++ netif_device_attach(netdev);
++}
++
++/* ----------------------------------------------------------------------------
++ Gigabit Ethernet driver methods
++---------------------------------------------------------------------------- */
++/*!
++ * @ingroup Gigabit Ethernet driver methods
++ * @fn static int __init ioh_gbe_init_module(void)
++ * @brief Driver Registration Routine
++ * @return IOH_GBE_SUCCESS Successfully
++ * @return negative value Failed
++ * @remarks
++ * ioh_gbe_init_module is the first routine called when the driver is
++ * loaded. All it does is register with the PCI subsystem.
++ */
++static int __init ioh_gbe_init_module(void)
++{
++ int ret;
++ IOH_GBE_DBGFUNC("ioh_gbe_init_module");
++ printk(KERN_INFO "%s - version %s\n", DRV_STRING, DRV_VERSION);
++
++ ret = pci_register_driver(&ioh_gbe_pcidev);
++ if (copybreak != IOH_GBE_COPYBREAK_DEFAULT) {
++ if (copybreak == 0) {
++ printk(KERN_INFO "ioh_gbe: copybreak disabled\n");
++ } else {
++ printk(KERN_INFO "ioh_gbe: copybreak enabled for "
++ "packets <= %u bytes\n", copybreak);
++ }
++ }
++ return ret;
++}
++
++/*!
++ * @ingroup Gigabit Ethernet driver methods
++ * @fn static void __exit ioh_gbe_exit_module(void)
++ * @brief Driver Exit Cleanup Routine
++ * @return None
++ * @remarks
++ * ioh_gbe_exit_module is called just before the driver is removed
++ * from memory.
++ */
++static void __exit ioh_gbe_exit_module(void)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_exit_module");
++ pci_unregister_driver(&ioh_gbe_pcidev);
++
++ printk(KERN_INFO "%s - unregister\n", DRV_STRING);
++}
++
++/*!
++ * @ingroup Gigabit Ethernet driver methods
++ * @fn static int ioh_gbe_open(struct net_device *netdev)
++ * @brief Called when a network interface is made active
++ * @param netdev [INOUT] network interface device structure
++ * @return IOH_GBE_SUCCESS - Successfully
++ * @return negative value - Failed
++ * @remarks
++ * The open entry point is called when a network interface is made
++ * active by the system (IFF_UP). At this point all resources needed
++ * for transmit and receive operations are allocated, the interrupt
++ * handler is registered with the OS, the watchdog timer is started,
++ * and the stack is notified that the interface is ready.
++ */
++static int ioh_gbe_open(struct net_device *netdev)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ int err;
++
++ DPRINTK(IFUP, DEBUG, "\n");
++
++ /* disallow open during test */
++ if ((test_bit(__IOH_GBE_TESTING, &adapter->flags)) != 0)
++ return -EBUSY;
++ /* allocate transmit descriptors */
++ err = ioh_gbe_setup_tx_resources(adapter, adapter->tx_ring);
++ if (err != 0)
++ goto err_setup_tx;
++ /* allocate receive descriptors */
++ err = ioh_gbe_setup_rx_resources(adapter, adapter->rx_ring);
++ if (err != 0)
++ goto err_setup_rx;
++ ioh_gbe_hal_power_up_phy(hw);
++ err = ioh_gbe_up(adapter);
++ if (err != 0)
++ goto err_up;
++ DPRINTK(PROBE, DEBUG, "Success End\n");
++ return IOH_GBE_SUCCESS;
++
++err_up:
++ if (!adapter->wake_up_evt)
++ ioh_gbe_hal_power_down_phy(hw);
++ ioh_gbe_free_rx_resources(adapter, adapter->rx_ring);
++err_setup_rx:
++ ioh_gbe_free_tx_resources(adapter, adapter->tx_ring);
++err_setup_tx:
++ ioh_gbe_reset(adapter);
++ DPRINTK(PROBE, ERR, "Error End\n");
++ return err;
++}
++
++/*!
++ * @ingroup Gigabit Ethernet driver methods
++ * @fn static int ioh_gbe_stop(struct net_device *netdev)
++ * @brief Disables a network interface
++ * @param netdev [INOUT] network interface device structure
++ * @return IOH_GBE_SUCCESS - Successfully (This is not allowed to fail)
++ * @remarks
++ * The close entry point is called when an interface is de-activated
++ * by the OS. The hardware is still under the drivers control, but
++ * needs to be disabled. A global MAC reset is issued to stop the
++ * hardware, and all transmit and receive resources are freed.
++ */
++static int ioh_gbe_stop(struct net_device *netdev)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++ struct ioh_gbe_hw *hw = &adapter->hw;
++
++ DPRINTK(IFDOWN, DEBUG, "\n");
++
++ ioh_gbe_down(adapter);
++ if (!adapter->wake_up_evt)
++ ioh_gbe_hal_power_down_phy(hw);
++ ioh_gbe_free_tx_resources(adapter, adapter->tx_ring);
++ ioh_gbe_free_rx_resources(adapter, adapter->rx_ring);
++
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup Gigabit Ethernet driver methods
++ * @fn static int ioh_gbe_xmit_frame(struct sk_buff *skb,
++ * struct net_device *netdev)
++ * @brief Packet transmitting start
++ * @param skb [INOUT] socket buffer structure
++ * @param netdev [INOUT] network interface device structure
++ * @return NETDEV_TX_OK: Frame queued or dropped (skb consumed)
++ * @return NETDEV_TX_BUSY/NETDEV_TX_LOCKED: Retry later (skb not consumed)
++ */
++static int ioh_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
++{
++	struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++	struct ioh_gbe_tx_ring *tx_ring = adapter->tx_ring;
++	unsigned long flags;
++
++	DPRINTK(PROBE, DEBUG, "\n");
++
++	if (unlikely(skb->len <= 0)) {
++		/* log before freeing: skb must not be read after free */
++		IOH_GBE_TESTOUT("Return : OK skb len : %d\n", skb->len);
++		dev_kfree_skb_any(skb);
++		return NETDEV_TX_OK;
++	}
++	if (unlikely(skb->len > (adapter->hw.mac.max_frame_size - 4))) {
++		DPRINTK(PROBE, ERR, "Transfer length Error: %d over %d\n",
++			skb->len, adapter->hw.mac.max_frame_size);
++		dev_kfree_skb_any(skb);
++		adapter->stats.tx_length_errors++;
++		return NETDEV_TX_OK;	/* skb freed: must not be requeued */
++	}
++	if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
++		/* Collision - tell upper layer to requeue */
++		return NETDEV_TX_LOCKED;
++	}
++	if (unlikely(!IOH_GBE_DESC_UNUSED(tx_ring))) {
++		netif_stop_queue(netdev);
++		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
++		IOH_GBE_TESTOUT
++		    ("Return : BUSY  next_to use : 0x%08x  "
++		     "next_to clean : 0x%08x\n",
++		     tx_ring->next_to_use, tx_ring->next_to_clean);
++		return NETDEV_TX_BUSY;
++	}
++	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
++
++	/* CRC,ITAG no support */
++	ioh_gbe_tx_queue(adapter, tx_ring, skb);
++	netdev->trans_start = jiffies;
++	return NETDEV_TX_OK;
++}
++
++/*!
++ * @ingroup Gigabit Ethernet driver methods
++ * @fn static struct net_device_stats *ioh_gbe_get_stats(
++ * struct net_device *netdev)
++ * @brief Get System Network Statistics
++ * @param netdev [INOUT] network interface device structure
++ * @return The current stats
++ * @remarks
++ * Returns the address of the device statistics structure.
++ * The statistics are actually updated from the timer callback.
++ */
++static struct net_device_stats *ioh_gbe_get_stats(struct net_device *netdev)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ /* only return the current stats */
++ return &adapter->net_stats;
++}
++
++/*!
++ * @ingroup Gigabit Ethernet driver methods
++ * @fn static void ioh_gbe_set_multi(struct net_device *netdev)
++ * @brief Multicast and Promiscuous mode set
++ * @param netdev [INOUT] network interface device structure
++ * @return None
++ * @remarks
++ * The set_multi entry point is called whenever the multicast address
++ * list or the network interface flags are updated. This routine is
++ * responsible for configuring the hardware for proper multicast,
++ * promiscuous mode, and all-multi behavior.
++ */
++static void ioh_gbe_set_multi(struct net_device *netdev)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ struct ioh_gbe_mac_info *mac = &hw->mac;
++ struct dev_mc_list *mc_ptr;
++ u8 *mta_list;
++ u32 rctl;
++ int i;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++ IOH_GBE_TESTOUT("netdev->flags : 0x%08x\n", netdev->flags);
++
++ /* Check for Promiscuous and All Multicast modes */
++ rctl = IOH_GBE_READ_REG(hw, RX_MODE);
++
++ if ((netdev->flags & IFF_PROMISC) != 0) {
++ rctl &= ~IOH_GBE_ADD_FIL_EN;
++ rctl &= ~IOH_GBE_MLT_FIL_EN;
++ } else if ((netdev->flags & IFF_ALLMULTI) != 0) {
++ /* all the multicasting receive permissions */
++ rctl |= IOH_GBE_ADD_FIL_EN;
++ rctl &= ~IOH_GBE_MLT_FIL_EN;
++ } else {
++ if (netdev->mc_count >= IOH_GBE_MAR_ENTRIES) {
++ /* all the multicasting receive permissions */
++ rctl |= IOH_GBE_ADD_FIL_EN;
++ rctl &= ~IOH_GBE_MLT_FIL_EN;
++ } else {
++ rctl |= (IOH_GBE_ADD_FIL_EN | IOH_GBE_MLT_FIL_EN);
++ }
++ }
++ IOH_GBE_WRITE_REG(hw, RX_MODE, rctl);
++
++ if (netdev->mc_count >= IOH_GBE_MAR_ENTRIES)
++ return;
++ mta_list = kmalloc(netdev->mc_count * ETH_ALEN, GFP_ATOMIC);
++ if (!mta_list)
++ return;
++ /* The shared function expects a packed array of only addresses. */
++ mc_ptr = netdev->mc_list;
++
++ for (i = 0; i < netdev->mc_count; i++) {
++ if (!mc_ptr)
++ break;
++ memcpy(mta_list + (i * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
++ mc_ptr = mc_ptr->next;
++ }
++
++ ioh_gbe_hal_mc_addr_list_update(hw, mta_list, i, 1,
++ mac->mar_entry_count);
++ kfree(mta_list);
++
++ IOH_GBE_TESTOUT
++ ("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x "
++ "netdev->mc_count : 0x%08x\n",
++ IOH_GBE_READ_REG(hw, RX_MODE), netdev->mc_count);
++}
++
++/*!
++ * @ingroup Gigabit Ethernet driver methods
++ * @fn static int ioh_gbe_set_mac(struct net_device *netdev, void *addr)
++ * @brief Change the Ethernet Address of the NIC
++ * @param netdev [INOUT] Network interface device structure
++ * @param addr [IN] Pointer to an address structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return -EADDRNOTAVAIL: Failed
++ */
++static int ioh_gbe_set_mac(struct net_device *netdev, void *addr)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++ struct sockaddr *skaddr = addr;
++ int ret_val;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ if (!is_valid_ether_addr(skaddr->sa_data)) {
++ ret_val = -EADDRNOTAVAIL;
++ } else {
++ memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
++ memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
++ ioh_gbe_hal_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
++ ret_val = IOH_GBE_SUCCESS;
++ }
++#ifdef DEBUG_TEST
++ IOH_GBE_TESTOUT("ret_val : 0x%08x\n", ret_val);
++ IOH_GBE_TESTOUT("dev_addr : %02x:%02x:%02x:%02x:%02x:%02x\n",
++ netdev->dev_addr[0], netdev->dev_addr[1],
++ netdev->dev_addr[2], netdev->dev_addr[3],
++ netdev->dev_addr[4], netdev->dev_addr[5]);
++ IOH_GBE_TESTOUT("mac_addr : %02x:%02x:%02x:%02x:%02x:%02x\n",
++ adapter->hw.mac.addr[0], adapter->hw.mac.addr[1],
++ adapter->hw.mac.addr[2], adapter->hw.mac.addr[3],
++ adapter->hw.mac.addr[4], adapter->hw.mac.addr[5]);
++ IOH_GBE_TESTOUT("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
++ IOH_GBE_READ_REG(&adapter->hw, MAC_ADR1A),
++ IOH_GBE_READ_REG(&adapter->hw, MAC_ADR1B));
++#endif
++ return ret_val;
++}
++
++/*!
++ * @ingroup Gigabit Ethernet driver methods
++ * @fn static int ioh_gbe_change_mtu(struct net_device *netdev,
++ * int new_mtu)
++ * @brief Change the Maximum Transfer Unit
++ * @param netdev [INOUT] Network interface device structure
++ * @param new_mtu [IN] New value for maximum frame size
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return -EINVAL: Failed
++ */
++static int ioh_gbe_change_mtu(struct net_device *netdev, int new_mtu)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++ int max_frame;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
++ if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
++ (max_frame > IOH_GBE_MAX_JUMBO_FRAME_SIZE)) {
++ DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
++ return -EINVAL;
++ }
++ if (max_frame <= IOH_GBE_FRAME_SIZE_2048)
++ adapter->rx_buffer_len = IOH_GBE_FRAME_SIZE_2048;
++ else if (max_frame <= IOH_GBE_FRAME_SIZE_4096)
++ adapter->rx_buffer_len = IOH_GBE_FRAME_SIZE_4096;
++ else if (max_frame <= IOH_GBE_FRAME_SIZE_8192)
++ adapter->rx_buffer_len = IOH_GBE_FRAME_SIZE_8192;
++ else
++ adapter->rx_buffer_len = IOH_GBE_MAX_JUMBO_FRAME_SIZE;
++ netdev->mtu = new_mtu;
++ adapter->hw.mac.max_frame_size = max_frame;
++
++ if (netif_running(netdev) != 0)
++ ioh_gbe_reinit_locked(adapter);
++ else
++ ioh_gbe_reset(adapter);
++
++ IOH_GBE_TESTOUT
++ ("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
++ max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
++ adapter->hw.mac.max_frame_size);
++
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup Gigabit Ethernet driver methods
++ * @fn static int ioh_gbe_ioctl(struct net_device *netdev,
++ * struct ifreq *ifr, int cmd)
++ * @brief Controls register through a MII interface
++ * @param netdev [INOUT] Network interface device structure
++ * @param ifr [IN] Pointer to ifr structure
++ * @param cmd [IN] Control command
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++static int ioh_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++
++ DPRINTK(PROBE, DEBUG, "\n");
++ IOH_GBE_TESTOUT("cmd : 0x%04x\n", cmd);
++
++ return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
++}
++
++/*!
++ * @ingroup Gigabit Ethernet driver methods
++ * @fn static void ioh_gbe_tx_timeout(struct net_device *netdev)
++ * @brief Respond to a Tx Hang
++ * @param netdev [INOUT] Network interface device structure
++ * @return None
++ */
++static void ioh_gbe_tx_timeout(struct net_device *netdev)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++
++ DPRINTK(TX_ERR, DEBUG, "\n");
++
++ /* Do the reset outside of interrupt context */
++ adapter->stats.tx_timeout_count++;
++ schedule_work(&adapter->reset_task);
++}
++
++/*!
++ * @ingroup Gigabit Ethernet driver methods
++ * @fn static int ioh_gbe_napi_poll(struct napi_struct *napi, int budget)
++ * @brief NAPI receive and transfer polling callback
++ * @param napi [INOUT] Pointer of polling device struct
++ * @param budget [IN] The maximum number of a packet
++ * @return The number of Rx packets processed (work_done); when less
++ * than budget, NAPI is completed and interrupts are re-enabled
++ */
++static int ioh_gbe_napi_poll(struct napi_struct *napi, int budget)
++{
++ struct ioh_gbe_adapter *adapter =
++ container_of(napi, struct ioh_gbe_adapter, napi);
++ struct net_device *netdev = adapter->netdev;
++ int work_done = 0;
++ int poll_end_flag = 0;
++ int cleaned = 0;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++ IOH_GBE_TESTOUT("budget : %d\n", budget);
++
++ /* Keep link state information with original netdev */
++ if (!netif_carrier_ok(netdev)) {
++ poll_end_flag = 1;
++ } else {
++ cleaned = ioh_gbe_clean_tx(adapter, adapter->tx_ring);
++ ioh_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
++
++ if (cleaned)
++ work_done = budget;
++ /* If no Tx and not enough Rx work done,
++ * exit the polling mode
++ */
++ if ((work_done < budget) || !netif_running(netdev))
++ poll_end_flag = 1;
++ }
++
++ if (poll_end_flag == 1) {
++ napi_complete(napi);
++ ioh_gbe_irq_enable(adapter);
++ }
++
++ IOH_GBE_TESTOUT("poll_end_flag : %d work_done : %d budget : %d\n",
++ poll_end_flag, work_done, budget);
++ return work_done;
++}
++
++#ifdef CONFIG_NET_POLL_CONTROLLER
++/*!
++ * @ingroup Gigabit Ethernet driver methods
++ * @fn static void ioh_gbe_netpoll(struct net_device *netdev)
++ * @brief Used by things like netconsole to send skbs
++ * @param netdev [INOUT] Network interface device structure
++ * @return None
++ * @remarks
++ * used by things like netconsole to send skbs
++ * without having to re-enable interrupts.
++ * It's not called while the interrupt routine is executing.
++ */
++static void ioh_gbe_netpoll(struct net_device *netdev)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ disable_irq(adapter->pdev->irq);
++ ioh_gbe_intr(adapter->pdev->irq, netdev);
++ enable_irq(adapter->pdev->irq);
++}
++#endif
++
++/* ----------------------------------------------------------------------------
++ Linux driver internal function
++---------------------------------------------------------------------------- */
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static int ioh_gbe_sw_init(struct ioh_gbe_adapter *adapter)
++ * @brief Initialize general software structures (struct ioh_gbe_adapter)
++ * @param adapter [INOUT] Board private structure to initialize
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks
++ * ioh_gbe_sw_init initializes the Adapter private data structure.
++ * Fields are initialized based on PCI device information and
++ * OS network device settings (MTU size).
++ */
++static int ioh_gbe_sw_init(struct ioh_gbe_adapter *adapter)
++{
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ struct net_device *netdev = adapter->netdev;
++ struct pci_dev *pdev = adapter->pdev;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ /* PCI config space info */
++ hw->vendor_id = pdev->vendor;
++ hw->device_id = pdev->device;
++ hw->subsystem_vendor_id = pdev->subsystem_vendor;
++ hw->subsystem_device_id = pdev->subsystem_device;
++
++ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
++
++ adapter->rx_buffer_len = IOH_GBE_FRAME_SIZE_2048;
++ hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
++ hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
++
++ /* Initialize the hardware-specific values */
++ if (ioh_gbe_hal_setup_init_funcs(hw) != 0) {
++ DPRINTK(PROBE, ERR, "Hardware Initialization Failure\n");
++ return -EIO;
++ }
++ if (ioh_gbe_alloc_queues(adapter) != 0) {
++ DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
++ return -ENOMEM;
++ }
++ dev_hold(adapter->polling_netdev);
++ set_bit(__LINK_STATE_START, &adapter->polling_netdev->state);
++
++ spin_lock_init(&adapter->hw.miim_lock);
++ spin_lock_init(&adapter->stats_lock);
++ atomic_set(&adapter->irq_sem, 0);
++ ioh_gbe_irq_disable(adapter);
++
++ ioh_gbe_init_stats(adapter);
++
++#ifdef DEBUG_TEST
++ IOH_GBE_TESTOUT("hw->vendor_id : 0x%08x\n", hw->vendor_id);
++ IOH_GBE_TESTOUT("hw->device_id : 0x%08x\n", hw->device_id);
++ IOH_GBE_TESTOUT("hw->subsystem_vendor_id :0x%08x\n",
++ hw->subsystem_vendor_id);
++ IOH_GBE_TESTOUT("hw->subsystem_device_id :0x%08x\n",
++ hw->subsystem_device_id);
++ IOH_GBE_TESTOUT("hw->revision_id :0x%08x\n", hw->revision_id);
++ IOH_GBE_TESTOUT("adapter->rx_buffer_len : %d\n",
++ (u32) adapter->rx_buffer_len);
++ IOH_GBE_TESTOUT("hw->mac.max_frame_size : %d\n",
++ hw->mac.max_frame_size);
++ IOH_GBE_TESTOUT("hw->mac.min_frame_size : %d\n",
++ hw->mac.min_frame_size);
++#endif
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static int ioh_gbe_alloc_queues(struct ioh_gbe_adapter *adapter)
++ * @brief Allocate memory for all rings
++ * @param adapter [INOUT] Board private structure to initialize
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks
++ * We allocate one ring per queue at run-time since we don't know the
++ * number of queues at compile-time. The polling_netdev array is
++ * intended for Multiqueue, but should work fine with a single queue.
++ */
++static int ioh_gbe_alloc_queues(struct ioh_gbe_adapter *adapter)
++{
++ int size;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ size = (int)sizeof(struct ioh_gbe_tx_ring);
++ adapter->tx_ring = kmalloc(size, GFP_KERNEL);
++ if (!adapter->tx_ring)
++ return -ENOMEM;
++ memset(adapter->tx_ring, 0, size);
++
++ size = (int)sizeof(struct ioh_gbe_rx_ring);
++ adapter->rx_ring = kmalloc(size, GFP_KERNEL);
++ if (!adapter->rx_ring) {
++ kfree(adapter->tx_ring);
++ return -ENOMEM;
++ }
++ memset(adapter->rx_ring, 0, size);
++
++ size = (int)sizeof(struct net_device);
++ adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
++ if (!adapter->polling_netdev) {
++ kfree(adapter->tx_ring);
++ kfree(adapter->rx_ring);
++ return -ENOMEM;
++ }
++ memset(adapter->polling_netdev, 0, size);
++
++#ifdef DEBUG_TEST
++ {
++ u32 *st_area, *sp_area;
++ st_area = (u32 *) adapter->tx_ring;
++ sp_area = (u32 *) (adapter->tx_ring +
++ (int)sizeof(struct ioh_gbe_tx_ring) - 1);
++ IOH_GBE_TESTOUT("tx_ring : 0x%08x - 0x%08x\n", *st_area,
++ *sp_area);
++ st_area = (u32 *) adapter->rx_ring;
++ sp_area = (u32 *) (adapter->rx_ring +
++ (int)sizeof(struct ioh_gbe_rx_ring) - 1);
++ IOH_GBE_TESTOUT("rx_ring : 0x%08x - 0x%08x\n", *st_area,
++ *sp_area);
++ st_area = (u32 *) adapter->polling_netdev;
++ sp_area = (u32 *) (adapter->polling_netdev +
++ (int)sizeof(struct net_device) - 1);
++ IOH_GBE_TESTOUT("polling_netdev : 0x%08x - 0x%08x\n",
++ *st_area, *sp_area);
++ }
++#endif
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static void ioh_gbe_init_stats(struct ioh_gbe_adapter *adapter)
++ * @brief Initialize status
++ * @param adapter [INOUT] Board private structure to initialize
++ * @return None
++ */
++static void ioh_gbe_init_stats(struct ioh_gbe_adapter *adapter)
++{
++ struct ioh_gbe_hw_stats *stats = &adapter->stats;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ stats->rx_packets = 0;
++ stats->tx_packets = 0;
++ stats->rx_bytes = 0;
++ stats->tx_bytes = 0;
++ stats->rx_errors = 0;
++ stats->tx_errors = 0;
++ stats->rx_dropped = 0;
++ stats->tx_dropped = 0;
++ stats->multicast = 0;
++ stats->collisions = 0;
++ stats->rx_crc_errors = 0;
++ stats->rx_frame_errors = 0;
++ stats->rx_alloc_buff_failed = 0;
++ stats->tx_length_errors = 0;
++ stats->tx_aborted_errors = 0;
++ stats->tx_carrier_errors = 0;
++ stats->tx_timeout_count = 0;
++ stats->tx_restart_count = 0;
++ stats->intr_rx_dsc_empty_count = 0;
++ stats->intr_rx_frame_err_count = 0;
++ stats->intr_rx_fifo_err_count = 0;
++ stats->intr_rx_dma_err_count = 0;
++ stats->intr_tx_fifo_err_count = 0;
++ stats->intr_tx_dma_err_count = 0;
++ stats->intr_tcpip_err_count = 0;
++ return;
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static int ioh_gbe_init_nvm(struct ioh_gbe_adapter *adapter)
++ * @brief Initialize NVM
++ * @param adapter [INOUT] Board private structure to initialize
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++static int ioh_gbe_init_nvm(struct ioh_gbe_adapter *adapter)
++{
++ DPRINTK(PROBE, DEBUG, "\n");
++
++#ifdef CONFIG_PCH_PCIEQOS
++ /* make sure the NVM is good */
++ if ((ioh_gbe_hal_validate_nvm_checksum(&adapter->hw) < 0)) {
++ DPRINTK(PROBE, ERR, "The NVM Checksum Is Not Valid\n");
++ return -EIO;
++ }
++#endif
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static int ioh_gbe_init_phy(struct ioh_gbe_adapter *adapter)
++ * @brief Initialize PHY
++ * @param adapter [INOUT] Board private structure to initialize
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++static int ioh_gbe_init_phy(struct ioh_gbe_adapter *adapter)
++{
++ struct net_device *netdev = adapter->netdev;
++ u32 addr;
++ u16 bmcr, stat;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
++ for (addr = 0; addr <= PHY_MAX_REG_ADDRESS; addr++) {
++ adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
++ bmcr = ioh_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
++ stat = ioh_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
++ stat = ioh_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
++ if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
++ break;
++ }
++ adapter->hw.phy.addr = adapter->mii.phy_id;
++ DPRINTK(PROBE, DEBUG, "phy_addr = %d\n", adapter->mii.phy_id);
++ if (addr == 32)
++ return -EAGAIN;
++ /* Selected the phy and isolate the rest */
++ for (addr = 0; addr <= PHY_MAX_REG_ADDRESS; addr++) {
++ if (addr != adapter->mii.phy_id) {
++ ioh_gbe_mdio_write(netdev, addr, MII_BMCR,
++ BMCR_ISOLATE);
++ } else {
++ bmcr = ioh_gbe_mdio_read(netdev, addr, MII_BMCR);
++ ioh_gbe_mdio_write(netdev, addr, MII_BMCR,
++ bmcr & ~BMCR_ISOLATE);
++ }
++ }
++
++ /* MII setup */
++ adapter->mii.phy_id_mask = 0x1F;
++ adapter->mii.reg_num_mask = 0x1F;
++ adapter->mii.dev = adapter->netdev;
++ adapter->mii.mdio_read = ioh_gbe_mdio_read;
++ adapter->mii.mdio_write = ioh_gbe_mdio_write;
++ adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn int ioh_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
++ * @brief The read function for mii
++ * @param netdev [INOUT] Network interface device structure
++ * @param addr [IN] Phy ID
++ * @param reg [IN] Access location
++ * @return The data read from the PHY register (the result of
++ * ioh_gbe_hal_ctrl_miim)
++ */
++int ioh_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++ struct ioh_gbe_hw *hw = &adapter->hw;
++
++ IOH_GBE_DBGOUT2("ioh_gbe_mdio_read\n");
++ return ioh_gbe_hal_ctrl_miim(hw, addr, IOH_GBE_HAL_MIIM_READ,
++ reg, (u16) 0);
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn void ioh_gbe_mdio_write(struct net_device *netdev,
++ * int addr, int reg, int data)
++ * @brief The write function for mii
++ * @param netdev [INOUT] Network interface device structure
++ * @param addr [IN] Phy ID
++ * @param reg [IN] Access location
++ * @param data [IN] Write data
++ * @return None
++ */
++void ioh_gbe_mdio_write(struct net_device *netdev, int addr, int reg, int data)
++{
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++ struct ioh_gbe_hw *hw = &adapter->hw;
++
++ IOH_GBE_DBGOUT2("ioh_gbe_mdio_write\n");
++ ioh_gbe_hal_ctrl_miim(hw, addr, IOH_GBE_HAL_MIIM_WRITE, reg, data);
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static void ioh_gbe_reset_task(struct work_struct *work)
++ * @brief Reset processing at the time of transmission timeout
++ * @param work [INOUT] Pointer of board private structure
++ * @return None
++ */
++static void ioh_gbe_reset_task(struct work_struct *work)
++{
++ struct ioh_gbe_adapter *adapter;
++ adapter = container_of(work, struct ioh_gbe_adapter, reset_task);
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ ioh_gbe_reinit_locked(adapter);
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn void ioh_gbe_reinit_locked(struct ioh_gbe_adapter *adapter)
++ * @brief Re-initialization
++ * @param adapter [INOUT] Board private structure
++ * @return None
++ */
++void ioh_gbe_reinit_locked(struct ioh_gbe_adapter *adapter)
++{
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ while ((test_and_set_bit(__IOH_GBE_RESETTING, &adapter->flags)) != 0)
++ msleep(1);
++ ioh_gbe_down(adapter);
++ ioh_gbe_up(adapter);
++ clear_bit(__IOH_GBE_RESETTING, &adapter->flags);
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn void ioh_gbe_reset(struct ioh_gbe_adapter *adapter)
++ * @brief Reset GbE
++ * @param adapter [INOUT] Board private structure
++ * @return None
++ */
++void ioh_gbe_reset(struct ioh_gbe_adapter *adapter)
++{
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ ioh_gbe_hal_reset_hw(&adapter->hw);
++
++ if (ioh_gbe_hal_init_hw(&adapter->hw) != 0)
++ DPRINTK(PROBE, ERR, "Hardware Error\n");
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static int ioh_gbe_request_irq(struct ioh_gbe_adapter *adapter)
++ * @brief Allocate an interrupt line
++ * @param adapter [INOUT] Board private structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++static int ioh_gbe_request_irq(struct ioh_gbe_adapter *adapter)
++{
++ struct net_device *netdev = adapter->netdev;
++ int err;
++ int flags;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ flags = IRQF_SHARED;
++ adapter->have_msi = FALSE;
++ err = pci_enable_msi(adapter->pdev);
++ IOH_GBE_DBGOUT("call pci_enable_msi\n");
++ if (err != 0) {
++ DPRINTK(PROBE, ERR,
++ "Unable to allocate MSI interrupt Error: %d\n", err);
++ } else {
++ flags = 0;
++ adapter->have_msi = TRUE;
++ }
++ err = request_irq(adapter->pdev->irq, &ioh_gbe_intr,
++ flags, netdev->name, netdev);
++ if (err != 0) {
++ DPRINTK(PROBE, ERR, "Unable to allocate interrupt Error: %d\n",
++ err);
++ }
++
++ IOH_GBE_TESTOUT
++ ("adapter->have_msi : %d flags : 0x%04x return : 0x%04x\n",
++ adapter->have_msi, flags, err);
++ return err;
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static void ioh_gbe_free_irq(struct ioh_gbe_adapter *adapter)
++ * @brief Free an interrupt
++ * @param adapter [INOUT] Board private structure
++ * @return None
++ */
++static void ioh_gbe_free_irq(struct ioh_gbe_adapter *adapter)
++{
++ struct net_device *netdev = adapter->netdev;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ free_irq(adapter->pdev->irq, netdev);
++ if (adapter->have_msi != 0) {
++ pci_disable_msi(adapter->pdev);
++ IOH_GBE_DBGOUT("call pci_disable_msi");
++ }
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static void ioh_gbe_irq_disable(struct ioh_gbe_adapter *adapter)
++ * @brief Mask off interrupt generation on the NIC
++ * @param adapter [INOUT] Board private structure
++ * @return None
++ */
++static void ioh_gbe_irq_disable(struct ioh_gbe_adapter *adapter)
++{
++ struct ioh_gbe_hw *hw = &adapter->hw;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ atomic_inc(&adapter->irq_sem);
++ IOH_GBE_WRITE_REG(hw, INT_EN, 0);
++ synchronize_irq(adapter->pdev->irq);
++
++ IOH_GBE_TESTOUT("INT_EN reg : 0x%08x\n", IOH_GBE_READ_REG(hw, INT_EN));
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static void ioh_gbe_irq_enable(struct ioh_gbe_adapter *adapter)
++ * @brief Enable default interrupt generation settings
++ * @param adapter [INOUT] Board private structure
++ * @return None
++ */
++static void ioh_gbe_irq_enable(struct ioh_gbe_adapter *adapter)
++{
++ struct ioh_gbe_hw *hw = &adapter->hw;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ if (likely(atomic_dec_and_test(&adapter->irq_sem)))
++ IOH_GBE_WRITE_REG(hw, INT_EN, IOH_GBE_INT_ENABLE_MASK);
++ IOH_GBE_TESTOUT("INT_EN reg : 0x%08x\n", IOH_GBE_READ_REG(hw, INT_EN));
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn int ioh_gbe_up(struct ioh_gbe_adapter *adapter)
++ * @brief Up GbE network device
++ * @param adapter [INOUT] Board private structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++int ioh_gbe_up(struct ioh_gbe_adapter *adapter)
++{
++ struct net_device *netdev = adapter->netdev;
++ struct ioh_gbe_tx_ring *tx_ring = adapter->tx_ring;
++ struct ioh_gbe_rx_ring *rx_ring = adapter->rx_ring;
++ int err;
++
++ DPRINTK(IFUP, DEBUG, "\n");
++
++ /* hardware has been reset, we need to reload some things */
++ ioh_gbe_set_multi(netdev);
++
++ ioh_gbe_setup_tctl(adapter);
++ ioh_gbe_configure_tx(adapter);
++ ioh_gbe_setup_rctl(adapter);
++ ioh_gbe_configure_rx(adapter);
++
++ err = ioh_gbe_request_irq(adapter);
++ if (err != 0) {
++ IOH_GBE_ERR("Error: can't bring device up\n");
++ return err;
++ }
++ ioh_gbe_alloc_tx_buffers(adapter, tx_ring);
++ ioh_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
++ adapter->tx_queue_len = netdev->tx_queue_len;
++
++ mod_timer(&adapter->watchdog_timer, jiffies);
++
++ napi_enable(&adapter->napi);
++ ioh_gbe_irq_enable(adapter);
++ netif_start_queue(adapter->netdev);
++
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn void ioh_gbe_down(struct ioh_gbe_adapter *adapter)
++ * @brief Down GbE network device
++ * @param adapter [INOUT] Board private structure
++ * @return None
++ */
++void ioh_gbe_down(struct ioh_gbe_adapter *adapter)
++{
++ struct net_device *netdev = adapter->netdev;
++
++ DPRINTK(IFDOWN, DEBUG, "\n");
++
++ /* signal that we're down so the interrupt handler does not
++ * reschedule our watchdog timer */
++ napi_disable(&adapter->napi);
++ atomic_set(&adapter->irq_sem, 0);
++
++ ioh_gbe_irq_disable(adapter);
++ ioh_gbe_free_irq(adapter);
++
++ del_timer_sync(&adapter->watchdog_timer);
++
++ netdev->tx_queue_len = adapter->tx_queue_len;
++ netif_carrier_off(netdev);
++ netif_stop_queue(netdev);
++
++ ioh_gbe_reset(adapter);
++ ioh_gbe_clean_tx_ring(adapter, adapter->tx_ring);
++ ioh_gbe_clean_rx_ring(adapter, adapter->rx_ring);
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn int ioh_gbe_setup_tx_resources(struct ioh_gbe_adapter *adapter,
++ * struct ioh_gbe_tx_ring *tx_ring)
++ * @brief Allocate Tx resources (Descriptors)
++ * @param adapter [INOUT] Board private structure
++ * @param tx_ring [OUT] Tx descriptor ring (for a specific queue) to setup
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++int
++ioh_gbe_setup_tx_resources(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_tx_ring *tx_ring)
++{
++ struct pci_dev *pdev = adapter->pdev;
++ struct ioh_gbe_tx_desc *tx_desc;
++ int size;
++ int desNo;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ size = (int)sizeof(struct ioh_gbe_buffer) * tx_ring->count;
++ tx_ring->buffer_info = vmalloc(size);
++ if (!tx_ring->buffer_info) {
++ DPRINTK(PROBE, ERR,
++ "Unable to allocate memory "
++ "for the buffer infomation\n");
++ return -ENOMEM;
++ }
++ memset(tx_ring->buffer_info, 0, size);
++
++ tx_ring->size = tx_ring->count * (int)sizeof(struct ioh_gbe_tx_desc);
++
++ tx_ring->desc =
++ pci_alloc_consistent(pdev, tx_ring->size, &tx_ring->dma);
++ if (!tx_ring->desc) {
++ vfree(tx_ring->buffer_info);
++ DPRINTK(PROBE, ERR,
++ "Unable to allocate memory "
++ "for the transmit descriptor ring\n");
++ return -ENOMEM;
++ }
++ memset(tx_ring->desc, 0, tx_ring->size);
++
++ tx_ring->next_to_use = 0;
++ tx_ring->next_to_clean = 0;
++ spin_lock_init(&tx_ring->tx_lock);
++
++ for (desNo = 0; desNo < tx_ring->count; desNo++) {
++ tx_desc = IOH_GBE_TX_DESC(*tx_ring, desNo);
++ tx_desc->gbec_status = DSC_INIT16;
++ }
++
++#ifdef DEBUG_TEST
++ IOH_GBE_TESTOUT("tx_ring->desc = 0x%08x tx_ring->dma = 0x%08x\n",
++ (u32) tx_ring->desc, tx_ring->dma);
++ IOH_GBE_TESTOUT("next_to_clean = 0x%08x next_to_use = 0x%08x\n",
++ tx_ring->next_to_clean, tx_ring->next_to_use);
++#endif
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn int ioh_gbe_setup_rx_resources(struct ioh_gbe_adapter *adapter,
++ * struct ioh_gbe_rx_ring *rx_ring)
++ * @brief Allocate Rx resources (Descriptors)
++ * @param adapter [INOUT] Board private structure
++ * @param rx_ring [OUT] Rx descriptor ring (for a specific queue) to setup
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++int
++ioh_gbe_setup_rx_resources(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_rx_ring *rx_ring)
++{
++ struct pci_dev *pdev = adapter->pdev;
++ struct ioh_gbe_rx_desc *rx_desc;
++ int size;
++ int desNo;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ size = (int)sizeof(struct ioh_gbe_buffer) * rx_ring->count;
++ rx_ring->buffer_info = vmalloc(size);
++ if (!rx_ring->buffer_info) {
++ DPRINTK(PROBE, ERR,
++ "Unable to allocate memory "
++ "for the receive descriptor ring\n");
++ return -ENOMEM;
++ }
++ memset(rx_ring->buffer_info, 0, size);
++
++ rx_ring->size = rx_ring->count * (int)sizeof(struct ioh_gbe_rx_desc);
++
++ rx_ring->desc =
++ pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
++
++ if (!rx_ring->desc) {
++ DPRINTK(PROBE, ERR,
++ "Unable to allocate memory "
++ "for the receive descriptor ring\n");
++ vfree(rx_ring->buffer_info);
++ return -ENOMEM;
++ }
++
++ memset(rx_ring->desc, 0, rx_ring->size);
++
++ rx_ring->next_to_clean = 0;
++ rx_ring->next_to_use = 0;
++
++ for (desNo = 0; desNo < rx_ring->count; desNo++) {
++ rx_desc = IOH_GBE_RX_DESC(*rx_ring, desNo);
++ rx_desc->gbec_status = DSC_INIT16;
++ }
++
++#ifdef DEBUG_TEST
++ IOH_GBE_TESTOUT("rx_ring->desc = 0x%08x rx_ring->dma = 0x%08x\n",
++ (u32) rx_ring->desc, rx_ring->dma);
++ IOH_GBE_TESTOUT("next_to_clean = 0x%08x next_to_use = 0x%08x\n",
++ rx_ring->next_to_clean, rx_ring->next_to_use);
++#endif
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn void ioh_gbe_free_tx_resources(struct ioh_gbe_adapter *adapter,
++ * struct ioh_gbe_tx_ring *tx_ring)
++ * @brief Free Tx Resources
++ * @param adapter [INOUT] Board private structure
++ * @param tx_ring [OUT] Tx descriptor ring for a specific queue
++ * @return None
++ * @remarks
++ * Free all transmit software resources
++ */
++void
++ioh_gbe_free_tx_resources(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_tx_ring *tx_ring)
++{
++ struct pci_dev *pdev = adapter->pdev;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ ioh_gbe_clean_tx_ring(adapter, tx_ring);
++ vfree(tx_ring->buffer_info);
++ tx_ring->buffer_info = NULL;
++ pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
++ tx_ring->desc = NULL;
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn void ioh_gbe_free_rx_resources(struct ioh_gbe_adapter *adapter,
++ * struct ioh_gbe_rx_ring *rx_ring)
++ * @brief Free Rx Resources
++ * @param adapter [INOUT] Board private structure
++ * @param rx_ring [OUT] ring to clean the resources from
++ * @return None
++ * @remarks
++ * Free all receive software resources
++ */
++void
++ioh_gbe_free_rx_resources(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_rx_ring *rx_ring)
++{
++ struct pci_dev *pdev = adapter->pdev;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ ioh_gbe_clean_rx_ring(adapter, rx_ring);
++ vfree(rx_ring->buffer_info);
++ rx_ring->buffer_info = NULL;
++ pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
++ rx_ring->desc = NULL;
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static void ioh_gbe_setup_tctl(struct ioh_gbe_adapter *adapter)
++ * @brief configure the Transmit control registers
++ * @param adapter [INOUT] Board private structure
++ * @return None
++ */
++static void ioh_gbe_setup_tctl(struct ioh_gbe_adapter *adapter)
++{
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ u32 tx_mode, tcpip;
++
++ DPRINTK(IFUP, DEBUG, "\n");
++
++ tx_mode = IOH_GBE_TM_LONG_PKT |
++ IOH_GBE_TM_ST_AND_FD |
++ IOH_GBE_TM_SHORT_PKT |
++ IOH_GBE_TM_TH_TX_STRT_8 |
++ IOH_GBE_TM_TH_ALM_EMP_4 | IOH_GBE_TM_TH_ALM_FULL_8;
++
++ IOH_GBE_WRITE_REG(hw, TX_MODE, tx_mode);
++
++ tcpip = IOH_GBE_READ_REG(hw, TCPIP_ACC);
++ tcpip |= IOH_GBE_TX_TCPIPACC_EN;
++ IOH_GBE_WRITE_REG(hw, TCPIP_ACC, tcpip);
++
++#ifdef DEBUG_TEST
++ IOH_GBE_TESTOUT("TX_MODE reg = 0x%08x TCPIP_ACC reg = 0x%08x\n",
++ IOH_GBE_READ_REG(hw, TX_MODE),
++ IOH_GBE_READ_REG(hw, TCPIP_ACC));
++#endif
++ return;
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static void ioh_gbe_configure_tx(struct ioh_gbe_adapter *adapter)
++ * @brief Configure Transmit Unit after Reset
++ * @param adapter [INOUT] Board private structure
++ * @return None
++ * @remarks
++ * Configure the Tx unit of the MAC after a reset.
++ */
++static void ioh_gbe_configure_tx(struct ioh_gbe_adapter *adapter)
++{
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ u32 tdba, tdlen, dctrl;
++
++ DPRINTK(IFUP, DEBUG, "\n");
++ IOH_GBE_TESTOUT("dma adr = 0x%08x size = 0x%08x\n",
++ adapter->tx_ring->dma, adapter->tx_ring->size);
++
++ /* Setup the HW Tx Head and Tail descriptor pointers */
++ tdba = adapter->tx_ring->dma;
++ tdlen = adapter->tx_ring->size - 0x10;
++ IOH_GBE_WRITE_REG(hw, TX_DSC_BASE, tdba);
++ IOH_GBE_WRITE_REG(hw, TX_DSC_SIZE, tdlen);
++ IOH_GBE_WRITE_REG(hw, TX_DSC_SW_P, tdba);
++
++ /* Enables Transmission DMA */
++ dctrl = IOH_GBE_READ_REG(hw, DMA_CTRL);
++ dctrl |= IOH_GBE_TX_DMA_EN;
++ IOH_GBE_WRITE_REG(hw, DMA_CTRL, dctrl);
++
++#ifdef DEBUG_TEST
++ IOH_GBE_TESTOUT
++ ("BASE = 0x%08x HW_P = 0x%08x SIZE = 0x%08x SW_P = 0x%08x\n",
++ IOH_GBE_READ_REG(hw, TX_DSC_BASE),
++ IOH_GBE_READ_REG(hw, TX_DSC_HW_P),
++ IOH_GBE_READ_REG(hw, TX_DSC_SIZE),
++ IOH_GBE_READ_REG(hw, TX_DSC_SW_P));
++ IOH_GBE_TESTOUT("DMA_CTRL reg[bit0] = 0x%08x\n",
++ IOH_GBE_READ_REG(hw, DMA_CTRL));
++#endif
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static void ioh_gbe_setup_rctl(struct ioh_gbe_adapter *adapter)
++ * @brief Configure the receive control registers
++ * @param adapter [INOUT] Board private structure
++ * @return None
++ * @remarks
++ * Configure the Tx unit of the MAC after a reset.
++ */
++static void ioh_gbe_setup_rctl(struct ioh_gbe_adapter *adapter)
++{
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ u32 rx_mode, tcpip;
++
++ DPRINTK(IFUP, DEBUG, "\n");
++
++ rx_mode = IOH_GBE_ADD_FIL_EN | IOH_GBE_MLT_FIL_EN |
++ IOH_GBE_RH_ALM_EMP_4 | IOH_GBE_RH_ALM_FULL_4 | IOH_GBE_RH_RD_TRG_8;
++
++ IOH_GBE_WRITE_REG(hw, RX_MODE, rx_mode);
++
++ tcpip = IOH_GBE_READ_REG(hw, TCPIP_ACC);
++
++ if (adapter->rx_csum == TRUE) {
++ tcpip &= ~IOH_GBE_RX_TCPIPACC_OFF;
++ tcpip |= IOH_GBE_RX_TCPIPACC_EN;
++ } else {
++ tcpip |= IOH_GBE_RX_TCPIPACC_OFF;
++ tcpip &= ~IOH_GBE_RX_TCPIPACC_EN;
++ }
++ IOH_GBE_WRITE_REG(hw, TCPIP_ACC, tcpip);
++
++#ifdef DEBUG_TEST
++ IOH_GBE_TESTOUT("RX_MODE reg = 0x%08x TCPIP_ACC reg = 0x%08x\n",
++ IOH_GBE_READ_REG(hw, RX_MODE),
++ IOH_GBE_READ_REG(hw, TCPIP_ACC));
++#endif
++ return;
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static void ioh_gbe_configure_rx(struct ioh_gbe_adapter *adapter)
++ * @brief Configure Receive Unit after Reset
++ * @param adapter [INOUT] Board private structure
++ * @return None
++ * @remarks
++ * Configure the Rx unit of the MAC after a reset.
++ */
++static void ioh_gbe_configure_rx(struct ioh_gbe_adapter *adapter)
++{
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ u32 rdba, rdlen, rctl, rxdma;
++
++ DPRINTK(IFUP, DEBUG, "\n");
++ IOH_GBE_TESTOUT("dma adr = 0x%08x size = 0x%08x\n",
++ adapter->rx_ring->dma, adapter->rx_ring->size);
++
++ ioh_gbe_hal_force_mac_fc(hw);
++
++ /* Disables Receive MAC */
++ rctl = IOH_GBE_READ_REG(hw, MAC_RX_EN);
++ IOH_GBE_WRITE_REG(hw, MAC_RX_EN, (rctl & ~IOH_GBE_MRE_MAC_RX_EN));
++
++ /* Disables Receive DMA */
++ rxdma = IOH_GBE_READ_REG(hw, DMA_CTRL);
++ rxdma &= ~IOH_GBE_RX_DMA_EN;
++ IOH_GBE_WRITE_REG(hw, DMA_CTRL, rxdma);
++
++ IOH_GBE_TESTOUT("MAC_RX_EN reg = 0x%08x DMA_CTRL reg = 0x%08x\n",
++ IOH_GBE_READ_REG(hw, MAC_RX_EN),
++ IOH_GBE_READ_REG(hw, DMA_CTRL));
++
++ /* Setup the HW Rx Head and Tail Descriptor Pointers and
++ * the Base and Length of the Rx Descriptor Ring */
++ rdba = adapter->rx_ring->dma;
++ rdlen = adapter->rx_ring->size - 0x10;
++ IOH_GBE_WRITE_REG(hw, RX_DSC_BASE, rdba);
++ IOH_GBE_WRITE_REG(hw, RX_DSC_SIZE, rdlen);
++ IOH_GBE_WRITE_REG(hw, RX_DSC_SW_P, rdba + rdlen);
++
++ /* Enables Receive DMA */
++ rxdma = IOH_GBE_READ_REG(hw, DMA_CTRL);
++ rxdma |= IOH_GBE_RX_DMA_EN;
++ IOH_GBE_WRITE_REG(hw, DMA_CTRL, rxdma);
++ /* Enables Receive */
++ IOH_GBE_WRITE_REG(hw, MAC_RX_EN, IOH_GBE_MRE_MAC_RX_EN);
++
++#ifdef DEBUG_TEST
++ IOH_GBE_TESTOUT
++ ("BASE = 0x%08x HW_P = 0x%08x SIZE = 0x%08x SW_P = 0x%08x\n",
++ IOH_GBE_READ_REG(hw, RX_DSC_BASE),
++ IOH_GBE_READ_REG(hw, RX_DSC_HW_P),
++ IOH_GBE_READ_REG(hw, RX_DSC_SIZE),
++ IOH_GBE_READ_REG(hw, RX_DSC_SW_P));
++ IOH_GBE_TESTOUT("MAC_RX_EN reg = 0x%08x DMA_CTRL reg = 0x%08x\n",
++ IOH_GBE_READ_REG(hw, MAC_RX_EN),
++ IOH_GBE_READ_REG(hw, DMA_CTRL));
++#endif
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static void ioh_gbe_unmap_and_free_tx_resource(
++ * struct ioh_gbe_adapter *adapter,
++ * struct ioh_gbe_buffer *buffer_info)
++ * @brief Unmap and free tx socket buffer
++ * @param adapter [INOUT] Board private structure
++ * @param buffer_info [OUT] Buffer information structure
++ * @return None
++ */
++static void
++ioh_gbe_unmap_and_free_tx_resource(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_buffer *buffer_info)
++{
++ IOH_GBE_DBGOUT2("ioh_gbe_unmap_and_free_tx_resource\n");
++
++ if (buffer_info->dma != 0) {
++ pci_unmap_page(adapter->pdev, buffer_info->dma,
++ buffer_info->length, PCI_DMA_TODEVICE);
++ buffer_info->dma = 0;
++ }
++ if (buffer_info->skb != 0) {
++ dev_kfree_skb_any(buffer_info->skb);
++ buffer_info->skb = NULL;
++ }
++ if (buffer_info->kernel_skb != 0) {
++ dev_kfree_skb_any(buffer_info->kernel_skb);
++ buffer_info->kernel_skb = NULL;
++ }
++ IOH_GBE_TESTOUT2
++ ("buffer_info->dma : 0x%08x buffer_info->skb : 0x%08x\n",
++ buffer_info->dma, buffer_info->skb);
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static void ioh_gbe_unmap_and_free_rx_resource(
++ * struct ioh_gbe_adapter *adapter,
++ * struct ioh_gbe_buffer *buffer_info)
++ * @brief Unmap and free rx socket buffer
++ * @param adapter [INOUT] Board private structure
++ * @param buffer_info [OUT] Buffer information structure
++ * @return None
++ */
++static void
++ioh_gbe_unmap_and_free_rx_resource(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_buffer *buffer_info)
++{
++ IOH_GBE_DBGOUT2("ioh_gbe_unmap_and_free_rx_resource\n");
++
++ if (buffer_info->dma != 0) {
++ pci_unmap_single(adapter->pdev, buffer_info->dma,
++ buffer_info->length, PCI_DMA_FROMDEVICE);
++ buffer_info->dma = 0;
++ }
++ if (buffer_info->skb != 0) {
++ dev_kfree_skb_any(buffer_info->skb);
++ buffer_info->skb = NULL;
++ }
++ IOH_GBE_TESTOUT2
++ ("buffer_info->dma : 0x%08x buffer_info->skb : 0x%08x\n",
++ buffer_info->dma, buffer_info->skb);
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static void ioh_gbe_clean_tx_ring(struct ioh_gbe_adapter *adapter,
++ * struct ioh_gbe_tx_ring *tx_ring)
++ * @brief Free Tx Buffers
++ * @param adapter [INOUT] Board private structure
++ * @param tx_ring [OUT] Ring to be cleaned
++ * @return None
++ */
++static void
++ioh_gbe_clean_tx_ring(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_tx_ring *tx_ring)
++{
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ struct ioh_gbe_buffer *buffer_info;
++ unsigned long size;
++ unsigned int i;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ /* Free all the Tx ring sk_buffs */
++ for (i = 0; i < tx_ring->count; i++) {
++ buffer_info = &tx_ring->buffer_info[i];
++ ioh_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
++ }
++ IOH_GBE_TESTOUT("call ioh_gbe_unmap_and_free_tx_resource() %d count\n",
++ i);
++
++ size = (unsigned long)sizeof(struct ioh_gbe_buffer) * tx_ring->count;
++ memset(tx_ring->buffer_info, 0, size);
++
++ /* Zero out the descriptor ring */
++ memset(tx_ring->desc, 0, tx_ring->size);
++
++ tx_ring->next_to_use = 0;
++ tx_ring->next_to_clean = 0;
++
++ IOH_GBE_WRITE_REG(hw, TX_DSC_HW_P, tx_ring->dma);
++ IOH_GBE_WRITE_REG(hw, TX_DSC_SIZE, (tx_ring->size - 0x10));
++
++#ifdef DEBUG_TEST
++ IOH_GBE_TESTOUT("next_to_use : %d next_to_clean : %d\n",
++ tx_ring->next_to_use, tx_ring->next_to_clean);
++ IOH_GBE_TESTOUT("TX_DSC_HW_P reg : 0x%08x TX_DSC_SIZE reg : 0x%08x\n",
++ IOH_GBE_READ_REG(hw, TX_DSC_HW_P),
++ IOH_GBE_READ_REG(hw, TX_DSC_SIZE));
++#endif
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static void ioh_gbe_clean_rx_ring(struct ioh_gbe_adapter *adapter,
++ * struct ioh_gbe_rx_ring *rx_ring)
++ * @brief Free Rx Buffers
++ * @param adapter [INOUT] Board private structure
++ * @param rx_ring [OUT] Ring to free buffers from
++ * @return None
++ */
++static void
++ioh_gbe_clean_rx_ring(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_rx_ring *rx_ring)
++{
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ struct ioh_gbe_buffer *buffer_info;
++ unsigned long size;
++ unsigned int i;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ /* Free all the Rx ring sk_buffs */
++ for (i = 0; i < rx_ring->count; i++) {
++ buffer_info = &rx_ring->buffer_info[i];
++ ioh_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
++ }
++ IOH_GBE_TESTOUT("call ioh_gbe_unmap_and_free_rx_resource() %d count\n",
++ i);
++
++ size = (unsigned long)sizeof(struct ioh_gbe_buffer) * rx_ring->count;
++ memset(rx_ring->buffer_info, 0, size);
++
++ /* Zero out the descriptor ring */
++ memset(rx_ring->desc, 0, rx_ring->size);
++
++ rx_ring->next_to_clean = 0;
++ rx_ring->next_to_use = 0;
++
++ IOH_GBE_WRITE_REG(hw, RX_DSC_HW_P, rx_ring->dma);
++ IOH_GBE_WRITE_REG(hw, RX_DSC_SIZE, (rx_ring->size - 0x10));
++#ifdef DEBUG_TEST
++ IOH_GBE_TESTOUT("next_to_use : %d next_to_clean : %d\n",
++ rx_ring->next_to_use, rx_ring->next_to_clean);
++ IOH_GBE_TESTOUT("RX_DSC_HW_P reg : 0x%08x RX_DSC_SIZE reg : 0x%08x\n",
++ IOH_GBE_READ_REG(hw, RX_DSC_HW_P),
++ IOH_GBE_READ_REG(hw, RX_DSC_SIZE));
++#endif
++}
++
++static void
++ioh_gbe_set_rgmii_ctrl(struct ioh_gbe_adapter *adapter, u16 speed, u16 duplex)
++{
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ unsigned long rgmii = 0;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++ /* Set the RGMII control. */
++#ifdef IOH_GBE_MAC_IFOP_RGMII
++ switch (speed) {
++ case SPEED_10:
++ rgmii = (IOH_GBE_RGMII_RATE_2_5M |
++ IOH_GBE_MAC_RGMII_CTRL_SETTING);
++ break;
++ case SPEED_100:
++ rgmii = (IOH_GBE_RGMII_RATE_25M |
++ IOH_GBE_MAC_RGMII_CTRL_SETTING);
++ break;
++ case SPEED_1000:
++ rgmii = (IOH_GBE_RGMII_RATE_125M |
++ IOH_GBE_MAC_RGMII_CTRL_SETTING);
++ break;
++ }
++ IOH_GBE_WRITE_REG(hw, RGMII_CTRL, rgmii);
++#else /* GMII */
++ rgmii = 0;
++ IOH_GBE_WRITE_REG(hw, RGMII_CTRL, rgmii);
++#endif
++}
++static void
++ioh_gbe_set_mode(struct ioh_gbe_adapter *adapter, u16 speed, u16 duplex)
++{
++ struct net_device *netdev = adapter->netdev;
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ unsigned long mode = 0;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++ /* Set the communication mode */
++ switch (speed) {
++ case SPEED_10:
++ mode = IOH_GBE_MODE_MII_ETHER;
++ netdev->tx_queue_len = 10;
++ break;
++ case SPEED_100:
++ mode = IOH_GBE_MODE_MII_ETHER;
++ netdev->tx_queue_len = 100;
++ break;
++ case SPEED_1000:
++ mode = IOH_GBE_MODE_GMII_ETHER;
++ break;
++ }
++ if (duplex == DUPLEX_FULL)
++ mode |= IOH_GBE_MODE_FULL_DUPLEX;
++ else
++ mode |= IOH_GBE_MODE_HALF_DUPLEX;
++ IOH_GBE_WRITE_REG(hw, MODE, mode);
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static void ioh_gbe_watchdog(unsigned long data)
++ * @brief Watchdog process
++ * @param data [INOUT] Board private structure
++ * @return None
++ */
++static void ioh_gbe_watchdog(unsigned long data)
++{
++ struct ioh_gbe_adapter *adapter = (struct ioh_gbe_adapter *)data;
++ struct net_device *netdev = adapter->netdev;
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ struct ethtool_cmd cmd;
++
++ DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);
++
++ ioh_gbe_update_stats(adapter);
++ if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
++ netdev->tx_queue_len = adapter->tx_queue_len;
++ /* mii library handles link maintenance tasks */
++ if (mii_ethtool_gset(&adapter->mii, &cmd) != 0) {
++ DPRINTK(PROBE, ERR, "ethtool get setting Error\n");
++ mod_timer(&adapter->watchdog_timer,
++ round_jiffies(jiffies +
++ IOH_GBE_WATCHDOG_PERIOD));
++ return;
++ }
++ hw->mac.link_speed = cmd.speed;
++ hw->mac.link_duplex = cmd.duplex;
++ /* Set the RGMII control. */
++ ioh_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
++ hw->mac.link_duplex);
++ /* Set the communication mode */
++ ioh_gbe_set_mode(adapter, hw->mac.link_speed,
++ hw->mac.link_duplex);
++ DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s-Duplex\n",
++ cmd.speed, cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
++ netif_carrier_on(netdev);
++ netif_wake_queue(netdev);
++ } else if ((!mii_link_ok(&adapter->mii)) &&
++ (netif_carrier_ok(netdev))) {
++ DPRINTK(LINK, INFO, "NIC Link is Down\n");
++ hw->mac.link_speed = SPEED_10;
++ hw->mac.link_duplex = DUPLEX_HALF;
++ netif_carrier_off(netdev);
++ netif_stop_queue(netdev);
++ }
++ mod_timer(&adapter->watchdog_timer,
++ round_jiffies(jiffies + IOH_GBE_WATCHDOG_PERIOD));
++#ifdef DEBUG_TEST
++ IOH_GBE_TESTOUT
++ ("RGMII_CTRL reg : 0x%08x RGMII_ST reg : 0x%08x "
++ " MODE reg : 0x%08x\n",
++ IOH_GBE_READ_REG(hw, RGMII_CTRL),
++ IOH_GBE_READ_REG(hw, RGMII_ST),
++ IOH_GBE_READ_REG(hw, MODE));
++ IOH_GBE_TESTOUT
++ ("link_speed : %d link_duplex : %d tx_queue_len : %d\n",
++ hw->mac.link_speed, hw->mac.link_duplex,
++ (u32) netdev->tx_queue_len);
++#endif
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static void ioh_gbe_tx_queue(struct ioh_gbe_adapter *adapter,
++ * struct ioh_gbe_tx_ring *tx_ring, struct sk_buff *skb)
++ * @brief Carry out queuing of the transmission data
++ * @param adapter [INOUT] Board private structure
++ * @param tx_ring [OUT] Tx descriptor ring structure
++ * @param skb [IN] Sockt buffer structure
++ * @return None
++ */
++static void
++ioh_gbe_tx_queue(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_tx_ring *tx_ring, struct sk_buff *skb)
++{
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ struct ioh_gbe_tx_desc *tx_desc;
++ struct ioh_gbe_buffer *buffer_info;
++ struct sk_buff *tmp_skb;
++ unsigned int frame_ctrl;
++ unsigned int ring_num;
++ unsigned long flags;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ /*-- Set frame control --*/
++ frame_ctrl = 0;
++ if (unlikely(skb->len < IOH_GBE_SHORT_PKT))
++ frame_ctrl |= IOH_GBE_TXD_CTRL_APAD;
++ if (unlikely(adapter->tx_csum == FALSE))
++ frame_ctrl |= IOH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
++
++ /* Performs checksum processing */
++ /*
++ * It is because the hardware accelerator does not support a checksum,
++ * when the received data size is less than 64 bytes.
++ */
++ if ((skb->len < IOH_GBE_SHORT_PKT) && (adapter->tx_csum == TRUE)) {
++ frame_ctrl |=
++ IOH_GBE_TXD_CTRL_APAD | IOH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
++ if (skb->protocol == htons(ETH_P_IP)) {
++ struct iphdr *iph = ip_hdr(skb);
++ unsigned int offset;
++ iph->check = 0;
++ iph->check = ip_fast_csum((u8 *) iph, iph->ihl);
++ offset = skb_transport_offset(skb);
++ if (iph->protocol == IPPROTO_TCP) {
++ skb->csum = 0;
++ tcp_hdr(skb)->check = 0;
++ skb->csum =
++ skb_checksum(skb, offset,
++ skb->len - offset, 0);
++ tcp_hdr(skb)->check =
++ csum_tcpudp_magic(iph->saddr,
++ iph->daddr,
++ skb->len - offset,
++ IPPROTO_TCP, skb->csum);
++ } else if (iph->protocol == IPPROTO_UDP) {
++ skb->csum = 0;
++ udp_hdr(skb)->check = 0;
++ skb->csum =
++ skb_checksum(skb, offset,
++ skb->len - offset, 0);
++ udp_hdr(skb)->check =
++ csum_tcpudp_magic(iph->saddr,
++ iph->daddr,
++ skb->len - offset,
++ IPPROTO_UDP, skb->csum);
++ }
++ }
++ }
++
++ spin_lock_irqsave(&tx_ring->tx_lock, flags);
++ ring_num = tx_ring->next_to_use;
++ if (unlikely((ring_num + 1) == tx_ring->count))
++ tx_ring->next_to_use = 0;
++ else
++ tx_ring->next_to_use = ring_num + 1;
++
++ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
++ buffer_info = &tx_ring->buffer_info[ring_num];
++ tmp_skb = buffer_info->skb;
++
++ /* [Header:14][payload] ---> [Header:14][paddong:2][payload] */
++ memcpy(tmp_skb->data, skb->data, ETH_HLEN);
++ tmp_skb->data[ETH_HLEN] = 0x00;
++ tmp_skb->data[ETH_HLEN + 1] = 0x00;
++ tmp_skb->len = skb->len;
++ memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
++ (skb->len - ETH_HLEN));
++ buffer_info->kernel_skb = skb;
++ skb = tmp_skb;
++
++ /*-- Set Buffer infomation --*/
++ buffer_info->length = skb->len;
++ buffer_info->dma =
++ pci_map_single(adapter->pdev, skb->data, buffer_info->length,
++ PCI_DMA_TODEVICE);
++ buffer_info->time_stamp = jiffies;
++
++ /*-- Set Tx descriptor --*/
++ tx_desc = IOH_GBE_TX_DESC(*tx_ring, ring_num);
++ tx_desc->buffer_addr = (buffer_info->dma);
++ tx_desc->length = (skb->len);
++ tx_desc->tx_words_eob = ((skb->len + 3));
++ tx_desc->tx_frame_ctrl = (frame_ctrl);
++ tx_desc->gbec_status = (DSC_INIT16);
++
++ if (unlikely(++ring_num == tx_ring->count))
++ ring_num = 0;
++
++#ifdef DEBUG_TEST
++ {
++ unsigned char *rd_data;
++
++ rd_data = (unsigned char *)tx_desc;
++ IOH_GBE_TESTOUT
++ ("buffer_info->dma : 0x%08x skb->len : 0x%08x "
++ "frame_ctrl : 0x%08x\n",
++ buffer_info->dma, skb->len, frame_ctrl);
++ IOH_GBE_TESTOUT
++ ("tx_desc: \n 0x%02x 0x%02x 0x%02x 0x%02x\n 0x%02x "
++ "0x%02x 0x%02x 0x%02x\n 0x%02x 0x%02x 0x%02x 0x%02x\n "
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ rd_data[0], rd_data[1], rd_data[2], rd_data[3], rd_data[4],
++ rd_data[5], rd_data[6], rd_data[7], rd_data[8], rd_data[9],
++ rd_data[10], rd_data[11], rd_data[12], rd_data[13],
++ rd_data[14], rd_data[15]);
++ }
++#endif
++
++ /* Update software pointer of TX descriptor */
++ IOH_GBE_WRITE_REG(hw, TX_DSC_SW_P,
++ tx_ring->dma +
++ (int)sizeof(struct ioh_gbe_tx_desc) * ring_num);
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn void ioh_gbe_update_stats(struct ioh_gbe_adapter *adapter)
++ * @brief Update the board statistics counters
++ * @param adapter [INOUT] Board private structure
++ * @return None
++ */
++void ioh_gbe_update_stats(struct ioh_gbe_adapter *adapter)
++{
++ struct pci_dev *pdev = adapter->pdev;
++ struct ioh_gbe_hw_stats *stats = &adapter->stats;
++ struct net_device_stats *net_stats = &adapter->net_stats;
++ unsigned long flags;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++ /*
++ * Prevent stats update while adapter is being reset, or if the pci
++ * connection is down.
++ */
++ if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
++ return;
++
++ spin_lock_irqsave(&adapter->stats_lock, flags);
++
++ /* Update device status "adapter->stats" */
++ stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
++ stats->tx_errors = stats->tx_length_errors +
++ stats->tx_aborted_errors +
++ stats->tx_carrier_errors + stats->tx_timeout_count;
++
++ /* Update network device status "adapter->net_stats" */
++ net_stats->rx_packets = stats->rx_packets;
++ net_stats->tx_packets = stats->tx_packets;
++ net_stats->rx_bytes = stats->rx_bytes;
++ net_stats->tx_bytes = stats->tx_bytes;
++ net_stats->rx_errors = stats->rx_errors;
++ net_stats->tx_errors = stats->tx_errors;
++ net_stats->rx_dropped = stats->rx_dropped;
++ net_stats->tx_dropped = stats->tx_dropped;
++ net_stats->multicast = stats->multicast;
++ net_stats->collisions = stats->collisions;
++ net_stats->rx_crc_errors = stats->rx_crc_errors;
++ net_stats->rx_frame_errors = stats->rx_frame_errors;
++ net_stats->tx_aborted_errors = stats->tx_aborted_errors;
++ net_stats->tx_carrier_errors = stats->tx_carrier_errors;
++
++ spin_unlock_irqrestore(&adapter->stats_lock, flags);
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static irqreturn_t ioh_gbe_intr(int irq, void *data)
++ * @brief Interrupt Handler
++ * @param irq [IN] Interrupt number
++ * @param data [INOUT] Pointer to a network interface device structure
++ * @return None
++ */
++static irqreturn_t ioh_gbe_intr(int irq, void *data)
++{
++ struct net_device *netdev = data;
++ struct ioh_gbe_adapter *adapter = netdev_priv(netdev);
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ u32 int_st;
++ u32 int_en;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ /* Check request status */
++ int_st = IOH_GBE_READ_REG(hw, INT_ST);
++ IOH_GBE_TESTOUT("int_st = 0x%08x\n", int_st);
++
++ int_st = int_st & IOH_GBE_READ_REG(hw, INT_EN);
++ /* When request status is no interruption factor */
++ if (unlikely(!int_st)) {
++ /* End processing. */
++ IOH_GBE_TESTOUT("return = 0x%08x\n", IRQ_NONE);
++ return IRQ_NONE; /* Not our interrupt */
++ }
++ if (int_st & IOH_GBE_INT_RX_FRAME_ERR)
++ adapter->stats.intr_rx_frame_err_count++;
++ if (int_st & IOH_GBE_INT_RX_FIFO_ERR)
++ adapter->stats.intr_rx_fifo_err_count++;
++ if (int_st & IOH_GBE_INT_RX_DMA_ERR)
++ adapter->stats.intr_rx_dma_err_count++;
++ if (int_st & IOH_GBE_INT_TX_FIFO_ERR)
++ adapter->stats.intr_tx_fifo_err_count++;
++ if (int_st & IOH_GBE_INT_TX_DMA_ERR)
++ adapter->stats.intr_tx_dma_err_count++;
++ if (int_st & IOH_GBE_INT_TCPIP_ERR)
++ adapter->stats.intr_tcpip_err_count++;
++ /* When Rx descriptor is empty */
++ if ((int_st & IOH_GBE_INT_RX_DSC_EMP) != 0) {
++ adapter->stats.intr_rx_dsc_empty_count++;
++ DPRINTK(PROBE, ERR, "Rx descriptor is empty\n");
++ int_en = IOH_GBE_READ_REG(hw, INT_EN);
++ IOH_GBE_WRITE_REG(hw, INT_EN,
++ (int_en & ~IOH_GBE_INT_RX_DSC_EMP));
++ if (hw->mac.tx_fc_enable == TRUE) {
++ /* Set Pause packet */
++ ioh_gbe_hal_set_pause_packet(hw);
++ }
++ if ((int_en & (IOH_GBE_INT_RX_DMA_CMPLT | IOH_GBE_INT_TX_CMPLT))
++ == 0) {
++ return IRQ_HANDLED;
++ }
++ }
++
++ /* When request status is Receive interruption */
++ if ((int_st & (IOH_GBE_INT_RX_DMA_CMPLT | IOH_GBE_INT_TX_CMPLT)) != 0) {
++ if (likely(napi_schedule_prep(&adapter->napi))) {
++ /* Enable only Rx Descriptor empty */
++ atomic_inc(&adapter->irq_sem);
++ int_en = IOH_GBE_READ_REG(hw, INT_EN);
++ int_en &=
++ ~(IOH_GBE_INT_RX_DMA_CMPLT | IOH_GBE_INT_TX_CMPLT);
++ IOH_GBE_WRITE_REG(hw, INT_EN, int_en);
++ /* Start polling for NAPI */
++ __napi_schedule(&adapter->napi);
++ }
++ }
++ IOH_GBE_TESTOUT("return = 0x%08x INT_EN reg = 0x%08x\n",
++ IRQ_HANDLED, IOH_GBE_READ_REG(hw, INT_EN));
++ return IRQ_HANDLED;
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static unsigned char ioh_gbe_clean_tx(struct ioh_gbe_adapter *adapter,
++ * struct ioh_gbe_tx_ring *tx_ring)
++ * @brief Reclaim resources after transmit completes
++ * @param adapter [INOUT] Board private structure
++ * @param tx_ring [OUT] Tx descriptor ring
++ * @return TRUE: Cleaned the descriptor
++ * @return FALSE: Not cleaned the descriptor
++ */
++static unsigned char
++ioh_gbe_clean_tx(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_tx_ring *tx_ring)
++{
++ struct ioh_gbe_tx_desc *tx_desc;
++ struct ioh_gbe_buffer *buffer_info;
++ struct sk_buff *skb;
++ unsigned int i;
++ unsigned int cleaned_count = 0;
++ unsigned char cleaned = FALSE;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++ IOH_GBE_TESTOUT("next_to_clean : %d\n", tx_ring->next_to_clean);
++
++ i = tx_ring->next_to_clean;
++ tx_desc = IOH_GBE_TX_DESC(*tx_ring, i);
++ IOH_GBE_TESTOUT("gbec_status:0x%04x dma_status:0x%04x\n",
++ tx_desc->gbec_status, tx_desc->dma_status);
++
++ while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
++ IOH_GBE_TESTOUT("gbec_status:0x%04x\n", tx_desc->gbec_status);
++ cleaned = TRUE;
++ buffer_info = &tx_ring->buffer_info[i];
++ skb = buffer_info->skb;
++
++ if ((tx_desc->gbec_status & IOH_GBE_TXD_GMAC_STAT_ABT) != 0) {
++ adapter->stats.tx_aborted_errors++;
++ DPRINTK(PROBE, ERR, "Transfer Aboat Error\n");
++ } else if ((tx_desc->gbec_status & IOH_GBE_TXD_GMAC_STAT_CRSER)
++ != 0) {
++ adapter->stats.tx_carrier_errors++;
++ DPRINTK(PROBE, ERR, "Transfer Carrier Sense Error\n");
++ } else if ((tx_desc->gbec_status & IOH_GBE_TXD_GMAC_STAT_EXCOL)
++ != 0) {
++ adapter->stats.tx_aborted_errors++;
++ DPRINTK(PROBE, ERR, "Transfer Collision Abort Error\n");
++ } else if ((tx_desc->gbec_status &
++ (IOH_GBE_TXD_GMAC_STAT_SNGCOL |
++ IOH_GBE_TXD_GMAC_STAT_MLTCOL)) != 0) {
++ adapter->stats.collisions++;
++ adapter->stats.tx_packets++;
++ adapter->stats.tx_bytes += skb->len;
++ DPRINTK(PROBE, DEBUG, "Transfer Collision\n");
++ } else if ((tx_desc->gbec_status & IOH_GBE_TXD_GMAC_STAT_CMPLT)
++ != 0) {
++ adapter->stats.tx_packets++;
++ adapter->stats.tx_bytes += skb->len;
++ }
++ if (buffer_info->dma != 0) {
++ IOH_GBE_TESTOUT("unmap buffer_info->dma : %d\n", i);
++ pci_unmap_page(adapter->pdev, buffer_info->dma,
++ buffer_info->length, PCI_DMA_TODEVICE);
++ buffer_info->dma = 0;
++ }
++ if (buffer_info->skb != 0) {
++ IOH_GBE_TESTOUT("trim buffer_info->skb : %d\n", i);
++ skb_trim(buffer_info->skb, 0);
++ }
++ if (buffer_info->kernel_skb != 0) {
++ IOH_GBE_TESTOUT
++ ("free buffer_info->kernel_skb adr: 0x%x\n",
++ (u32)(buffer_info->kernel_skb));
++ dev_kfree_skb(buffer_info->kernel_skb);
++ buffer_info->kernel_skb = NULL;
++ }
++ tx_desc->gbec_status = DSC_INIT16;
++ if (unlikely(++i == tx_ring->count))
++ i = 0;
++ tx_desc = IOH_GBE_TX_DESC(*tx_ring, i);
++
++ /* weight of a sort for tx, to avoid endless transmit cleanup */
++ if (cleaned_count++ == IOH_GBE_TX_WEIGHT)
++ break;
++ }
++ IOH_GBE_TESTOUT("called ioh_gbe_unmap_and_free_tx_resource() %dcount\n",
++ cleaned_count);
++ /* Recover from running out of Tx resources in xmit_frame */
++ if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
++ netif_wake_queue(adapter->netdev);
++ adapter->stats.tx_restart_count++;
++ DPRINTK(HW, DEBUG, "Tx wake queue\n");
++ }
++ spin_lock(&adapter->tx_queue_lock);
++ tx_ring->next_to_clean = i;
++ spin_unlock(&adapter->tx_queue_lock);
++ IOH_GBE_TESTOUT("next_to_clean : %d\n", tx_ring->next_to_clean);
++ return cleaned;
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static unsigned char ioh_gbe_clean_rx(struct ioh_gbe_adapter *adapter,
++ * struct ioh_gbe_rx_ring *rx_ring,
++ * int *work_done, int work_to_do)
++ * @brief Send received data up the network stack; legacy
++ * @param adapter [INOUT] Board private structure
++ * @param rx_ring [OUT] Rx descriptor ring
++ * @param work_done [OUT] Completed count
++ * @param work_to_do [IN] Request count
++ * @return TRUE: Cleaned the descriptor
++ * @return FALSE: Not cleaned the descriptor
++ */
++static unsigned char
++ioh_gbe_clean_rx(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_rx_ring *rx_ring,
++ int *work_done, int work_to_do)
++{
++ struct net_device *netdev = adapter->netdev;
++ struct pci_dev *pdev = adapter->pdev;
++ struct ioh_gbe_buffer *buffer_info;
++ struct ioh_gbe_rx_desc *rx_desc;
++ u32 length;
++ unsigned char tmp_packet[ETH_HLEN];
++ unsigned int i;
++ unsigned int cleaned_count = 0;
++ unsigned char cleaned = FALSE;
++ struct sk_buff *skb;
++ u8 dma_status;
++ u16 gbec_status;
++ u32 tcp_ip_status;
++ u8 skb_copy_flag = 0;
++ u8 skb_padding_flag = 0;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ i = rx_ring->next_to_clean;
++
++ while (*work_done < work_to_do) {
++ /* Check Rx descriptor status */
++ rx_desc = IOH_GBE_RX_DESC(*rx_ring, i);
++ if (rx_desc->gbec_status == DSC_INIT16)
++ break;
++ cleaned = TRUE;
++ cleaned_count++;
++
++ dma_status = rx_desc->dma_status;
++ gbec_status = rx_desc->gbec_status;
++ tcp_ip_status = rx_desc->tcp_ip_status;
++ rx_desc->gbec_status = DSC_INIT16;
++ buffer_info = &rx_ring->buffer_info[i];
++ skb = buffer_info->skb;
++
++ /* unmap dma */
++ pci_unmap_single(pdev, buffer_info->dma, buffer_info->length,
++ PCI_DMA_FROMDEVICE);
++ buffer_info->dma = 0;
++ /* Prefetch the packet */
++ prefetch(skb->data);
++
++ IOH_GBE_TESTOUT
++ ("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x "
++ "TCP:0x%08x] BufInf = 0x%08x\n",
++ i, dma_status, gbec_status, tcp_ip_status,
++ (u32) (buffer_info));
++ /* Error check */
++ if (unlikely(gbec_status & IOH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
++ adapter->stats.rx_frame_errors++;
++ DPRINTK(PROBE, ERR, "Receive Not Octal Error\n");
++ } else if (unlikely(gbec_status &
++ IOH_GBE_RXD_GMAC_STAT_NBLERR)) {
++ adapter->stats.rx_frame_errors++;
++ DPRINTK(PROBE, ERR, "Receive Nibble Error\n");
++ } else if (unlikely(gbec_status &
++ IOH_GBE_RXD_GMAC_STAT_CRCERR)) {
++ adapter->stats.rx_crc_errors++;
++ DPRINTK(PROBE, ERR, "Receive CRC Error\n");
++ } else {
++ /* get receive length */
++ /* length convert[-3], padding[-2] */
++ length = (rx_desc->rx_words_eob) - 3 - 2;
++
++ /* Decide the data conversion method */
++ if (adapter->rx_csum != TRUE) {
++ /* [Header:14][payload] */
++ skb_padding_flag = 0;
++ skb_copy_flag = 1;
++ } else {
++ /* [Header:14][padding:2][payload] */
++ skb_padding_flag = 1;
++ if (length < copybreak)
++ skb_copy_flag = 1;
++ else
++ skb_copy_flag = 0;
++ }
++
++ /* Data conversion */
++ if (skb_copy_flag != 0) { /* recycle skb */
++ struct sk_buff *new_skb;
++ new_skb =
++ netdev_alloc_skb(netdev,
++ length + NET_IP_ALIGN);
++ if (new_skb != 0) {
++ if (!skb_padding_flag) {
++ skb_reserve(new_skb,
++ NET_IP_ALIGN);
++ }
++ memcpy(new_skb->data, skb->data,
++ length);
++ /* save the skb
++ * in buffer_info as good */
++ skb = new_skb;
++ } else if (!skb_padding_flag) {
++ /* dorrop error */
++ DPRINTK(PROBE, ERR,
++ "New skb allocation Error\n");
++ goto dorrop;
++ }
++ } else {
++ buffer_info->skb = NULL;
++ }
++ if (skb_padding_flag != 0) {
++ memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN);
++ memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0],
++ ETH_HLEN);
++ skb_reserve(skb, NET_IP_ALIGN);
++
++ }
++
++ /* update status of driver */
++ adapter->stats.rx_bytes += length;
++ adapter->stats.rx_packets++;
++ if ((gbec_status & IOH_GBE_RXD_GMAC_STAT_MARMLT) != 0)
++ adapter->stats.multicast++;
++ /* Write meta date of skb */
++ skb_put(skb, length);
++ skb->protocol = eth_type_trans(skb, netdev);
++ if ((tcp_ip_status & IOH_GBE_RXD_ACC_STAT_TCPIPOK) ==
++ IOH_GBE_RXD_ACC_STAT_TCPIPOK) {
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ } else {
++ skb->ip_summed = CHECKSUM_NONE;
++ }
++
++ if (netif_receive_skb(skb) == NET_RX_DROP) {
++ adapter->stats.rx_dropped++;
++ DPRINTK(PROBE, ERR,
++ "Receive Netif Receive Dropped Error\n");
++ }
++ (*work_done)++;
++ netdev->last_rx = jiffies;
++ IOH_GBE_TESTOUT
++ ("Receive skb->ip_summed: %d length: %d\n",
++ skb->ip_summed, length);
++ }
++dorrop:
++ /* return some buffers to hardware, one at a time is too slow */
++ if (unlikely(cleaned_count >= IOH_GBE_RX_BUFFER_WRITE)) {
++ ioh_gbe_alloc_rx_buffers(adapter, rx_ring,
++ cleaned_count);
++ cleaned_count = 0;
++ }
++ if (++i == rx_ring->count)
++ i = 0;
++ }
++ rx_ring->next_to_clean = i;
++ if (cleaned_count != 0)
++ ioh_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
++#ifdef DEBUG_TEST
++ {
++ u32 tmp1, tmp2, tmp3, tmp4;
++ struct ioh_gbe_hw *hw = &adapter->hw;
++
++ IOH_GBE_TESTOUT
++ ("cleaned_count = %d next_to_clean = %d "
++ "next_to_use = %d\n",
++ cleaned_count, rx_ring->next_to_clean,
++ rx_ring->next_to_use);
++ tmp1 = IOH_GBE_READ_REG(hw, RX_DSC_BASE);
++ tmp2 = IOH_GBE_READ_REG(hw, RX_DSC_HW_P);
++ tmp3 = IOH_GBE_READ_REG(hw, RX_DSC_SIZE);
++ tmp4 = IOH_GBE_READ_REG(hw, RX_DSC_SW_P);
++ IOH_GBE_TESTOUT
++ ("BASE = 0x%08x HW_P = 0x%08x "
++ "SIZE = 0x%08x SW_P = 0x%08x\n",
++ tmp1, tmp2, tmp3, tmp4);
++ tmp1 = IOH_GBE_READ_REG(hw, DMA_CTRL);
++ tmp2 = IOH_GBE_READ_REG(hw, MAC_RX_EN);
++ IOH_GBE_TESTOUT("DMA_CTRL = 0x%08x MAC_RX_EN = 0x%08x\n", tmp1,
++ tmp2);
++ tmp1 = IOH_GBE_READ_REG(hw, RX_MODE);
++ tmp2 = IOH_GBE_READ_REG(hw, ADDR_MASK);
++ tmp3 = IOH_GBE_READ_REG(hw, MAC_ADR1A);
++ tmp4 = IOH_GBE_READ_REG(hw, MAC_ADR1B);
++ IOH_GBE_TESTOUT
++ ("RX_MODE = 0x%08x ADDR_MASK = 0x%08x "
++ "MAC_ADR1A = 0x%08x MAC_ADR1B = 0x%08x\n",
++ tmp1, tmp2, tmp3, tmp4);
++ }
++#endif
++ return cleaned;
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static void ioh_gbe_alloc_rx_buffers(
++ * struct ioh_gbe_adapter *adapter,
++ * struct ioh_gbe_rx_ring *rx_ring, int cleaned_count)
++ * @brief Replace used receive buffers; legacy & extended
++ * @param adapter [INOUT] Board private structure
++ * @param rx_ring [OUT] Rx descriptor ring
++ * @param cleaned_count [IN] Cleaned count
++ * @return None
++ */
++static void
++ioh_gbe_alloc_rx_buffers(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_rx_ring *rx_ring, int cleaned_count)
++{
++ struct net_device *netdev = adapter->netdev;
++ struct pci_dev *pdev = adapter->pdev;
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ struct ioh_gbe_rx_desc *rx_desc;
++ struct ioh_gbe_buffer *buffer_info;
++ struct sk_buff *skb;
++ unsigned int i;
++ unsigned int bufsz;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ bufsz = adapter->rx_buffer_len + IOH_GBE_DMA_ALIGN;
++ i = rx_ring->next_to_use;
++
++ while ((cleaned_count--) != 0) {
++ buffer_info = &rx_ring->buffer_info[i];
++ skb = buffer_info->skb;
++ if (skb != 0) {
++ skb_trim(skb, 0);
++ } else {
++ skb = netdev_alloc_skb(netdev, bufsz);
++ if (unlikely(!skb)) {
++ /* Better luck next round */
++ adapter->stats.rx_alloc_buff_failed++;
++ break;
++ }
++ /* 64byte align */
++ skb_reserve(skb, IOH_GBE_DMA_ALIGN);
++
++ buffer_info->skb = skb;
++ buffer_info->length = adapter->rx_buffer_len;
++ }
++
++ buffer_info->dma = pci_map_single(pdev,
++ skb->data,
++ buffer_info->length,
++ PCI_DMA_FROMDEVICE);
++
++ rx_desc = IOH_GBE_RX_DESC(*rx_ring, i);
++ rx_desc->buffer_addr = (buffer_info->dma);
++ rx_desc->gbec_status = DSC_INIT16;
++
++ IOH_GBE_DBGOUT1
++ ("i = %d buffer_info->dma = 0x%x "
++ "buffer_info->length = 0x%x\n",
++ i, buffer_info->dma, buffer_info->length);
++
++ if (unlikely(++i == rx_ring->count))
++ i = 0;
++ }
++ if (likely(rx_ring->next_to_use != i)) {
++ rx_ring->next_to_use = i;
++ if (unlikely(i-- == 0))
++ i = (rx_ring->count - 1);
++ wmb();
++ IOH_GBE_WRITE_REG(hw, RX_DSC_SW_P,
++ rx_ring->dma +
++ (int)sizeof(struct ioh_gbe_rx_desc) * i);
++ }
++ return;
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static void ioh_gbe_alloc_tx_buffers(
++ * struct ioh_gbe_adapter *adapter,
++ * struct ioh_gbe_tx_ring *tx_ring)
++ * @brief Allocate transmit buffers
++ * @param adapter [INOUT] Board private structure
++ * @param tx_ring [OUT] Tx descriptor ring
++ * @return None
++ */
++static void
++ioh_gbe_alloc_tx_buffers(struct ioh_gbe_adapter *adapter,
++ struct ioh_gbe_tx_ring *tx_ring)
++{
++ struct ioh_gbe_buffer *buffer_info;
++ struct sk_buff *skb;
++ unsigned int i;
++ unsigned int bufsz;
++ struct ioh_gbe_tx_desc *tx_desc;
++
++ DPRINTK(PROBE, DEBUG, "\n");
++
++ bufsz =
++ adapter->hw.mac.max_frame_size + IOH_GBE_DMA_ALIGN + NET_IP_ALIGN;
++
++ for (i = 0; i < tx_ring->count; i++) {
++ buffer_info = &tx_ring->buffer_info[i];
++ skb = netdev_alloc_skb(adapter->netdev, bufsz);
++ skb_reserve(skb, IOH_GBE_DMA_ALIGN);
++ buffer_info->skb = skb;
++ tx_desc = IOH_GBE_TX_DESC(*tx_ring, i);
++ tx_desc->gbec_status = (DSC_INIT16);
++ }
++
++ return;
++}
++/* ioh_gbe_main.c */
+diff -urN linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_nvm.c topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_nvm.c
+--- linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_nvm.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_nvm.c 2010-03-11 15:12:00.000000000 +0900
+@@ -0,0 +1,129 @@
++/*!
++ * @file ioh_gbe_nvm.c
++ * @brief Linux IOH Gigabit Ethernet Driver HAL internal function (NVM) source file
++ *
++ * @version 0.90
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR CO., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 09/21/2009
++ * modified:
++ *
++ */
++#include "pch_gbe_osdep.h"
++#include "pch_gbe_defines.h"
++#include "pch_gbe_hw.h"
++#include "pch_gbe_nvm.h"
++
++#ifdef CONFIG_PCH_PCIEQOS
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_nvm_read_mem(struct ioh_gbe_hw *hw,
++ * u32 offset, u8 *data)
++ * @brief Read EEPROM
++ * @param hw [INOUT] Pointer to the HW structure
++ * @param offset [IN] Offset of word in the EEPROM to read
++ * @param data [OUT] Word read from the EEPROM
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++s32 ioh_gbe_nvm_read_mem(struct ioh_gbe_hw *hw, u32 offset, u8 *data)
++{
++ s32 ret;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_nvm_read_mem");
++ IOH_GBE_TESTOUT("offset : 0x%04x\n", offset);
++ ret = ioh_pcieqos_read_gbe_mac_addr(offset, data);
++ return ret;
++}
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_nvm_write_mem(struct ioh_gbe_hw *hw,
++ * u32 offset, u8 *data)
++ * @brief Write EEPROM
++ * @param hw [INOUT] Pointer to the HW structure
++ * @param offset  [IN] Offset of word in the EEPROM to write
++ * @param data [IN] 8bit word(s) to be written to the EEPROM
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++s32 ioh_gbe_nvm_write_mem(struct ioh_gbe_hw *hw, u32 offset, u8 *data)
++{
++ s32 ret;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_nvm_write_mem");
++ IOH_GBE_TESTOUT("offset : 0x%04x\n", offset);
++ ret = ioh_pcieqos_write_gbe_mac_addr(offset, *data);
++ return ret;
++}
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_nvm_read_mac_addr(struct ioh_gbe_hw *hw)
++ * @brief Read device MAC address
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++s32 ioh_gbe_nvm_read_mac_addr(struct ioh_gbe_hw *hw)
++{
++ s32 ret;
++ u8 i;
++ u8 *data;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_nvm_read_mac_addr");
++
++#ifdef NVM_MAC_FIX
++ hw->mac.addr[0] = (u8) (0x00);
++ hw->mac.addr[1] = (u8) (0x21);
++ hw->mac.addr[2] = (u8) (0x97);
++ hw->mac.addr[3] = (u8) (0x77);
++ hw->mac.addr[4] = (u8) (0x65);
++ hw->mac.addr[5] = (u8) (0x13);
++#else
++ data = hw->mac.addr;
++ for (i = 0; i < (hw->nvm.word_size * 2); i++) {
++ ret = ioh_pcieqos_read_gbe_mac_addr((u32) i, (data + i));
++ if (ret != 0)
++ break;
++ }
++#endif
++
++ IOH_GBE_TESTOUT("hw->mac.addr : 0x%02x %02x %02x %02x %02x %02x\n",
++ hw->mac.addr[0], hw->mac.addr[1], hw->mac.addr[2],
++ hw->mac.addr[3], hw->mac.addr[4], hw->mac.addr[5]);
++ return ret;
++}
++#endif
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_nvm_validate_checksum(struct ioh_gbe_hw *hw)
++ * @brief Validate EEPROM checksum
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ */
++s32 ioh_gbe_nvm_validate_checksum(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_nvm_validate_checksum");
++ return IOH_GBE_SUCCESS;
++}
+diff -urN linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_nvm.h topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_nvm.h
+--- linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_nvm.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_nvm.h 2010-03-11 15:12:18.000000000 +0900
+@@ -0,0 +1,85 @@
++/*!
++ * @file ioh_gbe_nvm.h
++ * @brief Linux IOH Gigabit Ethernet Driver HAL internal function (NVM) header file
++ *
++ * @version 0.90
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR CO., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 09/21/2009
++ * modified:
++ *
++ */
++#ifndef _IOH_GBE_NVM_H_
++#define _IOH_GBE_NVM_H_
++
++#ifdef CONFIG_PCH_PCIEQOS
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_nvm_read_mem(struct ioh_gbe_hw *hw,
++ * u32 offset, u8 *data)
++ * @brief Read EEPROM
++ */
++s32 ioh_gbe_nvm_read_mem(struct ioh_gbe_hw *hw, u32 offset, u8 * data);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_nvm_write_mem(struct ioh_gbe_hw *hw,
++ * u32 offset, u8 *data)
++ * @brief Write EEPROM
++ */
++s32 ioh_gbe_nvm_write_mem(struct ioh_gbe_hw *hw, u32 offset, u8 * data);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_nvm_read_mac_addr(struct ioh_gbe_hw *hw)
++ * @brief Read device MAC address
++ */
++s32 ioh_gbe_nvm_read_mac_addr(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_nvm_validate_checksum(struct ioh_gbe_hw *hw)
++ * @brief Validate EEPROM checksum
++ */
++s32 ioh_gbe_nvm_validate_checksum(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup PCIe QoS Driver function
++ * @fn int ioh_pcieqos_read_gbe_mac_addr (unsigned long offset_address,
++ * unsigned char *data);
++ * @brief Read MAC address from NVM
++ */
++int ioh_pcieqos_read_gbe_mac_addr(unsigned long offset_address,
++ unsigned char *data);
++
++/*!
++ * @ingroup PCIe QoS Driver function
++ * @fn int ioh_pcieqos_write_gbe_mac_addr(unsigned long offset_address,
++ * unsigned char data);
++ * @brief Write MAC address to NVM
++ */
++int ioh_pcieqos_write_gbe_mac_addr(unsigned long offset_address,
++ unsigned char data);
++#endif /* CONFIG_PCH_PCIEQOS */
++
++#endif /* _IOH_GBE_NVM_H_ */
+diff -urN linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_osdep.h topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_osdep.h
+--- linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_osdep.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_osdep.h 2010-03-09 09:27:26.000000000 +0900
+@@ -0,0 +1,74 @@
++/*!
++ * @file ioh_gbe_osdep.h
++ * @brief Linux IOH Gigabit Ethernet Driver OS independent header file
++ *
++ * glue for the OS independent part of ioh
++ * includes register access macros
++ *
++ * @version 0.90
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR CO., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 09/21/2009
++ * modified:
++ *
++ */
++#ifndef _IOH_GBE_OSDEP_H_
++#define _IOH_GBE_OSDEP_H_
++
++#include <linux/delay.h>
++#include <linux/io.h>
++
++#define usec_delay(x) udelay(x)
++#ifndef msec_delay
++#define msec_delay(x) do {if (in_interrupt()) { \
++ /* Don't mdelay in interrupt context! */ \
++ BUG(); \
++ } else { \
++ msleep(x); \
++ } } while (0)
++
++/* Some workarounds require millisecond delays and are run during interrupt
++ * context. Most notably, when establishing link, the phy may need tweaking
++ * but cannot process phy register reads/writes faster than millisecond
++ * intervals...and we establish link due to a "link status change" interrupt.
++ */
++#define msec_delay_irq(x) mdelay(x)
++#endif
++
++#undef FALSE
++#define FALSE 0
++#undef TRUE
++#define TRUE 1
++
++
++#define IOH_GBE_WRITE_REG(a, reg, value) ( \
++ writel((value), ((a)->hw_addr + IOH_GBE_##reg)))
++
++#define IOH_GBE_READ_REG(a, reg) ( \
++ readl((a)->hw_addr + IOH_GBE_##reg))
++
++#define IOH_GBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
++ writel((value), \
++ ((a)->hw_addr + IOH_GBE_##reg + ((offset) << 2))))
++
++#endif /* _IOH_GBE_OSDEP_H_ */
+diff -urN linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_param.c topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_param.c
+--- linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_param.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_param.c 2010-03-09 09:39:22.000000000 +0900
+@@ -0,0 +1,594 @@
++/*!
++ * @file ioh_gbe_param.c
++ * @brief Linux IOH Gigabit Ethernet Driver parameter check source file
++ *
++ * @version 0.90
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR CO., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 09/21/2009
++ * modified:
++ *
++ */
++
++#include <linux/netdevice.h>
++#include <linux/ethtool.h>
++#include <linux/mii.h>
++
++#include "pch_gbe_osdep.h"
++#include "pch_gbe_defines.h"
++#include "pch_gbe_hw.h"
++#include "pch_gbe.h"
++
++/* This is the only thing that needs to be changed to adjust the
++ * maximum number of ports that the driver can manage.
++ */
++
++#define IOH_GBE_MAX_NIC 1
++
++#define OPTION_UNSET -1
++#define OPTION_DISABLED 0
++#define OPTION_ENABLED 1
++
++/* All parameters are treated the same, as an integer array of values.
++ * This macro just reduces the need to repeat the same declaration code
++ * over and over (plus this helps to avoid typo bugs).
++ */
++
++#define IOH_GBE_PARAM_INIT { [0 ... IOH_GBE_MAX_NIC] = OPTION_UNSET }
++#ifdef IOH_GBE_QAC
++#define IOH_GBE_PARAM(X, desc)
++#else
++#define IOH_GBE_PARAM(X, desc) \
++ static int X[IOH_GBE_MAX_NIC+1] = IOH_GBE_PARAM_INIT; \
++ static int num_##X; \
++ module_param_array_named(X, X, int, &num_##X, 0); \
++ MODULE_PARM_DESC(X, desc);
++#endif
++
++/*
++ * Transmit Descriptor Count
++ * Valid Range: IOH_GBE_MIN_TXD - IOH_GBE_MAX_TXD
++ * Default Value: IOH_GBE_DEFAULT_TXD
++ */
++IOH_GBE_PARAM(TxDescriptors, "Number of transmit descriptors");
++
++/*
++ * Receive Descriptor Count
++ * Valid Range: IOH_GBE_MIN_RXD - IOH_GBE_MAX_RXD
++ * Default Value: IOH_GBE_DEFAULT_RXD
++ */
++IOH_GBE_PARAM(RxDescriptors, "Number of receive descriptors");
++
++/* User Specified Speed Override
++ *
++ * Valid Range: 0, 10, 100, 1000
++ * - 0 - auto-negotiate at all supported speeds
++ * - 10 - only link at 10 Mbps
++ * - 100 - only link at 100 Mbps
++ * - 1000 - only link at 1000 Mbps
++ *
++ * Default Value: 0
++ */
++IOH_GBE_PARAM(Speed, "Speed setting");
++
++/* User Specified Duplex Override
++ *
++ * Valid Range: 0-2
++ * - 0 - auto-negotiate for duplex
++ * - 1 - only link at half duplex
++ * - 2 - only link at full duplex
++ *
++ * Default Value: 0
++ */
++IOH_GBE_PARAM(Duplex, "Duplex setting");
++
++/*
++ * Auto-negotiation Advertisement Override
++ * Valid Range: 0x01-0x0F, 0x20-0x2F
++ *
++ * The AutoNeg value is a bit mask describing which speed and duplex
++ * combinations should be advertised during auto-negotiation.
++ * The supported speed and duplex modes are listed below
++ *
++ * Bit 7 6 5 4 3 2 1 0
++ * Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10
++ * Duplex Full Full Half Full Half
++ *
++ * Default Value: 0x2F (copper)
++ */
++IOH_GBE_PARAM(AutoNeg, "Advertised auto-negotiation setting");
++#define AUTONEG_ADV_DEFAULT 0x2F
++
++/*
++ * User Specified Flow Control Override
++ * Valid Range: 0-3
++ * - 0 - No Flow Control
++ * - 1 - Rx only, respond to PAUSE frames but do not generate them
++ * - 2 - Tx only, generate PAUSE frames but ignore them on receive
++ * - 3 - Full Flow Control Support
++ * Default Value: Read flow control settings from the EEPROM
++ */
++IOH_GBE_PARAM(FlowControl, "Flow Control setting");
++
++/*
++ * XsumRX - Receive Checksum Offload Enable/Disable
++ * Valid Range: 0, 1
++ * - 0 - disables all checksum offload
++ * - 1 - enables receive IP/TCP/UDP checksum offload
++ * Default Value: IOH_GBE_DEFAULT_RX_CSUM
++ */
++IOH_GBE_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
++
++/*
++ * XsumTX - Transmit Checksum Offload Enable/Disable
++ * Valid Range: 0, 1
++ * - 0 - disables all checksum offload
++ * - 1 - enables transmit IP/TCP/UDP checksum offload
++ * Default Value: IOH_GBE_DEFAULT_TX_CSUM
++ */
++IOH_GBE_PARAM(XsumTX, "Disable or enable Transmit Checksum offload");
++
++struct ioh_gbe_option {
++ enum { enable_option, range_option, list_option } type;
++ signed char *name;
++ signed char *err;
++ int def;
++ union {
++ struct { /* range_option info */
++ int min;
++ int max;
++ } r;
++ struct { /* list_option info */
++ int nr;
++ struct ioh_gbe_opt_list { int i; signed char *str; } *p;
++ } l;
++ } arg;
++};
++
++/* ----------------------------------------------------------------------------
++ Function prototype
++---------------------------------------------------------------------------- */
++static void ioh_gbe_check_copper_options(struct ioh_gbe_adapter *adapter);
++static int ioh_gbe_validate_option(int *value,
++ struct ioh_gbe_option *opt,
++ struct ioh_gbe_adapter *adapter);
++
++/* ----------------------------------------------------------------------------
++ Function
++---------------------------------------------------------------------------- */
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static int ioh_gbe_validate_option(int *value,
++ * struct ioh_gbe_option *opt,
++ * struct ioh_gbe_adapter *adapter)
++ * @brief Validate option
++ * @param value [IN] value
++ * @param opt [IN] option
++ * @param adapter [IN] Board private structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ */
++static int
++ioh_gbe_validate_option(int *value, struct ioh_gbe_option *opt,
++ struct ioh_gbe_adapter *adapter)
++{
++ if (*value == OPTION_UNSET) {
++ *value = opt->def;
++ return 0;
++ }
++
++ switch (opt->type) {
++ case enable_option:
++ switch (*value) {
++ case OPTION_ENABLED:
++ DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
++ return 0;
++ case OPTION_DISABLED:
++ DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
++ return 0;
++ }
++ break;
++ case range_option:
++ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
++ DPRINTK(PROBE, INFO,
++ "%s set to %i\n", opt->name, *value);
++ return 0;
++ }
++ break;
++ case list_option: {
++ int i;
++ struct ioh_gbe_opt_list *ent;
++
++ for (i = 0; i < opt->arg.l.nr; i++) {
++ ent = &opt->arg.l.p[i];
++ if (*value == ent->i) {
++ if (ent->str[0] != '\0')
++ DPRINTK(PROBE, INFO, "%s\n", ent->str);
++ return 0;
++ }
++ }
++ }
++ break;
++ default:
++ BUG();
++ }
++
++ DPRINTK(PROBE, INFO, "Invalid %s value specified (%i) %s\n",
++ opt->name, *value, opt->err);
++ *value = opt->def;
++ return -1;
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn void ioh_gbe_check_options(struct ioh_gbe_adapter *adapter)
++ * @brief Range Checking for Command Line Parameters
++ * @param adapter [IN] Board private structure
++ * @return None
++ * @remarks
++ * This routine checks all command line parameters for valid user
++ * input. If an invalid value is given, or if no user specified
++ * value exists, a default value is used. The final value is stored
++ * in a variable in the adapter structure.
++ */
++void
++ioh_gbe_check_options(struct ioh_gbe_adapter *adapter)
++{
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ int bd = adapter->bd_number;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_check_options");
++
++ if (bd >= IOH_GBE_MAX_NIC) {
++ DPRINTK(PROBE, NOTICE,
++ "Warning: no configuration for board #%i\n", bd);
++ DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
++ }
++
++ { /* Transmit Descriptor Count */
++ struct ioh_gbe_option opt = {
++ .type = range_option,
++ .name = "Transmit Descriptors",
++ .err = "using default of "
++ __MODULE_STRING(IOH_GBE_DEFAULT_TXD),
++ .def = IOH_GBE_DEFAULT_TXD,
++			.arg = { .r = { .min = IOH_GBE_MIN_TXD,
++					.max = IOH_GBE_MAX_TXD } }
++ };
++ struct ioh_gbe_tx_ring *tx_ring = adapter->tx_ring;
++ if (num_TxDescriptors > bd) {
++ tx_ring->count = TxDescriptors[bd];
++ ioh_gbe_validate_option(&tx_ring->count, &opt, adapter);
++ IOH_GBE_ROUNDUP(tx_ring->count,
++ IOH_GBE_TX_DESC_MULTIPLE);
++ } else {
++ tx_ring->count = opt.def;
++ }
++ }
++ { /* Receive Descriptor Count */
++ struct ioh_gbe_option opt = {
++ .type = range_option,
++ .name = "Receive Descriptors",
++ .err = "using default of "
++ __MODULE_STRING(IOH_GBE_DEFAULT_RXD),
++ .def = IOH_GBE_DEFAULT_RXD,
++			.arg = { .r = { .min = IOH_GBE_MIN_RXD,
++					.max = IOH_GBE_MAX_RXD } }
++ };
++ struct ioh_gbe_rx_ring *rx_ring = adapter->rx_ring;
++ if (num_RxDescriptors > bd) {
++ rx_ring->count = RxDescriptors[bd];
++ ioh_gbe_validate_option(&rx_ring->count, &opt, adapter);
++ IOH_GBE_ROUNDUP(rx_ring->count,
++ IOH_GBE_RX_DESC_MULTIPLE);
++ } else {
++ rx_ring->count = opt.def;
++ }
++ }
++ { /* Checksum Offload Enable/Disable */
++ struct ioh_gbe_option opt = {
++ .type = enable_option,
++ .name = "Checksum Offload",
++ .err = "defaulting to Enabled",
++ .def = IOH_GBE_DEFAULT_RX_CSUM
++ };
++
++ if (num_XsumRX > bd) {
++ adapter->rx_csum = XsumRX[bd];
++ ioh_gbe_validate_option((int *)(&adapter->rx_csum),
++ &opt, adapter);
++ } else {
++ adapter->rx_csum = opt.def;
++ }
++ }
++ { /* Checksum Offload Enable/Disable */
++ struct ioh_gbe_option opt = {
++ .type = enable_option,
++ .name = "Checksum Offload",
++ .err = "defaulting to Enabled",
++ .def = IOH_GBE_DEFAULT_TX_CSUM
++ };
++
++ if (num_XsumTX > bd) {
++ adapter->tx_csum = XsumTX[bd];
++ ioh_gbe_validate_option((int *)(&adapter->tx_csum),
++ &opt, adapter);
++ } else {
++ adapter->tx_csum = opt.def;
++ }
++ }
++ { /* Flow Control */
++
++ struct ioh_gbe_opt_list fc_list[] = {
++ {ioh_gbe_fc_none, "Flow Control Disabled"},
++ {ioh_gbe_fc_rx_pause, "Flow Control Receive Only"},
++ {ioh_gbe_fc_tx_pause, "Flow Control Transmit Only"},
++ {ioh_gbe_fc_full, "Flow Control Enabled"} };
++
++ struct ioh_gbe_option opt = {
++ .type = list_option,
++ .name = "Flow Control",
++ .err = "reading default settings from EEPROM",
++ .def = IOH_GBE_FC_DEFAULT,
++ .arg = { .l = { .nr = (int)ARRAY_SIZE(fc_list),
++ .p = fc_list } }
++ };
++
++ if (num_FlowControl > bd) {
++ hw->mac.fc = FlowControl[bd];
++ ioh_gbe_validate_option((int *)(&hw->mac.fc),
++ &opt, adapter);
++ } else {
++ hw->mac.fc = opt.def;
++ }
++ }
++
++ ioh_gbe_check_copper_options(adapter);
++}
++
++/*!
++ * @ingroup Linux driver internal function
++ * @fn static void ioh_gbe_check_copper_options(
++ * struct ioh_gbe_adapter *adapter)
++ * @brief Range Checking for Link Options, Copper Version
++ * @param adapter [IN] Board private structure
++ * @return None
++ * @remarks
++ * Handles speed and duplex options on copper adapters
++ */
++static void
++ioh_gbe_check_copper_options(struct ioh_gbe_adapter *adapter)
++{
++ struct ioh_gbe_hw *hw = &adapter->hw;
++ int speed, dplx;
++ int bd = adapter->bd_number;
++
++ { /* Speed */
++ struct ioh_gbe_opt_list speed_list[] = {
++ {0, "" },
++ {SPEED_10, ""},
++ {SPEED_100, ""},
++ {SPEED_1000, ""} };
++
++ struct ioh_gbe_option opt = {
++ .type = list_option,
++ .name = "Speed",
++ .err = "parameter ignored",
++ .def = 0,
++ .arg = { .l = { .nr = (int)ARRAY_SIZE(speed_list),
++ .p = speed_list } }
++ };
++
++ if (num_Speed > bd) {
++ speed = Speed[bd];
++ ioh_gbe_validate_option(&speed, &opt, adapter);
++ } else {
++ speed = opt.def;
++ }
++ }
++ { /* Duplex */
++ struct ioh_gbe_opt_list dplx_list[] = {
++ {0, ""},
++ {PHY_HALF_DUPLEX, ""},
++ {PHY_FULL_DUPLEX, ""} };
++
++ struct ioh_gbe_option opt = {
++ .type = list_option,
++ .name = "Duplex",
++ .err = "parameter ignored",
++ .def = 0,
++ .arg = { .l = { .nr = (int)ARRAY_SIZE(dplx_list),
++ .p = dplx_list } }
++ };
++
++ if (num_Duplex > bd) {
++ dplx = Duplex[bd];
++ ioh_gbe_validate_option(&dplx, &opt, adapter);
++ } else {
++ dplx = opt.def;
++ }
++ }
++
++ { /* Autoneg */
++ struct ioh_gbe_opt_list an_list[] =
++ #define AA "AutoNeg advertising "
++ {{ 0x01, AA "10/HD" },
++ { 0x02, AA "10/FD" },
++ { 0x03, AA "10/FD, 10/HD" },
++ { 0x04, AA "100/HD" },
++ { 0x05, AA "100/HD, 10/HD" },
++ { 0x06, AA "100/HD, 10/FD" },
++ { 0x07, AA "100/HD, 10/FD, 10/HD" },
++ { 0x08, AA "100/FD" },
++ { 0x09, AA "100/FD, 10/HD" },
++ { 0x0a, AA "100/FD, 10/FD" },
++ { 0x0b, AA "100/FD, 10/FD, 10/HD" },
++ { 0x0c, AA "100/FD, 100/HD" },
++ { 0x0d, AA "100/FD, 100/HD, 10/HD" },
++ { 0x0e, AA "100/FD, 100/HD, 10/FD" },
++ { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" },
++ { 0x20, AA "1000/FD" },
++ { 0x21, AA "1000/FD, 10/HD" },
++ { 0x22, AA "1000/FD, 10/FD" },
++ { 0x23, AA "1000/FD, 10/FD, 10/HD" },
++ { 0x24, AA "1000/FD, 100/HD" },
++ { 0x25, AA "1000/FD, 100/HD, 10/HD" },
++ { 0x26, AA "1000/FD, 100/HD, 10/FD" },
++ { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" },
++ { 0x28, AA "1000/FD, 100/FD" },
++ { 0x29, AA "1000/FD, 100/FD, 10/HD" },
++ { 0x2a, AA "1000/FD, 100/FD, 10/FD" },
++ { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" },
++ { 0x2c, AA "1000/FD, 100/FD, 100/HD" },
++ { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" },
++ { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
++ { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" } };
++
++ struct ioh_gbe_option opt = {
++ .type = list_option,
++ .name = "AutoNeg",
++ .err = "parameter ignored",
++ .def = AUTONEG_ADV_DEFAULT,
++ .arg = { .l = { .nr = (int)ARRAY_SIZE(an_list),
++ .p = an_list} }
++ };
++
++ if (num_AutoNeg > bd) {
++ if (speed != 0 || dplx != 0) {
++ DPRINTK(PROBE, INFO,
++ "AutoNeg specified along with Speed or Duplex, "
++ "parameter ignored\n");
++ hw->phy.autoneg_advertised = opt.def;
++ } else {
++ hw->phy.autoneg_advertised = AutoNeg[bd];
++ ioh_gbe_validate_option(
++ (int *)(&hw->phy.autoneg_advertised),
++ &opt, adapter);
++ }
++ } else {
++ hw->phy.autoneg_advertised = opt.def;
++ }
++ }
++
++ switch (speed + dplx) {
++ case 0:
++ hw->mac.autoneg = hw->mac.fc_autoneg = 1;
++ if ((num_Speed > bd) && (speed != 0 || dplx != 0))
++ DPRINTK(PROBE, INFO,
++ "Speed and duplex autonegotiation enabled\n");
++ hw->mac.link_speed = SPEED_10;
++ hw->mac.link_duplex = DUPLEX_HALF;
++ break;
++ case PHY_HALF_DUPLEX:
++ DPRINTK(PROBE, INFO, "Half Duplex specified without Speed\n");
++ DPRINTK(PROBE, INFO, "Using Autonegotiation at "
++ "Half Duplex only\n");
++ hw->mac.autoneg = hw->mac.fc_autoneg = 1;
++ hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF |
++ PHY_ADVERTISE_100_HALF;
++ hw->mac.link_speed = SPEED_10;
++ hw->mac.link_duplex = DUPLEX_HALF;
++ break;
++ case PHY_FULL_DUPLEX:
++ DPRINTK(PROBE, INFO, "Full Duplex specified without Speed\n");
++ DPRINTK(PROBE, INFO, "Using Autonegotiation at "
++ "Full Duplex only\n");
++ hw->mac.autoneg = hw->mac.fc_autoneg = 1;
++ hw->phy.autoneg_advertised = PHY_ADVERTISE_10_FULL |
++ PHY_ADVERTISE_100_FULL |
++ PHY_ADVERTISE_1000_FULL;
++ hw->mac.link_speed = SPEED_10;
++ hw->mac.link_duplex = DUPLEX_FULL;
++ break;
++ case PHY_SPEED_10:
++ DPRINTK(PROBE, INFO, "10 Mbps Speed specified "
++ "without Duplex\n");
++ DPRINTK(PROBE, INFO, "Using Autonegotiation at 10 Mbps only\n");
++ hw->mac.autoneg = hw->mac.fc_autoneg = 1;
++ hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF |
++ PHY_ADVERTISE_10_FULL;
++ hw->mac.link_speed = SPEED_10;
++ hw->mac.link_duplex = DUPLEX_HALF;
++ break;
++ case PHY_SPEED_10 + PHY_HALF_DUPLEX:
++ DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Half Duplex\n");
++ hw->mac.autoneg = hw->mac.fc_autoneg = 0;
++ hw->phy.autoneg_advertised = 0;
++ hw->mac.link_speed = SPEED_10;
++ hw->mac.link_duplex = DUPLEX_HALF;
++ break;
++ case PHY_SPEED_10 + PHY_FULL_DUPLEX:
++ DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Full Duplex\n");
++ hw->mac.autoneg = hw->mac.fc_autoneg = 0;
++ hw->phy.autoneg_advertised = 0;
++ hw->mac.link_speed = SPEED_10;
++ hw->mac.link_duplex = DUPLEX_FULL;
++ break;
++ case PHY_SPEED_100:
++ DPRINTK(PROBE, INFO, "100 Mbps Speed specified "
++ "without Duplex\n");
++ DPRINTK(PROBE, INFO, "Using Autonegotiation at "
++ "100 Mbps only\n");
++ hw->mac.autoneg = hw->mac.fc_autoneg = 1;
++ hw->phy.autoneg_advertised = PHY_ADVERTISE_100_HALF |
++ PHY_ADVERTISE_100_FULL;
++ hw->mac.link_speed = SPEED_100;
++ hw->mac.link_duplex = DUPLEX_HALF;
++ break;
++ case PHY_SPEED_100 + PHY_HALF_DUPLEX:
++ DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Half Duplex\n");
++ hw->mac.autoneg = hw->mac.fc_autoneg = 0;
++ hw->phy.autoneg_advertised = 0;
++ hw->mac.link_speed = SPEED_100;
++ hw->mac.link_duplex = DUPLEX_HALF;
++ break;
++ case PHY_SPEED_100 + PHY_FULL_DUPLEX:
++ DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Full Duplex\n");
++ hw->mac.autoneg = hw->mac.fc_autoneg = 0;
++ hw->phy.autoneg_advertised = 0;
++ hw->mac.link_speed = SPEED_100;
++ hw->mac.link_duplex = DUPLEX_FULL;
++ break;
++ case PHY_SPEED_1000:
++ DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without "
++ "Duplex\n");
++ goto full_duplex_only;
++ case PHY_SPEED_1000 + PHY_HALF_DUPLEX:
++ DPRINTK(PROBE, INFO,
++ "Half Duplex is not supported at 1000 Mbps\n");
++ /* fall through */
++ case PHY_SPEED_1000 + PHY_FULL_DUPLEX:
++full_duplex_only:
++ DPRINTK(PROBE, INFO,
++ "Using Autonegotiation at 1000 Mbps Full Duplex only\n");
++ hw->mac.autoneg = hw->mac.fc_autoneg = 1;
++ hw->phy.autoneg_advertised = PHY_ADVERTISE_1000_FULL;
++ hw->mac.link_speed = SPEED_1000;
++ hw->mac.link_duplex = DUPLEX_FULL;
++ break;
++ default:
++ BUG();
++ }
++}
++
+diff -urN linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_pci_ids.h topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_pci_ids.h
+--- linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_pci_ids.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_pci_ids.h 2010-03-09 09:27:26.000000000 +0900
+@@ -0,0 +1,38 @@
++/*!
++ * @file ioh_gbe_pci_ids.h
++ * @brief Linux IOH Gigabit Ethernet Driver PCI ID header file
++ *
++ * @version 0.90
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR CO., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 09/21/2009
++ * modified:
++ *
++ */
++#ifndef _IOH_GBE_PCI_IDS_H_
++#define _IOH_GBE_PCI_IDS_H_
++
++/* PCI vendor/device ID */
++#define PCI_DEVICE_ID_INTEL_IOH1_GBE (u16)(0x8802)
++
++#endif
+diff -urN linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_phy.c topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_phy.c
+--- linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_phy.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_phy.c 2010-03-09 09:27:26.000000000 +0900
+@@ -0,0 +1,493 @@
++/*!
++ * @file ioh_gbe_phy.c
++ * @brief Linux IOH Gigabit Ethernet Driver HAL internal function (PHY) source file
++ *
++ * @version 0.90
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR CO., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 09/21/2009
++ * modified:
++ *
++ */
++#include <linux/pci.h>
++#include <linux/netdevice.h>
++#include <linux/ethtool.h>
++#include <linux/mii.h>
++
++#include "pch_gbe_osdep.h"
++#include "pch_gbe_defines.h"
++#include "pch_gbe_hw.h"
++#include "pch_gbe_phy.h"
++#include "pch_gbe_api.h"
++#include "pch_gbe_regs.h"
++#include "pch_gbe.h"
++
++
++/*!
++ * @ingroup HAL internal function
++ * @def PHY_CONTROL_DEFAULT
++ * @brief Default value of CONTROL register of PHY
++ */
++#define PHY_CONTROL_DEFAULT 0x1140 /* Control Register */
++
++/*!
++ * @ingroup HAL internal function
++ * @def PHY_AUTONEG_ADV_DEFAULT
++ * @brief Default value of AUTONEG_ADV register of PHY
++ */
++#define PHY_AUTONEG_ADV_DEFAULT 0x01e0 /* Autoneg Advertisement */
++
++/*!
++ * @ingroup HAL internal function
++ * @def PHY_NEXT_PAGE_TX_DEFAULT
++ * @brief Default value of NEXT_PAGE_TX register of PHY
++ */
++#define PHY_NEXT_PAGE_TX_DEFAULT 0x2001 /* Next Page TX */
++
++/*!
++ * @ingroup HAL internal function
++ * @def PHY_1000T_CTRL_DEFAULT
++ * @brief Default value of 1000T_CTRL register of PHY
++ */
++#define PHY_1000T_CTRL_DEFAULT 0x0300 /* 1000Base-T Control Register */
++
++/*!
++ * @ingroup HAL internal function
++ * @def PHY_PHYSP_CONTROL_DEFAULT
++ * @brief Default value of PHYSP_CONTROL register of PHY
++ */
++#ifdef FPGA
++#define PHY_PHYSP_CONTROL_DEFAULT 0x0078 /* PHY Specific Control Register */
++#else
++#define PHY_PHYSP_CONTROL_DEFAULT 0x01EE /* PHY Specific Control Register */
++#endif
++
++/*!
++ * @ingroup HAL internal function
++ * @def PHY_EXT_PHYSP_CONTROL_DEFAULT
++ * @brief Default value of EXT_PHYSP_CONTROL register of PHY
++ */
++#define PHY_EXT_PHYSP_CONTROL_DEFAULT 0x0c60
++
++/*!
++ * @ingroup HAL internal function
++ * @def PHY_LED_CONTROL_DEFAULT
++ * @brief Default value of LED_CONTROL register of PHY
++ */
++#define PHY_LED_CONTROL_DEFAULT 0x4100
++
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_phy_get_id(struct ioh_gbe_hw *hw)
++ * @brief Retrieve the PHY ID and revision
++ * @param hw [IN] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks
++ * Reads the PHY registers and stores the PHY ID and possibly the PHY
++ * revision in the hardware structure.
++ */
++s32
++ioh_gbe_phy_get_id(struct ioh_gbe_hw *hw)
++{
++ struct ioh_gbe_phy_info *phy = &hw->phy;
++ s32 ret;
++ u16 phy_id1;
++ u16 phy_id2;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_phy_get_id");
++
++ ret = ioh_gbe_hal_read_phy_reg(hw, PHY_ID1, &phy_id1);
++ if (ret != 0)
++ return ret;
++ ret = ioh_gbe_hal_read_phy_reg(hw, PHY_ID2, &phy_id2);
++ if (ret != 0)
++ return ret;
++ /*
++ * PHY_ID1: [bit15-0:ID(21-6)]
++ * PHY_ID2: [bit15-10:ID(5-0)][bit9-4:Model][bit3-0:revision]
++ */
++ phy->id = (u32)phy_id1;
++ phy->id = ((phy->id << 6) | ((phy_id2 & 0xFC00) >> 10));
++ phy->revision = (u32) (phy_id2 & 0x000F);
++
++#ifdef DEBUG_TEST
++ IOH_GBE_TESTOUT("phy->id : 0x%08x phy->revision : 0x%08x\n",
++ phy->id, phy->revision);
++#endif
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_phy_read_reg_miic(struct ioh_gbe_hw *hw,
++ * u32 offset, u16 *data)
++ * @brief Read MII control register
++ * @param hw [IN] Pointer to the HW structure
++ * @param offset [IN] Register offset to be read
++ * @param data [OUT] Pointer to the read data
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks
++ * Reads the PHY registers and stores the PHY ID and possibly the PHY
++ * revision in the hardware structure.
++ */
++s32
++ioh_gbe_phy_read_reg_miic(struct ioh_gbe_hw *hw, u32 offset, u16 *data)
++{
++ struct ioh_gbe_phy_info *phy = &hw->phy;
++ s32 ret_val = IOH_GBE_SUCCESS;
++
++ if (offset > PHY_MAX_REG_ADDRESS) {
++ IOH_GBE_DBGOUT1("PHY Address %d is out of range\n", offset);
++ ret_val = -IOH_GBE_ERR_PARAM;
++ } else {
++ *data = ioh_gbe_hal_ctrl_miim(hw, phy->addr,
++ IOH_GBE_HAL_MIIM_READ, offset, (u16)0);
++ }
++ return ret_val;
++}
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_phy_write_reg_miic(struct ioh_gbe_hw *hw,
++ * u32 offset, u16 data)
++ * @brief Write MII control register
++ * @param hw [IN] Pointer to the HW structure
++ * @param offset [IN] Register offset to be read
++ * @param data [IN] data to write to register at offset
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks
++ * Writes data to MDI control register in the PHY at offset.
++ */
++s32
++ioh_gbe_phy_write_reg_miic(struct ioh_gbe_hw *hw, u32 offset, u16 data)
++{
++ struct ioh_gbe_phy_info *phy = &hw->phy;
++ s32 ret_val = IOH_GBE_SUCCESS;
++
++ if (offset > PHY_MAX_REG_ADDRESS) {
++ IOH_GBE_DBGOUT1("PHY Address %d is out of range\n", offset);
++ ret_val = -IOH_GBE_ERR_PARAM;
++ } else {
++ ioh_gbe_hal_ctrl_miim(hw, phy->addr,
++ IOH_GBE_HAL_MIIM_WRITE, offset, data);
++ }
++ return ret_val;
++}
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_phy_setup_link_fpga(struct ioh_gbe_hw *hw)
++ * @brief Configure link settings for FPGA
++ * @param hw [IN] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks
++ * Calls the appropriate function to configure the link for auto-neg or forced
++ * speed and duplex. Then we check for link, once link is established calls
++ * to configure collision distance and flow control are called. If link is
++ * not established, we return -IOH_GBE_ERR_PHY (-2).
++ */
++s32
++ioh_gbe_phy_setup_link_fpga(struct ioh_gbe_hw *hw)
++{
++
++ IOH_GBE_DBGFUNC("ioh_gbe_phy_setup_link_fpga");
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_phy_sw_reset(struct ioh_gbe_hw *hw)
++ * @brief PHY software reset
++ * @param hw [IN] Pointer to the HW structure
++ * @return None
++ * @remarks
++ * Does a software reset of the PHY by reading the PHY control register and
++ * setting/write the control register reset bit to the PHY.
++ */
++void
++ioh_gbe_phy_sw_reset(struct ioh_gbe_hw *hw)
++{
++ u16 phy_ctrl;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_phy_sw_reset");
++
++ ioh_gbe_hal_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
++ phy_ctrl |= MII_CR_RESET;
++ ioh_gbe_hal_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
++ usec_delay(1);
++}
++
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_phy_hw_reset(struct ioh_gbe_hw *hw)
++ * @brief PHY hardware reset
++ * @param hw [IN] Pointer to the HW structure
++ * @return None
++ * @remarks
++ * Verify the reset block is not blocking us from resetting. Acquire
++ * semaphore (if necessary) and read/set/write the device control reset
++ * bit in the PHY. Wait the appropriate delay time for the device to
++ * reset and release the semaphore (if necessary).
++ */
++void
++ioh_gbe_phy_hw_reset(struct ioh_gbe_hw *hw)
++{
++#ifndef PHY_RESET_REG_INIT
++ struct ioh_gbe_phy_info *phy = &hw->phy;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_phy_hw_reset");
++
++ /* ISSUE: reset used GPIO driver */
++ usec_delay(phy->reset_delay_us);
++ /* ISSUE: release reset used GPIO driver */
++#else
++ IOH_GBE_DBGFUNC("ioh_gbe_phy_hw_reset");
++
++ ioh_gbe_hal_write_phy_reg(hw, PHY_CONTROL, PHY_CONTROL_DEFAULT);
++ ioh_gbe_hal_write_phy_reg(hw, PHY_AUTONEG_ADV,
++ PHY_AUTONEG_ADV_DEFAULT);
++ ioh_gbe_hal_write_phy_reg(hw, PHY_NEXT_PAGE_TX,
++ PHY_NEXT_PAGE_TX_DEFAULT);
++ ioh_gbe_hal_write_phy_reg(hw, PHY_1000T_CTRL, PHY_1000T_CTRL_DEFAULT);
++ ioh_gbe_hal_write_phy_reg(hw, PHY_PHYSP_CONTROL,
++ PHY_PHYSP_CONTROL_DEFAULT);
++#ifdef FPGA
++ ioh_gbe_hal_write_phy_reg(hw, PHY_EXT_PHYSP_CONTROL,
++ PHY_EXT_PHYSP_CONTROL_DEFAULT);
++ ioh_gbe_hal_write_phy_reg(hw, PHY_LED_CONTROL, PHY_LED_CONTROL_DEFAULT);
++#endif
++
++#endif
++
++}
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_phy_led_on(struct ioh_gbe_hw *hw)
++ * @brief Setting of LED on
++ * @param hw [IN] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ */
++s32
++ioh_gbe_phy_led_on(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_phy_led_on");
++
++ ioh_gbe_hal_write_phy_reg(hw, PHY_LED_CONTROL, PHY_LED_CTRL_ON);
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_phy_led_off(struct ioh_gbe_hw *hw)
++ * @brief Setting of LED off
++ * @param hw [IN] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ */
++s32
++ioh_gbe_phy_led_off(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_phy_led_off");
++
++ ioh_gbe_hal_write_phy_reg(hw, PHY_LED_CONTROL, PHY_LED_CTRL_OFF);
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_phy_led_cleanup(struct ioh_gbe_hw *hw)
++ * @brief Cleanup led control
++ * @param hw [IN] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ */
++s32
++ioh_gbe_phy_led_cleanup(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_phy_led_cleanup");
++
++ ioh_gbe_hal_write_phy_reg(hw, PHY_LED_CONTROL, PHY_LED_CTRL_CLEANUP);
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_phy_led_setup(struct ioh_gbe_hw *hw)
++ * @brief Setup led control
++ * @param hw [IN] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ */
++s32
++ioh_gbe_phy_led_setup(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_phy_led_setup");
++
++ return IOH_GBE_SUCCESS;
++}
++
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_phy_power_up(struct ioh_gbe_hw *hw)
++ * @brief restore link in case the phy was powered down
++ * @param hw [IN] Pointer to the HW structure
++ * @return None
++ * @remarks
++ * The phy may be powered down to save power and turn off link when the
++ * driver is unloaded and wake on lan is not enabled (among others)
++ * *** this routine MUST be followed by a call to ioh_gbe_reset ***
++ */
++void ioh_gbe_phy_power_up(struct ioh_gbe_hw *hw)
++{
++ u16 mii_reg;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_phy_power_up");
++
++ mii_reg = 0;
++ /* Just clear the power down bit to wake the phy back up */
++ /* according to the manual, the phy will retain its
++ * settings across a power-down/up cycle */
++ ioh_gbe_hal_read_phy_reg(hw, PHY_CONTROL, &mii_reg);
++ mii_reg &= ~MII_CR_POWER_DOWN;
++ ioh_gbe_hal_write_phy_reg(hw, PHY_CONTROL, mii_reg);
++
++#ifdef DEBUG_TEST
++ ioh_gbe_hal_read_phy_reg(hw, PHY_CONTROL, &mii_reg);
++ IOH_GBE_TESTOUT("PHY_CONTROL reg : 0x%08x\n", mii_reg);
++#endif
++ return;
++}
++
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_phy_power_down(struct ioh_gbe_hw *hw)
++ * @brief Power down PHY
++ * @param hw [IN] Pointer to the HW structure
++ * @return None
++ */
++void ioh_gbe_phy_power_down(struct ioh_gbe_hw *hw)
++{
++ u16 mii_reg;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_phy_power_down");
++
++ mii_reg = 0;
++ /* Power down the PHY so no link is implied when interface is down *
++ * The PHY cannot be powered down if any of the following is TRUE *
++ * (a) WoL is enabled
++ * (b) AMT is active
++ */
++ ioh_gbe_hal_read_phy_reg(hw, PHY_CONTROL, &mii_reg);
++ mii_reg |= MII_CR_POWER_DOWN;
++ ioh_gbe_hal_write_phy_reg(hw, PHY_CONTROL, mii_reg);
++ mdelay(1);
++
++#ifdef DEBUG_TEST
++ ioh_gbe_hal_read_phy_reg(hw, PHY_CONTROL, &mii_reg);
++ IOH_GBE_TESTOUT("PHY_CONTROL reg : 0x%08x\n", mii_reg);
++#endif
++ return;
++}
++
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_phy_set_rgmii(struct ioh_gbe_hw *hw)
++ * @brief RGMII interface setting
++ * @param hw [IN] Pointer to the HW structure
++ * @return None
++ */
++#ifdef FPGA
++void ioh_gbe_phy_set_rgmii(struct ioh_gbe_hw *hw)
++{
++ u16 mii_reg;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_phy_set_rgmii");
++
++ ioh_gbe_hal_read_phy_reg(hw, PHY_EXT_PHYSP_STATUS, &mii_reg);
++ mii_reg &= ~HWCFG_MODE_MASK;
++ mii_reg |= HWCFG_MODE_RGMII_COPPER;
++ ioh_gbe_hal_write_phy_reg(hw, PHY_EXT_PHYSP_STATUS, mii_reg);
++ ioh_gbe_hal_read_phy_reg(hw, PHY_EXT_PHYSP_CONTROL, &mii_reg);
++ mii_reg |= 0x01; /* Transfer enable */
++ mii_reg |= 0x02; /* add delay to GTX_CLK */
++ mii_reg |= 0x80; /* add delay to RX_CLK */
++ ioh_gbe_hal_write_phy_reg(hw, PHY_EXT_PHYSP_CONTROL, mii_reg);
++ ioh_gbe_hal_phy_sw_reset(hw);
++
++#ifdef DEBUG_TEST
++ ioh_gbe_hal_read_phy_reg(hw, PHY_EXT_PHYSP_STATUS, &mii_reg);
++ IOH_GBE_TESTOUT("PHY_EXT_PHYSP_STATUS reg : 0x%08x\n", mii_reg);
++ ioh_gbe_hal_read_phy_reg(hw, PHY_EXT_PHYSP_CONTROL, &mii_reg);
++ IOH_GBE_TESTOUT("PHY_EXT_PHYSP_CONTROL reg : 0x%08x\n", mii_reg);
++#endif
++ return;
++}
++#else
++void ioh_gbe_phy_set_rgmii(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_phy_set_rgmii");
++
++ ioh_gbe_hal_phy_sw_reset(hw);
++ return;
++}
++#endif
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_phy_init_setting(struct ioh_gbe_hw *hw)
++ * @brief PHY initial setting
++ * @param hw [IN] Pointer to the HW structure
++ * @return None
++ */
++void ioh_gbe_phy_init_setting(struct ioh_gbe_hw *hw)
++{
++ struct ioh_gbe_adapter *adapter;
++ struct ethtool_cmd cmd;
++ int ret;
++ u16 mii_reg;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_phy_init_setting");
++
++ adapter = container_of(hw, struct ioh_gbe_adapter, hw);
++ ret = mii_ethtool_gset(&adapter->mii, &cmd);
++ if (ret != 0)
++ IOH_GBE_ERR("Error: mii_ethtool_gset\n");
++
++ cmd.speed = hw->mac.link_speed;
++ cmd.duplex = hw->mac.link_duplex;
++ cmd.advertising = hw->phy.autoneg_advertised;
++ cmd.autoneg = hw->mac.autoneg;
++ ioh_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET);
++ ret = mii_ethtool_sset(&adapter->mii, &cmd);
++ if (ret != 0)
++ IOH_GBE_ERR("Error: mii_ethtool_sset\n");
++
++ ioh_gbe_hal_phy_sw_reset(hw);
++
++ ioh_gbe_hal_read_phy_reg(hw, PHY_PHYSP_CONTROL, &mii_reg);
++ mii_reg |= PHYSP_CTRL_ASSERT_CRS_TX;
++ ioh_gbe_hal_write_phy_reg(hw, PHY_PHYSP_CONTROL, mii_reg);
++
++}
+diff -urN linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_phy.h topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_phy.h
+--- linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_phy.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_phy.h 2010-03-09 09:27:26.000000000 +0900
+@@ -0,0 +1,136 @@
++/*!
++ * @file ioh_gbe_phy.h
++ * @brief Linux IOH Gigabit Ethernet Driver HAL internal function (PHY) header file
++ *
++ * @version 0.90
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR CO., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 09/21/2009
++ * modified:
++ *
++ */
++#ifndef _IOH_GBE_PHY_H_
++#define _IOH_GBE_PHY_H_
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_phy_get_id(struct ioh_gbe_hw *hw)
++ * @brief Retrieve the PHY ID and revision
++ */
++s32 ioh_gbe_phy_get_id(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_phy_read_reg_miic(struct ioh_gbe_hw *hw,
++ * u32 offset, u16 *data)
++ * @brief Read MII control register
++ */
++s32 ioh_gbe_phy_read_reg_miic(struct ioh_gbe_hw *hw, u32 offset, u16 *data);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_phy_write_reg_miic(struct ioh_gbe_hw *hw,
++ * u32 offset, u16 data)
++ * @brief Write MII control register
++ */
++s32 ioh_gbe_phy_write_reg_miic(struct ioh_gbe_hw *hw, u32 offset, u16 data);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_phy_setup_link_fpga(struct ioh_gbe_hw *hw)
++ * @brief Configure link settings for FPGA
++ */
++s32 ioh_gbe_phy_setup_link_fpga(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_phy_sw_reset(struct ioh_gbe_hw *hw)
++ * @brief PHY software reset
++ */
++void ioh_gbe_phy_sw_reset(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_phy_hw_reset(struct ioh_gbe_hw *hw)
++ * @brief PHY hardware reset
++ */
++void ioh_gbe_phy_hw_reset(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_phy_led_on(struct ioh_gbe_hw *hw)
++ * @brief Setting of LED on
++ */
++s32 ioh_gbe_phy_led_on(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_phy_led_off(struct ioh_gbe_hw *hw)
++ * @brief Setting of LED off
++ */
++s32 ioh_gbe_phy_led_off(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_phy_led_cleanup(struct ioh_gbe_hw *hw)
++ * @brief Cleanup led control
++ */
++s32 ioh_gbe_phy_led_cleanup(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn s32 ioh_gbe_phy_led_setup(struct ioh_gbe_hw *hw)
++ * @brief Setup led control
++ */
++s32 ioh_gbe_phy_led_setup(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_phy_power_up(struct ioh_gbe_hw *hw)
++ * @brief restore link in case the phy was powered down
++ */
++void ioh_gbe_phy_power_up(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_phy_power_down(struct ioh_gbe_hw *hw)
++ * @brief Power down PHY
++ */
++void ioh_gbe_phy_power_down(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_phy_set_rgmii(struct ioh_gbe_hw *hw)
++ * @brief RGMII interface setting
++ */
++void ioh_gbe_phy_set_rgmii(struct ioh_gbe_hw *hw);
++
++/*!
++ * @ingroup HAL internal function
++ * @fn void ioh_gbe_phy_init_setting(struct ioh_gbe_hw *hw)
++ * @brief PHY initial setting
++ */
++void ioh_gbe_phy_init_setting(struct ioh_gbe_hw *hw);
++
++
++#endif /* _IOH_GBE_PHY_H_ */
+diff -urN linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_plat.c topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_plat.c
+--- linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_plat.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_plat.c 2010-03-11 15:11:52.000000000 +0900
+@@ -0,0 +1,175 @@
++/*!
++ * @file ioh_gbe_plat.c
++ * @brief Linux IOH Gigabit Ethernet Driver HAL internal function (platform) source file
++ *
++ * @version 0.90
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR CO., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 09/21/2009
++ * modified:
++ *
++ */
++
++#include "pch_gbe_osdep.h"
++#include "pch_gbe_defines.h"
++#include "pch_gbe_hw.h"
++#include "pch_gbe_mac.h"
++#include "pch_gbe_nvm.h"
++#include "pch_gbe_phy.h"
++#include "pch_gbe_api.h"
++
++
++static void ioh_gbe_plat_get_bus_info(struct ioh_gbe_hw *hw);
++static s32 ioh_gbe_plat_init_hw(struct ioh_gbe_hw *hw);
++
++
++/*!
++ * @ingroup HAL internal functions
++ * @fn void ioh_gbe_plat_init_function_pointers(struct ioh_gbe_hw *hw)
++ * @brief Init func ptrs.
++ * @param hw [OUT] Pointer to the HW structure
++ * @return None
++ * @remarks
++ * The only function explicitly called by the api module to initialize
++ * all function pointers and parameters.
++ */
++void
++ioh_gbe_plat_init_function_pointers(struct ioh_gbe_hw *hw)
++{
++ struct ioh_gbe_mac_info *mac = &hw->mac;
++ struct ioh_gbe_phy_info *phy = &hw->phy;
++ struct ioh_gbe_nvm_info *nvm = &hw->nvm;
++ struct ioh_gbe_functions *func = &hw->func;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_plat_init_function_pointers");
++
++ /* Set MAC address registers entry count */
++ mac->mar_entry_count = IOH_GBE_MAR_ENTRIES;
++ /* Set PHY parameter */
++ phy->reset_delay_us = IOH_GBE_PHY_RESET_DELAY_US;
++ /* Set NVM parameter */
++ nvm->word_size = IOH_GBE_NVM_WORD_SIZE;
++
++ /* Set function pointers */
++ func->get_bus_info = ioh_gbe_plat_get_bus_info;
++ func->reset_hw = ioh_gbe_mac_reset_hw;
++ func->init_hw = ioh_gbe_plat_init_hw;
++ func->setup_link = ioh_gbe_mac_setup_link;
++ func->setup_physical_interface = ioh_gbe_phy_setup_link_fpga;
++ func->mc_addr_list_update = ioh_gbe_mac_mc_addr_list_update;
++ func->setup_led = ioh_gbe_phy_led_setup;
++ func->cleanup_led = ioh_gbe_phy_led_cleanup;
++ func->led_on = ioh_gbe_phy_led_on;
++ func->led_off = ioh_gbe_phy_led_off;
++ func->read_phy_reg = ioh_gbe_phy_read_reg_miic;
++ func->write_phy_reg = ioh_gbe_phy_write_reg_miic;
++ func->reset_phy = ioh_gbe_phy_hw_reset;
++ func->sw_reset_phy = ioh_gbe_phy_sw_reset;
++ func->power_up_phy = ioh_gbe_phy_power_up;
++ func->power_down_phy = ioh_gbe_phy_power_down;
++#ifdef CONFIG_PCH_PCIEQOS
++ func->read_nvm = ioh_gbe_nvm_read_mem;
++ func->write_nvm = ioh_gbe_nvm_write_mem;
++ func->validate_nvm = ioh_gbe_nvm_validate_checksum;
++ func->read_mac_addr = ioh_gbe_nvm_read_mac_addr;
++#else
++ func->read_mac_addr = ioh_gbe_mac_read_mac_addr;
++#endif
++ func->ctrl_miim = ioh_gbe_mac_ctrl_miim;
++ func->pause_packet = ioh_gbe_mac_set_pause_packet;
++
++#ifdef DEBUG_TEST
++ IOH_GBE_TESTOUT("[MAC]mar_entry_count:%d /[PHY] reset_delay_us:%d\n",
++ mac->mar_entry_count, phy->reset_delay_us);
++ IOH_GBE_TESTOUT("[NVM] word_size:0x%08x\n", nvm->word_size);
++#endif
++}
++
++
++/*!
++ * @ingroup HAL internal functions
++ * @fn static void ioh_gbe_plat_get_bus_info(struct ioh_gbe_hw *hw)
++ * @brief Obtain bus information for adapter
++ * @param hw [OUT] Pointer to the HW structure
++ * @return None
++ * @remarks
++ * This will obtain information about the HW bus for which the
++ * adapter is attached and stores it in the hw structure. This is a function
++ * pointer entry point called by the api module.
++ */
++static void
++ioh_gbe_plat_get_bus_info(struct ioh_gbe_hw *hw)
++{
++ IOH_GBE_DBGFUNC("ioh_gbe_plat_get_bus_info");
++
++ hw->bus.type = ioh_gbe_bus_type_pci_express;
++ hw->bus.speed = ioh_gbe_bus_speed_2500;
++ hw->bus.width = ioh_gbe_bus_width_pcie_x1;
++
++#ifdef DEBUG_TEST
++ IOH_GBE_TESTOUT("[BUS] type:0x%08x speed:0x%08x width:0x%08x\n",
++ hw->bus.type, hw->bus.speed, hw->bus.width);
++#endif
++}
++
++/*!
++ * @ingroup HAL internal functions
++ * @fn static s32 ioh_gbe_plat_init_hw(struct ioh_gbe_hw *hw)
++ * @brief Initialize hardware
++ * @param hw [INOUT] Pointer to the HW structure
++ * @return IOH_GBE_SUCCESS: Successfully
++ * @return Negative value: Failed
++ * @remarks
++ * This inits the hardware readying it for operation. This is a
++ * function pointer entry point called by the api module.
++ */
++static s32
++ioh_gbe_plat_init_hw(struct ioh_gbe_hw *hw)
++{
++ struct ioh_gbe_mac_info *mac = &hw->mac;
++ s32 ret_val;
++
++ IOH_GBE_DBGFUNC("ioh_gbe_plat_init_hw");
++
++ /* Setup the receive address. */
++ ioh_gbe_mac_init_rx_addrs(hw, mac->mar_entry_count);
++
++ ret_val = ioh_gbe_phy_get_id(hw);
++ if (ret_val) {
++ IOH_GBE_ERR("ioh_gbe_phy_get_id error\n");
++ return ret_val;
++ }
++ ioh_gbe_phy_init_setting(hw);
++ /* Setup Mac interface option RGMII */
++#ifdef IOH_GBE_MAC_IFOP_RGMII
++ ioh_gbe_phy_set_rgmii(hw);
++#endif
++ /* Setup link and flow control */
++ ret_val = ioh_gbe_hal_setup_link(hw);
++#ifdef DEBUG_TEST
++ if (ret_val)
++ IOH_GBE_ERR("ioh_gbe_phy_get_id error\n");
++#endif
++ return ret_val;
++}
++
+diff -urN linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_regs.h topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_regs.h
+--- linux-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_regs.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/net/pch_gbe/pch_gbe_regs.h 2010-03-09 09:27:26.000000000 +0900
+@@ -0,0 +1,351 @@
++/*!
++ * @file ioh_gbe_regs.h
++ * @brief Linux IOH Gigabit Ethernet Driver register macro header file
++ *
++ * @version 0.90
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR CO., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 09/21/2009
++ * modified:
++ *
++ */
++#ifndef _IOH_GBE_REGS_H_
++#define _IOH_GBE_REGS_H_
++
++/* MAC registers */
++#define IOH_GBE_INT_ST 0x0000 /* Interrupt Status */
++#define IOH_GBE_INT_EN 0x0004 /* Interrupt Enable */
++#define IOH_GBE_MODE 0x0008 /* Mode */
++#define IOH_GBE_RESET 0x000C /* Reset */
++#define IOH_GBE_TCPIP_ACC 0x0010 /* TCP/IP Accelerator Control */
++#define IOH_GBE_EX_LIST 0x0014 /* External List */
++#define IOH_GBE_INT_ST_HOLD 0x0018 /* Interrupt Status Hold */
++#define IOH_GBE_PHY_INT_CTRL 0x001C /* PHY Interrupt Control */
++#define IOH_GBE_MAC_RX_EN 0x0020 /* MAC RX Enable */
++#define IOH_GBE_RX_FCTRL 0x0024 /* RX Flow Control */
++#define IOH_GBE_PAUSE_REQ 0x0028 /* Pause Packet Request */
++#define IOH_GBE_RX_MODE 0x002C /* RX Mode */
++#define IOH_GBE_TX_MODE 0x0030 /* TX Mode */
++#define IOH_GBE_RX_FIFO_ST 0x0034 /* RX FIFO Status */
++#define IOH_GBE_TX_FIFO_ST 0x0038 /* TX FIFO Status */
++#define IOH_GBE_TX_FID 0x003C /* TX Frame ID */
++#define IOH_GBE_TX_RESULT 0x0040 /* TX Result */
++#define IOH_GBE_PAUSE_PKT1 0x0044 /* Pause Packet1 */
++#define IOH_GBE_PAUSE_PKT2 0x0048 /* Pause Packet2 */
++#define IOH_GBE_PAUSE_PKT3 0x004C /* Pause Packet3 */
++#define IOH_GBE_PAUSE_PKT4 0x0050 /* Pause Packet4 */
++#define IOH_GBE_PAUSE_PKT5 0x0054 /* Pause Packet5 */
++#define IOH_GBE_MAC_ADR 0x0060 /* MAC Address */
++#define IOH_GBE_MAC_ADR1A 0x0060 /* MAC Address 1A */
++#define IOH_GBE_MAC_ADR1B 0x0064 /* MAC Address 1B */
++#define IOH_GBE_MAC_ADR2A 0x0068 /* MAC Address 2A */
++#define IOH_GBE_MAC_ADR2B 0x006C /* MAC Address 2B */
++#define IOH_GBE_MAC_ADR3A 0x0070 /* MAC Address 3A */
++#define IOH_GBE_MAC_ADR3B 0x0074 /* MAC Address 3B */
++#define IOH_GBE_MAC_ADR4A 0x0078 /* MAC Address 4A */
++#define IOH_GBE_MAC_ADR4B 0x007C /* MAC Address 4B */
++#define IOH_GBE_MAC_ADR5A 0x0080 /* MAC Address 5A */
++#define IOH_GBE_MAC_ADR5B 0x0084 /* MAC Address 5B */
++#define IOH_GBE_MAC_ADR6A 0x0088 /* MAC Address 6A */
++#define IOH_GBE_MAC_ADR6B 0x008C /* MAC Address 6B */
++#define IOH_GBE_MAC_ADR7A 0x0090 /* MAC Address 7A */
++#define IOH_GBE_MAC_ADR7B 0x0094 /* MAC Address 7B */
++#define IOH_GBE_MAC_ADR8A 0x0098 /* MAC Address 8A */
++#define IOH_GBE_MAC_ADR8B 0x009C /* MAC Address 8B */
++#define IOH_GBE_MAC_ADR9A 0x00A0 /* MAC Address 9A */
++#define IOH_GBE_MAC_ADR9B 0x00A4 /* MAC Address 9B */
++#define IOH_GBE_MAC_ADR10A 0x00A8 /* MAC Address 10A */
++#define IOH_GBE_MAC_ADR10B 0x00AC /* MAC Address 10B */
++#define IOH_GBE_MAC_ADR11A 0x00B0 /* MAC Address 11A */
++#define IOH_GBE_MAC_ADR11B 0x00B4 /* MAC Address 11B */
++#define IOH_GBE_MAC_ADR12A 0x00B8 /* MAC Address 12A */
++#define IOH_GBE_MAC_ADR12B 0x00BC /* MAC Address 12B */
++#define IOH_GBE_MAC_ADR13A 0x00C0 /* MAC Address 13A */
++#define IOH_GBE_MAC_ADR13B 0x00C4 /* MAC Address 13B */
++#define IOH_GBE_MAC_ADR14A 0x00C8 /* MAC Address 14A */
++#define IOH_GBE_MAC_ADR14B 0x0CC /* MAC Address 14B */
++#define IOH_GBE_MAC_ADR15A 0x00D0 /* MAC Address 15A */
++#define IOH_GBE_MAC_ADR15B 0x00D4 /* MAC Address 15B */
++#define IOH_GBE_MAC_ADR16A 0x00D8 /* MAC Address 16A */
++#define IOH_GBE_MAC_ADR16B 0x00DC /* MAC Address 16B */
++#define IOH_GBE_ADDR_MASK 0x00E0 /* MAC Address Mask */
++#define IOH_GBE_MIIM 0x00E4 /* MIIM */
++#define IOH_GBE_RGMII_ST 0x00EC /* RGMII Status */
++#define IOH_GBE_RGMII_CTRL 0x00F0 /* RGMII Control */
++#define IOH_GBE_DMA_CTRL 0x0100 /* DMA Control */
++#define IOH_GBE_RX_DSC_BASE 0x0110 /* RX Descriptor Base Address */
++#define IOH_GBE_RX_DSC_SIZE 0x0114 /* RX Descriptor Size */
++#define IOH_GBE_RX_DSC_HW_P 0x0118 /* RX Descriptor Hard Pointer */
++#define IOH_GBE_RX_DSC_HW_P_HLD 0x011C /* RX Descriptor Hard Pointer Hold */
++#define IOH_GBE_RX_DSC_SW_P 0x0120 /* RX Descriptor Soft Pointer */
++#define IOH_GBE_TX_DSC_BASE 0x0130 /* TX Descriptor Base Address */
++#define IOH_GBE_TX_DSC_SIZE 0x0134 /* TX Descriptor Size */
++#define IOH_GBE_TX_DSC_HW_P 0x0138 /* TX Descriptor Hard Pointer */
++#define IOH_GBE_TX_DSC_HW_P_HLD 0x013C /* TX Descriptor Hard Pointer Hold */
++#define IOH_GBE_TX_DSC_SW_P 0x0140 /* TX Descriptor Soft Pointer */
++#define IOH_GBE_RX_DMA_ST 0x0150 /* RX DMA Status */
++#define IOH_GBE_TX_DMA_ST 0x0154 /* TX DMA Status */
++#define IOH_GBE_WOL_ST 0x0160 /* Wake On LAN Status */
++#define IOH_GBE_WOL_CTRL 0x0164 /* Wake On LAN Control */
++#define IOH_GBE_WOL_ADDR_MASK 0x0168 /* Wake On LAN Address Mask */
++
++
++/* Definitions for MAC registers */
++
++/* Interrupt Status */
++/* Interrupt Status Hold */
++/* Interrupt Enable */
++#define IOH_GBE_INT_RX_DMA_CMPLT 0x00000001 /* Receive DMA Transfer Complete */
++#define IOH_GBE_INT_RX_VALID 0x00000002 /* MAC Normal Receive Complete */
++#define IOH_GBE_INT_RX_FRAME_ERR 0x00000004 /* Receive frame error */
++#define IOH_GBE_INT_RX_FIFO_ERR 0x00000008 /* Receive FIFO Overflow */
++#define IOH_GBE_INT_RX_DMA_ERR 0x00000010 /* Receive DMA Transfer Error */
++#define IOH_GBE_INT_RX_DSC_EMP 0x00000020 /* Receive Descriptor Empty */
++#define IOH_GBE_INT_TX_CMPLT 0x00000100 /* MAC Transmission Complete */
++#define IOH_GBE_INT_TX_DMA_CMPLT 0x00000200 /* DMA Transfer Complete */
++#define IOH_GBE_INT_TX_FIFO_ERR 0x00000400 /* Transmission FIFO underflow. */
++#define IOH_GBE_INT_TX_DMA_ERR 0x00000800 /* Transmission DMA Error */
++#define IOH_GBE_INT_PAUSE_CMPLT 0x00001000 /* Pause Transmission complete */
++#define IOH_GBE_INT_MIIM_CMPLT 0x00010000 /* MIIM I/F Read completion */
++#define IOH_GBE_INT_PHY_INT 0x00100000 /* Interruption from PHY */
++#define IOH_GBE_INT_WOL_DET 0x01000000 /* Wake On LAN Event detection. */
++#define IOH_GBE_INT_TCPIP_ERR 0x10000000 /* TCP/IP Accelerator Error */
++
++/* Mode */
++#define IOH_GBE_MODE_MII_ETHER 0x00000000 /* GIGA Ethernet Mode [MII] */
++#define IOH_GBE_MODE_GMII_ETHER 0x80000000 /* GIGA Ethernet Mode [GMII] */
++#define IOH_GBE_MODE_HALF_DUPLEX 0x00000000 /* Duplex Mode [half duplex] */
++#define IOH_GBE_MODE_FULL_DUPLEX 0x40000000 /* Duplex Mode [full duplex] */
++#define IOH_GBE_MODE_FR_BST 0x04000000 /* Frame bursting is done */
++
++/* Reset */
++#define IOH_GBE_ALL_RST 0x80000000 /* All reset */
++#define IOH_GBE_TX_RST 0x40000000 /* TX MAC, TX FIFO, TX DMA reset */
++#define IOH_GBE_RX_RST 0x04000000 /* RX MAC, RX FIFO, RX DMA reset */
++
++/* TCP/IP Accelerator Control */
++/* External List Enable */
++#define IOH_GBE_EX_LIST_EN 0x00000008
++/* RX TCP/IP accelerator Disabled (Padding Enable) */
++#define IOH_GBE_RX_TCPIPACC_OFF 0x00000004
++/* TX TCP/IP accelerator and Padding Enable */
++#define IOH_GBE_TX_TCPIPACC_EN 0x00000002
++/* RX TCP/IP accelerator and Padding Enable */
++#define IOH_GBE_RX_TCPIPACC_EN 0x00000001
++
++/* External List */
++/* Interrupt Status Hold */
++/* PHY Interrupt Control */
++
++/* MAC RX Enable */
++#define IOH_GBE_MRE_MAC_RX_EN 0x00000001 /* MAC Receive Enable */
++
++/* RX Flow Control */
++/* Pause packet and Transmission Pause are enabled */
++#define IOH_GBE_FL_CTRL_EN 0x80000000
++
++/* Pause Packet Request */
++#define IOH_GBE_PS_PKT_RQ 0x80000000 /* Pause packet Request */
++
++/* RX Mode */
++/* Address Filtering Enable */
++#define IOH_GBE_ADD_FIL_EN 0x80000000
++/* Multicast Filtering Enable */
++#define IOH_GBE_MLT_FIL_EN 0x40000000
++/* Receive Almost Empty Threshold */
++#define IOH_GBE_RH_ALM_EMP_4 0x00000000 /* 4 words */
++#define IOH_GBE_RH_ALM_EMP_8 0x00004000 /* 8 words */
++#define IOH_GBE_RH_ALM_EMP_16 0x00008000 /* 16 words */
++#define IOH_GBE_RH_ALM_EMP_32 0x0000C000 /* 32 words */
++/* Receive Almost Full Threshold */
++#define IOH_GBE_RH_ALM_FULL_4 0x00000000 /* 4 words */
++#define IOH_GBE_RH_ALM_FULL_8 0x00001000 /* 8 words */
++#define IOH_GBE_RH_ALM_FULL_16 0x00002000 /* 16 words */
++#define IOH_GBE_RH_ALM_FULL_32 0x00003000 /* 32 words */
++/* RX FIFO Read Trigger Threshold */
++#define IOH_GBE_RH_RD_TRG_4 0x00000000 /* 4 words */
++#define IOH_GBE_RH_RD_TRG_8 0x00000200 /* 8 words */
++#define IOH_GBE_RH_RD_TRG_16 0x00000400 /* 16 words */
++#define IOH_GBE_RH_RD_TRG_32 0x00000600 /* 32 words */
++#define IOH_GBE_RH_RD_TRG_64 0x00000800 /* 64 words */
++#define IOH_GBE_RH_RD_TRG_128 0x00000A00 /* 128 words */
++#define IOH_GBE_RH_RD_TRG_256 0x00000C00 /* 256 words */
++#define IOH_GBE_RH_RD_TRG_512 0x00000E00 /* 512 words */
++
++ /* Receive Descriptor bit definitions */
++ #define IOH_GBE_RXD_ACC_STAT_BCAST 0x00000400
++ #define IOH_GBE_RXD_ACC_STAT_MCAST 0x00000200
++ #define IOH_GBE_RXD_ACC_STAT_UCAST 0x00000100
++ #define IOH_GBE_RXD_ACC_STAT_TCPIPOK 0x000000C0
++ #define IOH_GBE_RXD_ACC_STAT_IPOK 0x00000080
++ #define IOH_GBE_RXD_ACC_STAT_TCPOK 0x00000040
++ #define IOH_GBE_RXD_ACC_STAT_IP6ERR 0x00000020
++ #define IOH_GBE_RXD_ACC_STAT_OFLIST 0x00000010
++ #define IOH_GBE_RXD_ACC_STAT_TYPEIP 0x00000008
++ #define IOH_GBE_RXD_ACC_STAT_MACL 0x00000004
++ #define IOH_GBE_RXD_ACC_STAT_PPPOE 0x00000002
++ #define IOH_GBE_RXD_ACC_STAT_VTAGT 0x00000001
++ #define IOH_GBE_RXD_GMAC_STAT_PAUSE 0x0200
++ #define IOH_GBE_RXD_GMAC_STAT_MARBR 0x0100
++ #define IOH_GBE_RXD_GMAC_STAT_MARMLT 0x0080
++ #define IOH_GBE_RXD_GMAC_STAT_MARIND 0x0040
++ #define IOH_GBE_RXD_GMAC_STAT_MARNOTMT 0x0020
++ #define IOH_GBE_RXD_GMAC_STAT_TLONG 0x0010
++ #define IOH_GBE_RXD_GMAC_STAT_TSHRT 0x0008
++ #define IOH_GBE_RXD_GMAC_STAT_NOTOCTAL 0x0004
++ #define IOH_GBE_RXD_GMAC_STAT_NBLERR 0x0002
++ #define IOH_GBE_RXD_GMAC_STAT_CRCERR 0x0001
++
++ /* Transmit Descriptor bit definitions */
++ #define IOH_GBE_TXD_CTRL_TCPIP_ACC_OFF 0x0008
++ #define IOH_GBE_TXD_CTRL_ITAG 0x0004
++ #define IOH_GBE_TXD_CTRL_ICRC 0x0002
++ #define IOH_GBE_TXD_CTRL_APAD 0x0001
++ #define IOH_GBE_TXD_WORDS_SHIFT 2
++ #define IOH_GBE_TXD_GMAC_STAT_CMPLT 0x2000
++ #define IOH_GBE_TXD_GMAC_STAT_ABT 0x1000
++ #define IOH_GBE_TXD_GMAC_STAT_EXCOL 0x0800
++ #define IOH_GBE_TXD_GMAC_STAT_SNGCOL 0x0400
++ #define IOH_GBE_TXD_GMAC_STAT_MLTCOL 0x0200
++ #define IOH_GBE_TXD_GMAC_STAT_CRSER 0x0100
++ #define IOH_GBE_TXD_GMAC_STAT_TLNG 0x0080
++ #define IOH_GBE_TXD_GMAC_STAT_TSHRT 0x0040
++ #define IOH_GBE_TXD_GMAC_STAT_LTCOL 0x0020
++ #define IOH_GBE_TXD_GMAC_STAT_TFUNDFLW 0x0010
++ #define IOH_GBE_TXD_GMAC_STAT_RTYCNT_MASK 0x000F
++
++/* TX Mode */
++#define IOH_GBE_TM_NO_RTRY 0x80000000 /* No Retransmission */
++#define IOH_GBE_TM_LONG_PKT 0x40000000 /* Long Packet TX Enable */
++#define IOH_GBE_TM_ST_AND_FD 0x20000000 /* Store and Forward */
++#define IOH_GBE_TM_SHORT_PKT 0x10000000 /* Short Packet TX Enable */
++#define IOH_GBE_TM_LTCOL_RETX 0x08000000 /* Retransmission at Late Collision */
++/* Frame Start Threshold */
++#define IOH_GBE_TM_TH_TX_STRT_4 0x00000000 /* 4 words */
++#define IOH_GBE_TM_TH_TX_STRT_8 0x00004000 /* 8 words */
++#define IOH_GBE_TM_TH_TX_STRT_16 0x00008000 /* 16 words */
++#define IOH_GBE_TM_TH_TX_STRT_32 0x0000C000 /* 32 words */
++/* Transmit Almost Empty Threshold */
++#define IOH_GBE_TM_TH_ALM_EMP_4 0x00000000 /* 4 words */
++#define IOH_GBE_TM_TH_ALM_EMP_8 0x00000800 /* 8 words */
++#define IOH_GBE_TM_TH_ALM_EMP_16 0x00001000 /* 16 words */
++#define IOH_GBE_TM_TH_ALM_EMP_32 0x00001800 /* 32 words */
++#define IOH_GBE_TM_TH_ALM_EMP_64 0x00002000 /* 64 words */
++#define IOH_GBE_TM_TH_ALM_EMP_128 0x00002800 /* 128 words */
++#define IOH_GBE_TM_TH_ALM_EMP_256 0x00003000 /* 256 words */
++#define IOH_GBE_TM_TH_ALM_EMP_512 0x00003800 /* 512 words */
++/* Transmit Almost Full Threshold */
++#define IOH_GBE_TM_TH_ALM_FULL_4 0x00000000 /* 4 words */
++#define IOH_GBE_TM_TH_ALM_FULL_8 0x00000200 /* 8 words */
++#define IOH_GBE_TM_TH_ALM_FULL_16 0x00000400 /* 16 words */
++#define IOH_GBE_TM_TH_ALM_FULL_32 0x00000600 /* 32 words */
++
++/* RX FIFO Status */
++/* RX FIFO is almost full. */
++#define IOH_GBE_RF_ALM_FULL 0x80000000
++/* RX FIFO is almost empty. */
++#define IOH_GBE_RF_ALM_EMP 0x40000000
++/* that the data within RX FIFO has become more than RH_RD_TRG. */
++#define IOH_GBE_RF_RD_TRG 0x20000000
++/* The word count of the data existing within RX FIFO. */
++#define IOH_GBE_RF_STRWD 0x1FFE0000
++/* that frame which is currently valid/enabled is stored in RX FIFO. */
++#define IOH_GBE_RF_RCVING 0x00010000
++
++/* TX FIFO Status */
++/* TX Frame ID */
++/* TX Result */
++/* Pause Packet1-5 */
++/* MAC Address 1(A/B)- 16*/
++/* MAC Address Mask */
++#define IOH_GBE_BUSY 0x80000000
++
++/* MIIM */
++#define IOH_GBE_MIIM_OPER_WRITE 0x04000000
++#define IOH_GBE_MIIM_OPER_READ 0x00000000
++#define IOH_GBE_MIIM_OPER_READY 0x04000000
++#define IOH_GBE_MIIM_PHY_ADDR_SHIFT 21
++#define IOH_GBE_MIIM_REG_ADDR_SHIFT 16
++
++/* RGMII Status */
++#define IOH_GBE_LINK_UP 0x80000008
++#define IOH_GBE_RXC_SPEED_MSK 0x00000006
++#define IOH_GBE_RXC_SPEED_2_5M 0x00000000 /* 2.5MHz */
++#define IOH_GBE_RXC_SPEED_25M 0x00000002 /* 25MHz */
++#define IOH_GBE_RXC_SPEED_125M 0x00000004 /* 125MHz */
++#define IOH_GBE_DUPLEX_FULL 0x00000001
++
++/* RGMII Control */
++#define IOH_GBE_CRS_SEL 0x00000010
++#define IOH_GBE_RGMII_RATE_125M 0x00000000
++#define IOH_GBE_RGMII_RATE_25M 0x00000008
++#define IOH_GBE_RGMII_RATE_2_5M 0x0000000C
++#define IOH_GBE_RGMII_MODE_GMII 0x00000000
++#define IOH_GBE_RGMII_MODE_RGMII 0x00000002
++#define IOH_GBE_CHIP_TYPE_EXTERNAL 0x00000000
++#define IOH_GBE_CHIP_TYPE_INTERNAL 0x00000001
++
++/* DMA Control */
++#define IOH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */
++#define IOH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */
++
++
++
++/* RX Descriptor Base Address */
++/* RX Descriptor Size */
++/* RX Descriptor Hard Pointer */
++/* RX Descriptor Hard Pointer Hold */
++/* RX Descriptor Soft Pointer */
++/* TX Descriptor Base Address */
++/* TX Descriptor Size */
++/* TX Descriptor Hard Pointer */
++/* TX Descriptor Hard Pointer Hold */
++/* TX Descriptor Soft Pointer */
++
++/* RX DMA Status */
++/* TX DMA Status */
++
++
++/* Wake On LAN Status */
++#define IOH_GBE_WLS_BR 0x00000008 /* Broadcast Address */
++#define IOH_GBE_WLS_MLT 0x00000004 /* Multicast Address */
++/* The Frame registered in Address Recognizer */
++#define IOH_GBE_WLS_IND 0x00000002
++#define IOH_GBE_WLS_MP 0x00000001 /* Magic packet Address */
++
++/* Wake On LAN Control */
++#define IOH_GBE_WLC_WOL_MODE 0x00010000
++#define IOH_GBE_WLC_IGN_TLONG 0x00000100
++#define IOH_GBE_WLC_IGN_TSHRT 0x00000080
++#define IOH_GBE_WLC_IGN_OCTER 0x00000040
++#define IOH_GBE_WLC_IGN_NBLER 0x00000020
++#define IOH_GBE_WLC_IGN_CRCER 0x00000010
++#define IOH_GBE_WLC_BR 0x00000008
++#define IOH_GBE_WLC_MLT 0x00000004
++#define IOH_GBE_WLC_IND 0x00000002
++#define IOH_GBE_WLC_MP 0x00000001
++
++/* Wake On LAN Address Mask */
++#define IOH_GBE_WLA_BUSY 0x80000000
++
++#endif
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-gpio.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-gpio.patch
new file mode 100644
index 0000000..c57f949
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-gpio.patch
@@ -0,0 +1,2700 @@
+
+
+From: Masayuki Ohtake <masa-korg@dsn.okisemi.com>
+Subject: OKI Semiconductor PCH GPIO driver
+
+This driver implements GPIO controls for PCH.
+
+Signed-off-by: Masayuki Ohtake <masa-korg@dsn.okisemi.com>
+Acked-by: Wang Qi <qi.wang@intel.com>
+
+---
+ drivers/gpio/Kconfig |
+ drivers/gpio/Makefile |
+ drivers/gpio/pch_gpio/Makefile |
+ drivers/gpio/pch_gpio/pch_common.h |
+ drivers/gpio/pch_gpio/pch_debug.h |
+ drivers/gpio/pch_gpio/pch_gpio_hal.c |
+ drivers/gpio/pch_gpio/pch_gpio_hal.h |
+ drivers/gpio/pch_gpio/pch_gpio_main.c |
+ drivers/gpio/pch_gpio/pch_gpio_main.h |
+ drivers/gpio/pch_gpio/pch_gpio_pci.c |
++++++++++++++++++++++++++++++++ 10 files changed, yy insertions(+)(-)
+diff -urN linux-2.6.33.1/drivers/gpio/Kconfig topcliff-2.6.33.1/drivers/gpio/Kconfig
+--- linux-2.6.33.1/drivers/gpio/Kconfig 2010-03-16 01:09:39.000000000 +0900
++++ topcliff-2.6.33.1/drivers/gpio/Kconfig 2010-04-01 10:58:31.000000000 +0900
+@@ -87,6 +87,13 @@
+
+ comment "I2C GPIO expanders:"
+
++config PCH_GPIO
++ tristate "PCH GPIO"
++ depends on PCI
++ help
++ If you say yes to this option, support will be included for the SMB
++ PCH GPIO Host controller.
++
+ config GPIO_MAX732X
+ tristate "MAX7319, MAX7320-7327 I2C Port Expanders"
+ depends on I2C
+diff -urN linux-2.6.33.1/drivers/gpio/Makefile topcliff-2.6.33.1/drivers/gpio/Makefile
+--- linux-2.6.33.1/drivers/gpio/Makefile 2010-03-16 01:09:39.000000000 +0900
++++ topcliff-2.6.33.1/drivers/gpio/Makefile 2010-04-01 10:58:31.000000000 +0900
+@@ -22,3 +22,4 @@
+ obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o
+ obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
+ obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o
++obj-$(CONFIG_PCH_GPIO) += pch_gpio/
+diff -urN linux-2.6.33.1/drivers/gpio/pch_gpio/Makefile topcliff-2.6.33.1/drivers/gpio/pch_gpio/Makefile
+--- linux-2.6.33.1/drivers/gpio/pch_gpio/Makefile 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33.1/drivers/gpio/pch_gpio/Makefile 2010-04-01 10:58:31.000000000 +0900
+@@ -0,0 +1,7 @@
++ifeq ($(CONFIG_GPIO_DEBUG_CORE),y)
++EXTRA_CFLAGS += -DDEBUG
++endif
++
++obj-$(CONFIG_PCH_GPIO) += pch_gpio.o
++pch_gpio-objs := pch_gpio_hal.o pch_gpio_main.o pch_gpio_pci.o
++
+diff -urN linux-2.6.33.1/drivers/gpio/pch_gpio/pch_common.h topcliff-2.6.33.1/drivers/gpio/pch_gpio/pch_common.h
+--- linux-2.6.33.1/drivers/gpio/pch_gpio/pch_common.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33.1/drivers/gpio/pch_gpio/pch_common.h 2010-04-01 10:58:31.000000000 +0900
+@@ -0,0 +1,146 @@
++/*!
++ * @file ioh_common.h
++ * @brief Provides the macro definitions used by all files.
++ * @version 1.0.0.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ */
++
++#ifndef __IOH_COMMON_H__
++#define __IOH_COMMON_H__
++
++/*! @ingroup Global
++@def IOH_WRITE8
++@brief Macro for writing 8 bit data to an io/mem address
++*/
++#define IOH_WRITE8(val, addr) iowrite8((val), (void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_LOG
++@brief Macro for writing 16 bit data to an io/mem address
++*/
++#define IOH_WRITE16(val, addr) iowrite16((val), (void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_LOG
++@brief Macro for writing 32 bit data to an io/mem address
++*/
++#define IOH_WRITE32(val, addr) iowrite32((val), (void __iomem *)(addr))
++
++/*! @ingroup Global
++@def IOH_READ8
++@brief Macro for reading 8 bit data from an io/mem address
++*/
++#define IOH_READ8(addr) ioread8((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_READ16
++@brief Macro for reading 16 bit data from an io/mem address
++*/
++#define IOH_READ16(addr) ioread16((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_READ32
++@brief Macro for reading 32 bit data from an io/mem address
++*/
++#define IOH_READ32(addr) ioread32((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_WRITE32_F
++@brief Macro for writing 32 bit data to an io/mem address
++*/
++#define IOH_WRITE32_F(val, addr) do \
++ { IOH_WRITE32((val), (addr)); (void)IOH_READ32((addr)); } while (0);
++
++/*! @ingroup Global
++@def IOH_WRITE_BYTE
++@brief Macro for writing 1 byte data to an io/mem address
++*/
++#define IOH_WRITE_BYTE IOH_WRITE8
++/*! @ingroup Global
++@def IOH_WRITE_WORD
++@brief Macro for writing 1 word data to an io/mem address
++*/
++#define IOH_WRITE_WORD IOH_WRITE16
++/*! @ingroup Global
++@def IOH_WRITE_LONG
++@brief Macro for writing long data to an io/mem address
++*/
++#define IOH_WRITE_LONG IOH_WRITE32
++
++/*! @ingroup Global
++@def IOH_READ_BYTE
++@brief Macro for reading 1 byte data from an io/mem address
++*/
++#define IOH_READ_BYTE IOH_READ8
++/*! @ingroup Global
++@def IOH_READ_WORD
++@brief Macro for reading 1 word data from an io/mem address
++*/
++#define IOH_READ_WORD IOH_READ16
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief Macro for reading long data from an io/mem address
++*/
++#define IOH_READ_LONG IOH_READ32
++
++/* Bit Manipulation Macros */
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to set a specified bit(mask) at the
++ specified address
++*/
++#define IOH_SET_ADDR_BIT(addr, bitmask) IOH_WRITE_LONG((IOH_READ_LONG(addr) |\
++ (bitmask)), (addr))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to clear a specified bit(mask) at the specified address
++*/
++#define IOH_CLR_ADDR_BIT(addr, bitmask) IOH_WRITE_LONG((IOH_READ_LONG(addr) &\
++ ~(bitmask)), (addr))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to set a specified bitmask for a variable
++*/
++#define IOH_SET_BITMSK(var, bitmask) ((var) |= (bitmask))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to clear a specified bitmask for a variable
++*/
++#define IOH_CLR_BITMSK(var, bitmask) ((var) &= (~(bitmask)))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to set a specified bit for a variable
++*/
++#define IOH_SET_BIT(var, bit) ((var) |= (1<<(bit)))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to clear a specified bit for a variable
++*/
++#define IOH_CLR_BIT(var, bit) ((var) &= ~(1<<(bit)))
++
++#endif
+diff -urN linux-2.6.33.1/drivers/gpio/pch_gpio/pch_debug.h topcliff-2.6.33.1/drivers/gpio/pch_gpio/pch_debug.h
+--- linux-2.6.33.1/drivers/gpio/pch_gpio/pch_debug.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33.1/drivers/gpio/pch_gpio/pch_debug.h 2010-04-01 10:58:31.000000000 +0900
+@@ -0,0 +1,60 @@
++/*!
++ * @file ioh_debug.h
++ * @brief Provides the macro definitions used for debugging.
++ * @version 1.0.0.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ */
++
++#ifndef __IOH_DEBUG_H__
++#define __IOH_DEBUG_H__
++
++#ifdef MODULE
++#define IOH_LOG(level, fmt, args...) printk(level "%s:" fmt "\n",\
++ THIS_MODULE->name, ##args)
++#else
++#define IOH_LOG(level, fmt, args...) printk(level "%s:" fmt "\n" ,\
++ __FILE__, ##args)
++#endif
++
++
++#ifdef DEBUG
++ #define IOH_DEBUG(fmt, args...) IOH_LOG(KERN_DEBUG, fmt, ##args)
++#else
++ #define IOH_DEBUG(fmt, args...)
++#endif
++
++#ifdef IOH_TRACE_ENABLED
++ #define IOH_TRACE IOH_DEBUG
++#else
++ #define IOH_TRACE(fmt, args...)
++#endif
++
++#define IOH_TRACE_ENTER IOH_TRACE("Enter %s", __func__)
++#define IOH_TRACE_EXIT IOH_TRACE("Exit %s", __func__)
++
++
++#endif
+diff -urN linux-2.6.33.1/drivers/gpio/pch_gpio/pch_gpio_hal.c topcliff-2.6.33.1/drivers/gpio/pch_gpio/pch_gpio_hal.c
+--- linux-2.6.33.1/drivers/gpio/pch_gpio/pch_gpio_hal.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33.1/drivers/gpio/pch_gpio/pch_gpio_hal.c 2010-04-01 10:58:31.000000000 +0900
+@@ -0,0 +1,595 @@
++/*!
++ * @file ioh_gpio_hal.c
++ * @brief Provides all the implementation of the interfaces pertaining to the
++ * HAL.
++ * @version 0.92
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 02/20/2009
++ * modified:
++ * WIPRO 01/05/2010
++ * Added the interfaces provided by the gpio module.
++ *
++ */
++
++/*includes*/
++#include <linux/io.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <linux/module.h>
++
++#include "pch_common.h"
++#include "pch_debug.h"
++#include "pch_gpio_main.h"
++#include "pch_gpio_hal.h"
++
++/* mask for interrupt mode configuration */
++#define GPIO_INT_MODE_MSK (0xF)
++
++/* mask for interrupt mode bit position */
++#define GPIO_INT_MODE_POS (0x4)
++
++/* interrupt mode valid value */
++#define GPIO_INT_MODE_VALID (0x4)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def BITS_32
++ @brief Corresponds to the 32 bit position.
++*/
++#define BITS_32 (32)
++
++
++/*! @ingroup GPIO_Global
++ @var ioh_gpio_cbr
++ @brief The global variable for storing the callback function pointer.
++ @remarks This variable is used to store the function pointer
++ to the callback function of the GPIO module.
++
++ @see
++ - ioh_gpio_cb_register
++*/
++void (*ioh_gpio_cbr) (u32);
++
++/*! @ingroup GPIO_HALLayer
++ struct ioh_gpio_reg_data
++ @brief The register data.
++*/
++struct ioh_gpio_reg_data {
++ u32 ien_reg; /**< To store contents of IEN register. */
++ u32 imask_reg; /**< To store contents of IMASK register. */
++ u32 po_reg; /**< To store contents of PO register. */
++ u32 pm_reg; /**< To store contents of PM register. */
++ u32 im0_reg; /**< To store contents of IM0 register. */
++ u32 im1_reg; /**< To store contents of IM1 register. */
++} ioh_gpio_reg;
++
++/*functions implementations*/
++/*! @ingroup GPIO_HALLayerAPI
++@fn void ioh_gpio_cb_register(void(*ioh_gpio_ptr)(u32))
++@brief Registers the callback function.
++@remarks This function registers the callback function used
++ by the gpio module. The main task performed by this
++ function is:
++ - If the function argument is non NULL, update the
++ same in the global callback pointer variable
++ @ref ioh_gpio_cbr.
++@param ioh_gpio_cb_register [@ref INOUT]
++ Contains the reference of the function pointer
++@retval None
++@see
++ - ioh_gpio_int_mode
++*/
++void ioh_gpio_cb_register(void (*ioh_gpio_ptr) (u32))
++{
++ if (ioh_gpio_ptr != NULL) {
++
++ init_waitqueue_head(&ioh_gpio_event);
++ IOH_DEBUG
++ (" In ioh_gpio_cb_register: value of ioh_gpio_ptr = %p\n",
++ ioh_gpio_ptr);
++ ioh_gpio_cbr = ioh_gpio_ptr;
++ IOH_DEBUG("ioh_gpio_cb_register Registered callback\n");
++ IOH_DEBUG
++ ("In ioh_gpio_cb_register : value of ioh_gpio_cbr =%p\n",
++ ioh_gpio_cbr);
++ }
++
++}
++
++/*! @ingroup GPIO_HALLayerAPI
++@fn void ioh_gpio_int_mode(struct ioh_gpio_reqt * gpio_reqt)
++@remarks Implements the functionality of disabling or enabling interrupts.
++ The main tasks performed by this function are:
++ - If the request is for disabling the interrupts, then the
++ corresponding bits in the IEN register are set to 0.
++ - If the request is for enabling the interrupts, then the
++ corresponding bits in the IEN register are set to 1.
++
++@param gpio_reqt [@ref INOUT]
++ Contains the reference of the ioh_gpio_reqt structure
++@retval s32
++ - @ref IOH_GPIO_SUCCESS --> If the operation is successful.
++ - -EINVAL --> Failure.
++@see
++ - ioh_gpio_dir_mode
++*/
++s32 ioh_gpio_int_mode(struct ioh_gpio_reqt *gpio_reqt)
++{
++ u32 ioh_pins;
++ u32 base_addr;
++ u32 i;
++ u32 ien_val;
++ u64 temp;
++ u64 mode_val;
++
++ /* initialize locals */
++ ioh_pins = gpio_reqt->pins;
++ base_addr = ioh_gpio_base_address;
++ ien_val = IOH_READ_LONG(base_addr + IOH_IEN);
++
++ /* Disable applicable bits in IEN register */
++ ien_val &= (~ioh_pins);
++ IOH_WRITE_LONG(ien_val, (base_addr + IOH_IEN));
++ IOH_DEBUG("ioh_gpio_int_mode wrote %x to IOH_IEN\n", ien_val);
++
++ /* Reading the modes of all the 12 pins. */
++ mode_val = ((((u64) IOH_READ_LONG(base_addr + IOH_IM1)) << BITS_32) |
++ (IOH_READ_LONG(base_addr + IOH_IM0)));
++
++ /* enable interrupts */
++ if (gpio_reqt->enable == 1) {
++ IOH_DEBUG("ioh_gpio_int_mode Enabling interrupts\n");
++
++ for (i = 0; i < GPIO_NUM_PINS; i++) {
++ /*GPIO_NUM_PINS = max num. pins for the GPIO port. */
++ if (ioh_pins & (1 << i)) {
++ /*If interrupt for the pin has to be enabled. */
++ /* int. mode setting for each pin is specified
++ by 3 bits
++ 000 Generates an interrupt
++ at the falling edge.
++ 001 Generates an interrupt
++ at the rising edge.
++ 010 Generates an interrupt
++ at the input of a L level.
++ 011 Generates an interrupt
++ at the input of a H level.
++ 100 Generates an interrupt
++ at both edges (rising edge/falling edge).
++ */
++ /* Clear the existing interrupt mode
++ setting for the current pin. */
++ mode_val &=
++ ~(((u64) GPIO_INT_MODE_MSK) <<
++ (i * GPIO_INT_MODE_POS));
++
++ /* Update the new setting. */
++ temp =
++ (gpio_reqt->
++ mode) & (((u64) GPIO_INT_MODE_MSK) << (i *
++ GPIO_INT_MODE_POS));
++
++ mode_val |= temp;
++
++ if (((temp >> (i * GPIO_INT_MODE_POS)) >
++ GPIO_INT_MODE_VALID)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_gpio_int_mode Invalid\
++ mode selection for "
++ "pin %d\n", i);
++ return -EINVAL;
++ }
++ IOH_DEBUG
++ ("ioh_gpio_int_mode Interrupt enabled\
++ for pin %d \n",
++ i);
++ } else {
++ IOH_DEBUG
++ ("ioh_gpio_int_mode Interrupt not enabled\
++ for pin %d \n",
++ i);
++ }
++ }
++ /* Set the mode register IM0 */
++ IOH_WRITE_LONG(((u32) (mode_val & BIT_MASK_32)),
++ (base_addr + IOH_IM0));
++ IOH_DEBUG("ioh_gpio_int_mode wrote %x to IOH_IM0\n",
++ ((u32) (mode_val & BIT_MASK_32)));
++
++ /* Set the mode register IM1 */
++ IOH_WRITE_LONG(((u32) (mode_val >> BITS_32)),
++ (base_addr + IOH_IM1));
++ IOH_DEBUG("ioh_gpio_int_mode wrote %x to IOH_IM1\n",
++ ((u32) (mode_val >> BITS_32)));
++
++ /* Clear the status */
++ IOH_WRITE_LONG(ioh_pins, (base_addr + IOH_ICLR));
++ IOH_DEBUG("ioh_gpio_int_mode wrote %x to IOH_ICLR\n",
++ ioh_pins);
++ IOH_DEBUG("ioh_gpio_int_mode value in to IOH_ISTATUS %x\n",
++ IOH_READ_LONG(base_addr + IOH_ISTATUS));
++
++ /* Clear the mask register */
++ IOH_WRITE_LONG(ioh_pins, (base_addr + IOH_IMASKCLR));
++ IOH_DEBUG("ioh_gpio_int_mode wrote %x to IOH_IMASKCLR\n",
++ ioh_pins);
++
++ ien_val = (ien_val | ioh_pins);
++
++ /*Enable applicable bits in IEN register */
++ IOH_WRITE_LONG(ien_val, (base_addr + IOH_IEN));
++ IOH_DEBUG("ioh_gpio_int_mode wrote %x to IOH_IEN\n", ien_val);
++
++ }
++
++ /* disable interrupts */
++ else {
++ IOH_DEBUG("ioh_gpio_int_mode Disabling interrupts\n");
++ /* Clear the status */
++ IOH_WRITE_LONG(ioh_pins, (base_addr + IOH_ICLR));
++ IOH_DEBUG("ioh_gpio_int_mode wrote %x to IOH_ICLR\n",
++ ioh_pins);
++
++ /* Set the mask register */
++ IOH_WRITE_LONG(ioh_pins, (base_addr + IOH_IMASK));
++ IOH_DEBUG("ioh_gpio_int_mode wrote %x to IOH_IMASK\n",
++ ioh_pins);
++
++ /* IEN bits are already disabled initially */
++
++ }
++ IOH_DEBUG("ioh_gpio_int_mode returning=%d\n", IOH_GPIO_SUCCESS);
++ return IOH_GPIO_SUCCESS;
++}
++
++/*! @ingroup GPIO_HALLayerAPI
++@fn void ioh_gpio_dir_mode(struct ioh_gpio_reqt * gpio_reqt)
++@remarks Implements the functionalities for setting GPIO pin
++ directions[input/output].
++ The main tasks performed by this function are:
++ - Reads the current value of PM register
++ - If input mode is specified @ref GPIO_IN, performs logical
++ AND between the present PM register value and the 1's
++ complement of gpio_reqt->pins (@ref ioh_gpio_reqt) and
++ updates the value in the PM register.
++ - Else performs logical OR between the present PM register value
++ and gpio_reqt->pins (@ref ioh_gpio_reqt) and updates the value
++ in the PM register.
++
++@param gpio_reqt [@ref INOUT] Contains the reference of
++ the ioh_gpio_reqt structure
++@retval None
++@see
++ - ioh_gpio_read
++*/
++void ioh_gpio_dir_mode(struct ioh_gpio_reqt *gpio_reqt)
++{
++ u32 ioh_pm_regval;
++ u32 ioh_pins;
++ u32 base_addr;
++
++ base_addr = ioh_gpio_base_address;
++ ioh_pm_regval = IOH_READ_LONG(base_addr + IOH_PM);
++ ioh_pins = gpio_reqt->pins;
++
++ /* input mode */
++ if (gpio_reqt->mode == GPIO_IN) {
++ IOH_DEBUG("ioh_gpio_dir_mode GPIO_IN\n");
++ (ioh_pm_regval &= (~ioh_pins));
++ } else {
++ ioh_pm_regval |= ioh_pins;
++ IOH_DEBUG("ioh_gpio_dir_mode GPIO_OUT\n");
++ }
++
++ IOH_WRITE_LONG(ioh_pm_regval, (base_addr + IOH_PM));
++ IOH_DEBUG("ioh_gpio_dir_mode wrote %x to IOH_PM\n", ioh_pm_regval);
++}
++
++/*! @ingroup GPIO_HALLayerAPI
++ @fn int ioh_gpio_read(struct ioh_gpio_reqt * gpio_reqt)
++ @remarks Implements the register read functionality of the
++ gpio module.
++ The main tasks performed by this function are:
++ - Reads the value from PI[Port Input] Register.
++ Masks the value with 0xff and updates the value in
++ gpio_reqt->pins
++ (@ref ioh_gpio_reqt).
++
++ @param gpio_reqt [@ref INOUT]
++ Contains the reference of the ioh_gpio_reqt structure
++ @retval s32
++ - @ref IOH_GPIO_SUCCESS -->
++ If the operation is successful.
++ @see
++ - IOCTL_GPIO_READ
++*/
++s32 ioh_gpio_read(struct ioh_gpio_reqt *gpio_reqt)
++{
++
++ gpio_reqt->pins =
++ (ioh_gpio_bit_mask & IOH_READ_LONG(ioh_gpio_base_address + IOH_PI));
++ return IOH_GPIO_SUCCESS;
++
++}
++
++/*! @ingroup GPIO_HALLayerAPI
++@fn int ioh_gpio_write(struct ioh_gpio_reqt * gpio_reqt)
++@remarks Implements the register write functionality of the gpio module.
++ The main tasks performed by this function are:
++- Masks gpio_reqt->pins (@ref ioh_gpio_reqt) with 0xFF to
++ retrieve the valid 8 bits.
++- Reads the current value of PO register
++- If (gpio_reqt->mode == GPIO_LOW), performs logical AND
++ between the present PM register value and the 1.s complement
++ of gpio_reqt->pins and updates the value in the PO register.
++- Else, (gpio_reqt->mode != GPIO_LOW; implies Output High), performs
++ logical OR between the present PO register value and gpio_reqt->pins
++ and updates the value in the PO register.
++
++ @param gpio_reqt [@ref INOUT]
++ Contains the reference of the ioh_gpio_reqt structure
++ @retval s32
++ - @ref IOH_GPIO_SUCCESS
++ --> If the operation is successful.
++ @see
++ - IOCTL_GPIO_WRITE
++*/
++s32 ioh_gpio_write(struct ioh_gpio_reqt *gpio_reqt)
++{
++ u32 reg_val;
++
++ reg_val = IOH_READ_LONG(ioh_gpio_base_address + IOH_PO);
++
++ if (gpio_reqt->mode == GPIO_LOW) {
++ reg_val &= ~(gpio_reqt->pins);
++ IOH_DEBUG("ioh_gpio_write GPIO_LOW\n");
++ } else {
++ reg_val |= gpio_reqt->pins;
++ IOH_DEBUG("ioh_gpio_write GPIO_HIGH\n");
++ }
++
++ IOH_WRITE_LONG(reg_val, ioh_gpio_base_address + IOH_PO);
++ IOH_DEBUG("ioh_gpio_write writing value=%x\n", reg_val);
++
++ IOH_DEBUG("ioh_gpio_write returning %d\n", IOH_GPIO_SUCCESS);
++ return IOH_GPIO_SUCCESS;
++}
++
++/*! @ingroup GPIO_HALLayerAPI
++@fn irqreturn_t ioh_gpio_handler(int irq,void * pData)
++@remarks Implements the interrupt handler functionality of the gpio module.
++The main tasks performed by this function are:
++- Reads the IDISP register
++- If IDISP register content is zero, then returns IRQ_NONE.
++- Else clears the Interrupt status by writing to the ICLR register,
++ invokes the call back function specified by @ref ioh_gpio_cbr
++ with the value of IDISP as parameter and returns IRQ_HANDLED.
++
++@param irq [@ref INOUT] Contains the irq value
++@param pData [@ref INOUT] Contains the reference to the base
++ gpio address.
++@retval irqreturn_t
++ - IRQ_HANDLED --> If GPIO hardware is responsible
++ for the interrupt.
++ - IRQ_NONE --> Non-GPIO interrupt.
++*/
++irqreturn_t ioh_gpio_handler(int irq, void *pData)
++{
++ irqreturn_t ret = IRQ_NONE;
++ u32 base_addr = ioh_gpio_base_address;
++ u32 ioh_idisp_regval;
++
++ ioh_idisp_regval =
++ (ioh_gpio_bit_mask & IOH_READ_LONG(base_addr + IOH_IDISP));
++ if (ioh_idisp_regval != 0) {
++ /*invoke the callback */
++ (*ioh_gpio_cbr) (ioh_idisp_regval);
++
++ IOH_DEBUG
++ ("ioh_gpio_handler : ioh_gpio_cb invoked successfully %d\n",
++ ret);
++ /*clear the interrupt */
++ IOH_LOG(KERN_ERR, "Value at idisp 8 = %x",
++ (IOH_READ_LONG(base_addr + IOH_IDISP)));
++ IOH_LOG(KERN_ERR, "Value at pin 8 = %x",
++ ((IOH_READ_LONG(base_addr + IOH_PI) & 0x80)));
++
++ IOH_WRITE_LONG(ioh_idisp_regval, (base_addr + IOH_ICLR));
++
++ ret = IRQ_HANDLED;
++ IOH_DEBUG("ioh_gpio_handler returns IRQ_HANDLED\n");
++ } else {
++
++ IOH_DEBUG("ioh_gpio_handler returns IRQ_NONE\n");
++ }
++ return ret;
++}
++
++/*! @ingroup GPIO_HALLayerAPI
++@fn void ioh_gpio_cb(u32 value)
++@brief The interrupt handler callback function.
++@remarks The main tasks performed by this function are:
++ - Updates the GPIO event flag with the parameter value.
++ This sets the appropriate event flag bits based on the
++ bits set in IDISP register.
++ - Wakes up the blocking ioctl call @ref IOCTL_GPIO_NOTIFY.
++
++@param value [@ref INOUT] Contains the value data
++@retval None
++@see
++ - ioh_gpio_cb_register
++*/
++void ioh_gpio_cb(u32 value)
++{
++ /* update the event flag */
++ ioh_gpio_event_flag = value;
++
++ IOH_DEBUG
++ ("ioh_gpio_cb : event flag value = %x\tIDISP register value = %x\n",
++ ioh_gpio_event_flag,
++ (IOH_READ_LONG(ioh_gpio_base_address + IOH_IDISP)));
++ wake_up_interruptible(&ioh_gpio_event);
++}
++
++/*! @ingroup GPIO_HALLayerAPI
++@fn void ioh_gpio_save_reg_conf(void)
++@remarks Save register configuration and disable interrupts.
++ The main tasks performed by this function are:
++ - Read the registers PM, PO, IEN, IM0, IM1 and IMASK
++ and stores the values for further use.
++ - Disables the interrupts by clearing IEN register.
++
++@param None
++@retval None
++@see
++ - ioh_gpio_suspend
++*/
++void ioh_gpio_save_reg_conf(void)
++{
++ u32 base_addr = ioh_gpio_base_address;
++ IOH_DEBUG("ioh_gpio_save_reg_conf ENTRY\n");
++ /* to store contents of IEN register */
++ ioh_gpio_reg.ien_reg = IOH_READ_LONG(base_addr + IOH_IEN);
++
++ /* to store contents of IMASK register */
++ ioh_gpio_reg.imask_reg = IOH_READ_LONG(base_addr + IOH_IMASK);
++
++ /* to store contents of PO register */
++ ioh_gpio_reg.po_reg = IOH_READ_LONG(base_addr + IOH_PO);
++
++ /* to store contents of PM register */
++ ioh_gpio_reg.pm_reg = IOH_READ_LONG(base_addr + IOH_PM);
++
++ /* to store contents of IM0 register */
++ ioh_gpio_reg.im0_reg = IOH_READ_LONG(base_addr + IOH_IM0);
++
++ /* to store contents of IM1 register */
++ ioh_gpio_reg.im1_reg = IOH_READ_LONG(base_addr + IOH_IM1);
++
++ IOH_DEBUG
++ ("ioh_gpio_save_reg_conf : IOH_IEN=%x, IOH_IMASK=%x, IOH_PO=%x,"
++ "IOH_PM=%x, IOH_IM0=%x, IOH_IM1=%x\n",
++ IOH_READ_LONG(base_addr + IOH_IEN),
++ IOH_READ_LONG(base_addr + IOH_IMASK),
++ IOH_READ_LONG(base_addr + IOH_PO),
++ IOH_READ_LONG(base_addr + IOH_PM),
++ IOH_READ_LONG(base_addr + IOH_IM0),
++ IOH_READ_LONG(base_addr + IOH_IM1));
++
++ IOH_DEBUG("ioh_gpio_save_reg_conf : ioh_gpio_reg.ien_reg=%x, "
++ "ioh_gpio_reg.imask_reg=%x, ioh_gpio_reg.po_reg=%x,\
++ ioh_gpio_reg.pm_reg=%x,"
++ "ioh_gpio_reg.im0_reg=%x, ioh_gpio_reg.im1_reg=%x\n",
++ ioh_gpio_reg.ien_reg, ioh_gpio_reg.imask_reg,
++ ioh_gpio_reg.po_reg, ioh_gpio_reg.pm_reg,
++ ioh_gpio_reg.im0_reg, ioh_gpio_reg.im1_reg);
++
++ /* Disable all gpio interrupts */
++ IOH_WRITE_LONG(0, (base_addr + IOH_IEN));
++ IOH_DEBUG("ioh_gpio_save_reg_conf disabled interrupts\n");
++}
++
++/*! @ingroup GPIO_HALLayerAPI
++@fn void ioh_gpio_restore_reg_conf(void)
++@remarks This function restores the register configuration of the
++ GPIO device. The main task performed by this function
++ is:
++ - Restores the previous register values into the registers
++ PM, PO, IEN, IM0, IM1 and IMASK.
++
++@param None
++@retval None
++@see
++ - ioh_gpio_resume
++*/
++void ioh_gpio_restore_reg_conf(void)
++{
++ u32 base_addr = ioh_gpio_base_address;
++ IOH_DEBUG("ioh_gpio_restore_reg_conf ENTRY\n");
++	/* restore contents of IEN register */
++ IOH_WRITE_LONG(ioh_gpio_reg.ien_reg, base_addr + IOH_IEN);
++
++	/* restore contents of IMASK register */
++ IOH_WRITE_LONG(ioh_gpio_reg.imask_reg, base_addr + IOH_IMASK);
++
++	/* restore mask-clear state via the IMASKCLR register */
++ IOH_WRITE_LONG(~ioh_gpio_reg.imask_reg, base_addr + IOH_IMASKCLR);
++
++	/* restore contents of PO register */
++ IOH_WRITE_LONG(ioh_gpio_reg.po_reg, base_addr + IOH_PO);
++
++	/* restore contents of PM register */
++ IOH_WRITE_LONG(ioh_gpio_reg.pm_reg, base_addr + IOH_PM);
++
++	/* restore contents of IM0 register */
++ IOH_WRITE_LONG(ioh_gpio_reg.im0_reg, base_addr + IOH_IM0);
++
++	/* restore contents of IM1 register */
++ IOH_WRITE_LONG(ioh_gpio_reg.im1_reg, base_addr + IOH_IM1);
++
++ IOH_DEBUG
++ ("ioh_gpio_save_reg_conf : ioh_gpio_reg.ien_reg=%x,\
++ ioh_gpio_reg.imask_reg=%x,"\
++ "ioh_gpio_reg.po_reg=%x, ioh_gpio_reg.pm_reg=%x,\
++ ioh_gpio_reg.im0_reg=%x,"\
++ "ioh_gpio_reg.im1_reg=%x\n", ioh_gpio_reg.ien_reg,
++ ioh_gpio_reg.imask_reg, ioh_gpio_reg.po_reg, ioh_gpio_reg.pm_reg,
++ ioh_gpio_reg.im0_reg, ioh_gpio_reg.im1_reg);
++
++ IOH_DEBUG
++ ("ioh_gpio_save_reg_conf : IOH_IEN=%x, IOH_IMASK=%x, IOH_PO=%x,\
++ IOH_PM=%x, IOH_IM0=%x, IOH_IM1=%x\n",\
++ IOH_READ_LONG(base_addr + IOH_IEN),
++ IOH_READ_LONG(base_addr + IOH_IMASK),
++ IOH_READ_LONG(base_addr + IOH_PO),
++ IOH_READ_LONG(base_addr + IOH_PM),
++ IOH_READ_LONG(base_addr + IOH_IM0),
++ IOH_READ_LONG(base_addr + IOH_IM1));
++
++ IOH_DEBUG("ioh_gpio_restore_reg_conf enabled interrupts\n");
++}
++
++/*! @ingroup GPIO_HALLayerAPI
++ @fn u32 ioh_gpio_readreg(int offset)
++ @brief Reads the register.
++ @remarks This function reads the register located at
++ the passed offset and returns the read value.
++	@param Offset	[@ref IN] The offset to be read.
++ @retval u32 --> Register value
++
++*/
++u32 ioh_gpio_readreg(int offset)
++{
++ u32 reg_val;
++ reg_val = (IOH_READ_LONG(ioh_gpio_base_address + offset));
++ IOH_DEBUG("ioh_gpio_readreg read reg=%x,value=%x\n",
++ (ioh_gpio_base_address + offset), reg_val);
++ return reg_val;
++}
++
++int ioh_gpio_writereg(int offset, u32 val)
++{
++ IOH_WRITE_LONG(val, ioh_gpio_base_address + offset);
++ IOH_DEBUG("%s read reg=%x,value=%x\n", __func__,
++ (ioh_gpio_base_address + offset), val);
++ return 0;
++}
+diff -urN linux-2.6.33.1/drivers/gpio/pch_gpio/pch_gpio_hal.h topcliff-2.6.33.1/drivers/gpio/pch_gpio/pch_gpio_hal.h
+--- linux-2.6.33.1/drivers/gpio/pch_gpio/pch_gpio_hal.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33.1/drivers/gpio/pch_gpio/pch_gpio_hal.h 2010-04-01 10:58:31.000000000 +0900
+@@ -0,0 +1,170 @@
++/*!
++ * @file ioh_gpio_hal.h
++ * @brief Provides all the interfaces pertaining to the HAL.
++ * @version 0.92
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 02/20/2009
++ * modified:
++ * WIPRO 01/05/2010
++ * Added the interfaces provided by the HAL.
++ *
++ */
++
++#ifndef __IOH_GPIO_HAL_H__
++#define __IOH_GPIO_HAL_H__
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_IEN
++ @brief Offset for IEN register.
++*/
++#define IOH_IEN (0x00)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_ISTATUS
++ @brief Offset for ISTATUS register.
++*/
++#define IOH_ISTATUS (0x04)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_IDISP
++ @brief Offset for IDISP register.
++*/
++#define IOH_IDISP (0x08)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_ICLR
++ @brief Offset for ICLR register.
++*/
++#define IOH_ICLR (0x0C)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_IMASK
++ @brief Offset for IMASK register.
++*/
++#define IOH_IMASK (0x10)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_IMASKCLR
++ @brief Offset for IMASKCLR register.
++*/
++#define IOH_IMASKCLR (0x14)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_PO
++	@brief	Offset for PO register.
++*/
++#define IOH_PO (0x18)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_PI
++ @brief Offset for PI register.
++*/
++#define IOH_PI (0x1C)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_PM
++ @brief Offset for PM register.
++*/
++#define IOH_PM (0x20)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_IM0
++ @brief Offset for IM0 register.
++*/
++#define IOH_IM0 (0x24)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_IM1
++ @brief Offset for IM1 register.
++*/
++#define IOH_IM1 (0x28)
++
++/* exported function prototypes */
++/*! @ingroup GPIO_HALLayerAPI
++ @fn void ioh_gpio_cb(int)
++ @brief Interrupt handler callback function
++*/
++void ioh_gpio_cb(u32);
++
++/*! @ingroup GPIO_HALLayerAPI
++ @fn void ioh_gpio_cb_register(void(*ioh_gpio_cbr)(u32))
++ @brief Interrupt handler callback register function
++*/
++void ioh_gpio_cb_register(void (*ioh_gpio_cbr) (u32));
++
++/*! @ingroup GPIO_HALLayerAPI
++@fn s32 ioh_gpio_int_mode(struct ioh_gpio_reqt * gpio_reqt)
++@brief This function sets the interrupt mode for each of the GPIO input pins
++*/
++s32 ioh_gpio_int_mode(struct ioh_gpio_reqt *gpio_reqt);
++
++/*! @ingroup GPIO_HALLayerAPI
++@fn void ioh_gpio_dir_mode(struct ioh_gpio_reqt * gpio_reqt)
++@brief Provides the functionality of setting gpio pin directions[input/output]
++*/
++void ioh_gpio_dir_mode(struct ioh_gpio_reqt *gpio_reqt);
++
++/*! @ingroup GPIO_HALLayerAPI
++ @fn s32 ioh_gpio_read(struct ioh_gpio_reqt * gpio_reqt)
++ @brief Provides the functionality of reading GPIO pin status
++*/
++s32 ioh_gpio_read(struct ioh_gpio_reqt *gpio_reqt);
++
++/*! @ingroup GPIO_HALLayerAPI
++@fn s32 ioh_gpio_write(struct ioh_gpio_reqt * gpio_reqt)
++@brief Provides the functionality of writing data to the GPIO port
++*/
++s32 ioh_gpio_write(struct ioh_gpio_reqt *gpio_reqt);
++
++/*! @ingroup GPIO_HALLayerAPI
++@fn irqreturn_t ioh_gpio_handler(int irq,void * pData)
++@brief Provides the functionality of handling interrupts from GPIO h/w
++*/
++irqreturn_t ioh_gpio_handler(int irq, void *pData);
++
++/*! @ingroup GPIO_HALLayerAPI
++@fn void ioh_gpio_save_reg_conf(void)
++@brief Saves register configuration and also disables GPIO interrupts
++*/
++void ioh_gpio_save_reg_conf(void);
++
++/*! @ingroup GPIO_HALLayerAPI
++ @fn void ioh_gpio_restore_reg_conf(void)
++ @brief Restores register configuration
++*/
++void ioh_gpio_restore_reg_conf(void);
++
++/*! @ingroup GPIO_HALLayerAPI
++ @fn u32 ioh_gpio_readreg(int offset)
++ @brief Function to read the value of a GPIO register
++*/
++u32 ioh_gpio_readreg(int offset);
++int ioh_gpio_writereg(int offset, u32 val);
++
++/* global variables */
++extern u32 ioh_gpio_base_address;
++extern u32 ioh_gpio_event_flag;
++extern wait_queue_head_t ioh_gpio_event;
++extern u32 ioh_gpio_bit_mask;
++
++#endif
+diff -urN linux-2.6.33.1/drivers/gpio/pch_gpio/pch_gpio_main.c topcliff-2.6.33.1/drivers/gpio/pch_gpio/pch_gpio_main.c
+--- linux-2.6.33.1/drivers/gpio/pch_gpio/pch_gpio_main.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33.1/drivers/gpio/pch_gpio/pch_gpio_main.c 2010-04-01 11:41:01.000000000 +0900
+@@ -0,0 +1,420 @@
++/*!
++ * @file ioh_gpio_main.c
++ * @brief Provides all the implementation of the interfaces pertaining to the
++ * GPIO module.
++ * @version 0.92
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 02/20/2009
++ * modified:
++ * WIPRO 01/05/2010
++ * Added the interfaces provided by the GPIO module.
++ *
++ */
++
++/* includes */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/interrupt.h>
++#include <linux/uaccess.h>
++#include <linux/string.h>
++#include <linux/sched.h>
++
++#include "pch_common.h"
++#include "pch_debug.h"
++#include "pch_gpio_main.h"
++#include "pch_gpio_hal.h"
++
++#define MODULE_NAME "ioh_gpio"
++
++DEFINE_SPINLOCK(ioh_gpio_lock); /* for spin lock */
++u32 ioh_gpio_event_flag; /* flag for event */
++s32 ioh_gpio_opencount; /* check whether opened or not */
++
++/*spinlock_t ioh_gpio_lock = SPIN_LOCK_UNLOCKED; for spin lock */
++wait_queue_head_t ioh_gpio_event; /* wait queue head */
++
++/*! @ingroup GPIO_InterfaceLayerFacilitators
++ @struct ioh_gpio_fops
++ @brief Instance of the Kernel structure file_operations.
++*/
++const struct file_operations ioh_gpio_fops = {
++ .owner = THIS_MODULE,
++ .open = ioh_gpio_open,
++ .release = ioh_gpio_release,
++ .ioctl = ioh_gpio_ioctl,
++};
++
++/*function implementations*/
++
++/*! @ingroup GPIO_InterfaceLayerAPI
++@fn int ioh_gpio_open( struct inode *inode,struct file *file)
++@remarks The main tasks performed by this function are:
++- Ensures that the device is not opened before by checking the open count.
++ If it is already opened, then returns EBUSY status code.
++- Registers the interrupt handler by invoking request_irq.
++ If this fails then returns its error code.
++ Otherwise returns @ref IOH_GPIO_SUCCESS
++
++@param inode [@ref INOUT] Contains the reference of the inode structure
++@param file [@ref INOUT] Contains the reference of the file structure
++@retval int
++ - @ref IOH_GPIO_SUCCESS --> If operation is successful.
++ - -EBUSY --> If already opened/ request_irq
++ error status code.
++ - -EINVAL --> request_irq error status code.
++ - -ENOMEM --> request_irq error status code.
++ - -ENOSYS --> request_irq error status code.
++
++@see
++ - ioh_gpio_fops
++*/
++int ioh_gpio_open(struct inode *inode, struct file *file)
++{
++ int ret;
++
++ spin_lock(&ioh_gpio_lock);
++ IOH_DEBUG("ioh_gpio_open : open count value = %d", ioh_gpio_opencount);
++ if (ioh_gpio_opencount) {
++ IOH_LOG(KERN_ERR, "ioh_gpio_open : device already opened\n");
++ ret = -EBUSY;
++ } else {
++
++ ret =
++ (request_irq
++ (ioh_gpio_irq, &ioh_gpio_handler, IRQF_SHARED, MODULE_NAME,
++ (void *)ioh_gpio_base_address));
++ if (ret) {
++ IOH_LOG(KERN_ERR,
++ "ioh_gpio_open : request_irq failed\n");
++ } else {
++ ioh_gpio_opencount++;
++ IOH_DEBUG
++ ("ioh_gpio_open : request_irq invoked\
++ successfully\n");
++ ret = IOH_GPIO_SUCCESS;
++ }
++ }
++ spin_unlock(&ioh_gpio_lock);
++
++ IOH_DEBUG("ioh_gpio_open returns=%d\n", ret);
++ return ret;
++}
++
++/*! @ingroup GPIO_InterfaceLayerAPI
++@fn int ioh_gpio_release(struct inode *inode,struct file *file)
++@remarks The main tasks performed by this function are:
++-Ensures that device is opened successfully by checking the open count value.
++ If it is not opened, then returns with IOH_GPIO_SUCCESS status code.
++-Disables interrupts for all pins by using @ref ioh_gpio_int_mode
++ API.
++-Un-registers interrupt handler and returns @ref IOH_GPIO_SUCCESS.
++
++@param inode [@ref INOUT] Contains the reference of the inode structure
++@param file [@ref INOUT] Contains the reference of the file structure
++@retval int
++ - @ref IOH_GPIO_SUCCESS -->
++ If the operation is successful.
++@see
++ - ioh_gpio_fops
++*/
++int ioh_gpio_release(struct inode *inode, struct file *file)
++{
++ struct ioh_gpio_reqt req;
++ spin_lock(&ioh_gpio_lock);
++
++ if (ioh_gpio_opencount > 0) {
++ memset(&req, 0, sizeof(req));
++ req.pins = IOH_GPIO_ALL_PINS;
++
++ /* disable interrupts for all gpio pins */
++ (void)ioh_gpio_int_mode(&req);
++
++ free_irq(ioh_gpio_irq, (void *)ioh_gpio_base_address);
++ IOH_DEBUG("ioh_gpio_release : free_irq invoked successfully");
++
++ ioh_gpio_opencount--;
++ }
++ spin_unlock(&ioh_gpio_lock);
++
++ IOH_DEBUG("ioh_gpio_release : ioh_gpio_opencount =%d\n",
++ ioh_gpio_opencount);
++
++ IOH_DEBUG("ioh_gpio_release returning=%d\n", IOH_GPIO_SUCCESS);
++ return IOH_GPIO_SUCCESS;
++}
++
++/*! @ingroup GPIO_InterfaceLayerAPI
++@fn int ioh_gpio_ioctl(struct inode * inode,struct file * file,
++ unsigned int cmd,unsigned long arg)
++@remarks The main tasks performed by this function are:
++ - Copies the arg from user space to kernel space.
++ If this fails, returns EFAULT status code.
++ - Checks the cmd specified. If not a valid command,
++ returns EINVAL status code.
++ - Verifies the validity of the command argument based on
++ the operation requested. If invalid, returns EINVAL.
++ - Performs the requested action based on the ioctl command,
++ by calling the appropriate HAL API functions.
++ - Returns @ref IOH_GPIO_SUCCESS if the command is completed
++ successfully.
++
++@param inode [@ref INOUT] Contains the reference of the inode structure
++@param file [@ref INOUT] Contains the reference of the file structure
++@param cmd [@ref IN] Contains the command value
++@param arg [@ref IN] Contains the command argument value
++@retval int
++- @ref IOH_GPIO_SUCCESS			--> If the operation is successful.
++- -EFAULT --> wait_event_interruptible API
++ is interrupted by a signal.
++- -ERESTARTSYS --> wait_event_interruptible API
++ is interrupted by a signal.
++- -EINVAL --> Invalid address/parameter.
++
++@see
++ - ioh_gpio_fops
++*/
++int ioh_gpio_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++
++ struct ioh_gpio_reqt gpio_reqt;
++ s32 ret_value;
++
++ IOH_DEBUG(KERN_INFO"%s:cmd = 0x%x\n", __func__, cmd);
++ IOH_DEBUG(KERN_INFO"%s: IOCTL_GPIO_INT_ENABLE= 0x%x\n", __func__,
++ IOCTL_GPIO_INT_ENABLE);
++ IOH_DEBUG(KERN_INFO"%s: IOCTL_GPIO_INT_DISABLE= 0x%x\n", __func__,
++ IOCTL_GPIO_INT_DISABLE);
++ IOH_DEBUG(KERN_INFO"%s: IOCTL_GPIO_DIRECTION= 0x%x\n", __func__,
++ IOCTL_GPIO_DIRECTION);
++ IOH_DEBUG(KERN_INFO"%s: IOCTL_GPIO_WRITE= 0x%x\n", __func__,
++ IOCTL_GPIO_WRITE);
++ IOH_DEBUG(KERN_INFO"%s: IOCTL_GPIO_READ= 0x%x\n", __func__,
++ IOCTL_GPIO_READ);
++ IOH_DEBUG(KERN_INFO"%s: IOCTL_GPIO_NOTIFY= 0x%x\n", __func__,
++ IOCTL_GPIO_NOTIFY);
++
++ do {
++ if (ioh_gpio_suspended == true) {
++ IOH_LOG(KERN_ERR,
++ "ioh_gpio_ioctl : suspend initiated returning\
++ =%d\n",
++ IOH_GPIO_FAIL);
++ ret_value = IOH_GPIO_FAIL;
++ break;
++ }
++
++ ret_value =
++ copy_from_user(&gpio_reqt, (void *)arg,
++ sizeof(struct ioh_gpio_reqt));
++ if (ret_value) {
++ IOH_LOG(KERN_ERR,
++ "ioh_gpio_ioctl : copy_from_user fail returning\
++ =%d\n",
++ -EFAULT);
++ ret_value = -EFAULT;
++ break;
++ }
++ IOH_DEBUG("ioh_gpio_ioctl : copy_from_user returns =%d\n",
++ ret_value);
++
++ if (((gpio_reqt.enable) > 1)
++ || ((gpio_reqt.pins) > GPIO_MAX_PINS_MASK)
++ || ((gpio_reqt.port) > GPIO_NUM_PORT_MAX)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_gpio_ioctl : Invalid parameter\
++ returning=%d\n",
++ -EINVAL);
++ ret_value = -EINVAL;
++ break;
++ }
++ switch (cmd) {
++ case IOCTL_GPIO_INT_ENABLE:
++ {
++
++ if (gpio_reqt.enable == 0) {
++ ret_value = -EINVAL;
++ IOH_DEBUG
++ ("ioh_gpio_ioctl : Invalid\
++ parameter in enable\n");
++ } else {
++ ret_value =
++ ioh_gpio_int_mode(&gpio_reqt);
++ IOH_DEBUG
++ ("ioh_gpio_ioctl : Invoked\
++ ioh_gpio_int_mode successfully\n");
++ }
++ break;
++ }
++
++ case IOCTL_GPIO_INT_DISABLE:
++ {
++ if (gpio_reqt.enable != 0) {
++ ret_value = -EINVAL;
++ IOH_DEBUG
++ ("ioh_gpio_ioctl : Invalid\
++ parameter in enable\n");
++ } else {
++ ret_value =
++ ioh_gpio_int_mode(&gpio_reqt);
++ IOH_DEBUG
++ ("ioh_gpio_ioctl : Invoked\
++ ioh_gpio_int_mode successfully\n");
++ }
++ break;
++ }
++
++ case IOCTL_GPIO_DIRECTION:
++ {
++ if ((gpio_reqt.mode > 1)) {
++
++ IOH_DEBUG
++ ("ioh_gpio_ioctl : Invalid\
++ direction\n");
++ ret_value = -EINVAL;
++ } else {
++ ioh_gpio_dir_mode(&gpio_reqt);
++ IOH_DEBUG
++ ("ioh_gpio_ioctl : Invoked\
++ ioh_gpio_dir_mode successfully\n");
++ ret_value = IOH_GPIO_SUCCESS;
++ }
++ break;
++ }
++
++ case IOCTL_GPIO_WRITE:
++ {
++ ret_value = ioh_gpio_write(&gpio_reqt);
++ IOH_DEBUG
++ ("ioh_gpio_ioctl : Invoked\
++ ioh_gpio_write_mode successfully\n");
++ break;
++ }
++
++ case IOCTL_GPIO_READ:
++ {
++ (void)ioh_gpio_read(&gpio_reqt);
++ IOH_DEBUG
++ ("ioh_gpio_ioctl : Invoked\
++ ioh_gpio_read_mode successfully\n");
++ ret_value =
++ copy_to_user((void *)arg, &gpio_reqt,
++ sizeof(struct ioh_gpio_reqt));
++
++ if (ret_value) {
++ IOH_LOG(KERN_ERR,
++ "ioh_gpio_ioctl copy_to_user\
++ failed returning=%d\n",
++ -EFAULT);
++ ret_value = -EFAULT;
++ } else {
++ IOH_DEBUG
++ ("ioh_gpio_ioctl copy_to_user\
++ returns=%d\n",
++ ret_value);
++ ret_value = IOH_GPIO_SUCCESS;
++ }
++ break;
++ }
++
++ case IOCTL_GPIO_NOTIFY:
++ {
++ ioh_gpio_event_flag = 0;
++ if ((((ioh_gpio_readreg(IOH_IEN)) &
++ (gpio_reqt.pins)) != (gpio_reqt.pins))
++ ||
++ (((ioh_gpio_readreg(IOH_PM) &
++ (gpio_reqt.pins)) != false))) {
++ /* if interrupts are not enabled on the
++ pins for which notify is requested */
++ /* or the pins are not in input mode */
++ IOH_DEBUG
++ ("ioh_gpio_ioctl GPIO pins not in\
++ input mode or interrupts\
++ not enabled!");
++ ret_value = -EINVAL;
++ } else {
++ ret_value =
++ wait_event_interruptible
++ (ioh_gpio_event,
++ (ioh_gpio_event_flag & gpio_reqt.
++ pins) != 0);
++ if (ret_value) {
++ IOH_LOG(KERN_ERR,
++ "ioh_gpio_ioctl wait_ev\
++ ent_interruptible\
++ failed returning=%d\n",
++ -ERESTARTSYS);
++ ret_value = -ERESTARTSYS;
++ } else {
++ IOH_DEBUG
++ ("ioh_gpio_ioctl wait_event\
++ _interruptible returns=%d\n",
++ ret_value);
++ (void)ioh_gpio_read(&gpio_reqt);
++ ret_value =
++ copy_to_user((void *)arg,
++ &gpio_reqt,
++ sizeof(struct
++ ioh_gpio_reqt));
++ if (ret_value) {
++ IOH_LOG(KERN_ERR,
++ "ioh_gpio_ioctl\
++ copy_to_user\
++ failed returni\
++ ng=%d\n",
++ -EFAULT);
++ ret_value = -EFAULT;
++ } else {
++ IOH_DEBUG
++ ("ioh_gpio_ioctl\
++ copy_to_user\
++ returns=%d\n",
++ ret_value);
++ ret_value =
++ IOH_GPIO_SUCCESS;
++ }
++ }
++ }
++ break;
++ }
++
++ default:
++ {
++ IOH_LOG(KERN_ERR,
++ "ioh_gpio_ioctl invalid command\
++ returning=%d\n",
++ -EINVAL);
++ ret_value = -EINVAL;
++ break;
++ }
++ }
++ break;
++
++ } while (0);
++ IOH_LOG(KERN_ERR, "ioh_gpio_ioctl returns=%d\n", ret_value);
++ return ret_value;
++}
+diff -urN linux-2.6.33.1/drivers/gpio/pch_gpio/pch_gpio_main.h topcliff-2.6.33.1/drivers/gpio/pch_gpio/pch_gpio_main.h
+--- linux-2.6.33.1/drivers/gpio/pch_gpio/pch_gpio_main.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33.1/drivers/gpio/pch_gpio/pch_gpio_main.h 2010-04-01 10:58:31.000000000 +0900
+@@ -0,0 +1,686 @@
++#ifndef __IOH_GPIO_MAIN_H__
++#define __IOH_GPIO_MAIN_H__
++/*!
++ * @file ioh_gpio_main.h
++ * @brief Provides all the interfaces pertaining to the GPIO module.
++ * @version 0.92
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++*/
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 02/20/2009
++ * modified:
++ * WIPRO 01/05/2010
++ * Added the interfaces provided by the gpio module.
++ *
++*/
++
++/*! @defgroup GPIO*/
++/*! @defgroup GPIO_Global
++ @ingroup GPIO*/
++
++/* @defgroup GlobalGeneral
++ @ingroup GPIO_Global*/
++/* @defgroup GlobalResultCodes
++ @ingroup GPIO_Global*/
++
++/*! @defgroup GPIO_InterfaceLayer
++ @ingroup GPIO*/
++/*! @defgroup GPIO_InterfaceLayerAPI
++ @ingroup GPIO_InterfaceLayer
++*/
++
++/* @defgroup InterfaceLayerNotifyRoutines
++ @ingroup GPIO_InterfaceLayer
++*/
++
++/*! @defgroup GPIO_PCILayer
++ @ingroup GPIO*/
++/*! @defgroup GPIO_PCILayerAPI
++ @ingroup GPIO_PCILayer
++*/
++/*! @defgroup GPIO_PCILayerFacilitators
++ @ingroup GPIO_PCILayer
++*/
++/*! @defgroup GPIO_HALLayer
++ @ingroup GPIO*/
++/*! @defgroup GPIO_HALLayerAPI
++ @ingroup GPIO_HALLayer
++*/
++
++/* @defgroup HALLayerFacilitators
++ @ingroup GPIO_HALLayer
++*/
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def GPIO_IOCTL_MAGIC
++ @brief The ioctl magic number.
++*/
++#define GPIO_IOCTL_MAGIC (0xf7)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOCTL_GPIO_INT_ENABLE
++ @brief IOCTL for GPIO interrupt enable.
++*/
++#define IOCTL_GPIO_INT_ENABLE (_IOW(GPIO_IOCTL_MAGIC, 1, struct ioh_gpio_reqt))
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOCTL_GPIO_INT_DISABLE
++ @brief IOCTL for GPIO interrupt disable.
++*/
++#define IOCTL_GPIO_INT_DISABLE (_IOW(GPIO_IOCTL_MAGIC, 2, struct ioh_gpio_reqt))
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOCTL_GPIO_DIRECTION
++ @brief IOCTL for GPIO direction setting.
++*/
++#define IOCTL_GPIO_DIRECTION (_IOW(GPIO_IOCTL_MAGIC, 3, struct ioh_gpio_reqt))
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOCTL_GPIO_WRITE
++ @brief IOCTL for GPIO write.
++*/
++#define IOCTL_GPIO_WRITE (_IOW(GPIO_IOCTL_MAGIC, 4, struct ioh_gpio_reqt))
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOCTL_GPIO_READ
++ @brief IOCTL for GPIO read.
++*/
++#define IOCTL_GPIO_READ (_IOR(GPIO_IOCTL_MAGIC, 5, struct ioh_gpio_reqt))
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOCTL_GPIO_NOTIFY
++ @brief IOCTL for GPIO pin status change notification.
++*/
++#define IOCTL_GPIO_NOTIFY (_IOR(GPIO_IOCTL_MAGIC, 6, struct ioh_gpio_reqt))
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_GPIO_PIN0
++ @brief GPIO PIN 0
++*/
++#define IOH_GPIO_PIN0 (0x1)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_GPIO_PIN1
++ @brief GPIO PIN 1
++*/
++#define IOH_GPIO_PIN1 (0x2)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_GPIO_PIN2
++ @brief GPIO PIN 2
++*/
++#define IOH_GPIO_PIN2 (0x4)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_GPIO_PIN3
++ @brief GPIO PIN 3
++*/
++#define IOH_GPIO_PIN3 (0x8)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_GPIO_PIN4
++ @brief GPIO PIN 4
++*/
++#define IOH_GPIO_PIN4 (0x10)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_GPIO_PIN5
++ @brief GPIO PIN 5
++*/
++#define IOH_GPIO_PIN5 (0x20)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_GPIO_PIN6
++ @brief GPIO PIN 6
++*/
++#define IOH_GPIO_PIN6 (0x40)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_GPIO_PIN7
++ @brief GPIO PIN 7
++*/
++#define IOH_GPIO_PIN7 (0x80)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_GPIO_PIN8
++ @brief GPIO PIN 8
++*/
++#define IOH_GPIO_PIN8 (0x100)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_GPIO_PIN9
++ @brief GPIO PIN 9
++*/
++#define IOH_GPIO_PIN9 (0x200)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_GPIO_PIN10
++ @brief GPIO PIN 10
++*/
++#define IOH_GPIO_PIN10 (0x400)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_GPIO_PIN11
++ @brief GPIO PIN 11
++*/
++#define IOH_GPIO_PIN11 (0x800)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_GPIO_ALL_PINS
++ @brief Mask for GPIO pins 0 to 11
++*/
++#define IOH_GPIO_ALL_PINS (IOH_GPIO_PIN0 | IOH_GPIO_PIN1 | IOH_GPIO_PIN2\
++| IOH_GPIO_PIN3 | IOH_GPIO_PIN4 | IOH_GPIO_PIN5 | IOH_GPIO_PIN6 | IOH_GPIO_PIN7\
++| IOH_GPIO_PIN8 | IOH_GPIO_PIN9 | IOH_GPIO_PIN10 | IOH_GPIO_PIN11)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_EDG_PIN0
++ @brief Falling Edge interrupt on Pin0
++*/
++#define INT_FL_EDG_PIN0 (0x0)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_EDG_PIN1
++ @brief Falling Edge interrupt on Pin1
++*/
++#define INT_FL_EDG_PIN1 (0x0)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_EDG_PIN2
++ @brief Falling Edge interrupt on Pin2
++*/
++#define INT_FL_EDG_PIN2 (0x0)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_EDG_PIN3
++ @brief Falling Edge interrupt on Pin3
++*/
++#define INT_FL_EDG_PIN3 (0x0)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_EDG_PIN4
++ @brief Falling Edge interrupt on Pin4
++*/
++#define INT_FL_EDG_PIN4 (0x0)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_EDG_PIN5
++ @brief Falling Edge interrupt on Pin5
++*/
++#define INT_FL_EDG_PIN5 (0x0)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_EDG_PIN6
++ @brief Falling Edge interrupt on Pin6
++*/
++#define INT_FL_EDG_PIN6 (0x0)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_EDG_PIN7
++ @brief Falling Edge interrupt on Pin7
++*/
++#define INT_FL_EDG_PIN7 (0x0)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_EDG_PIN8
++ @brief Falling Edge interrupt on Pin8
++*/
++#define INT_FL_EDG_PIN8 (0x0)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_EDG_PIN9
++ @brief Falling Edge interrupt on Pin9
++*/
++#define INT_FL_EDG_PIN9 (0x0)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_EDG_PIN10
++ @brief Falling Edge interrupt on Pin10
++*/
++#define INT_FL_EDG_PIN10 (0x0)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_EDG_PIN11
++ @brief Falling Edge interrupt on Pin11
++*/
++#define INT_FL_EDG_PIN11 (0x0)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_RS_EDG_PIN0
++ @brief Rising Edge interrupt on Pin0
++*/
++#define INT_RS_EDG_PIN0 (0x1)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_RS_EDG_PIN1
++ @brief Rising Edge interrupt on Pin1
++*/
++#define INT_RS_EDG_PIN1 (0x10)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_RS_EDG_PIN2
++ @brief Rising Edge interrupt on Pin2
++*/
++#define INT_RS_EDG_PIN2 (0x100)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_RS_EDG_PIN3
++ @brief Rising Edge interrupt on Pin3
++*/
++#define INT_RS_EDG_PIN3 (0x1000)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_RS_EDG_PIN4
++ @brief Rising Edge interrupt on Pin4
++*/
++#define INT_RS_EDG_PIN4 (0x10000)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_RS_EDG_PIN5
++ @brief Rising Edge interrupt on Pin5
++*/
++#define INT_RS_EDG_PIN5 (0x100000)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_RS_EDG_PIN6
++ @brief Rising Edge interrupt on Pin6
++*/
++#define INT_RS_EDG_PIN6 (0x1000000)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_RS_EDG_PIN7
++ @brief Rising Edge interrupt on Pin7
++*/
++#define INT_RS_EDG_PIN7 (0x10000000)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_RS_EDG_PIN8
++ @brief Rising Edge interrupt on Pin8
++*/
++#define INT_RS_EDG_PIN8 ((0x100000000ULL))
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_RS_EDG_PIN9
++ @brief Rising Edge interrupt on Pin9
++*/
++#define INT_RS_EDG_PIN9 ((0x1000000000ULL))
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_RS_EDG_PIN10
++ @brief Rising Edge interrupt on Pin10
++*/
++#define INT_RS_EDG_PIN10 ((0x10000000000ULL))
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_RS_EDG_PIN11
++ @brief Rising Edge interrupt on Pin11
++*/
++#define INT_RS_EDG_PIN11 ((0x100000000000ULL))
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_LO_PIN0
++ @brief Low Level Interrupt on Pin0
++*/
++#define INT_LVL_LO_PIN0 (0x2)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_LO_PIN1
++ @brief Low Level Interrupt on Pin1
++*/
++#define INT_LVL_LO_PIN1 (0x20)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_LO_PIN2
++ @brief Low Level Interrupt on Pin2
++*/
++#define INT_LVL_LO_PIN2 (0x200)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_LO_PIN3
++ @brief Low Level Interrupt on Pin3
++*/
++#define INT_LVL_LO_PIN3 (0x2000)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_LO_PIN4
++ @brief Low Level Interrupt on Pin4
++*/
++#define INT_LVL_LO_PIN4 (0x20000)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_LO_PIN5
++ @brief Low Level Interrupt on Pin5
++*/
++#define INT_LVL_LO_PIN5 (0x200000)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_LO_PIN6
++ @brief Low Level Interrupt on Pin6
++*/
++#define INT_LVL_LO_PIN6 (0x2000000)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_LO_PIN7
++ @brief Low Level Interrupt on Pin7
++*/
++#define INT_LVL_LO_PIN7 (0x20000000)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_LO_PIN8
++ @brief Low Level Interrupt on Pin8
++*/
++#define INT_LVL_LO_PIN8 ((0x200000000ULL))
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_LO_PIN9
++ @brief Low Level Interrupt on Pin9
++*/
++#define INT_LVL_LO_PIN9 ((0x2000000000ULL))
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_LO_PIN10
++ @brief Low Level Interrupt on Pin10
++*/
++#define INT_LVL_LO_PIN10 ((0x20000000000ULL))
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_LO_PIN11
++ @brief Low Level Interrupt on Pin11
++*/
++#define INT_LVL_LO_PIN11 ((0x200000000000ULL))
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_HI_PIN0
++ @brief High Level Interrupt on Pin0
++*/
++#define INT_LVL_HI_PIN0 (0x3)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_HI_PIN1
++ @brief High Level Interrupt on Pin1
++*/
++#define INT_LVL_HI_PIN1 (0x30)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_HI_PIN2
++ @brief High Level Interrupt on Pin2
++*/
++#define INT_LVL_HI_PIN2 (0x300)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_HI_PIN3
++ @brief High Level Interrupt on Pin3
++*/
++#define INT_LVL_HI_PIN3 (0x3000)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_HI_PIN4
++ @brief High Level Interrupt on Pin4
++*/
++#define INT_LVL_HI_PIN4 (0x30000)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_HI_PIN5
++ @brief High Level Interrupt on Pin5
++*/
++#define INT_LVL_HI_PIN5 (0x300000)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_HI_PIN6
++ @brief High Level Interrupt on Pin6
++*/
++#define INT_LVL_HI_PIN6 (0x3000000)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_HI_PIN7
++ @brief High Level Interrupt on Pin7
++*/
++#define INT_LVL_HI_PIN7 (0x30000000)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_HI_PIN8
++ @brief High Level Interrupt on Pin8
++*/
++#define INT_LVL_HI_PIN8 ((0x300000000ULL))
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_HI_PIN9
++ @brief High Level Interrupt on Pin9
++*/
++#define INT_LVL_HI_PIN9 ((0x3000000000ULL))
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_HI_PIN10
++ @brief High Level Interrupt on Pin10
++*/
++#define INT_LVL_HI_PIN10 ((0x30000000000ULL))
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_LVL_HI_PIN11
++ @brief High Level Interrupt on Pin11
++*/
++#define INT_LVL_HI_PIN11 ((0x300000000000ULL))
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_RS_EDG_PIN0
++ @brief Falling and rising Edge on Pin0
++*/
++#define INT_FL_RS_EDG_PIN0 (0x4)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_RS_EDG_PIN1
++ @brief Falling and rising Edge on Pin1
++*/
++#define INT_FL_RS_EDG_PIN1 (0x40)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_RS_EDG_PIN2
++ @brief Falling and rising Edge on Pin2
++*/
++#define INT_FL_RS_EDG_PIN2 (0x400)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_RS_EDG_PIN3
++ @brief Falling and rising Edge on Pin3
++*/
++#define INT_FL_RS_EDG_PIN3 (0x4000)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_RS_EDG_PIN4
++ @brief Falling and rising Edge on Pin4
++*/
++#define INT_FL_RS_EDG_PIN4 (0x40000)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_RS_EDG_PIN5
++ @brief Falling and rising Edge on Pin5
++*/
++#define INT_FL_RS_EDG_PIN5 (0x400000)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_RS_EDG_PIN6
++ @brief Falling and rising Edge on Pin6
++*/
++#define INT_FL_RS_EDG_PIN6 (0x4000000)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_RS_EDG_PIN7
++ @brief Falling and rising Edge on Pin7
++*/
++#define INT_FL_RS_EDG_PIN7 (0x40000000)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_RS_EDG_PIN8
++ @brief Falling and rising Edge on Pin8
++*/
++#define INT_FL_RS_EDG_PIN8 ((0x400000000ULL))
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_RS_EDG_PIN9
++ @brief Falling and rising Edge on Pin9
++*/
++#define INT_FL_RS_EDG_PIN9 ((0x4000000000ULL))
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_RS_EDG_PIN10
++ @brief Falling and rising Edge on Pin10
++*/
++#define INT_FL_RS_EDG_PIN10 ((0x40000000000ULL))
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def INT_FL_RS_EDG_PIN11
++ @brief Falling and rising Edge on Pin11
++*/
++#define INT_FL_RS_EDG_PIN11 ((0x400000000000ULL))
++
++/*! @ingroup GPIO_InterfaceLayer
++
++ @def GPIO_MAX_PINS_MASK
++ @brief Mask used for all pins.
++*/
++#define GPIO_MAX_PINS_MASK (0xFFF)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def GPIO_NUM_PORT_MAX
++ @brief Maximum number of ports.
++*/
++#define GPIO_NUM_PORT_MAX (0)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def GPIO_NUM_PINS
++ @brief Specifies number of GPIO PINS
++*/
++#define GPIO_NUM_PINS (12)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def GPIO_IN
++ @brief Specifies GPIO input mode.
++*/
++#define GPIO_IN (0)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def GPIO_OUT
++ @brief Specifies GPIO output mode.
++*/
++#define GPIO_OUT (1)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def GPIO_HIGH
++ @brief Specifies GPIO HIGH level.
++*/
++#define GPIO_HIGH (1)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def GPIO_LOW
++ @brief Specifies GPIO LOW level.
++*/
++#define GPIO_LOW (0)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_GPIO_SUCCESS
++ @brief Specifies GPIO SUCCESS STATUS CODE
++*/
++#define IOH_GPIO_SUCCESS (0)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def IOH_GPIO_FAIL
++ @brief Specifies GPIO ERROR STATUS CODE
++*/
++#define IOH_GPIO_FAIL (-1)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def BIT_MASK_16
++ @brief Mask for 16 bits
++*/
++#define BIT_MASK_16 (0xFFFF)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def BIT_MASK_8
++ @brief Mask for 8 bits
++*/
++#define BIT_MASK_8 (0xFF)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def BIT_MASK_12
++ @brief Mask for 12 bits
++*/
++#define BIT_MASK_12 (0xFFF)
++
++/*! @ingroup GPIO_InterfaceLayer
++ @def BIT_MASK_32
++ @brief Mask value for 32 bits.
++*/
++#define BIT_MASK_32 (0xFFFFFFFF)
++
++/*structures*/
++/*! @ingroup GPIO_InterfaceLayer
++@struct ioh_gpio_reqt
++@brief This structure specifies information such the GPIO port, pins,
++ interrupt and direction
++ mode details associated with a user request. The GPIO port
++ status is also returned to
++ the user using this structure.
++@see
++ - ioh_gpio_int_mode
++ - ioh_gpio_dir_mode
++ - ioh_gpio_read
++ - ioh_gpio_write
++*/
++struct ioh_gpio_reqt {
++ unsigned long port; /**< Specifies the port. */
++ unsigned long pins; /**< Specifies the pins. */
++ unsigned long long mode;/**< Specifies the direction/interrupt mode.*/
++ unsigned long enable; /**< Interrupt enable/disable. */
++};
++
++extern s32 ioh_gpio_opencount;
++extern spinlock_t ioh_gpio_lock;
++extern const struct file_operations ioh_gpio_fops;
++
++/* exported function prototypes*/
++/*! @ingroup GPIO_InterfaceLayerAPI
++@fn int ioh_gpio_open( struct inode *inode,struct file *file )
++@brief This function is invoked when a process opens the device node
++*/
++int ioh_gpio_open(struct inode *inode, struct file *file);
++
++/*! @ingroup GPIO_InterfaceLayerAPI
++@fn int ioh_gpio_release(struct inode *inode,struct file *file)
++@brief This function is invoked when a process closes the device node
++*/
++int ioh_gpio_release(struct inode *inode, struct file *file);
++
++/*! @ingroup GPIO_InterfaceLayerAPI
++@fn int ioh_gpio_ioctl(struct inode * inode,struct file * file,unsigned int cmd,
++ unsigned long arg)
++@brief This function is registered at the driver initialization point
++ (module_init)
++ and invoked when user process invokes an ioctl call on the device.
++*/
++int ioh_gpio_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
++ unsigned long arg);
++
++/* global variables*/
++extern u32 ioh_gpio_base_address; /* base address*/
++extern u32 ioh_gpio_irq; /* irq number*/
++extern s32 ioh_gpio_suspended; /* suspend status*/
++
++#endif
+diff -urN linux-2.6.33.1/drivers/gpio/pch_gpio/pch_gpio_pci.c topcliff-2.6.33.1/drivers/gpio/pch_gpio/pch_gpio_pci.c
+--- linux-2.6.33.1/drivers/gpio/pch_gpio/pch_gpio_pci.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33.1/drivers/gpio/pch_gpio/pch_gpio_pci.c 2010-04-01 10:58:31.000000000 +0900
+@@ -0,0 +1,537 @@
++/*!
++ * @file ioh_gpio_pci.c
++ * @brief Provides all the implementation of the interfaces pertaining to the
++ * pci and gpio registrations.
++ * @version 0.92
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 02/20/2009
++ * modified:
++ * WIPRO 01/05/2010
++ * Added the interfaces provided by the gpio module.
++ *
++ */
++/*includes*/
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include <linux/fs.h>
++#include <linux/cdev.h>
++#include <linux/interrupt.h>
++#include <linux/string.h>
++
++#include "pch_common.h"
++#include "pch_debug.h"
++#include "pch_gpio_main.h"
++#include "pch_gpio_hal.h"
++
++/*macros*/
++/*! @ingroup GPIO_PCILayer
++ @def PCI_VENDOR_ID_IOH
++ @brief Outlines the PCI Vendor ID.
++*/
++#define PCI_VENDOR_ID_IOH (0x10DB)
++
++/*! @ingroup GPIO_PCILayer
++ @def PCI_DEVICE_ID_GE_GPIO
++ @brief Outlines the PCI Device ID for IOH GPIO.
++*/
++/* #define PCI_DEVICE_ID_GE_GPIO (0x8000)*/
++#define PCI_DEVICE_ID_GE_GPIO (0x8803) /* OKISEMI for LSI */
++
++/*! @ingroup GPIO_PCILayer
++ @def PCI_DEVICE_ID_MP_GPIO
++ @brief Outlines the PCI Device ID for MP GPIO.
++*/
++#define PCI_DEVICE_ID_MP_GPIO (0x8004)
++
++/*! @ingroup GPIO_PCILayer
++ @def PCI_DEVICE_ID_IVI_GPIO
++ @brief Outlines the PCI Device ID for IVI GPIO.
++*/
++#define PCI_DEVICE_ID_IVI_GPIO (0x8001)
++
++/*! @ingroup GPIO_PCILayer
++ @def IOH_MINOR_NOS
++ @brief Outlines the GPIO minor number limit.
++*/
++#define IOH_MINOR_NOS (1)
++
++/* Global variables*/
++u32 ioh_gpio_base_address;
++u32 ioh_gpio_irq;
++s32 ioh_gpio_suspended;
++
++/* Major number allocation via module parameter */
++static dev_t ioh_gpio_dev_no;
++static int ioh_gpio_major_no;
++static struct cdev ioh_gpio_dev;
++
++u32 ioh_gpio_bit_mask;
++
++/*! @ingroup GPIO_PCILayerAPI
++ @fn static s32 __devinit ioh_gpio_probe
++ (struct pci_dev* ioh_pci_dev,
++ const struct pci_device_id* pci_id)
++ @brief Provides the functionality of probing the module.
++*/
++static int __devinit ioh_gpio_probe(struct pci_dev *pdev, const
++ struct pci_device_id *id);
++
++/*! @ingroup GPIO_PCILayerAPI
++ @fn static void __devexit ioh_gpio_remove
++ (struct pci_dev * ioh_pci_dev)
++ @brief Provides the functionality of removing the module.
++*/
++static void __devexit ioh_gpio_remove(struct pci_dev *pdev);
++
++/*! @ingroup GPIO_PCILayerAPI
++ @fn static s32 ioh_gpio_suspend(struct pci_dev* pDev,
++ pm_message_t state)
++ @brief Provides the functionality of suspending the module.
++*/
++static int ioh_gpio_suspend(struct pci_dev *pdev, pm_message_t state);
++
++/*! @ingroup GPIO_PCILayerAPI
++ @fn static s32 ioh_gpio_resume(struct pci_dev* pDev)
++ @brief Provides the functionalities of resuming the module.
++*/
++static int ioh_gpio_resume(struct pci_dev *pdev);
++
++/*structures*/
++/*! @ingroup GPIO_PCILayerFacilitators
++@static struct pci_device_id
++@brief It is a structure used for preserving information related to the
++ device id.
++@note
++The concerned details should be provided as a reference in the pci driver
++structure.
++
++@see
++ - ioh_gpio_driver
++
++*/
++static struct pci_device_id ioh_gpio_pcidev_id[] = {
++
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_GE_GPIO)},
++ {PCI_DEVICE(PCI_VENDOR_ID_IOH, PCI_DEVICE_ID_MP_GPIO)},
++ {PCI_DEVICE(PCI_VENDOR_ID_IOH, PCI_DEVICE_ID_IVI_GPIO)},
++ {0,}
++};
++
++/*! @ingroup GPIO_PCILayerFacilitators
++@struct ioh_gpio_driver
++@brief This structure specifies the pci driver methods pertaining to
++ GPIO module.
++@see
++ - ioh_gpio_pci_init
++ - ioh_gpio_pci_exit
++*/
++static struct pci_driver ioh_gpio_driver = {
++ .name = "ioh_gpio_empty",
++ .id_table = ioh_gpio_pcidev_id,
++ .probe = ioh_gpio_probe,
++ .remove = __devexit_p(ioh_gpio_remove),
++#ifdef CONFIG_PM
++ .suspend = ioh_gpio_suspend, /* OKISEMI for PM bug fix */
++ .resume = ioh_gpio_resume /* OKISEMI for PM bug fix */
++#endif
++};
++
++/*! @ingroup GPIO_PCILayerAPI
++ @fn static int __init ioh_gpio_pci_init(void)
++ @brief Provides the functionality of initializing the module
++*/
++static int __init ioh_gpio_pci_init(void);
++/*! @ingroup GPIO_PCILayerAPI
++ @fn static void __exit ioh_gpio_pci_exit(void)
++ @brief Provides the functionality of exiting the module
++*/
++static void __exit ioh_gpio_pci_exit(void);
++
++MODULE_DESCRIPTION("IOH GPIO PCI Driver");
++MODULE_LICENSE("GPL");
++module_init(ioh_gpio_pci_init);
++module_exit(ioh_gpio_pci_exit);
++module_param(ioh_gpio_major_no, int, S_IRUSR | S_IWUSR);
++
++/*function implementations*/
++
++/*! @ingroup GPIO_PCILayerAPI
++@fn static int __init ioh_gpio_pci_init(void)
++@remarks Implements the initialization functionality of the module.
++ The main task performed by this function is:
++ - Register the module as PCI Driver.
++
++@param None
++@retval int
++ - @ref IOH_GPIO_SUCCESS --> Loading successful.
++ - -EEXIST --> pci_register_driver failed.
++ - -EINVAL --> pci_register_driver failed.
++ - -ENOMEM --> pci_register_driver failed.
++*/
++static int __init ioh_gpio_pci_init(void)
++{
++ s32 ret;
++ ret = pci_register_driver(&ioh_gpio_driver);
++ IOH_DEBUG
++ ("ioh_gpio_pci_init : Invoked pci_register_driver successfully\n");
++ IOH_DEBUG("ioh_gpio_pci_init returns -%d\n", ret);
++ return ret;
++}
++
++/*! @ingroup GPIO_PCILayerAPI
++ @fn static void __exit ioh_gpio_pci_exit(void)
++ @remarks Implements the exit functionality of the module.
++ The main task performed by this function is:
++ - Un-register the module as a PCI Driver.
++
++ @param None
++ @retval None
++*/
++static void __exit ioh_gpio_pci_exit(void)
++{
++ pci_unregister_driver(&ioh_gpio_driver);
++ IOH_DEBUG
++ ("ioh_gpio_pci_exit : Invoked pci_unregister_driver\
++ successfully\n");
++}
++
++/*! @ingroup GPIO_PCILayerAPI
++@fn static int __devinit ioh_gpio_probe(struct pci_dev* pdev,
++ const struct pci_device_id* id)
++@remarks Implements the probe functionality of the module.
++ This function is invoked
++ when a PCI device with the Vendor and Device ID supported by this module
++ is detected. The main tasks performed by this function are:
++ - Enables the device.
++ - Acquires the device resources and the remapped base address of
++ the device.
++ - Registers a character device driver for the user space application
++ to interact with the system.
++ - Registers the callback function.
++@note This function is invoked by the Kernel subsystem when a PCI device
++ with a supported vendor ID and Device ID is detected.
++
++@param pdev [@ref INOUT] Contains the reference of the pci_dev structure
++@param id [@ref IN] Contains the reference of the pci_device_id structure
++@retval int
++ - @ref IOH_GPIO_SUCCESS --> Operation successful.
++ - -EIO --> pci_enable_device error status code.
++ - -EINVAL --> pci_enable_device error status code.
++ - -EBUSY --> pci_request_regions/ alloc_chrdev_region
++ error status code.
++ - -ENOMEM --> pci_iomap/alloc_chrdev_region/cdev_add
++ error status code.
++
++@see
++ - ioh_gpio_driver
++*/
++static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
++ const struct pci_device_id *id)
++{
++
++ char *DRIVER_NAME = "ioh_gpio";
++ int ret;
++ ioh_gpio_bit_mask =
++ (pdev->device ==
++ PCI_DEVICE_ID_IVI_GPIO) ? BIT_MASK_16 : BIT_MASK_12;
++ IOH_DEBUG("ioh_gpio_probe : The value of ioh_gpio_bit_mask is: %x\n",
++ ioh_gpio_bit_mask);
++
++ ioh_gpio_major_no = (ioh_gpio_major_no < 0
++ || ioh_gpio_major_no >
++ 254) ? 0 : ioh_gpio_major_no;
++
++ do {
++
++ ret = pci_enable_device(pdev);
++ if (ret) {
++ IOH_LOG(KERN_ERR,
++ "\nioh_gpio_probe : pci_enable_device FAILED");
++ break;
++ }
++ IOH_DEBUG("ioh_gpio_probe : pci_enable_device returns %d\n",
++ ret);
++
++ ret = pci_request_regions(pdev, DRIVER_NAME);
++ if (ret) {
++ IOH_LOG(KERN_ERR,
++ "ioh_gpio_probe : pci_request_regions FAILED");
++ pci_disable_device(pdev);
++ break;
++ }
++ IOH_DEBUG("ioh_gpio_probe : pci_request_regions returns %d\n",
++ ret);
++
++ ioh_gpio_base_address = (unsigned long)pci_iomap(pdev, 1, 0);
++
++ if (ioh_gpio_base_address == 0) {
++ IOH_LOG(KERN_ERR, "ioh_gpio_probe : pci_iomap FAILED");
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
++ ret = -ENOMEM;
++ break;
++ }
++
++ IOH_DEBUG
++ ("ioh_gpio_probe : pci_iomap SUCCESS and value in\
++ ioh_gpio_base_address"
++ "variable is %d\n", ioh_gpio_base_address);
++
++ if (ioh_gpio_major_no) {
++ ioh_gpio_dev_no = MKDEV(ioh_gpio_major_no, 0);
++ ret =
++ register_chrdev_region(ioh_gpio_dev_no,
++ IOH_MINOR_NOS, DRIVER_NAME);
++ if (ret) {
++ IOH_LOG(KERN_ERR,
++ "ioh_gpio_probe : register_chrdev_\
++ region FAILED");
++ pci_iounmap(pdev,
++ (void *)ioh_gpio_base_address);
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
++ break;
++ }
++ IOH_DEBUG
++ ("ioh_gpio_probe : register_chrdev_region\
++ returns %d\n",
++ ret);
++ } else {
++ ret =
++ alloc_chrdev_region(&ioh_gpio_dev_no, 0,
++ IOH_MINOR_NOS, DRIVER_NAME);
++ if (ret) {
++ IOH_LOG(KERN_ERR,
++ "ioh_gpio_probe : alloc_chrdev_region\
++ FAILED");
++ pci_iounmap(pdev,
++ (void *)ioh_gpio_base_address);
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
++ break;
++ }
++ IOH_DEBUG
++ ("ioh_gpio_probe : alloc_chrdev_region\
++ returns %d\n",
++ ret);
++ }
++
++ cdev_init(&ioh_gpio_dev, &ioh_gpio_fops);
++ IOH_DEBUG("ioh_gpio_probe : cdev_init invoked successfully\n");
++
++ ioh_gpio_dev.owner = THIS_MODULE;
++ ioh_gpio_dev.ops = &ioh_gpio_fops;
++
++ ret = cdev_add(&ioh_gpio_dev, ioh_gpio_dev_no, IOH_MINOR_NOS);
++ if (ret) {
++ IOH_LOG(KERN_ERR, "ioh_gpio_probe : cdev_add FAILED");
++ unregister_chrdev_region(ioh_gpio_dev_no,
++ IOH_MINOR_NOS);
++ pci_iounmap(pdev, (void *)ioh_gpio_base_address);
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
++ break;
++ }
++ IOH_DEBUG("ioh_gpio_probe : cdev_add returns- %d\n", ret);
++
++ ioh_gpio_cb_register(ioh_gpio_cb);
++ ioh_gpio_irq = pdev->irq;
++ IOH_DEBUG("ioh_gpio_probe returns %d\n", IOH_GPIO_SUCCESS);
++ device_set_wakeup_enable(&pdev->dev, 1);
++ return IOH_GPIO_SUCCESS;
++ } while (0);
++ IOH_DEBUG("ioh_gpio_probe returns %d\n", ret);
++ return ret;
++}
++
++/*! @ingroup GPIO_PCILayerAPI
++@fn static void __devexit ioh_gpio_remove(struct pci_dev * pdev)
++@remarks Implements the remove functionality of the module.
++The main tasks performed by this function are:
++-Disables the interrupts by invoking @ref ioh_gpio_int_mode API.
++-Removes the device from the system using cdev_del API
++-Un-registers the char device number by invoking unregister_chrdev_region API.
++-Releases the IO memory using pci_iounmap API
++-Releases the resources acquired using pci_release_regions API
++-Disables the pci device using pci_disable_device API
++
++@param pdev [@ref INOUT] Contains the reference of the pci_dev structure
++@retval None
++@see
++ - ioh_gpio_driver
++*/
++static void __devexit ioh_gpio_remove(struct pci_dev *pdev)
++{
++
++ struct ioh_gpio_reqt req;
++ memset(&req, 0, sizeof(req));
++ req.pins = IOH_GPIO_ALL_PINS;
++ /* disable interrupts for all gpio pins */
++ (void)ioh_gpio_int_mode(&req);
++
++ cdev_del(&ioh_gpio_dev);
++ IOH_DEBUG("ioh_gpio_remove - cdev_del Invoked successfully\n");
++
++ unregister_chrdev_region(ioh_gpio_dev_no, IOH_MINOR_NOS);
++ IOH_DEBUG
++ ("ioh_gpio_remove - unregister_chrdev_region Invoked\
++ successfully\n");
++
++ pci_iounmap(pdev, (void *)ioh_gpio_base_address);
++
++ IOH_DEBUG("ioh_gpio_remove - pci_iounmap Invoked successfully\n");
++
++ pci_release_regions(pdev);
++ IOH_DEBUG
++ ("ioh_gpio_remove - pci_release_regions Invoked successfully\n");
++
++ pci_disable_device(pdev);
++ IOH_DEBUG
++ ("ioh_gpio_remove - pci_disable_device Invoked successfully\n");
++
++}
++
++#ifdef CONFIG_PM
++
++/*! @ingroup GPIO_PCILayerAPI
++@fn static s32 ioh_gpio_suspend(struct pci_dev* pdev,pm_message_t state)
++@remarks Implements the suspend functionality of the module. The main
++tasks performed by this function are:
++- Saves the current pin configuration by invoking
++ @ref ioh_gpio_save_reg_conf API.
++- Invokes pci_enable_wake with the enable parameter as 0,
++ so as to ensure that the device has its "wake" ability disabled
++- Saves the current state by invoking pci_save_state API.
++ If it fails then return with its error code.
++- Disables PCI device by invoking pci_disable_device API.
++- Sets the power state to low power mode by invoking
++ pci_set_power_state API and return @ref IOH_GPIO_SUCCESS status code.
++
++@param pdev [@ref INOUT] Contains the reference of the pci_dev structure
++@param state [@ref INOUT] Contains the reference of the pm_message_t
++ structure
++@retval int
++ - @ref IOH_GPIO_SUCCESS --> Operation successful.
++ - -ENOMEM --> pci_save_state error status code.
++@see
++ - ioh_gpio_driver
++
++*/
++static int ioh_gpio_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ int ret;
++
++ ioh_gpio_suspended = true; /* For blocking further IOCTLs */
++
++ ioh_gpio_save_reg_conf();
++ IOH_DEBUG
++ ("ioh_gpio_suspend - ioh_gpio_save_reg_conf Invoked successfully\n");
++ ioh_gpio_restore_reg_conf();
++
++ ret = pci_save_state(pdev);
++ if (ret) {
++ IOH_LOG(KERN_ERR,
++ " ioh_gpio_suspend -pci_save_state returns-%d\n", ret);
++ return ret;
++ }
++
++ IOH_DEBUG("ioh_gpio_suspend - pci_save_state returns %d\n", ret);
++
++ pci_disable_device(pdev);
++ IOH_DEBUG
++ ("ioh_gpio_suspend - pci_disable_device Invoked successfully\n");
++
++ pci_set_power_state(pdev, PCI_D0);
++ IOH_DEBUG
++ ("ioh_gpio_suspend - pci_set_power_state Invoked successfully\n");
++
++ ret = pci_enable_wake(pdev, PCI_D0, 1);
++ if (!ret) {
++ IOH_DEBUG
++ ("ioh_gpio_suspend - pci_enable_wake Invoked successfully\n");
++ } else {
++ IOH_DEBUG("ioh_gpio_suspend - pci_enable_wake failed\n");
++ }
++ IOH_LOG(KERN_ERR, "ioh_gpio_suspend - return %d\n", IOH_GPIO_SUCCESS);
++
++ return IOH_GPIO_SUCCESS;
++}
++
++/*! @ingroup GPIO_PCILayerAPI
++@fn static s32 ioh_gpio_resume(struct pci_dev* pdev)
++@remarks Implements the resume functionality of the module. The main
++tasks performed by this function are:
++-Changes the power state of the device to D0 using pci_set_power_state API.
++-Invokes pci_restore_state API to restore the PCI register state
++-Invokes pci_enable_device API to enable the PCI device.
++If it fails, then return its error code.
++-To ensure that the device has its "wake" ability disabled,
++invokes pci_enable_wake with the enable parameter as 0
++-Invokes @ref ioh_gpio_restore_reg_conf API to restore the GPIO register
++configuration values and returns @ref IOH_GPIO_SUCCESS status code.
++
++@param pdev [@ref INOUT] Contains the reference of the pci_dev structure
++@retval int
++ - @ref IOH_GPIO_SUCCESS --> Operation successful.
++ - -EIO --> pci_enable_device error status code.
++ - -EINVAL --> pci_enable_device error status code.
++
++@see
++ - ioh_gpio_driver
++
++*/
++static int ioh_gpio_resume(struct pci_dev *pdev)
++{
++
++ int ret;
++
++ ret = pci_enable_wake(pdev, PCI_D0, 0);
++ IOH_LOG(KERN_ERR,
++ "ioh_gpio_resume - pci_set_power_state Invoked successfully\n");
++
++ pci_set_power_state(pdev, PCI_D0);
++ IOH_DEBUG
++ ("ioh_gpio_resume - pci_set_power_state Invoked successfully\n");
++
++ ret = pci_enable_device(pdev);
++ if (ret) {
++ IOH_LOG(KERN_ERR, "ioh_gpio_resume-pci_enable_device failed ");
++ return ret;
++ }
++
++ IOH_DEBUG("ioh_gpio_resume - pci_enable_device returns -%d\n", ret);
++
++ pci_restore_state(pdev);
++ IOH_DEBUG("ioh_gpio_resume - pci_restore_state Invoked successfully\n");
++
++ ioh_gpio_writereg(0x3c, 0x00000001); /*reset*/
++ ioh_gpio_writereg(0x3c, 0x00000000);
++ ioh_gpio_restore_reg_conf();
++ ioh_gpio_suspended = false;
++
++ IOH_DEBUG("ioh_gpio_resume returns- %d\n", IOH_GPIO_SUCCESS);
++ return IOH_GPIO_SUCCESS;
++}
++
++#endif
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-i2c.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-i2c.patch
new file mode 100644
index 0000000..e6f6f67
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-i2c.patch
@@ -0,0 +1,3435 @@
+
+
+From: Masayuki Ohtake <masa-korg@dsn.okisemi.com>
+Subject: OKI Semiconductor PCH I2C driver
+
+This driver implements I2C controls for PCH.
+
+Signed-off-by: Masayuki Ohtake <masa-korg@dsn.okisemi.com>
+Acked-by: Wang Qi <qi.wang@intel.com>
+
+---
+ drivers/i2c/busses/Kconfig | 7 +
+ drivers/i2c/busses/Makefile | 3
+ drivers/i2c/busses/pch_common.h | 146
+ drivers/i2c/busses/pch_debug.h | 60
+ drivers/i2c/busses/pch_i2c_hal.c | 1930
+ drivers/i2c/busses/pch_i2c_hal.h | 337
+ drivers/i2c/busses/pch_i2c_main.c | 247
+ drivers/i2c/busses/pch_i2c_pci.c | 583
+ drivers/i2c/i2c-dev.c | 28
++++++++++++++++++++++++++++++++ 9 files changed, yy insertions(+)
+diff -urN linux-2.6.33.1/drivers/i2c/busses/Kconfig topcliff-2.6.33.1/drivers/i2c/busses/Kconfig
+--- linux-2.6.33.1/drivers/i2c/busses/Kconfig 2010-03-16 01:09:39.000000000 +0900
++++ topcliff-2.6.33.1/drivers/i2c/busses/Kconfig 2010-03-23 10:40:18.000000000 +0900
+@@ -7,6 +7,13 @@
+ comment "PC SMBus host controller drivers"
+ depends on PCI
+
++config PCH_I2C
++ tristate "PCH I2C"
++ depends on PCI
++ help
++ If you say yes to this option, support will be included for the SMB
++ PCH I2C Host controller.
++
+ config I2C_ALI1535
+ tristate "ALI 1535"
+ depends on PCI
+diff -urN linux-2.6.33.1/drivers/i2c/busses/Makefile topcliff-2.6.33.1/drivers/i2c/busses/Makefile
+--- linux-2.6.33.1/drivers/i2c/busses/Makefile 2010-03-16 01:09:39.000000000 +0900
++++ topcliff-2.6.33.1/drivers/i2c/busses/Makefile 2010-03-23 10:40:18.000000000 +0900
+@@ -75,3 +75,6 @@
+ ifeq ($(CONFIG_I2C_DEBUG_BUS),y)
+ EXTRA_CFLAGS += -DDEBUG
+ endif
++
++obj-$(CONFIG_PCH_I2C) += pch_i2c.o
++pch_i2c-objs := pch_i2c_main.o pch_i2c_pci.o pch_i2c_hal.o
+diff -urN linux-2.6.33.1/drivers/i2c/busses/pch_common.h topcliff-2.6.33.1/drivers/i2c/busses/pch_common.h
+--- linux-2.6.33.1/drivers/i2c/busses/pch_common.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33.1/drivers/i2c/busses/pch_common.h 2010-03-23 10:40:18.000000000 +0900
+@@ -0,0 +1,146 @@
++/*!
++ * @file ioh_common.h
++ * @brief Provides the macro definitions used by all files.
++ * @version 1.0.0.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ */
++
++#ifndef __IOH_COMMON_H__
++#define __IOH_COMMON_H__
++
++/*! @ingroup Global
++@def IOH_WRITE8
++@brief Macro for writing 8 bit data to an io/mem address
++*/
++#define IOH_WRITE8(val, addr) iowrite8((val), (void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_LOG
++@brief Macro for writing 16 bit data to an io/mem address
++*/
++#define IOH_WRITE16(val, addr) iowrite16((val), (void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_LOG
++@brief Macro for writing 32 bit data to an io/mem address
++*/
++#define IOH_WRITE32(val, addr) iowrite32((val), (void __iomem *)(addr))
++
++/*! @ingroup Global
++@def IOH_READ8
++@brief Macro for reading 8 bit data from an io/mem address
++*/
++#define IOH_READ8(addr) ioread8((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_READ16
++@brief Macro for reading 16 bit data from an io/mem address
++*/
++#define IOH_READ16(addr) ioread16((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_READ32
++@brief Macro for reading 32 bit data from an io/mem address
++*/
++#define IOH_READ32(addr) ioread32((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_WRITE32_F
++@brief Macro for writing 32 bit data to an io/mem address
++*/
++#define IOH_WRITE32_F(val, addr) do \
++ { IOH_WRITE32((val), (addr)); (void)IOH_READ32((addr)); } while (0);
++
++/*! @ingroup Global
++@def IOH_WRITE_BYTE
++@brief Macro for writing 1 byte data to an io/mem address
++*/
++#define IOH_WRITE_BYTE IOH_WRITE8
++/*! @ingroup Global
++@def IOH_WRITE_WORD
++@brief Macro for writing 1 word data to an io/mem address
++*/
++#define IOH_WRITE_WORD IOH_WRITE16
++/*! @ingroup Global
++@def IOH_WRITE_LONG
++@brief Macro for writing long data to an io/mem address
++*/
++#define IOH_WRITE_LONG IOH_WRITE32
++
++/*! @ingroup Global
++@def IOH_READ_BYTE
++@brief Macro for reading 1 byte data from an io/mem address
++*/
++#define IOH_READ_BYTE IOH_READ8
++/*! @ingroup Global
++@def IOH_READ_WORD
++@brief Macro for reading 1 word data from an io/mem address
++*/
++#define IOH_READ_WORD IOH_READ16
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief Macro for reading long data from an io/mem address
++*/
++#define IOH_READ_LONG IOH_READ32
++
++/* Bit Manipulation Macros */
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to set a specified bit(mask) at the
++ specified address
++*/
++#define IOH_SET_ADDR_BIT(addr, bitmask) IOH_WRITE_LONG((IOH_READ_LONG(addr) |\
++ (bitmask)), (addr))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to clear a specified bit(mask) at the specified address
++*/
++#define IOH_CLR_ADDR_BIT(addr, bitmask) IOH_WRITE_LONG((IOH_READ_LONG(addr) &\
++ ~(bitmask)), (addr))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to set a specified bitmask for a variable
++*/
++#define IOH_SET_BITMSK(var, bitmask) ((var) |= (bitmask))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to clear a specified bitmask for a variable
++*/
++#define IOH_CLR_BITMSK(var, bitmask) ((var) &= (~(bitmask)))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to set a specified bit for a variable
++*/
++#define IOH_SET_BIT(var, bit) ((var) |= (1<<(bit)))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to clear a specified bit for a variable
++*/
++#define IOH_CLR_BIT(var, bit) ((var) &= ~(1<<(bit)))
++
++#endif
+diff -urN linux-2.6.33.1/drivers/i2c/busses/pch_debug.h topcliff-2.6.33.1/drivers/i2c/busses/pch_debug.h
+--- linux-2.6.33.1/drivers/i2c/busses/pch_debug.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33.1/drivers/i2c/busses/pch_debug.h 2010-03-23 10:40:18.000000000 +0900
+@@ -0,0 +1,60 @@
++/*!
++ * @file ioh_debug.h
++ * @brief Provides the macro definitions used for debugging.
++ * @version 1.0.0.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ */
++
++#ifndef __IOH_DEBUG_H__
++#define __IOH_DEBUG_H__
++
++#ifdef MODULE
++#define IOH_LOG(level, fmt, args...) printk(level "%s:" fmt "\n",\
++ THIS_MODULE->name, ##args)
++#else
++#define IOH_LOG(level, fmt, args...) printk(level "%s:" fmt "\n" ,\
++ __FILE__, ##args)
++#endif
++
++
++#ifdef DEBUG
++ #define IOH_DEBUG(fmt, args...) IOH_LOG(KERN_DEBUG, fmt, ##args)
++#else
++ #define IOH_DEBUG(fmt, args...)
++#endif
++
++#ifdef IOH_TRACE_ENABLED
++ #define IOH_TRACE IOH_DEBUG
++#else
++ #define IOH_TRACE(fmt, args...)
++#endif
++
++#define IOH_TRACE_ENTER IOH_TRACE("Enter %s", __func__)
++#define IOH_TRACE_EXIT IOH_TRACE("Exit %s", __func__)
++
++
++#endif
+diff -urN linux-2.6.33.1/drivers/i2c/busses/pch_i2c_hal.c topcliff-2.6.33.1/drivers/i2c/busses/pch_i2c_hal.c
+--- linux-2.6.33.1/drivers/i2c/busses/pch_i2c_hal.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33.1/drivers/i2c/busses/pch_i2c_hal.c 2010-03-23 10:40:18.000000000 +0900
+@@ -0,0 +1,1930 @@
++/*!
++* @file ioh_i2c_hal.c
++* @brief This file contains definitions of HAL Layer APIs and
++* Internal functions
++* @version 0.95
++* @section
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; version 2 of the License.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++*/
++
++/*
++* History:
++* Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++* All rights reserved.
++*
++* created:
++* WIPRO 02/20/2009
++* modified:
++* WIPRO 05/21/2009
++*
++*/
++
++/*includes*/
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/i2c.h>
++#include <linux/fs.h>
++#include <linux/io.h>
++#include <linux/types.h>
++#include <linux/interrupt.h>
++#include <linux/jiffies.h>
++
++#include "pch_i2c_hal.h"
++#include "pch_common.h"
++#include "pch_debug.h"
++
++/**
++ *macro definition
++ */
++
++/*! @ingroup I2C_HALLayer
++@def IOH_I2CSADR
++@brief I2CSADR register offset
++*/
++#define IOH_I2CSADR (0x00) /* I2C slave address register */
++
++/*! @ingroup I2C_HALLayer
++@def IOH_I2CCTL
++@brief I2CCTL register offset
++*/
++#define IOH_I2CCTL (0x04) /* I2C control register */
++
++/*! @ingroup I2C_HALLayer
++@def IOH_I2CSR
++@brief I2CSR register offset
++*/
++#define IOH_I2CSR (0x08) /* I2C status register */
++
++/*! @ingroup I2C_HALLayer
++@def IOH_I2CDR
++@brief I2CDR register offset
++*/
++#define IOH_I2CDR (0x0C) /* I2C data register */
++
++/*! @ingroup I2C_HALLayer
++@def IOH_I2CMON
++@brief I2CMON register offset
++*/
++#define IOH_I2CMON (0x10) /* I2C bus monitor register */
++
++/*! @ingroup I2C_HALLayer
++@def IOH_I2CBC
++@brief I2CBC register offset
++*/
++#define IOH_I2CBC (0x14) /* I2C bus transfer rate setup counter */
++
++/*! @ingroup I2C_HALLayer
++@def IOH_I2CMOD
++@brief I2CMOD register offset
++*/
++#define IOH_I2CMOD (0x18) /* I2C mode register */
++
++/*! @ingroup I2C_HALLayer
++@def IOH_I2CBUFSLV
++@brief I2CBUFSLV register offset
++*/
++#define IOH_I2CBUFSLV (0x1C) /* I2C buffer mode slave address register */
++
++/*! @ingroup I2C_HALLayer
++@def IOH_I2CBUFSUB
++@brief I2CBUFSUB register offset
++*/
++#define IOH_I2CBUFSUB (0x20) /* I2C buffer mode subaddress register */
++
++/*! @ingroup I2C_HALLayer
++@def IOH_I2CBUFFOR
++@brief I2CBUFFOR register offset
++*/
++#define IOH_I2CBUFFOR (0x24) /* I2C buffer mode format register */
++
++/*! @ingroup I2C_HALLayer
++@def IOH_I2CBUFCTL
++@brief I2CBUFCTL register offset
++*/
++#define IOH_I2CBUFCTL (0x28) /* I2C buffer mode control register */
++
++/*! @ingroup I2C_HALLayer
++@def IOH_I2CBUFMSK
++@brief I2CBUFMSK register offset
++*/
++#define IOH_I2CBUFMSK (0x2C) /* I2C buffer mode interrupt mask register */
++
++/*! @ingroup I2C_HALLayer
++@def IOH_I2CBUFSTA
++@brief I2CBUFSTA register offset
++*/
++#define IOH_I2CBUFSTA (0x30) /* I2C buffer mode status register */
++
++/*! @ingroup I2C_HALLayer
++@def IOH_I2CBUFLEV
++@brief I2CBUFLEV register offset
++*/
++#define IOH_I2CBUFLEV (0x34) /* I2C buffer mode level register */
++
++/*! @ingroup I2C_HALLayer
++@def IOH_I2CESRFOR
++@brief I2CESRFOR register offset
++*/
++#define IOH_I2CESRFOR (0x38) /* EEPROM software reset mode format register */
++
++/*! @ingroup I2C_HALLayer
++@def IOH_I2CESRCTL
++@brief I2CESRCTL register offset
++*/
++#define IOH_I2CESRCTL (0x3C) /* EEPROM software reset mode control register */
++
++/*! @ingroup I2C_HALLayer
++@def IOH_I2CESRMSK
++@brief I2CESRMSK register offset
++*/
++#define IOH_I2CESRMSK (0x40) /* EEPROM software reset mode
++ * interrupt mask register */
++
++/*! @ingroup I2C_HALLayer
++@def IOH_I2CESRSTA
++@brief I2CESRSTA register offset
++*/
++#define IOH_I2CESRSTA (0x44) /* EEPROM software reset mode status register */
++
++/*! @ingroup I2C_HALLayer
++@def IOH_I2CTMR
++@brief I2CTMR register offset
++*/
++#define IOH_I2CTMR (0x48) /* I2C timer register */
++
++/*! @ingroup I2C_HALLayer
++@def IOH_I2CSRST
++@brief I2CSRST register offset
++*/
++#define IOH_I2CSRST (0xFC) /* I2C reset register */
++
++/*! @ingroup I2C_HALLayer
++@def IOH_I2CNF
++@brief I2CNF register offset
++*/
++#define IOH_I2CNF (0xF8) /* I2C noise filter register */
++
++/*! @ingroup I2C_HALLayer
++@def BUS_IDLE_TIMEOUT
++@brief Time out value when waiting for Bus Idle
++*/
++#define BUS_IDLE_TIMEOUT (20)
++
++/*! @ingroup I2C_HALLayer
++@def IOH_I2CCTL_I2CMEN
++@brief Bitmask to enable I2CMEN bit
++*/
++#define IOH_I2CCTL_I2CMEN (0x0080)
++
++/*! @ingroup I2C_HALLayer
++@def TEN_BIT_ADDR_DEFAULT
++@brief Default bits to be added for 10 bit addressing
++*/
++#define TEN_BIT_ADDR_DEFAULT (0xF000)
++
++/*! @ingroup I2C_HALLayer
++@def TEN_BIT_ADDR_MASK
++@brief 10 bit address mask
++*/
++#define TEN_BIT_ADDR_MASK (0xF0)
++
++/*! @ingroup I2C_HALLayer
++@def IOH_START
++@brief Set the start bit in Normal mode
++*/
++#define IOH_START (0x0020)
++
++/*! @ingroup I2C_HALLayer
++@def IOH_ESR_START
++@brief Bitmask to set Start bit in EEPROM Software Reset mode
++*/
++#define IOH_ESR_START (0x0001)
++
++/*! @ingroup I2C_HALLayer
++@def IOH_BUFF_START
++@brief Bitmask to set Start bit in Buffer mode
++*/
++#define IOH_BUFF_START (0x1)
++
++/*! @ingroup I2C_HALLayer
++@def IOH_REPSTART
++@brief Bitmask to set repeated start bit
++*/
++#define IOH_REPSTART (0x0004)
++
++/*! @ingroup I2C_HALLayer
++@def IOH_ACK
++@brief Ack bit position in I2CCTL register
++*/
++#define IOH_ACK (0x0008)
++
++/*! @ingroup I2C_HALLayer
++@def IOH_GETACK
++@brief Mask to extract the ack bit
++*/
++#define IOH_GETACK (0x0001)
++
++/*! @ingroup I2C_HALLayer
++@def CLR_REG
++@brief Mask for register reset
++*/
++#define CLR_REG (0x0)
++/*! @ingroup I2C_HALLayer
++@def I2C_RD
++@brief Set read bit in I2CDR with slave address
++*/
++#define I2C_RD (0x1)
++
++/*! @ingroup I2C_HALLayer
++@def I2CMCF_BIT
++@brief Mask for I2CMCF bit
++*/
++#define I2CMCF_BIT (0x0080)
++
++/*! @ingroup I2C_HALLayer
++@def I2CMIF_BIT
++@brief Mask for I2CMIF bit
++*/
++#define I2CMIF_BIT (0x0002)
++
++/*! @ingroup I2C_HALLayer
++@def I2CMAL_BIT
++@brief Mask for I2CMAL bit
++*/
++#define I2CMAL_BIT (0x0010)
++
++/*! @ingroup I2C_HALLayer
++@def I2CBMFI_BIT
++@brief Mask for I2CBMFI bit
++*/
++#define I2CBMFI_BIT (0x0001)
++
++/*! @ingroup I2C_HALLayer
++@def I2CBMAL_BIT
++@brief Mask for I2CBMAL bit
++*/
++#define I2CBMAL_BIT (0x0002)
++
++/*! @ingroup I2C_HALLayer
++@def I2CBMNA_BIT
++@brief Mask for I2CBMNA bit
++*/
++#define I2CBMNA_BIT (0x0004)
++
++/*! @ingroup I2C_HALLayer
++@def I2CBMTO_BIT
++@brief Mask for I2CBMTO bit
++*/
++#define I2CBMTO_BIT (0x0008)
++
++/*! @ingroup I2C_HALLayer
++@def I2CBMIS_BIT
++@brief Mask for I2CBMIS bit
++*/
++#define I2CBMIS_BIT (0x0010)
++
++/*! @ingroup I2C_HALLayer
++@def I2CESRFI_BIT
++@brief Mask for I2CESRFI bit
++*/
++#define I2CESRFI_BIT (0X0001)
++
++/*! @ingroup I2C_HALLayer
++@def I2CESRTO_BIT
++@brief Mask for I2CESRTO bit
++*/
++#define I2CESRTO_BIT (0x0002)
++
++/*! @ingroup I2C_HALLayer
++@def I2CESRFIIE_BIT
++@brief Mask for I2CESRFIIE bit
++*/
++#define I2CESRFIIE_BIT (0x1)
++
++/*! @ingroup I2C_HALLayer
++@def I2CESRTOIE_BIT
++@brief Mask for I2CESRTOIE bit
++*/
++#define I2CESRTOIE_BIT (0x2)
++
++/*! @ingroup I2C_HALLayer
++@def I2CBMDZ_BIT
++@brief Mask for I2CBMDZ bit
++*/
++#define I2CBMDZ_BIT (0x0040)
++
++/*! @ingroup I2C_HALLayer
++@def I2CBMAG_BIT
++@brief Mask for I2CBMAG bit
++*/
++#define I2CBMAG_BIT (0x0020)
++
++/*! @ingroup I2C_HALLayer
++@def I2CMBB_BIT
++@brief Mask for I2CMBB bit
++*/
++#define I2CMBB_BIT (0x0020)
++
++/*! @ingroup I2C_HALLayer
++@def BUFFER_MODE_MASK
++@brief Status bit mask in buffer mode
++*/
++#define BUFFER_MODE_MASK (I2CBMFI_BIT | I2CBMAL_BIT | I2CBMNA_BIT | \
++ I2CBMTO_BIT | I2CBMIS_BIT)
++
++/*! @ingroup I2C_HALLayer
++@def I2C_ADDR_MSK
++@brief Mask to get the 8 LSB bits in 10 bit addressing
++*/
++#define I2C_ADDR_MSK (0xFF)
++
++/*! @ingroup I2C_HALLayer
++@def I2C_MSB_2B_MSK
++@brief Mask to get the 2 MSB bits in 10 bit addressing
++*/
++#define I2C_MSB_2B_MSK (0x300)
++
++/*! @ingroup I2C_HALLayer
++@def FAST_MODE_CLK
++@brief Fast mode clock in KHz
++*/
++#define FAST_MODE_CLK (400)
++
++/*! @ingroup I2C_HALLayer
++@def FAST_MODE_EN
++@brief Enable the fast mode
++*/
++#define FAST_MODE_EN (0x0001)
++
++/*! @ingroup I2C_HALLayer
++@def SUB_ADDR_LEN_MAX
++@brief Maximum sub address length
++*/
++#define SUB_ADDR_LEN_MAX (4)
++
++/*! @ingroup I2C_HALLayer
++@def BUF_LEN_MAX
++@brief Maximum buffer length in buffer mode
++*/
++#define BUF_LEN_MAX (32)
++
++/*! @ingroup I2C_HALLayer
++@def IOH_BUFFER_MODE
++@brief To enable the buffer mode
++*/
++#define IOH_BUFFER_MODE (0x1)
++
++/*! @ingroup I2C_HALLayer
++@def EEPROM_SW_RST_MODE
++@brief Mask to enable the EEPROM Software Reset mode
++*/
++#define EEPROM_SW_RST_MODE (0x0002)
++
++/*! @ingroup I2C_HALLayer
++@def NORMAL_INTR_ENBL
++@brief Mask to enable the I2C interrupts in normal mode
++*/
++#define NORMAL_INTR_ENBL (0x0300)
++
++/*! @ingroup I2C_HALLayer
++@def EEPROM_RST_INTR_ENBL
++@brief Mask to enable I2CESRFI, I2CESRTO interrupts
++ in EEPROM Software Reset mode
++*/
++#define EEPROM_RST_INTR_ENBL (I2CESRFIIE_BIT | I2CESRTOIE_BIT)
++
++/*! @ingroup I2C_HALLayer
++@def EEPROM_RST_INTR_DISBL
++@brief Mask to disable interrupts in EEPROM Software Reset mode
++*/
++#define EEPROM_RST_INTR_DISBL (0x0)
++
++/*! @ingroup I2C_HALLayer
++@def BUFFER_MODE_INTR_ENBL
++@brief Mask to enable I2CBMIS,I2CBMTO,I2CBMNA,I2CBMAL,I2CBMFI
++ interrupts in Buffer mode
++*/
++#define BUFFER_MODE_INTR_ENBL (0x001F)
++
++/*! @ingroup I2C_HALLayer
++@def BUFFER_MODE_INTR_DISBL
++@brief Mask to disable all interrupts in Buffer mode
++*/
++#define BUFFER_MODE_INTR_DISBL (0x0)
++
++/*! @ingroup I2C_HALLayer
++@def NORMAL_MODE
++@brief Specifies Normal mode
++*/
++#define NORMAL_MODE (0x0)
++
++/*! @ingroup I2C_HALLayer
++@def BUFFER_MODE
++@brief Specifies Buffer mode
++*/
++#define BUFFER_MODE (0x1)
++
++/*! @ingroup I2C_HALLayer
++@def EEPROM_SR_MODE
++@brief Specifies EEPROM software reset mode
++*/
++#define EEPROM_SR_MODE (0x2)
++
++/*! @ingroup I2C_HALLayer
++@def I2C_TX_MODE
++@brief Specifies Master transmission mode
++*/
++#define I2C_TX_MODE (0x0010)
++
++/*! @ingroup I2C_HALLayer
++@def IOH_BUF_TX
++@brief Specifies Buffer transmission mode
++*/
++#define IOH_BUF_TX (0xFFF7)
++
++/*! @ingroup I2C_HALLayer
++@def IOH_BUF_RD
++@brief Specifies Buffer reception mode
++*/
++#define IOH_BUF_RD (0x0008)
++
++/*! @ingroup I2C_HALLayer
++@def I2C_ERROR_MASK
++@brief Mask for errors in all modes
++*/
++#define I2C_ERROR_MASK (I2CESRTO_EVENT | I2CBMIS_EVENT | I2CBMTO_EVENT | \
++ I2CBMNA_EVENT | I2CBMAL_EVENT | I2CMAL_EVENT)
++
++/*! @ingroup I2C_HALLayer
++@def I2CMAL_EVENT
++@brief MAL bit position in event flag
++*/
++#define I2CMAL_EVENT (0x0001)
++
++/*! @ingroup I2C_HALLayer
++@def I2CMCF_EVENT
++@brief MCF bit position in event flag
++*/
++#define I2CMCF_EVENT (0x0002)
++
++/*! @ingroup I2C_HALLayer
++@def I2CBMFI_EVENT
++@brief I2CBMFI bit position in event flag
++*/
++#define I2CBMFI_EVENT (0x0004)
++
++/*! @ingroup I2C_HALLayer
++@def I2CBMAL_EVENT
++@brief I2CBMAL bit position in event flag
++*/
++#define I2CBMAL_EVENT (0x0008)
++
++/*! @ingroup I2C_HALLayer
++@def I2CBMNA_EVENT
++@brief I2CBMNA bit position in event flag
++*/
++#define I2CBMNA_EVENT (0x0010)
++
++/*! @ingroup I2C_HALLayer
++@def I2CBMTO_EVENT
++@brief I2CBMTO bit position in event flag
++*/
++#define I2CBMTO_EVENT (0x0020)
++
++/*! @ingroup I2C_HALLayer
++@def I2CBMIS_EVENT
++@brief I2CBMIS bit position in event flag
++*/
++#define I2CBMIS_EVENT (0x0040)
++
++/*! @ingroup I2C_HALLayer
++@def I2CESRFI_EVENT
++@brief I2CESRFI bit position in event flag
++*/
++#define I2CESRFI_EVENT (0x0080)
++
++/*! @ingroup I2C_HALLayer
++@def I2CESRTO_EVENT
++@brief I2CESRTO bit position in event flag
++*/
++#define I2CESRTO_EVENT (0x0100)
++
++/*
++ * wait queue head
++ */
++
++/*! @ingroup I2C_UtilitiesAPI
++@var ioh_i2c_event
++@brief Wait queue head
++@remarks This global variable is used to synchronize
++ data handling with interrupts
++@see - ioh_i2c_init
++ - ioh_i2c_cb
++*/
++static wait_queue_head_t ioh_i2c_event;
++
++/* Function prototypes */
++
++/*! @ingroup I2C_UtilitiesAPI
++@fn ioh_i2c_start(struct i2c_algo_ioh_data * adap)
++@brief Function to generate start condition in normal mode
++*/
++static void ioh_i2c_start(struct i2c_algo_ioh_data *adap);
++
++/*! @ingroup I2C_UtilitiesAPI
++@fn ioh_i2c_buff_mode_start(struct i2c_algo_ioh_data * adap)
++@brief Function to generate start condition in buffer mode
++*/
++static void ioh_i2c_buff_mode_start(struct i2c_algo_ioh_data *adap);
++
++/*! @ingroup I2C_UtilitiesAPI
++@fn ioh_i2c_eeprom_swrst_start(struct i2c_algo_ioh_data * adap)
++@brief Function to generate start condition in EEPROM Software
++ Reset mode
++*/
++static void ioh_i2c_eeprom_swrst_start(struct i2c_algo_ioh_data *adap);
++
++/*! @ingroup I2C_UtilitiesAPI
++@fn ioh_i2c_stop(struct i2c_algo_ioh_data *adap)
++@brief Function to generate stop condition in normal mode
++*/
++static void ioh_i2c_stop(struct i2c_algo_ioh_data *adap);
++
++/*! @ingroup I2C_UtilitiesAPI
++@fn ioh_i2c_repstart(struct i2c_algo_ioh_data *adap)
++@brief Function to generate repeated start condition in normal mode
++*/
++static void ioh_i2c_repstart(struct i2c_algo_ioh_data *adap);
++
++/*! @ingroup I2C_UtilitiesAPI
++@fn ioh_i2c_getack(struct i2c_algo_ioh_data *adap)
++@brief Function to confirm ACK/NACK
++*/
++static s32 ioh_i2c_getack(struct i2c_algo_ioh_data *adap);
++
++/*! @ingroup I2C_UtilitiesAPI
++@fn ioh_i2c_sendack(struct i2c_algo_ioh_data *adap)
++@brief Function to send ACK
++*/
++static void ioh_i2c_sendack(struct i2c_algo_ioh_data *adap);
++
++/*! @ingroup I2C_UtilitiesAPI
++@fn ioh_i2c_sendnack(struct i2c_algo_ioh_data *adap)
++@brief Function to send NACK
++*/
++static void ioh_i2c_sendnack(struct i2c_algo_ioh_data *adap);
++
++/*! @ingroup I2C_UtilitiesAPI
++@fn ioh_i2c_wait_for_bus_idle
++ (struct i2c_algo_ioh_data *adap,s32 timeout)
++@brief Function to check the status of bus
++*/
++static s32 ioh_i2c_wait_for_bus_idle(struct i2c_algo_ioh_data *adap,
++ s32 timeout);
++
++/*! @ingroup I2C_UtilitiesAPI
++@fn ioh_i2c_wait_for_xfer_complete(struct i2c_algo_ioh_data *adap)
++@brief Function to wait till transfer complete.
++*/
++static s32 ioh_i2c_wait_for_xfer_complete(struct i2c_algo_ioh_data *adap);
++
++/*! @ingroup I2C_HALLayerAPI
++ @fn ioh_i2c_init(struct i2c_algo_ioh_data * adap)
++ @remarks Implements the hardware initialization of I2C module.
++ The main tasks performed by this method are:
++ - Clear I2CCTL,I2CMOD,I2CBUFFOR,I2CBUFSLV,I2CBUFSUB,I2CBUFMSK,
++ I2CESRFOR,I2CESRMSK registers.
++ - Set I2CMEN in I2CCTL to 1.
++ - Set bus speed based on module parameter.
++ - Enable required interrupts.
++ - Initialize wait queue head.
++ @note This function always returns @ref IOH_I2C_SUCCESS
++ @param adap [@ref IN] Contains reference to struct i2c_algo_ioh_data
++ @retval s32
++ - @ref IOH_I2C_SUCCESS Function returns successfully.
++ @see - ioh_i2c_probe
++ - ioh_i2c_resume
++ <hr>
++ */
++s32 ioh_i2c_init(struct i2c_algo_ioh_data *adap)
++{
++ u32 ioh_i2cbc;
++ u32 ioh_i2ctmr;
++ u32 reg_value = 0;
++
++#ifndef FPGA
++ /*reset I2C controller */
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CSRST, 0x1);
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CSRST, 0x0);
++#endif
++ /* Initialize I2C registers */
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CCTL, CLR_REG);
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CMOD, CLR_REG);
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CBUFFOR, CLR_REG);
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CBUFSLV, CLR_REG);
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CBUFSUB, CLR_REG);
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CBUFMSK, CLR_REG);
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CESRFOR, CLR_REG);
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CESRMSK, CLR_REG);
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CNF, 0x21);
++ IOH_DEBUG
++ ("Cleared the registers IOH_I2CCTL,IOH_I2CMOD,IOH_I2CBUFFOR\n,"
++ "IOH_I2CBUFSLV,IOH_I2CBUFSUB,IOH_I2CBUFMSK,"
++ "\nIOH_I2CESRFOR,IOH_I2CESRMSK\n");
++
++ reg_value |= IOH_I2CCTL_I2CMEN;
++ adap->set_reg_bit((adap->ioh_i2c_base_address), IOH_I2CCTL,
++ IOH_I2CCTL_I2CMEN);
++
++ ioh_i2c_speed = (ioh_i2c_speed == 400) ? 400 : 100;
++
++ if (ioh_i2c_speed == FAST_MODE_CLK) {
++ reg_value |= FAST_MODE_EN;
++ IOH_DEBUG("Fast mode enabled\n");
++ }
++
++ ioh_i2c_clk = (ioh_i2c_clk <= 0
++ || ioh_i2c_clk > IOH_I2C_MAX_CLK) ? 62500 : ioh_i2c_clk;
++
++ ioh_i2cbc = ((ioh_i2c_clk) + (ioh_i2c_speed * 4)) / (ioh_i2c_speed * 8);
++ /* Set transfer speed in I2CBC */
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CBC, ioh_i2cbc);
++
++ ioh_i2ctmr = (ioh_i2c_clk) / 8;
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CTMR, ioh_i2ctmr);
++
++ reg_value |= NORMAL_INTR_ENBL; /* Enable interrupts in normal mode */
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CCTL, reg_value);
++
++ IOH_DEBUG("In ioh_i2c_init: I2CCTL =%x\n",
++ (adap->readreg(adap->ioh_i2c_base_address, IOH_I2CCTL)));
++ IOH_DEBUG("In ioh_i2c_init: ioh_i2cbc =%x\n", ioh_i2cbc);
++ IOH_DEBUG("In ioh_i2c_init: ioh_i2ctmr =%x\n", ioh_i2ctmr);
++
++ IOH_DEBUG("Enable interrupts\n");
++ init_waitqueue_head(&ioh_i2c_event);
++ return IOH_I2C_SUCCESS;
++}
++
++/*! @ingroup I2C_HALLayerAPI
++ @fn ioh_i2c_writebytes(struct i2c_adapter *i2c_adap ,
++ struct i2c_msg *msgs, u32 last, u32 first)
++ @remarks Function to write data to I2C bus in normal mode.
++ The main tasks performed by this method are:
++ - Enable transmission mode.
++ - Send out the slave address.
++ - Wait for Bus idle and send out Start signal
++ - Perform data write operation.
++ - Send stop or repeat start as necessary, depending on whether
++ the current message is the last message or not.
++ - Return with number of bytes transferred successfully or
++ the error code
++ @param i2c_adap [@ref IN] contains reference to the struct i2c_adapter
++ @param msgs [@ref IN] contains reference to i2c_msg structure
++ @param last [@ref IN] specifies whether last message or not
++ In the case of compound mode it will be
++ 1 for last message, otherwise 0.
++ @param first [@ref IN] specifies whether first message or not
++ 1 for first message otherwise 0.
++ @retval s32
++ - Number of bytes transferred successfully
++ - @ref IOH_I2C_FAIL @ref ioh_i2c_wait_for_bus_idle,
++ @ref ioh_i2c_wait_for_xfer_complete,
++ @ref ioh_i2c_getack fails
++ - -ERESTARTSYS
++ @ref ioh_i2c_wait_for_xfer_complete was interrupted by a signal
++ @see ioh_i2c_xfer
++ <hr>
++ */
++s32 ioh_i2c_writebytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
++ u32 last, u32 first)
++{
++
++ struct i2c_algo_ioh_data *adap = i2c_adap->algo_data;
++
++ u8 *buf;
++ u32 length;
++ u32 addr;
++ u32 addr_2_msb;
++ u32 addr_8_lsb;
++ s32 wrcount = IOH_I2C_FAIL;
++ length = msgs->len;
++ buf = msgs->buf;
++ addr = msgs->addr;
++ /* enable master tx */
++ adap->set_reg_bit((adap->ioh_i2c_base_address), IOH_I2CCTL,
++ I2C_TX_MODE);
++
++ IOH_DEBUG("In ioh_i2c_writebytes : I2CCTL = %x\n",
++ (adap->readreg(adap->ioh_i2c_base_address, IOH_I2CCTL)));
++ IOH_DEBUG("In ioh_i2c_writebytes : msgs->len = %d\n", length);
++
++ if (first) {
++ if (ioh_i2c_wait_for_bus_idle(adap, BUS_IDLE_TIMEOUT) ==
++ IOH_I2C_FAIL) {
++ return IOH_I2C_FAIL;
++ }
++ }
++
++ if ((msgs->flags & I2C_M_TEN) != false) {
++ addr_2_msb = ((addr & I2C_MSB_2B_MSK) >> 7);
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CDR,
++ (addr_2_msb | TEN_BIT_ADDR_MASK));
++
++ if (first)
++ ioh_i2c_start(adap);
++ if ((ioh_i2c_wait_for_xfer_complete(adap) == IOH_I2C_SUCCESS) &&
++ (ioh_i2c_getack(adap) == IOH_I2C_SUCCESS)) {
++ addr_8_lsb = (addr & I2C_ADDR_MSK);
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CDR,
++ (addr_8_lsb));
++
++ } else {
++ ioh_i2c_stop(adap);
++ return IOH_I2C_FAIL;
++ }
++ } else {
++ /* set 7 bit slave address and R/W bit as 0 */
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CDR,
++ ((addr) << 1));
++ if (first)
++ ioh_i2c_start(adap);
++ }
++
++ if ((ioh_i2c_wait_for_xfer_complete(adap) == IOH_I2C_SUCCESS) &&
++ (ioh_i2c_getack(adap) == IOH_I2C_SUCCESS)) {
++ for (wrcount = 0; wrcount < length; ++wrcount) {
++ /* write buffer value to I2C data register */
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CDR,
++ buf[wrcount]);
++ IOH_DEBUG
++ ("ioh_i2c_writebytes : writing %x to Data register\n",
++ buf[wrcount]);
++
++ if (ioh_i2c_wait_for_xfer_complete(adap) !=
++ IOH_I2C_SUCCESS) {
++ wrcount = IOH_I2C_FAIL;
++ break;
++ }
++
++ IOH_DEBUG("ioh_i2c_wait_for_xfer_complete return %d",
++ IOH_I2C_SUCCESS);
++
++ if (ioh_i2c_getack(adap)) {
++ wrcount = IOH_I2C_FAIL;
++ break;
++ }
++ }
++
++ /* check if this is the last message */
++ if (last)
++ ioh_i2c_stop(adap);
++ else
++ ioh_i2c_repstart(adap);
++ } else {
++ ioh_i2c_stop(adap);
++ }
++
++ IOH_DEBUG(KERN_INFO, "ioh_i2c_writebytes return=%d\n", wrcount);
++
++ return wrcount;
++}
++
++/*! @ingroup I2C_HALLayerAPI
++ @fn ioh_i2c_readbytes(struct i2c_adapter *i2c_adap,
++ struct i2c_msg *msgs, u32 last, u32 first)
++ @remarks Function to read data from I2C bus in normal mode.
++ The main tasks performed by this method are:
++ - Enable Reception mode.
++ - Send out the slave address.
++ - Wait for Bus idle and send out Start signal
++ - Perform data reads.
++ - Send stop or repeat start as necessary, depending on whether
++ the current
++ message read is the last message or not
++ - Return with number of bytes read (if successful) or
++ the error code
++ @param i2c_adap [@ref IN] contains reference to the struct i2c_adapter
++ @param msgs [@ref INOUT] contains reference to i2c_msg structure
++ @param last [@ref IN] specifies whether last message or not
++ @param first [@ref IN] specifies whether first message or not
++ @retval s32 - Number of Bytes read successfully
++ - @ref IOH_I2C_FAIL @ref ioh_i2c_wait_for_bus_idle,
++ @ref ioh_i2c_wait_for_xfer_complete,
++ @ref ioh_i2c_getack fails
++ - -ERESTARTSYS
++ @ref ioh_i2c_wait_for_xfer_complete was interrupted by a signal
++ @see ioh_i2c_xfer
++ <hr>
++ */
++s32 ioh_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
++ u32 last, u32 first)
++{
++
++ struct i2c_algo_ioh_data *adap = i2c_adap->algo_data;
++
++ u8 *buf;
++ u32 count = IOH_I2C_FAIL;
++ u32 length;
++ u32 addr;
++ u32 addr_2_msb;
++ length = msgs->len;
++ buf = msgs->buf;
++ addr = msgs->addr;
++
++ /* enable master reception */
++ adap->clr_reg_bit((adap->ioh_i2c_base_address), IOH_I2CCTL,
++ I2C_TX_MODE);
++
++ if (first) {
++ if (ioh_i2c_wait_for_bus_idle(adap, BUS_IDLE_TIMEOUT) ==
++ IOH_I2C_FAIL) {
++ return IOH_I2C_FAIL;
++ }
++ }
++
++ if ((msgs->flags & I2C_M_TEN) != false) {
++ addr_2_msb = (((addr & I2C_MSB_2B_MSK) >> 7) | (I2C_RD));
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CDR,
++ (addr_2_msb | TEN_BIT_ADDR_MASK));
++
++ } else {
++ /* 7 address bits + R/W bit */
++ addr = (((addr) << 1) | (I2C_RD));
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CDR, addr);
++ }
++
++ /* check if it is the first message */
++ if (first == true)
++ ioh_i2c_start(adap);
++
++ if ((ioh_i2c_wait_for_xfer_complete(adap) == IOH_I2C_SUCCESS)
++ && (ioh_i2c_getack(adap) == IOH_I2C_SUCCESS)) {
++ IOH_DEBUG("ioh_i2c_wait_for_xfer_complete return %d",
++ IOH_I2C_SUCCESS);
++
++ if (length == 0) {
++
++ ioh_i2c_stop(adap);
++ (void)adap->readreg((adap->ioh_i2c_base_address),
++ IOH_I2CDR);
++
++ count = length;
++ } else {
++ int read_index = 0;
++ int loop;
++ ioh_i2c_sendack(adap);
++
++			/* First read is a dummy: read_index is not advanced
++			 * on loop 1, so its value is overwritten below. */
++
++ for (loop = 1; loop < length; loop++) {
++ buf[read_index] =
++ adap->readreg((adap->ioh_i2c_base_address),
++ IOH_I2CDR);
++
++ if (loop != 1)
++ read_index++;
++
++ if (ioh_i2c_wait_for_xfer_complete(adap) !=
++ IOH_I2C_SUCCESS) {
++ ioh_i2c_stop(adap);
++ return IOH_I2C_FAIL;
++ }
++
++ } /* end for */
++
++ ioh_i2c_sendnack(adap);
++
++ buf[read_index] =
++ adap->readreg((adap->ioh_i2c_base_address),
++ IOH_I2CDR);
++
++ if (length != 1)
++ read_index++;
++
++ if (ioh_i2c_wait_for_xfer_complete(adap) ==
++ IOH_I2C_SUCCESS) {
++ if (last)
++ ioh_i2c_stop(adap);
++ else
++ ioh_i2c_repstart(adap);
++
++ buf[read_index++] =
++ adap->readreg((adap->ioh_i2c_base_address),
++ IOH_I2CDR);
++ count = read_index;
++ }
++
++ }
++ } else {
++ ioh_i2c_stop(adap);
++ }
++
++ return count;
++}
++
++/*! @ingroup I2C_HALLayerAPI
++ @fn ioh_i2c_entcb(s32(*ioh_i2c_ptr)(struct i2c_algo_ioh_data *adap))
++ @remarks Function to register call back function.
++ The main tasks performed by this method are:
++ - Validate ioh_i2c_ptr
++ - Update the reference of the callback function in the callback
++ function pointer.
++ @param ioh_i2c_ptr [@ref IN] Contains reference to call back function
++ @retval None
++ @see ioh_i2c_probe
++ <hr>
++ */
++void ioh_i2c_entcb(s32(*ioh_i2c_ptr) (struct i2c_algo_ioh_data *adap))
++{
++ if (ioh_i2c_ptr != NULL) {
++ IOH_DEBUG("value in ioh_i2c_ptr = %p", ioh_i2c_ptr);
++ /* set the handler call back function */
++ ioh_i2c_cbr = ioh_i2c_ptr;
++ IOH_DEBUG("value updated in ioh_i2c_cbr = %p", ioh_i2c_cbr);
++ IOH_DEBUG("Invoked ioh_i2c_entcb successfully");
++
++ }
++}
++
++/*! @ingroup I2C_HALLayerAPI
++ @fn ioh_i2c_handler(int irq,void * pData)
++ @remarks This function implements the interrupt handler for
++ the IOH I2C controller.
++ The main tasks performed by this method are:
++ - Invoke callback function.
++ - Based on return value of callback function,
++ return IRQ_NONE or IRQ_HANDLED
++ @param irq [@ref IN] irq number
++ @param pData [@ref IN] cookie passed back to the handler function
++ @retval irqreturn_t
++ - IRQ_NONE Not our interrupt
++ - IRQ_HANDLED Interrupt serviced
++ @see ioh_i2c_probe
++ <hr>
++ */
++irqreturn_t ioh_i2c_handler(int irq, void *pData)
++{
++ s32 ret = 0;
++ u32 i;
++
++ struct adapter_info *adap_info = (struct adapter_info *)pData;
++ /* invoke the call back */
++
++ if (ioh_i2c_cbr != NULL) {
++ for (i = 0; i < IOH_I2C_MAX_CHN; i++)
++ ret |= (ioh_i2c_cbr) (&adap_info->ioh_i2c_data[i]);
++ } else {
++ IOH_LOG(KERN_ERR, " Call back pointer null ...");
++ }
++
++ IOH_DEBUG("ioh_i2c_cb return = %d\n", ret);
++
++ if (ret == IOH_I2C_EVENT_SET)
++ IOH_DEBUG(" ioh_i2c_handler return IRQ_HANDLED");
++ else
++ IOH_DEBUG("ioh_i2c_handler return IRQ_NONE");
++
++ return (ret == IOH_I2C_EVENT_SET) ? (IRQ_HANDLED) : (IRQ_NONE);
++}
++
++/*! @ingroup I2C_HALLayerAPI
++ @fn ioh_i2c_buffer_read
++ (struct i2c_adapter * i2c_adap,struct i2c_msg *msgs)
++ @remarks Function to read data from I2C bus in buffer mode.
++ The main tasks performed by this method are:
++ - Enable Buffer Mode.
++ - Set timeout interval in I2CTMR register.
++ - Enable buffer mode interrupts.
++ - Set the I2C Slave Address in the I2CBUFSLV register.
++ - Set the number of bytes, transmission mode and
++ sub-address length in I2CBUFFOR register.
++ - Perform the data read.
++ - Disable buffer mode interrupts.
++ @param i2c_adap [@ref IN] contains reference to the struct i2c_adapter
++ @param msgs [@ref INOUT] contains reference to i2c_msg structure
++ @retval s32
++ - @ref IOH_I2C_SUCCESS Function returns successfully
++ - @ref IOH_I2C_FAIL @ref ioh_i2c_wait_for_bus_idle,
++ @ref ioh_i2c_wait_for_xfer_complete,
++ @ref ioh_i2c_getack fails
++ - -ERESTARTSYS
++ @ref ioh_i2c_wait_for_xfer_complete was interrupted by a signal
++ @see ioh_i2c_xfer
++ <hr>
++ */
++s32 ioh_i2c_buffer_read(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs)
++{
++
++ struct i2c_algo_ioh_data *adap = i2c_adap->algo_data;
++
++ u32 loop;
++ u32 rdcount = 0;
++ u32 length;
++ u32 i2cbufsub = 0;
++ u32 addr;
++ u32 i2cbufslv_7_lsb;
++ u32 i2cbufslv_10_9_bit;
++ u32 msglen;
++ /* initialize to invalid length, so that no sub address is tx-ed */
++ u32 subaddrlen = 5;
++ u32 i2cmod_prev;
++ s32 i;
++ u32 time_interval = i2c_adap->timeout;
++ u32 i2ctmr;
++ s32 retvalue = IOH_I2C_FAIL;
++ u8 *buf;
++
++ length = msgs->len;
++ buf = msgs->buf;
++ addr = msgs->addr;
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CBUFMSK,
++ BUFFER_MODE_INTR_ENBL);
++
++ /* get the current value of I2C mod register */
++ i2cmod_prev = adap->readreg((adap->ioh_i2c_base_address), IOH_I2CMOD);
++
++ /* enable buffer mode */
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CMOD,
++ IOH_BUFFER_MODE);
++
++ time_interval = (time_interval <= 10) ? (time_interval) : (10);
++
++ /* value of I2CT = (Timeout interval * PCLK frequency)/ 8 */
++ i2ctmr = (time_interval * (ioh_i2c_clk)) / 8;
++
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CTMR, i2ctmr);
++
++ /* if 10 bit addressing is selected */
++
++ if ((msgs->flags & I2C_M_TEN) != false) {
++ /* get the 8 LSBits */
++ i2cbufslv_7_lsb = (addr & I2C_ADDR_MSK);
++
++ /* get the 2 MSBits */
++ i2cbufslv_10_9_bit = ((addr & I2C_MSB_2B_MSK) << 1);
++
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CBUFSLV,
++ (TEN_BIT_ADDR_DEFAULT | i2cbufslv_7_lsb |
++ i2cbufslv_10_9_bit));
++ } else {
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CBUFSLV,
++ ((addr & I2C_ADDR_MSK) << 1));
++ }
++
++ /* get sub address length, restrict to 4 bytes max */
++ subaddrlen =
++ (buf[0] <= SUB_ADDR_LEN_MAX) ? (buf[0]) : (SUB_ADDR_LEN_MAX);
++
++ for (i = (subaddrlen - 1); i >= 0; i--) {
++ /* frame the sub address based on the length */
++ i2cbufsub |= (((u32) buf[2 - i]) << (8 * i));
++ }
++
++ msglen = length - (subaddrlen + 1);
++
++ loop = (subaddrlen + 1);
++
++ /* write the sub address to the reg */
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CBUFSUB, i2cbufsub);
++ /* clear buffers */
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CBUFLEV, CLR_REG);
++
++ rdcount = (msglen <= BUF_LEN_MAX) ? (msglen) : (BUF_LEN_MAX);
++
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CBUFFOR,
++ ((rdcount << 4) | (IOH_BUF_RD) | (subaddrlen)));
++
++ do {
++ if (ioh_i2c_wait_for_bus_idle(adap, BUS_IDLE_TIMEOUT) ==
++ IOH_I2C_FAIL) {
++ break;
++ }
++
++ ioh_i2c_buff_mode_start(adap);
++
++ IOH_DEBUG("buffer mode start");
++
++ if ((adap->readreg((adap->ioh_i2c_base_address),
++ IOH_I2CBUFSTA) & I2CBMDZ_BIT) != 0) {
++ IOH_DEBUG("buffer read error 1");
++ break;
++ }
++
++ if (ioh_i2c_wait_for_xfer_complete(adap) == IOH_I2C_FAIL) {
++ IOH_DEBUG("buffer read error2");
++ break;
++ }
++
++ IOH_DEBUG("ioh_i2c_wait_for_xfer_complete return %d",
++ IOH_I2C_SUCCESS);
++
++ retvalue = rdcount;
++
++ for (; rdcount > 0; rdcount--, loop++) {
++ buf[loop] =
++ adap->readreg((adap->ioh_i2c_base_address),
++ IOH_I2CDR);
++
++ }
++ } while (0);
++
++ /* disable buffer mode interrupts */
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CBUFMSK,
++ BUFFER_MODE_INTR_DISBL);
++ /* restore the I2CMOD register */
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CMOD, i2cmod_prev);
++
++ return retvalue;
++}
++
++/*! @ingroup I2C_HALLayerAPI
++ @fn ioh_i2c_buffer_write
++ (struct i2c_adapter * i2c_adap,struct i2c_msg * msgs)
++ @remarks Function to write data to I2C bus in buffer mode.
++ The main tasks performed by this method are:
++ - Enable Buffer Mode.
++ - Set timeout interval in I2CTMR register.
++ - Enable buffer mode interrupts.
++ - Set the I2C Slave Address in the I2CBUFSLV register.
++ - Set the number of bytes, transmission mode and
++ subaddress length in I2CBUFFOR register.
++ - Perform data transfer.
++ - Disable the buffer mode interrupts.
++ @param i2c_adap [@ref IN] contains reference to the struct i2c_adapter
++ @param msgs [@ref INOUT] contains reference to i2c_msg structure
++ @retval s32
++ - @ref IOH_I2C_SUCCESS Function returns successfully
++ - @ref IOH_I2C_FAIL @ref ioh_i2c_wait_for_bus_idle,
++ @ref ioh_i2c_wait_for_xfer_complete,
++ @ref ioh_i2c_getack fails
++ - -ERESTARTSYS
++ @ref ioh_i2c_wait_for_xfer_complete was interrupted by a signal
++ @see ioh_i2c_xfer
++ <hr>
++ */
++s32 ioh_i2c_buffer_write(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs)
++{
++ struct i2c_algo_ioh_data *adap = i2c_adap->algo_data;
++
++ u32 loop = 0;
++ u32 wrcount = 0;
++ u32 msglen;
++ u32 i2cbufsub = 0;
++ u32 addr;
++ u32 i2cbufslv_7_lsb;
++ u32 i2cbufslv_10_9_bit;
++
++ /* initialize to invalid length, so that no sub address is tx-ed */
++ u32 subaddrlen = 5;
++ u32 i2cmod_prev;
++ s32 i;
++ u32 time_interval = i2c_adap->timeout;
++ u32 i2ctmr;
++ s32 retvalue = IOH_I2C_FAIL;
++ u8 *buf;
++
++ msglen = msgs->len;
++ buf = msgs->buf;
++ addr = msgs->addr;
++
++ /* get the current value of I2C mod register */
++ i2cmod_prev = adap->readreg((adap->ioh_i2c_base_address), IOH_I2CMOD);
++ /* enable buffer mode */
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CMOD,
++ IOH_BUFFER_MODE);
++
++ time_interval = (time_interval <= 10) ? (time_interval) : (10);
++ /* value of I2CT = (Timeout interval * PCLK frequency)/ 8 */
++ i2ctmr = (time_interval * (ioh_i2c_clk)) / 8;
++
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CTMR, i2ctmr);
++
++ /* enable buffer mode interrupts */
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CBUFMSK,
++ BUFFER_MODE_INTR_ENBL);
++
++ /* if 10 bit addressing is selected */
++
++ if ((msgs->flags & I2C_M_TEN) != false) {
++ IOH_DEBUG("ioh_i2c_buffer_write...ten bit addressing");
++ /* get the 8 LSBits */
++ i2cbufslv_7_lsb = (addr & I2C_ADDR_MSK);
++
++ /* get the 2 MSBits */
++ i2cbufslv_10_9_bit = ((addr & I2C_MSB_2B_MSK) << 1);
++
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CBUFSLV,
++ (TEN_BIT_ADDR_DEFAULT | i2cbufslv_7_lsb |
++ i2cbufslv_10_9_bit));
++ } else {
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CBUFSLV,
++ ((addr & I2C_ADDR_MSK) << 1));
++
++ }
++
++ /* get sub address length, restrict to 4 bytes max */
++ subaddrlen =
++ (buf[0] <= SUB_ADDR_LEN_MAX) ? (buf[0]) : (SUB_ADDR_LEN_MAX);
++
++ for (i = (subaddrlen - 1); i >= 0; i--) {
++ /* frame the sub address based on the length */
++ i2cbufsub |= (((u32) buf[2 - i]) << (8 * i));
++ }
++
++ /* subaddrlen bytes + the 1st field */
++ loop = subaddrlen + 1;
++
++ msglen = msglen - loop;
++
++ /* write the sub address to the reg */
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CBUFSUB, i2cbufsub);
++
++ /* clear buffers */
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CBUFLEV, CLR_REG);
++
++ msglen = (msglen < BUF_LEN_MAX) ? (msglen) : (BUF_LEN_MAX);
++
++ for (wrcount = 0; wrcount < msglen; wrcount++) {
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CDR,
++ buf[loop]);
++ IOH_DEBUG("Buffer mode %x", (buf[loop] & 0xff));
++ loop++;
++ }
++
++ /* set the number of bytes, transmission mode and sub address length */
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CBUFFOR,
++ ((((wrcount << 4) & (IOH_BUF_TX)) | (subaddrlen))));
++
++ do {
++ if ((ioh_i2c_wait_for_bus_idle(adap, BUS_IDLE_TIMEOUT)) ==
++ IOH_I2C_FAIL) {
++ break;
++ }
++
++ /* issue start bits */
++ ioh_i2c_buff_mode_start(adap);
++
++ if (((adap->readreg((adap->ioh_i2c_base_address),
++ IOH_I2CBUFSTA)) & (I2CBMDZ_BIT |
++ I2CBMAG_BIT)) != false) {
++ break;
++ }
++
++ if (ioh_i2c_wait_for_xfer_complete(adap) == IOH_I2C_FAIL)
++ break;
++
++ IOH_DEBUG("ioh_i2c_wait_for_xfer_complete return %d",
++ IOH_I2C_SUCCESS);
++ retvalue = wrcount;
++ } while (0);
++
++ /* disable buffer mode interrupts */
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CBUFMSK,
++ BUFFER_MODE_INTR_DISBL);
++ /* restore the I2CMOD register */
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CMOD, i2cmod_prev);
++
++ return retvalue;
++}
++
++/*! @ingroup I2C_HALLayerAPI
++ @fn ioh_i2c_eeprom_sw_reset
++ (struct i2c_adapter * i2c_adap,struct i2c_msg *msgs)
++ @remarks Function for triggering EEPROM software reset.
++ The main tasks performed by this method are:
++ - Enable EEPROM software reset mode.
++ - Enable the required interrupts.
++ - Update timeout value in I2CTMR register.
++ - Invoke @ref ioh_i2c_eeprom_swrst_start to
++ send software reset pattern.
++ - Disable interrupts.
++ @param i2c_adap [@ref IN] contains reference to the struct i2c_adapter
++ @param msgs [@ref IN] contains reference to i2c_msg structure
++ @retval s32
++ - @ref IOH_I2C_SUCCESS Function returns successfully
++	- @ref IOH_I2C_FAIL @ref ioh_i2c_wait_for_bus_idle,
++ @ref ioh_i2c_wait_for_xfer_complete,
++ - -ERESTARTSYS
++ @ref ioh_i2c_wait_for_xfer_complete was interrupted by a signal
++ @see ioh_i2c_xfer
++ <hr>
++ */
++s32 ioh_i2c_eeprom_sw_reset(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs)
++{
++
++ struct i2c_algo_ioh_data *adap = i2c_adap->algo_data;
++
++ u32 time_interval = i2c_adap->timeout;
++ u32 i2ctmr;
++ u32 i2cmod_prev;
++ u32 ioh_pattern;
++
++ s32 ret_val = IOH_I2C_FAIL; /* init return value to error */
++
++ /* get the current value of I2C mod register */
++ i2cmod_prev = adap->readreg((adap->ioh_i2c_base_address), IOH_I2CMOD);
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CMOD, CLR_REG);
++ adap->set_reg_bit((adap->ioh_i2c_base_address), IOH_I2CMOD,
++ EEPROM_SW_RST_MODE);
++
++ IOH_DEBUG("ioh_i2c_eeprom_sw_reset : I2CMOD %x\n",
++ adap->readreg((adap->ioh_i2c_base_address), IOH_I2CMOD));
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CESRMSK,
++ EEPROM_RST_INTR_ENBL);
++
++ time_interval = (time_interval <= 10) ? (time_interval) : (10);
++
++ /* value of I2CT = (Timeout interval * PCLK frequency)/ 8 */
++ i2ctmr = (time_interval * (ioh_i2c_clk)) / 8;
++
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CTMR, i2ctmr);
++
++ /* get the EEPROM reset pattern */
++ ioh_pattern = (u32) (*(msgs->buf));
++
++ /* mode 1 & 2 are used for buffer mode selection */
++ ioh_pattern -= 2;
++
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CESRFOR,
++ ioh_pattern);
++
++ IOH_DEBUG("ioh_i2c_eeprom_sw_reset : I2CESRFOR %x\n",
++ adap->readreg((adap->ioh_i2c_base_address), IOH_I2CESRFOR));
++
++ if (ioh_i2c_wait_for_bus_idle(adap, BUS_IDLE_TIMEOUT) ==
++ IOH_I2C_SUCCESS) {
++
++ ioh_i2c_eeprom_swrst_start(adap);
++ ret_val = ioh_i2c_wait_for_xfer_complete(adap);
++
++ IOH_DEBUG("ioh_i2c_wait_for_xfer_complete return =%d\n",
++ ret_val);
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CMOD,
++ i2cmod_prev);
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CESRMSK,
++ EEPROM_RST_INTR_DISBL);
++ }
++
++ IOH_DEBUG("ioh_i2c_eeprom_sw_reset return=%d\n", ret_val);
++
++ return ret_val;
++}
++
++/*! @ingroup I2C_UtilitiesAPI
++ @fn ioh_i2c_cb(struct i2c_algo_ioh_data * adap)
++ @remarks Interrupt handler Call back function.
++ The main tasks performed by this method are:
++ - Get the current operation mode.
++ - For the current mode ,check if any of the required interrupt
++ bits are set.
++ - Invoke wake_up_interruptible function to unblock the functions
++ waiting for these events.
++ @param adap [@ref IN] Contains reference to struct i2c_algo_ioh_data
++ @retval s32
++ - @ref IOH_I2C_EVENT_SET Valid I2C event recognized and flagged
++ - @ref IOH_I2C_EVENT_NONE No valid I2C event
++ @see ioh_i2c_probe
++ <hr>
++ */
++s32 ioh_i2c_cb(struct i2c_algo_ioh_data *adap)
++{
++ u32 reg_val;
++ u32 i2c_mode;
++ u32 i2c_interrupt = false;
++
++ reg_val = adap->readreg((adap->ioh_i2c_base_address), IOH_I2CMOD);
++ /* get the current mode of operation */
++ i2c_mode = reg_val & (BUFFER_MODE | EEPROM_SR_MODE);
++
++ switch (i2c_mode) {
++
++ case NORMAL_MODE:
++ {
++ reg_val =
++ adap->readreg((adap->ioh_i2c_base_address),
++ IOH_I2CSR);
++ reg_val &= (I2CMAL_BIT | I2CMCF_BIT | I2CMIF_BIT);
++
++ if (reg_val != 0) {
++
++ if (I2CMAL_BIT & reg_val) {
++ adap->ioh_i2c_event_flag |=
++ I2CMAL_EVENT;
++ }
++
++ if (I2CMCF_BIT & reg_val) {
++ adap->ioh_i2c_event_flag |=
++ I2CMCF_EVENT;
++ }
++
++ /* clear the applicable bits */
++ adap->clr_reg_bit((adap->ioh_i2c_base_address),
++ IOH_I2CSR, reg_val);
++
++ IOH_DEBUG("ioh_i2c_cb : IOH_I2CSR = %x\n",
++ (adap->
++ readreg(adap->ioh_i2c_base_address,
++ IOH_I2CSR)));
++
++ i2c_interrupt = true;
++ }
++
++ break;
++ }
++
++ case BUFFER_MODE:
++ {
++ reg_val =
++ adap->readreg((adap->ioh_i2c_base_address),
++ IOH_I2CBUFSTA);
++ reg_val &= BUFFER_MODE_MASK;
++ if (reg_val != 0) {
++			/* there is a correlation between the buffer
++			 * mode interrupt flags' bit */
++ /* positions and the flag positions in event
++ * flag. for e.g. I2CBMFI is at position */
++ /* 0 in the I2CBUFSTA register. its position
++ * in the event flag is 2, hence left shifting
++ */
++ adap->ioh_i2c_event_flag |= ((reg_val) << 2);
++
++ /* clear the applicable bits */
++ adap->clr_reg_bit((adap->ioh_i2c_base_address),
++ IOH_I2CBUFSTA, reg_val);
++
++ IOH_DEBUG("ioh_i2c_cb : IOH_I2CBUFSTA = %x\n",
++ (adap->
++ readreg(adap->ioh_i2c_base_address,
++ IOH_I2CBUFSTA)));
++
++ i2c_interrupt = true;
++ }
++
++ break;
++
++ }
++
++ case EEPROM_SR_MODE:
++ {
++ reg_val =
++ adap->readreg((adap->ioh_i2c_base_address),
++ IOH_I2CESRSTA);
++ reg_val &= (I2CESRFI_BIT | I2CESRTO_BIT);
++ if (reg_val != 0) {
++
++ adap->ioh_i2c_event_flag |= ((reg_val) << 7);
++
++ /* clear the applicable bits */
++ adap->clr_reg_bit((adap->ioh_i2c_base_address),
++ IOH_I2CESRSTA, reg_val);
++
++ IOH_DEBUG("ioh_i2c_cb : IOH_I2CESRSTA = %x\n",
++ (adap->
++ readreg(adap->ioh_i2c_base_address,
++ IOH_I2CESRSTA)));
++
++ i2c_interrupt = true;
++ }
++
++ break;
++ }
++
++ default:
++ {
++ break;
++ }
++ } /* end switch */
++
++ if (i2c_interrupt == true)
++ wake_up_interruptible(&ioh_i2c_event);
++
++ return ((i2c_interrupt ==
++ true) ? (IOH_I2C_EVENT_SET) : (IOH_I2C_EVENT_NONE));
++}
++
++/*! @ingroup I2C_UtilitiesAPI
++ @fn ioh_i2c_start(struct i2c_algo_ioh_data * adap)
++ @remarks The main tasks performed by this method are:
++ - Generate I2C start condition in normal mode
++ by setting I2CCTL.I2CMSTA to 1.
++ @param adap [@ref IN] Contains reference to struct i2c_algo_ioh_data
++ @retval None
++ @see - ioh_i2c_readbytes
++ - ioh_i2c_writebytes
++ <hr>
++ */
++static void ioh_i2c_start(struct i2c_algo_ioh_data *adap)
++{
++ IOH_DEBUG("In ioh_i2c_start : I2CCTL = %x\n",
++ (adap->readreg(adap->ioh_i2c_base_address, IOH_I2CCTL)));
++ adap->set_reg_bit((adap->ioh_i2c_base_address), IOH_I2CCTL, IOH_START);
++ IOH_DEBUG(" Invoke ioh_i2c_start successfully \n");
++ IOH_DEBUG("In ioh_i2c_start : I2CCTL = %x\n",
++ (adap->readreg(adap->ioh_i2c_base_address, IOH_I2CCTL)));
++}
++
++/*! @ingroup I2C_UtilitiesAPI
++ @fn ioh_i2c_buff_mode_start(struct i2c_algo_ioh_data * adap)
++ @remarks The main tasks performed by this method are:
++ - Generate I2C start condition in buffer mode
++ by setting I2CBUFCTL.I2CBMSTA to 1.
++ @param adap [@ref IN] Contains reference to struct i2c_algo_ioh_data
++ @retval None
++ @see - ioh_i2c_buffer_read
++ - ioh_i2c_buffer_write
++ <hr>
++ */
++static void ioh_i2c_buff_mode_start(struct i2c_algo_ioh_data *adap)
++{
++ IOH_DEBUG("In ioh_i2c_buff_mode_start : I2CBUFCTL = %x\n",
++ (adap->readreg(adap->ioh_i2c_base_address, IOH_I2CBUFCTL)));
++ adap->set_reg_bit((adap->ioh_i2c_base_address), IOH_I2CBUFCTL,
++ IOH_BUFF_START);
++
++ IOH_DEBUG(" Invoke ioh_i2c_buff_mode_start successfully \n");
++ IOH_DEBUG("In ioh_i2c_buff_mode_start : I2CBUFCTL = %x\n",
++ (adap->readreg(adap->ioh_i2c_base_address, IOH_I2CBUFCTL)));
++}
++
++/*! @ingroup I2C_UtilitiesAPI
++ @fn ioh_i2c_eeprom_swrst_start(struct i2c_algo_ioh_data * adap)
++ @remarks The main tasks performed by this method are:
++ - Generate I2C start condition in EEPROM sw reset mode
++ by setting I2CESRCTL.I2CSTA to 1.
++ @param adap [@ref IN] Contains reference to struct i2c_algo_ioh_data
++ @retval None
++ @see ioh_i2c_eeprom_sw_reset
++ <hr>
++ */
++static void ioh_i2c_eeprom_swrst_start(struct i2c_algo_ioh_data *adap)
++{
++ IOH_DEBUG("In ioh_i2c_eeprom_swrst_start : I2CESRCTL = %x\n",
++ (adap->readreg(adap->ioh_i2c_base_address, IOH_I2CESRCTL)));
++ adap->set_reg_bit((adap->ioh_i2c_base_address), IOH_I2CESRCTL,
++ IOH_ESR_START);
++
++ IOH_DEBUG(" Invoked ioh_i2c_eeprom_swrst_start successfully\n");
++ IOH_DEBUG("In ioh_i2c_eeprom_swrst_start : I2CESRCTL = %x\n",
++ (adap->readreg(adap->ioh_i2c_base_address, IOH_I2CESRCTL)));
++
++}
++
++/*! @ingroup I2C_UtilitiesAPI
++ @fn ioh_i2c_stop(struct i2c_algo_ioh_data *adap)
++ @remarks Function to generate stop condition in normal mode.
++ The main tasks performed by this method are:
++ - Generate I2C stop condition by setting I2CCTL.I2CMSTA to 0.
++ @param adap [@ref IN] Contains reference to struct i2c_algo_ioh_data
++ @retval None
++ @see - ioh_i2c_readbytes
++ - ioh_i2c_writebytes
++ <hr>
++ */
++static void ioh_i2c_stop(struct i2c_algo_ioh_data *adap)
++{
++ IOH_DEBUG("In ioh_i2c_stop : I2CCTL = %x\n",
++ (adap->readreg(adap->ioh_i2c_base_address, IOH_I2CCTL)));
++ /* clear the start bit */
++ adap->clr_reg_bit((adap->ioh_i2c_base_address), IOH_I2CCTL, IOH_START);
++ IOH_DEBUG(" Invoke ioh_i2c_stop successfully \n");
++ IOH_DEBUG("In ioh_i2c_stop : I2CCTL = %x\n",
++ (adap->readreg(adap->ioh_i2c_base_address, IOH_I2CCTL)));
++
++}
++
++/*! @ingroup I2C_UtilitiesAPI
++ @fn ioh_i2c_repstart(struct i2c_algo_ioh_data *adap)
++ @remarks Function to generate repeated start condition in normal mode.
++ The main tasks performed by this method are:
++ - Generate repeated start condition by setting using
++ I2CCTL.I2CRSTA to 1.
++ @param adap [@ref IN] Contains reference to struct i2c_algo_ioh_data
++ @retval None
++ @see - ioh_i2c_readbytes
++ - ioh_i2c_writebytes
++ <hr>
++ */
++static void ioh_i2c_repstart(struct i2c_algo_ioh_data *adap)
++{
++ IOH_DEBUG("In ioh_i2c_repstart : I2CCTL = %x\n",
++ (adap->readreg(adap->ioh_i2c_base_address, IOH_I2CCTL)));
++ adap->set_reg_bit((adap->ioh_i2c_base_address), IOH_I2CCTL,
++ IOH_REPSTART);
++
++ IOH_DEBUG(" Invoke ioh_i2c_repstart successfully \n");
++ IOH_DEBUG("In ioh_i2c_repstart : I2CCTL = %x\n",
++ (adap->readreg(adap->ioh_i2c_base_address, IOH_I2CCTL)));
++
++}
++
++/*! @ingroup I2C_UtilitiesAPI
++ @fn ioh_i2c_getack(struct i2c_algo_ioh_data *adap)
++ @remarks Function to confirm ACK/NACK.
++ The main tasks performed by this method are:
++ - Get the ACK status from I2CSR.
++ - Return success if ACK received or failure otherwise.
++ @param adap [@ref IN] Contains reference to struct i2c_algo_ioh_data
++ @retval s32
++ - @ref IOH_I2C_SUCCESS Acknowledgement was received.
++ - @ref IOH_I2C_FAIL No acknowledgement received.
++ @see - ioh_i2c_readbytes
++ - ioh_i2c_writebytes
++ <hr>
++ */
++static s32 ioh_i2c_getack(struct i2c_algo_ioh_data *adap)
++{
++ u32 reg_val;
++ reg_val =
++ (adap->readreg((adap->ioh_i2c_base_address), IOH_I2CSR) &
++ IOH_GETACK);
++
++ if (reg_val == 0)
++ IOH_DEBUG("ioh_i2c_getack : return%d \n", IOH_I2C_SUCCESS);
++ else
++ IOH_DEBUG("ioh_i2c_getack : return%d \n", IOH_I2C_FAIL);
++
++ return (((reg_val) == 0) ? (IOH_I2C_SUCCESS) : (IOH_I2C_FAIL));
++
++}
++
++/*! @ingroup I2C_UtilitiesAPI
++ @fn ioh_i2c_sendack(struct i2c_algo_ioh_data *adap)
++ @remarks Function to send ACK.
++ The main tasks performed by this method are:
++ - Clear the I2C TXAK bit in I2CCTL register .
++ @param adap [@ref IN] Contains reference to struct i2c_algo_ioh_data
++ @retval None
++ @see ioh_i2c_readbytes
++ <hr>
++ */
++static void ioh_i2c_sendack(struct i2c_algo_ioh_data *adap)
++{
++ IOH_DEBUG("In ioh_i2c_sendack : I2CCTL = %x\n",
++ (adap->readreg(adap->ioh_i2c_base_address, IOH_I2CCTL)));
++ adap->clr_reg_bit((adap->ioh_i2c_base_address), IOH_I2CCTL, IOH_ACK);
++
++ IOH_DEBUG("Invoke ioh_i2c_sendack successfully\n");
++ IOH_DEBUG("In ioh_i2c_sendack : I2CCTL = %x\n",
++ (adap->readreg(adap->ioh_i2c_base_address, IOH_I2CCTL)));
++
++}
++
++/*! @ingroup I2C_UtilitiesAPI
++ @fn ioh_i2c_sendnack(struct i2c_algo_ioh_data *adap)
++ @remarks Function to send NACK.
++ The main tasks performed by this method are:
++ - Set the I2C TXAK bit in I2CCTL register .
++ @param adap [@ref IN] Contains reference to struct i2c_algo_ioh_data
++ @retval None
++ @see ioh_i2c_readbytes
++ <hr>
++ */
++static void ioh_i2c_sendnack(struct i2c_algo_ioh_data *adap)
++{
++ IOH_DEBUG("In ioh_i2c_sendnack : I2CCTL = %x\n",
++ (adap->readreg(adap->ioh_i2c_base_address, IOH_I2CCTL)));
++ adap->set_reg_bit((adap->ioh_i2c_base_address), IOH_I2CCTL, IOH_ACK);
++ IOH_DEBUG("Invoke ioh_i2c_sendnack successfully\n");
++ IOH_DEBUG("In ioh_i2c_sendnack : I2CCTL = %x\n",
++ (adap->readreg(adap->ioh_i2c_base_address, IOH_I2CCTL)));
++
++}
++
++/*! @ingroup I2C_UtilitiesAPI
++ @fn ioh_i2c_wait_for_bus_idle
++ (struct i2c_algo_ioh_data *adap,s32 timeout)
++ @remarks Function to check the status of bus.
++ The main tasks performed by this method are:
++ - Get the status of Bus Busy.
++ - If bus is busy sleep for 1 msec and again check.
++ - Repeat until bus is free or timeout happens.
++ @param adap [@ref IN] Contains reference to struct i2c_algo_ioh_data
++	@param timeout [@ref IN] waiting time counter (ms; one 1 ms sleep per count)
++ @retval s32
++ - @ref IOH_I2C_SUCCESS The function returns successfully.
++	- @ref IOH_I2C_FAIL The bus is still busy after the timeout expired.
++ @see - ioh_i2c_readbytes
++ - ioh_i2c_writebytes
++ - ioh_i2c_buffer_read
++ - ioh_i2c_buffer_write
++ - ioh_i2c_eeprom_sw_reset
++ <hr>
++ */
++static s32 ioh_i2c_wait_for_bus_idle(struct i2c_algo_ioh_data *adap,
++ s32 timeout)
++{
++ u32 reg_value;
++
++ /* get the status of bus busy */
++ reg_value =
++ (adap->readreg((adap->ioh_i2c_base_address), IOH_I2CSR) &
++ (I2CMBB_BIT));
++
++ while ((timeout != 0) && (reg_value != 0)) {
++		msleep(1);	/* wait 1 ms before polling bus-busy again */
++ reg_value =
++ (adap->readreg((adap->ioh_i2c_base_address),
++ IOH_I2CSR) & (I2CMBB_BIT));
++ timeout--;
++ }
++
++ IOH_DEBUG("In ioh_i2c_wait_for_bus_idle : I2CSR = %x\n",
++ adap->readreg((adap->ioh_i2c_base_address), IOH_I2CSR));
++
++ if (timeout == 0) {
++ IOH_LOG(KERN_ERR, "ioh_i2c_wait_for_bus_idle :return%d\n",
++ IOH_I2C_FAIL);
++ } else {
++ IOH_DEBUG("ioh_i2c_wait_for_bus_idle : return %d\n",
++ IOH_I2C_SUCCESS);
++ }
++
++ return ((timeout <= 0) ? (IOH_I2C_FAIL) : (IOH_I2C_SUCCESS));
++}
++
++/*! @ingroup I2C_UtilitiesAPI
++ @fn ioh_i2c_wait_for_xfer_complete(struct i2c_algo_ioh_data * adap)
++ @remarks This functions initiates a wait for the transfer complete event
++ @param adap [@ref IN] Contains reference to struct i2c_algo_ioh_data
++ @retval s32
++ - @ref IOH_I2C_SUCCESS Function returns successfully.
++ - @ref IOH_I2C_FAIL Any error occurs.
++ - -ERESTARTSYS wait_event_interruptible_timeout
++ API was interrupted
++ @see - ioh_i2c_readbytes
++ - ioh_i2c_writebytes
++ - ioh_i2c_buffer_read
++ - ioh_i2c_buffer_write
++ - ioh_i2c_eeprom_sw_reset
++ <hr>
++*/
++static s32 ioh_i2c_wait_for_xfer_complete(struct i2c_algo_ioh_data *adap)
++{
++
++ u32 temp_flag;
++ s32 ret = IOH_I2C_FAIL;
++ ret =
++ wait_event_interruptible_timeout(ioh_i2c_event,
++ (adap->ioh_i2c_event_flag != 0),
++ msecs_to_jiffies(50));
++
++ IOH_DEBUG
++ ("adap->ioh_i2c_event_flag in ioh_i2c_wait_for_xfer_complete=%x",
++ adap->ioh_i2c_event_flag);
++ temp_flag = adap->ioh_i2c_event_flag;
++ adap->ioh_i2c_event_flag = 0;
++
++ if (ret == 0) {
++ IOH_LOG(KERN_ERR, "ioh_i2c_wait_for_xfer_complete : Timeout\n");
++ } else if (ret < 0) {
++ IOH_LOG(KERN_ERR,
++ "ioh_i2c_wait_for_xfer_complete failed : "
++ "Interrupted by other signal\n");
++ ret = -ERESTARTSYS;
++ } else if ((temp_flag & I2C_ERROR_MASK) == 0) {
++ ret = IOH_I2C_SUCCESS;
++ } else {
++ IOH_LOG(KERN_ERR,
++ "ioh_i2c_wait_for_xfer_complete failed : "
++ "Error in transfer\n");
++ }
++
++ IOH_DEBUG(KERN_ERR, "ioh_i2c_wait_for_xfer_complete returns %d\n", ret);
++
++ return ret;
++}
++
++/*! @ingroup I2C_UtilitiesAPI
++ @fn ioh_i2c_writereg(u32 addr,u32 offset,u32 val)
++ @remarks Function for writing data to register.
++ The main tasks performed by this method are:
++ - Compute the target address by adding the offset to
++ the base address.
++ - Write the specified value to the target address.
++ @param addr [@ref IN] Base address for the I2C channel
++ @param offset [@ref IN] offset for the register
++ @param val [@ref IN] Value to be written
++ @retval None
++ @see ioh_i2c_probe
++ <hr>
++ */
++void ioh_i2c_writereg(u32 addr, u32 offset, u32 val)
++{
++ IOH_WRITE_LONG(val, (addr + offset));
++}
++
++/*! @ingroup I2C_UtilitiesAPI
++ @fn ioh_i2c_readreg(u32 addr,u32 offset)
++ @remarks Function for reading data from register.
++ The main tasks performed by this method are:
++ - Compute the target address by adding the offset to
++ the base address.
++ - Read the register value and return the same.
++ @param addr [@ref IN] Base address for the I2C channel
++ @param offset [@ref IN] offset for the register
++ @retval u32
++ The content of the register that is read.
++ @see ioh_i2c_probe
++ <hr>
++ */
++u32 ioh_i2c_readreg(u32 addr, u32 offset)
++{
++ u32 ret;
++ ret = IOH_READ_LONG(addr + offset);
++ return ret;
++}
++
++/*! @ingroup I2C_UtilitiesAPI
++ @fn ioh_i2c_setbit(u32 addr,u32 offset,u32 bitmask)
++ @remarks Function to set particular bit in register.
++ The main tasks performed by this method are:
++ - Compute the target address by adding the offset
++ to the base address.
++ - Read the register value at the target address.
++ - Perform logical OR with bitmask and write back
++ to the target address.
++ @param addr [@ref IN] Base address for the I2C channel
++ @param offset [@ref IN] offset for the register
++ @param bitmask [@ref IN] bit position
++ @retval None
++ @see ioh_i2c_probe
++ <hr>
++ */
++void ioh_i2c_setbit(u32 addr, u32 offset, u32 bitmask)
++{
++ IOH_WRITE_LONG(((IOH_READ_LONG(addr + offset)) | (bitmask)),
++ (addr + offset));
++}
++
++/*! @ingroup I2C_UtilitiesAPI
++ @fn ioh_i2c_clrbit(u32 addr,u32 off,u32 bitmask)
++ @remarks Function to reset particular bit in register.
++ The main tasks performed by this method are:
++ - Compute the target address by adding the offset
++ to the base address.
++ - Read the register value at the target address.
++ - Perform logical AND with bitmask and write back
++ to the target address.
++ @param addr [@ref IN] Base address for the I2C channel
++ @param offset [@ref IN] offset for the register
++ @param bitmask [@ref IN] bit position
++ @retval None
++ @see ioh_i2c_probe
++ <hr>
++ */
++void ioh_i2c_clrbit(u32 addr, u32 offset, u32 bitmask)
++{
++ IOH_WRITE_LONG(((IOH_READ_LONG(addr + offset)) & (~(bitmask))),
++ (addr + offset));
++}
++
++/*! @ingroup I2C_UtilitiesAPI
++ @fn ioh_i2c_disbl_int(struct i2c_algo_ioh_data * adap)
++ @remarks Function to disable IOH I2C interrupts.
++ The main tasks performed by this method are:
++ - Disable the following interrupts:
++ MAL,MCF,I2CESRFI,I2CESRTO,I2CBMIS,I2CBMTO,I2CBMNA,
++ I2CBMAL and I2CBMFI.
++ @param adap [@ref IN] Contains reference to struct i2c_algo_ioh_data
++ @retval None
++ @see - ioh_i2c_remove
++ - ioh_i2c_suspend
++ <hr>
++*/
++void ioh_i2c_disbl_int(struct i2c_algo_ioh_data *adap)
++{
++
++ adap->clr_reg_bit((adap->ioh_i2c_base_address), IOH_I2CCTL,
++ NORMAL_INTR_ENBL);
++
++ IOH_DEBUG("ioh_i2c_disbl_int : I2CCTL = %x\n",
++ (adap->readreg(adap->ioh_i2c_base_address, IOH_I2CCTL)));
++
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CESRMSK,
++ EEPROM_RST_INTR_DISBL);
++
++ IOH_DEBUG("ioh_i2c_disbl_int : IOH_I2CESRMSK = %x\n",
++ (adap->readreg(adap->ioh_i2c_base_address, IOH_I2CESRMSK)));
++
++ adap->writereg((adap->ioh_i2c_base_address), IOH_I2CBUFMSK,
++ BUFFER_MODE_INTR_DISBL);
++
++ IOH_DEBUG("ioh_i2c_disbl_int : IOH_I2CBUFMSK = %x\n",
++ (adap->readreg(adap->ioh_i2c_base_address, IOH_I2CBUFMSK)));
++
++}
+diff -urN linux-2.6.33.1/drivers/i2c/busses/pch_i2c_hal.h topcliff-2.6.33.1/drivers/i2c/busses/pch_i2c_hal.h
+--- linux-2.6.33.1/drivers/i2c/busses/pch_i2c_hal.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33.1/drivers/i2c/busses/pch_i2c_hal.h 2010-03-23 10:40:18.000000000 +0900
+@@ -0,0 +1,337 @@
++#ifndef __IOH_I2C_HAL_H__
++#define __IOH_I2C_HAL_H__
++/*!
++* @file ioh_i2c_hal.h
++* @brief This file provides the function prototypes and macros to the I2C module.
++* @version 0.95
++* @section
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; version 2 of the License.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++*/
++
++/*
++* History:
++* Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++* All rights reserved.
++*
++* created:
++* WIPRO 02/20/2009
++* modified:
++* WIPRO 05/21/2009
++*
++*/
++
++/*! @defgroup I2C*/
++
++/*! @defgroup I2C_Global
++@ingroup I2C
++@brief This group describes the global entities within
++ the module.
++@remarks This group includes all the global data structures
++ used within the modules. These are mainly used to
++ store the device related information which is used
++ through out the module.
++<hr>
++*/
++
++/*! @defgroup I2C_PCILayer
++@ingroup I2C
++@brief This group describes the PCI layer interface
++ functionalities.
++@remarks This group contains the functions and data structures
++ that are used to interface the module with PCI Layer
++ subsystem of the Kernel.
++<hr>
++*/
++
++/*! @defgroup I2C_InterfaceLayer
++@ingroup I2C
++@brief This group describes the Driver interface functionalities.
++@remarks This group contains the data structures and functions used
++ to interface the module driver with the kernel subsystem.
++<hr>
++*/
++
++/*! @defgroup I2C_HALLayer
++@ingroup I2C
++@brief This group describes the hardware specific functionalities.
++@remarks This group contains the functions and data structures used
++ by the module to communicate with the hardware. These
++ functions are device specific and designed according to the
++ device specifications.
++<hr>
++*/
++
++/*! @defgroup I2C_Utilities
++@ingroup I2C
++@brief This group describes the utility functionalities.
++@remarks This group contains the functions and data structures used
++ to assist the other functionalities in their operations.
++<hr>
++*/
++
++/*! @defgroup I2C_PCILayerAPI
++@ingroup I2C_PCILayer
++@brief This group contains the API(functions) used as the PCI
++ interface between the Kernel subsystem and the module.
++<hr>
++*/
++
++/*! @defgroup I2C_PCILayerFacilitators
++@ingroup I2C_PCILayer
++@brief This group contains the data structures used by the PCI
++ Layer APIs for their functionalities.
++<hr>
++*/
++
++/*! @defgroup I2C_InterfaceLayerAPI
++@ingroup I2C_InterfaceLayer
++@brief This group contains the API(functions) used as the Driver
++ interface between the Kernel subsystem and the module.
++<hr>
++*/
++
++/*! @defgroup I2C_InterfaceLayerFacilitators
++@ingroup I2C_InterfaceLayer
++@brief This group contains the data structures used by the Driver
++ interface APIs for their functionalities.
++<hr>
++*/
++
++/*! @defgroup I2C_HALLayerAPI
++@ingroup I2C_HALLayer
++@brief This group contains the APIs(functions) used to interact with
++ the hardware. These APIs act as an interface between the
++ hardware and the other driver functions.
++<hr>
++*/
++
++/*! @defgroup I2C_UtilitiesAPI
++@ingroup I2C_Utilities
++@brief This group contains the APIs(functions) used by other functions.
++<hr>
++*/
++
++/*includes*/
++#include <linux/irqreturn.h>
++
++/*! @ingroup I2C_Global
++@def IOH_I2C_SUCCESS
++@brief Success status code
++*/
++#define IOH_I2C_SUCCESS (0)
++
++/*! @ingroup I2C_Global
++@def IOH_I2C_FAIL
++@brief Error status code
++*/
++#define IOH_I2C_FAIL (-1)
++
++/*! @ingroup I2C_Global
++@def IOH_I2C_MAX_CHN
++@brief Maximum I2C channels available
++*/
++#define IOH_I2C_MAX_CHN (1)
++
++/*! @ingroup I2C_Global
++@def IOH_I2C_EVENT_SET
++@brief I2C Interrupt Event Set Status
++*/
++#define IOH_I2C_EVENT_SET (0)
++
++/*! @ingroup I2C_Global
++@def IOH_I2C_EVENT_NONE
++@brief I2C Interrupt Event Clear Status
++*/
++#define IOH_I2C_EVENT_NONE (1)
++
++/*! @ingroup I2C_Global
++@def IOH_I2C_MAX_CLK
++@brief Maximum peripheral Clock speed supported in MHz
++*/
++#define IOH_I2C_MAX_CLK (100000)
++
++
++/* flag for Buffer mode enable */
++#define IOH_BUFFER_MODE_ENABLE (0x0002)
++
++/* flag for EEPROM SW RST enable */
++#define IOH_EEPROM_SW_RST_MODE_ENABLE (0x0008)
++
++/* for mode selection */
++#define I2C_MODE_SEL (0x711)
++
++/*structures*/
++/*! @ingroup I2C_HALLayer
++@struct i2c_algo_ioh_data
++@brief This structure contains references to methods implementing
++ I2C driver functionalities.
++@note The concerned details should be provided during
++ the data transfer.
++@see - ioh_i2c_init
++ - ioh_i2c_entcb
++ - ioh_i2c_cb
++ - ioh_i2c_disbl_int
++*/
++
++struct i2c_algo_ioh_data {
++
++ struct adapter_info *p_adapter_info;
++ /**< stores the reference to adapter_info structure*/
++
++ struct i2c_adapter ioh_i2c_adapter;
++ /**< stores the reference to i2c_adapter structure*/
++
++ u32 ioh_i2c_base_address; /**< specifies the remapped base address*/
++ int ioh_i2c_buff_mode_en; /**< specifies if buffer mode is enabled*/
++ u32 ioh_i2c_event_flag; /**< specifies occurrence of interrupt events*/
++
++ bool ioh_i2c_xfer_in_progress;
++ /**< specifies whether the transfer is completed */
++
++ void (*writereg) (u32 addr, u32 off, u32 val);
++ /**< stores the reference to register write function*/
++
++ u32(*readreg) (u32 addr, u32 off);
++ /**< stores the reference to register read function*/
++
++ void (*set_reg_bit) (u32 addr, u32 off, u32 bitmsk);
++ /**< stores the reference to register bit setting function*/
++
++ void (*clr_reg_bit) (u32 addr, u32 off, u32 bitmsk);
++ /**< stores the reference to register bit clearing function*/
++};
++
++/*! @ingroup I2C_HALLayer
++@struct adapter_info
++@brief This structure holds the adapter information
++ for the IOH i2c controller.
++@note This structure contains instances of struct i2c_algo_ioh_data
++ for the available I2C channels and also a variable for saving
++ the suspend status.
++@see - ioh_i2c_probe
++ - ioh_i2c_remove
++ - ioh_i2c_suspend
++ - ioh_i2c_resume
++*/
++
++struct adapter_info {
++
++ struct i2c_algo_ioh_data ioh_i2c_data[IOH_I2C_MAX_CHN];
++ /**< stores a list of i2c_algo_ioh_data;
++ there will be as many elements as maximum I2C channels*/
++
++ bool ioh_i2c_suspended;
++ /**< specifies whether the system is suspended or not*/
++};
++
++/**global variables*/
++extern int ioh_i2c_speed;
++extern int ioh_i2c_clk;
++extern s32(*ioh_i2c_cbr) (struct i2c_algo_ioh_data *);
++
++extern struct i2c_algorithm ioh_i2c_algorithm;
++
++/* Function prototypes */
++/*! @ingroup I2C_HALLayerAPI
++@fn s32 ioh_i2c_init(struct i2c_algo_ioh_data *adap)
++@brief Function to initialize IOH I2C hardware
++*/
++s32 ioh_i2c_init(struct i2c_algo_ioh_data *adap);
++
++/*! @ingroup I2C_HALLayerAPI
++@fn s32 ioh_i2c_writebytes(struct i2c_adapter *i2c_adap ,
++ struct i2c_msg *msgs,u32 last, u32 first)
++@brief Function for data write in normal mode
++*/
++s32 ioh_i2c_writebytes(struct i2c_adapter *i2c_adap,
++ struct i2c_msg *msgs, u32 last, u32 first);
++
++/*! @ingroup I2C_HALLayerAPI
++@fn s32 ioh_i2c_readbytes(struct i2c_adapter *i2c_adap ,
++ struct i2c_msg *msgs,u32 last, u32 first)
++@brief Function for data read in normal mode
++*/
++s32 ioh_i2c_readbytes(struct i2c_adapter *i2c_adap,
++ struct i2c_msg *msgs, u32 last, u32 first);
++
++/*! @ingroup I2C_HALLayerAPI
++@fn s32 ioh_i2c_eeprom_sw_reset(struct i2c_adapter * i2c_adap,
++ struct i2c_msg *msgs)
++@brief Function for triggering EEPROM software reset mode
++*/
++s32 ioh_i2c_eeprom_sw_reset(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs);
++
++/*! @ingroup I2C_HALLayerAPI
++@fn s32 ioh_i2c_buffer_write
++ (struct i2c_adapter * i2c_adap,struct i2c_msg *msgs)
++@brief Function for data write in buffer mode
++*/
++s32 ioh_i2c_buffer_write(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs);
++
++/*! @ingroup I2C_HALLayerAPI
++@fn s32 ioh_i2c_buffer_read
++ (struct i2c_adapter * i2c_adap,struct i2c_msg *msgs)
++@brief Function for data read in buffer mode
++*/
++s32 ioh_i2c_buffer_read(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs);
++
++/*! @ingroup I2C_HALLayerAPI
++@fn irqreturn_t ioh_i2c_handler(int irq,void *pData)
++@brief Interrupt handler
++*/
++irqreturn_t ioh_i2c_handler(int irq, void *pData);
++
++/*! @ingroup I2C_HALLayerAPI
++@fn void ioh_i2c_entcb
++ (s32(*ioh_i2c_ptr)(struct i2c_algo_ioh_data *adap))
++@brief Function for registering the interrupt handler call back
++*/
++void ioh_i2c_entcb(s32(*ioh_i2c_ptr) (struct i2c_algo_ioh_data *adap));
++
++/*! @ingroup I2C_UtilitiesAPI
++@fn s32 ioh_i2c_cb(struct i2c_algo_ioh_data * adap)
++@brief Call back function invoked from interrupt handler
++*/
++s32 ioh_i2c_cb(struct i2c_algo_ioh_data *adap);
++
++/*! @ingroup I2C_UtilitiesAPI
++@fn void ioh_i2c_disbl_int(struct i2c_algo_ioh_data *adap)
++@brief Function for disabling the interrupt
++*/
++void ioh_i2c_disbl_int(struct i2c_algo_ioh_data *adap);
++
++/*! @ingroup I2C_UtilitiesAPI
++@fn void ioh_i2c_writereg(u32 addr,u32 off,u32 val)
++@brief Function for writing data to register
++*/
++void ioh_i2c_writereg(u32 addr, u32 off, u32 val);
++
++/*! @ingroup I2C_UtilitiesAPI
++@fn u32 ioh_i2c_readreg(u32 addr,u32 off)
++@brief Function for reading data from register
++*/
++u32 ioh_i2c_readreg(u32 addr, u32 off);
++
++/*! @ingroup I2C_UtilitiesAPI
++@fn void ioh_i2c_setbit(u32 addr,u32 off,u32 bitmsk)
++@brief Function to set a particular bit in a register
++*/
++void ioh_i2c_setbit(u32 addr, u32 off, u32 bitmsk);
++
++/*! @ingroup I2C_UtilitiesAPI
++@fn void ioh_i2c_clrbit(u32 addr,u32 off,u32 bitmsk)
++@brief Function to clear a particular bit in a register
++*/
++void ioh_i2c_clrbit(u32 addr, u32 off, u32 bitmsk);
++#endif
+diff -urN linux-2.6.33.1/drivers/i2c/busses/pch_i2c_main.c topcliff-2.6.33.1/drivers/i2c/busses/pch_i2c_main.c
+--- linux-2.6.33.1/drivers/i2c/busses/pch_i2c_main.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33.1/drivers/i2c/busses/pch_i2c_main.c 2010-03-23 10:40:18.000000000 +0900
+@@ -0,0 +1,247 @@
++/*!
++ * @file ioh_i2c_main.c
++ * @brief This file contains the definitions
++ * of Interface Layer APIs for IOH I2C driver.
++ * @version 0.95
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 02/20/2009
++ * modified:
++ * WIPRO 05/21/2009
++ *
++ */
++
++/*includes*/
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/i2c.h>
++#include <linux/pci.h>
++#include <linux/types.h>
++#include <linux/stat.h>
++#include <linux/interrupt.h>
++
++#include "pch_i2c_hal.h"
++#include "pch_common.h"
++#include "pch_debug.h"
++
++/* Function prototypes */
++
++/*! @ingroup I2C_UtilitiesAPI
++ @fn ioh_i2c_func(struct i2c_adapter *adap)
++ @brief This function returns the functionalities supported
++ by I2C driver.
++ */
++static u32 ioh_i2c_func(struct i2c_adapter *adap);
++
++/*! @ingroup I2C_UtilitiesAPI
++ @fn ioh_i2c_xfer(struct i2c_adapter *i2c_adap,
++ struct i2c_msg *msgs,s32 num)
++ @brief This function handles data transfer through I2C bus
++ */
++static s32 ioh_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
++ s32 num);
++
++/*structures*/
++
++/*! @ingroup I2C_Global
++ @struct ioh_i2c_algorithm
++ @brief This an instance of the kernel structure i2c_algorithm structure
++ and it stores the properties of the IOH I2C algorithm driver.
++ @note This structure stores the references of the @ref ioh_i2c_xfer
++ and @ref ioh_i2c_func functions.
++ @see ioh_i2c_probe
++ */
++
++struct i2c_algorithm ioh_i2c_algorithm = {
++ .master_xfer = ioh_i2c_xfer,
++ .functionality = ioh_i2c_func
++};
++
++/*! @ingroup I2C_UtilitiesAPI
++ @fn ioh_i2c_func(struct i2c_adapter *adap)
++ @brief Function return the functionality of the I2C driver
++ @remarks Returns (I2C_FUNC_I2C) | (I2C_FUNC_SMBUS_EMUL) |
++ (I2C_FUNC_10BIT_ADDR)
++ @param adap [@ref IN] Contains reference to i2c_adapter structure
++ @retval u32
++ - Bitwise OR of the feature status codes supported
++ by this algorithm driver.
++ @see ioh_i2c_algorithm
++ */
++static u32 ioh_i2c_func(struct i2c_adapter *adap)
++{
++ u32 ret;
++ ret = I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR;
++ return ret;
++}
++
++/*! @ingroup I2C_UtilitiesAPI
++ @fn ioh_i2c_xfer(struct i2c_adapter *i2c_adap,
++ struct i2c_msg *msgs,s32 num)
++ @brief Function to transfer data through I2C bus
++ @remarks Function to transfer data through I2C bus
++ The main tasks performed by this method are:
++ - Check if system is suspended.
++ - If EEPROM software reset command is received,
++ then invoke function ioh_i2c_eeprom_sw_reset.
++ - If Buffer mode selection command is received,
++ check the value of msgs[0]->buf[0]. If set,
++ enable buffer mode, by setting the variable
++ adap->ioh_i2c_buff_mode_en. Otherwise reset the flag.
++ - If no special command, perform the requested
++ data transfer operation.
++ @note The master transfer function ioh_i2c_xfer
++ is invoked by the Linux I2C core, whenever
++ communication/data transfer with the IOH I2C
++ driver is necessary. The Linux I2C core
++ ensures that the function is called with
++ valid parameters only.
++ @param i2c_adap [@ref IN] contains reference to the struct i2c_adapter
++ @param msgs [@ref IN] contains reference to i2c_msg structure
++ @param num [@ref IN] number of messages
++ @retval s32
++ - @ref IOH_I2C_SUCCESS
++ Function returns successfully for EEPROM sw reset mode,
++ buffer mode selection commands.
++ - The number of bytes transferred for successful operation
++ of read/write calls.
++ - @ref IOH_I2C_FAIL
++ Any error occurs during the execution of the function.
++ @see ioh_i2c_algorithm
++ <hr>
++ */
++
++static s32 ioh_i2c_xfer(struct i2c_adapter *i2c_adap,
++ struct i2c_msg *msgs, s32 num)
++{
++
++ struct i2c_msg *pmsg;
++ u32 i = 0;
++ u32 status;
++ u32 msglen;
++ u32 subaddrlen;
++ s32 ret = IOH_I2C_FAIL;
++
++ struct i2c_algo_ioh_data *adap = i2c_adap->algo_data;
++
++ if (adap->p_adapter_info->ioh_i2c_suspended == false) {
++ IOH_DEBUG("ioh_i2c_xfer "
++ "adap->p_adapter_info->ioh_i2c_suspended is %d\n",
++ adap->p_adapter_info->ioh_i2c_suspended);
++ /* transfer not completed */
++ adap->ioh_i2c_xfer_in_progress = true;
++ IOH_DEBUG(" adap->ioh_i2c_xfer_in_progress is %d\n",
++ adap->ioh_i2c_xfer_in_progress);
++ pmsg = &msgs[0];
++ status = pmsg->flags;
++ /* special commands for IOH I2C driver */
++ if ((status &
++ (IOH_EEPROM_SW_RST_MODE_ENABLE | IOH_BUFFER_MODE_ENABLE))
++ != false) {
++ if ((status & IOH_EEPROM_SW_RST_MODE_ENABLE) != false) {
++ /* check whether EEPROM sw reset is enabled */
++ IOH_DEBUG("ioh_i2c_xfer invoking "
++ "ioh_i2c_eeprom_sw_reset\n");
++ IOH_DEBUG("After invoking "
++ "I2C_MODE_SEL :flag= 0x%x\n", status);
++ ret = ioh_i2c_eeprom_sw_reset(i2c_adap, pmsg);
++ } else {
++ adap->ioh_i2c_buff_mode_en =
++ (pmsg->buf[0] == 1) ?
++ (IOH_BUFFER_MODE_ENABLE) : (pmsg->buf[0]);
++ ret = IOH_I2C_SUCCESS;
++ }
++ /* transfer completed */
++ adap->ioh_i2c_xfer_in_progress = false;
++ IOH_DEBUG("adap->ioh_i2c_xfer_in_progress is %d\n",
++ adap->ioh_i2c_xfer_in_progress);
++ IOH_DEBUG(KERN_INFO,
++ "After mode selection "
++ "ioh_i2c_xfer return = %d\n", ret);
++ return ret;
++ }
++ for (i = 0; i < num; i++) {
++ pmsg = &msgs[i];
++ pmsg->flags |= adap->ioh_i2c_buff_mode_en;
++ status = pmsg->flags;
++ IOH_DEBUG("After invoking I2C_MODE_SEL :flag= 0x%x\n",
++ status);
++ /* calculate sub address length and message length */
++ /* these are applicable only for buffer mode */
++ subaddrlen = pmsg->buf[0];
++ /* calculate actual message length excluding
++ * the sub address fields */
++ msglen = (pmsg->len) - (subaddrlen + 1);
++
++ if (((status & IOH_BUFFER_MODE_ENABLE) != false)
++ && (msglen != 0)) {
++ /* Buffer mode cannot be used for transferring
++ * 0 byte data. Hence when buffer mode is
++ * enabled and 0 byte transfer is requested,
++ * normal mode transfer will be used */
++ if ((status & (I2C_M_RD)) != false) {
++ IOH_DEBUG(KERN_INFO,
++ "ioh_i2c_xfer invoking "
++ "ioh_i2c_buffer_read\n");
++ ret =
++ ioh_i2c_buffer_read(i2c_adap, pmsg);
++ } else {
++ IOH_DEBUG(KERN_INFO,
++ "ioh_i2c_xfer invoking "
++ "ioh_i2c_buffer_write\n");
++ ret =
++ ioh_i2c_buffer_write(i2c_adap, pmsg);
++ }
++ } else {
++ if ((status & (I2C_M_RD)) != false) {
++ IOH_DEBUG(KERN_INFO,
++ "ioh_i2c_xfer invoking "
++ "ioh_i2c_readbytes\n");
++ ret =
++ ioh_i2c_readbytes(i2c_adap, pmsg,
++ (i + 1 == num),
++ (i == 0));
++ } else {
++ IOH_DEBUG(KERN_INFO,
++ "ioh_i2c_xfer invoking "
++ "ioh_i2c_writebytes\n");
++ ret =
++ ioh_i2c_writebytes(i2c_adap, pmsg,
++ (i + 1 == num),
++ (i == 0));
++ }
++ }
++
++ }
++
++ adap->ioh_i2c_xfer_in_progress = false; /* transfer completed */
++
++ IOH_DEBUG(" adap->ioh_i2c_xfer_in_progress is %d\n",
++ adap->ioh_i2c_xfer_in_progress);
++ }
++ IOH_DEBUG(KERN_INFO, "ioh_i2c_xfer return:%d\n\n\n\n", ret);
++
++ return ret;
++}
+diff -urN linux-2.6.33.1/drivers/i2c/busses/pch_i2c_pci.c topcliff-2.6.33.1/drivers/i2c/busses/pch_i2c_pci.c
+--- linux-2.6.33.1/drivers/i2c/busses/pch_i2c_pci.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33.1/drivers/i2c/busses/pch_i2c_pci.c 2010-03-23 10:40:18.000000000 +0900
+@@ -0,0 +1,583 @@
++/*!
++* @file ioh_i2c_pci.c
++* @brief This file contains the definitions of I2C_PCILayer APIs.
++* @version 0.95
++* @section
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; version 2 of the License.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++*/
++
++/*
++* History:
++* Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++* All rights reserved.
++*
++* created:
++* WIPRO 02/20/2009
++* modified:
++* WIPRO 05/21/2009
++*
++*/
++
++/*includes*/
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/i2c.h>
++#include <linux/pci.h>
++#include <linux/types.h>
++#include <linux/stat.h>
++#include <linux/interrupt.h>
++
++#include "pch_i2c_hal.h"
++#include "pch_common.h"
++#include "pch_debug.h"
++
++/**
++ *macro definition
++ */
++
++/*! @ingroup I2C_PCILayer
++@def PCI_DEVICE_ID_IOH_I2C
++@brief Device ID of the device supported by IOH I2C
++ driver in GE configuration.
++*/
++#define PCI_DEVICE_ID_IOH_I2C (0x8817)
++
++/*
++ * variable declaration
++ */
++/*! @ingroup I2C_Global
++@var ioh_i2c_speed
++@brief specifies I2C bus speed in Kbps
++@note This parameter is provided as module parameter
++ while loading the driver. If no value is provided,
++ by default the speed is set to 100 kbps.
++@see ioh_i2c_init
++<hr>
++*/
++int ioh_i2c_speed = 100;
++
++/*! @ingroup I2C_Global
++@var ioh_i2c_clock
++@brief specifies I2C clock speed in KHz
++@note This parameter is provided as module parameter
++ while inserting the driver. If no value is provided,
++ by default the speed is set to 62500KHz.
++@see ioh_i2c_init
++<hr>
++*/
++/* int ioh_i2c_clk = 62500; */
++int ioh_i2c_clk = 50000;
++
++/*! @ingroup I2C_Global
++@var ioh_i2c_cbr
++@brief I2C_Global function pointer to save reference to
++ callback function
++@see ioh_i2c_entcb
++<hr>
++*/
++s32(*ioh_i2c_cbr) (struct i2c_algo_ioh_data *);
++
++/*! @ingroup I2C_Global
++@var MODULE_NAME
++@brief I2C_Global variable storing the name of this driver
++@see ioh_i2c_probe
++<hr>
++*/
++#define MODULE_NAME "pch_i2c" /* name for the driver */
++
++/* Function prototypes */
++/*! @ingroup I2C_PCILayerAPI
++@fn ioh_i2c_probe(struct pci_dev *pdev,
++ const struct pci_device_id *id)
++@brief This function implements the probe routine
++ for IOH I2C driver module
++*/
++static int __devinit ioh_i2c_probe(struct pci_dev *pdev,
++ const struct pci_device_id *id);
++
++/*! @ingroup I2C_PCILayerAPI
++@fn ioh_i2c_remove(struct pci_dev *pdev)
++@brief This function implements the remove routine
++ for IOH I2C driver module.
++*/
++static void __devexit ioh_i2c_remove(struct pci_dev *pdev);
++
++/*! @ingroup I2C_PCILayerAPI
++@fn ioh_i2c_suspend(struct pci_dev* pdev,pm_message_t state)
++@brief This function implements the suspend routine
++ for IOH I2C driver module
++*/
++static int ioh_i2c_suspend(struct pci_dev *pdev, pm_message_t state);
++
++/*! @ingroup I2C_PCILayerAPI
++@fn ioh_i2c_resume(struct pci_dev* pdev)
++@brief This function implements the resume routine
++ for IOH I2C driver module
++*/
++static int ioh_i2c_resume(struct pci_dev *pdev);
++
++/*structures*/
++/*! @ingroup I2C_PCILayerFacilitators
++@struct ioh_i2c_pcidev_id
++@brief Store information of supported PCI devices
++@note This is an instance of pci_device_id structure and
++ holds information of the PCI devices that are supported
++ by this driver
++@see ioh_i2c_pcidriver
++*/
++
++static struct pci_device_id __devinitdata ioh_i2c_pcidev_id[] = {
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_IOH_I2C)},
++ {0,}
++};
++
++/*! @ingroup I2C_PCILayerFacilitators
++@struct ioh_i2c_pcidriver
++@brief Store the references of PCI driver interfaces to kernel
++@note This is an instance of pci_driver and this structure specifies
++ the driver details to be registered with the kernel
++@see - ioh_i2c_pci_init
++ - ioh_i2c_pci_exit
++<hr>
++*/
++
++static struct pci_driver ioh_i2c_pcidriver = {
++ .name = "ioh_i2c",
++ .id_table = ioh_i2c_pcidev_id,
++ .probe = ioh_i2c_probe,
++ .remove = __devexit_p(ioh_i2c_remove),
++#ifdef CONFIG_PM
++ .suspend = ioh_i2c_suspend,
++ .resume = ioh_i2c_resume
++#endif
++};
++
++/*! @ingroup I2C_PCILayerAPI
++ @fn ioh_i2c_probe(struct pci_dev *pdev, const struct
++ pci_device_id *id)
++ @remarks The main tasks performed by this method are:
++ - Allocate memory for driver private data.
++ - Enable the PCI device.
++ - Reserve the PCI regions.
++ - Map the device address of the IO BAR.
++ - Register the interrupt handler.
++ - Initialize the members in adap_info->ioh_i2c_data.
++ - Register the ioh_i2c_adapter.
++ - Initialize the IOH I2C hardware.
++ @note This function is invoked by the PCI core when a device is
++ found for this driver to control
++ @param pdev [@ref IN] contains reference to
++ PCI device descriptor for the peripheral
++ @param id [@ref IN] contains reference to
++ the pci_device_id table of matching peripheral
++ @retval int
++ - @ref IOH_I2C_SUCCESS Function returns successfully.
++ - -EIO pci_enable_device fails
++ - -EINVAL pci_enable_device/request_irq fails
++ - -EBUSY pci_request_regions/request_irq fails
++ - -ENOMEM i2c_add_adapter/request_irq/pci_iomap/kzalloc fails
++ - -EAGAIN i2c_add_adapter fails
++ - -ENOSYS request_irq fails
++ @see ioh_i2c_pcidriver
++ <hr>
++ */
++static int __devinit ioh_i2c_probe(struct pci_dev *pdev,
++ const struct pci_device_id *id)
++{
++
++ int i;
++ u32 base_addr;
++ s32 ret = IOH_I2C_SUCCESS;
++
++ IOH_DEBUG("Enterred in i2c_probe\n");
++
++ do {
++ struct adapter_info *adap_info =
++ kzalloc((sizeof(struct adapter_info)), GFP_KERNEL);
++ if (adap_info == NULL) {
++ IOH_LOG(KERN_ERR, "Memory allocation failed FAILED");
++ ret = -ENOMEM;
++ break;
++ }
++
++ IOH_DEBUG
++ ("Function kzalloc invoked successfully "
++ "and adap_info valu = %p\n",
++ adap_info);
++
++ ret = pci_enable_device(pdev);
++
++ if (ret) {
++ IOH_LOG(KERN_ERR, "pci_enable_device FAILED");
++ kfree(adap_info);
++ break;
++ }
++
++ IOH_DEBUG("pci_enable_device returns %d\n", ret);
++
++ ret = pci_request_regions(pdev, MODULE_NAME);
++ if (ret) {
++ IOH_LOG(KERN_ERR, "pci_request_regions FAILED");
++ pci_disable_device(pdev);
++ kfree(adap_info);
++ break;
++ }
++
++ IOH_DEBUG("pci_request_regions returns %d\n", ret);
++
++ /* Wipro 1/13/2010 Use Mem BAR */
++ base_addr = (unsigned long)pci_iomap(pdev, 1, 0);
++
++ if (base_addr == 0) {
++ IOH_LOG(KERN_ERR, "pci_iomap FAILED");
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
++ kfree(adap_info);
++ ret = -ENOMEM;
++ break;
++ }
++
++ IOH_DEBUG("pci_iomap invoked successfully\n");
++
++ ioh_i2c_entcb(ioh_i2c_cb);
++ IOH_DEBUG("ioh_i2c_entcb invoked successfully\n");
++
++ for (i = 0; i < IOH_I2C_MAX_CHN; i++) {
++ adap_info->ioh_i2c_data[i].p_adapter_info = adap_info;
++ adap_info->ioh_i2c_data[i].writereg = ioh_i2c_writereg;
++ adap_info->ioh_i2c_data[i].readreg = ioh_i2c_readreg;
++ adap_info->ioh_i2c_data[i].set_reg_bit = ioh_i2c_setbit;
++ adap_info->ioh_i2c_data[i].clr_reg_bit = ioh_i2c_clrbit;
++
++ adap_info->ioh_i2c_data[i].ioh_i2c_adapter.owner =
++ THIS_MODULE;
++ adap_info->ioh_i2c_data[i].ioh_i2c_adapter.class =
++ I2C_CLASS_HWMON;
++ strcpy(adap_info->ioh_i2c_data[i].ioh_i2c_adapter.name,
++ "ioh_i2c");
++ adap_info->ioh_i2c_data[i].ioh_i2c_adapter.algo =
++ &ioh_i2c_algorithm;
++ adap_info->ioh_i2c_data[i].ioh_i2c_adapter.algo_data =
++ &adap_info->ioh_i2c_data[i];
++
++ /* (i * 0x80) + base_addr; */
++ adap_info->ioh_i2c_data[i].ioh_i2c_base_address =
++ base_addr;
++
++ adap_info->ioh_i2c_data[i].ioh_i2c_adapter.dev.parent =
++ &pdev->dev;
++
++ ret =
++ i2c_add_adapter(&
++ (adap_info->ioh_i2c_data[i].
++ ioh_i2c_adapter));
++
++ if (ret) {
++ IOH_LOG(KERN_ERR, "i2c_add_adapter FAILED");
++
++ pci_iounmap(pdev, (void *)base_addr);
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
++ kfree(adap_info);
++ break;
++ }
++
++ IOH_DEBUG("i2c_add_adapter returns %d for channel-%d\n",
++ ret, i);
++ (void)ioh_i2c_init(&adap_info->ioh_i2c_data[i]);
++ IOH_DEBUG("ioh_i2c_init invoked successfully \n");
++
++ }
++
++ if (ret)
++ break;
++
++ ret = request_irq(pdev->irq, &ioh_i2c_handler, IRQF_SHARED,
++ MODULE_NAME, (void *)adap_info);
++
++ if (ret) {
++ IOH_DEBUG("request_irq Failed\n");
++
++ for (i = 0; i < IOH_I2C_MAX_CHN; i++) {
++ i2c_del_adapter(&
++ (adap_info->
++ ioh_i2c_data
++ [i].ioh_i2c_adapter));
++ }
++
++ pci_iounmap(pdev, (void *)base_addr);
++
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
++ kfree(adap_info);
++ break;
++ }
++
++ IOH_DEBUG("request_irq returns %d\n", ret);
++
++ IOH_DEBUG("ioh_i2c_probe returns %d\n", IOH_I2C_SUCCESS);
++ pci_set_drvdata(pdev, (void *)adap_info);
++ return IOH_I2C_SUCCESS;
++ } while (0);
++
++ return ret;
++}
++
++/*! @ingroup I2C_PCILayerAPI
++ @fn ioh_i2c_remove(struct pci_dev *pdev)
++ @remarks The main tasks performed by this method are:
++ - Disable interrupts.
++ - Unregister interrupt handler.
++ - Unregister i2c_adapter.
++ - Release IO memory.
++ - Release PCI regions.
++ - Disable PCI device.
++ @note This function is invoked when the IOH I2C driver module is
++ unloaded from the system using rmmod command or when the
++ IOH I2C device is removed from the system.
++ @param pdev [@ref INOUT] contains reference to
++ PCI device descriptor for the peripheral
++ @retval None
++ @see ioh_i2c_pcidriver
++ <hr>
++ */
++
++static void __devexit ioh_i2c_remove(struct pci_dev *pdev)
++{
++ int i;
++
++ struct adapter_info *adap_info = pci_get_drvdata(pdev);
++
++ IOH_DEBUG(" invoked function pci_get_drvdata successfully\n");
++
++ for (i = 0; i < IOH_I2C_MAX_CHN; i++) {
++ ioh_i2c_disbl_int(&adap_info->ioh_i2c_data[i]);
++
++ if (i == (IOH_I2C_MAX_CHN - 1)) {
++ free_irq(pdev->irq, (void *)adap_info);
++ IOH_DEBUG(" free_irq invoked successfully\n");
++ }
++
++ i2c_del_adapter(&(adap_info->ioh_i2c_data[i].ioh_i2c_adapter));
++
++ IOH_DEBUG(" invoked i2c_del_adapter successfully\n");
++
++ }
++
++ if (adap_info->ioh_i2c_data[0].ioh_i2c_base_address) {
++ pci_iounmap(pdev,
++ (void *)adap_info->ioh_i2c_data[0].
++ ioh_i2c_base_address);
++ IOH_DEBUG(" pci_iounmap invoked successfully\n");
++ adap_info->ioh_i2c_data[0].ioh_i2c_base_address = 0;
++ }
++
++ pci_set_drvdata(pdev, NULL);
++
++ pci_release_regions(pdev);
++ IOH_DEBUG(" pci_release_regions invoked successfully\n");
++
++ pci_disable_device(pdev);
++ kfree(adap_info);
++ IOH_DEBUG(" pci_disable_device invoked successfully\n");
++ IOH_DEBUG(" ioh_i2c_remove invoked successfully\n");
++}
++
++#ifdef CONFIG_PM
++
++/*! @ingroup I2C_PCILayerAPI
++ @fn ioh_i2c_suspend(struct pci_dev* pdev,pm_message_t state)
++ @remarks The main tasks performed by this method are:
++ - Wait for any transfer in progress to complete.
++ - Disable interrupts.
++ - Save PCI device state.
++ - Disable PM notifications.
++ - Disable the PCI device.
++ - Move the device to D3Hot power state.
++ @note This function is invoked by the kernel when the system is
++ transitioning to low power state.
++ @param pdev [@ref INOUT]
++ contains reference to PCI device descriptor for the peripheral
++ @param state [@ref IN]
++ Represents the low power state the system is transitioning to.
++ @retval int
++ - @ref IOH_I2C_SUCCESS Function returns successfully.
++ - -ENOMEM pci_save_state fails.
++ @see ioh_i2c_pcidriver
++ */
++static int ioh_i2c_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++
++ int i;
++ int ret;
++
++ struct adapter_info *adap_info = pci_get_drvdata(pdev);
++
++ IOH_DEBUG(" invoked function pci_get_drvdata successfully\n");
++
++ adap_info->ioh_i2c_suspended = true;
++
++ for (i = 0; i < IOH_I2C_MAX_CHN; i++) {
++ while ((adap_info->ioh_i2c_data[i].ioh_i2c_xfer_in_progress ==
++ true)) {
++ /* It is assumed that any pending transfer will
++ * be completed after the delay
++ */
++ msleep(1);
++ }
++ /* Disable the i2c interrupts */
++ ioh_i2c_disbl_int(&adap_info->ioh_i2c_data[i]);
++ }
++
++ IOH_DEBUG("I2CSR = %x\n",
++ ioh_i2c_readreg(adap_info->
++ ioh_i2c_data[0].ioh_i2c_base_address, 0x08));
++ IOH_DEBUG("I2CBUFSTA = %x\n",
++ ioh_i2c_readreg(adap_info->
++ ioh_i2c_data[0].ioh_i2c_base_address, 0x30));
++ IOH_DEBUG("I2CESRSTA = %x\n",
++ ioh_i2c_readreg(adap_info->
++ ioh_i2c_data[0].ioh_i2c_base_address, 0x44));
++
++ IOH_DEBUG(" invoked function ioh_i2c_disbl_int successfully\n");
++
++ ret = pci_save_state(pdev);
++
++ if (ret) {
++ IOH_LOG(KERN_ERR, "pci_save_state failed\n");
++ return ret;
++ }
++
++ IOH_DEBUG("Invoked pci_save_state successfully\n");
++
++ pci_enable_wake(pdev, PCI_D3hot, 0);
++ IOH_DEBUG("Invoked pci_enable_wake successfully\n");
++
++ pci_disable_device(pdev);
++ IOH_DEBUG("Invoked pci_disable_device successfully\n");
++
++ pci_set_power_state(pdev, pci_choose_state(pdev, state));
++ IOH_DEBUG("Invoked pci_set_power_state successfully\n");
++ IOH_DEBUG("ioh_i2c_suspend returns %d\n", IOH_I2C_SUCCESS);
++
++ return IOH_I2C_SUCCESS;
++
++}
++
++/*! @ingroup I2C_PCILayerAPI
++ @fn ioh_i2c_resume(struct pci_dev* pdev)
++ @remarks The main tasks performed by this method are:
++ - Move device to D0 power state.
++ - Restore PCI device state.
++ - Enable the PCI device state.
++ - Disable PM notifications.
++ - Initialize IOH I2C device.
++ @note This function is invoked by the kernel when the system is
++ transitioning to normal power state from a lower power state.
++ @param pdev [@ref INOUT]
++ contains reference to PCI device descriptor for the peripheral
++ @retval int
++ - @ref IOH_I2C_SUCCESS Function returns successfully.
++ - -EIO pci_enable_device fails.
++ - -EINVAL pci_enable_device fails.
++ @see ioh_i2c_pcidriver
++ <hr>
++ */
++static int ioh_i2c_resume(struct pci_dev *pdev)
++{
++
++ struct adapter_info *adap_info = pci_get_drvdata(pdev);
++ int i;
++
++ IOH_DEBUG(" invoked function pci_get_drvdata successfully\n");
++
++ pci_set_power_state(pdev, PCI_D0);
++ IOH_DEBUG("Invoked pci_set_power_state successfully\n");
++
++ pci_restore_state(pdev);
++ IOH_DEBUG("Invoked pci_restore_state successfully\n");
++
++ if (pci_enable_device(pdev) < 0) {
++ IOH_LOG(KERN_ERR,
++ "pci_enable_device failed in ioh_i2c_resume\n");
++ return -EIO;
++ }
++
++ pci_enable_wake(pdev, PCI_D3hot, 0);
++
++ IOH_DEBUG("Invoked pci_enable_wake successfully\n");
++
++ for (i = 0; i < IOH_I2C_MAX_CHN; i++)
++ (void)ioh_i2c_init(&adap_info->ioh_i2c_data[i]);
++
++ IOH_DEBUG("Invoked ioh_i2c_init successfully\n");
++
++ adap_info->ioh_i2c_suspended = false;
++
++ IOH_DEBUG("ioh_i2c_resume return %d\n", IOH_I2C_SUCCESS);
++ return IOH_I2C_SUCCESS;
++}
++
++#endif
++
++/*! @ingroup I2C_InterfaceLayerAPI
++ @fn ioh_i2c_pci_init(void)
++ @brief This function implements the module entry point.
++ @remarks This function invoked at module insertion
++ The main task performed by this method:
++ - Register the PCI driver with PCI core
++ using pci_register_driver API.
++ @param None
++ @retval int
++ - 0 Function returns successfully.
++ - -EEXIST pci_register_driver fails
++ - -EINVAL pci_register_driver fails
++ - -ENOMEM pci_register_driver fails
++ <hr>
++ */
++static int __init ioh_i2c_pci_init(void)
++{
++
++ IOH_DEBUG
++ ("ioh_i2c_pci_init : Invoked pci_register_driver successfully\n");
++ return pci_register_driver(&ioh_i2c_pcidriver);
++}
++
++/*! @ingroup I2C_InterfaceLayerAPI
++ @fn ioh_i2c_pci_exit(void)
++ @brief This function implements the module exit point.
++ @remarks This function is invoked when IOH I2C driver module is being
++ removed from the system. The main task performed by this method:
++ - Unregister the PCI driver with PCI core using
++ the pci_unregister_driver API
++ @param None
++ @retval None
++ */
++static void __exit ioh_i2c_pci_exit(void)
++{
++ IOH_DEBUG
++ ("ioh_i2c_pci_exit : Invoked pci_unregister_driver successfully \n ");
++ pci_unregister_driver(&ioh_i2c_pcidriver);
++
++}
++
++MODULE_DESCRIPTION("IOH I2C PCI Driver");
++MODULE_LICENSE("GPL");
++module_init(ioh_i2c_pci_init);
++module_exit(ioh_i2c_pci_exit);
++module_param(ioh_i2c_speed, int, (S_IRUSR | S_IWUSR));
++module_param(ioh_i2c_clk, int, (S_IRUSR | S_IWUSR));
+diff -urN linux-2.6.33.1/drivers/i2c/i2c-dev.c topcliff-2.6.33.1/drivers/i2c/i2c-dev.c
+--- linux-2.6.33.1/drivers/i2c/i2c-dev.c 2010-03-16 01:09:39.000000000 +0900
++++ topcliff-2.6.33.1/drivers/i2c/i2c-dev.c 2010-03-24 11:21:29.000000000 +0900
+@@ -36,7 +36,7 @@
+ #include <linux/i2c-dev.h>
+ #include <linux/jiffies.h>
+ #include <asm/uaccess.h>
+-
++#include "busses/pch_i2c_hal.h"
+ static struct i2c_driver i2cdev_driver;
+
+ /*
+@@ -147,6 +147,11 @@
+ if (tmp==NULL)
+ return -ENOMEM;
+
++ if (copy_from_user(tmp, buf, count)) {
++ kfree(tmp);
++ return -EFAULT;
++ }
++
+ pr_debug("i2c-dev: i2c-%d reading %zu bytes.\n",
+ iminor(file->f_path.dentry->d_inode), count);
+
+@@ -372,6 +377,12 @@
+ struct i2c_client *client = (struct i2c_client *)file->private_data;
+ unsigned long funcs;
+
++ unsigned long ioh_mode;
++ int ret;
++
++ struct i2c_msg msg;
++ unsigned char msgbuf[1];
++
+ dev_dbg(&client->adapter->dev, "ioctl, cmd=0x%02x, arg=0x%02lx\n",
+ cmd, arg);
+
+@@ -427,6 +438,22 @@
+ */
+ client->adapter->timeout = msecs_to_jiffies(arg * 10);
+ break;
++ case I2C_MODE_SEL:
++ ioh_mode = arg;
++
++ if (ioh_mode <= 4) {
++ msgbuf[0] = ioh_mode;
++ msg.buf = msgbuf;
++ msg.len = 1;
++ msg.flags = (ioh_mode <=1) ? \
++ (IOH_BUFFER_MODE_ENABLE) : \
++ (IOH_EEPROM_SW_RST_MODE_ENABLE);
++ ret = i2c_transfer(client->adapter, &msg, 1);
++ } else {
++ printk(KERN_ERR "I2C mode sel:Invalid mode \n");
++ ret = -EINVAL;
++ }
++ return ret;
+ default:
+ /* NOTE: returning a fault code here could cause trouble
+ * in buggy userspace code. Some old kernel bugs returned
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-ieee1588.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-ieee1588.patch
new file mode 100644
index 0000000..ac1bea9
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-ieee1588.patch
@@ -0,0 +1,7945 @@
+
+
+From: Masayuki Ohtake <masa-korg@dsn.okisemi.com>
+Subject: OKI Semiconductor PCH IEEE1588 driver
+
+This driver implements IEEE1588 controls for PCH.
+
+Signed-off-by: Masayuki Ohtake <masa-korg@dsn.okisemi.com>
+Acked-by: Wang Qi <qi.wang@intel.com>
+
+---
+ drivers/char/Kconfig | 7 ++
+ drivers/char/Makefile | 2
+ drivers/char/pch_ieee1588/Makefile | 10
+ drivers/char/pch_ieee1588/pch_1588_hal.c | 4040
+ drivers/char/pch_ieee1588/pch_1588_hal.h | 885
+ drivers/char/pch_ieee1588/pch_1588_main.c | 1192
+ drivers/char/pch_ieee1588/pch_1588_main.h | 702
+ drivers/char/pch_ieee1588/pch_1588_pci.c | 700
+ drivers/char/pch_ieee1588/pch_1588_pci.h | 122
+ drivers/char/pch_ieee1588/pch_common.h | 146
+ drivers/char/pch_ieee1588/pch_debug.h | 60
++++++++++++++++++++++++++++++++ 11 files changed, zz insertions(+)
+diff -urN linux-2.6.33-rc3/drivers/char/Kconfig topcliff-2.6.33-rc3/drivers/char/Kconfig
+--- linux-2.6.33-rc3/drivers/char/Kconfig 2010-01-06 09:02:46.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/char/Kconfig 2010-03-09 10:14:52.000000000 +0900
+@@ -4,6 +4,13 @@
+
+ menu "Character devices"
+
++config PCH_IEEE1588
++ tristate "PCH IEEE1588"
++ depends on PCI
++ help
++ If you say yes to this option, support will be included for the
++ PCH IEEE1588 Host controller.
++
+ config VT
+ bool "Virtual terminal" if EMBEDDED
+ depends on !S390
+diff -urN linux-2.6.33-rc3/drivers/char/Makefile topcliff-2.6.33-rc3/drivers/char/Makefile
+--- linux-2.6.33-rc3/drivers/char/Makefile 2010-01-06 09:02:46.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/char/Makefile 2010-03-05 22:57:39.000000000 +0900
+@@ -111,6 +111,8 @@
+ obj-$(CONFIG_JS_RTC) += js-rtc.o
+ js-rtc-y = rtc.o
+
++obj-$(CONFIG_PCH_IEEE1588) += pch_ieee1588/
++
+ # Files generated that shall be removed upon make clean
+ clean-files := consolemap_deftbl.c defkeymap.c
+
+diff -urN linux-2.6.33-rc3/drivers/char/pch_ieee1588/Makefile topcliff-2.6.33-rc3/drivers/char/pch_ieee1588/Makefile
+--- linux-2.6.33-rc3/drivers/char/pch_ieee1588/Makefile 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/char/pch_ieee1588/Makefile 2010-03-10 04:52:54.000000000 +0900
+@@ -0,0 +1,10 @@
++ifeq ($(CONFIG_IEEE1588_DEBUG_CORE),y)
++EXTRA_CFLAGS += -DDEBUG
++endif
++
++obj-$(CONFIG_PCH_IEEE1588) += pch_ieee1588.o
++
++#for A0_A1_SAMPLE LSI board
++EXTRA_CFLAGS+=-DIOH_IEEE1588_A0_A1_SAMPLE_BUG
++
++pch_ieee1588-objs := pch_1588_main.o pch_1588_pci.o pch_1588_hal.o
+diff -urN linux-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_hal.c topcliff-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_hal.c
+--- linux-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_hal.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_hal.c 2010-03-09 10:34:22.000000000 +0900
+@@ -0,0 +1,4040 @@
++ /*!
++ * @file ioh_1588_hal.c
++ * @brief
++ * This file has the definitions for HAL Layer APIs.
++ * @version 0.92
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * modified to support Intel IOH GE IEEE 1588 hardware
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ * derived from
++ * IEEE 1588 Time Synchronization Driver for Intel EP80579
++ * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ */
++
++#include <linux/io.h>
++#include "pch_common.h"
++#include "pch_debug.h"
++#include <linux/module.h>
++#include "pch_1588_hal.h"
++
++/*
++ * HAL API definitions for the IEEE 1588 module
++ */
++
++/*! @ingroup IEEE1588
++ * @def INLINE
++ * @brief The macro used instead of the keyword __inline.
++*/
++#define INLINE inline
++
++/*Register read/write macros*/
++#define IOH_REG_32_READ(regAddr, varRef) (*(varRef) = IOH_READ32(regAddr))
++#define IOH_REG_32_WRITE(regAddr, varValue) IOH_WRITE32(varValue, regAddr)
++#define IOH_BIT_SET_CHECK(regAddr, bitMask) \
++ ((IOH_READ32(regAddr) & (bitMask)) == (bitMask))
++
++#ifdef IOH_IEEE1588_A1_SAMPLE_BUG
++/*global variable to store tick rate*/
++static unsigned long gTickRateApp;
++#endif
++
++/* function prototypes */
++/* TS_Control register access routines */
++static INLINE void ioh_1588_pps_imask_set(void);
++static INLINE void ioh_1588_amms_imask_set(void);
++static INLINE void ioh_1588_asms_imask_set(void);
++static INLINE void ioh_1588_ttm_imask_set(void);
++static INLINE void ioh_1588_pps_imask_clear(void);
++static INLINE void ioh_1588_amms_imask_clear(void);
++static INLINE void ioh_1588_asms_imask_clear(void);
++static INLINE void ioh_1588_ttm_imask_clear(void);
++static INLINE unsigned long ioh_1588_pps_imask_get(void);
++static INLINE unsigned long ioh_1588_amms_imask_get(void);
++static INLINE unsigned long ioh_1588_asms_imask_get(void);
++static INLINE unsigned long ioh_1588_ttm_imask_get(void);
++static INLINE void ioh_1588_block_reset(void);
++
++/* TS_Event register access routines */
++static INLINE unsigned long ioh_1588_pps_evt_get(void);
++static INLINE unsigned long ioh_1588_amms_evt_get(void);
++static INLINE unsigned long ioh_1588_asms_evt_get(void);
++static INLINE unsigned long ioh_1588_ttm_evt_get(void);
++static INLINE void ioh_1588_pps_evt_clear(void);
++static INLINE void ioh_1588_amms_evt_clear(void);
++static INLINE void ioh_1588_asms_evt_clear(void);
++static INLINE void ioh_1588_ttm_evt_clear(void);
++
++/* TS_Addend register access routines - Frequency Scaling Value */
++static INLINE void ioh_1588_addend_set(unsigned long fsv);
++static INLINE void ioh_1588_addend_get(unsigned long *fsv);
++
++/* TS_PPS_Compare register access routines */
++static INLINE void ioh_1588_pps_set(unsigned long fsv);
++static INLINE void ioh_1588_pps_get(unsigned long *fsv);
++/* TS_SYSTimeLo, Hi registers access routines */
++static INLINE void ioh_1588_sys_snap_set(unsigned long sys_time_low,
++ unsigned long sys_time_high);
++static INLINE void ioh_1588_sys_snap_get(unsigned long *sys_time_low,
++ unsigned long *sys_time_high);
++/* TS_TrgtTimeLo, Hi registers access routines */
++static INLINE void ioh_1588_tgt_snap_set(unsigned long tgt_time_low,
++ unsigned long tgt_time_high);
++static INLINE void ioh_1588_tgt_snap_get(unsigned long *tgt_time_low,
++ unsigned long *tgt_time_high);
++/* TS_ASMSLo, Hi registers access routines */
++static INLINE void ioh_1588_aux_slave_snap_get(unsigned long *asms_low,
++ unsigned long *asms_high);
++/* TS_AMMSLo, Hi registers access routines */
++static INLINE void ioh_1588_aux_master_snap_get(unsigned long *amms_low,
++ unsigned long *amms_high);
++
++/* TS_Ch_Control register access routines */
++static INLINE void ioh_1588_master_mode_set(unsigned long master_mode);
++static INLINE unsigned long ioh_1588_master_mode_get(void);
++static INLINE void ioh_1588_timestamp_all_set(unsigned long allMsg);
++static INLINE unsigned long ioh_1588_timestamp_all_get(void);
++static INLINE void ioh_1588_op_mode_set(unsigned long mode);
++static INLINE unsigned long ioh_1588_op_mode_get(void);
++static INLINE void ioh_1588_version_set(unsigned long versionVal);
++static INLINE unsigned long ioh_1588_version_get(void);
++
++/* TS_Ch_Event register access routines */
++static INLINE unsigned long ioh_1588_rx_snap_evt(void);
++static INLINE unsigned long ioh_1588_tx_snap_evt(void);
++static INLINE void ioh_1588_rx_snap_evt_clear(void);
++static INLINE void ioh_1588_tx_snap_evt_clear(void);
++/* TS_TxSnapLo, Hi registers access routines */
++static INLINE void ioh_1588_tx_snap_get(unsigned long *txs_low,
++ unsigned long *txs_high);
++/* TS_RxSnapLo, Hi registers access routines */
++static INLINE void ioh_1588_rx_snap_get(unsigned long *rxs_low,
++ unsigned long *rxs_high);
++/* TS_srcUUIDLo, Hi registers access routines */
++static INLINE void ioh_1588_uuid_seqid_get(unsigned long *uuid_low,
++ unsigned long *uuid_high,
++ unsigned int *seq_id);
++
++static INLINE unsigned long ioh_1588_can_snap_valid(void);
++static INLINE unsigned long ioh_1588_can_snap_ovr(void);
++static INLINE void ioh_1588_can_snap_valid_clear(void);
++static INLINE void ioh_1588_can_snap_ovr_clear(void);
++static INLINE void ioh_1588_can_snap_get(unsigned long *rxs_low,
++ unsigned long *rxs_high);
++
++static INLINE void ioh_1588_eth_enable_set(void);
++static INLINE void ioh_1588_eth_enable_clear(void);
++static INLINE unsigned long ioh_1588_eth_enable_get(void);
++static INLINE void ioh_1588_can_enable_set(void);
++static INLINE void ioh_1588_can_enable_clear(void);
++static INLINE unsigned long ioh_1588_can_enable_get(void);
++static INLINE void ioh_1588_station_set(unsigned long station,
++ unsigned long value);
++static INLINE void ioh_1588_station_get(unsigned long station,
++ unsigned long *value);
++
++/* Masks to extract High and Low SHORTs from unsigned long values */
++#define IOH_1588_MSB_SHORT_MASK (0xFFFF0000)
++#define IOH_1588_LSB_SHORT_MASK (0x0000FFFF)
++
++/* Location of SeqID in the register */
++#define IOH_1588_SID_LOC (16)
++
++/* Variable declarations */
++
++/**
++ * Client registered callback routines for
++ * a) the target time reached or exceeded interrupt notification
++ * b) the auxiliary time stamps availability interrupt notification
++ * c) the pulse per second match interrupt notification
++ */
++static ioh1588TargetTimeCallback ioh_tt_cbptr =
++ (ioh1588TargetTimeCallback) NULL;
++static ioh1588AuxTimeCallback ioh_am_cbptr = (ioh1588AuxTimeCallback) NULL;
++static ioh1588AuxTimeCallback ioh_as_cbptr = (ioh1588AuxTimeCallback) NULL;
++static ioh1588PulsePerSecondCallback ioh_pps_cbptr =
++ (ioh1588PulsePerSecondCallback) NULL;
++
++/* The transmit and receive timestamp statistics */
++static struct ioh1588Stats ioh_1588_stats = { 0, 0 };
++
++/* To save the state of all registers of the module */
++static struct ioh_1588_regs_set ioh_1588_regs;
++
++/* IO mapped virtual address for the 1588 registers */
++static unsigned long ioh_1588_base;
++
++/* local functions definitions. */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_pps_imask_set(void)
++ * @brief Enable PPS Interrupt
++ * @param None
++ * @retval None
++*/
++
++static INLINE void ioh_1588_pps_imask_set(void)
++{
++ /* SET the ppsm bit */
++ IOH_SET_ADDR_BIT(ioh_1588_base + IOH_1588_TSC_OFFSET,
++ IOH_1588_TSC_PPSM_MASK);
++}
++
++/** @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_amms_imask_set(void)
++ * @brief Enable Auxiliary Master Mode Snapshot Interrupt
++ * @param None
++ * @retval None
++*/
++static INLINE void ioh_1588_amms_imask_set(void)
++{
++ /* SET the amms bit */
++ IOH_SET_ADDR_BIT(ioh_1588_base + IOH_1588_TSC_OFFSET,
++ IOH_1588_TSC_AMMS_MASK);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_asms_imask_set(void)
++ * @brief Enable Auxiliary Slave Mode Snapshot Interrupt
++ * @param None
++ * @retval None
++*/
++static INLINE void ioh_1588_asms_imask_set(void)
++{
++ /* SET the asms bit */
++ IOH_SET_ADDR_BIT(ioh_1588_base + IOH_1588_TSC_OFFSET,
++ IOH_1588_TSC_ASMS_MASK);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_ttm_imask_set(void)
++ * @brief Enable Target Time Interrupt
++ * @param None
++ * @retval None
++*/
++static INLINE void ioh_1588_ttm_imask_set(void)
++{
++ /* SET the ttm bit */
++ IOH_SET_ADDR_BIT(ioh_1588_base + IOH_1588_TSC_OFFSET,
++ IOH_1588_TSC_TTM_MASK);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE unsigned long ioh_1588_pps_imask_get(void)
++ * @brief Get PPS Interrupt Mask value
++ * @param None
++ * @retval unsigned long
++*/
++static INLINE unsigned long ioh_1588_pps_imask_get(void)
++{
++ /* Is the ppsm bit SET? */
++ return IOH_BIT_SET_CHECK(ioh_1588_base + IOH_1588_TSC_OFFSET,
++ IOH_1588_TSC_PPSM_MASK);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE unsigned long ioh_1588_amms_imask_get(void)
++ * @brief Get Auxiliary Master Mode Snapshot Interrupt Mask value
++ * @param None
++ * @retval unsigned long
++*/
++static INLINE unsigned long ioh_1588_amms_imask_get(void)
++{
++ /* Is the amms bit SET? */
++ return IOH_BIT_SET_CHECK(ioh_1588_base + IOH_1588_TSC_OFFSET,
++ IOH_1588_TSC_AMMS_MASK);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ @fn static INLINE unsigned long ioh_1588_asms_imask_get(void)
++ @brief Get Auxiliary Slave Mode Snapshot Interrupt Mask value
++ @param None
++ @retval unsigned long
++*/
++static INLINE unsigned long ioh_1588_asms_imask_get(void)
++{
++ /* Is the asms bit SET? */
++ return IOH_BIT_SET_CHECK(ioh_1588_base + IOH_1588_TSC_OFFSET,
++ IOH_1588_TSC_ASMS_MASK);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE unsigned long ioh_1588_ttm_imask_get(void)
++ * @brief Get Target Time Interrupt Mask value
++ * @param None.
++ * @retval unsigned long
++*/
++static INLINE unsigned long ioh_1588_ttm_imask_get(void)
++{
++ /* Is the ttm bit SET? */
++ return IOH_BIT_SET_CHECK(ioh_1588_base + IOH_1588_TSC_OFFSET,
++ IOH_1588_TSC_TTM_MASK);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ @fn static INLINE void ioh_1588_pps_imask_clear(void)
++ @brief Disable PPS Interrupt
++ @param None
++ @retval None
++*/
++static INLINE void ioh_1588_pps_imask_clear(void)
++{
++ /* CLEAR the ppsm bit */
++ IOH_CLR_ADDR_BIT(ioh_1588_base + IOH_1588_TSC_OFFSET,
++ IOH_1588_TSC_PPSM_MASK);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_amms_imask_clear(void)
++ * @brief Disable Auxiliary Master Mode Snapshot Interrupt.
++ * @param None.
++ * @retval None
++*/
++static INLINE void ioh_1588_amms_imask_clear(void)
++{
++ /* CLEAR the amms bit */
++ IOH_CLR_ADDR_BIT(ioh_1588_base + IOH_1588_TSC_OFFSET,
++ IOH_1588_TSC_AMMS_MASK);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_asms_imask_clear(void)
++ * @brief Disable Auxiliary Slave Mode Snapshot Interrupt
++ * @param None
++ * @retval None
++*/
++static INLINE void ioh_1588_asms_imask_clear(void)
++{
++ /* CLEAR the asms bit */
++ IOH_CLR_ADDR_BIT(ioh_1588_base + IOH_1588_TSC_OFFSET,
++ IOH_1588_TSC_ASMS_MASK);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_ttm_imask_clear(void)
++ * @brief Disable Target Time Interrupt
++ * @param None
++ * @retval None
++*/
++static INLINE void ioh_1588_ttm_imask_clear(void)
++{
++ /* CLEAR the ttm bit */
++ IOH_CLR_ADDR_BIT(ioh_1588_base + IOH_1588_TSC_OFFSET,
++ IOH_1588_TSC_TTM_MASK);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_block_reset(void)
++ * @brief Reset Hardware Assist block
++ * @param None
++ * @retval None
++*/
++static INLINE void ioh_1588_block_reset(void)
++{
++ /* SET the rst bit */
++ IOH_SET_ADDR_BIT(ioh_1588_base + IOH_1588_TSC_OFFSET,
++ IOH_1588_TSC_RESET);
++ /* CLEAR the rst bit */
++ IOH_CLR_ADDR_BIT(ioh_1588_base + IOH_1588_TSC_OFFSET,
++ IOH_1588_TSC_RESET);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE unsigned long ioh_1588_pps_evt_get(void)
++ * @brief Poll for PPS event
++ * @param None
++ * @retval unsigned long
++*/
++static INLINE unsigned long ioh_1588_pps_evt_get(void)
++{
++ /* Check for PPS event */
++ return IOH_BIT_SET_CHECK(ioh_1588_base + IOH_1588_TSE_OFFSET,
++ IOH_1588_TSE_PPS);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE unsigned long ioh_1588_amms_evt_get(void)
++ * @brief Poll for Auxiliary Master Mode Snapshot Captured event
++ * @param None
++ * @retval unsigned long
++*/
++static INLINE unsigned long ioh_1588_amms_evt_get(void)
++{
++ /* Check for AMMS event */
++ return IOH_BIT_SET_CHECK(ioh_1588_base + IOH_1588_TSE_OFFSET,
++ IOH_1588_TSE_SNM);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE unsigned long ioh_1588_asms_evt_get(void)
++ * @brief Poll for Auxiliary Slave Mode Snapshot Captured event
++ * @param None
++ * @retval unsigned long
++*/
++static INLINE unsigned long ioh_1588_asms_evt_get(void)
++{
++ /* Check ASMS event */
++ return IOH_BIT_SET_CHECK(ioh_1588_base + IOH_1588_TSE_OFFSET,
++ IOH_1588_TSE_SNS);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE unsigned long ioh_1588_ttm_evt_get(void)
++ * @brief Poll for Target Time Reached event
++ * @param None
++ * @retval unsigned long
++*/
++static INLINE unsigned long ioh_1588_ttm_evt_get(void)
++{
++ /* Check target time pending event */
++ return IOH_BIT_SET_CHECK(ioh_1588_base + IOH_1588_TSE_OFFSET,
++ IOH_1588_TSE_TTIPEND);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_pps_evt_clear(void)
++ * @brief Clear PPS event
++ * @param None
++ * @retval None
++*/
++static INLINE void ioh_1588_pps_evt_clear(void)
++{
++ /* clear the pps bit */
++ IOH_SET_ADDR_BIT(ioh_1588_base + IOH_1588_TSE_OFFSET, IOH_1588_TSE_PPS);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_amms_evt_clear(void)
++ * @brief Clear Auxiliary Master Mode Snapshot Captured event
++ * @param None.
++ * @retval None.
++*/
++static INLINE void ioh_1588_amms_evt_clear(void)
++{
++ /* clear the snm bit */
++ IOH_SET_ADDR_BIT(ioh_1588_base + IOH_1588_TSE_OFFSET, IOH_1588_TSE_SNM);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_asms_evt_clear(void)
++ * @brief Clear Auxiliary Slave Mode Snapshot Captured event
++ * @param None
++ * @retval None
++*/
++static INLINE void ioh_1588_asms_evt_clear(void)
++{
++ /* clear the sns bit */
++ IOH_SET_ADDR_BIT(ioh_1588_base + IOH_1588_TSE_OFFSET, IOH_1588_TSE_SNS);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_ttm_evt_clear(void)
++ * @brief Clear Target Time Reached event
++ * @param None
++ * @retval None
++*/
++static INLINE void ioh_1588_ttm_evt_clear(void)
++{
++ /* CLEAR the ttipend bit */
++ IOH_SET_ADDR_BIT(ioh_1588_base + IOH_1588_TSE_OFFSET,
++ IOH_1588_TSE_TTIPEND);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_sys_snap_set(
++ * unsigned long sys_time_low, unsigned long sys_time_high)
++ * @brief Set System Time value
++ * @param sys_time_low [IN] The system time low.
++ * @param sys_time_high [IN] The system time high.
++ * @retval None
++*/
++static INLINE void
++ioh_1588_sys_snap_set(unsigned long sys_time_low, unsigned long sys_time_high)
++{
++ /* Update the System Time Low Register contents */
++ IOH_REG_32_WRITE(ioh_1588_base + IOH_1588_STL_OFFSET, sys_time_low);
++
++ /* Update the System Time High Register contents */
++ IOH_REG_32_WRITE(ioh_1588_base + IOH_1588_STH_OFFSET, sys_time_high);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_sys_snap_get(
++ * unsigned long *sys_time_low, unsigned long *sys_time_high)
++ * @brief Get System Time value (low and high words)
++ * @param sys_time_low [OUT] The system time low.
++ * @param sys_time_high [OUT] The system time high.
++ * @retval None
++*/
++static INLINE void
++ioh_1588_sys_snap_get(unsigned long *sys_time_low, unsigned long *sys_time_high)
++{
++ /* Get the System Time Low Register contents */
++ IOH_REG_32_READ(ioh_1588_base + IOH_1588_STL_OFFSET, sys_time_low);
++
++ /* Get the System Time High Register contents */
++ IOH_REG_32_READ(ioh_1588_base + IOH_1588_STH_OFFSET, sys_time_high);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_tgt_snap_set (
++ * unsigned long tgt_time_low, unsigned long tgt_time_high)
++ * @brief Set Target Time value
++ * @param tgt_time_low [IN] The target time low.
++ * @param tgt_time_high [IN] The target time high.
++ * @retval None.
++*/
++static INLINE void
++ioh_1588_tgt_snap_set(unsigned long tgt_time_low, unsigned long tgt_time_high)
++{
++ /* Update the Target Time Low Register contents */
++ IOH_REG_32_WRITE(ioh_1588_base + IOH_1588_TTL_OFFSET, tgt_time_low);
++
++ /* Update the Target Time High Register contents */
++ IOH_REG_32_WRITE(ioh_1588_base + IOH_1588_TTH_OFFSET, tgt_time_high);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_tgt_snap_get(
++ * unsigned long *tgt_time_low, unsigned long *tgt_time_high)
++ * @brief Get Target Time value
++ * @param tgt_time_low [OUT] The target time low.
++ * @param tgt_time_high [OUT] The target time high.
++ * @retval None
++*/
++static INLINE void
++ioh_1588_tgt_snap_get(unsigned long *tgt_time_low, unsigned long *tgt_time_high)
++{
++ /* Get the Target Time Low Register contents */
++ IOH_REG_32_READ(ioh_1588_base + IOH_1588_TTL_OFFSET, tgt_time_low);
++
++ /* Get the Target Time High Register contents */
++ IOH_REG_32_READ(ioh_1588_base + IOH_1588_TTH_OFFSET, tgt_time_high);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_addend_set(unsigned long fsv)
++ * @brief Set Frequency Scaling Value
++ * @param fsv [IN] Frequency
++ * @retval None
++*/
++static INLINE void ioh_1588_addend_set(unsigned long fsv)
++{
++ /* Update the Addend Register contents */
++ IOH_REG_32_WRITE(ioh_1588_base + IOH_1588_ADD_OFFSET, fsv);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_addend_get(unsigned long *fsv)
++ * @brief Get Frequency Scaling Value
++ * @param fsv [OUT] The frequency.
++ * @retval None
++*/
++static INLINE void ioh_1588_addend_get(unsigned long *fsv)
++{
++ /* Get the Addend Register contents */
++ IOH_REG_32_READ(ioh_1588_base + IOH_1588_ADD_OFFSET, fsv);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_pps_set(unsigned long pps)
++ * @brief Set Pulse Per Second Value
++ * @param pps [IN] The pulse per second value.
++ * @retval None.
++*/
++static INLINE void ioh_1588_pps_set(unsigned long pps)
++{
++ /* Update the PPS Compare Register contents */
++ IOH_REG_32_WRITE(ioh_1588_base + IOH_1588_PPS_OFFSET, pps);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_pps_get(unsigned long *pps)
++ * @brief Get Pulse Per Second Value
++ * @param pps [OUT] The pulse per second value.
++ * @retval None.
++*/
++static INLINE void ioh_1588_pps_get(unsigned long *pps)
++{
++ /* Get the PPS Compare Register contents */
++ IOH_REG_32_READ(ioh_1588_base + IOH_1588_PPS_OFFSET, pps);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_aux_master_snap_get(
++ * unsigned long *amms_low, unsigned long *amms_high)
++ * @brief Get AMMS value
++ * @param amms_low [OUT] AMMS low value.
++ * @param amms_high [OUT] AMMS high value.
++ * @retval None.
++*/
++static INLINE void
++ioh_1588_aux_master_snap_get(unsigned long *amms_low, unsigned long *amms_high)
++{
++ /* Get the Auxiliary Master Mode Snapshot Low Register contents */
++ IOH_REG_32_READ(ioh_1588_base + IOH_1588_AMSL_OFFSET, amms_low);
++
++ /* Get the Auxiliary Master Mode Snapshot High Register contents */
++ IOH_REG_32_READ(ioh_1588_base + IOH_1588_AMSH_OFFSET, amms_high);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_aux_slave_snap_get(
++ * unsigned long *asms_low, unsigned long *asms_high)
++ * @brief Get ASMS value
++ * @param asms_low [OUT] The ASMS low value.
++ * @param asms_high [OUT] The ASMS high value.
++ * @retval None
++*/
++static INLINE void
++ioh_1588_aux_slave_snap_get(unsigned long *asms_low, unsigned long *asms_high)
++{
++ /* Get the Auxiliary Slave Mode Snapshot Low Register contents */
++ IOH_REG_32_READ(ioh_1588_base + IOH_1588_ASSL_OFFSET, asms_low);
++
++ /* Get the Auxiliary Slave Mode Snapshot High Register contents */
++ IOH_REG_32_READ(ioh_1588_base + IOH_1588_ASSH_OFFSET, asms_high);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_master_mode_set(
++ * unsigned long master_mode)
++ * @brief Set the channel mode to 1588 Master/Slave
++ * @param master_mode [IN] The channel mode.
++ * @retval None
++*/
++static INLINE void ioh_1588_master_mode_set(unsigned long master_mode)
++{
++ /* SET or CLEAR the Master Mode */
++ if (TRUE == master_mode) {
++ IOH_DEBUG("ioh_1588_master_mode_set: setting master mode\n");
++ /* SET the mm bit */
++ IOH_SET_ADDR_BIT(ioh_1588_base + IOH_1588_CC_OFFSET,
++ IOH_1588_CC_MM);
++ } else {
++ IOH_DEBUG("ioh_1588_master_mode_set: clearing master mode\n");
++ /* CLEAR the mm bit */
++ IOH_CLR_ADDR_BIT(ioh_1588_base + IOH_1588_CC_OFFSET,
++ IOH_1588_CC_MM);
++ }
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE unsigned long ioh_1588_master_mode_get (void)
++ * @brief Check for 1588 master mode of channel
++ * @param None.
++ * @retval unsigned long
++*/
++static INLINE unsigned long ioh_1588_master_mode_get(void)
++{
++ /* Is the mm bit SET? */
++ return IOH_BIT_SET_CHECK(ioh_1588_base + IOH_1588_CC_OFFSET,
++ IOH_1588_CC_MM);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_timestamp_all_set(
++ * unsigned long allMsg)
++ * @brief Set Timestamp all or only PTP messages flag
++ * @param allMsg [IN] All/PTP messages.
++ * @retval None
++*/
++static INLINE void ioh_1588_timestamp_all_set(unsigned long allMsg)
++{
++ /* SET or CLEAR the All Message Timestamping */
++ if (TRUE == allMsg) {
++ IOH_DEBUG
++ ("ioh_1588_timestamp_all_set: time stamp all messages\n");
++ /* SET the ta bit */
++ IOH_SET_ADDR_BIT(ioh_1588_base + IOH_1588_CC_OFFSET,
++ IOH_1588_CC_TA);
++ } else { /* else of if (TRUE == allMsg) */
++
++ IOH_DEBUG
++ ("ioh_1588_timestamp_all_set: time stamp PTP messages \
++ only\n");
++ /* CLEAR the ta bit */
++ IOH_CLR_ADDR_BIT(ioh_1588_base + IOH_1588_CC_OFFSET,
++ IOH_1588_CC_TA);
++ }
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE unsigned long ioh_1588_timestamp_all_get(void)
++ * @brief Check for Timestamp all OR only PTP messages flag
++ * @param None.
++ * @retval unsigned long
++*/
++static INLINE unsigned long ioh_1588_timestamp_all_get(void)
++{
++ /* Is the ta bit SET? */
++ return IOH_BIT_SET_CHECK(ioh_1588_base + IOH_1588_CC_OFFSET,
++ IOH_1588_CC_TA);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_version_set(
++ * unsigned long versionVal)
++ * @brief Set the 1588 version number
++ * @param versionVal [IN] The version value.
++ * @retval None.
++*/
++static INLINE void ioh_1588_version_set(unsigned long versionVal)
++{
++ if (TRUE == versionVal) {
++ IOH_DEBUG
++ ("ioh_1588_version_set supports IEEE1588 v1 and \
++ IEEE1588-2008\n");
++ /* SET the version bit */
++ IOH_SET_ADDR_BIT(ioh_1588_base + IOH_1588_CC_OFFSET,
++ IOH_1588_CC_VERSION);
++ } else {
++ IOH_DEBUG("ioh_1588_version_set supports IEEE1588 v1 only\n");
++ /* CLEAR the version bit */
++ IOH_CLR_ADDR_BIT(ioh_1588_base + IOH_1588_CC_OFFSET,
++ IOH_1588_CC_VERSION);
++ }
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE unsigned long ioh_1588_version_get (void)
++ * @brief Get the 1588 version number
++ * @param None.
++ * @retval unsigned long
++*/
++static INLINE unsigned long ioh_1588_version_get(void)
++{
++ /* Is the version bit SET? */
++ return IOH_BIT_SET_CHECK(ioh_1588_base + IOH_1588_CC_OFFSET,
++ IOH_1588_CC_VERSION);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE unsigned long ioh_1588_can_snap_valid (void)
++ * @brief CAN Timestamp available
++ * @param None
++ * @retval unsigned long
++*/
++static INLINE unsigned long ioh_1588_can_snap_valid(void)
++{
++ /* Is the valid bit SET? */
++ return IOH_BIT_SET_CHECK(ioh_1588_base + IOH_1588_CCE_OFFSET,
++ IOH_1588_CE_VAL);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE unsigned long ioh_1588_can_snap_ovr(void)
++ * @brief CAN Timestamp overrun
++ * @param None
++ * @retval unsigned long
++*/
++static INLINE unsigned long ioh_1588_can_snap_ovr(void)
++{
++ /* Is the ovr bit SET? */
++ return IOH_BIT_SET_CHECK(ioh_1588_base + IOH_1588_CCE_OFFSET,
++ IOH_1588_CE_OVR);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_can_snap_valid_clear(void)
++ * @brief Clear CAN Timestamp valid flag
++ * @param None
++ * @retval None
++*/
++static INLINE void ioh_1588_can_snap_valid_clear(void)
++{
++ /* CLEAR the valid bit by writing '1' onto it */
++ IOH_REG_32_WRITE(ioh_1588_base + IOH_1588_CCE_OFFSET, IOH_1588_CE_VAL);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_can_snap_ovr_clear(void)
++ * @brief Clear CAN Timestamp overrun flag
++ * @param None
++ * @retval None
++*/
++static INLINE void ioh_1588_can_snap_ovr_clear(void)
++{
++ /* CLEAR the overrun bit */
++ IOH_REG_32_WRITE(ioh_1588_base + IOH_1588_CCE_OFFSET, IOH_1588_CE_OVR);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE unsigned long ioh_1588_rx_snap_evt(void)
++ * @brief Receive Timestamp available
++ * @param None
++ * @retval unsigned long
++*/
++static INLINE unsigned long ioh_1588_rx_snap_evt(void)
++{
++ /* Is the rxs bit SET? */
++ return IOH_BIT_SET_CHECK(ioh_1588_base + IOH_1588_CE_OFFSET,
++ IOH_1588_CE_RXS);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE unsigned long ioh_1588_tx_snap_evt(void)
++ * @brief Transmit Timestamp available
++ * @param None
++ * @retval unsigned long
++*/
++static INLINE unsigned long ioh_1588_tx_snap_evt(void)
++{
++ /* Is the txs bit SET? */
++ return IOH_BIT_SET_CHECK(ioh_1588_base + IOH_1588_CE_OFFSET,
++ IOH_1588_CE_TXS);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_rx_snap_evt_clear(void)
++ * @brief Clear Receive Timestamp available event
++ * @param None.
++ * @retval None.*/
++static INLINE void ioh_1588_rx_snap_evt_clear(void)
++{
++ /* CLEAR the rxs bit */
++ IOH_REG_32_WRITE(ioh_1588_base + IOH_1588_CE_OFFSET, IOH_1588_CE_RXS);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_tx_snap_evt_clear(void)
++ * @brief Clear Transmit Timestamp available event
++ * @param None
++ * @retval None
++*/
++static INLINE void ioh_1588_tx_snap_evt_clear(void)
++{
++ unsigned long ev_reg;
++
++ IOH_REG_32_READ(ioh_1588_base + IOH_1588_CE_OFFSET, &ev_reg);
++ IOH_DEBUG
++ ("ioh_1588_tx_snap_evt_clear event reg content before clearing= %lx\n",
++ ev_reg);
++
++ /* CLEAR the txs bit */
++ IOH_REG_32_WRITE(ioh_1588_base + IOH_1588_CE_OFFSET, IOH_1588_CE_TXS);
++
++ IOH_REG_32_READ(ioh_1588_base + IOH_1588_CE_OFFSET, &ev_reg);
++ IOH_DEBUG
++ ("ioh_1588_tx_snap_evt_clear event reg content after clearing= %lx\n",
++ ev_reg);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_can_snap_get(
++ * unsigned long *rxs_low,unsigned long *rxs_high)
++ * @brief Get PTP CAN Port Timestamp value
++ * @param rxs_low [OUT] The CAN Rx low value.
++ * @param rxs_high [OUT] The CAN Rx high value.
++ * @retval None.
++*/
++static INLINE void
++ioh_1588_can_snap_get(unsigned long *rxs_low, unsigned long *rxs_high)
++{
++ /* Get the Receive Timestamp/Snapshot Low Register contents */
++ IOH_REG_32_READ(ioh_1588_base + IOH_1588_CXSL_OFFSET, rxs_low);
++
++ /* Get the Receive Timestamp/Snapshot High Register contents */
++ IOH_REG_32_READ(ioh_1588_base + IOH_1588_CXSH_OFFSET, rxs_high);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_rx_snap_get(
++ * unsigned long *rxs_low, unsigned long *rxs_high)
++ * @brief Get PTP Port Rx Timestamp value
++ * @param rxs_low [OUT] The Snap Rx value.
++ * @param rxs_high [OUT] The Snap Rx Value.
++ * @retval None
++*/
++static INLINE void
++ioh_1588_rx_snap_get(unsigned long *rxs_low, unsigned long *rxs_high)
++{
++ /* Get the Receive Timestamp/Snapshot Low Register contents */
++ IOH_REG_32_READ(ioh_1588_base + IOH_1588_RSL_OFFSET, rxs_low);
++
++ /* Get the Receive Timestamp/Snapshot High Register contents */
++ IOH_REG_32_READ(ioh_1588_base + IOH_1588_RSH_OFFSET, rxs_high);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_tx_snap_get(
++ * unsigned long *txs_low, unsigned long *txs_high)
++ * @brief Get PTP Port Tx Timestamp value
++ * @param txs_low [OUT] The Port Tx value low.
++ * @param txs_high [OUT] The Port Tx value high.
++ * @retval None
++*/
++static INLINE void
++ioh_1588_tx_snap_get(unsigned long *txs_low, unsigned long *txs_high)
++{
++ /* Get the Transmit Timestamp/Snapshot Low Register contents */
++ IOH_REG_32_READ(ioh_1588_base + IOH_1588_XSL_OFFSET, txs_low);
++
++ /* Get the Transmit Timestamp/Snapshot High Register contents */
++ IOH_REG_32_READ(ioh_1588_base + IOH_1588_XSH_OFFSET, txs_high);
++
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_uuid_seqid_get(
++ * unsigned long *uuid_low,unsigned long *uuid_high, unsigned int *seq_id)
++ * @brief Get UUID High (16-bit value) & Sequence ID (16-bit value) of
++ * PTP message
++ * @param uuid_low [OUT] The UUID low value.
++ * @param seq_id [OUT] The sequence ID.
++ * @retval None.
++*/
++static INLINE void
++ioh_1588_uuid_seqid_get(unsigned long *uuid_low,
++ unsigned long *uuid_high, unsigned int *seq_id)
++{
++ unsigned long regval = 0;
++
++ /* Get the UUID Low Register contents */
++ IOH_REG_32_READ(ioh_1588_base + IOH_1588_UID_OFFSET, uuid_low);
++
++ /* Get the Sequence ID and Source UUID High Register contents */
++ IOH_REG_32_READ(ioh_1588_base + IOH_1588_SID_OFFSET, &regval);
++
++ *seq_id = (regval >> IOH_1588_SID_LOC);
++ *uuid_high = (regval & IOH_1588_LSB_SHORT_MASK);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_op_mode_set(unsigned long mode)
++ * @brief Sets the operation mode.
++ * @param mode [IN] The mode value.
++ * @retval None.
++*/
++static INLINE void ioh_1588_op_mode_set(unsigned long mode)
++{
++ unsigned long regval;
++
++ IOH_REG_32_READ(ioh_1588_base + IOH_1588_CC_OFFSET, &regval);
++ regval =
++ (regval & ~IOH_1588_CC_MODE_MASK) | (mode <<
++ IOH_1588_CC_MODE_SHIFT);
++ /* set the operation mode bits */
++ IOH_REG_32_WRITE(ioh_1588_base + IOH_1588_CC_OFFSET, regval);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE unsigned long ioh_1588_op_mode_get(void)
++ * @brief Gets the operation mode.
++ * @param None.
++ * @retval unsigned long
++*/
++static INLINE unsigned long ioh_1588_op_mode_get(void)
++{
++ unsigned long regval;
++ unsigned long mode;
++
++ IOH_REG_32_READ(ioh_1588_base + IOH_1588_CC_OFFSET, &regval);
++ /* get the operation mode bits */
++ mode = (regval & IOH_1588_CC_MODE_MASK) >> IOH_1588_CC_MODE_SHIFT;
++
++ return mode;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_eth_enable_set(void)
++ * @brief Enables the eth.
++ * @param None.
++ * @retval None.
++*/
++static INLINE void ioh_1588_eth_enable_set(void)
++{
++ /* SET the eth_enable bit */
++ IOH_SET_ADDR_BIT(ioh_1588_base + IOH_1588_ECS_OFFSET, IOH_1588_ECS_ETH);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_eth_enable_clear(void)
++ * @brief Clears the eth enable.
++ * @param None.
++ * @retval None.
++*/
++static INLINE void ioh_1588_eth_enable_clear(void)
++{
++ /* Clear the eth_enable bit */
++ IOH_CLR_ADDR_BIT(ioh_1588_base + IOH_1588_ECS_OFFSET, IOH_1588_ECS_ETH);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE unsigned long ioh_1588_eth_enable_get(void)
++ * @brief Gets the eth enable.
++ * @param None.
++ * @retval unsigned long
++*/
++static INLINE unsigned long ioh_1588_eth_enable_get(void)
++{
++ /* Is eth_enable bit set? */
++ return IOH_BIT_SET_CHECK(ioh_1588_base + IOH_1588_ECS_OFFSET,
++ IOH_1588_ECS_ETH);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_can_enable_set(void)
++ * @brief Sets the CAN enable.
++ * @param None
++ * @retval None
++*/
++static INLINE void ioh_1588_can_enable_set(void)
++{
++ /* SET the can_enable bit */
++ IOH_SET_ADDR_BIT(ioh_1588_base + IOH_1588_ECS_OFFSET, IOH_1588_ECS_CAN);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_can_enable_clear(void)
++ * @brief Sets the CAN enable clear
++ * @param None
++ * @retval None
++*/
++static INLINE void ioh_1588_can_enable_clear(void)
++{
++ /* Clear the can_enable bit */
++ IOH_CLR_ADDR_BIT(ioh_1588_base + IOH_1588_ECS_OFFSET, IOH_1588_ECS_CAN);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE unsigned long ioh_1588_can_enable_get(void)
++ * @brief Gets the CAN enable status.
++ * @param None.
++ * @retval unsigned long
++*/
++static INLINE unsigned long ioh_1588_can_enable_get(void)
++{
++ /* Is can_enable bit set? */
++ return IOH_BIT_SET_CHECK(ioh_1588_base + IOH_1588_ECS_OFFSET,
++ IOH_1588_ECS_CAN);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_station_set(unsigned long station,
++ * unsigned long value)
++ * @brief Set the station[1-6] address to be used in PTP message
++ * @param station [IN] The Station.
++ * @param value [IN] The Value.
++ * @retval None.
++*/
++static INLINE void
++ioh_1588_station_set(unsigned long station, unsigned long value)
++{
++ IOH_DEBUG("ioh_1588_station_set;setting station address=%lx\n", value);
++ /* Set the Station Address Register contents */
++ IOH_REG_32_WRITE(ioh_1588_base + IOH_1588_STA_OFFSET +
++ station * sizeof(int), value);
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn static INLINE void ioh_1588_station_get (unsigned long station,
++ * unsigned long *value)
++ * @brief Get the station[1-6] address used in PTP message
++ * @param station [IN] The station.
++ * @param value [OUT] The value.
++ * @retval None.
++*/
++static INLINE void
++ioh_1588_station_get(unsigned long station, unsigned long *value)
++{
++ /* Get the Station Address Register contents */
++ IOH_REG_32_READ(ioh_1588_base + IOH_1588_STA_OFFSET +
++ station * sizeof(int), value);
++ *value &= 0xFF; /* only one byte */
++}
++
++/**
++ * Support functions definitions
++ */
++
++/**
++ * @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_disable_interrupts(void)
++ * @brief Disables all interrupts on the 1588 device.
++ * @param None
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ */
++
++enum ioh_status ioh_1588_disable_interrupts(void)
++{
++ if (ioh_1588_base != 0) {
++ IOH_DEBUG
++ ("ioh_1588_disable_interrupts:invoking \
++ ioh_1588_ttm_imask_clear\n");
++ ioh_1588_ttm_imask_clear();
++ IOH_DEBUG
++ ("ioh_1588_disable_interrupts:invoking \
++ ioh_1588_asms_imask_clear\n");
++ ioh_1588_asms_imask_clear();
++ IOH_DEBUG
++ ("ioh_1588_disable_interrupts:invoking \
++ ioh_1588_amms_imask_clear\n");
++ ioh_1588_amms_imask_clear();
++ IOH_DEBUG
++ ("ioh_1588_disable_interrupts:invoking \
++ ioh_1588_pps_imask_clear\n");
++ ioh_1588_pps_imask_clear();
++ }
++ return IOH_1588_SUCCESS;
++}
++
++/**
++ * @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_interrupt_pending(
++ * unsigned long *pending)
++ * @brief Check whether there is any pending interrupts from the 1588
++ * device.
++ * @param pending [IN] Pending flag which set to TRUE if there is any
++ * pending interrupt
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ */
++enum ioh_status ioh_1588_interrupt_pending(unsigned long *pending)
++{
++ *pending = FALSE;
++ if (ioh_1588_pps_evt_get() || ioh_1588_amms_evt_get() ||
++ ioh_1588_asms_evt_get() || ioh_1588_ttm_evt_get()) {
++ IOH_DEBUG("ioh_1588_interrupt_pending:interrupt pending\n");
++ *pending = TRUE;
++ } else {
++ IOH_DEBUG("ioh_1588_interrupt_pending:NO interrupt pending\n");
++ }
++
++ return IOH_1588_SUCCESS;
++}
++
++/**
++ * @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh1588PTPPortMode ioh_1588_port_mode_get(
++ * enum ioh1588PTPPort ptpPort)
++ * @brief Function to determine the port mode
++ * @param ptpPort [IN] Interested port (GBE_0)
++ * @retval enum ioh1588PTPPortMode
++ * - IOH_1588PTP_PORT_MASTER
++ * - IOH_1588PTP_PORT_SLAVE
++ * - IOH_1588PTP_PORT_ANYMODE
++ */
++enum ioh1588PTPPortMode ioh_1588_port_mode_get(enum ioh1588PTPPort ptpPort)
++{
++ /* Local variables */
++ unsigned long master_mode = FALSE;
++ unsigned long any_mode = FALSE;
++ enum ioh1588PTPPortMode port_mode = IOH_1588PTP_PORT_SLAVE;
++
++ /* Get the Mode of the PTP Port */
++ master_mode = ioh_1588_master_mode_get();
++ any_mode = ioh_1588_timestamp_all_get();
++
++ /* Is ANY mode (all message timestamp mode) on? */
++ if (TRUE == any_mode) {
++ IOH_DEBUG
++ ("ioh_1588_port_mode_get:all messages being timestamped\n");
++ /*
++ * When Any mode is set, all messages are time stamped,
++ * irrespective of the Master/Slave mode bit
++ */
++ port_mode = IOH_1588PTP_PORT_ANYMODE;
++ IOH_DEBUG
++ ("ioh_1588_port_mode_get:port_mode = \
++ IOH_1588PTP_PORT_ANYMODE\n");
++ } else {
++ /* Is Master mode on? */
++ if (TRUE == master_mode) {
++ port_mode = IOH_1588PTP_PORT_MASTER;
++ IOH_DEBUG
++ ("ioh_1588_port_mode_get:port_mode = \
++ IOH_1588PTP_PORT_MASTER\n");
++ } else {
++ port_mode = IOH_1588PTP_PORT_SLAVE;
++ IOH_DEBUG
++ ("ioh_1588_port_mode_get:port_mode = \
++ IOH_1588PTP_PORT_SLAVE\n");
++ }
++ }
++
++ return port_mode;
++}
++
++/*
++ * Public API definitions
++ */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_blpl_base_address_set(unsigned long base_addr)
++ *
++ * @brief Function sets the virtual address of registers
++ * @remarks This API will set the starting virtual addresses for the
++ * 1588 hardware registers. The main tasks performed by this
++ * function are:
++ * - If the argument passed is NULL, return status
++ * IOH_1588_INVALIDPARAM
++ * - Set base address of IEEE 1588 registers to specified value
++ *
++ *
++ * @param base_addr [IN] - Virtual address of IEEE 1588 module registers
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ * @li IOH_1588_INVALIDPARAM - Invalid parameter passed
++ */
++enum ioh_status ioh_1588_blpl_base_address_set(unsigned long base_addr)
++{
++ if (!base_addr) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_blpl_base_address_set:invalid base address\
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++
++ ioh_1588_base = base_addr;
++ IOH_DEBUG("ioh_1588_blpl_base_address_set:base address=%lx\n",
++ ioh_1588_base);
++
++ /* Initialize the callback pointers */
++ ioh_tt_cbptr = (ioh1588TargetTimeCallback) NULL;
++ ioh_am_cbptr = (ioh1588AuxTimeCallback) NULL;
++ ioh_as_cbptr = (ioh1588AuxTimeCallback) NULL;
++ ioh_pps_cbptr = (ioh1588PulsePerSecondCallback) NULL;
++ IOH_DEBUG("ioh_1588_blpl_base_address_set:initialized callback ptrs\n");
++
++ /* Reset the statistics counters */
++ ioh_1588_stats.rxMsgs = ioh_1588_stats.txMsgs = 0;
++ IOH_DEBUG("ioh_1588_blpl_base_address_set:reset statistics counters\n");
++
++ /* Clear availability of various events */
++ IOH_DEBUG
++ ("ioh_1588_blpl_base_address_set:invoking \
++ ioh_1588_pps_evt_clear\n");
++ ioh_1588_pps_evt_clear();
++ IOH_DEBUG
++ ("ioh_1588_blpl_base_address_set:invoking \
++ ioh_1588_ttm_evt_clear\n");
++ ioh_1588_ttm_evt_clear();
++ IOH_DEBUG
++ ("ioh_1588_blpl_base_address_set:invoking \
++ ioh_1588_amms_evt_clear\n");
++ ioh_1588_amms_evt_clear();
++ IOH_DEBUG
++ ("ioh_1588_blpl_base_address_set:invoking \
++ ioh_1588_asms_evt_clear\n");
++ ioh_1588_asms_evt_clear();
++
++ IOH_DEBUG("ioh_1588_blpl_base_address_set:returning success\n");
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_ptp_port_config_set(
++ * enum ioh1588PTPPort ptpPort, enum ioh1588PTPPortMode ptpPortMode)
++ *
++ * @brief Configure IEEE 1588 Hardware Assist message detection on a given PTP
++ * port
++ *
++ * @remarks This API enables the time stamping on a particular PTP port.
++ * The main tasks performed by this function are:
++ * - Validate the parameters and return
++ * IOH_1588_INVALIDPARAM, if found invalid
++ * - Modify the TS_Channel_Control register to set the
++ * requested mode
++ *
++ *
++ * @param ptpPort [IN] - port on which PTP message detection to be enabled
++ * @param ptpPortMode [IN]- Master/Slave/All messages
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ * @li IOH_1588_INVALIDPARAM - Invalid parameters passed
++ */
++enum ioh_status
++ioh_1588_ptp_port_config_set(enum ioh1588PTPPort ptpPort,
++ enum ioh1588PTPPortMode ptpPortMode)
++{
++ /* Verify the parameters for proper values */
++ if (ptpPort != IOH_1588_GBE_0_1588PTP_PORT) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ptp_port_config_set:invalid ptp port\
++ returning IOH_1588_GBE_0_1588PTP_PORT\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++
++ /* Set the Mode of the PTP Port */
++ switch (ptpPortMode) {
++ case IOH_1588PTP_PORT_MASTER:
++ {
++ IOH_DEBUG
++ ("ioh_1588_ptp_port_config_set:port_mode=\
++ IOH_1588PTP_PORT_MASTER\n");
++ IOH_DEBUG
++ ("ioh_1588_ptp_port_config_set:invoking \
++ ioh_1588_master_mode_set \
++ with param TRUE\n");
++ ioh_1588_master_mode_set(TRUE);
++ IOH_DEBUG
++ ("ioh_1588_ptp_port_config_set:invoking \
++ ioh_1588_timestamp_all_set \
++ with param FALSE\n");
++ ioh_1588_timestamp_all_set(FALSE);
++ break;
++ }
++ case IOH_1588PTP_PORT_SLAVE:
++ {
++ IOH_DEBUG
++ ("ioh_1588_ptp_port_config_set:port_mode=\
++ IOH_1588PTP_PORT_SLAVE\n");
++ IOH_DEBUG
++ ("ioh_1588_ptp_port_config_set:invoking \
++ ioh_1588_master_mode_set \
++ with param FALSE\n");
++ ioh_1588_master_mode_set(FALSE);
++ IOH_DEBUG
++ ("ioh_1588_ptp_port_config_set:invoking \
++ ioh_1588_timestamp_all_set \
++ with param FALSE\n");
++ ioh_1588_timestamp_all_set(FALSE);
++ break;
++ }
++ case IOH_1588PTP_PORT_ANYMODE:
++ {
++ IOH_DEBUG
++ ("ioh_1588_ptp_port_config_set:port_mode=\
++ IOH_1588PTP_PORT_ANYMODE\n");
++ IOH_DEBUG
++ ("ioh_1588_ptp_port_config_set:invoking \
++ ioh_1588_master_mode_set \
++ with param FALSE\n");
++ ioh_1588_master_mode_set(FALSE);
++ IOH_DEBUG
++ ("ioh_1588_ptp_port_config_set:invoking \
++ ioh_1588_timestamp_all_set \
++ with param TRUE\n");
++ ioh_1588_timestamp_all_set(TRUE);
++ break;
++ }
++ default:
++ {
++ IOH_LOG(KERN_ERR, "ioh_1588_ptp_port_config_set: \
++ Invalid Port Mode (%d) \
++ returning IOH_1588_INVALIDPARAM\n",
++ ptpPortMode);
++ return IOH_1588_INVALIDPARAM;
++ }
++ }
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_ptp_port_config_get(
++ * enum ioh1588PTPPort ptpPort,
++ * enum ioh1588PTPPortMode *ptpPortMode)
++ *
++ * @brief Get the configuration of IEEE 1588 Hardware Assist message
++ * detection
++ * for given PTP port
++ *
++ * @remarks This API retrieves the time stamping configuration of the given
++ * PTP port.
++ * The main tasks performed by this function are:
++ * - Validate the parameters and return
++ * IOH_1588_INVALIDPARAM, if found invalid
++ * - Return the current master/slave mode from
++ * TS_Channel_Control register
++ *
++ *
++ * @param ptpPort [IN] port for which PTP message configuration
++ * to be obtained
++ * @param ptpPortMode [OUT] Master/Slave/All messages
++ *
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ * @li IOH_1588_INVALIDPARAM - Invalid parameters passed
++ */
++enum ioh_status
++ioh_1588_ptp_port_config_get(enum ioh1588PTPPort ptpPort,
++ enum ioh1588PTPPortMode *ptpPortMode)
++{
++ /* Verify the parameters for proper values */
++ if ((ptpPort != IOH_1588_GBE_0_1588PTP_PORT) ||
++ ((enum ioh1588PTPPortMode *) NULL == ptpPortMode)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ptp_port_config_get:\
++ invalid port_mode or port \
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++
++ /* Get the Mode of the PTP Port */
++ IOH_DEBUG
++ ("ioh_1588_ptp_port_config_get:invoking ioh_1588_port_mode_get\n");
++ *ptpPortMode = ioh_1588_port_mode_get(ptpPort);
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_ptp_rx_poll(
++ * enum ioh1588PTPPort ptpPort,
++ * struct ioh1588PtpMsgData *ptpMsgData)
++ *
++ * @brief Poll the IEEE 1588 Hardware Assist receive side message/time
++ * stamp
++ * detection status for given PTP Port
++ *
++ * @remarks This API will poll for the availability of a time stamp on the
++ * received Sync (in slave mode) or Delay_Req (in master mode)
++ * messages.
++ * The buffer is provided by the caller.
++ * The steps performed in this function are:
++ * - If ptpPort not valid or ptpMsgData is NULL return
++ * IOH_1588_INVALID_PARAM
++ * - Find out whether locked /unlocked mode.
++ * - If locked mode ,check the TS_Channel_Event register for Rx
++ * event
++ * - If locked mode, return NO TIMESTAMP if no Rx event flag is
++ * set.
++ * - Read the time stamp from RECV_Snapshot low, high and
++ * - SourceUUID0 low, high registers
++ * - Clear SourceUUId if the mode is not master/slave and PTP v1
++ * only.
++ * - Increment RX messages captured statistics counter
++ * - If locked mode ,clear the RX event is TS_Channel_Event
++ * register
++ *
++ *
++ * @param ptpPort [IN] port on which time stamp availability
++ * to be checked
++ * @param ptpMsgData [out] Captured time stamp and other message
++ * information
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ * @li IOH_1588_INVALIDPARAM - Invalid parameters passed
++ * @li IOH_1588_NOTIMESTAMP - Time stamp not available
++ * @li IOH_1588_FAILED - Internal error occurred
++ */
++enum ioh_status
++ioh_1588_ptp_rx_poll(enum ioh1588PTPPort ptpPort, \
++ struct ioh1588PtpMsgData *ptpMsgData)
++{
++ unsigned long locked_mode = FALSE;
++
++ enum ioh1588PTPPortMode port_mode = IOH_1588PTP_PORT_MODE_INVALID;
++ enum ioh1588PTPVersion ptpVersion = IOH_1588PTP_VERSION_INVALID;
++ enum ioh1588PTPOperationMode opMode = IOH_1588PTP_OP_MODE_INVALID;
++
++ /* Verify the parameters for proper values */
++ if ((ptpPort != IOH_1588_GBE_0_1588PTP_PORT) ||
++ ((struct ioh1588PtpMsgData *) NULL == ptpMsgData)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ptp_rx_poll:invalid port or ptp message \
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++
++ /*Get the PTP version */
++ ptpVersion = ioh_1588_version_get();
++
++ /*check if locked/unlocked mode */
++ if (ptpVersion == IOH_1588PTP_VERSION_0) { /*PTP v1 only */
++ /* Get the Mode of the PTP Port if only PTPv1 is supported */
++ port_mode = ioh_1588_port_mode_get(ptpPort);
++ if (port_mode != IOH_1588PTP_PORT_ANYMODE)
++ locked_mode = TRUE;
++ } else { /*PTP v1 & v2 */
++
++ /*get operation mode */
++ opMode = ioh_1588_op_mode_get();
++ if ((opMode != IOH_1588PTP_OP_MODE_V1_ALL_MSGS) &&
++ (opMode != IOH_1588PTP_OP_MODE_V1_V2_ALL_MSGS)) {
++ locked_mode = TRUE;
++ }
++ }
++
++ /*if locked mode,check event flag */
++ if ((TRUE == locked_mode) && (TRUE != ioh_1588_rx_snap_evt())) {
++ IOH_DEBUG
++ ("ioh_1588_ptp_rx_poll:locked mode-event flag not set\n");
++ IOH_DEBUG("ioh_1588_ptp_rx_poll:NO TIMESTAMP \
++ returning IOH_1588_NOTIMESTAMP\n");
++ return IOH_1588_NOTIMESTAMP;
++ }
++
++ /* Fetch the receive timestamp */
++ IOH_DEBUG("ioh_1588_ptp_rx_poll:invoking ioh_1588_rx_snap_get\n");
++ ioh_1588_rx_snap_get(&ptpMsgData->ptpTimeStamp.timeValueLowWord,
++ &ptpMsgData->ptpTimeStamp.timeValueHighWord);
++
++ IOH_DEBUG
++ ("ioh_1588_ptp_rx_poll:ioh_1588_ptp_rx_poll Snapshot (Hi:Low): \
++ %lx : %lx\n",
++ ptpMsgData->ptpTimeStamp.timeValueHighWord,
++ ptpMsgData->ptpTimeStamp.timeValueLowWord);
++
++ /* Fetch the UUID & Seq# of PTP messages in 'Master/Slave Mode' only */
++ if ((TRUE == locked_mode) && (IOH_1588PTP_VERSION_0 == ptpVersion)) {
++ IOH_DEBUG
++ ("ioh_1588_ptp_rx_poll:invoking ioh_1588_uuid_seqid_get\n");
++ ioh_1588_uuid_seqid_get(&ptpMsgData->ptpUuid.uuidValueLowWord,
++ &ptpMsgData->ptpUuid.
++ uuidValueHighHalfword,
++ &ptpMsgData->ptpSequenceNumber);
++ }
++ /* Clear-off the UUID & Seq# of all the messages in 'Any Mode' */
++ else {
++ IOH_DEBUG
++ ("ioh_1588_ptp_rx_poll:port mode is ANYMODE,clearing off \
++ UUID & SeqNumber\n");
++ ptpMsgData->ptpUuid.uuidValueLowWord = 0;
++ ptpMsgData->ptpUuid.uuidValueHighHalfword = 0;
++ ptpMsgData->ptpSequenceNumber = 0;
++ }
++
++ /* Increment receive timestamp counter
++ * Note:In unlocked modes,this will get incremented
++ * for every rx time stamp poll.
++ */
++ ioh_1588_stats.rxMsgs++;
++ IOH_DEBUG("ioh_1588_ptp_rx_poll:incremented rcv timestamp \
++ counter=%ld\n", ioh_1588_stats.rxMsgs);
++
++ /*
++ * Fill-in the PTP message type.This can be done
++ * only when PTP v1 alone is supported and mode
++ * is master/slave.Set the message type as unknown
++ * for all other cases.
++ */
++ if (ptpVersion == IOH_1588PTP_VERSION_0) { /*PTP v1 only */
++ switch (port_mode) {
++ case IOH_1588PTP_PORT_MASTER:
++ {
++ IOH_DEBUG
++ ("ioh_1588_ptp_rx_poll:PTP message type=\
++ IOH_1588PTP_MSGTYPE_DELAYREQ\n");
++ ptpMsgData->ptpMsgType =
++ IOH_1588PTP_MSGTYPE_DELAYREQ;
++ break;
++ }
++ case IOH_1588PTP_PORT_SLAVE:
++ {
++ IOH_DEBUG
++ ("ioh_1588_ptp_rx_poll:PTP message type=\
++ IOH_1588PTP_MSGTYPE_SYNC\n");
++ ptpMsgData->ptpMsgType =
++ IOH_1588PTP_MSGTYPE_SYNC;
++ break;
++ }
++ case IOH_1588PTP_PORT_ANYMODE:
++ {
++ IOH_DEBUG
++ ("ioh_1588_ptp_rx_poll:PTP message type=\
++ IOH_1588PTP_MSGTYPE_UNKNOWN\n");
++ ptpMsgData->ptpMsgType =
++ IOH_1588PTP_MSGTYPE_UNKNOWN;
++ break;
++ }
++ default:
++ {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ptp_rx_poll(): Invalid Port \
++ Mode \
++ returning IOH_1588_FAILED\n");
++ return IOH_1588_FAILED;
++ }
++ }
++ } else { /*PTP v1 & v2 */
++
++ IOH_DEBUG
++ ("ioh_1588_ptp_rx_poll:PTP message type=\
++ IOH_1588PTP_MSGTYPE_UNKNOWN\n");
++ ptpMsgData->ptpMsgType = IOH_1588PTP_MSGTYPE_UNKNOWN;
++ }
++
++ /* If locked mode allow next timestamp to be captured */
++ if (TRUE == locked_mode) {
++ IOH_DEBUG
++ ("ioh_1588_ptp_rx_poll:invoking \
++ ioh_1588_rx_snap_evt_clear\n");
++ ioh_1588_rx_snap_evt_clear();
++ }
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_ptp_tx_poll(
++ * enum ioh1588PTPPort ptpPort,
++ * struct ioh1588PtpMsgData *ptpMsgData)
++ *
++ * @brief Poll the IEEE 1588 Hardware Assist transmit side message/time
++ * stamp
++ * detection status for given PTP Port
++ *
++ * @remarks This API will poll for the availability of a time stamp on the
++ * transmit side Sync (in master mode) or Delay_Req (in slave mode)
++ * messages.
++ * The buffer is provided by the caller.
++ * The main tasks performed by this function are:
++ * - If ptpPort not valid or ptpMsgData is NULL
++ * return IOH_1588_INVALID_PARAM
++ * - Find out whether locked /unlocked mode.
++ * - If locked mode ,check the TS_Channel_Event register for Tx
++ * event
++ * - If locked mode, return NO TIMESTAMP if no Tx event flag is
++ * set.
++ * - Read the time stamp from XMIT_Snapshot low, high registers
++ * - Increment TX messages captured statistics counter
++ * - If locked mode, clear the TX event is TS_Channel_Event
++ * register
++ *
++ *
++ * @param ptpPort [IN] port on which time stamp availability to be
++ * checked
++ * @param ptpMsgData [OUT] Captured time stamp and other message
++ * information
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ * @li IOH_1588_INVALIDPARAM - Invalid parameters passed
++ * @li IOH_1588_NOTIMESTAMP - Time stamp not available
++ * @li IOH_1588_FAILED - Internal error occurred
++ */
++enum ioh_status
++ioh_1588_ptp_tx_poll(enum ioh1588PTPPort ptpPort, \
++ struct ioh1588PtpMsgData *ptpMsgData)
++{
++ unsigned long locked_mode = FALSE;
++
++ enum ioh1588PTPPortMode port_mode = IOH_1588PTP_PORT_MODE_INVALID;
++ enum ioh1588PTPVersion ptpVersion = IOH_1588PTP_VERSION_INVALID;
++ enum ioh1588PTPOperationMode opMode = IOH_1588PTP_OP_MODE_INVALID;
++
++ /* Verify the parameters for proper values */
++ if ((ptpPort != IOH_1588_GBE_0_1588PTP_PORT) ||
++ ((struct ioh1588PtpMsgData *) NULL == ptpMsgData)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ptp_tx_poll:invalid port or ptp message \
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++
++ /*Get the PTP version */
++ ptpVersion = ioh_1588_version_get();
++
++ /*check if locked/unlocked mode */
++ if (ptpVersion == IOH_1588PTP_VERSION_0) { /*PTP v1 only */
++ /* Get the Mode of the PTP Port if only PTPv1 is supported */
++ port_mode = ioh_1588_port_mode_get(ptpPort);
++ if (port_mode != IOH_1588PTP_PORT_ANYMODE)
++ locked_mode = TRUE;
++ } else { /*PTP v1 & v2 */
++
++ /*get operation mode */
++ opMode = ioh_1588_op_mode_get();
++ if ((opMode != IOH_1588PTP_OP_MODE_V1_ALL_MSGS) &&
++ (opMode != IOH_1588PTP_OP_MODE_V1_V2_ALL_MSGS)) {
++ locked_mode = TRUE;
++ }
++ }
++
++ /*if locked mode,check event flag */
++ if ((TRUE == locked_mode) && (TRUE != ioh_1588_tx_snap_evt())) {
++ IOH_DEBUG
++ ("ioh_1588_ptp_tx_poll:locked mode-event flag not set\n");
++ IOH_DEBUG
++ ("ioh_1588_ptp_tx_poll:ioh_1588_ptp_tx_poll NO TIMESTAMP \
++ returning IOH_1588_NOTIMESTAMP\n");
++ return IOH_1588_NOTIMESTAMP;
++ }
++
++ /* read time stamp registers */
++ ioh_1588_tx_snap_get(&ptpMsgData->ptpTimeStamp.timeValueLowWord,
++ &ptpMsgData->ptpTimeStamp.timeValueHighWord);
++
++ IOH_DEBUG
++ ("ioh_1588_ptp_tx_poll:ioh_1588_ptp_tx_poll Snapshot (Hi:Low): \
++ %lx : %lx\n",
++ ptpMsgData->ptpTimeStamp.timeValueHighWord,
++ ptpMsgData->ptpTimeStamp.timeValueLowWord);
++ /*
++ * Fill the UUID and Seq# with invalid values (zeros)
++ * since they are not relevant for transmit timestamp
++ */
++ ptpMsgData->ptpUuid.uuidValueLowWord = 0;
++ ptpMsgData->ptpUuid.uuidValueHighHalfword = 0;
++ ptpMsgData->ptpSequenceNumber = 0;
++
++ /*
++ * Increment transmit timestamp counter
++ * Note:In unlocked modes,this will get incremented
++ * for every tx time stamp poll
++ */
++ ioh_1588_stats.txMsgs++;
++ IOH_DEBUG("ioh_1588_ptp_tx_poll:incremented tx timestamp counter=%ld\n",
++ ioh_1588_stats.txMsgs);
++
++ /*
++ * Fill-in the PTP message type.This can be done
++ * only when PTP v1 alone is supported and mode
++ * is master/slave.Set the message type as unknown
++ * for all other cases.
++ */
++ if (ptpVersion == IOH_1588PTP_VERSION_0) { /*PTP v1 only */
++ switch (port_mode) {
++ case IOH_1588PTP_PORT_MASTER:
++ {
++ IOH_DEBUG
++ ("ioh_1588_ptp_tx_poll:PTP message type=\
++ IOH_1588PTP_MSGTYPE_SYNC\n");
++ ptpMsgData->ptpMsgType =
++ IOH_1588PTP_MSGTYPE_SYNC;
++ break;
++ }
++ case IOH_1588PTP_PORT_SLAVE:
++ {
++ IOH_DEBUG
++ ("ioh_1588_ptp_tx_poll:PTP message type=\
++ IOH_1588PTP_MSGTYPE_DELAYREQ\n");
++ ptpMsgData->ptpMsgType =
++ IOH_1588PTP_MSGTYPE_DELAYREQ;
++ break;
++ }
++ case IOH_1588PTP_PORT_ANYMODE:
++ {
++ IOH_DEBUG
++ ("ioh_1588_ptp_tx_poll:PTP message type=\
++ IOH_1588PTP_MSGTYPE_UNKNOWN\n");
++ ptpMsgData->ptpMsgType =
++ IOH_1588PTP_MSGTYPE_UNKNOWN;
++ break;
++ }
++ default:
++ {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ptp_tx_poll(): \
++ Invalid Port Mode \
++ returning IOH_1588_FAILED\n");
++ return IOH_1588_FAILED;
++ }
++ }
++ } else { /*PTP v1 & v2 */
++
++ IOH_DEBUG
++ ("ioh_1588_ptp_tx_poll:PTP message type=\
++ IOH_1588PTP_MSGTYPE_UNKNOWN\n");
++ ptpMsgData->ptpMsgType = IOH_1588PTP_MSGTYPE_UNKNOWN;
++ }
++
++ /* If locked mode allow next timestamp to be captured */
++ if (locked_mode) {
++ IOH_DEBUG
++ ("ioh_1588_ptp_tx_poll:invoking ioh_1588_tx_snap_evt_clear\n");
++ ioh_1588_tx_snap_evt_clear();
++ }
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_system_time_set(
++ * struct ioh1588TimeValue systemTime)
++ *
++ * @brief This API sets the system time to given value
++ * @remarks Sets the System Time in the IEEE 1588 hardware assist block.
++ *
++ * @param systemTime [IN] value to set the system time to
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ */
++enum ioh_status ioh_1588_system_time_set(struct ioh1588TimeValue systemTime)
++{
++ unsigned long old_fsv = 0;
++
++ /* Retrieve old Frequency Scaling Value */
++ IOH_DEBUG("ioh_1588_system_time_set:invoking ioh_1588_addend_get\n");
++ ioh_1588_addend_get(&old_fsv);
++ IOH_DEBUG("ioh_1588_system_time_set:existing freq scaling value=%lx\n",
++ old_fsv);
++
++ /*
++ * Set the Frequency Scaling Value to zero (0) so that
++ * System Time doesn't get incremented while it is being written to
++ */
++ IOH_DEBUG("ioh_1588_system_time_set:invoking ioh_1588_addend_set \
++ to set frequncy scaling value to 0\n");
++ ioh_1588_addend_set(0);
++
++ /* Update System Time with user specified values */
++ IOH_DEBUG("ioh_1588_system_time_set:invoking \
++ ioh_1588_ioh_1588_sys_snap_set \
++ with values low=%lx,high=%lx\n",
++ systemTime.timeValueLowWord, systemTime.timeValueHighWord);
++ ioh_1588_sys_snap_set(systemTime.timeValueLowWord,
++ systemTime.timeValueHighWord);
++
++ /*
++ * Let the hardware assist re-evaluate the target time reached
++ * condition based on the new system time
++ */
++ IOH_DEBUG("ioh_1588_system_time_set:invoking ioh_1588_ttm_evt_clear\n");
++ ioh_1588_ttm_evt_clear();
++
++ /*
++ * Restore old Frequency Scaling Value so that System Time
++ * can be incremented
++ */
++ IOH_DEBUG("ioh_1588_system_time_set:invoking ioh_1588_addend_set \
++ to restore freq scaling value\n");
++ ioh_1588_addend_set(old_fsv);
++
++ IOH_DEBUG("ioh_1588_system_time_set:returning IOH_1588_SUCCESS\n");
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_system_time_get(
++ * struct ioh1588TimeValue *systemTime)
++ *
++ * @brief Gets the System Time from the IEEE 1588 hardware assist block
++ * @remarks This API gets the System time.
++ * The main steps followed in this function are:
++ * - Return IOH_1588_INVALIDPARAM if argument passed is NULL
++ * - Return the current system time by reading the SystemTime low,
++ * high registers
++ *
++ *
++ * @param systemTime [OUT] - Address to which system time is to be
++ * returned
++ *
++ * @return enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ * @li IOH_1588_INVALIDPARAM - Invalid address passed
++ */
++enum ioh_status ioh_1588_system_time_get(struct ioh1588TimeValue *systemTime)
++{
++ /* Verify the parameter */
++ if ((struct ioh1588TimeValue *) NULL == systemTime) {
++ IOH_LOG(KERN_ERR, "ioh_1588_system_time_get:invalid parameter \
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++
++ /* Fetch System Time */
++ ioh_1588_sys_snap_get(&systemTime->timeValueLowWord,
++ &systemTime->timeValueHighWord);
++
++ IOH_DEBUG("ioh_1588_system_time_get:invoked \
++ ioh_1588_ioh_1588_sys_snap_get \
++ system time:low=%lx,high=%lx\n",
++ systemTime->timeValueLowWord, systemTime->timeValueHighWord);
++ IOH_DEBUG("ioh_1588_system_time_get returning IOH_1588_SUCCESS\n");
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_tick_rate_set(unsigned long tickRate)
++ *
++ * @brief Sets the Frequency Scaling Value in the IEEE 1588 hardware
++ * assist block
++ *
++ * @remarks This API sets the Tick Rate (Frequency Scaling Value) in the
++ * IEEE 1588 block. This value determines the progress at which
++ * the System time advances.
++ * Note: For the A1 hardware sample, the addend register value
++ * configured in the hardware
++ * is calculated as follows:
++ * Addend register value = Logical right shift tickRate by 1 and
++ * set MSB to 1
++ *
++ * @param tickRate [IN] Frequency scaling value
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ */
++enum ioh_status ioh_1588_tick_rate_set(unsigned long tickRate)
++{
++ /* Update the Frequency Scaling Value */
++#ifdef IOH_IEEE1588_A1_SAMPLE_BUG
++ /*back up tick rate provided by app */
++ gTickRateApp = tickRate;
++ /*calculate actual tick rate for device */
++ IOH_DEBUG("ioh_1588_tick_rate_set:tick rate [app]=%lx\n", tickRate);
++ tickRate = ((tickRate >> 1) | 0x80000000);
++ IOH_DEBUG("ioh_1588_tick_rate_set:tick rate [dev]=%lx\n", tickRate);
++#endif
++ IOH_DEBUG("ioh_1588_tick_rate_set:invoking ioh_1588_addend_set \
++ with tick rate=%lx\n", tickRate);
++ ioh_1588_addend_set(tickRate);
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_tick_rate_get(unsigned long *tickRate)
++ *
++ * @brief Gets the Frequency Scaling Value from the IEEE 1588 hardware
++ * assist block
++ *
++ * @remarks This API gets the Tick Rate (Frequency Scaling Value) used in
++ * the IEEE 1588 block.
++ * This value determines the progress at which the System time
++ * advances.
++ * The main steps followed in this function are:
++ * - Return IOH_1588_INVALIDPARAM if argument passed is NULL
++ * - Return the content of Addend register
++ *
++ *
++ * @param tickRate [IN] - Address where current Frequency scaling value is
++ * returned
++ *
++ * @return enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ * @li IOH_1588_INVALIDPARM - Invalid address passed
++ */
++enum ioh_status ioh_1588_tick_rate_get(unsigned long *tickRate)
++{
++ /* Verify the parameter */
++ if ((unsigned long *)NULL == tickRate) {
++ IOH_LOG(KERN_ERR, "ioh_1588_tick_rate_get:invalid tick rate\
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++#ifdef IOH_IEEE1588_A1_SAMPLE_BUG
++ /* Retrieve Frequency Scaling Value stored in software buffer */
++ *tickRate = gTickRateApp;
++#else
++ /* Retrieve Current Frequency Scaling Value */
++ ioh_1588_addend_get(tickRate);
++#endif
++ IOH_DEBUG("ioh_1588_tick_rate_get:invoked ioh_1588_addend_get\
++ the tick rate=%lx\n", *tickRate);
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_target_time_interrupt_enable(
++ * ioh1588TargetTimeCallback callBack)
++ *
++ * @brief Enable the target time reached/exceeded system time interrupt
++ *
++ * @remarks This API enables the interrupt that occurs when the System time
++ * reaches the Target time set in the IEEE 1588 hardware assist
++ * block.
++ * The main steps followed in this function are:
++ * - Return IOH_1588_INVALIDPARAM if argument passed is NULL
++ * - Modify Time Sync Control Register to enable target time
++ * interrupt
++ * - Set the handler to callback function provided
++ *
++ *
++ * @param callBack [IN] - Routine to be invoked when target time reached
++ * interrupt occurs
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ * @li IOH_1588_INVALIDPARAM - Null parameter passed for callback
++ */
++enum ioh_status
++ioh_1588_target_time_interrupt_enable(ioh1588TargetTimeCallback callBack)
++{
++ /* Verify the parameter */
++ if ((ioh1588TargetTimeCallback) NULL == callBack) {
++ IOH_LOG(KERN_ERR, "ioh_1588_target_time_interrupt_enable\
++ invalid callback;returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++
++ /* Register the Callback */
++ ioh_tt_cbptr = callBack;
++
++ /* Set target time interrupt mask */
++ IOH_DEBUG("ioh_1588_target_time_interrupt_enable:invoking\
++ ioh_1588_ttm_imask_set\n");
++ ioh_1588_ttm_imask_set();
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_target_time_interrupt_disable(void)
++ *
++ * @brief Disable the target time reached/exceeded system time interrupt
++ *
++ * @remarks This API disables the interrupt that occurs when the System time
++ * reaches the Target time set in the IEEE 1588 hardware assist
++ * block.
++ * The main steps followed in this function are:
++ * - Modify Time Sync Control Register to disable target time
++ * interrupt
++ * - Clear the callback handler
++ *
++ * @param None
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ */
++enum ioh_status ioh_1588_target_time_interrupt_disable(void)
++{
++ /* Clear target time interrupt mask */
++ IOH_DEBUG("ioh_1588_target_time_interrupt_disable:invoking \
++ ioh_1588_ttm_imask_clear\n");
++ ioh_1588_ttm_imask_clear();
++
++ /* Unregister the Callback */
++ ioh_tt_cbptr = (ioh1588TargetTimeCallback) NULL;
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_target_time_poll(
++ * unsigned long *ttmPollFlag, struct ioh1588TimeValue *targetTime)
++ *
++ * @brief Poll to verify whether the System time is greater or equal
++ * to the Target time in the IEEE 1588 hardware assist block.
++ *
++ * @remarks The main steps followed in this function are:
++ * - Validate the parameters and return IOH_1588_INVALIDPARAM, if
++ * not valid
++ * - If callback function registered, return status
++ * IOH_1588_INTERRUPTMODEINUSE
++ * - Read the TS_Event register to check for the presence of valid
++ * snapshot
++ * - Read the TargetTimeSnap low, high registers
++ * - Clear the event from TS_Event register
++ *
++ *
++ * @param ttmPollFlag [OUT] TRUE if target time has reached system
++ * time
++ * FALSE if target time has not reached
++ * system time
++ * @param targetTime [OUT] Snap shot of target time captured
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ * @li IOH_1588_INVALIDPARAM - Null parameter passed
++ * @li IOH_1588_INTERRUPTMODEINUSE - Interrupt mode is in use
++ */
++enum ioh_status
++ioh_1588_target_time_poll(unsigned long *ttmPollFlag,
++ struct ioh1588TimeValue *targetTime)
++{
++ /* Verify the parameters */
++ if (((unsigned long *)NULL == ttmPollFlag) ||
++ ((struct ioh1588TimeValue *) NULL == targetTime)) {
++ IOH_LOG(KERN_ERR, "ioh_1588_target_time_poll: invalid param\
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++
++ /* Is interrupt mode of processing is enabled? */
++ if ((ioh1588TargetTimeCallback) NULL != ioh_tt_cbptr) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_target_time_poll:returning \
++ IOH_1588_INTERRUPTMODEINUSE\n");
++ return IOH_1588_INTERRUPTMODEINUSE;
++ }
++
++ /* Is the System Time reached or exceeded Target Time? */
++ *ttmPollFlag = ioh_1588_ttm_evt_get();
++ if (FALSE == *ttmPollFlag) {
++ IOH_DEBUG
++ ("ioh_1588_target_time_poll:target time not reached\n");
++ /* Target Time not to be returned yet */
++ targetTime->timeValueLowWord = 0;
++ targetTime->timeValueHighWord = 0;
++
++ return IOH_1588_SUCCESS;
++ }
++
++ IOH_DEBUG("ioh_1588_target_time_poll:target time reached\n");
++ /* Get the Target Time */
++ ioh_1588_tgt_snap_get(&targetTime->timeValueLowWord,
++ &targetTime->timeValueHighWord);
++
++ IOH_DEBUG("ioh_1588_target_time_poll:target time:low=%lx high=%lx\n",
++ targetTime->timeValueLowWord, targetTime->timeValueHighWord);
++
++ IOH_DEBUG
++ ("ioh_1588_target_time_poll:invoking ioh_1588_ttm_evt_clear\n");
++ /* Clear the target time reached condition (ttipend bit) */
++ ioh_1588_ttm_evt_clear();
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_target_time_set(
++ * struct ioh1588TimeValue targetTime)
++ *
++ * @brief Sets the Target Time in the IEEE 1588 hardware assist block
++ *
++ * @remarks This API will set the Target Time to a given value.
++ *
++ * @param targetTime [IN] - Target time to set
++ *
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ */
++enum ioh_status ioh_1588_target_time_set(struct ioh1588TimeValue targetTime)
++{
++ unsigned long old_mask = FALSE;
++
++ /* Retrieve existing target time interrupt mask value */
++ old_mask = ioh_1588_ttm_imask_get();
++ IOH_DEBUG("ioh_1588_target_time_set:target time interrupt mask=%lx\n",
++ old_mask);
++
++ /*
++ * Clear the target time interrupt mask so that the interrupt will not
++ * come
++ * during the time we manipulate the registers.
++ */
++ IOH_DEBUG("ioh_1588_target_time_set:invoking ioh_1588_ttm_imask_clear\
++ to clear the target time interrupt mask\n");
++ ioh_1588_ttm_imask_clear();
++
++ IOH_DEBUG("ioh_1588_target_time_set:invoking ioh_1588_tgt_snap_set \
++ with values:low=%lx,high=%lx\n", \
++ targetTime.timeValueLowWord, targetTime.timeValueHighWord);
++ /* Update Target Time with user specified values */
++ ioh_1588_tgt_snap_set(targetTime.timeValueLowWord,
++ targetTime.timeValueHighWord);
++
++ /*
++ * Let the hardware assist re-evaluate the target time reached
++ * condition based on the new target time
++ */
++ IOH_DEBUG("ioh_1588_target_time_set:invoking ioh_1588_ttm_evt_clear\n");
++ ioh_1588_ttm_evt_clear();
++
++ /* Restore the preserved target time interrupt mask value */
++ if (TRUE == old_mask) {
++ IOH_DEBUG
++ ("ioh_1588_target_time_set:invoking \
++ ioh_1588_ttm_imask_set\n");
++ ioh_1588_ttm_imask_set();
++ }
++
++ IOH_DEBUG("ioh_1588_target_time_set:returning IOH_1588_SUCCESS\n");
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_target_time_get(
++ * struct ioh1588TimeValue *targetTime)
++ *
++ * @brief Gets the Target Time in the IEEE 1588 hardware assist block
++ *
++ * @remarks This API will get the Target Time from IEEE 1588 block
++ * - Return IOH_1588_INVALIDPARAM if argument passed is NULL
++ * - Read and return the content of TargetTime low, high registers
++ *
++ *
++ * @param targetTime [IN] - Address to which target time is to be returned
++ *
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ * @li IOH_1588_INVALIDPARAM - Null parameter passed
++ */
++enum ioh_status ioh_1588_target_time_get(struct ioh1588TimeValue *targetTime)
++{
++ /* Verify the parameter */
++ if ((struct ioh1588TimeValue *) NULL == targetTime) {
++ IOH_DEBUG("ioh_1588_target_time_get:invalid param \
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++
++ /* Get Target Time */
++ ioh_1588_tgt_snap_get(&targetTime->timeValueLowWord,
++ &targetTime->timeValueHighWord);
++ IOH_DEBUG("ioh_1588_target_time_get:invoked ioh_1588_tgt_snap_get \
++ target time:low=%lx,high:%lx\n", \
++ targetTime->timeValueLowWord, targetTime->timeValueHighWord);
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_aux_time_interrupt_enable(
++ * enum ioh1588AuxMode auxMode,ioh1588AuxTimeCallback callBack)
++ *
++ * @brief Enables the interrupt for the Auxiliary Master/Slave mode for
++ * Time
++ * Stamp in the IEEE 1588 hardware assist block
++ *
++ * @remarks This API will enable the Auxiliary Master/Slave
++ * Time stamp Interrupt. The main steps followed in
++ * this function are:
++ * - Return IOH_1588_INVALIDPARAM if argument passed is NULL
++ * - Modify the Time Sync Controller register to enable the
++ * interrupt
++ * - Set the callback routine
++ *
++ *
++ * @param auxMode [IN] - Auxiliary slave or master mode
++ * @param callBack [IN] - Callback to be invoked when interrupt
++ * fires
++ *
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ * @li IOH_1588_INVALIDPARAM - Null parameter passed
++ */
++enum ioh_status
++ioh_1588_aux_time_interrupt_enable(enum ioh1588AuxMode auxMode,
++ ioh1588AuxTimeCallback callBack)
++{
++ /* Verify the parameters */
++ if ((IOH_1588_AUXMODE_INVALID <= auxMode) ||
++ ((ioh1588AuxTimeCallback) NULL == callBack)) {
++ IOH_DEBUG("ioh_1588_aux_time_interrupt_enable:invalid param \
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++
++ /* Register the Callback and SET the amm/asm bits on */
++ if (IOH_1588_AUXMODE_MASTER == auxMode) {
++ IOH_DEBUG
++ ("ioh_1588_aux_time_interrupt_enable:IOH_1588_AUXMODE_MASTER \
++ invoking ioh_1588_amms_imask_set\n");
++ ioh_am_cbptr = callBack;
++ ioh_1588_amms_imask_set();
++
++ } else {
++ IOH_DEBUG
++ ("ioh_1588_aux_time_interrupt_enable:IOH_1588_AUXMODE_SLAVE \
++ invoking ioh_1588_asms_imask_set\n");
++ ioh_as_cbptr = callBack;
++ ioh_1588_asms_imask_set();
++ }
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_aux_time_interrupt_disable(
++ * enum ioh1588AuxMode auxMode)
++ *
++ * @brief Disables the interrupt for the Auxiliary Master/Slave mode for
++ * Time
++ * Stamp in the IEEE 1588 hardware assist block
++ *
++ * @remarks This API will disable the Auxiliary Master/Slave
++ * Time stamp Interrupt. The main steps followed in this
++ * function are:
++ * - Return IOH_1588_INVALIDPARAM if auxMode passed is not valid
++ * - Modify the Time Sync Controller register to disable the
++ * interrupt
++ * - Clear the callback handler
++ *
++ *
++ * @param auxMode [IN] - Auxiliary slave or master mode
++ *
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ * @li IOH_1588_INVALIDPARAM - Invalid mode specified
++ */
++enum ioh_status ioh_1588_aux_time_interrupt_disable(enum ioh1588AuxMode auxMode)
++{
++ /* Verify the parameters */
++ if (IOH_1588_AUXMODE_INVALID <= auxMode) {
++ IOH_DEBUG("ioh_1588_aux_time_interrupt_disable:invalid param \
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++
++ if (IOH_1588_AUXMODE_MASTER == auxMode) {
++ IOH_DEBUG
++ ("ioh_1588_aux_time_interrupt_disable:\
++ IOH_1588_AUXMODE_MASTER\
++ invoking ioh_1588_amms_imask_clear\n");
++ ioh_1588_amms_imask_clear();
++ ioh_am_cbptr = (ioh1588AuxTimeCallback) NULL;
++ } else {
++ IOH_DEBUG
++ ("ioh_1588_aux_time_interrupt_disable:\
++ IOH_1588_AUXMODE_SLAVE\
++ invoking ioh_1588_asms_imask_clear\n");
++ ioh_1588_asms_imask_clear();
++ ioh_as_cbptr = (ioh1588AuxTimeCallback) NULL;
++ }
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_aux_time_poll(
++ * enum ioh1588AuxMode auxMode, unsigned long *pollFlag,
++ * struct ioh1588TimeValue*auxTime)
++ *
++ * @brief Poll for the Auxiliary Time Stamp captured event for the mode
++ * requested
++ *
++ * @remarks Polls for the Time stamp in the appropriate Auxiliary Snapshot
++ * Registers based on the mode specified. Return true and
++ * the contents of the Auxiliary snapshot if it is available
++ * otherwise return false. The main steps followed in this function
++ * are:
++ * - Validate the parameters and return IOH_1588_INVALIDPARAM if
++ * found invalid
++ * - If callbacks registered, return status
++ * IOH_1588_INTERRUPTMODEINUSE
++ * - Read the TS_Event register to check for the presence of valid
++ * snapshot
++ * - If the event is not set, return status IOH_1588_NOTIMESTAMP
++ * - Read the AuxSlaveModeSnap or AuxMasterModeSnap low, high
++ * registers depending on the auxMode
++ * - Clear the event from TS_Event register
++ *
++ *
++ * @param auxMode [IN] Auxiliary Snapshot Register
++ * (Master/Slave) to be checked
++ * @param pollFlag [OUT] TRUE if time stamp captured in aux
++ * snapshot register
++ * FALSE if the time stamp not captured
++ * @param auxTime [OUT] Buffer for returning captured Auxiliary
++ * Snapshot time
++ *
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ * @li IOH_1588_INVALIDPARAM - Invalid parameter passed
++ * @li IOH_1588_INTERRUPTMODEINUSE - Interrupt mode is in use
++ */
++enum ioh_status
++ioh_1588_aux_time_poll(enum ioh1588AuxMode auxMode, unsigned long *pollFlag,
++ struct ioh1588TimeValue *auxTime)
++{
++ unsigned long ammsFlag = FALSE;
++ unsigned long asmsFlag = FALSE;
++
++ /* Verify the parameters */
++ if (((unsigned long *)NULL == pollFlag) ||
++ (IOH_1588_AUXMODE_INVALID <= auxMode) ||
++ ((struct ioh1588TimeValue *) NULL == auxTime)) {
++ IOH_DEBUG("ioh_1588_aux_time_poll:invalid param \
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++
++ /* Get Auxiliary Master/Slave Mode Snapshot */
++ if (IOH_1588_AUXMODE_MASTER == auxMode) {
++ IOH_DEBUG("ioh_1588_aux_time_poll:IOH_1588_AUXMODE_MASTER\n");
++ /* Is interrupt mode of processing is enabled? */
++ if ((ioh1588AuxTimeCallback) NULL != ioh_am_cbptr) {
++ IOH_DEBUG
++ ("ioh_1588_aux_time_poll:interrupt mode in use\n");
++ return IOH_1588_INTERRUPTMODEINUSE;
++ }
++
++ /* Is the Auxiliary Master Mode Snapshot available? */
++ ammsFlag = ioh_1588_amms_evt_get();
++ if (FALSE == ammsFlag) {
++ IOH_DEBUG("ioh_1588_aux_time_poll:NO Auxiliary Master \
++ Mode Snapshot available\n");
++ *pollFlag = FALSE;
++ auxTime->timeValueLowWord = 0;
++ auxTime->timeValueHighWord = 0;
++ return IOH_1588_SUCCESS;
++ }
++
++ /* Get Auxiliary Master Snapshot */
++ ioh_1588_aux_master_snap_get(&auxTime->timeValueLowWord,
++ &auxTime->timeValueHighWord);
++ IOH_DEBUG("ioh_1588_aux_time_poll:Auxiliary Master Snapshot \
++ low=%lx,high=%lx\n", auxTime->timeValueLowWord, \
++ auxTime->timeValueHighWord);
++
++ *pollFlag = TRUE;
++
++ /* Clear the snapshot availability condition */
++ IOH_DEBUG
++ ("ioh_1588_aux_time_poll:invoking ioh_1588_amms_evt_clear\n");
++ ioh_1588_amms_evt_clear();
++ } else { /* IOH_1588_AUXMODE_SLAVE == auxMode */
++
++ IOH_DEBUG("ioh_1588_aux_time_poll:IOH_1588_AUXMODE_SLAVE\n");
++ /* Is interrupt mode of processing is enabled? */
++ if ((ioh1588AuxTimeCallback) NULL != ioh_as_cbptr) {
++ IOH_DEBUG
++ ("ioh_1588_aux_time_poll:interrupt mode in use\n");
++ return IOH_1588_INTERRUPTMODEINUSE;
++ }
++
++ /* Is the Auxiliary Slave Mode Snapshot available? */
++ asmsFlag = ioh_1588_asms_evt_get();
++ if (FALSE == asmsFlag) {
++ IOH_DEBUG("ioh_1588_aux_time_poll:NO Auxiliary Slave \
++ Mode Snapshot available\n");
++ *pollFlag = FALSE;
++ auxTime->timeValueLowWord = 0;
++ auxTime->timeValueHighWord = 0;
++ return IOH_1588_SUCCESS;
++ }
++
++ /* Get Auxiliary Slave Snapshot */
++ ioh_1588_aux_slave_snap_get(&auxTime->timeValueLowWord,
++ &auxTime->timeValueHighWord);
++ IOH_DEBUG("ioh_1588_aux_time_poll:Auxiliary Slave Snapshot \
++ low=%lx,high=%lx\n", auxTime->timeValueLowWord, \
++ auxTime->timeValueHighWord);
++
++ *pollFlag = TRUE;
++
++ /* Clear the snapshot availability condition */
++ IOH_DEBUG
++ ("ioh_1588_aux_time_poll:invoking ioh_1588_amms_evt_clear\n");
++ ioh_1588_asms_evt_clear();
++ }
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_reset(void)
++ *
++ * @brief Resets the IEEE 1588 hardware assist block
++ *
++ * @remarks Resets the IEEE 1588 hardware assist block. The
++ * main steps followed in this function are:
++ * - Set the reset bit of Time Sync Control register
++ * - Clear the reset bit of Time Sync Control register
++ * -Note: For A0/A1 sample, test mode setting is enabled for
++ * the 64 bit System Time Register. This is a work around for
++ * the non continuous value in the 64 bit System Time Register
++ * consisting of High(32bit) / Low(32bit)
++ *
++ *
++ * @param None
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation is successful
++ */
++enum ioh_status ioh_1588_reset(void)
++{
++ /* Reset Hardware Assist */
++ IOH_DEBUG("ioh_1588_reset:invoking ioh_1588_block_reset\n");
++ ioh_1588_block_reset();
++
++ /* Clear Stats */
++ ioh_1588_stats.rxMsgs = ioh_1588_stats.txMsgs = 0;
++
++ /* Unregister any Callback Routines */
++ IOH_DEBUG("ioh_1588_reset:unregistering callbacks\n");
++ ioh_pps_cbptr = (ioh1588PulsePerSecondCallback) NULL;
++ ioh_am_cbptr = (ioh1588AuxTimeCallback) NULL;
++ ioh_as_cbptr = (ioh1588AuxTimeCallback) NULL;
++ ioh_tt_cbptr = (ioh1588TargetTimeCallback) NULL;
++
++#ifdef IOH_IEEE1588_A0_A1_SAMPLE_BUG
++ /*enable all 32 bits in system time registers */
++ ioh_1588_set_system_time_count();
++#endif
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_chnl_reset(enum ioh1588PTPPort ptpPort)
++ *
++ * @brief Resets the IEEE 1588 channel by resetting the hardware block
++ *
++ * @remarks This API also sets the reset bit in the IEEE1588 to fully
++ * resets the block. The main steps followed in this function
++ * are:
++ * - Return IOH_1588_INVALIDPARAM if ptpPort passed is not valid
++ * - Perform a block level reset by invoking ioh_1588_reset
++ *
++ *
++ * @param ptpPort [IN] The PTP port that is to be reset
++ *
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ */
++enum ioh_status ioh_1588_chnl_reset(enum ioh1588PTPPort ptpPort)
++{
++ IOH_DEBUG("ioh_1588_chnl_reset:invoking ioh_1588_reset\n");
++ return ioh_1588_reset();
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_stats_get(struct ioh1588Stats *stats)
++ *
++ * @brief Returns the ioh1588 Statistics
++ *
++ * @remarks This API will return the statistics of the snapshots captured
++ * for
++ * receive and transmit of messages. The main steps followed in
++ * this
++ * function are:
++ * - Return IOH_1588_INVALIDPARAM if argument passed is NULL
++ * - Return the counter values stored for Rx and Tx messages
++ *
++ *
++ * @param stats [OUT] Buffer for returning the statistics
++ * counter values
++ *
++ *
++ * @note These counters are updated only when the client application
++ * polls for
++ * the time stamps or interrupt are enabled.
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation is successful
++ * @li IOH_1588_INVALIDPARAM - NULL parameter passed
++ */
++
++enum ioh_status ioh_1588_stats_get(struct ioh1588Stats *stats)
++{
++ /* Verify the parameter */
++ if ((struct ioh1588Stats *) NULL == stats) {
++ IOH_DEBUG("ioh_1588_stats_get:invalid param \
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++
++ /* Return the statistics */
++ stats->rxMsgs = ioh_1588_stats.rxMsgs;
++ stats->txMsgs = ioh_1588_stats.txMsgs;
++ IOH_DEBUG("ioh_1588_stats_get:stats-txMsg=%lx,rxMsg=%lx\n",
++ stats->txMsgs, stats->rxMsgs);
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn void ioh_1588_stats_reset(void)
++ *
++ * @brief Resets the statistics counters
++ *
++ * @remarks This API will reset the statistics counters maintained by the
++ * driver
++ *
++ * @param None
++ * @retval None
++ */
++void ioh_1588_stats_reset(void)
++{
++ /* Clear the statistics */
++ IOH_DEBUG("ioh_1588_stats_reset:clearing stats\n");
++ ioh_1588_stats.rxMsgs = ioh_1588_stats.txMsgs = 0;
++
++ return;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn void ioh_1588_save_state(void)
++ *
++ * @brief Save the state of all registers
++ *
++ * @remarks Save the content of all registers of IEEE1588 module
++ *
++ * @param None.
++ *
++ * @retval None
++ */
++void ioh_1588_save_state(void)
++{
++ int i;
++ unsigned long val;
++
++ /* Time stamp control register */
++ ioh_1588_regs.ts_control =
++ (ioh_1588_ttm_imask_get() << IOH_1588_TSC_TTM_SHIFT) |
++ (ioh_1588_asms_imask_get() << IOH_1588_TSC_ASMS_SHIFT) |
++ (ioh_1588_amms_imask_get() << IOH_1588_TSC_AMMS_SHIFT) |
++ (ioh_1588_pps_imask_get() << IOH_1588_TSC_PPSM_SHIFT);
++ IOH_DEBUG("ioh_1588_save_state:TS_CONTROL reg=%lx\n",
++ ioh_1588_regs.ts_control);
++
++ /*
++ * Time stamp event register; clear on write,
++ * so no point in reading and then saving;
++ * Will be cleared on restore to start in a clean slate
++ */
++ ioh_1588_regs.ts_event = IOH_1588_TSE_TTIPEND | IOH_1588_TSE_SNS |
++ IOH_1588_TSE_SNM | IOH_1588_TSE_PPS;
++ IOH_DEBUG("ioh_1588_save_state:TS_EVENT reg=%lx\n",
++ ioh_1588_regs.ts_event);
++
++ /* Addend register */
++ ioh_1588_addend_get(&ioh_1588_regs.ts_addend);
++ IOH_DEBUG("ioh_1588_save_state:TS_ADDEND reg=%lx\n",
++ ioh_1588_regs.ts_addend);
++
++ /* PPS comapre register */
++ ioh_1588_pps_get(&ioh_1588_regs.ts_compare);
++ IOH_DEBUG("ioh_1588_save_state:TS_COMPARE reg=%lx\n",
++ ioh_1588_regs.ts_compare);
++
++ /* System time Low and Hi registers */
++ ioh_1588_sys_snap_get(&ioh_1588_regs.ts_syslo, &ioh_1588_regs.ts_syshi);
++ IOH_DEBUG("ioh_1588_save_state:sys time reg-low =%lx,high=%lx\n",
++ ioh_1588_regs.ts_syslo, ioh_1588_regs.ts_syshi);
++
++ /* Target time Low and Hi registers */
++ ioh_1588_tgt_snap_get(&ioh_1588_regs.ts_tgtlo, &ioh_1588_regs.ts_tgthi);
++ IOH_DEBUG("ioh_1588_save_state:target time reg-low =%lx,high=%lx\n",
++ ioh_1588_regs.ts_tgtlo, ioh_1588_regs.ts_tgthi);
++
++#if 0
++ /*
++ * Below registers are read only, so no point in reading/storing, since
++ * we can't restore them
++ */
++ /* Slave mode snapshot Low and Hi registers */
++ ioh_1588_aux_slave_snap_get(&ioh_1588_regs.ts_asmslo,
++ &ioh_1588_regs.ts_asmshi);
++
++ /* Master mode snapshot Low and Hi registers */
++ ioh_1588_aux_master_snap_get(&ioh_1588_regs.ts_ammslo,
++ &ioh_1588_regs.ts_ammshi);
++#endif
++ ioh_1588_regs.ts_cc =
++ (ioh_1588_master_mode_get() << IOH_1588_CC_MM_SHIFT) |
++ (ioh_1588_timestamp_all_get() << IOH_1588_CC_TA_SHIFT) |
++ (ioh_1588_op_mode_get() << IOH_1588_CC_MODE_SHIFT) |
++ (ioh_1588_version_get() << IOH_1588_CC_VERSION_SHIFT);
++ IOH_DEBUG("ioh_1588_save_state:TS_CC reg=%lx\n", ioh_1588_regs.ts_cc);
++
++ /* Channel event register, not saved - will be cleared on restore */
++ ioh_1588_regs.ts_ce = IOH_1588_CE_TXS | IOH_1588_CE_RXS;
++
++#if 0
++ /*
++ * Below registers are read only, so no point in reading/storing, since
++ * we can't restore them
++ */
++ ioh_1588_rx_snap_get(&ioh_1588_regs.ts_xslo, &ioh_1588_regs.ts_xshi);
++ ioh_1588_tx_snap_get(&ioh_1588_regs.ts_rslo, &ioh_1588_regs.ts_rshi);
++ ioh_1588_uuid_seqid_get(&ioh_1588_regs.ts_uuidlo,
++ &ioh_1588_regs.ts_uuidhi);
++
++ /* CAN */
++ ioh_1588_can_snap_get(&ioh_1588_regs.ts_cxslo, &ioh_1588_regs.ts_cxshi);
++#endif
++
++ /* CAN Channel event register, not saved - will be cleared on restore */
++ ioh_1588_regs.ts_cce = IOH_1588_CE_OVR | IOH_1588_CE_VAL;
++
++ /* Ethernet CAN selector register */
++ ioh_1588_regs.ts_sel =
++ (ioh_1588_eth_enable_get() << IOH_1588_ECS_ETH_SHIFT) |
++ (ioh_1588_can_enable_get() << IOH_1588_ECS_CAN_SHIFT);
++ IOH_DEBUG("ioh_1588_save_state:TS_SEL reg=%lx\n", ioh_1588_regs.ts_sel);
++
++ /* Station Address registers */
++ for (i = 0; i < IOH_1588_STATION_BYTES; i++) {
++ ioh_1588_station_get(i, &val);
++ ioh_1588_regs.ts_sti[i] = val & 0xff;
++ IOH_DEBUG("ioh_1588_save_state:TS_ST[%d] reg=%d\n", i,
++ ioh_1588_regs.ts_sti[i]);
++ }
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn void ioh_1588_restore_state(void)
++ *
++ * @brief Restore the state of all registers
++ *
++ * @remarks Restores the content of all registers of IEEE1588 module.
++ * Note: For A0/A1 sample, test mode setting is enabled for
++ * the 64 bit System Time Register. This is a work around for
++ * the non continuous value in the 64 bit System Time
++ * Register
++ * consisting of High(32bit) / Low(32bit)
++ *
++ * @param None
++ * @retval None
++ */
++void ioh_1588_restore_state(void)
++{
++ int i;
++
++ /* Time stamp control register: re-enable only the interrupt masks
++ * that were set when the state was saved. */
++ if (ioh_1588_regs.ts_control & IOH_1588_TSC_TTM_MASK) {
++ IOH_DEBUG
++ ("ioh_1588_restore_state:invoking ioh_1588_ttm_imask_set\n");
++ ioh_1588_ttm_imask_set();
++ }
++ if (ioh_1588_regs.ts_control & IOH_1588_TSC_ASMS_MASK) {
++ IOH_DEBUG
++ ("ioh_1588_restore_state:invoking ioh_1588_asms_imask_set\n");
++ ioh_1588_asms_imask_set();
++ }
++ if (ioh_1588_regs.ts_control & IOH_1588_TSC_AMMS_MASK) {
++ IOH_DEBUG
++ ("ioh_1588_restore_state:invoking ioh_1588_amms_imask_set\n");
++ ioh_1588_amms_imask_set();
++ }
++ if (ioh_1588_regs.ts_control & IOH_1588_TSC_PPSM_MASK) {
++ IOH_DEBUG
++ ("ioh_1588_restore_state:invoking ioh_1588_pps_imask_set\n");
++ ioh_1588_pps_imask_set();
++ }
++
++ /* Time stamp event register; clear all events */
++ IOH_DEBUG("ioh_1588_restore_state:invoking ioh_1588_ttm_evt_clear\n");
++ ioh_1588_ttm_evt_clear();
++ IOH_DEBUG("ioh_1588_restore_state:invoking ioh_1588_asms_evt_clear\n");
++ ioh_1588_asms_evt_clear();
++ IOH_DEBUG("ioh_1588_restore_state:invoking ioh_1588_amms_evt_clear\n");
++ ioh_1588_amms_evt_clear();
++ IOH_DEBUG("ioh_1588_restore_state:invoking ioh_1588_pps_evt_clear\n");
++ ioh_1588_pps_evt_clear();
++
++#ifdef IOH_IEEE1588_A0_A1_SAMPLE_BUG
++ /*enable all 32 bits in system time registers */
++ ioh_1588_set_system_time_count();
++#endif
++
++ /* Addend register */
++ IOH_DEBUG("ioh_1588_restore_state:invoking ioh_1588_addend_set\n");
++ ioh_1588_addend_set(ioh_1588_regs.ts_addend);
++
++ /* PPS compare register */
++ IOH_DEBUG("ioh_1588_restore_state:invoking ioh_1588_pps_set\n");
++ ioh_1588_pps_set(ioh_1588_regs.ts_compare);
++
++ /* System time Low and Hi registers */
++ IOH_DEBUG("ioh_1588_restore_state:invoking ioh_1588_sys_snap_set\n");
++ ioh_1588_sys_snap_set(ioh_1588_regs.ts_syslo, ioh_1588_regs.ts_syshi);
++
++ /* Target time Low and Hi registers */
++ IOH_DEBUG("ioh_1588_restore_state:invoking ioh_1588_tgt_snap_set\n");
++ ioh_1588_tgt_snap_set(ioh_1588_regs.ts_tgtlo, ioh_1588_regs.ts_tgthi);
++
++ /* Ethernet Channel Control register */
++ if (ioh_1588_regs.ts_cc & IOH_1588_CC_MM) {
++ IOH_DEBUG
++ ("ioh_1588_restore_state:invoking ioh_1588_master_mode_set\
++ with TRUE as parameter\n");
++ ioh_1588_master_mode_set(TRUE);
++ }
++ if (ioh_1588_regs.ts_cc & IOH_1588_CC_TA) {
++ IOH_DEBUG
++ ("ioh_1588_restore_state:invoking ioh_1588_timestamp_all_set\
++ with TRUE as parameter\n");
++ ioh_1588_timestamp_all_set(TRUE);
++ }
++ IOH_DEBUG("ioh_1588_restore_state:invoking ioh_1588_op_mode_set\n");
++ ioh_1588_op_mode_set((ioh_1588_regs.ts_cc & IOH_1588_CC_MODE_MASK) >>
++ IOH_1588_CC_MODE_SHIFT);
++ if (ioh_1588_regs.ts_cc & IOH_1588_CC_VERSION) {
++ IOH_DEBUG("ioh_1588_restore_state:invoking ioh_1588_version_set\
++ with IOH_1588PTP_VERSION_1 as parameter\n");
++ ioh_1588_version_set(IOH_1588PTP_VERSION_1);
++ }
++
++ /* Channel event register, cleared on restore */
++ IOH_DEBUG
++ ("ioh_1588_restore_state:invoking ioh_1588_rx_snap_evt_clear\n");
++ ioh_1588_rx_snap_evt_clear();
++ IOH_DEBUG
++ ("ioh_1588_restore_state:invoking ioh_1588_tx_snap_evt_clear\n");
++ ioh_1588_tx_snap_evt_clear();
++
++ /* CAN Channel event register, cleared on restore */
++ IOH_DEBUG
++ ("ioh_1588_restore_state:invoking ioh_1588_can_snap_ovr_clear\n");
++ ioh_1588_can_snap_ovr_clear();
++ IOH_DEBUG
++ ("ioh_1588_restore_state:invoking ioh_1588_can_snap_valid_clear\n");
++ ioh_1588_can_snap_valid_clear();
++
++ /* Ethernet CAN selector register */
++ if (ioh_1588_regs.ts_sel & IOH_1588_ECS_ETH) {
++ IOH_DEBUG
++ ("ioh_1588_restore_state:invoking ioh_1588_eth_enable_set\n");
++ ioh_1588_eth_enable_set();
++ }
++ if (ioh_1588_regs.ts_sel & IOH_1588_ECS_CAN) {
++ IOH_DEBUG
++ ("ioh_1588_restore_state:invoking ioh_1588_can_enable_set\n");
++ ioh_1588_can_enable_set();
++ }
++
++ /* Station Address registers */
++ for (i = 0; i < IOH_1588_STATION_BYTES; i++) {
++ IOH_DEBUG("ioh_1588_restore_state:invoking ioh_1588_station_set\
++ for station=%d\n", i);
++ ioh_1588_station_set(i, ioh_1588_regs.ts_sti[i]);
++ }
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn void ioh_1588_show(void)
++ *
++ * @brief Display the dump of IEEE 1588 registers
++ *
++ * @remarks This API will dump the contents of configuration, event and
++ * snapshot
++ * registers of the IEEE1588 module
++ *
++ * @param None.
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS
++ */
++enum ioh_status ioh_1588_show(void)
++{
++ int i;
++ unsigned long flag = FALSE;
++ unsigned long reg_low = 0;
++ unsigned long reg_hi = 0;
++ unsigned int seq_id = 0;
++ unsigned long uuid_low = 0;
++ unsigned long uuid_hi = 0;
++
++ /*dump all register as such */
++ IOH_DEBUG("TS Control Register offset = %x,content = %x\n",
++ IOH_1588_TSC_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_TSC_OFFSET));
++ IOH_DEBUG("TS Event Register offset = %x,content = %x\n",
++ IOH_1588_TSE_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_TSE_OFFSET));
++ IOH_DEBUG("TS Addend Register offset = %x,content = %x\n",
++ IOH_1588_ADD_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_ADD_OFFSET));
++ IOH_DEBUG("TS Accumulator Register offset = %x,content = %x\n",
++ IOH_1588_ACC_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_ACC_OFFSET));
++ IOH_DEBUG("TS Test Register offset = %x,content = %x\n",
++ IOH_1588_TST_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_TST_OFFSET));
++ IOH_DEBUG("TS PPS Compare Register offset = %x,content = %x\n",
++ IOH_1588_PPS_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_PPS_OFFSET));
++ IOH_DEBUG("TS System Time Low Register offset = %x,content = %x\n",
++ IOH_1588_STL_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_STL_OFFSET));
++ IOH_DEBUG("TS System Time High Register offset = %x,content = %x\n",
++ IOH_1588_STH_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_STH_OFFSET));
++ IOH_DEBUG("TS Target Time Low Register offset = %x,content = %x\n",
++ IOH_1588_TTL_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_TTL_OFFSET));
++ IOH_DEBUG("TS Target Time High Register offset = %x,content = %x\n",
++ IOH_1588_TTH_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_TTH_OFFSET));
++ IOH_DEBUG
++ ("TS Aux Slave Mode Snapshot Low Register offset = %x,content = %x\n",
++ IOH_1588_ASSL_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_ASSL_OFFSET));
++ IOH_DEBUG
++ ("TS Aux Slave Mode Snapshot High Register offset = %x,content = %x\n",
++ IOH_1588_ASSH_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_ASSH_OFFSET));
++ IOH_DEBUG
++ ("TS Aux Master Mode Snapshot Low Register offset = %x,content = %x\n",
++ IOH_1588_AMSL_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_AMSL_OFFSET));
++ IOH_DEBUG
++ ("TS Aux Master Mode Snapshot High Register offset = %x,content = %x\n",
++ IOH_1588_AMSH_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_AMSH_OFFSET));
++ IOH_DEBUG("TS Channel Control Register offset = %x,content = %x\n",
++ IOH_1588_CC_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_CC_OFFSET));
++ IOH_DEBUG("TS Channel Event Register offset = %x,content = %x\n",
++ IOH_1588_CE_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_CE_OFFSET));
++ IOH_DEBUG("TS Tx Snapshot High Register offset = %x,content = %x\n",
++ IOH_1588_XSH_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_XSH_OFFSET));
++ IOH_DEBUG("TS Tx Snapshot Low Register offset = %x,content = %x\n",
++ IOH_1588_XSL_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_XSL_OFFSET));
++ IOH_DEBUG("TS Rx Snapshot Low Register offset = %x,content = %x\n",
++ IOH_1588_RSL_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_RSL_OFFSET));
++ IOH_DEBUG("TS Rx Snapshot High Register offset = %x,content = %x\n",
++ IOH_1588_RSH_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_RSH_OFFSET));
++ IOH_DEBUG("TS Source UUID Low Register offset = %x,content = %x\n",
++ IOH_1588_UID_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_UID_OFFSET));
++ IOH_DEBUG
++ ("TS Source UUID High/SequenceID Register offset = %x,content = %x\n",
++ IOH_1588_SID_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_SID_OFFSET));
++ IOH_DEBUG("TS CAN Channel Status Register offset = %x,content = %x\n",
++ IOH_1588_CCE_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_CCE_OFFSET));
++ IOH_DEBUG("TS CAN Snapshot Low Register offset = %x,content = %x\n",
++ IOH_1588_CXSL_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_CXSL_OFFSET));
++ IOH_DEBUG("TS CAN Snapshot High Register offset = %x,content = %x\n",
++ IOH_1588_CXSH_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_CXSH_OFFSET));
++ IOH_DEBUG("TS Ethernet/CAN Select Register offset = %x,content = %x\n",
++ IOH_1588_ECS_OFFSET,
++ IOH_READ32(ioh_1588_base + IOH_1588_ECS_OFFSET));
++ /* Station Address registers */
++ IOH_DEBUG("TS Station Address [1-6]");
++ for (i = 0; i < IOH_1588_STATION_BYTES; i++) {
++ ioh_1588_station_get(i, &reg_low);
++ IOH_DEBUG(":%02lx", reg_low);
++ }
++ IOH_DEBUG("\n");
++
++ /* Target time reached interrupt mask */
++ flag = ioh_1588_ttm_imask_get();
++ IOH_LOG(KERN_ERR, "Target Time Interrupt Mask: %s\n",
++ ((TRUE == flag) ? "Set" : "Clear"));
++
++ /* Auxiliary Slave Mode Snapshot interrupt mask */
++ flag = ioh_1588_asms_imask_get();
++ IOH_LOG(KERN_ERR, "ASMS Interrupt Mask: %s\n",
++ ((TRUE == flag) ? "Set" : "Clear"));
++
++ /* Auxiliary Master Mode Snapshot interrupt mask */
++ flag = ioh_1588_amms_imask_get();
++ IOH_LOG(KERN_ERR, "AMMS Interrupt Mask: %s\n",
++ ((TRUE == flag) ? "Set" : "Clear"));
++
++ /* Pulse per second interrupt mask */
++ flag = ioh_1588_pps_imask_get();
++ IOH_LOG(KERN_ERR, "PPS Interrupt Mask: %s\n",
++ ((TRUE == flag) ? "Set" : "Clear"));
++
++ /* TS_Event Register */
++ /* Target time interrupt event */
++ flag = ioh_1588_ttm_evt_get();
++ IOH_LOG(KERN_ERR, "Target Time Interrupt Pending: %s\n",
++ ((TRUE == flag) ? "Set" : "Clear"));
++
++ /* Auxiliary Slave Mode Snapshot event */
++ flag = ioh_1588_asms_evt_get();
++ IOH_LOG(KERN_ERR, "ASMS Snapshot Event: %s\n",
++ ((TRUE == flag) ? "Set" : "Clear"));
++
++ /* Auxiliary Master Mode Snapshot event */
++ flag = ioh_1588_amms_evt_get();
++ IOH_LOG(KERN_ERR, "AMMS Snapshot Event: %s\n",
++ ((TRUE == flag) ? "Set" : "Clear"));
++
++ /* PPS Match event */
++ flag = ioh_1588_pps_evt_get();
++ IOH_LOG(KERN_ERR, "PPS Match Event: %s\n",
++ ((TRUE == flag) ? "Set" : "Clear"));
++
++ /* Addend Register */
++ reg_low = 0;
++ ioh_1588_addend_get(&reg_low);
++ IOH_LOG(KERN_ERR, "Frequency Scaling Value: %lx\n", reg_low);
++
++ /* PPS Compare Register */
++ reg_low = 0;
++ ioh_1588_pps_get(&reg_low);
++ IOH_LOG(KERN_ERR, "PPS Compare Register Value: %lx\n", reg_low);
++
++ /* System Time registers */
++ reg_low = reg_hi = 0;
++ ioh_1588_sys_snap_get(&reg_low, &reg_hi);
++ IOH_LOG(KERN_ERR, "System Time (Hi:Low): %lx : %lx\n", reg_hi, reg_low);
++
++ /* Target Time registers */
++ reg_low = reg_hi = 0;
++ ioh_1588_tgt_snap_get(&reg_low, &reg_hi);
++ IOH_LOG(KERN_ERR, "Target Time (Hi:Low): %lx : %lx\n", reg_hi, reg_low);
++
++ /* Auxiliary Slave Mode Snapshot registers */
++ reg_low = reg_hi = 0;
++ ioh_1588_aux_slave_snap_get(&reg_low, &reg_hi);
++ IOH_LOG(KERN_ERR,
++ "Auxiliary Slave Mode Snapshot (Hi:Low) : %lx : %lx\n", reg_hi,
++ reg_low);
++
++ /* Auxiliary Master Mode Snapshot registers */
++ reg_low = reg_hi = 0;
++ ioh_1588_aux_master_snap_get(&reg_low, &reg_hi);
++ IOH_LOG(KERN_ERR,
++ "Auxiliary Master Mode Snapshot (Hi:Low): %lx : %lx\n", reg_hi,
++ reg_low);
++
++ /* Ethernet port */
++ IOH_LOG(KERN_ERR, "\nPTP Eth Port\n");
++
++ /* Master Mode */
++ flag = ioh_1588_master_mode_get();
++ IOH_LOG(KERN_ERR, "Master Mode: %s\n",
++ ((TRUE == flag) ? "Set" : "Clear"));
++
++ /* Timestamp All PTP messages */
++ flag = ioh_1588_timestamp_all_get();
++ IOH_LOG(KERN_ERR, "Timestamp All Messages: %s\n",
++ ((TRUE == flag) ? "Set" : "Clear"));
++
++ /* Version */
++ flag = ioh_1588_version_get();
++ IOH_LOG(KERN_ERR, "Version support: %s\n",
++ ((TRUE == flag) ? "v1 and v2" : "v1 only"));
++
++ /* Receive Snapshot Locked */
++ flag = ioh_1588_rx_snap_evt();
++ IOH_LOG(KERN_ERR, "Receive Snapshot Locked: %s\n",
++ ((TRUE == flag) ? "Set" : "Clear"));
++
++ /* Transmit Snapshot Locked */
++ flag = ioh_1588_tx_snap_evt();
++ IOH_LOG(KERN_ERR, "Transmit Snapshot Locked: %s\n",
++ ((TRUE == flag) ? "Set" : "Clear"));
++
++ /* Receive Snapshot registers */
++ reg_low = reg_hi = 0;
++ ioh_1588_rx_snap_get(&reg_low, &reg_hi);
++ IOH_LOG(KERN_ERR, "Receive Snapshot (Hi:Low): %lx : %lx\n", reg_hi,
++ reg_low);
++
++ /* Transmit Snapshot registers */
++ reg_low = reg_hi = 0;
++ ioh_1588_tx_snap_get(&reg_low, &reg_hi);
++ IOH_LOG(KERN_ERR, "Transmit Snapshot (Hi:Low): %lx : %lx\n", reg_hi,
++ reg_low);
++
++ /* UUID and Sequence Id */
++ ioh_1588_uuid_seqid_get(&uuid_low, &uuid_hi, &seq_id);
++ IOH_LOG(KERN_ERR, "UUID (Hi:Lo): %lx : %lx\n", uuid_hi, uuid_low);
++ IOH_LOG(KERN_ERR, "Sequence id: %x\n", seq_id);
++
++ /* CAN port */
++ IOH_LOG(KERN_ERR, "\nPTP CAN Port:\n");
++
++ /* Snapshot Valid */
++ flag = ioh_1588_can_snap_valid();
++ IOH_LOG(KERN_ERR, "Snapshot Valid : %s\n",
++ ((TRUE == flag) ? "Set" : "Clear"));
++
++ /* Snapshot Overrun */
++ flag = ioh_1588_can_snap_ovr();
++ IOH_LOG(KERN_ERR, "Snapshot Overrun: %s\n",
++ ((TRUE == flag) ? "Set" : "Clear"));
++
++ /* CAN Snapshot registers */
++ reg_low = reg_hi = 0;
++ ioh_1588_can_snap_get(&reg_low, &reg_hi);
++ IOH_LOG(KERN_ERR, "CAN Snapshot (Hi:Low): %lx : %lx\n", reg_hi,
++ reg_low);
++
++ /* Ethernet Selector */
++ flag = ioh_1588_eth_enable_get();
++ IOH_LOG(KERN_ERR, "\nEthernet Enable: %s\n",
++ ((TRUE == flag) ? "Set" : "Clear"));
++
++ /* CAN Selector */
++ flag = ioh_1588_can_enable_get();
++ IOH_LOG(KERN_ERR, "CAN Enable: %s\n",
++ ((TRUE == flag) ? "Set" : "Clear"));
++
++ /* Station Address Registers */
++ IOH_LOG(KERN_ERR, "Station Address [1-6]");
++ for (i = 0; i < IOH_1588_STATION_BYTES; i++) {
++ ioh_1588_station_get(i, &reg_low);
++ IOH_LOG(KERN_ERR, ":%02lx", reg_low);
++ }
++ IOH_LOG(KERN_ERR, "\n");
++
++ /* Statistics */
++ IOH_LOG(KERN_ERR,
++ "Receive Snapshot Count: %lu\nTransmit Snapshot Count: %lu\n",
++ ioh_1588_stats.rxMsgs, ioh_1588_stats.txMsgs);
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_ptp_can_poll (
++ * enum ioh1588PTPPort ptpPort,
++ * struct ioh1588TimeValue *ptpTimeStamp)
++ *
++ * @brief Polls the IEEE 1588 message time stamp detect status on a given
++ * CAN PTP Port.
++ *
++ * @remarks This API polls for the availability of a time stamp on a CAN
++ * port.
++ *
++ * @param ptpPort [IN] PTP port to poll
++ * @param ptpTimeStamp [OUT] Buffer to store the snapshot captured
++ *
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ * @li IOH_1588_INVALIDPARAM - Invalid parameters passed
++ * @li IOH_1588_NOTIMESTAMP - No time stamp available
++ */
++enum ioh_status
++ioh_1588_ptp_can_poll(enum ioh1588PTPPort ptpPort, \
++ struct ioh1588TimeValue *ptpTimeStamp)
++{
++ unsigned long valid = FALSE;
++ unsigned long overrun = FALSE;
++
++ /* Verify the parameters for proper values */
++ if ((ptpPort != IOH_1588_CAN_0_1588PTP_PORT) ||
++ ((struct ioh1588TimeValue *) NULL == ptpTimeStamp)) {
++ IOH_DEBUG("ioh_1588_ptp_can_poll:invalid params\
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++
++ /* Check whether a new timestamp available? */
++ IOH_DEBUG("ioh_1588_ptp_can_poll:invoking ioh_1588_can_snap_valid\n");
++ valid = ioh_1588_can_snap_valid();
++
++ /* there is not a valid timestamp */
++ if (TRUE != valid) {
++ IOH_DEBUG("ioh_1588_ptp_can_poll:no valid timestamp\
++ returning IOH_1588_NOTIMESTAMP\n");
++ return IOH_1588_NOTIMESTAMP;
++ }
++
++ /* check overrun bit before retrieving timestamp */
++ IOH_DEBUG("ioh_1588_ptp_can_poll:invoking ioh_1588_can_snap_ovr\n");
++ overrun = ioh_1588_can_snap_ovr();
++
++ /* if the timestamp has been overwritten */
++ if (TRUE == overrun) {
++ IOH_DEBUG("ioh_1588_ptp_can_poll:overrun occurred\n");
++ /* reset valid and overrun bits */
++ IOH_DEBUG
++ ("ioh_1588_ptp_can_poll:invoking \
++ ioh_1588_can_snap_valid_clear\n");
++ ioh_1588_can_snap_valid_clear();
++ IOH_DEBUG
++ ("ioh_1588_ptp_can_poll:invoking \
++ ioh_1588_can_snap_ovr_clear\n");
++ ioh_1588_can_snap_ovr_clear();
++
++ /* return no valid timestamp available */
++ ptpTimeStamp->timeValueLowWord = 0;
++ ptpTimeStamp->timeValueHighWord = 0;
++
++ IOH_DEBUG
++ ("ioh_1588_ptp_can_poll:returning IOH_1588_NOTIMESTAMP\n");
++ return IOH_1588_NOTIMESTAMP;
++ }
++
++ /* Fetch the receive timestamp */
++ ioh_1588_can_snap_get(&ptpTimeStamp->timeValueLowWord,
++ &ptpTimeStamp->timeValueHighWord);
++ IOH_DEBUG("ioh_1588_ptp_can_poll:timestamp-low=%lx,high=%lx\n",
++ ptpTimeStamp->timeValueLowWord,
++ ptpTimeStamp->timeValueHighWord);
++
++ /* check overrun bit again to ensure timestamp is valid */
++ overrun = ioh_1588_can_snap_ovr();
++
++ /* if the timestamp has been overwritten */
++ if (TRUE == overrun) {
++ IOH_DEBUG("ioh_1588_ptp_can_poll:overrun occurred\n");
++ /* reset valid and overrun bits */
++ IOH_DEBUG
++ ("ioh_1588_ptp_can_poll:invoking \
++ ioh_1588_can_snap_valid_clear\n");
++ ioh_1588_can_snap_valid_clear();
++ IOH_DEBUG
++ ("ioh_1588_ptp_can_poll:invoking \
++ ioh_1588_can_snap_ovr_clear\n");
++ ioh_1588_can_snap_ovr_clear();
++
++ /* return no valid timestamp available */
++ ptpTimeStamp->timeValueLowWord = 0;
++ ptpTimeStamp->timeValueHighWord = 0;
++
++ IOH_DEBUG
++ ("ioh_1588_ptp_can_poll:returning IOH_1588_NOTIMESTAMP\n");
++ return IOH_1588_NOTIMESTAMP;
++ }
++
++ /* reset valid bit */
++ IOH_DEBUG
++ ("ioh_1588_ptp_can_poll:invoking ioh_1588_can_snap_valid_clear\n");
++ ioh_1588_can_snap_valid_clear();
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_handler(void)
++ *
++ * @brief Interrupt handler for the IEEE 1588 module
++ *
++ * @remarks Interrupt handler for the IEEE 1588 module
++ * The Interrupts are handled in the following order
++ * - 1 - Target Time Reached/Hit Condition
++ * - 2 - Auxiliary Master Timestamp
++ * - 3 - Auxiliary Slave Timestamp
++ * - 4 - pulse per second
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ */
++enum ioh_status ioh_1588_handler(void)
++{
++ struct ioh1588TimeValue tgt_time = { 0, 0 };
++ struct ioh1588TimeValue aux_time = { 0, 0 };
++ unsigned long pps;
++
++ /* If valid callbacks are available process each interrupt */
++
++ /* Handle Target Time Reached or Exceeded Interrupt */
++ if ((NULL != ioh_tt_cbptr) && (TRUE == ioh_1588_ttm_evt_get())) {
++ IOH_DEBUG
++ ("ioh_1588_handler:Target Time Reached or Exceeded \
++ Interrupt\n");
++ IOH_DEBUG("ioh_1588_handler:invoking ioh_1588_ttm_imask_clear\
++ to disable interrupts\n");
++ /* Disable interrupt */
++ ioh_1588_ttm_imask_clear();
++
++ /* Target Time registers contents */
++ ioh_1588_tgt_snap_get(&tgt_time.timeValueLowWord,
++ &tgt_time.timeValueHighWord);
++ IOH_DEBUG("ioh_1588_handler:target time-low=%lx,high=%lx\n",
++ tgt_time.timeValueLowWord,
++ tgt_time.timeValueHighWord);
++
++ IOH_DEBUG("ioh_1588_handler:invoking callback\n");
++ /* Invoke client callback */
++ (*ioh_tt_cbptr) (tgt_time);
++
++ /* Clear the target time reached condition (ttipend bit) */
++ IOH_DEBUG("ioh_1588_handler:invoking ioh_1588_ttm_evt_clear\n");
++ ioh_1588_ttm_evt_clear();
++ }
++
++ /* Handle Auxiliary Master Mode Snapshot Interrupt */
++ if ((NULL != ioh_am_cbptr) && (TRUE == ioh_1588_amms_evt_get())) {
++ IOH_DEBUG
++ ("ioh_1588_handler:Auxiliary Master Mode Snapshot Interrupt\n");
++ /* Disable interrupt */
++ IOH_DEBUG("ioh_1588_handler:invoking ioh_1588_amms_imask_clear\
++ to disable interrupts\n");
++ ioh_1588_amms_imask_clear();
++
++ /* Fetch Auxiliary Master Mode Snapshot */
++ ioh_1588_aux_master_snap_get(&aux_time.timeValueLowWord,
++ &aux_time.timeValueHighWord);
++ IOH_DEBUG
++ ("ioh_1588_handler:Auxiliary Master Mode Snapshot-low=%lx,\
++ high=%lx\n",
++ aux_time.timeValueLowWord, aux_time.timeValueHighWord);
++
++ IOH_DEBUG("ioh_1588_handler:invoking callback\n");
++ /* Return Auxiliary Master Mode Snapshot */
++ (*ioh_am_cbptr) (IOH_1588_AUXMODE_MASTER, aux_time);
++
++ /* Clear the snapshot availability condition */
++ IOH_DEBUG
++ ("ioh_1588_handler:invoking ioh_1588_amms_evt_clear\n");
++ ioh_1588_amms_evt_clear();
++ }
++
++ /* Handle Auxiliary Slave Mode Snapshot Interrupt */
++ if ((NULL != ioh_as_cbptr) && (TRUE == ioh_1588_asms_evt_get())) {
++ IOH_DEBUG
++ ("ioh_1588_handler:Auxiliary Slave Mode Snapshot Interrupt\n");
++ /* Disable interrupt */
++ IOH_DEBUG("ioh_1588_handler:invoking ioh_1588_asms_imask_clear\
++ to disable interrupts\n");
++ ioh_1588_asms_imask_clear();
++
++ /* Fetch Auxiliary Slave Mode Snapshot */
++ ioh_1588_aux_slave_snap_get(&aux_time.timeValueLowWord,
++ &aux_time.timeValueHighWord);
++ IOH_DEBUG
++ ("ioh_1588_handler:Auxiliary Slave Mode Snapshot-low=%lx,\
++ high=%lx\n",
++ aux_time.timeValueLowWord, aux_time.timeValueHighWord);
++
++ /* Return Auxiliary Slave Mode Snapshot */
++ IOH_DEBUG("ioh_1588_handler:invoking callback\n");
++ (*ioh_as_cbptr) (IOH_1588_AUXMODE_SLAVE, aux_time);
++
++ /* Clear the snapshot availability condition */
++ IOH_DEBUG
++ ("ioh_1588_handler:invoking ioh_1588_asms_evt_clear\n");
++ ioh_1588_asms_evt_clear();
++ }
++
++ /* Handle Pulse Per Second Interrupt */
++ if ((NULL != ioh_pps_cbptr) && (TRUE == ioh_1588_pps_evt_get())) {
++ IOH_DEBUG("ioh_1588_handler:Pulse Per Second Interrupt\n");
++ /* Disable interrupt */
++ IOH_DEBUG("ioh_1588_handler:invoking ioh_1588_pps_imask_clear\
++ to disable interrupts\n");
++ ioh_1588_pps_imask_clear();
++
++ /* Fetch PPS compare register */
++ IOH_DEBUG("ioh_1588_handler:invoking ioh_1588_pps_get\n");
++ ioh_1588_pps_get(&pps);
++
++ /* Invoke the call back */
++ IOH_DEBUG("ioh_1588_handler:invoking callback\n");
++ (*ioh_pps_cbptr) (pps);
++
++ /* Clear the snapshot availability condition */
++ IOH_DEBUG("ioh_1588_handler:invoking ioh_1588_pps_evt_clear\n");
++ ioh_1588_pps_evt_clear();
++
++ }
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_ptp_version_get(
++ * enum ioh1588PTPPort ptpPort, enum ioh1588PTPVersion *ptpVersion)
++ *
++ * @brief Retrieves IEEE 1588 PTP version supported on the given PTP port.
++ *
++ * @remarks This API retrieves IEEE 1588 PTP version supported on given PTP
++ * port.
++ * The main steps followed in this function are:
++ * - Return IOH_1588_INVALIDPARAM if ptpPort passed is not valid or
++ * ptpVersion passed is NULL
++ * - Ensure that the module is initialized and the port is valid
++ * - Return the PTP version that is supported from the
++ * TS_Channel_Control register, bit 31
++ *
++ *
++ * @param ptpPort [IN] PTP port
++ * @param ptpVersion [OUT] Version supported on PTP port
++ *
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation is successful
++ * @li IOH_1588_INVALIDPARAM - Invalid parameters passed
++ */
++enum ioh_status
++ioh_1588_ptp_version_get(enum ioh1588PTPPort ptpPort, \
++ enum ioh1588PTPVersion *ptpVersion)
++{
++ /* Only the single GbE PTP port is supported, and a buffer must be
++ * supplied to receive the version. */
++ if ((IOH_1588_GBE_0_1588PTP_PORT != ptpPort) || (NULL == ptpVersion)) {
++ IOH_DEBUG("ioh_1588_ptp_version_get:invalid params\
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++
++ /* Read the supported PTP version through the HAL accessor. */
++ IOH_DEBUG("ioh_1588_ptp_version_get:invoking ioh_1588_version_get\n");
++ *ptpVersion = ioh_1588_version_get();
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_ptp_version_set(
++ * enum ioh1588PTPPort ptpPort,
++ * enum ioh1588PTPVersion ptpVersion)
++ *
++ * @brief Configures IEEE 1588 PTP version to be used on given PTP port.
++ *
++ * @remarks This API set the IEEE 1588 PTP version to be used on given PTP
++ * port.
++ * The main steps followed in this function are:
++ * - Validate parameter
++ * - Ensure that the module is initialized and the version
++ * requested is valid
++ * - Set the version in TS_Channel_Control register, bit 31
++ *
++ *
++ * @param ptpPort [IN] PTP port
++ * @param ptpVersion [IN] Version to be supported on PTP port
++ *
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation is successful
++ * @li IOH_1588_INVALIDPARAM - Invalid parameters passed
++ */
++enum ioh_status
++ioh_1588_ptp_version_set(enum ioh1588PTPPort ptpPort, \
++ enum ioh1588PTPVersion ptpVersion)
++{
++ /* Verify the parameters for proper values */
++ if ((ptpPort != IOH_1588_GBE_0_1588PTP_PORT) ||
++ ((ptpVersion != IOH_1588PTP_VERSION_0) &&
++ (ptpVersion != IOH_1588PTP_VERSION_1))) {
++ IOH_DEBUG("ioh_1588_ptp_version_set:invalid params\
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++
++ IOH_DEBUG("ioh_1588_ptp_version_set:invoking ioh_1588_version_set\n");
++ ioh_1588_version_set(ptpVersion);
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_ptp_operation_mode_set(
++ * enum ioh1588PTPPort ptpPort,
++ * enum ioh1588PTPOperationMode ptpMode)
++ *
++ * @brief Configure the IEEE 1588 PTP operation mode of given PTP port.
++ *
++ * @remarks This API will set the operation mode on given PTP port.
++ * The main steps followed in this function are:
++ * - Ensure that the module is initialized and the mode requested
++ * is valid
++ * - If not valid, return status IOH_1588_INVALIDPARAM
++ * - Set the requested operation mode in TS_Channel_Control
++ * register, bits 16-20
++ *
++ *
++ * @param ptpPort [IN] PTP port to configure
++ * @param ptpMode [IN] Operation mode to be used
++ *
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ * @li IOH_1588_INVALIDPARAM - Invalid parameters passed
++ */
++enum ioh_status
++ioh_1588_ptp_operation_mode_set(enum ioh1588PTPPort ptpPort,
++ enum ioh1588PTPOperationMode ptpMode)
++{
++ /* Verify the parameters for proper values */
++ if ((ptpPort != IOH_1588_GBE_0_1588PTP_PORT) ||
++ ((ptpMode != IOH_1588PTP_OP_MODE_SYNC_DELAYREQ_MSGS) &&
++ (ptpMode != IOH_1588PTP_OP_MODE_V1_ALL_MSGS) &&
++ (ptpMode != IOH_1588PTP_OP_MODE_V1_V2_EVENT_MSGS) &&
++ (ptpMode != IOH_1588PTP_OP_MODE_V1_V2_ALL_MSGS))) {
++ IOH_DEBUG("ioh_1588_ptp_operation_mode_set:invalid params\
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++
++ IOH_DEBUG("ioh_1588_ptp_operation_mode_set:invoking \
++ ioh_1588_op_mode_set\n");
++ ioh_1588_op_mode_set(ptpMode);
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_ptp_operation_mode_get(
++ * enum ioh1588PTPPort ptpPort,
++ * enum ioh1588PTPOperationMode *ptpMode)
++ *
++ * @brief Gets the current PTP operation mode of given PTP port.
++ *
++ * @remarks This API will get the operation mode of given PTP port.
++ * The main steps followed in this function are:
++ * - Ensure that the module is initialized and the port is valid
++ * - If not valid, return status IOH_1588_INVALIDPARAM
++ * - Return the PTP operation mode that is currently in use by
++ * reading the TS_Channel_Control register, bits 16-20
++ *
++ *
++ * @param ptpPort [IN] PTP port to configure
++ * @param ptpMode [OUT] Address where PTP operation mode is
++ * returned
++ *
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ * @li IOH_1588_INVALIDPARAM - Null parameter passed
++ */
++enum ioh_status
++ioh_1588_ptp_operation_mode_get(enum ioh1588PTPPort ptpPort,
++ enum ioh1588PTPOperationMode *ptpMode)
++{
++ /* Only the single GbE PTP port is supported, and a buffer must be
++ * supplied to receive the operation mode. */
++ if ((IOH_1588_GBE_0_1588PTP_PORT != ptpPort) || (NULL == ptpMode)) {
++ IOH_DEBUG("ioh_1588_ptp_operation_mode_get:invalid params\
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++
++ /* Fetch the current operation mode through the HAL accessor. */
++ IOH_DEBUG
++ ("ioh_1588_ptp_operation_mode_get:invoking ioh_1588_op_mode_get\n");
++ *ptpMode = ioh_1588_op_mode_get();
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_pulse_per_sec_interrupt_enable(
++ * ioh1588PulsePerSecondCallback callBack)
++ *
++ * @brief Enable the Pulse Per Second match interrupt
++ *
++ * @remarks This API will enable the Pulse Per Second match interrupt.
++ * This interrupt is generated when the low word of System
++ * Time matches the value in the Pulse Per Second compare
++ * register in the IEEE hardware assist block. The main steps
++ * followed in this function are:
++ * - Return IOH_1588_INVALIDPARAM if argument passed is NULL
++ * - Modify the Time Sync Controller register to enable the
++ * interrupt
++ * - Set the callback routine
++ *
++ * @param callBack [IN] Routine to be invoked when interrupt
++ * fires
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ * @li IOH_1588_INVALIDPARAM - Null parameter passed
++ */
++enum ioh_status
++ioh_1588_pulse_per_sec_interrupt_enable(ioh1588PulsePerSecondCallback callBack)
++{
++ /* A callback is mandatory; it is the only delivery path for the
++ * PPS match event. */
++ if (callBack == (ioh1588PulsePerSecondCallback) NULL) {
++ IOH_DEBUG
++ ("ioh_1588_pulse_per_sec_interrupt_enable:invalid params\
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++
++ /* Remember the client callback, then unmask the PPS interrupt. */
++ ioh_pps_cbptr = callBack;
++
++ IOH_DEBUG("ioh_1588_pulse_per_sec_interrupt_enable:invoking \
++ ioh_1588_pps_imask_set\n");
++ ioh_1588_pps_imask_set();
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_pulse_per_sec_interrupt_disable(void)
++ *
++ * @brief Disable the Pulse Per Second match interrupt
++ *
++ * @remarks This API will disable the Pulse Per Second match interrupt.
++ * This interrupt is generated when the low word of System
++ * Time matches the value in the Pulse Per Second compare
++ * register in the IEEE hardware assist block. The main
++ * steps followed in this function are:
++ * - Modify the Time Sync Controller register to disable the
++ * interrupt
++ * - Clear the callback routine
++ *
++ * @param None
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ */
++enum ioh_status ioh_1588_pulse_per_sec_interrupt_disable(void)
++{
++ /* Mask the PPS interrupt first so the callback cannot fire while
++ * it is being unregistered. */
++ IOH_DEBUG("ioh_1588_pulse_per_sec_interrupt_disable:invoking \
++ ioh_1588_pps_imask_clear\n");
++ ioh_1588_pps_imask_clear();
++
++ /* Drop the client callback. */
++ ioh_pps_cbptr = (ioh1588PulsePerSecondCallback) NULL;
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_pulse_per_sec_time_set(unsigned long
++ * ppsTime)
++ *
++ * @brief Sets the Pulse Per Second match time in the IEEE 1588 hardware
++ * assist block
++ *
++ * @remarks This API will set the PPS match register with the value supplied
++ * The main steps followed in this function are:
++ * - Preserve and restore the PPS interrupt mask around the update
++ * - Set the time in PPS Compare Register
++ *
++ *
++ * @param ppsTime [IN] Value to be stored in pps match register
++ *
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation is successful (this
++ * function always succeeds)
++ */
++enum ioh_status ioh_1588_pulse_per_sec_time_set(unsigned long ppsTime)
++{
++ unsigned long saved_mask = FALSE;
++
++ /* Remember whether the PPS interrupt was unmasked so the mask can
++ * be put back after the compare register is updated. */
++ saved_mask = ioh_1588_pps_imask_get();
++ IOH_DEBUG
++ ("ioh_1588_pulse_per_sec_time_set:target time interrupt mask=%lx\n",
++ saved_mask);
++
++ /* Mask the PPS interrupt so it cannot fire while the registers are
++ * being manipulated. */
++ IOH_DEBUG
++ ("ioh_1588_pulse_per_sec_time_set:invoking ioh_1588_pps_imask_clear\
++ to clear the pps interrupt mask\n");
++ ioh_1588_pps_imask_clear();
++
++ /* Write the new compare value. */
++ IOH_DEBUG
++ ("ioh_1588_pulse_per_sec_time_set:invoking ioh_1588_pps_set\n");
++
++ ioh_1588_pps_set(ppsTime);
++
++ /* Clear any pending PPS event so the hardware assist re-evaluates
++ * the match condition against the new value. */
++ IOH_DEBUG
++ ("ioh_1588_pulse_per_sec_time_set:invoking ioh_1588_pps_evt_clear\n");
++ ioh_1588_pps_evt_clear();
++
++ /* Re-enable the interrupt only if it was enabled on entry. */
++ if (saved_mask == TRUE) {
++ IOH_DEBUG
++ ("ioh_1588_pulse_per_sec_time_set:invoking \
++ ioh_1588_pps_imask_set\n");
++ ioh_1588_pps_imask_set();
++ }
++
++ IOH_DEBUG
++ ("ioh_1588_pulse_per_sec_time_set:returning IOH_1588_SUCCESS\n");
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_pulse_per_sec_time_get(
++ * unsigned long *ppsTime)
++ *
++ * @brief Gets the Pulse Per Second match time from the IEEE 1588 hardware
++ * assist block
++ *
++ * @remarks This API will get the PPS match register content
++ * from IEEE 1588 block. The main steps followed in this
++ * function are:
++ * - Return IOH_1588_INVALIDPARAM if argument passed is NULL
++ * - Return the time from PPS compare register
++ *
++ *
++ * @param ppsTime [OUT] Buffer for returning the pps match value
++ *
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ * @li IOH_1588_INVALIDPARAM - Null parameter passed
++ */
++enum ioh_status ioh_1588_pulse_per_sec_time_get(unsigned long *ppsTime)
++{
++ /* Verify the parameter */
++ if ((unsigned long *)NULL == ppsTime) {
++ IOH_DEBUG("ioh_1588_pulse_per_sec_time_get:invalid params\
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++
++ /* Retrieve PPS Value */
++ IOH_DEBUG
++ ("ioh_1588_pulse_per_sec_time_get:invoking ioh_1588_pps_get\n");
++ ioh_1588_pps_get(ppsTime);
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_eth_enable(void)
++ *
++ * @brief Sets the eth_enb bit (bit 0) of Ethernet-CAN Select Register
++ * @remarks This API enables the IEEE 1588 hardware time stamping of PTP
++ * traffic
++ * on the Ethernet interface
++ *
++ * @param None
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ */
++enum ioh_status ioh_1588_eth_enable(void)
++{
++ if (ioh_1588_base != 0) {
++ IOH_DEBUG
++ ("ioh_1588_eth_enable:invoking ioh_1588_eth_enable_set\n");
++ ioh_1588_eth_enable_set();
++ }
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_eth_disable(void)
++ *
++ * @brief Clears the eth_enb bit (bit 0) of Ethernet-CAN Select Register
++ * @remarks This API disables the IEEE 1588 hardware time stamping of PTP
++ * traffic
++ * on the Ethernet interface
++ *
++ * @param None
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ */
++enum ioh_status ioh_1588_eth_disable(void)
++{
++ if (ioh_1588_base != 0) {
++ IOH_DEBUG
++ ("ioh_1588_eth_disable:invoking ioh_1588_eth_enable_clear\n");
++ ioh_1588_eth_enable_clear();
++ }
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_can_enable(void)
++ *
++ * @brief Sets the can_enb bit (bit 1) of Ethernet-CAN Select Register
++ * @remraks This API enables the IEEE 1588 hardware time stamping of PTP
++ * traffic
++ * on the CAN interface
++ *
++ * @param None
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ */
++enum ioh_status ioh_1588_can_enable(void)
++{
++ if (ioh_1588_base != 0) {
++ IOH_DEBUG
++ ("ioh_1588_can_enable:invoking ioh_1588_can_enable_set\n");
++ ioh_1588_can_enable_set();
++ }
++
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_can_disable(void)
++ *
++ * @brief Clear the can_enb bit (bit 1) of Ethernet-CAN Select Register
++ * @remarks This API disables the IEEE 1588 hardware time stamping of PTP
++ * traffic
++ * on the CAN interface
++ *
++ * @param None
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ */
++enum ioh_status ioh_1588_can_disable(void)
++{
++ if (ioh_1588_base != 0) {
++ IOH_DEBUG
++ ("ioh_1588_can_disable:invoking ioh_1588_can_enable_clear\n");
++ ioh_1588_can_enable_clear();
++ }
++
++ return IOH_1588_SUCCESS;
++}
++
++/**
++ * @ingroup IEEE_1588_UtilitiesAPI
++ * @fn static int get_decimal(unsigned char ch)
++ *
++ * @brief Returns the decimal value of the passed
++ * hexadecimal value.
++ *
++ * @note Returns -1 if the passed argument is invalid.
++ *
++ * @param ch [IN] The hexadecimal value that has to be converted.
++ *
++ * @retval int
++ * - On Success --> decimal Value
++ * - Invalid value --> -1
++ */
++static int get_decimal(unsigned char ch)
++{
++ int ret;
++
++ if ((ch >= '0') && (ch <= '9')) {
++ ret = ch - '0';
++ return ret;
++ } else if ((ch >= 'A') && (ch <= 'F')) {
++ ret = 10 + ch - 'A';
++ return ret;
++ } else if ((ch >= 'a') && (ch <= 'f')) {
++ ret = 10 + ch - 'a';
++ return ret;
++ }
++
++ return -1;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_set_station_address (
++ * unsigned char *addr)
++ *
++ * @brief This API sets the station address used by IEEE 1588 hardware
++ * when looking
++ * at PTP traffic on the ethernet interface
++ *
++ * @param addr [IN] Address which contain the column separated
++ * address to be used
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ * @li IOH_1588_INVALIDPARAM - Invalid address string
++ */
++enum ioh_status ioh_1588_set_station_address(unsigned char *addr)
++{
++ int i;
++
++ /* Verify the parameter */
++ if ((ioh_1588_base == 0) || (unsigned char *)NULL == addr) {
++ IOH_DEBUG("ioh_1588_set_station_address :invalid params\
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++
++ for (i = 0; i < IOH_1588_STATION_BYTES; i++) { /* For all station
++ address bytes */
++ unsigned long val = 0;
++ int tmp;
++
++ tmp = get_decimal(addr[i * 3]);
++ if (tmp < 0) {
++ IOH_DEBUG("ioh_1588_set_station_address :invalid params\
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++ val = tmp * 16;
++ tmp = get_decimal(addr[(i * 3) + 1]);
++ if (tmp < 0) {
++ IOH_DEBUG("ioh_1588_set_station_address :invalid params\
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++ val += tmp;
++ if ((i < 5) && (addr[(i * 3) + 2] != ':')) { /* Expects ':'
++ separated addresses */
++ IOH_DEBUG("ioh_1588_set_station_address :invalid params\
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++
++ /* Ideally we should set the address only after validating
++ entire string */
++ IOH_DEBUG
++ ("ioh_1588_set_station_address \
++ :invoking ioh_1588_station_set\n");
++ ioh_1588_station_set(i, val);
++ }
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_get_station_address(char *addr)
++ * @brief This API gets the station address currently used by IEEE 1588
++ * hardware when looking at PTP traffic on the ethernet interface
++ *
++ * @param addr [OUT] Buffer to which column separated address is
++ * returned
++ * @retval enum ioh_status
++ * @li IOH_1588_SUCCESS - Operation successful
++ * @li IOH_1588_INVALIDPARAM - Null parameter passed
++ */
++enum ioh_status ioh_1588_get_station_address(char *addr)
++{
++ int i;
++
++ /* Verify the parameter */
++ if ((char *)NULL == addr) {
++ IOH_DEBUG("ioh_1588_get_station_address:invalid params\
++ returning IOH_1588_INVALIDPARAM\n");
++ return IOH_1588_INVALIDPARAM;
++ }
++
++ for (i = 0; i < IOH_1588_STATION_BYTES; i++) {
++ unsigned long val = 0;
++
++ ioh_1588_station_get(i, &val);
++ addr[i * 3] = val / 16;
++ if (addr[i * 3] > 9)
++ addr[i * 3] += 'a' - 10;
++ else
++ addr[i * 3] += '0';
++ addr[i * 3 + 1] = val % 16;
++ if (addr[i * 3 + 1] > 9)
++ addr[i * 3 + 1] += 'a' - 10;
++ else
++ addr[i * 3 + 1] += '0';
++ addr[i * 3 + 2] = ':';
++ }
++ addr[17] = '\0';
++ return IOH_1588_SUCCESS;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_aux_target_time_interrupt_enable(
++ * void *callBack)
++ *
++ * @brief This API just returns an error.
++ *
++ * @remarks This API is just for compatibility. It just returns an error.
++ *
++ * @param callBack [IN] Callback to be invoked when interrupt
++ * fires
++ *
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_UNSUPPORTED - Operation is not supported
++ */
++enum ioh_status ioh_1588_aux_target_time_interrupt_enable(void *callBack)
++{
++ IOH_DEBUG("ioh_1588_aux_target_time_interrupt_enable:unsupported\n");
++ return IOH_1588_UNSUPPORTED;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_aux_target_time_interrupt_disable(void)
++ *
++ * @brief This API just returns an error.
++ *
++ * @remarks This API is just for compatibility. It just returns an error.
++ *
++ * @param None
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_UNSUPPORTED - Operation is not supported
++ */
++enum ioh_status ioh_1588_aux_target_time_interrupt_disable(void)
++{
++ IOH_DEBUG("ioh_1588_aux_target_time_interrupt_disable:unsupported\n");
++ return IOH_1588_UNSUPPORTED;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_aux_target_time_poll(
++ * unsigned long *attmPollFlag,
++ * struct ioh1588TimeValue *targetTime)
++ *
++ * @brief This API just returns an error.
++ *
++ * @remarks This API is just for compatibility. It just returns an error.
++ *
++ * @param attmPollFlag [OUT] Flag returning the availability of a
++ * snapshot
++ * @param targetTime [OUT] Snapshot captured
++ *
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_UNSUPPORTED - Operation not supported
++ */
++enum ioh_status
++ioh_1588_aux_target_time_poll(unsigned long *attmPollFlag,
++ struct ioh1588TimeValue *targetTime)
++{
++ IOH_DEBUG("ioh_1588_aux_target_time_poll:unsupported\n");
++ return IOH_1588_UNSUPPORTED;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_aux_target_time_set(
++ * struct ioh1588TimeValue targetTime)
++ *
++ * @brief This API just returns an error.
++ *
++ * @remarks This API is just for compatibility. It just returns an error.
++ *
++ * @param targetTime [IN] Time to set to
++ *
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_UNSUPPORTED - Operation not supported
++ */
++enum ioh_status ioh_1588_aux_target_time_set(struct ioh1588TimeValue targetTime)
++{
++ IOH_DEBUG("ioh_1588_aux_target_time_set:unsupported\n");
++ return IOH_1588_UNSUPPORTED;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_aux_target_time_get(
++ * struct ioh1588TimeValue *targetTime)
++ *
++ * @brief This API just returns an error.
++ *
++ * @remarks This API is just for compatibility. It just returns an error.
++ *
++ * @param targetTime [OUT] Buffer for returning time snapshot
++ *
++ *
++ * @retval enum ioh_status
++ * @li IOH_1588_UNSUPPORTED - Operation not supported
++ */
++enum ioh_status ioh_1588_aux_target_time_get(
++ struct ioh1588TimeValue *targetTime)
++{
++ IOH_DEBUG("ioh_1588_aux_target_time_get:unsupported\n");
++ return IOH_1588_UNSUPPORTED;
++}
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ *
++ * @fn int ioh_1588_eth_can_get(void)
++ *
++ * @brief This function returns the modes [ethernet/CAN] enabled
++ *
++ * @retval int
++ * - the modes enabled
++ */
++int ioh_1588_eth_can_get(void)
++{
++ int ieee_mode = 0;
++
++ if (ioh_1588_eth_enable_get() == 1)
++ ieee_mode |= IOH_IEEE1588_ETH;
++ if (ioh_1588_can_enable_get() == 1)
++ ieee_mode |= IOH_IEEE1588_CAN;
++
++ return ieee_mode;
++}
++
++#ifdef IOH_IEEE1588_A0_A1_SAMPLE_BUG
++/*! @ingroup IEEE_1588_HALLayerAPI
++ *
++ * @fn void ioh_1588_set_system_time_count(void)
++ *
++ * @brief This function enables all 64 bits in system time registers
++ * [high & low]. This is a work-around for non continuous value
++ * in the SystemTime Register
++ *
++ * @retval none
++ */
++void ioh_1588_set_system_time_count(void)
++{
++ IOH_REG_32_WRITE((ioh_1588_base + 0xC0), 0x1);
++ IOH_REG_32_WRITE((ioh_1588_base + 0xC4), 0xFFFFFFFF);
++ IOH_REG_32_WRITE((ioh_1588_base + 0xC0), 0x0);
++}
++#endif
+diff -urN linux-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_hal.h topcliff-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_hal.h
+--- linux-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_hal.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_hal.h 2010-03-09 07:40:00.000000000 +0900
+@@ -0,0 +1,885 @@
++ /*!
++ * @file ioh_1588_hal.h
++ * @brief
++ * This file lists the declarations of IEEE_1588_HALLayer APIs.
++ * @version 0.92
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * modified to support Intel IOH GE IEEE 1588 hardware
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ * derived from
++ * IEEE 1588 Time Synchronization Driver for Intel EP80579
++ * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ */
++
++#ifndef IOH_1588_HAL_H
++#define IOH_1588_HAL_H
++
++#include "pch_1588_main.h"
++
++/* IOH 1588 Hardware Assist Module Register offsets */
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_TSC_OFFSET
++@brief TS Control Register Offset
++*/
++#define IOH_1588_TSC_OFFSET (0x00) /* TS_Control */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_TSE_OFFSET
++@brief TS Event Register Offset
++*/
++#define IOH_1588_TSE_OFFSET (0x04) /* TS_Event */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_ADD_OFFSET
++@brief TS Addend Register Offset
++*/
++#define IOH_1588_ADD_OFFSET (0x08) /* TS_Addend */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_ACC_OFFSET
++@brief TS Accumulator Register Offset
++*/
++#define IOH_1588_ACC_OFFSET (0x0C) /* TS_Accum */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_TST_OFFSET
++@brief TS Test Register Offset
++*/
++#define IOH_1588_TST_OFFSET (0x10) /* TS_Test */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_PPS_OFFSET
++@brief TS PPS Compare Register Offset
++*/
++#define IOH_1588_PPS_OFFSET (0x14) /* TS_PPS_Compare */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_STL_OFFSET
++@brief TS System Time Low Register Offset
++*/
++#define IOH_1588_STL_OFFSET (0x20) /* TS_SysTimeLo */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_STH_OFFSET
++@brief TS System Time High Register Offset
++*/
++#define IOH_1588_STH_OFFSET (0x24) /* TS_SysTimeHi */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_TTL_OFFSET
++@brief TS Target Time Low Register Offset
++*/
++#define IOH_1588_TTL_OFFSET (0x28) /* TS_TrgtLo */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_TTH_OFFSET
++@brief TS Target Time High Register Offset
++*/
++#define IOH_1588_TTH_OFFSET (0x2c) /* TS_TrgtHi */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_ASSL_OFFSET
++@brief TS Aux Slave Mode Snapshot Low Register Offset
++*/
++#define IOH_1588_ASSL_OFFSET (0x30) /* TS_ASMSLo */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_ASSH_OFFSET
++@brief TS Aux Slave Mode Snapshot High Register Offset
++*/
++#define IOH_1588_ASSH_OFFSET (0x34) /* TS_ASMSHi */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_AMSL_OFFSET
++@brief TS Aux Master Mode Snapshot Low Register Offset
++*/
++#define IOH_1588_AMSL_OFFSET (0x38) /* TS_AMMSLo */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_AMSH_OFFSET
++@brief TS Aux Master Mode Snapshot High Register Offset
++*/
++#define IOH_1588_AMSH_OFFSET (0x3C) /* TS_AMMSHi */
++
++/* Ethernet */
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_CC_OFFSET
++@brief TS Channel Control Register Offset
++*/
++#define IOH_1588_CC_OFFSET (0x40) /* TS_Ch_Contr */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_CE_OFFSET
++@brief TS Channel Event Register Offset
++*/
++#define IOH_1588_CE_OFFSET (0x44) /* TS_Ch_Event */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_XSL_OFFSET
++@brief TS Tx Snapshot Low Register Offset
++*/
++#define IOH_1588_XSL_OFFSET (0x48) /* TS_TxSnapLo */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_XSH_OFFSET
++@brief TS Tx Snapshot High Register Offset
++*/
++#define IOH_1588_XSH_OFFSET (0x4C) /* TS_TxSnapHi */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_RSL_OFFSET
++@brief TS Rx Snapshot Low Register Offset
++*/
++#define IOH_1588_RSL_OFFSET (0x50) /* TS_RxSnapLo */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_RSH_OFFSET
++@brief TS Rx Snapshot High Register Offset
++*/
++#define IOH_1588_RSH_OFFSET (0x54) /* TS_RxSnapHi */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_UID_OFFSET
++@brief TS Source UUID Low Register Offset
++*/
++#define IOH_1588_UID_OFFSET (0x58) /* TS_SrcUUID */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_SID_OFFSET
++@brief TS Source UUID High/SequenceID Register Offset
++*/
++#define IOH_1588_SID_OFFSET (0x5C) /* TS_SrcUUID */
++
++/* CAN */
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_CCE_OFFSET
++@brief TS CAN Channel Status Register Offset
++*/
++#define IOH_1588_CCE_OFFSET (0x60) /* TS_CAN_Stat */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_CXSL_OFFSET
++@brief TS CAN Snapshot Low Register Offset
++*/
++#define IOH_1588_CXSL_OFFSET (0x64) /* TS_CAN_Snap */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_CXSH_OFFSET
++@brief TS CAN Snapshot High Register Offset
++*/
++#define IOH_1588_CXSH_OFFSET (0x68) /* TS_CAN_Snap */
++
++/* Selector */
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_ECS_OFFSET
++@brief TS Ethernet/CAN Select Register Offset
++*/
++#define IOH_1588_ECS_OFFSET (0x6c) /* TS_SEL */
++
++/* Station Address 1-6 */
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_STA_OFFSET
++@brief TS Station Address Register Offset
++*/
++#define IOH_1588_STA_OFFSET (0x70) /* TS_ST1 */
++
++/* Bit Masks of Control Register */
++/* Hardware Assist Reset */
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_TSC_RESET_SHIFT
++@brief Reset Bit position in Control Register
++*/
++#define IOH_1588_TSC_RESET_SHIFT 0
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_TSC_RESET
++@brief Bit Mask for Reset Bit in Control Register
++*/
++#define IOH_1588_TSC_RESET (1 << IOH_1588_TSC_RESET_SHIFT)
++
++/* Target Time Interrupt Mask */
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_TSC_TTM_SHIFT
++@brief Bit position of Target Time Interrupt Bit in Control
++ Register
++*/
++#define IOH_1588_TSC_TTM_SHIFT 1
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_TSC_TTM_MASK
++@brief Bit Mask for Target Time Interrupt in Control Register
++*/
++#define IOH_1588_TSC_TTM_MASK (1 << IOH_1588_TSC_TTM_SHIFT)
++
++/* Auxiliary Slave Mode snapshot Interrupt Mask */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_TSC_ASMS_SHIFT
++@brief Bit position of Aux Slave Mode snapshot
++ Interrupt in Control Register
++*/
++#define IOH_1588_TSC_ASMS_SHIFT 2
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_TSC_ASMS_MASK
++@brief Bit Mask for Aux Slave Mode snapshot
++ Interrupt in Control Register
++*/
++#define IOH_1588_TSC_ASMS_MASK (1 << IOH_1588_TSC_ASMS_SHIFT)
++
++/* Auxiliary Master Mode snapshot Interrupt Mask */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_TSC_AMMS_SHIFT
++@brief Bit position for Aux Master Mode snapshot
++ Interrupt in Control Register
++*/
++#define IOH_1588_TSC_AMMS_SHIFT 3
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_TSC_AMMS_MASK
++@brief Bit mask for Aux Master Mode snapshot
++ Interrupt in Control Register
++*/
++#define IOH_1588_TSC_AMMS_MASK (1 << IOH_1588_TSC_AMMS_SHIFT)
++
++/* Pulse Per Second Interrupt Mask */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_TSC_PPSM_SHIFT
++@brief Bit position of Pulse Per Second
++ Interrupt in Control Register
++*/
++#define IOH_1588_TSC_PPSM_SHIFT 4
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_TSC_PPSM_MASK
++@brief Bit mask of Pulse Per Second
++ Interrupt in Control Register
++*/
++#define IOH_1588_TSC_PPSM_MASK (1 << IOH_1588_TSC_PPSM_SHIFT)
++
++/* Bit Masks of Event Register */
++/* Target Time Interrupt Pending Event */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_TSE_TTIPEND_SHIFT
++@brief Bit position of Target Time Interrupt
++ Pending in Event Register
++*/
++#define IOH_1588_TSE_TTIPEND_SHIFT 1
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_TSE_TTIPEND
++@brief Bit mask of Target Time Interrupt
++ Pending in Event Register
++*/
++#define IOH_1588_TSE_TTIPEND (1 << IOH_1588_TSE_TTIPEND_SHIFT)
++
++/* Auxiliary Slave Mode snapshot Event */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_TSE_SNS_SHIFT
++@brief Bit position of Aux Slave Mode snapshot
++ in Event Register
++*/
++#define IOH_1588_TSE_SNS_SHIFT 2
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_TSE_SNS
++@brief Bit mask of Aux Slave Mode snapshot
++ in Event Register
++*/
++#define IOH_1588_TSE_SNS (1 << IOH_1588_TSE_SNS_SHIFT)
++
++/* Auxiliary Master Mode snapshot Event */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_TSE_SNM_SHIFT
++@brief Bit position of Aux Master Mode snapshot
++ in Event Register
++*/
++#define IOH_1588_TSE_SNM_SHIFT 3
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_TSE_SNM
++@brief Bit mask of Aux Master Mode snapshot
++ in Event Register
++*/
++#define IOH_1588_TSE_SNM (1 << IOH_1588_TSE_SNM_SHIFT)
++
++/* Pulse Per Second Match */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_TSE_PPS_SHIFT
++@brief Bit position of Pulse Per Second Match
++ in Event Register
++*/
++#define IOH_1588_TSE_PPS_SHIFT 4
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_TSE_PPS
++@brief Bit mask of Pulse Per Second Match
++ in Event Register
++*/
++#define IOH_1588_TSE_PPS (1 << IOH_1588_TSE_PPS_SHIFT)
++
++/* Bit Masks of Channel Control Register */
++/* Timestamp Master or Slave Mode Control Flag */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_CC_MM_SHIFT
++@brief Bit position of Timestamp Master/Slave Mode
++ in Channel Control Register
++*/
++#define IOH_1588_CC_MM_SHIFT 0
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_CC_MM
++@brief Bit mask of Timestamp Master/Slave Mode
++ control flag in
++ in Channel Control Register
++*/
++#define IOH_1588_CC_MM (1 << IOH_1588_CC_MM_SHIFT)
++
++/* Timestamp All Messages Control Flag */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_CC_TA_SHIFT
++@brief Bit position of Timestamp all messages
++ Mode control flag
++ in Channel Control Register
++*/
++#define IOH_1588_CC_TA_SHIFT 1
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_CC_TA
++@brief Bit mask of Timestamp all messages
++ Mode control flag
++ in Channel Control Register
++*/
++#define IOH_1588_CC_TA (1 << IOH_1588_CC_TA_SHIFT)
++
++/* Mode bits */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_CC_MODE_SHIFT
++@brief Bit position of mode bits
++ in Channel Control Register
++*/
++#define IOH_1588_CC_MODE_SHIFT 16
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_CC_MODE_MASK
++@brief Bit mask for mode bits
++ in Channel Control Register
++*/
++#define IOH_1588_CC_MODE_MASK (0x001F0000)
++
++/* Version bit */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_CC_VERSION_SHIFT
++@brief Bit position for version bits
++ in Channel Control Register
++*/
++#define IOH_1588_CC_VERSION_SHIFT 31
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_CC_VERSION
++@brief Bit mask for version bits
++ in Channel Control Register
++*/
++#define IOH_1588_CC_VERSION (1 << IOH_1588_CC_VERSION_SHIFT)
++
++/* Bit Masks of Channel Event Register */
++/* Transmit Snapshot Locked Indicator Flag */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_CE_TXS
++@brief Bit mask for Transmit Snapshot Locked bit
++ in Channel Event Register
++*/
++#define IOH_1588_CE_TXS (1 << 0)
++
++/* Receive Snapshot Locked Indicator Flag */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_CE_RXS
++@brief Bit mask for Receive Snapshot Locked bit
++ in Channel Event Register
++*/
++#define IOH_1588_CE_RXS (1 << 1)
++
++/* Bit Masks of CAN Channel Event Register */
++/* Overrun Indicator Flag */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_CE_OVR
++@brief Bit mask for Overrun Indicator bit
++ in Channel Event Register
++*/
++#define IOH_1588_CE_OVR (1 << 0)
++
++/* Valid Indicator Flag */
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_CE_VAL
++@brief Bit mask for Valid Indicator bit
++ in Channel Event Register
++*/
++#define IOH_1588_CE_VAL (1 << 1)
++
++/* Ethernet Enable bit */
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_ECS_ETH_SHIFT
++@brief Bit position for Ethernet Enable bit
++ in Ethernet/CAN select Register
++*/
++#define IOH_1588_ECS_ETH_SHIFT 0
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_ECS_ETH
++@brief Bit mask for Ethernet Enable bit
++ in Ethernet/CAN select Register
++*/
++#define IOH_1588_ECS_ETH (1 << IOH_1588_ECS_ETH_SHIFT)
++/* Can Enable bit */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_ECS_CAN_SHIFT
++@brief Bit position for CAN Enable bit
++ in Ethernet/CAN select Register
++*/
++#define IOH_1588_ECS_CAN_SHIFT 1
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_ECS_CAN
++@brief Bit mask for CAN Enable bit
++ in Ethernet/CAN select Register
++*/
++#define IOH_1588_ECS_CAN (1 << IOH_1588_ECS_CAN_SHIFT)
++
++/* Station Address bytes */
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def IOH_1588_STATION_BYTES
++@brief Bytes for Station Address
++*/
++#define IOH_1588_STATION_BYTES 6
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@def DRIVER_NAME
++@brief The name of this driver
++*/
++#define DRIVER_NAME "ioh_ieee1588"
++
++#define IOH_IEEE1588_ETH (1 << 0)
++#define IOH_IEEE1588_CAN (1 << 1)
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@typedef typedef void (*ioh1588TargetTimeCallback)
++ (struct ioh1588TimeValue tgt_time)
++@brief Pointer for Callback function for Target Time interrupt
++@see
++ - ioh_1588_blpl_base_address_set
++ - ioh_1588_target_time_interrupt_enable
++ - ioh_1588_target_time_interrupt_disable
++ - ioh_1588_target_time_poll
++ - ioh_1588_reset
++*/
++typedef void (*ioh1588TargetTimeCallback) (struct ioh1588TimeValue tgt_time);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@typedef typedef void (*ioh1588AuxTimeCallback)
++ (enum ioh1588AuxMode aux_mode,
++ struct ioh1588TimeValue aux_time)
++@brief Pointer for Callback function for Aux Time interrupt
++@see
++ - ioh_1588_blpl_base_address_set
++ - ioh_1588_aux_time_interrupt_enable
++ - ioh_1588_aux_time_interrupt_disable
++ - ioh_1588_aux_time_poll
++ - ioh_1588_reset
++*/
++typedef void (*ioh1588AuxTimeCallback) (enum ioh1588AuxMode aux_mode,
++ struct ioh1588TimeValue aux_time);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++@typedef typedef void (*ioh1588PulsePerSecondCallback)(
++ unsigned long pps)
++@brief Pointer for Callback function for Pulse Per Second
++ interrupt
++@see
++ - ioh_1588_blpl_base_address_set
++ - ioh_1588_pulse_per_sec_interrupt_enable
++ - ioh_1588_pulse_per_sec_interrupt_disable
++ - ioh_1588_reset
++*/
++typedef void (*ioh1588PulsePerSecondCallback) (unsigned long pps);
++
++/**
++ * prototypes of HAL APIs
++ */
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_blpl_base_address_set(unsigned long base_addr)
++ */
++enum ioh_status ioh_1588_blpl_base_address_set(unsigned long base_addr);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_ptp_port_config_set(
++ * enum ioh1588PTPPort ptpPort,
++ * enum ioh1588PTPPortMode ptpPortMode)
++ */
++enum ioh_status
++ioh_1588_ptp_port_config_set(enum ioh1588PTPPort ptpPort,
++ enum ioh1588PTPPortMode ptpPortMode);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_ptp_port_config_get(
++ * enum ioh1588PTPPort ptpPort,
++ * enum ioh1588PTPPortMode *ptpPortMode)
++ */
++enum ioh_status
++ioh_1588_ptp_port_config_get(enum ioh1588PTPPort ptpPort,
++ enum ioh1588PTPPortMode *ptpPortMode);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_ptp_rx_poll(
++ * enum ioh1588PTPPort ptpPort,
++ * struct ioh1588PtpMsgData *ptpMsgData)
++ *
++ *
++ */
++enum ioh_status
++ioh_1588_ptp_rx_poll(
++ enum ioh1588PTPPort ptpPort, \
++ struct ioh1588PtpMsgData *ptpMsgData);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_ptp_tx_poll(
++ * enum ioh1588PTPPort ptpPort,
++ * struct ioh1588PtpMsgData *ptpMsgData)
++ *
++ */
++enum ioh_status
++ioh_1588_ptp_tx_poll(
++ enum ioh1588PTPPort ptpPort, \
++ struct ioh1588PtpMsgData *ptpMsgData);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_system_time_set(
++ * struct ioh1588TimeValue systemTime)
++ *
++ */
++enum ioh_status ioh_1588_system_time_set(struct ioh1588TimeValue systemTime);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_system_time_get(
++ * struct ioh1588TimeValue *systemTime)
++ *
++ */
++enum ioh_status ioh_1588_system_time_get(struct ioh1588TimeValue *systemTime);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_tick_rate_set(unsigned long tickRate)
++ *
++ */
++enum ioh_status ioh_1588_tick_rate_set(unsigned long tickRate);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_tick_rate_get(unsigned long *tickRate)
++ *
++ */
++enum ioh_status ioh_1588_tick_rate_get(unsigned long *tickRate);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_target_time_interrupt_enable(
++ * ioh1588TargetTimeCallback callBack)
++ *
++ */
++enum ioh_status
++ioh_1588_target_time_interrupt_enable(ioh1588TargetTimeCallback callBack);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_target_time_interrupt_disable(void)
++ *
++ */
++enum ioh_status ioh_1588_target_time_interrupt_disable(void);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_target_time_poll(
++ * unsigned long *ttmPollFlag,
++ * struct ioh1588TimeValue *targetTime)
++ *
++ */
++enum ioh_status
++ioh_1588_target_time_poll(unsigned long *ttmPollFlag,
++ struct ioh1588TimeValue *targetTime);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_target_time_set(
++ * struct ioh1588TimeValue targetTime)
++ *
++ */
++enum ioh_status ioh_1588_target_time_set(struct ioh1588TimeValue targetTime);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_target_time_get(
++ * struct ioh1588TimeValue *targetTime)
++ *
++ */
++enum ioh_status ioh_1588_target_time_get(struct ioh1588TimeValue *targetTime);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_aux_time_interrupt_enable(
++ enum ioh1588AuxMode auxMode,
++ * ioh1588AuxTimeCallback callBack)
++ *
++ */
++enum ioh_status
++ioh_1588_aux_time_interrupt_enable(enum ioh1588AuxMode auxMode,
++ ioh1588AuxTimeCallback callBack);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_aux_time_interrupt_disable(
++ enum ioh1588AuxMode auxMode)
++ *
++ */
++enum ioh_status ioh_1588_aux_time_interrupt_disable(
++ enum ioh1588AuxMode auxMode);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_aux_time_poll(
++ * enum ioh1588AuxMode auxMode,
++ * unsigned long *pollFlag,
++ * struct ioh1588TimeValue *auxTime)
++ *
++ */
++enum ioh_status
++ioh_1588_aux_time_poll(enum ioh1588AuxMode auxMode,
++ unsigned long *pollFlag,
++ struct ioh1588TimeValue *auxTime);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_reset(void)
++ *
++ */
++enum ioh_status ioh_1588_reset(void);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_chnl_reset(enum ioh1588PTPPort ptpPort)
++ *
++ */
++enum ioh_status ioh_1588_chnl_reset(enum ioh1588PTPPort ptpPort);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_stats_get(struct ioh1588Stats *stats)
++ *
++ */
++enum ioh_status ioh_1588_stats_get(struct ioh1588Stats *stats);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn void ioh_1588_stats_reset(void)
++ *
++ */
++void ioh_1588_stats_reset(void);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_show(void)
++ *
++ */
++enum ioh_status ioh_1588_show(void);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_pulse_per_sec_interrupt_enable(
++ * ioh1588PulsePerSecondCallback callBack)
++ *
++ */
++enum ioh_status
++ioh_1588_pulse_per_sec_interrupt_enable(ioh1588PulsePerSecondCallback callBack);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_pulse_per_sec_interrupt_disable(void)
++ *
++ */
++enum ioh_status ioh_1588_pulse_per_sec_interrupt_disable(void);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_pulse_per_sec_time_get(unsigned long *ppsTime)
++ *
++ */
++enum ioh_status ioh_1588_pulse_per_sec_time_get(unsigned long *ppsTime);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_pulse_per_sec_time_set(unsigned long ppsTime)
++ *
++ */
++enum ioh_status ioh_1588_pulse_per_sec_time_set(unsigned long ppsTime);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_ptp_can_poll (
++ * enum ioh1588PTPPort ptpPort,
++ * struct ioh1588TimeValue *ptpTimeStamp)
++ *
++ */
++enum ioh_status ioh_1588_ptp_can_poll(enum ioh1588PTPPort ptpPort,
++ struct ioh1588TimeValue *ptpTimeStamp);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_ptp_version_get(enum ioh1588PTPPort ptpPort,
++ * enum ioh1588PTPVersion *ptpVersion)
++ *
++ */
++enum ioh_status
++ioh_1588_ptp_version_get(enum ioh1588PTPPort ptpPort,
++ enum ioh1588PTPVersion *ptpVersion);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_ptp_version_set(enum ioh1588PTPPort ptpPort,
++ * enum ioh1588PTPVersion ptpVersion)
++ *
++ */
++enum ioh_status
++ioh_1588_ptp_version_set(
++		enum ioh1588PTPPort ptpPort,
++ enum ioh1588PTPVersion ptpVersion);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_ptp_operation_mode_set(
++ * enum ioh1588PTPPort ptpPort,
++ * enum ioh1588PTPOperationMode ptpMode)
++ *
++ */
++enum ioh_status
++ioh_1588_ptp_operation_mode_set(enum ioh1588PTPPort ptpPort,
++ enum ioh1588PTPOperationMode ptpMode);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_ptp_operation_mode_get(enum ioh1588PTPPort ptpPort,
++ * enum ioh1588PTPOperationMode *ptpMode)
++ */
++enum ioh_status
++ioh_1588_ptp_operation_mode_get(enum ioh1588PTPPort ptpPort,
++ enum ioh1588PTPOperationMode *ptpMode);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_handler(void)
++ *
++ */
++enum ioh_status ioh_1588_handler(void);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_aux_target_time_interrupt_enable(void *callBack)
++ *
++ */
++enum ioh_status ioh_1588_aux_target_time_interrupt_enable(void *callBack);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_aux_target_time_interrupt_disable(void)
++ *
++ */
++enum ioh_status ioh_1588_aux_target_time_interrupt_disable(void);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_aux_target_time_poll(
++ * unsigned long *attmPollFlag, struct ioh1588TimeValue *targetTime)
++ *
++ */
++enum ioh_status
++ioh_1588_aux_target_time_poll(unsigned long *attmPollFlag,
++ struct ioh1588TimeValue *targetTime);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_aux_target_time_set(
++ * struct ioh1588TimeValue targetTime)
++ *
++ */
++enum ioh_status ioh_1588_aux_target_time_set(
++ struct ioh1588TimeValue targetTime);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_aux_target_time_get(
++ * struct ioh1588TimeValue *targetTime)
++ *
++ */
++enum ioh_status ioh_1588_aux_target_time_get(
++			struct ioh1588TimeValue *targetTime);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_disable_interrupts(void)
++ *
++ */
++enum ioh_status ioh_1588_disable_interrupts(void);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_interrupt_pending(unsigned long *pending)
++ *
++ */
++enum ioh_status ioh_1588_interrupt_pending(unsigned long *pending);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_eth_enable(void)
++ *
++ */
++enum ioh_status ioh_1588_eth_enable(void);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_eth_disable(void)
++ *
++ */
++enum ioh_status ioh_1588_eth_disable(void);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_can_enable(void)
++ *
++ */
++enum ioh_status ioh_1588_can_enable(void);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_can_disable(void)
++ *
++ */
++enum ioh_status ioh_1588_can_disable(void);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_set_station_address (unsigned char *addr)
++ *
++ */
++enum ioh_status ioh_1588_set_station_address(unsigned char *addr);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn enum ioh_status ioh_1588_get_station_address(char *addr)
++ *
++ */
++enum ioh_status ioh_1588_get_station_address(char *addr);
++
++/*! @ingroup IEEE_1588_HALLayerAPI
++ * @fn int ioh_1588_eth_can_get(void)
++ *
++ */
++int ioh_1588_eth_can_get(void);
++
++#ifdef IOH_IEEE1588_A0_A1_SAMPLE_BUG
++void ioh_1588_set_system_time_count(void);
++#endif
++
++#endif /* IOH_1588_HAL_H */
+diff -urN linux-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_main.c topcliff-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_main.c
+--- linux-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_main.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_main.c 2010-03-09 10:33:42.000000000 +0900
+@@ -0,0 +1,1192 @@
++ /*!
++ * @file ioh_1588_main.c
++ * @brief
++ * This file contains the definitions of the IEEE_1588_InterfaceLayer APIs
++ * @version 0.92
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * modified to support Intel IOH GE IEEE 1588 hardware
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ * derived from
++ * IEEE 1588 Time Synchronization Driver for Intel EP80579
++ * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ */
++
++#include "pch_1588_pci.h"
++#include "pch_1588_main.h"
++#include "pch_1588_hal.h"
++#include "pch_debug.h"
++#include <linux/sched.h>
++
++/* Linux functions prototypes */
++static int ioh_1588_open(struct inode *inode, struct file *filep);
++static int ioh_1588_release(struct inode *inode, struct file *filep);
++static int ioh_1588_ioctl(struct inode *inode, struct file *filep,
++ unsigned int cmd, unsigned long arg);
++
++/* Linux file operations */
++/*! @ingroup IEEE_1588_Global
++ * @var ioh_1588_fops
++ * @brief The structure variable used to specify the
++ * driver specific functionalities to the kernel
++ * subsystem.
++*/
++const struct file_operations ioh_1588_fops = {
++ .owner = THIS_MODULE,
++ .open = ioh_1588_open,
++ .release = ioh_1588_release,
++ .ioctl = ioh_1588_ioctl,
++};
++
++/* For notify ioctls - values are populated from isr callbacks */
++/*! @ingroup IEEE_1588_Global
++ * @var ioh_1588_target_time
++ * @brief This variable is updated from the target time reached callback
++ */
++struct ioh1588TimeValue ioh_1588_target_time;
++/*! @ingroup IEEE_1588_Global
++ * @var ioh_1588_aux_time
++ * @brief This variable is updated from the Auxiliary master/slave time
++ * captured callback
++ */
++struct ioh1588AuxTimeIoctl ioh_1588_aux_time;
++/*! @ingroup IEEE_1588_Global
++ * @var ioh_1588_pps_time
++ * @brief This variable is updated from the Pulse per second match
++ * callback
++ */
++unsigned long ioh_1588_pps_time;
++
++typedef int (*ioc_func_ptr) (unsigned long cmd, char *arg);
++static int ioc_handle_notify(unsigned long cmd, char *buf);
++static int ioc_handle_clr_notify(unsigned long cmd, char *buf);
++static int ioc_handle_reset(unsigned long cmd, char *buf);
++static int ioc_handle_show(unsigned long cmd, char *buf);
++static int ioc_handle_stats(unsigned long cmd, char *buf);
++static int ioc_handle_stats_reset(unsigned long cmd, char *buf);
++static int ioc_handle_int_enable(unsigned long cmd, char *buf);
++static int ioc_handle_int_disable(unsigned long cmd, char *buf);
++static int ioc_handle_port_config(unsigned long cmd, char *buf);
++static int ioc_handle_poll(unsigned long cmd, char *buf);
++static int ioc_handle_time_set(unsigned long cmd, char *buf);
++static int ioc_handle_time_get(unsigned long cmd, char *buf);
++static int ioc_handle_tick_rate(unsigned long cmd, char *buf);
++static int ioc_handle_pps_reqt(unsigned long cmd, char *buf);
++static int ioc_handle_version_reqt(unsigned long cmd, char *buf);
++static int ioc_handle_op_mode_reqt(unsigned long cmd, char *buf);
++
++/* IOCTL command and their associated functions */
++
++/*! @ingroup IEEE_1588_Global
++ * @struct ioh_1588_ioc_tbl
++ * @brief Structure to map the ioctl command to the associated function
++ */
++static const struct ioh_1588_ioc_tbl {
++ unsigned long cmd;
++ ioc_func_ptr func;
++} ioh_1588_ioc_tbl[] = {
++ {
++ IOCTL_1588_TARG_TIME_NOTIFY, ioc_handle_notify}, {
++ IOCTL_1588_AUX_TIME_NOTIFY, ioc_handle_notify}, {
++ IOCTL_1588_PULSE_PER_SEC_NOTIFY, ioc_handle_notify}, {
++ IOCTL_1588_AUX_TARG_TIME_NOTIFY, ioc_handle_notify}, {
++ IOCTL_1588_TARG_TIME_CLR_NOTIFY, ioc_handle_clr_notify}, {
++ IOCTL_1588_AUX_TIME_CLR_NOTIFY, ioc_handle_clr_notify}, {
++ IOCTL_1588_PULSE_PER_SEC_CLR_NOTIFY, ioc_handle_clr_notify}, {
++ IOCTL_1588_AUX_TARG_TIME_CLR_NOTIFY, ioc_handle_clr_notify}, {
++ IOCTL_1588_RESET, ioc_handle_reset}, {
++ IOCTL_1588_CHNL_RESET, ioc_handle_reset}, /* for this case too */
++ {
++ IOCTL_1588_SHOW_ALL, ioc_handle_show}, {
++ IOCTL_1588_STATS_GET, ioc_handle_stats}, {
++ IOCTL_1588_STATS_RESET, ioc_handle_stats_reset}, {
++ IOCTL_1588_TARG_TIME_INTRPT_ENABLE, ioc_handle_int_enable}, {
++ IOCTL_1588_AUX_TIME_INTRPT_ENABLE, ioc_handle_int_enable}, {
++ IOCTL_1588_PULSE_PER_SEC_INTRPT_ENABLE, ioc_handle_int_enable}, {
++ IOCTL_1588_AUX_TARG_TIME_INTRPT_ENABLE, ioc_handle_int_enable}, {
++ IOCTL_1588_TARG_TIME_INTRPT_DISABLE, ioc_handle_int_disable}, {
++ IOCTL_1588_AUX_TIME_INTRPT_DISABLE, ioc_handle_int_disable}, {
++ IOCTL_1588_PULSE_PER_SEC_INTRPT_DISABLE, ioc_handle_int_disable}, {
++ IOCTL_1588_AUX_TARG_TIME_INTRPT_DISABLE, ioc_handle_int_disable}, {
++ IOCTL_1588_PORT_CONFIG_SET, ioc_handle_port_config}, {
++ IOCTL_1588_PORT_CONFIG_GET, ioc_handle_port_config}, {
++ IOCTL_1588_RX_POLL, ioc_handle_poll}, {
++ IOCTL_1588_TX_POLL, ioc_handle_poll}, {
++ IOCTL_1588_CAN_POLL, ioc_handle_poll}, {
++ IOCTL_1588_TARG_TIME_POLL, ioc_handle_poll}, {
++ IOCTL_1588_AUX_TIME_POLL, ioc_handle_poll}, {
++ IOCTL_1588_AUX_TARG_TIME_POLL, ioc_handle_poll}, {
++ IOCTL_1588_SYS_TIME_SET, ioc_handle_time_set}, {
++ IOCTL_1588_TARG_TIME_SET, ioc_handle_time_set}, {
++ IOCTL_1588_AUX_TARG_TIME_SET, ioc_handle_time_set}, {
++ IOCTL_1588_SYS_TIME_GET, ioc_handle_time_get}, {
++ IOCTL_1588_TARG_TIME_GET, ioc_handle_time_get}, {
++ IOCTL_1588_AUX_TARG_TIME_GET, ioc_handle_time_get}, {
++ IOCTL_1588_TICK_RATE_GET, ioc_handle_tick_rate}, {
++ IOCTL_1588_TICK_RATE_SET, ioc_handle_tick_rate}, {
++ IOCTL_1588_PULSE_PER_SEC_TIME_SET, ioc_handle_pps_reqt}, {
++ IOCTL_1588_PULSE_PER_SEC_TIME_GET, ioc_handle_pps_reqt}, {
++ IOCTL_1588_PORT_VERSION_SET, ioc_handle_version_reqt}, {
++ IOCTL_1588_PORT_VERSION_GET, ioc_handle_version_reqt}, {
++ IOCTL_1588_PORT_OPERATION_MODE_SET, ioc_handle_op_mode_reqt}, {
++IOCTL_1588_PORT_OPERATION_MODE_GET, ioc_handle_op_mode_reqt},};
++
++#define IOH_1588_IOC_TBL_ENTRIES \
++ (sizeof ioh_1588_ioc_tbl / sizeof ioh_1588_ioc_tbl[0])
++
++/*! @ingroup IEEE_1588_InterfaceLayerAPI
++ * @fn int ioh_1588_open(struct inode *inode, struct file *filep)
++ * @brief This function is called when the driver interface is opened
++ * @remarks This function is registered at the driver initialization
++ * point (module_init) and invoked when a process opens the
++ * IEEE 1588 device node.
++ *
++ * @param inode [IN] pointer to device inode structure
++ * @param filep [IN] pointer to open file structure
++ *
++ * @return int
++ * - Returns 0 on success and <0 on failure
++ */
++static int ioh_1588_open(struct inode *inode, struct file *filep)
++{
++ if (ioh_1588_devp->suspend) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_open returning as device is suspended\n");
++ return -EINTR;
++ }
++ IOH_DEBUG("ioh_1588_open\n");
++
++ return 0;
++}
++
++/*! @ingroup IEEE_1588_InterfaceLayerAPI
++ * @fn int ioh_1588_release(struct inode *inode, struct file *filep)
++ * @brief This function is called when the driver interface is closed
++ * @remarks This function is registered at the driver initialization
++ * point (module_init) and invoked when the last process
++ * which has an open file table entry for the device
++ * exits or does a close of the device file.
++ *
++ * @param inode [IN] pointer to device inode structure
++ * @param filep [IN] pointer to open file structure
++ *
++ * @retval int
++ * - Returns 0 on success and <0 on failure
++ */
++static int ioh_1588_release(struct inode *inode, struct file *filep)
++{
++ IOH_DEBUG("ioh_1588_release\n");
++
++ return 0;
++}
++
++/*! @ingroup IEEE_1588_InterfaceLayerAPI
++ * @fn int ioh_1588_ioctl(struct inode *inode, struct file *filep,
++ * unsigned int cmd, unsigned long arg)
++ * @brief This function implements the ioctl interface of the driver
++ * @remarks This function is registered at the driver initialization
++ * point (module_init) and invoked when a user process
++ * invokes the .ioctl. call on the device
++ *
++ * @param inode [IN] pointer to device inode structure
++ * @param filep [IN] pointer to open file structure
++ * @param cmd [IN] ioctl command
++ * @param arg [INOUT] argument passed to the command
++ *
++ * @retval int
++ * - Returns 0 on success and <0 on failure
++ */
++static int ioh_1588_ioctl(struct inode *inode, struct file *filep,
++ unsigned int cmd, unsigned long arg)
++{
++ char buffer[0x64];
++ unsigned int argsz;
++ int i, ret = 0;
++
++ if ((!ioh_1588_devp->initialized) || (ioh_1588_devp->suspend)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl:device is suspended OR \
++ uninitialized\n");
++ return -EINTR;
++ }
++
++ argsz = _IOC_SIZE(cmd);
++
++ if (argsz > sizeof buffer) {
++ IOH_LOG(KERN_ERR, "ioh_1588_ioctl: buffer size too small.\n");
++ return -EINVAL;
++ }
++
++ /* if data is being written to the driver */
++ if (_IOC_DIR(cmd) & _IOC_WRITE) {
++ /* get the data passed in by user */
++ if (copy_from_user(&buffer, (void *)arg, argsz)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl: could not copy user space \
++ data.\n");
++ return -EFAULT;
++ }
++ }
++
++ for (i = 0; i < IOH_1588_IOC_TBL_ENTRIES; i++) {
++ if (ioh_1588_ioc_tbl[i].cmd == cmd) {
++ ret = ioh_1588_ioc_tbl[i].func(cmd, buffer);
++ break;
++ }
++ }
++ if (i >= IOH_1588_IOC_TBL_ENTRIES) { /* did not find a match */
++ IOH_LOG(KERN_ERR, "ioh_1588_ioctl: unknown command (0x%x)\n",
++ cmd);
++ return -EINVAL;
++ }
++
++ /* if data is being read from the driver */
++ if ((ret == 0) && (_IOC_DIR(cmd) & _IOC_READ)) {
++ if (copy_to_user((void *)arg, buffer, argsz)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl: could not copy data to user \
++ space.\n");
++ return -EFAULT;
++ }
++ }
++
++ return ret;
++}
++
++/* Handles all NOTIFY IOCTLs */
++/*! @ingroup IEEE_1588_UtilitiesAPI
++ * @fn ioc_handle_notify (unsigned long cmd, char *buf)
++ * @brief Handles all NOTIFY IOCTLs
++ * @param cmd [IN] the IOCTL command
++ * @param buf [OUT] the reference to data to be returned
++ * @retval int
++ * - 0
++ * <hr>
++ */
++static int ioc_handle_notify(unsigned long cmd, char *buf)
++{
++ unsigned int bytes_ret = 0;
++ void *param_addr = NULL;
++ wait_queue_head_t *event = NULL;
++ unsigned int eventnum = 0;
++
++ if (IOCTL_1588_AUX_TARG_TIME_NOTIFY == cmd) {
++ IOH_LOG(KERN_ERR, "ioc_handle_notify \
++ returning...[cmd = IOCTL_1588_AUX_TARG_TIME_NOTIFY]\n");
++ return -EINVAL;
++ }
++ /* request to be notified of a 1588 interrupt event Target Time */
++ else if (cmd == IOCTL_1588_TARG_TIME_NOTIFY) {
++ IOH_DEBUG
++ ("ioc_handle_notify cmd = IOCTL_1588_TARG_TIME_NOTIFY]\n");
++ event = &ioh_1588_devp->notify_evt[TARG_TIME_EVENT_NUM];
++ bytes_ret = sizeof(struct ioh1588TimeValue);
++ param_addr = &ioh_1588_target_time;
++ eventnum = TARG_TIME_EVENT_NUM;
++ } else if (cmd == IOCTL_1588_AUX_TIME_NOTIFY) {
++ IOH_DEBUG
++ ("ioc_handle_notify cmd = IOCTL_1588_AUX_TIME_NOTIFY]\n");
++ event = &ioh_1588_devp->notify_evt[AUX_TIME_EVENT_NUM];
++ bytes_ret = sizeof(struct ioh1588AuxTimeIoctl);
++ param_addr = &ioh_1588_aux_time;
++ eventnum = AUX_TIME_EVENT_NUM;
++ } else {
++ event = &ioh_1588_devp->notify_evt[PPS_EVENT_NUM];
++ bytes_ret = sizeof(unsigned long);
++ param_addr = &ioh_1588_pps_time;
++ eventnum = PPS_EVENT_NUM;
++ }
++
++ ioh_1588_devp->event_flags[eventnum] = 0;
++
++ /* wait infinitely for a 1588 interrupt event to occur */
++ IOH_DEBUG("ioc_handle_notify waiting for interrupt event...\n");
++ wait_event_interruptible(*event,
++ ioh_1588_devp->event_flags[eventnum] == 1);
++ IOH_DEBUG("ioc_handle_notify got interrupt event...\n");
++
++	/* copy global data retrieved from interrupt handler */
++	(void)memcpy((void *)buf, (const void *)param_addr, bytes_ret);
++
++ /* reset global data to 0 */
++ (void)memset((void *)param_addr, 0, bytes_ret);
++
++ ioh_1588_devp->event_flags[eventnum] = 0;
++
++ return 0;
++}
++
++/* Handles all CLEAR NOTIFY IOCTLs */
++/*! @ingroup IEEE_1588_UtilitiesAPI
++ * @fn ioc_handle_clr_notify (unsigned long cmd, char *buf)
++ * @brief Handles all CLEAR NOTIFY IOCTLs
++ * @param cmd [IN] the IOCTL command
++ * @param buf unused
++ * @retval int
++ * - 0 on success
++ * - -EINVAL for unsupported IOCTL
++ * <hr>
++ */
++static int ioc_handle_clr_notify(unsigned long cmd, char *buf)
++{
++ unsigned int eventnum = 0;
++
++ /*
++ * request to release a notify thread that is waiting
++ * on a 1588 interrupt event
++ */
++ if (cmd == IOCTL_1588_TARG_TIME_CLR_NOTIFY) {
++ IOH_DEBUG
++ ("ioc_handle_clr_notify cmd=\
++ IOCTL_1588_TARG_TIME_CLR_NOTIFY\n");
++ eventnum = TARG_TIME_EVENT_NUM;
++ } else if (cmd == IOCTL_1588_AUX_TIME_CLR_NOTIFY) {
++ IOH_DEBUG
++ ("ioc_handle_clr_notify cmd=\
++ IOCTL_1588_AUX_TIME_CLR_NOTIFY\n");
++ eventnum = AUX_TIME_EVENT_NUM;
++ } else if (cmd == IOCTL_1588_PULSE_PER_SEC_CLR_NOTIFY) {
++ IOH_DEBUG
++ ("ioc_handle_clr_notify cmd=\
++ IOCTL_1588_PULSE_PER_SEC_CLR_NOTIFY\n");
++ eventnum = PPS_EVENT_NUM;
++ } else if (cmd == IOCTL_1588_AUX_TARG_TIME_CLR_NOTIFY) {
++ IOH_DEBUG
++ ("ioc_handle_clr_notify cmd=\
++ IOCTL_1588_AUX_TARG_TIME_CLR_NOTIFY\n");
++ IOH_LOG(KERN_ERR, "ioc_handle_clr_notify returning -EINVAL\n");
++ return -EINVAL;
++ }
++
++ ioh_1588_devp->event_flags[eventnum] = 1;
++
++ IOH_DEBUG("ioc_handle_clr_notify waking up blocking notify call...\n");
++ wake_up_interruptible(&ioh_1588_devp->notify_evt[eventnum]);
++ return 0;
++}
++
++/* Handles reset and channel reset IOCTLs */
++
++/*! @ingroup IEEE_1588_UtilitiesAPI
++ * @fn ioc_handle_reset (unsigned long cmd, char *buf)
++ * @brief Handles reset and channel reset IOCTLs
++ * @param cmd [IN] the IOCTL command
++ * @param buf unused
++ * @retval int
++ * - 0 on success
++ * - -EINVAL when hardware reset fails
++ * <hr>
++ */
++static int ioc_handle_reset(unsigned long cmd, char *buf)
++{
++ int i = 0;
++ int ieee_mode;
++ unsigned char station[STATION_ADDR_LEN] = "00:00:00:00:00:00";
++
++ IOH_DEBUG("ioc_handle_reset: invoking ioh_1588_reset\n");
++
++ /*retrieve eth/CAN mode */
++ ieee_mode = ioh_1588_eth_can_get();
++
++	/*retrieve station address */
++ ioh_1588_get_station_address(station);
++
++ /* reset the 1588 hardware */
++ if (ioh_1588_reset() != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR, "ioc_handle_reset: ioh_1588_reset failed\n");
++ return -EINVAL;
++ }
++ /* Anyway, now clear all the events */
++ for (i = 0; i < NUM_EVENTS; i++)
++ ioh_1588_devp->event_flags[i] = 0;
++ /*set ETH/CAN mode */
++ if (ieee_mode & IOH_IEEE1588_ETH)
++ ioh_1588_eth_enable();
++ if (ieee_mode & IOH_IEEE1588_CAN)
++ ioh_1588_can_enable();
++
++ /*set station address */
++ if (strcmp(station, "00:00:00:00:00:00") != 0) {
++ if (ioh_1588_set_station_address(station) != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_reset: could not set station \
++ address\n");
++ }
++ }
++
++ IOH_DEBUG("ioc_handle_reset: returning 0\n");
++ return 0;
++}
++
++/* Handles reset statistics IOCTL */
++/*! @ingroup IEEE_1588_UtilitiesAPI
++ * @fn ioc_handle_stats_reset (unsigned long cmd, char *buf)
++ * @brief Handles reset statistics IOCTLs
++ * @param cmd unused
++ * @param buf unused
++ * @retval int
++ * - 0 on success
++ * <hr>
++ */
++static int ioc_handle_stats_reset(unsigned long cmd, char *buf)
++{
++
++ IOH_DEBUG("ioc_handle_stats_reset: invoking ioh_1588_stats_reset\n");
++ ioh_1588_stats_reset();
++ IOH_DEBUG("ioc_handle_stats_reset: returning 0\n");
++ return 0;
++}
++
++/* Handles get statistics IOCTL */
++/*! @ingroup IEEE_1588_UtilitiesAPI
++ * @fn ioc_handle_stats (unsigned long cmd, char *buf)
++ * @brief Handles get statistics IOCTL
++ * @param cmd [IN] the IOCTL command
++ * @param buf [OUT] reference to statistics retrieved
++ * @retval int
++ * - 0 on success
++ * <hr>
++ */
++static int ioc_handle_stats(unsigned long cmd, char *buf)
++{
++ IOH_DEBUG("ioc_handle_stats: invoking ioh_ioh_1588_stats_get\n");
++ if (ioh_1588_stats_get((struct ioh1588Stats *) buf) != \
++ IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl: ioh_1588_stats_get failed\n");
++ return -EINVAL;
++ }
++ IOH_DEBUG("ioc_handle_statst: returning 0\n");
++ return 0;
++}
++
++/* Handles show all IOCTL */
++/*! @ingroup IEEE_1588_UtilitiesAPI
++ * @fn ioc_handle_show (unsigned long cmd, char *buf)
++ * @brief Handles show all IOCTL
++ * @param cmd unused
++ * @param buf unused
++ * @retval int
++ * - 0 on success
++ * - -EINVAL when @ref ioh_1588_show fails
++ * <hr>
++ */
++static int ioc_handle_show(unsigned long cmd, char *buf)
++{
++ IOH_DEBUG("ioc_handle_show: invoking ioh_1588_show\n");
++ if (ioh_1588_show() != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR, "ioh_1588_ioctl: ioh_1588_show failed\n");
++ return -EINVAL;
++ }
++ IOH_DEBUG("ioc_handle_show: returning 0\n");
++ return 0;
++}
++
++/* Handles all interrupt enable IOCTLs */
++/*! @ingroup IEEE_1588_UtilitiesAPI
++ * @fn ioc_handle_int_enable (unsigned long cmd, char *buf)
++ * @brief Handles all interrupt enable IOCTLs
++ * @param cmd [IN] the IOCTL command
++ * @param buf [IN] the reference to auxiliary mode
++ * @retval int
++ * - 0 on success
++ * - -EINVAL failed to enable interrupt
++ * <hr>
++ */
++static int ioc_handle_int_enable(unsigned long cmd, char *buf)
++{
++ int ret = 0;
++
++ if (IOCTL_1588_TARG_TIME_INTRPT_ENABLE == cmd) {
++ IOH_DEBUG
++ ("ioc_handle_int_enable cmd=\
++ IOCTL_1588_TARG_TIME_INTRPT_ENABLE \
++ invoking \
++ ioh_1588_target_time_interrupt_enable\n");
++ if (ioh_1588_target_time_interrupt_enable(target_time_callback)
++ != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR, "ioh_1588_ioctl: \
++ ioh_1588_target_time_interrupt_enable \
++ failed\n");
++ ret = -EINVAL;
++ }
++ } else if (IOCTL_1588_AUX_TIME_INTRPT_ENABLE == cmd) {
++ enum ioh1588AuxMode aux_mode;
++ IOH_DEBUG
++ ("ioc_handle_int_enable cmd=\
++ IOCTL_1588_AUX_TIME_INTRPT_ENABLE \
++ invoking ioh_1588_aux_time_interrupt_enable\n");
++
++ (void)memcpy((void *)&aux_mode, (const void *)buf,
++ sizeof(enum ioh1588AuxMode));
++
++ if (ioh_1588_aux_time_interrupt_enable
++ (aux_mode, auxiliary_time_callback) != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR, "ioh_1588_ioctl: \
++ ioh_1588_aux_time_interrupt_enable \
++ failed\n");
++ ret = -EINVAL;
++ }
++ } else if (IOCTL_1588_PULSE_PER_SEC_INTRPT_ENABLE == cmd) {
++ IOH_DEBUG
++ ("ioc_handle_int_enable cmd=\
++ IOCTL_1588_PULSE_PER_SEC_INTRPT_ENABLE \
++ invoking \
++ ioh_1588_pulse_per_sec_interrupt_enable\n");
++ if (ioh_1588_pulse_per_sec_interrupt_enable
++ (pulse_per_sec_callback)
++ != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR, "ioh_1588_ioctl: \
++ ioh_1588_pps_interrupt_enable \
++ failed\n");
++ ret = -EINVAL;
++ }
++ } else { /* IOCTL_1588_AUX_TARG_TIME_INTRPT_ENABLE */
++
++ IOH_DEBUG
++ ("ioc_handle_int_enable cmd=\
++ OCTL_1588_AUX_TARG_TIME_INTRPT_ENABLE \
++ invoking \
++ ioh_1588_aux_target_time_interrupt_enable\n");
++ if (ioh_1588_aux_target_time_interrupt_enable((void *)NULL)
++ != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR, "ioh_1588_ioctl: \
++ ioh_1588_aux_target_time_interrupt_enable \
++ failed\n");
++ ret = -EINVAL;
++ }
++ }
++ return ret;
++}
++
++/* Handles all interrupt disable IOCTLs */
++/*! @ingroup IEEE_1588_UtilitiesAPI
++ * @fn ioc_handle_int_disable (unsigned long cmd, char *buf)
++ * @brief Handles all interrupt enable IOCTLs
++ * @param cmd [IN] the IOCTL command
++ * @param buf [IN] the reference to auxiliary mode
++ * @retval int
++ * - 0 on success
++ * - -EINVAL failed to disable interrupt
++ * <hr>
++ */
++static int ioc_handle_int_disable(unsigned long cmd, char *buf)
++{
++ int ret = 0;
++
++ if (IOCTL_1588_TARG_TIME_INTRPT_DISABLE == cmd) {
++ IOH_DEBUG
++ ("ioc_handle_int_disable cmd=\
++ IOCTL_1588_TARG_TIME_INTRPT_DISABLE \
++ invoking \
++ ioh_1588_target_time_interrupt_disable\n");
++ if (ioh_1588_target_time_interrupt_disable()
++ != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR, "ioh_1588_ioctl: \
++ ioh_1588_target_time_interrupt_disable \
++ failed\n");
++ ret = -EINVAL;
++ }
++ } else if (IOCTL_1588_AUX_TIME_INTRPT_DISABLE == cmd) {
++ enum ioh1588AuxMode aux_mode;
++ IOH_DEBUG
++ ("ioc_handle_int_disable cmd=\
++ IOCTL_1588_AUX_TIME_INTRPT_DISABLE \
++ invoking \
++ ioh_1588_aux_time_interrupt_disable\n");
++
++ (void)memcpy((void *)&aux_mode, (const void *)buf,
++ sizeof(enum ioh1588AuxMode));
++
++ if (ioh_1588_aux_time_interrupt_disable(aux_mode)
++ != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR, "ioh_1588_ioctl: \
++ ioh_1588_aux_time_interrupt_disable \
++ failed\n");
++ ret = -EINVAL;
++ }
++ } else if (IOCTL_1588_PULSE_PER_SEC_INTRPT_DISABLE == cmd) {
++ IOH_DEBUG
++ ("ioc_handle_int_disable cmd=\
++ IOCTL_1588_PULSE_PER_SEC_INTRPT_DISABLE \
++ invoking \
++ ioh_1588_pulse_per_sec_interrupt_disable\n");
++ if (ioh_1588_pulse_per_sec_interrupt_disable() !=
++ IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR, "ioh_1588_ioctl: \
++ ioh_1588_pulse_per_sec_interrupt_disable \
++ failed\n");
++ ret = -EINVAL;
++ }
++ } else { /* IOCTL_1588_AUX_TARG_TIME_INTRPT_DISABLE */
++
++ IOH_DEBUG
++ ("ioc_handle_int_disable cmd=\
++ IOCTL_1588_AUX_TARG_TIME_INTRPT_DISABLE \
++ invoking \
++ ioh_1588_aux_target_time_interrupt_disable\n");
++ if (ioh_1588_aux_target_time_interrupt_disable() !=
++ IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR, "ioh_1588_ioctl: \
++ ioh_1588_aux_target_time_interrupt_disable \
++ failed\n");
++ ret = -EINVAL;
++ }
++ }
++ return ret;
++}
++
++/* Handles port config set/get IOCTLs */
++/*! @ingroup IEEE_1588_UtilitiesAPI
++ * @fn ioc_handle_port_config (unsigned long cmd, char *buf)
++ * @brief Handles port config set/get IOCTLs
++ * @param cmd [IN] the IOCTL command
++ * @param buf [IN] the port configuration
++ * @retval int
++ * - 0 on success
++ * - -EINVAL failed to set/get port configuration
++ * <hr>
++ */
++static int ioc_handle_port_config(unsigned long cmd, char *buf)
++{
++ struct ioh1588PortCfgIoctl *port_cfg_ioctl = \
++ (struct ioh1588PortCfgIoctl *) buf;
++
++ if (IOCTL_1588_PORT_CONFIG_SET == cmd) {
++ IOH_DEBUG
++ ("ioc_handle_port_config cmd = IOCTL_1588_PORT_CONFIG_SET \
++ invoking ioh_1588_ptp_port_config_set\n");
++
++ if (ioh_1588_ptp_port_config_set(port_cfg_ioctl->ptpPort,
++ port_cfg_ioctl->ptpPortMode) !=
++ IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl: ioh_1588_ptp_port_config_set \
++ failed\n");
++ return -EINVAL;
++ }
++ } else { /* IOCTL_1588_PORT_CONFIG_GET */
++
++ IOH_DEBUG
++ ("ioc_handle_port_config cmd = IOCTL_1588_PORT_CONFIG_GET \
++ invoking ioh_1588_ptp_port_config_get\n");
++ if (ioh_1588_ptp_port_config_get
++ (port_cfg_ioctl->ptpPort,
++ &port_cfg_ioctl->ptpPortMode) != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl: ioh_1588_ptp_port_config_get \
++ failed\n");
++ return -EINVAL;
++ }
++ }
++ return 0;
++}
++
++/* Handles all POLL IOCTLs */
++/*! @ingroup IEEE_1588_UtilitiesAPI
++ * @fn ioc_handle_poll (unsigned long cmd, char *buf)
++ * @brief Handles all poll IOCTLs
++ * @param cmd [IN] the IOCTL command
++ * @param buf [IN] the poll configuration
++ * @retval int
++ * - 0 on success
++ * - -EINVAL on failure
++ * <hr>
++ */
++static int ioc_handle_poll(unsigned long cmd, char *buf)
++{
++ int ret = 0;
++ struct ioh1588RxTxPollIoctl *poll_ioctl = \
++ (struct ioh1588RxTxPollIoctl *) buf;
++ struct ioh1588CANPollIoctl *can_poll_ioctl = \
++ (struct ioh1588CANPollIoctl *) buf;
++ struct ioh1588TimePollIoctl *time_poll_ioctl = \
++ (struct ioh1588TimePollIoctl *) buf;
++
++ if (IOCTL_1588_RX_POLL == cmd) {
++ IOH_DEBUG("ioc_handle_poll: cmd = IOCTL_1588_RX_POLL \
++ invoking ioh_1588_ptp_rx_poll\n");
++ ret = ioh_1588_ptp_rx_poll(poll_ioctl->ptpPort,
++ &poll_ioctl->ptpMsgData);
++ if ((ret != IOH_1588_SUCCESS) && \
++ (ret != IOH_1588_NOTIMESTAMP)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl: ioh_1588_ptp_rx_poll \
++ failed\n");
++ ret = -EINVAL;
++ }
++ } else if (IOCTL_1588_TX_POLL == cmd) {
++ IOH_DEBUG("ioc_handle_poll: cmd = IOCTL_1588_TX_POLL \
++ invoking ioh_1588_ptp_tx_poll\n");
++ ret = ioh_1588_ptp_tx_poll(poll_ioctl->ptpPort,
++ &poll_ioctl->ptpMsgData);
++ if ((ret != IOH_1588_SUCCESS) && \
++ (ret != IOH_1588_NOTIMESTAMP)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl: ioh_1588_ptp_tx_poll \
++ failed\n");
++ ret = -EINVAL;
++ }
++ } else if (IOCTL_1588_CAN_POLL == cmd) {
++ IOH_DEBUG("ioc_handle_poll: cmd = IOCTL_1588_CAN_POLL \
++ invoking ioh_1588_ptp_can_poll\n");
++ ret = ioh_1588_ptp_can_poll(can_poll_ioctl->ptpPort,
++ &can_poll_ioctl->ptpTimeStamp);
++ if ((ret != IOH_1588_SUCCESS) && \
++ (ret != IOH_1588_NOTIMESTAMP)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl: ioh_1588_ptp_can_poll \
++ failed\n");
++ ret = -EINVAL;
++ }
++ } else if (IOCTL_1588_TARG_TIME_POLL == cmd) {
++ IOH_DEBUG("ioc_handle_poll: cmd = IOCTL_1588_TARG_TIME_POLL "
++ "invoking ioh_1588_target_time_poll\n");
++ if (ioh_1588_target_time_poll(&time_poll_ioctl->pollFlag,
++ &time_poll_ioctl->timeVal) !=
++ IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl: ioh_1588_target_time_poll \
++ failed\n");
++ ret = -EINVAL;
++ }
++ } else if (IOCTL_1588_AUX_TIME_POLL == cmd) {
++ IOH_DEBUG("ioc_handle_poll: cmd = IOCTL_1588_AUX_TIME_POLL "
++ "invoking ioh_1588_aux_time_poll\n");
++ if (ioh_1588_aux_time_poll(time_poll_ioctl->auxMode,
++ &time_poll_ioctl->pollFlag,
++ &time_poll_ioctl->timeVal)
++ != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl: ioh_1588_aux_time_poll \
++ failed\n");
++ ret = -EINVAL;
++ }
++ } else { /* IOCTL_1588_AUX_TARG_TIME_POLL */
++
++ IOH_DEBUG
++ ("ioc_handle_poll: cmd = IOCTL_1588_AUX_TARG_TIME_POLL "
++ "invoking ioh_1588_aux_target_time_poll\n");
++ if (ioh_1588_aux_target_time_poll
++ (&time_poll_ioctl->pollFlag,
++ &time_poll_ioctl->timeVal) != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl: ioh_1588_aux_target_time_poll \
++ failed\n");
++ ret = -EINVAL;
++ }
++ }
++ if ((unsigned long)ret == IOH_1588_NOTIMESTAMP)
++ ret = 0;
++ return ret;
++}
++
++/* Handles all Time Set IOCTLs */
++/*! @ingroup IEEE_1588_UtilitiesAPI
++ * @fn ioc_handle_time_set (unsigned long cmd, char *buf)
++ * @brief Handles all Time Set IOCTLs
++ * @param cmd [IN] the IOCTL command
++ * @param buf [IN] the time value
++ * @retval int
++ * - 0 on success
++ * - -EINVAL on failure
++ * <hr>
++ */
++static int ioc_handle_time_set(unsigned long cmd, char *buf)
++{
++ struct ioh1588TimeValue time_value;
++
++ (void)memcpy((void *)&time_value, (const void *)buf,
++ sizeof(struct ioh1588TimeValue));
++
++ if (IOCTL_1588_SYS_TIME_SET == cmd) {
++ IOH_DEBUG("ioc_handle_time_set cmd=IOCTL_1588_SYS_TIME_SET \
++ invoking ioh_1588_system_time_set\n");
++ if (ioh_1588_system_time_set(time_value) != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl: ioh_1588_system_time_set \
++ failed\n");
++ return -EINVAL;
++ }
++ } else if (IOCTL_1588_TARG_TIME_SET == cmd) {
++ IOH_DEBUG("ioc_handle_time_set cmd=IOCTL_1588_TARG_TIME_SET \
++ invoking ioh_1588_target_time_set\n");
++ if (ioh_1588_target_time_set(time_value) != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl: ioh_1588_target_time_set \
++ failed\n");
++ return -EINVAL;
++ }
++ } else { /* IOCTL_1588_AUX_TARG_TIME_SET */
++
++ IOH_DEBUG
++ ("ioc_handle_time_set cmd=IOCTL_1588_AUX_TARG_TIME_SET \
++ invoking ioh_1588_aux_target_time_set\n");
++ if (ioh_1588_aux_target_time_set(time_value) !=
++ IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl: ioh_1588_aux_target_time_set \
++ failed\n");
++ return -EINVAL;
++ }
++ }
++ return 0;
++}
++
++/* Handles all Time Get IOCTLs */
++/*! @ingroup IEEE_1588_UtilitiesAPI
++ * @fn ioc_handle_time_get (unsigned long cmd, char *buf)
++ * @brief Handles all Time Get IOCTLs
++ * @param cmd [IN] the IOCTL command
++ * @param buf [OUT] the time value
++ * @retval int
++ * - 0 on success
++ * - -EINVAL on failure
++ * <hr>
++ */
++static int ioc_handle_time_get(unsigned long cmd, char *buf)
++{
++ struct ioh1588TimeValue time_value;
++
++ if (IOCTL_1588_SYS_TIME_GET == cmd) {
++ IOH_DEBUG("ioc_handle_time_get cmd=IOCTL_1588_SYS_TIME_GET \
++ invoking ioh_1588_system_time_get\n");
++ if (ioh_1588_system_time_get(&time_value) != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl: ioh_1588_system_time_get \
++ failed\n");
++ return -EINVAL;
++ }
++ } else if (IOCTL_1588_TARG_TIME_GET == cmd) {
++ IOH_DEBUG("ioc_handle_time_get cmd=IOCTL_1588_TARG_TIME_GET \
++ invoking ioh_1588_target_time_get\n");
++ if (ioh_1588_target_time_get(&time_value) != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl: ioh_1588_target_time_get \
++ failed\n");
++ return -EINVAL;
++ }
++ } else { /* IOCTL_1588_AUX_TARG_TIME_GET */
++
++ IOH_DEBUG
++ ("ioc_handle_time_get cmd=IOCTL_1588_AUX_TARG_TIME_GET \
++ invoking ioh_1588_aux_target_time_get\n");
++ if (ioh_1588_aux_target_time_get(&time_value)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl: ioh_1588_aux_target_time_set \
++ failed\n");
++ return -EINVAL;
++ }
++ }
++
++ (void)memcpy((void *)buf, (const void *)&time_value,
++ sizeof(struct ioh1588TimeValue));
++ return 0;
++}
++
++/* Handles tick rate get/set IOCTLs */
++/*! @ingroup IEEE_1588_UtilitiesAPI
++ * @fn ioc_handle_tick_rate (unsigned long cmd, char *buf)
++ * @brief Handles tick rate get/set IOCTLs
++ * @param cmd [IN] the IOCTL command
++ * @param buf [OUT] the tick rate
++ * @retval int
++ * - 0 on success
++ * - -EINVAL on failure
++ * <hr>
++ */
++static int ioc_handle_tick_rate(unsigned long cmd, char *buf)
++{
++ unsigned long val;
++
++ if (IOCTL_1588_TICK_RATE_GET == cmd) {
++ IOH_DEBUG("ioc_handle_tick_rate cmd =IOCTL_1588_TICK_RATE_GET \
++ invoking ioh_1588_tick_rate_get\n");
++ if (ioh_1588_tick_rate_get(&val) != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl: ioh_1588_tick_rate_get \
++ failed\n");
++ return -EINVAL;
++ }
++
++ (void)memcpy((void *)buf, (const void *)&val, sizeof val);
++ } else { /* (cmd == IOCTL_1588_TICK_RATE_SET) */
++
++ IOH_DEBUG("ioc_handle_tick_rate cmd =IOCTL_1588_TICK_RATE_SET \
++ invoking ioh_1588_tick_rate_set\n");
++ (void)memcpy((void *)&val, (const void *)buf, sizeof val);
++
++ if (ioh_1588_tick_rate_set(val) != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl: ioh_1588_tick_rate_set \
++ failed\n");
++ return -EINVAL;
++ }
++ }
++ return 0;
++}
++
++/* Handles pps time get/set IOCTLs */
++/*! @ingroup IEEE_1588_UtilitiesAPI
++ * @fn ioc_handle_pps_reqt (unsigned long cmd, char *buf)
++ * @brief Handles pps time get/set IOCTLs
++ * @param cmd [IN] the IOCTL command
++ * @param buf [INOUT] the pulse per second value
++ * @retval int
++ * - 0 on success
++ * - -EINVAL on failure
++ * <hr>
++ */
++static int ioc_handle_pps_reqt(unsigned long cmd, char *buf)
++{
++ unsigned long val;
++
++ if (IOCTL_1588_PULSE_PER_SEC_TIME_SET == cmd) {
++ IOH_DEBUG
++ ("ioc_handle_pps_reqt cmd=\
++ IOCTL_1588_PULSE_PER_SEC_TIME_SET \
++ invoking ioh_1588_pulse_per_sec_time_set\n");
++ (void)memcpy((void *)&val, (const void *)buf, sizeof val);
++ if (ioh_1588_pulse_per_sec_time_set(val) != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR, "ioh_1588_ioctl: \
++ ioh_1588_pulse_per_sec_time_set \
++ failed\n");
++ return -EINVAL;
++ }
++ } else { /* IOCTL_1588_PULSE_PER_SEC_TIME_GET */
++
++ IOH_DEBUG
++ ("ioc_handle_pps_reqt cmd=\
++ IOCTL_1588_PULSE_PER_SEC_TIME_GET \
++ invoking ioh_1588_pulse_per_sec_time_get\n");
++ if (ioh_1588_pulse_per_sec_time_get(&val) != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR, "ioh_1588_ioctl: \
++ ioh_1588_pulse_per_sec_time_get failed\n");
++ return -EINVAL;
++ }
++ (void)memcpy((void *)buf, (const void *)&val, sizeof val);
++ }
++ return 0;
++}
++
++/* Handles ptp version get/set IOCTLs */
++/*! @ingroup IEEE_1588_UtilitiesAPI
++ * @fn ioc_handle_version_reqt (unsigned long cmd, char *buf)
++ * @brief Handles ptp version get/set IOCTLs
++ * @param cmd [IN] the IOCTL command
++ * @param buf [INOUT]the ptp version
++ * @retval int
++ * - 0 on success
++ * - -EINVAL on failure
++ * <hr>
++ */
++static int ioc_handle_version_reqt(unsigned long cmd, char *buf)
++{
++ struct ioh1588VersionIoctl *version_ioctl = \
++ (struct ioh1588VersionIoctl *) buf;
++
++ if (IOCTL_1588_PORT_VERSION_SET == cmd) {
++ IOH_DEBUG
++ ("ioc_handle_version_reqt cmd=IOCTL_1588_PORT_VERSION_SET \
++ invoking ioh_1588_ptp_version_set\n");
++ if (ioh_1588_ptp_version_set
++ (version_ioctl->ptpPort,
++ version_ioctl->ptpVersion) != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl: ioh_1588_ptp_version_set \
++ failed\n");
++ return -EINVAL;
++ }
++ } else { /* IOCTL_1588_PORT_VERSION_GET */
++
++ IOH_DEBUG
++ ("ioc_handle_version_reqt cmd=IOCTL_1588_PORT_VERSION_GET \
++ invoking ioh_1588_ptp_version_get\n");
++ if (ioh_1588_ptp_version_get
++ (version_ioctl->ptpPort,
++ &version_ioctl->ptpVersion) != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl: ioh_1588_ptp_version_get \
++ failed\n");
++ return -EINVAL;
++ }
++ }
++ return 0;
++}
++
++/* Handles ptp operation mode get/set IOCTLs */
++/*! @ingroup IEEE_1588_UtilitiesAPI
++ * @fn ioc_handle_op_mode_reqt (unsigned long cmd, char *buf)
++ * @brief Handles ptp operation mode get/set IOCTLs
++ * @param cmd [IN] the IOCTL command
++ * @param buf [INOUT]the ptp operation mode
++ * @retval int
++ * - 0 on success
++ * - -EINVAL on failure
++ * <hr>
++ */
++static int ioc_handle_op_mode_reqt(unsigned long cmd, char *buf)
++{
++ struct ioh1588OperationModeIoctl *opmode_ioctl =
++ (struct ioh1588OperationModeIoctl *) buf;
++
++ if (IOCTL_1588_PORT_OPERATION_MODE_SET == cmd) {
++ IOH_DEBUG
++ ("ioc_handle_op_mode_reqt cmd=\
++ IOCTL_1588_PORT_OPERATION_MODE_SET \
++ invoking ioh_1588_ptp_operation_mode_set\n");
++ if (ioh_1588_ptp_operation_mode_set
++ (opmode_ioctl->ptpPort,
++ opmode_ioctl->ptpOpMode) != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl: \
++ ioh_1588_ptp_operation_mode_set failed\n");
++ return -EINVAL;
++ }
++ } else { /* IOCTL_1588_PORT_OPERATION_MODE_GET */
++
++ IOH_DEBUG
++ ("ioc_handle_op_mode_reqt cmd=\
++ IOCTL_1588_PORT_OPERATION_MODE_GET \
++ invoking ioh_1588_ptp_operation_mode_get\n");
++ if (ioh_1588_ptp_operation_mode_get
++ (opmode_ioctl->ptpPort,
++ &opmode_ioctl->ptpOpMode) != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_ioctl: \
++ ioh_1588_ptp_operation_mode_get failed\n");
++ return -EINVAL;
++ }
++ }
++ return 0;
++}
++
++/*! @ingroup InterfaceLayerNotifyRoutines
++ * @fn irqreturn_t ioh_1588_isr(int irq, void *p_data)
++ * @brief This function is the driver interrupt service routine.
++ *
++ * @param irq [IN] interrupt number
++ * @param p_data caller data
++ *
++ * @retval irqreturn_t
++ * - IRQ_HANDLED => interrupt handled,
++ * - IRQ_NONE => this device did not interrupt
++ */
++irqreturn_t ioh_1588_isr(int irq, void *p_data)
++{
++ unsigned long pending = 0;
++
++ (void)ioh_1588_interrupt_pending(&pending);
++ if (!pending) {
++ IOH_DEBUG("ioh_1588_isr: no pending interrupt\n");
++ return IRQ_NONE;
++ }
++
++ if (ioh_1588_handler() != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR, "ioh_1588_isr: ioh_1588_handler failed\n");
++ return IRQ_NONE;
++ }
++
++ return IRQ_HANDLED;
++}
++
++/*! @ingroup InterfaceLayerNotifyRoutines
++ * @fn void target_time_callback(struct ioh1588TimeValue tgt_time)
++ * @brief The callback function that is called from the HAL
++ * when the target time expired interrupt occurs
++ *
++ * @param tgt_time target time register timestamp
++ *
++ * @retval None
++ */
++void target_time_callback(struct ioh1588TimeValue tgt_time)
++{
++ /*
++ * copy the target time value to the global value to be read by the
++ * notify ioctl
++ */
++ (void)memcpy((void *)&ioh_1588_target_time, (const void *)&tgt_time,
++ sizeof(struct ioh1588TimeValue));
++
++ ioh_1588_devp->event_flags[TARG_TIME_EVENT_NUM] = 1;
++
++ IOH_DEBUG("target_time_callback: signalling the notify ioctl \
++ that the target time has expired\n");
++ /* signal the notify ioctl that the target time has expired */
++ wake_up_interruptible(&ioh_1588_devp->notify_evt[TARG_TIME_EVENT_NUM]);
++}
++
++/*! @ingroup InterfaceLayerNotifyRoutines
++ * @fn void auxiliary_time_callback(enum ioh1588AuxMode aux_mode,
++ * struct ioh1588TimeValue aux_time)
++ * @brief The callback function that is called from the HAL
++ * when an aux time interrupt has occurred
++ *
++ * @param aux_mode master, slave, or any
++ * @param aux_time aux time register timestamp
++ *
++ * @return None
++ */
++void auxiliary_time_callback(enum ioh1588AuxMode aux_mode, \
++ struct ioh1588TimeValue aux_time)
++{
++ /*
++ * copy the aux time value and aux mode to the global value
++ * to be read by the notify ioctl
++ */
++
++ ioh_1588_aux_time.auxMode = aux_mode;
++ (void)memcpy((void *)&ioh_1588_aux_time.auxTime,
++ (const void *)&aux_time, \
++ sizeof(struct ioh1588AuxTimeIoctl));
++
++ ioh_1588_devp->event_flags[AUX_TIME_EVENT_NUM] = 1;
++
++ IOH_DEBUG("auxiliary_time_callback: signalling the notify ioctl \
++ that the auxiliary time stamp has been set\n");
++
++ /*
++ * signal the notify ioctl that the aux timestamp has been set
++ */
++ wake_up_interruptible(&ioh_1588_devp->notify_evt[AUX_TIME_EVENT_NUM]);
++}
++
++/*! @ingroup InterfaceLayerNotifyRoutines
++ * @fn void pulse_per_sec_callback(unsigned long pps)
++ * @brief This is a callback function that will be called from the HAL
++ * when the pulse per second time has expired which generates an
++ * interrupt
++ *
++ * @param pps pulse per second register timestamp
++ *
++ * @retval None
++ */
++void pulse_per_sec_callback(unsigned long pps)
++{
++ ioh_1588_pps_time = pps;
++
++ ioh_1588_devp->event_flags[PPS_EVENT_NUM] = 1;
++
++ IOH_DEBUG("pulse_per_sec_callback: signalling the notify ioctl \
++ that the pulse per second time has expired\n");
++ /* signal the notify ioctl that the pulse per second time has expired */
++
++ wake_up_interruptible(&ioh_1588_devp->notify_evt[PPS_EVENT_NUM]);
++}
+diff -urN linux-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_main.h topcliff-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_main.h
+--- linux-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_main.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_main.h 2010-03-09 10:22:48.000000000 +0900
+@@ -0,0 +1,702 @@
++ /*!
++ * @file ioh_1588_main.h
++ * @brief
++ * This file contains the declarations of IEEE_1588_InterfaceLayer APIs
++ * @version 0.92
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * modified to support Intel IOH GE IEEE 1588 hardware
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ * derived from
++ * IEEE 1588 Time Synchronization Driver for Intel EP80579
++ * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ */
++
++#ifndef IOH_1588_MAIN_H
++#define IOH_1588_MAIN_H
++
++#ifdef __GNUC__
++#define UNUSED __attribute__ ((unused))
++#define UNUSED_ARG(x)
++#else
++#define UNUSED
++#define UNUSED_ARG(x) (void) x
++#endif
++
++#include <linux/ioctl.h>
++
++#define TRUE 1
++#define FALSE 0
++
++/*! @defgroup IEEE1588*/
++
++/*! @defgroup IEEE_1588_Global
++ * @ingroup IEEE1588
++ * @brief This group describes the global entities within
++ * the module.
++ * @remarks This group includes all the global data structures
++ * used within the modules. These are mainly used to
++ * store the device related information, so that it can
++ * be used by other functions of the modules.
++ * <hr>
++ * */
++
++/*! @defgroup IEEE_1588_PCILayer
++ * @ingroup IEEE1588
++ * @brief This group describes the PCI layer interface
++ * functionalities.
++ * @remarks This group contains the functions and data structures
++ * that are used to interface the module with PCI Layer
++ * subsystem of the Kernel.
++ * <hr>
++ * */
++
++/*! @defgroup IEEE_1588_InterfaceLayer
++ * @ingroup IEEE1588
++ * @brief This group describes the Driver interface functionalities.
++ * @remarks This group contains the data structures and functions used
++ * to interface the module driver with the kernel subsystem.
++ * <hr>
++ * */
++
++/*! @defgroup IEEE_1588_HALLayer
++ * @ingroup IEEE1588
++ * @brief This group describes the hardware specific functionalities.
++ * @remarks This group contains the functions and data structures used
++ * by the module to communicate with the hardware. These
++ * functions are device specific and designed according to the
++ * device specifications.
++ * <hr>
++ * */
++
++/*! @defgroup IEEE_1588_Utilities
++ * @ingroup IEEE1588
++ * @brief This group describes the utility functionalities.
++ * @remarks This group contains the functions and data structures used
++ * to assist the other functionalities in their operations.
++ * <hr>
++ **/
++
++/*! @defgroup IEEE_1588_PCILayerAPI
++ * @ingroup IEEE_1588_PCILayer
++ * @brief This group contains the API(functions) used as the PCI
++ * interface between the Kernel subsystem and the module.
++ *<hr>
++ **/
++
++/*! @defgroup IEEE_1588_PCILayerFacilitators
++ * @ingroup IEEE_1588_PCILayer
++ * @brief This group contains the data structures used by the PCI
++ * Layer APIs for their functionalities.
++ * <hr>
++ **/
++
++/*! @defgroup IEEE_1588_InterfaceLayerAPI
++ * @ingroup IEEE_1588_InterfaceLayer
++ * @brief This group contains the API(functions) used as the Driver
++ * interface between the Kernel subsystem and the module.
++ * <hr>
++ **/
++
++/*! @defgroup IEEE_1588_InterfaceLayerFacilitators
++ * @ingroup IEEE_1588_InterfaceLayer
++ * @brief This group contains the data structures used by the Driver
++ * interface APIs for their functionalities.
++ * <hr>
++ **/
++
++/*! @defgroup IEEE_1588_HALLayerAPI
++ * @ingroup IEEE_1588_HALLayer
++ * @brief This group contains the APIs(functions) used to interact with
++ * the hardware. These APIs act as an interface between the
++ * hardware and the other driver functions.
++ * <hr>
++ **/
++
++/*! @defgroup IEEE_1588_UtilitiesAPI
++ * @ingroup IEEE_1588_Utilities
++ * @brief This group contains the APIs(functions) used by other
++ * functions
++ * in their operations.
++ * <hr>
++ **/
++
++/* 1588 module ioctl command codes */
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOC_1588_BASE
++ * @brief The unique one byte data used to define
++ * the IOCTL commands
++ */
++#define IOC_1588_BASE 0x88
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_PORT_CONFIG_SET
++ * @brief Set the IEEE 1588 Ethernet port to Mater/Slave/All mode
++ */
++#define IOCTL_1588_PORT_CONFIG_SET \
++ _IOW(IOC_1588_BASE, 0, struct ioh1588PortCfgIoctl)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_PORT_CONFIG_GET
++ * @brief Get the IEEE 1588 Ethernet port Configuration mode
++ */
++#define IOCTL_1588_PORT_CONFIG_GET \
++ _IOWR(IOC_1588_BASE, 1, struct ioh1588PortCfgIoctl)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_RX_POLL
++ * @brief Poll for receive timestamp captured on the IEEE 1588 ethernet
++ * channel
++ */
++#define IOCTL_1588_RX_POLL _IOWR(IOC_1588_BASE, 2, struct ioh1588RxTxPollIoctl)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_TX_POLL
++ * @brief Poll for transmit timestamp captured on the IEEE 1588 ethernet
++ * channel
++ */
++#define IOCTL_1588_TX_POLL _IOWR(IOC_1588_BASE, 3, struct ioh1588RxTxPollIoctl)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_CAN_POLL
++ * @brief Poll for timestamp captured on the IEEE 1588 CAN channel
++ */
++#define IOCTL_1588_CAN_POLL _IOWR(IOC_1588_BASE, 4, struct ioh1588CANPollIoctl)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_SYS_TIME_GET
++ * @brief Get the IEEE 1588 system time from the module
++ */
++#define IOCTL_1588_SYS_TIME_GET _IOR(IOC_1588_BASE, 5, struct ioh1588TimeValue)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_SYS_TIME_SET
++ * @brief Set the IEEE 1588 system time on the module
++ */
++#define IOCTL_1588_SYS_TIME_SET _IOW(IOC_1588_BASE, 6, struct ioh1588TimeValue)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_TICK_RATE_SET
++ * @brief Set the frequency scaling value used on IEEE 1588 module
++ */
++#define IOCTL_1588_TICK_RATE_SET _IOW(IOC_1588_BASE, 7, unsigned long)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_TICK_RATE_GET
++ * @brief Get the frequency scaling value used on IEEE 1588 module
++ */
++#define IOCTL_1588_TICK_RATE_GET _IOR(IOC_1588_BASE, 8, unsigned long)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_TARG_TIME_INTRPT_ENABLE
++ * @brief Enable the target time reached/exceeded interrupt on IEEE 1588 module
++ */
++#define IOCTL_1588_TARG_TIME_INTRPT_ENABLE _IO(IOC_1588_BASE, 9)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_TARG_TIME_INTRPT_DISABLE
++ * @brief Disable the target time reached/exceeded interrupt on IEEE 1588 module
++ */
++#define IOCTL_1588_TARG_TIME_INTRPT_DISABLE _IO(IOC_1588_BASE, 10)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_TARG_TIME_POLL
++ * @brief Poll for the target time reached/exceeded condition on IEEE 1588
++ * module
++ */
++#define IOCTL_1588_TARG_TIME_POLL \
++ _IOR(IOC_1588_BASE, 11, struct ioh1588TimePollIoctl)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_TARG_TIME_SET
++ * @brief Set the target time to match on IEEE 1588 module
++ */
++#define IOCTL_1588_TARG_TIME_SET \
++ _IOW(IOC_1588_BASE, 12, struct ioh1588TimeValue)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_TARG_TIME_GET
++ * @brief Get the target time currently set on IEEE 1588 module
++ */
++#define IOCTL_1588_TARG_TIME_GET \
++ _IOR(IOC_1588_BASE, 13, struct ioh1588TimeValue)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_AUX_TIME_INTRPT_ENABLE
++ * @brief Enable the auxiliary time captured interrupt on IEEE 1588 module
++ */
++#define IOCTL_1588_AUX_TIME_INTRPT_ENABLE \
++ _IOW(IOC_1588_BASE, 14, enum ioh1588AuxMode)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_AUX_TIME_INTRPT_DISABLE
++ * @brief Disable the auxiliary time captured interrupt on IEEE 1588 module
++ */
++#define IOCTL_1588_AUX_TIME_INTRPT_DISABLE \
++ _IOW(IOC_1588_BASE, 15, enum ioh1588AuxMode)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_AUX_TIME_POLL
++ * @brief Poll for the auxiliary time captured on IEEE 1588 module
++ */
++#define IOCTL_1588_AUX_TIME_POLL \
++ _IOWR(IOC_1588_BASE, 16, struct ioh1588TimePollIoctl)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_RESET
++ * @brief Reset the IEEE 1588 module
++ */
++#define IOCTL_1588_RESET _IO(IOC_1588_BASE, 17)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_CHNL_RESET
++ * @brief Reset the IEEE 1588 channel
++ */
++#define IOCTL_1588_CHNL_RESET _IOW(IOC_1588_BASE, 18, enum ioh1588PTPPort)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_STATS_GET
++ * @brief Get the timestamp captured counters from the IEEE 1588 module
++ */
++#define IOCTL_1588_STATS_GET _IOR(IOC_1588_BASE, 19, struct ioh1588Stats)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_STATS_RESET
++ * @brief Reset the timestamp captured counters maintained for IEEE 1588 module
++ */
++#define IOCTL_1588_STATS_RESET _IO(IOC_1588_BASE, 20)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_SHOW_ALL
++ * @brief Display the register contents of IEEE 1588 module
++ */
++#define IOCTL_1588_SHOW_ALL _IO(IOC_1588_BASE, 21)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_AUX_TARG_TIME_INTRPT_ENABLE
++ * @brief Enable Auxiliary target time reached interrupt - not supported
++ */
++#define IOCTL_1588_AUX_TARG_TIME_INTRPT_ENABLE _IO(IOC_1588_BASE, 22)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_AUX_TARG_TIME_INTRPT_DISABLE
++ * @brief Disable Auxiliary target time reached interrupt - not supported
++ */
++#define IOCTL_1588_AUX_TARG_TIME_INTRPT_DISABLE _IO(IOC_1588_BASE, 23)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_AUX_TARG_TIME_POLL
++ * @brief Poll for Auxiliary target time captured - not supported
++ */
++#define IOCTL_1588_AUX_TARG_TIME_POLL \
++ _IOR(IOC_1588_BASE, 24, struct ioh1588TimePollIoctl)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_AUX_TARG_TIME_SET
++ * @brief Set Auxiliary target time - not supported
++ */
++#define IOCTL_1588_AUX_TARG_TIME_SET \
++ _IOW(IOC_1588_BASE, 25, struct ioh1588TimeValue)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_AUX_TARG_TIME_GET
++ * @brief Get Auxiliary target time currently set - not supported
++ */
++#define IOCTL_1588_AUX_TARG_TIME_GET \
++ _IOR(IOC_1588_BASE, 26, struct ioh1588TimeValue)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_PULSE_PER_SEC_INTRPT_ENABLE
++ * @brief Enable Pulse per second match interrupt
++ */
++#define IOCTL_1588_PULSE_PER_SEC_INTRPT_ENABLE _IO(IOC_1588_BASE, 27)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_PULSE_PER_SEC_INTRPT_DISABLE
++ * @brief Disable Pulse per second match interrupt
++ */
++#define IOCTL_1588_PULSE_PER_SEC_INTRPT_DISABLE _IO(IOC_1588_BASE, 28)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_TARG_TIME_NOTIFY
++ * @brief Block till a target time reached interrupt occurs
++ */
++#define IOCTL_1588_TARG_TIME_NOTIFY \
++ _IOR(IOC_1588_BASE, 29, struct ioh1588TimeValue)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_AUX_TIME_NOTIFY
++ * @brief Block till an auxiliary time captured interrupt occurs
++ */
++#define IOCTL_1588_AUX_TIME_NOTIFY \
++ _IOR(IOC_1588_BASE, 30, struct ioh1588AuxTimeIoctl)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_AUX_TARG_TIME_NOTIFY
++ * @brief Block till an auxiliary target time reached interrupt occurs - not
++ * supported
++ */
++#define IOCTL_1588_AUX_TARG_TIME_NOTIFY \
++ _IOR(IOC_1588_BASE, 31, struct ioh1588TimeValue)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_PULSE_PER_SEC_NOTIFY
++ * @brief Block till a pulse per second match interrupt occurs
++ */
++#define IOCTL_1588_PULSE_PER_SEC_NOTIFY _IOR(IOC_1588_BASE, 32, unsigned long)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_TARG_TIME_CLR_NOTIFY
++ * @brief Unblock a process waiting on target time reached interrupt
++ */
++#define IOCTL_1588_TARG_TIME_CLR_NOTIFY _IO(IOC_1588_BASE, 33)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_AUX_TIME_CLR_NOTIFY
++ * @brief Unblock a process waiting on auxiliary time captured interrupt
++ */
++#define IOCTL_1588_AUX_TIME_CLR_NOTIFY _IO(IOC_1588_BASE, 34)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_AUX_TARG_TIME_CLR_NOTIFY
++ * @brief Unblock a process waiting on an auxiliary target time reached
++ * interrupt - not supported
++ */
++#define IOCTL_1588_AUX_TARG_TIME_CLR_NOTIFY _IO(IOC_1588_BASE, 35)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_PULSE_PER_SEC_CLR_NOTIFY
++ * @brief Unblock a process waiting on a pulse per second match interrupt
++ */
++#define IOCTL_1588_PULSE_PER_SEC_CLR_NOTIFY _IO(IOC_1588_BASE, 36)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_PULSE_PER_SEC_TIME_GET
++ * @brief Get the currently specified pulse per second match time
++ */
++#define IOCTL_1588_PULSE_PER_SEC_TIME_GET _IOR(IOC_1588_BASE, 37, unsigned long)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_PULSE_PER_SEC_TIME_SET
++ * @brief Specify the pulse per second match time
++ */
++#define IOCTL_1588_PULSE_PER_SEC_TIME_SET _IOW(IOC_1588_BASE, 38, unsigned long)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_PORT_VERSION_SET
++ * @brief Set the PTP version to be used on the given PTP channel
++ */
++#define IOCTL_1588_PORT_VERSION_SET \
++ _IOW(IOC_1588_BASE, 39, struct ioh1588VersionIoctl)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_PORT_VERSION_GET
++ * @brief Get the PTP version used on the given PTP channel
++ */
++#define IOCTL_1588_PORT_VERSION_GET \
++ _IOWR(IOC_1588_BASE, 40, struct ioh1588VersionIoctl)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_PORT_OPERATION_MODE_SET
++ * @brief Set the PTP messages that are matched by the module on the given PTP
++ * channel
++ */
++#define IOCTL_1588_PORT_OPERATION_MODE_SET \
++ _IOW(IOC_1588_BASE, 41, struct ioh1588OperationModeIoctl)
++
++/*! @ingroup IEEE_1588_InterfaceLayer
++ * @def IOCTL_1588_PORT_OPERATION_MODE_GET
++ * @brief Get the PTP messages that are currently matched by the module on given
++ * PTP channel
++ */
++#define IOCTL_1588_PORT_OPERATION_MODE_GET \
++ _IOWR(IOC_1588_BASE, 42, struct ioh1588OperationModeIoctl)
++
++/**
++ * @ingroup IEEE_1588_InterfaceLayer
++ * @enum ioh1588PTPPort
++ * @brief IEEE 1588 PTP Communication Port(Channel)
++ */
++enum ioh1588PTPPort { /* ioh1588PTPPort */
++ IOH_1588_GBE_0_1588PTP_PORT, /**< PTP Communication Port on GBE-0 */
++ IOH_1588_CAN_0_1588PTP_PORT, /**< PTP Communication Port on CAN-0 */
++ IOH_1588_PORT_INVALID /**< Invalid PTP Communication Port */
++};
++
++/**
++ * @ingroup IEEE_1588_InterfaceLayer
++ * @enum ioh1588PTPPortMode
++ * @brief PTP Port mode - Master or Slave or any
++ */
++enum ioh1588PTPPortMode { /* ioh1588PTPPortMode */
++ IOH_1588PTP_PORT_MASTER, /**< Master Mode */
++ IOH_1588PTP_PORT_SLAVE, /**< Slave Mode */
++ IOH_1588PTP_PORT_ANYMODE, /**< Timestamp all messages */
++ IOH_1588PTP_PORT_MODE_INVALID /**< Invalid PTP Port Mode */
++};
++
++/**
++ * @ingroup IEEE_1588_InterfaceLayer
++ * @struct ioh1588PortCfgIoctl
++ * @brief Struct to pass port config data for ioctl call
++ */
++struct ioh1588PortCfgIoctl { /* ioh1588PortCfgIoctl */
++ enum ioh1588PTPPort ptpPort; /**< IEEE 1588 PTP Communication Port */
++ enum ioh1588PTPPortMode ptpPortMode; /**< Master, Slave,
++ or Any mode */
++};
++
++/**
++ * @ingroup IEEE_1588_InterfaceLayer
++ * @enum ioh1588PTPMsgType
++ * @brief PTP Messages types that can be detected on communication port
++ */
++enum ioh1588PTPMsgType { /* ioh1588PTPMsgType */
++ IOH_1588PTP_MSGTYPE_SYNC, /**< PTP Sync message sent by Master or
++ received by Slave */
++ IOH_1588PTP_MSGTYPE_DELAYREQ, /**< PTP Delay_Req message sent by Slave
++ or received by Master */
++ IOH_1588PTP_MSGTYPE_UNKNOWN /**< Other PTP and non-PTP message sent
++ or received by both Master
++ and/or Slave */
++};
++
++/**
++ * @ingroup IEEE_1588_InterfaceLayer
++ * @struct ioh1588TimeValue
++ * @brief Struct to hold 64 bit SystemTime and TimeStamp values
++ */
++struct ioh1588TimeValue { /* ioh1588TimeValue */
++ unsigned long timeValueLowWord; /**< Lower 32 bits of time value */
++ unsigned long timeValueHighWord; /**< Upper 32 bits of time value */
++};
++
++/**
++ * @ingroup IEEE_1588_InterfaceLayer
++ * @struct ioh1588Uuid
++ * @brief Struct to hold 48 bit UUID values captured in Sync or Delay_Req
++ * messages
++ */
++struct ioh1588Uuid{ /* ioh1588Uuid */
++ unsigned long uuidValueLowWord; /**< The lower 32 bits of UUID */
++ unsigned long uuidValueHighHalfword; /**< The upper 16 bits of UUID */
++};
++
++/**
++ * @ingroup IEEE_1588_InterfaceLayer
++ * @struct ioh1588PtpMsgData
++ * @brief Struct for data from the PTP message returned when TimeStamp
++ * available
++ */
++struct ioh1588PtpMsgData{ /* ioh1588PtpMsgData */
++ enum ioh1588PTPMsgType ptpMsgType; /**< PTP Messages type */
++ struct ioh1588TimeValue ptpTimeStamp; /**< 64 bit TimeStamp value from
++ PTP Message */
++ struct ioh1588Uuid ptpUuid; /**< 48 bit UUID value from the
++ PTP Message */
++ unsigned int ptpSequenceNumber; /**< 16 bit Sequence Number from PTP
++ Message */
++};
++
++/**
++ * @ingroup IEEE_1588_InterfaceLayer
++ * @struct ioh1588RxTxPollIoctl
++ * @brief Struct to pass PTP message data for ioctl call
++ */
++struct ioh1588RxTxPollIoctl{ /* ioh1588RxTxPollIoctl */
++ enum ioh1588PTPPort ptpPort; /**< IEEE 1588 PTP Communication Port */
++ struct ioh1588PtpMsgData ptpMsgData; /**< PTP message data */
++};
++
++/**
++ * @ingroup IEEE_1588_InterfaceLayer
++ * @struct ioh1588CANPollIoctl
++ * @brief Struct to pass CAN timestamp data for ioctl call
++ */
++struct ioh1588CANPollIoctl{ /* ioh1588CANPollIoctl */
++ enum ioh1588PTPPort ptpPort; /**< IEEE 1588 PTP Communication
++ Port */
++ struct ioh1588TimeValue ptpTimeStamp; /**< CAN PTP timestamp */
++};
++
++/**
++ * @ingroup IEEE_1588_InterfaceLayer
++ * @enum ioh1588AuxMode
++ * @brief Master or Slave Auxiliary Time Stamp (Snap Shot)
++ */
++enum ioh1588AuxMode{ /* ioh1588AuxMode */
++ IOH_1588_AUXMODE_MASTER, /**< Auxiliary Master Mode */
++ IOH_1588_AUXMODE_SLAVE, /**< Auxiliary Slave Mode */
++ IOH_1588_AUXMODE_INVALID /**< Invalid Auxiliary Mode */
++};
++
++/**
++ * @ingroup IEEE_1588_InterfaceLayer
++ * @struct ioh1588TimePollIoctl
++ * @brief Struct to pass timestamp data for ioctl call
++ */
++struct ioh1588TimePollIoctl { /* ioh1588TimePollIoctl */
++ unsigned long pollFlag; /**< time event */
++ struct ioh1588TimeValue timeVal; /**< timestamp value */
++ enum ioh1588AuxMode auxMode; /**< Master or Slave mode */
++};
++
++/**
++ * @ingroup IEEE_1588_InterfaceLayer
++ * @struct ioh1588Stats
++ * @brief Provides the number of times timestamps are locked for rx and tx
++ * PTP
++ * messages. The counters are reinitialized when the module is
++ * reset.
++ */
++struct ioh1588Stats { /* ioh1588Stats */
++ unsigned long rxMsgs; /**< Count of timestamps for received PTP Msgs */
++ unsigned long txMsgs; /**< Count of timestamps for transmitted PTP
++ Msgs */
++};
++
++/**
++ * @ingroup IEEE_1588_InterfaceLayer
++ * @struct ioh1588AuxTimeIoctl
++ * @brief Struct to pass aux time data for ioctl call
++ */
++struct ioh1588AuxTimeIoctl { /* ioh1588AuxTimeIoctl */
++ enum ioh1588AuxMode auxMode; /**< aux mode: master or slave */
++ struct ioh1588TimeValue auxTime; /**< aux time snapshot */
++};
++
++/**
++ * @ingroup IEEE_1588_InterfaceLayer
++ * @enum ioh1588PTPVersion
++ * @brief 1588 PTP version value that can be detected on communication
++ * port
++ */
++enum ioh1588PTPVersion { /* ioh1588PTPVersion */
++ IOH_1588PTP_VERSION_0, /**< support version 1 only */
++ IOH_1588PTP_VERSION_1, /**< support both version 1 and version
++ 2 */
++ IOH_1588PTP_VERSION_INVALID /**< Invalid version */
++};
++
++/**
++ * @ingroup IEEE_1588_InterfaceLayer
++ * @struct ioh1588VersionIoctl
++ * @brief Struct to pass timestamp data for ioctl call
++ */
++struct ioh1588VersionIoctl { /* ioh1588VersionIoctl */
++ enum ioh1588PTPPort ptpPort; /**< IEEE 1588 PTP Communication Port */
++ enum ioh1588PTPVersion ptpVersion; /**< version value */
++};
++
++/**
++ * @ingroup IEEE_1588_InterfaceLayer
++ * @enum ioh1588PTPOperationMode
++ * @brief 1588 PTP operation mode value that can be detected on
++ * communication port
++ */
++enum ioh1588PTPOperationMode { /* ioh1588PTPOperationMode */
++ IOH_1588PTP_OP_MODE_SYNC_DELAYREQ_MSGS,
++ /**< timestamp version 1 SYNC and DELAYED_REQ only */
++ IOH_1588PTP_OP_MODE_V1_ALL_MSGS,
++ /**< timestamp version 1 all messages */
++ IOH_1588PTP_OP_MODE_V1_V2_EVENT_MSGS,
++ /**< timestamp version 1 and 2 event messages only */
++ IOH_1588PTP_OP_MODE_V1_V2_ALL_MSGS,
++ /**< timestamp version 1 and 2 all messages */
++ IOH_1588PTP_OP_MODE_INVALID /**< Invalid mode */
++};
++
++/**
++ * @ingroup IEEE_1588_InterfaceLayer
++ * @struct ioh1588OperationModeIoctl
++ * @brief Struct to pass timestamp data for ioctl call
++ */
++struct ioh1588OperationModeIoctl { /* ioh1588OperationModeIoctl */
++ enum ioh1588PTPPort ptpPort; /**< IEEE 1588 PTP Communication
++ Port */
++ enum ioh1588PTPOperationMode ptpOpMode; /**< IEEE 1588 operation mode */
++};
++
++/**
++ * @ingroup IEEE_1588_Global
++ * @enum ioh_status
++ * @brief The status as returned from the HAL
++ */
++enum ioh_status { /* ioh_status */
++ IOH_1588_SUCCESS, /**< operation successful */
++ IOH_1588_INVALIDPARAM, /**< parameter passed is invalid */
++ IOH_1588_NOTIMESTAMP, /**< no time stamp available when
++ polled */
++ IOH_1588_INTERRUPTMODEINUSE, /**< while operating in interrupt mode,
++ polling not permitted */
++ IOH_1588_FAILED, /**< Internal error in driver */
++ IOH_1588_UNSUPPORTED, /**< Implementation does not support
++ this feature */
++};
++
++/* IEEE 1588 registers to save and restore on suspend/resume */
++
++/** @ingroup IEEE_1588_Global
++ * @struct ioh_1588_regs_set
++ * @brief IEEE 1588 registers to save and restore on suspend/resume
++ */
++struct ioh_1588_regs_set {
++ unsigned long ts_control;
++ unsigned long ts_event; /* not saved, cleared on restore */
++ unsigned long ts_addend;
++ unsigned long ts_accum; /* not saved/restored */
++ unsigned long ts_test; /* not saved/restored */
++ unsigned long ts_compare;
++ unsigned long ts_syslo;
++ unsigned long ts_syshi;
++ unsigned long ts_tgtlo;
++ unsigned long ts_tgthi;
++ unsigned long ts_asmslo; /* not saved/restored */
++ unsigned long ts_asmshi; /* not saved/restored */
++ unsigned long ts_ammslo; /* not saved/restored */
++ unsigned long ts_ammshi; /* not saved/restored */
++
++ /* Ethernet */
++ unsigned long ts_cc;
++ unsigned long ts_ce; /* not saved, cleared on restore */
++ unsigned long ts_xslo; /* not saved/restored */
++ unsigned long ts_xshi; /* not saved/restored */
++ unsigned long ts_rslo; /* not saved/restored */
++ unsigned long ts_rshi; /* not saved/restored */
++ unsigned long ts_uuidlo; /* not saved/restored */
++ unsigned long ts_uuidhi; /* not saved/restored */
++
++ /* CAN */
++ unsigned long ts_cce; /* not saved, cleared on restore */
++ unsigned long ts_cxslo; /* not saved/restored */
++ unsigned long ts_cxshi; /* not saved/restored */
++
++ unsigned long ts_sel; /* Selector */
++ unsigned char ts_sti[6]; /* station addresses */
++};
++
++/* callback function prototypes */
++extern void target_time_callback(struct ioh1588TimeValue targetTime);
++extern void auxiliary_time_callback(enum ioh1588AuxMode aux_mode, \
++ struct ioh1588TimeValue auxTime);
++extern void pulse_per_sec_callback(unsigned long pps);
++#endif /* IOH_1588_H */
+diff -urN linux-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_pci.c topcliff-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_pci.c
+--- linux-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_pci.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_pci.c 2010-03-09 10:32:42.000000000 +0900
+@@ -0,0 +1,700 @@
++ /*!
++ * @file ioh_1588_pci.c
++ * @brief
++ * This file contains the definitions of IEEE_1588_PCILayer APIs
++ * @version 0.92
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * modified to support Intel IOH GE IEEE 1588 hardware
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ * derived from
++ * IEEE 1588 Time Synchronization Driver for Intel EP80579
++ * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ */
++
++#include "pch_1588_pci.h"
++#include "pch_1588_hal.h"
++#include "pch_debug.h"
++
++#ifdef CONFIG_PM
++static int ioh_1588_suspend(struct pci_dev *pdev, pm_message_t state);
++static int ioh_1588_resume(struct pci_dev *pdev);
++#endif
++static int ioh_1588_probe(struct pci_dev *pdev, const struct pci_device_id *id);
++static void ioh_1588_remove(struct pci_dev *pdev);
++
++/*! @ingroup IEEE_1588_PCILayerFacilitators
++ * @struct ioh_1588_pcidev_id
++ * @brief PCI device ids supported by this driver
++ * @remarks This structure is used to specify the Ids of the
++ * devices supported by the driver module during
++ * registering of the module as PCI driver.The values
++ * within the structure is maintained by the kernel
++ * subsystem to recognize the individual devices
++ * when they are attached to the system. Depending
++ * on this the corresponding device functionalities
++ * such as probe, remove, suspend,... are invoked.
++ *
++ * @note This structure contains the Vendor and device
++ * IDs of the device supported by the driver module.
++ *
++ * @see
++ * - ioh_1588_pcidev
++ * <hr>
++ */
++static const struct pci_device_id ioh_1588_pcidev_id[] = {
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_IOH_1588)},
++ {0},
++};
++
++/* Linux pci operations */
++/*! @ingroup IEEE_1588_PCILayerFacilitators
++* @struct ioh_1588_pcidev
++* @brief Store the references of PCI driver interfaces to kernel.
++* @remarks This structure is used to specify the driver specific
++* functionalities to the kernel subsystem. The kernel invokes
++* these functionalities depending on the supported device and
++* the events that occurred.
++*
++* @note This structure is registered with the kernel via the call
++* pci_register_driver from @ref ioh_1588_init
++* @see
++* - ioh_1588_init
++* - ioh_1588_exit
++*<hr>
++*/
++
++static struct pci_driver ioh_1588_pcidev = {
++ .name = DRIVER_NAME,
++ .id_table = ioh_1588_pcidev_id,
++ .probe = ioh_1588_probe,
++ .remove = ioh_1588_remove,
++#ifdef CONFIG_PM
++ .suspend = ioh_1588_suspend,
++ .resume = ioh_1588_resume,
++#endif
++};
++
++/* instance of driver data structure */
++/*! @ingroup IEEE_1588_PCILayerFacilitators
++ * @var ioh_1588_dev
++ * @brief instance of driver data structure
++ * @see
++ * - ioh_1588_probe
++ * - ioh_1588_init
++ * - ioh_1588_remove
++ * - ioh_1588_suspend
++ * - ioh_1588_resume
++ * <hr>
++ */
++static struct ioh_1588_dev ioh_1588_dev;
++
++/*! @ingroup IEEE_1588_Global
++ * @var ioh_1588_devp
++ * @brief Pointer to driver data structure
++ * @see
++ * - ioc_handle_notify
++ * - ioc_handle_clr_notify
++ * - ioc_handle_reset
++ * - target_time_callback
++ * - auxiliary_time_callback
++ * - pulse_per_sec_callback
++ * - ioh_1588_open
++ * <hr>
++ */
++
++struct ioh_1588_dev *ioh_1588_devp = &ioh_1588_dev;
++
++/*! @ingroup IEEE_1588_PCILayerFacilitators
++ * @struct ioh_1588_params_
++ * @brief structure to hold the module parameters
++ * @see
++ * - ioh_1588_init
++ * - ioh_1588_probe
++ * <hr>
++ */
++static struct ioh_1588_params_ {
++ /* module parameters */
++ int eth_enable; /**< IEEE 1588 on ethernet interface
++ 0=Disabled 1=Enabled (default 1)*/
++ int can_enable; /**< IEEE 1588 on CAN interface
++ 0=Disabled 1=Enabled (default 0)*/
++ int major; /**< IEEE 1588 device major number to
++ use (default system assigned)*/
++ unsigned char station[STATION_ADDR_LEN]; /**< IEEE 1588 station address
++ to use - colon separated hex values*/
++} ioh_1588_param = {
++1, 0, 0, "00:00:00:00:00:00"};
++
++module_param_named(eth_enable, ioh_1588_param.eth_enable, bool, 0444);
++MODULE_PARM_DESC(eth_enable,
++ "IEEE 1588 on ethernet interface 0=Disabled 1=Enabled \
++ (default 1)");
++
++module_param_named(can_enable, ioh_1588_param.can_enable, bool, 0444);
++MODULE_PARM_DESC(can_enable,
++ "IEEE 1588 on CAN interface 0=Disabled 1=Enabled (default 0)");
++
++module_param_named(major, ioh_1588_param.major, int, 0444);
++MODULE_PARM_DESC(major,
++ "IEEE 1588 device major number to use (default system \
++ assigned)");
++
++module_param_string(station, ioh_1588_param.station,
++ sizeof ioh_1588_param.station, 0444);
++MODULE_PARM_DESC(station,
++ "IEEE 1588 station address to use - column separated hex \
++ values");
++
++/*!@ingroup IEEE_1588_PCILayerAPI
++ * @fn int ioh_1588_probe(struct pci_dev *pdev,const struct pci_device_id *id)
++ * @brief
++ * This function is called right after insmod by the PCI core. Here we enable
++ * the
++ * device to make the device's resources live. We can then read PCI CFG space
++ * and init the device.
++ *
++ * @remarks
++ * The main tasks performed by this method are:
++ * - Allocate PCI resource required using
++ * request_mem_region API
++ * and map the memory to kernel virtual space using
++ * ioremap API.
++ * - Enable the PCI device using pci_enable_device
++ * API.
++ * - Initialize global variables and wait queues
++ * using
++ * init_waitqueue_head API
++ * - Set the memory address to be used by the HAL
++ * using ioh_1588_blpl_base_address_set API
++ * - Register the interrupt handler using request_irq
++ * API
++ * - Register the character driver using
++ * register_chrdev_region/
++ * alloc_chrdev_region APIs based on whether major
++ * number has been
++ * provided by the user or not.
++ * - Add the device to the system using the APIs
++ * cdev_init and
++ * cdev_add.
++ * - If any of the above calls fail, undo the steps
++ * performed before the failure and return
++ * appropriate error
++ * status.
++ * - Depending on the parameters passed during module
++ * load,
++ * enable/disable PTP clock synchronization control
++ * on GbE
++ * (using ioh_1588_eth_enable/ioh_1588_eth_disable
++ * API). /CAN
++ * (using ioh_1588_can_enable/ioh_1588_can_disable
++ * API) channel.
++ * - Set the Station address as specified in the
++ * module
++ * parameter using ioh_1588_set_station_address
++ * API
++ * -Note: For A0/A1 sample, test mode setting is enabled for
++ * the 64 bit System Time Register. This is a work around
++ * for
++ * the non continuous value in the 64 bit System Time
++ * Register
++ * consisting of High(32bit) / Low(32bit)
++ *
++ *
++ * @param pdev [INOUT] pci device structure
++ * @param id [IN] list of devices supported by this driver
++ *
++ * @return int
++ * - 0 Success
++ * - -ENODEV request_mem_region or ioremap or
++ * pci_enable_device or request_irq error
++ *
++ * @see
++ * - ioh_1588_pcidev
++ * <hr>
++ */
++
++static int __devinit
++ioh_1588_probe(struct pci_dev *pdev, const struct pci_device_id *id)
++{
++ int ret = 0;
++ dev_t devno;
++ int i;
++
++ UNUSED_ARG(id);
++
++ /* enable the 1588 pci device */
++ ret = pci_enable_device(pdev);
++ if (ret != 0) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_probe:could not enable the pci device\n");
++ goto exit_error;
++ }
++ IOH_DEBUG("ioh_1588_probe:pci_enable_device success\n");
++
++ ioh_1588_dev.mem_base = pci_resource_start(pdev, IO_MEM_BAR);
++
++ if (!ioh_1588_dev.mem_base) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_probe: could not locate IO memory address\n");
++ /*disable the pci device */
++ pci_disable_device(pdev);
++ ret = -ENODEV;
++ goto exit_error;
++ }
++ IOH_DEBUG("ioh_1588_probe:allocated IO memory address\n");
++
++	/* retrieve the available length of the IO memory space */
++ ioh_1588_dev.mem_size = pci_resource_len(pdev, IO_MEM_BAR);
++
++ /* allocate the memory for the device registers */
++ if (!request_mem_region
++ (ioh_1588_dev.mem_base, ioh_1588_dev.mem_size, "1588_regs")) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_probe: could not allocate register memory \
++ space\n");
++ ioh_1588_dev.mem_base = 0;
++ /*disable the pci device */
++ pci_disable_device(pdev);
++ ret = -EBUSY;
++ goto exit_error;
++ }
++ IOH_DEBUG("ioh_1588_probe:allocated register memory space\n");
++
++ /* get the virtual address to the 1588 registers */
++ ioh_1588_dev.mem_virt =
++ ioremap(ioh_1588_dev.mem_base, ioh_1588_dev.mem_size);
++
++ if (!ioh_1588_dev.mem_virt) {
++ pci_disable_device(pdev);
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_probe: Could not get virtual address\n");
++ /*release memory acquired for device registers */
++ release_mem_region(ioh_1588_dev.mem_base,
++ ioh_1588_dev.mem_size);
++ ioh_1588_dev.mem_base = 0;
++ /*disable the pci device */
++ pci_disable_device(pdev);
++ ret = -ENOMEM;
++ goto exit_error;
++ }
++ IOH_DEBUG("ioh_1588_probe:obtained virtual address=%p\n",
++ ioh_1588_dev.mem_virt);
++
++ for (i = 0; i < NUM_EVENTS; i++) {
++ init_waitqueue_head(&ioh_1588_dev.notify_evt[i]);
++ ioh_1588_dev.event_flags[i] = 0;
++ }
++ IOH_DEBUG("ioh_1588_probe:initialized wait queue heads\n");
++
++ ret =
++ ioh_1588_blpl_base_address_set((unsigned int)ioh_1588_dev.mem_virt);
++ if (ret != IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_probe: ioh_1588_blpl_base_address_set \
++ failed\n");
++ /*unmap io */
++ iounmap(ioh_1588_dev.mem_virt);
++ ioh_1588_dev.mem_virt = 0;
++ /*release memory acquired for device */
++ release_mem_region(ioh_1588_dev.mem_base,
++ ioh_1588_dev.mem_size);
++ ioh_1588_dev.mem_base = 0;
++ /*disable device */
++ pci_disable_device(pdev);
++ goto exit_error;
++ }
++ IOH_DEBUG("ioh_1588_probe:set base address\n");
++
++ ret = request_irq(pdev->irq, &ioh_1588_isr, IRQF_SHARED, DRIVER_NAME,
++ &ioh_1588_dev);
++ if (ret != 0) {
++ IOH_LOG(KERN_ERR, "ioh_1588_probe: failed to get irq %d\n",
++ pdev->irq);
++ /*unmap io */
++ iounmap(ioh_1588_dev.mem_virt);
++ ioh_1588_dev.mem_virt = 0;
++ /*release memory acquired for device */
++ release_mem_region(ioh_1588_dev.mem_base,
++ ioh_1588_dev.mem_size);
++ ioh_1588_dev.mem_base = 0;
++ /*disable device */
++ pci_disable_device(pdev);
++ goto exit_error;
++ }
++ IOH_DEBUG("ioh_1588_probe:registered IRQ handler successfully\n");
++
++ /*register the module */
++ if (ioh_1588_param.major != 0) { /* user specified a major
++ number, use it */
++ IOH_DEBUG("ioh_1588_probe:using user specified major number\n");
++ devno = MKDEV(ioh_1588_param.major, 0);
++ ret = register_chrdev_region(devno, 1, DRIVER_NAME);
++ ioh_1588_dev.devno = devno; /* store it */
++ } else { /* request and reserve a device number */
++
++ IOH_DEBUG
++ ("ioh_1588_probe:dynamically allocating major number\n");
++ ret =
++ alloc_chrdev_region(&ioh_1588_dev.devno, 0, 1, DRIVER_NAME);
++ devno = MKDEV(MAJOR(ioh_1588_dev.devno), 0);
++ }
++ if (ret < 0) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_probe: Could not register module (major %d)\
++ \n",
++ ioh_1588_param.major);
++ /*free irq */
++ free_irq(pdev->irq, &ioh_1588_dev);
++ /*unmap io */
++ iounmap(ioh_1588_dev.mem_virt);
++ ioh_1588_dev.mem_virt = 0;
++ /*release memory acquired for device */
++ release_mem_region(ioh_1588_dev.mem_base,
++ ioh_1588_dev.mem_size);
++ ioh_1588_dev.mem_base = 0;
++ /*disable device */
++ pci_disable_device(pdev);
++ goto exit_error;
++ }
++ IOH_DEBUG("ioh_1588_probe:registered the module(major %d)\n",
++ ioh_1588_param.major);
++
++ /* init cdev struct for adding device to kernel */
++ cdev_init(&ioh_1588_dev.cdev, &ioh_1588_fops);
++ ioh_1588_dev.cdev.owner = THIS_MODULE;
++ ioh_1588_dev.cdev.ops = &ioh_1588_fops;
++
++ ret = cdev_add(&ioh_1588_dev.cdev, devno, 1);
++ if (ret != 0) {
++ IOH_LOG(KERN_ERR, "ioh_1588_probe: cdev_add failed\n");
++ /*free region allocated for char device */
++ unregister_chrdev_region(ioh_1588_dev.devno, 1);
++ /*free irq */
++ free_irq(pdev->irq, &ioh_1588_dev);
++ /*unmap io */
++ iounmap(ioh_1588_dev.mem_virt);
++ ioh_1588_dev.mem_virt = 0;
++ /*release memory acquired for device */
++ release_mem_region(ioh_1588_dev.mem_base,
++ ioh_1588_dev.mem_size);
++ ioh_1588_dev.mem_base = 0;
++ /*disable device */
++ pci_disable_device(pdev);
++ goto exit_error;
++ }
++ IOH_DEBUG("ioh_1588_probe: cdev_add successful\n");
++
++ ioh_1588_dev.initialized = 1;
++ /* indicate success */
++ ioh_1588_dev.irq = pdev->irq;
++
++ /*reset the ieee1588 h/w */
++ ioh_1588_reset();
++
++ if (ioh_1588_param.eth_enable != 0) { /* Enable by default */
++ IOH_DEBUG("ioh_1588_probe: invoking ioh_1588_eth_enable \
++ to enable ethernet\n");
++ (void)ioh_1588_eth_enable();
++ } else {
++ IOH_DEBUG("ioh_1588_probe: invoking ioh_1588_eth_disable \
++ to disable ethernet\n");
++ (void)ioh_1588_eth_disable();
++ }
++ if (ioh_1588_param.can_enable == 1) { /* Enable if requested */
++ IOH_DEBUG("ioh_1588_probe: invoking ioh_1588_can_enable \
++ to enable CAN\n");
++ (void)ioh_1588_can_enable();
++ } else {
++ IOH_DEBUG("ioh_1588_probe: invoking ioh_1588_can_disable \
++ to disable CAN\n");
++ (void)ioh_1588_can_disable();
++ }
++ if (strcmp(ioh_1588_param.station, "00:00:00:00:00:00") != 0) {
++ if (ioh_1588_set_station_address(ioh_1588_param.station) !=
++ IOH_1588_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_probe: Invalid station address \
++ parameter\n"
++ "Module loaded; But, station address not set \
++ correctly\n");
++ }
++ }
++ IOH_DEBUG("ioh_1588_probe: probe succeeded\n");
++
++ return 0;
++
++exit_error:
++
++ IOH_LOG(KERN_ERR, "ioh_1588_probe: probe failed\n");
++ return ret;
++}
++
++/*! @ingroup IEEE_1588_PCILayerAPI
++ * @fn void ioh_1588_remove(struct pci_dev *pdev)
++ * @brief
++ * This function is called when the pci driver is being unloaded from the
++ * kernel.
++ * @remarks
++ * The main tasks performed by this method are:
++ * - Free the interrupt line using free_irq API.
++ * - Disable the PCI device using pci_disable_device API.
++ * - Unmap the PCI memory and release the same using iounmap API.
++ * - Delete the char device from the system using cdev_del API.
++ * - Unregister the driver using unregister_chrdev_region API.
++ *
++ * @param pdev [INOUT] pci device structure
++ *
++ * @return None
++ *@see
++ - ioh_1588_pcidev
++ *
++ * <hr>
++ */
++
++static void __devexit ioh_1588_remove(struct pci_dev *pdev)
++{
++ /* disable the interrupts on the 1588 hardware */
++ IOH_DEBUG("ioh_1588_remove: disabling interrupts by \
++ invoking ioh_1588_disable_interrupts\n");
++ (void)ioh_1588_disable_interrupts();
++
++ /* free the interrupt */
++ if (pdev->irq != 0) {
++ free_irq(pdev->irq, &ioh_1588_dev);
++ IOH_DEBUG("ioh_1588_remove: unregistered IRQ handler\n");
++ }
++
++ /* unmap the virtual IO memory space */
++ if (ioh_1588_dev.mem_virt != 0) {
++ iounmap(ioh_1588_dev.mem_virt);
++ IOH_DEBUG
++ ("ioh_1588_remove: unmaped the virtual IO memory space\n");
++ }
++
++ /* release the reserved IO memory space */
++ if (ioh_1588_dev.mem_base != 0) {
++ release_mem_region(ioh_1588_dev.mem_base,
++ ioh_1588_dev.mem_size);
++ IOH_DEBUG
++ ("ioh_1588_remove: released the reserved IO memory \
++ space\n");
++ }
++
++ /* remove cdev struct from system */
++ cdev_del(&ioh_1588_dev.cdev);
++ IOH_DEBUG("ioh_1588_remove: removed the cdev from system\n");
++
++ unregister_chrdev_region(ioh_1588_dev.devno, 1);
++ IOH_DEBUG("ioh_1588_remove:unregisterd the module\n");
++
++ /*disable the device */
++ pci_disable_device(pdev);
++ IOH_DEBUG("ioh_1588_remove:disabled the device\n");
++
++ IOH_LOG(KERN_ERR, "ioh_1588_remove: complete\n");
++}
++
++#ifdef CONFIG_PM
++/*! @ingroup IEEE_1588_PCILayerAPI
++ * @fn int ioh_1588_suspend(struct pci_dev *pdev,pm_message_t state)
++ * @brief
++ * This function is called to suspend a device before being put into a
++ * low power state.
++ *
++ * @remarks
++ * The main tasks performed by this method are:
++ * - Save PCI configuration space by invoking pci_save_state API.
++ * - If the above step fails, return the error from pci_save_state
++ * API.
++ * - Disable the PCI device using the pci_disable_device API.
++ * - Put the device to new power state using pci_set_power_state
++ * API.
++ *
++ *
++ * @param pdev [INOUT] pci device structure
++ * @param state [IN] suspend state
++ *
++ * @return int
++ * - 0 on success
++ * - -ENOMEM pci_save_state fails
++ *
++ * @see
++ * - ioh_1588_pcidev
++ * <hr>
++ * */
++static int ioh_1588_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++
++ IOH_DEBUG("ioh_1588_suspend: disabling interrupts by \
++ invoking ioh_1588_disable_interrupts\n");
++ (void)ioh_1588_disable_interrupts();
++
++ ioh_1588_dev.suspend = 1;
++
++ IOH_DEBUG("ioh_1588_suspend: saving register values by \
++ invoking ioh_1588_save_state\n");
++ ioh_1588_save_state();
++
++ pci_disable_device(pdev);
++ IOH_DEBUG("ioh_1588_suspend: disabled the device\n");
++
++ (void)pci_enable_wake(pdev, PCI_D3hot, 0);
++ IOH_DEBUG("ioh_1588_suspend: disabled PM notifications\n");
++
++ if (pci_save_state(pdev) != 0) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_suspend: could not save PCI config state\n");
++ return -ENOMEM;
++ }
++ IOH_DEBUG("ioh_1588_suspend: saved state\n");
++
++ pci_set_power_state(pdev, pci_choose_state(pdev, state));
++
++ IOH_DEBUG("ioh_1588_suspend: returning success\n");
++ return 0;
++}
++
++/*! @ingroup IEEE_1588_PCILayerAPI
++ * @fn int ioh_1588_resume(struct pci_dev *pdev)
++ * @brief
++ * This function is called to resume a device after being put into a
++ * low power state.
++ * @remarks
++ * The main tasks performed by this method are:
++ * - Restore the power state of the device using
++ * pci_set_power_state
++ * API.
++ * - Restore the PCI configuration space using pci_restore_state
++ * API.
++ * - Enable the PCI device using pci_enable_device API.
++ * - If pci_enable_device fails, return the error status;
++ * else return 0.
++ *
++ *
++ * @param pdev [INOUT] pci device structure
++ *
++ * @return int
++ * - 0 on success
++ * - -EIO pci_enable_device fails
++ * - -EINVAL pci_enable_device fails
++ * @see
++ * - ioh_1588_pcidev
++ * <hr>
++ */
++
++static int ioh_1588_resume(struct pci_dev *pdev)
++{
++ int ret;
++
++ pci_set_power_state(pdev, PCI_D0);
++
++ ret = pci_restore_state(pdev);
++
++ if (ret != 0) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_resume: pci_restore_state failed\n");
++ return ret;
++ }
++ IOH_DEBUG("ioh_1588_resume: restored state\n");
++
++ ret = pci_enable_device(pdev);
++
++ if (ret) {
++ IOH_LOG(KERN_ERR,
++ "ioh_1588_resume: pci_enable_device failed\n");
++ return ret;
++ }
++
++ IOH_DEBUG("ioh_1588_resume: enabled device\n");
++
++ (void)pci_enable_wake(pdev, PCI_D3hot, 0);
++ IOH_DEBUG("ioh_1588_resume: disabled PM notifications\n");
++
++ IOH_DEBUG("ioh_1588_resume: restoring register values by \
++ invoking ioh_1588_restore_state\n");
++ ioh_1588_restore_state();
++
++ ioh_1588_dev.suspend = 0;
++
++ IOH_DEBUG("ioh_1588_resume: returning success\n");
++ return 0;
++}
++#endif
++
++/*! @ingroup IEEE_1588_InterfaceLayerAPI
++ * @fn void ioh_1588_exit(void)
++ * @brief Un-loads the IEEE 1588 PCI driver.
++ * @remarks This function is invoked when the driver is
++ * unloaded. The main task performed by this
++ * function is un-registering the module as
++ * PCI driver.
++ * @param None
++ * @return None
++ * <hr>
++ */
++static void __exit ioh_1588_exit(void)
++{
++
++ pci_unregister_driver(&ioh_1588_pcidev);
++
++ IOH_DEBUG("ioh_1588_exit: Driver unloaded\n");
++}
++
++/*! @ingroup IEEE_1588_InterfaceLayerAPI
++ * @fn int ioh_1588_init(void)
++ * @brief Initializes the driver.
++ * @remarks This function is the entry point for the driver.
++ * The main tasks performed by this method are:
++ * - Register IEEE 1588 driver as a PCI driver by calling
++ * pci_register_driver
++ *
++ * @param None
++ *
++ * @return int
++ * - 0 on success
++ * - -EBUSY register_chrdev_region fails
++ * - -ENOMEM cdev_add/register_chrdev_region/
++ * pci_register_driver fails
++ * - -EEXIST pci_register_driver fails
++ * - -EINVAL pci_register_driver fails
++ * <hr>
++ */
++static int __init ioh_1588_init(void)
++{
++ int ret;
++
++ (void)memset((void *)&ioh_1588_dev, 0, sizeof ioh_1588_dev);
++
++ /* register the driver with the pci core */
++ ret = pci_register_driver(&ioh_1588_pcidev);
++
++ if (ret)
++ IOH_LOG(KERN_ERR, "ioh_1588_init: pci_register failed\n");
++
++ IOH_DEBUG("ioh_1588_init: pci_register success\n");
++ return ret;
++}
++
++module_init(ioh_1588_init);
++module_exit(ioh_1588_exit);
++MODULE_LICENSE("GPL");
++MODULE_DEVICE_TABLE(pci, ioh_1588_pcidev_id);
+diff -urN linux-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_pci.h topcliff-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_pci.h
+--- linux-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_pci.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/char/pch_ieee1588/pch_1588_pci.h 2010-03-09 05:27:48.000000000 +0900
+@@ -0,0 +1,122 @@
++ /*!
++ * @file ioh_1588_pci.h
++ * @brief
++ * This file lists the declarations for IEEE_1588_PCILayer APIs.
++ * @version 0.92
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * modified to support Intel IOH GE IEEE 1588 hardware
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ * derived from
++ * IEEE 1588 Time Synchronization Driver for Intel EP80579
++ * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
++ * All rights reserved.
++ *
++ */
++
++#ifndef IOH_1588_PCI_H
++#define IOH_1588_PCI_H
++
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/poll.h>
++#include <linux/init.h>
++#include <linux/fs.h>
++#include <linux/fcntl.h>
++#include <linux/interrupt.h>
++#include <linux/uaccess.h>
++#include <linux/cdev.h>
++#include <linux/pci.h>
++
++/*! @ingroup IEEE_1588_Global
++ * @def DRIVER_NAME
++ * @brief Macro representing the name of this driver
++ * <hr>
++ */
++#define DRIVER_NAME "ioh_ieee1588"
++
++/*! @ingroup IEEE_1588_Global
++ * @def STATION_ADDR_LEN
++ * @brief Macro representing the station address length
++ * <hr>
++ */
++#define STATION_ADDR_LEN 20
++
++/*! @ingroup IEEE_1588_PCILayer
++ @def PCI_VENDOR_ID_IOH
++ @brief Outlines the PCI Vendor ID for IOH board.
++ */
++
++/*! @ingroup IEEE_1588_PCILayer
++ @def PCI_DEVICE_ID_IOH_1588
++ @brief Outlines the PCI Device ID for IEEE 1588 device.
++ */
++#define PCI_DEVICE_ID_IOH_1588 0x8819
++
++/*! @ingroup IEEE_1588_PCILayer
++ * @def IO_MEM_BAR
++ * @brief Macro representing IO memory BAR
++ * <hr>
++ */
++#define IO_MEM_BAR 1
++
++/* enumeration of events used */
++
++/*! @ingroup IEEE_1588_Global
++ * @enum _notify_event
++ * @brief enumeration of events used
++ * <hr>
++ */
++enum _notify_event {
++ TARG_TIME_EVENT_NUM,
++ AUX_TIME_EVENT_NUM,
++ PPS_EVENT_NUM,
++ NUM_EVENTS
++};
++
++/* private driver data */
++/*! @ingroup IEEE_1588_Global
++ * @struct ioh_1588_dev
++ * @brief Driver private data
++ * <hr>
++ */
++struct ioh_1588_dev {
++ dev_t devno; /**< The device (major) number. */
++ struct cdev cdev; /**< The cdev structure instance. */
++ void *mem_virt; /**< The virtual memory base address.*/
++ unsigned int mem_base; /**< The physical memory base address.*/
++ unsigned int mem_size; /**< The memory size. */
++ unsigned int irq; /**< The IRQ line of the device.*/
++ unsigned int suspend:1; /**< The suspend flag. */
++ unsigned int initialized:1; /**< The initialized flag. */
++ /* event variables */
++ unsigned int event_flags[NUM_EVENTS]; /**< The event variables. */
++ wait_queue_head_t notify_evt[NUM_EVENTS]; /**< The notify event
++ variable.*/
++};
++
++extern struct ioh_1588_dev *ioh_1588_devp;
++extern const struct file_operations ioh_1588_fops;
++irqreturn_t ioh_1588_isr(int irq, void *p_data);
++extern void ioh_1588_save_state(void);
++extern void ioh_1588_restore_state(void);
++
++#endif /* IOH_1588_PCI_H */
+diff -urN linux-2.6.33-rc3/drivers/char/pch_ieee1588/pch_common.h topcliff-2.6.33-rc3/drivers/char/pch_ieee1588/pch_common.h
+--- linux-2.6.33-rc3/drivers/char/pch_ieee1588/pch_common.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/char/pch_ieee1588/pch_common.h 2010-03-09 05:56:11.000000000 +0900
+@@ -0,0 +1,146 @@
++/*!
++ * @file ioh_common.h
++ * @brief Provides the macro definitions used by all files.
++ * @version 1.0.0.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ */
++
++#ifndef __IOH_COMMON_H__
++#define __IOH_COMMON_H__
++
++/*! @ingroup Global
++@def IOH_WRITE8
++@brief Macro for writing 8 bit data to an io/mem address
++*/
++#define IOH_WRITE8(val, addr) iowrite8((val), (void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_WRITE16
++@brief Macro for writing 16 bit data to an io/mem address
++*/
++#define IOH_WRITE16(val, addr) iowrite16((val), (void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_WRITE32
++@brief Macro for writing 32 bit data to an io/mem address
++*/
++#define IOH_WRITE32(val, addr) iowrite32((val), (void __iomem *)(addr))
++
++/*! @ingroup Global
++@def IOH_READ8
++@brief Macro for reading 8 bit data from an io/mem address
++*/
++#define IOH_READ8(addr) ioread8((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_READ16
++@brief Macro for reading 16 bit data from an io/mem address
++*/
++#define IOH_READ16(addr) ioread16((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_READ32
++@brief Macro for reading 32 bit data from an io/mem address
++*/
++#define IOH_READ32(addr) ioread32((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_WRITE32_F
++@brief Macro for writing 32 bit data to an io/mem address
++*/
++#define IOH_WRITE32_F(val, addr) do \
++ { IOH_WRITE32((val), (addr)); (void)IOH_READ32((addr)); } while (0);
++
++/*! @ingroup Global
++@def IOH_WRITE_BYTE
++@brief Macro for writing 1 byte data to an io/mem address
++*/
++#define IOH_WRITE_BYTE IOH_WRITE8
++/*! @ingroup Global
++@def IOH_WRITE_WORD
++@brief Macro for writing 1 word data to an io/mem address
++*/
++#define IOH_WRITE_WORD IOH_WRITE16
++/*! @ingroup Global
++@def IOH_WRITE_LONG
++@brief Macro for writing long data to an io/mem address
++*/
++#define IOH_WRITE_LONG IOH_WRITE32
++
++/*! @ingroup Global
++@def IOH_READ_BYTE
++@brief Macro for reading 1 byte data from an io/mem address
++*/
++#define IOH_READ_BYTE IOH_READ8
++/*! @ingroup Global
++@def IOH_READ_WORD
++@brief Macro for reading 1 word data from an io/mem address
++*/
++#define IOH_READ_WORD IOH_READ16
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief Macro for reading long data from an io/mem address
++*/
++#define IOH_READ_LONG IOH_READ32
++
++/* Bit Manipulation Macros */
++
++/*! @ingroup Global
++@def IOH_SET_ADDR_BIT
++@brief macro to set a specified bit(mask) at the
++ specified address
++*/
++#define IOH_SET_ADDR_BIT(addr, bitmask) IOH_WRITE_LONG((IOH_READ_LONG(addr) |\
++ (bitmask)), (addr))
++
++/*! @ingroup Global
++@def IOH_CLR_ADDR_BIT
++@brief macro to clear a specified bit(mask) at the specified address
++*/
++#define IOH_CLR_ADDR_BIT(addr, bitmask) IOH_WRITE_LONG((IOH_READ_LONG(addr) &\
++ ~(bitmask)), (addr))
++
++/*! @ingroup Global
++@def IOH_SET_BITMSK
++@brief macro to set a specified bitmask for a variable
++*/
++#define IOH_SET_BITMSK(var, bitmask) ((var) |= (bitmask))
++
++/*! @ingroup Global
++@def IOH_CLR_BITMSK
++@brief macro to clear a specified bitmask for a variable
++*/
++#define IOH_CLR_BITMSK(var, bitmask) ((var) &= (~(bitmask)))
++
++/*! @ingroup Global
++@def IOH_SET_BIT
++@brief macro to set a specified bit for a variable
++*/
++#define IOH_SET_BIT(var, bit) ((var) |= (1<<(bit)))
++
++/*! @ingroup Global
++@def IOH_CLR_BIT
++@brief macro to clear a specified bit for a variable
++*/
++#define IOH_CLR_BIT(var, bit) ((var) &= ~(1<<(bit)))
++
++#endif
+diff -urN linux-2.6.33-rc3/drivers/char/pch_ieee1588/pch_debug.h topcliff-2.6.33-rc3/drivers/char/pch_ieee1588/pch_debug.h
+--- linux-2.6.33-rc3/drivers/char/pch_ieee1588/pch_debug.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/char/pch_ieee1588/pch_debug.h 2010-03-09 05:37:47.000000000 +0900
+@@ -0,0 +1,60 @@
++/*!
++ * @file ioh_debug.h
++ * @brief Provides the macro definitions used for debugging.
++ * @version 1.0.0.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ */
++
++#ifndef __IOH_DEBUG_H__
++#define __IOH_DEBUG_H__
++
++#ifdef MODULE
++#define IOH_LOG(level, fmt, args...) printk(level "%s:" fmt "\n",\
++ THIS_MODULE->name, ##args)
++#else
++#define IOH_LOG(level, fmt, args...) printk(level "%s:" fmt "\n" ,\
++ __FILE__, ##args)
++#endif
++
++
++#ifdef DEBUG
++ #define IOH_DEBUG(fmt, args...) IOH_LOG(KERN_DEBUG, fmt, ##args)
++#else
++ #define IOH_DEBUG(fmt, args...)
++#endif
++
++#ifdef IOH_TRACE_ENABLED
++ #define IOH_TRACE IOH_DEBUG
++#else
++ #define IOH_TRACE(fmt, args...)
++#endif
++
++#define IOH_TRACE_ENTER IOH_TRACE("Enter %s", __func__)
++#define IOH_TRACE_EXIT IOH_TRACE("Exit %s", __func__)
++
++
++#endif
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-pcieqos.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-pcieqos.patch
new file mode 100644
index 0000000..14b9e53
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-pcieqos.patch
@@ -0,0 +1,2083 @@
+
+
+From: Masayuki Ohtake <masa-korg@dsn.okisemi.com>
+Subject: OKI Semiconductor PCH PCIEQOS driver
+
+This driver implements PCH PCIEQOS controls for PCH.
+
+Signed-off-by: Masayuki Ohtake <masa-korg@dsn.okisemi.com>
+Acked-by: Wang Qi <qi.wang@intel.com>
+
+---
+ drivers/char/Kconfig | 7++
+ drivers/char/Makefile | 1
+ drivers/char/pch_pcieqos/Makefile | 9
+ drivers/char/pch_pcieqos/pch_common.h | 146
+ drivers/char/pch_pcieqos/pch_debug.h | 60
+ drivers/char/pch_pcieqos/pch_pcieqos.c | 392
+ drivers/char/pch_pcieqos/pch_pcieqos.h | 196
+ drivers/char/pch_pcieqos/pch_pcieqos_hal.c | 550
+ drivers/char/pch_pcieqos/pch_pcieqos_hal.h | 125
+ drivers/char/pch_pcieqos/pch_pcieqos_pci.c | 523
++++++++++++++++++++++++++++++++ 10 files changed, 2009 insertions(+)
+diff -urN linux-2.6.33-rc3/drivers/char/Kconfig topcliff-2.6.33-rc3/drivers/char/Kconfig
+--- linux-2.6.33-rc3/drivers/char/Kconfig 2010-03-12 14:46:34.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/char/Kconfig 2010-03-09 10:14:52.000000000 +0900
+@@ -11,6 +11,13 @@
+ If you say yes to this option, support will be included for the
+ PCH IEEE1588 Host controller.
+
++config PCH_PCIEQOS
++ tristate "PCH PCIEQOS"
++ depends on PCI
++ help
++ If you say yes to this option, support will be included for the
++ PCH PCIEQOS Host controller.
++
+ config VT
+ bool "Virtual terminal" if EMBEDDED
+ depends on !S390
+diff -urN linux-2.6.33-rc3/drivers/char/Makefile topcliff-2.6.33-rc3/drivers/char/Makefile
+--- linux-2.6.33-rc3/drivers/char/Makefile 2010-03-12 14:46:34.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/char/Makefile 2010-03-05 22:57:39.000000000 +0900
+@@ -112,6 +112,7 @@
+ js-rtc-y = rtc.o
+
+ obj-$(CONFIG_PCH_IEEE1588) += pch_ieee1588/
++obj-$(CONFIG_PCH_PCIEQOS) += pch_pcieqos/
+
+ # Files generated that shall be removed upon make clean
+ clean-files := consolemap_deftbl.c defkeymap.c
+diff -urN linux-2.6.33-rc3/drivers/char/pch_pcieqos/Makefile topcliff-2.6.33-rc3/drivers/char/pch_pcieqos/Makefile
+--- linux-2.6.33-rc3/drivers/char/pch_pcieqos/Makefile 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/char/pch_pcieqos/Makefile 2010-03-09 09:15:41.000000000 +0900
+@@ -0,0 +1,9 @@
++ifeq ($(CONFIG_PCIEQOS_DEBUG),y)
++EXTRA_CFLAGS += -DDEBUG
++endif
++
++obj-$(CONFIG_PCH_PCIEQOS) += pch_pcieqos_drv.o
++#to set CAN clock to 50Mhz
++EXTRA_CFLAGS+=-DIOH_CAN_PCLK_50MHZ
++
++pch_pcieqos_drv-objs := pch_pcieqos.o pch_pcieqos_pci.o pch_pcieqos_hal.o
+diff -urN linux-2.6.33-rc3/drivers/char/pch_pcieqos/pch_common.h topcliff-2.6.33-rc3/drivers/char/pch_pcieqos/pch_common.h
+--- linux-2.6.33-rc3/drivers/char/pch_pcieqos/pch_common.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/char/pch_pcieqos/pch_common.h 2010-03-09 05:56:11.000000000 +0900
+@@ -0,0 +1,146 @@
++/*!
++ * @file ioh_common.h
++ * @brief Provides the macro definitions used by all files.
++ * @version 1.0.0.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ */
++
++#ifndef __IOH_COMMON_H__
++#define __IOH_COMMON_H__
++
++/*! @ingroup Global
++@def IOH_WRITE8
++@brief Macro for writing 8 bit data to an io/mem address
++*/
++#define IOH_WRITE8(val, addr) iowrite8((val), (void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_WRITE16
++@brief Macro for writing 16 bit data to an io/mem address
++*/
++#define IOH_WRITE16(val, addr) iowrite16((val), (void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_WRITE32
++@brief Macro for writing 32 bit data to an io/mem address
++*/
++#define IOH_WRITE32(val, addr) iowrite32((val), (void __iomem *)(addr))
++
++/*! @ingroup Global
++@def IOH_READ8
++@brief Macro for reading 8 bit data from an io/mem address
++*/
++#define IOH_READ8(addr) ioread8((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_READ16
++@brief Macro for reading 16 bit data from an io/mem address
++*/
++#define IOH_READ16(addr) ioread16((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_READ32
++@brief Macro for reading 32 bit data from an io/mem address
++*/
++#define IOH_READ32(addr) ioread32((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_WRITE32_F
++@brief Macro for writing 32 bit data to an io/mem address
++*/
++#define IOH_WRITE32_F(val, addr) do \
++ { IOH_WRITE32((val), (addr)); (void)IOH_READ32((addr)); } while (0);
++
++/*! @ingroup Global
++@def IOH_WRITE_BYTE
++@brief Macro for writing 1 byte data to an io/mem address
++*/
++#define IOH_WRITE_BYTE IOH_WRITE8
++/*! @ingroup Global
++@def IOH_WRITE_WORD
++@brief Macro for writing 1 word data to an io/mem address
++*/
++#define IOH_WRITE_WORD IOH_WRITE16
++/*! @ingroup Global
++@def IOH_WRITE_LONG
++@brief Macro for writing long data to an io/mem address
++*/
++#define IOH_WRITE_LONG IOH_WRITE32
++
++/*! @ingroup Global
++@def IOH_READ_BYTE
++@brief Macro for reading 1 byte data from an io/mem address
++*/
++#define IOH_READ_BYTE IOH_READ8
++/*! @ingroup Global
++@def IOH_READ_WORD
++@brief Macro for reading 1 word data from an io/mem address
++*/
++#define IOH_READ_WORD IOH_READ16
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief Macro for reading long data from an io/mem address
++*/
++#define IOH_READ_LONG IOH_READ32
++
++/* Bit Manipulation Macros */
++
++/*! @ingroup Global
++@def IOH_SET_ADDR_BIT
++@brief macro to set a specified bit(mask) at the
++ specified address
++*/
++#define IOH_SET_ADDR_BIT(addr, bitmask) IOH_WRITE_LONG((IOH_READ_LONG(addr) |\
++ (bitmask)), (addr))
++
++/*! @ingroup Global
++@def IOH_CLR_ADDR_BIT
++@brief macro to clear a specified bit(mask) at the specified address
++*/
++#define IOH_CLR_ADDR_BIT(addr, bitmask) IOH_WRITE_LONG((IOH_READ_LONG(addr) &\
++ ~(bitmask)), (addr))
++
++/*! @ingroup Global
++@def IOH_SET_BITMSK
++@brief macro to set a specified bitmask for a variable
++*/
++#define IOH_SET_BITMSK(var, bitmask) ((var) |= (bitmask))
++
++/*! @ingroup Global
++@def IOH_CLR_BITMSK
++@brief macro to clear a specified bitmask for a variable
++*/
++#define IOH_CLR_BITMSK(var, bitmask) ((var) &= (~(bitmask)))
++
++/*! @ingroup Global
++@def IOH_SET_BIT
++@brief macro to set a specified bit for a variable
++*/
++#define IOH_SET_BIT(var, bit) ((var) |= (1<<(bit)))
++
++/*! @ingroup Global
++@def IOH_CLR_BIT
++@brief macro to clear a specified bit for a variable
++*/
++#define IOH_CLR_BIT(var, bit) ((var) &= ~(1<<(bit)))
++
++#endif
+diff -urN linux-2.6.33-rc3/drivers/char/pch_pcieqos/pch_debug.h topcliff-2.6.33-rc3/drivers/char/pch_pcieqos/pch_debug.h
+--- linux-2.6.33-rc3/drivers/char/pch_pcieqos/pch_debug.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/char/pch_pcieqos/pch_debug.h 2010-03-09 05:37:47.000000000 +0900
+@@ -0,0 +1,60 @@
++/*!
++ * @file ioh_debug.h
++ * @brief Provides the macro definitions used for debugging.
++ * @version 1.0.0.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ */
++
++#ifndef __IOH_DEBUG_H__
++#define __IOH_DEBUG_H__
++
++#ifdef MODULE
++#define IOH_LOG(level, fmt, args...) printk(level "%s:" fmt "\n",\
++ THIS_MODULE->name, ##args)
++#else
++#define IOH_LOG(level, fmt, args...) printk(level "%s:" fmt "\n" ,\
++ __FILE__, ##args)
++#endif
++
++
++#ifdef DEBUG
++ #define IOH_DEBUG(fmt, args...) IOH_LOG(KERN_DEBUG, fmt, ##args)
++#else
++ #define IOH_DEBUG(fmt, args...)
++#endif
++
++#ifdef IOH_TRACE_ENABLED
++ #define IOH_TRACE IOH_DEBUG
++#else
++ #define IOH_TRACE(fmt, args...)
++#endif
++
++#define IOH_TRACE_ENTER IOH_TRACE("Enter %s", __func__)
++#define IOH_TRACE_EXIT IOH_TRACE("Exit %s", __func__)
++
++
++#endif
+diff -urN linux-2.6.33-rc3/drivers/char/pch_pcieqos/pch_pcieqos.c topcliff-2.6.33-rc3/drivers/char/pch_pcieqos/pch_pcieqos.c
+--- linux-2.6.33-rc3/drivers/char/pch_pcieqos/pch_pcieqos.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/char/pch_pcieqos/pch_pcieqos.c 2010-03-12 14:16:44.000000000 +0900
+@@ -0,0 +1,392 @@
++/*!
++ * @file ioh_pcieqos.c
++ * @brief Provides all the implementation of the interfaces pertaining to the PCIEQOS module.
++ * @version 1.0.0.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 06/20/2009
++ * modified:
++ *
++ */
++
++/* includes */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/uaccess.h>
++#include <linux/string.h>
++
++#include "pch_common.h"
++#include "pch_debug.h"
++#include "pch_pcieqos.h"
++#include "pch_pcieqos_hal.h"
++
++#define MODULE_NAME "pch_pcieqos"
++
++/* global variables */
++s32 ioh_pcieqos_opencount; /* check whether opened or not */
++
++DEFINE_SPINLOCK(ioh_pcieqos_lock); /* for spin lock */
++
++/**
++ * file_operations structure initialization
++ */
++const struct file_operations ioh_pcieqos_fops = {
++ .owner = THIS_MODULE,
++ .open = ioh_pcieqos_open,
++ .release = ioh_pcieqos_release,
++ .ioctl = ioh_pcieqos_ioctl,
++};
++
++/*function implementations*/
++
++/*! @ingroup PCIEQOS_InterfaceLayerAPI
++ @fn int ioh_pcieqos_open( struct inode *inode,struct file *file)
++ @remarks Implements the Initializing and opening of the pcieqos module.
++ @param inode [@ref INOUT] Contains the reference of the inode
++ structure
++ @param file [@ref INOUT] Contains the reference of the file structure
++ @retval returnvalue [@ref OUT] contains the result for the concerned
++ attempt.
++ The result would generally comprise of success code
++ or failure code. The failure code will indicate reason for
++ failure.
++ @see
++ EBUSY
++ */
++int ioh_pcieqos_open(struct inode *inode, struct file *file)
++{
++ int ret;
++
++ spin_lock(&ioh_pcieqos_lock);
++ IOH_DEBUG("ioh_pcieqos_open : open count value = %d",
++ ioh_pcieqos_opencount);
++ if (ioh_pcieqos_opencount) {
++ IOH_LOG(KERN_ERR,
++ "ioh_pcieqos_open : device already opened\n");
++ ret = -EBUSY;
++ } else {
++ ioh_pcieqos_opencount++;
++ ret = IOH_PCIEQOS_SUCCESS;
++ }
++ spin_unlock(&ioh_pcieqos_lock);
++
++ IOH_DEBUG("ioh_pcieqos_open returns=%d\n", ret);
++ return ret;
++}
++
++/*! @ingroup PCIEQOS_InterfaceLayerAPI
++ @fn int ioh_pcieqos_release(struct inode *inode,struct file *file)
++ @remarks Implements the release functionality of the pcieqos module.
++ @param inode [@ref INOUT] Contains the reference of the inode
++ structure
++ @param file [@ref INOUT] Contains the reference of the file structure
++ @retval returnvalue [@ref OUT] contains the result for the concerned
++ attempt.
++ The result would generally comprise of success code
++ or failure code. The failure code will indicate reason for
++ failure.
++ @see
++ SUCCESS
++ */
++int ioh_pcieqos_release(struct inode *inode, struct file *file)
++{
++ spin_lock(&ioh_pcieqos_lock);
++
++ if (ioh_pcieqos_opencount > 0)
++ ioh_pcieqos_opencount--;
++ spin_unlock(&ioh_pcieqos_lock);
++
++ IOH_DEBUG("ioh_pcieqos_release : ioh_pcieqos_opencount =%d\n",
++ ioh_pcieqos_opencount);
++
++ IOH_DEBUG("ioh_pcieqos_release returning=%d\n", IOH_PCIEQOS_SUCCESS);
++ return IOH_PCIEQOS_SUCCESS;
++}
++
++/*! @ingroup PCIEQOS_InterfaceLayerAPI
++ @fn int ioh_pcieqos_ioctl(struct inode * inode,struct file * file,
++ unsigned int cmd,unsigned long arg)
++ @remarks Implements the various ioctl functionalities of the pcieqos module.
++ @param inode [@ref INOUT] Contains the reference of the inode
++ structure
++ @param file [@ref INOUT] Contains the reference of the file structure
++ @param cmd [@ref IN] Contains the command value
++ @param arg [@ref IN] Contains the command argument value
++ @retval returnvalue [@ref OUT] contains the result for the concerned
++ attempt.
++ The result would generally comprise of success code
++ or failure code. The failure code will indicate reason for
++ failure.
++ @see
++ EINVAL
++ EFAULT
++ */
++int ioh_pcieqos_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++
++ int ret_value = IOH_PCIEQOS_SUCCESS;
++ struct ioh_pcieqos_reqt *p_ioh_pcieqos_reqt;
++ unsigned long addr_offset;
++ unsigned long data;
++ unsigned long mask;
++
++ do {
++ if (ioh_pcieqos_suspended == true) {
++ IOH_LOG(KERN_ERR,
++ "ioh_pcieqos_ioctl : suspend initiated\
++ returning =%d\n",
++ IOH_PCIEQOS_FAIL);
++ ret_value = IOH_PCIEQOS_FAIL;
++ break;
++ }
++
++ p_ioh_pcieqos_reqt = (struct ioh_pcieqos_reqt *)arg;
++ ret_value =
++ copy_from_user((void *)&addr_offset,
++ (void *)&p_ioh_pcieqos_reqt->addr_offset,
++ sizeof(addr_offset));
++ if (ret_value) {
++ IOH_LOG(KERN_ERR,
++ "ioh_pcieqos_ioctl : copy_from_user fail\
++ returning =%d\n",
++ -EFAULT);
++ ret_value = -EFAULT;
++ break;
++ }
++ IOH_DEBUG("ioh_pcieqos_ioctl : copy_from_user returns =%d\n",
++ ret_value);
++
++ switch (cmd) {
++ case IOCTL_PCIEQOS_READ_REG:
++ {
++
++ ioh_pcieqos_read_reg(addr_offset, &data);
++ IOH_DEBUG
++ ("ioh_pcieqos_ioctl : Invoked\
++ ioh_pcieqos_read_reg successfully\n");
++
++ ret_value =
++ copy_to_user((void *)&p_ioh_pcieqos_reqt->
++ data, (void *)&data,
++ sizeof(data));
++ if (ret_value) {
++ IOH_LOG(KERN_ERR,
++ "ioh_pcieqos_ioctl :\
++ copy_to_user fail returning =%d\n",
++ -EFAULT);
++ ret_value = -EFAULT;
++ break;
++ }
++ break;
++ }
++
++ case IOCTL_PCIEQOS_WRITE_REG:
++ {
++
++ ret_value =
++ copy_from_user((void *)&data,
++ (void *)&p_ioh_pcieqos_reqt->
++ data, sizeof(data));
++ if (ret_value) {
++ IOH_LOG(KERN_ERR,
++ "ioh_pcieqos_ioctl :\
++ copy_from_user fail returning =%d\n",
++ -EFAULT);
++ ret_value = -EFAULT;
++ break;
++ }
++ ioh_pcieqos_write_reg(addr_offset, data);
++ IOH_DEBUG
++ ("ioh_pcieqos_ioctl : Invoked\
++ ioh_pcieqos_write_reg successfully\n");
++ break;
++ }
++
++ case IOCTL_PCIEQOS_READ_MODIFY_WRITE_REG:
++ {
++
++ ret_value =
++ copy_from_user((void *)&data,
++ (void *)&p_ioh_pcieqos_reqt->
++ data, sizeof(data));
++ if (ret_value) {
++ IOH_LOG(KERN_ERR,
++ "ioh_pcieqos_ioctl :\
++ copy_from_user fail returning =%d\n",
++ -EFAULT);
++ ret_value = -EFAULT;
++ break;
++ }
++ ret_value =
++ copy_from_user((void *)&mask,
++ (void *)&p_ioh_pcieqos_reqt->
++ mask, sizeof(mask));
++ if (ret_value) {
++ IOH_LOG(KERN_ERR,
++ "ioh_pcieqos_ioctl :\
++ copy_from_user fail returning =%d\n",
++ -EFAULT);
++ ret_value = -EFAULT;
++ break;
++ }
++ ioh_pcieqos_read_modify_write_reg(addr_offset,
++ data, mask);
++ IOH_DEBUG
++ ("ioh_pcieqos_ioctl : Invoked\
++ ioh_pcieqos_read_modify_write_reg successfully\n");
++ break;
++ }
++
++ case IOCTL_PCIEQOS_READ_OROM:
++ {
++
++ ret_value =
++ ioh_pcieqos_read_serial_rom(addr_offset,
++ (unsigned char *)&data);
++ if (ret_value) {
++ IOH_LOG(KERN_ERR,
++ "ioh_pcieqos_ioctl :\
++ Invoked ioh_pcieqos_read_serial_rom =%d\n",
++ -EFAULT);
++ ret_value = -EFAULT;
++ break;
++ } else {
++ IOH_DEBUG
++ ("ioh_pcieqos_ioctl :\
++ Invoked ioh_pcieqos_read_serial_rom successfully\n");
++ }
++
++ ret_value =
++ copy_to_user((void *)&p_ioh_pcieqos_reqt->
++ data, (void *)&data,
++ sizeof(data));
++ if (ret_value) {
++ IOH_LOG(KERN_ERR,
++ "ioh_pcieqos_ioctl :\
++ copy_to_user fail returning =%d\n",
++ -EFAULT);
++ ret_value = -EFAULT;
++ break;
++ }
++ break;
++ }
++
++ case IOCTL_PCIEQOS_WRITE_OROM:
++ {
++
++ ret_value =
++ copy_from_user((void *)&data,
++ (void *)&p_ioh_pcieqos_reqt->
++ data, sizeof(data));
++ if (ret_value) {
++ IOH_LOG(KERN_ERR,
++ "ioh_pcieqos_ioctl :\
++ copy_from_user fail returning =%d\n",
++ -EFAULT);
++ ret_value = -EFAULT;
++ break;
++ }
++ ret_value =
++ ioh_pcieqos_write_serial_rom(addr_offset,
++ data);
++ if (ret_value) {
++ IOH_LOG(KERN_ERR,
++ "ioh_pcieqos_ioctl :\
++ Invoked ioh_pcieqos_write_serial_rom =%d\n",
++ -EFAULT);
++ ret_value = -EFAULT;
++ break;
++ } else {
++ IOH_DEBUG
++ ("ioh_pcieqos_ioctl :\
++ Invoked ioh_pcieqos_write_serial_rom successfully\n");
++ }
++ break;
++ }
++
++ case IOCTL_PCIEQOS_READ_MAC_ADDR:
++ {
++
++ ioh_pcieqos_read_gbe_mac_addr(addr_offset,
++ (unsigned char *)&data);
++ IOH_DEBUG
++ ("ioh_pcieqos_ioctl : Invoked\
++ ioh_pcieqos_read_gbe_mac_addr successfully\n");
++
++ ret_value =
++ copy_to_user((void *)&p_ioh_pcieqos_reqt->
++ data, (void *)&data,
++ sizeof(data));
++ if (ret_value) {
++ IOH_LOG(KERN_ERR,
++ "ioh_pcieqos_ioctl :\
++ copy_to_user fail returning =%d\n",
++ -EFAULT);
++ ret_value = -EFAULT;
++ break;
++ }
++ break;
++ }
++
++ case IOCTL_PCIEQOS_WRITE_MAC_ADDR:
++ {
++
++ ret_value =
++ copy_from_user((void *)&data,
++ (void *)&p_ioh_pcieqos_reqt->
++ data, sizeof(data));
++ if (ret_value) {
++ IOH_LOG(KERN_ERR,
++ "ioh_pcieqos_ioctl :\
++ copy_from_user fail returning =%d\n",
++ -EFAULT);
++ ret_value = -EFAULT;
++ break;
++ }
++ ioh_pcieqos_write_gbe_mac_addr(addr_offset,
++ data);
++ IOH_DEBUG
++ ("ioh_pcieqos_ioctl : Invoked\
++ ioh_pcieqos_write_gbe_mac_addr successfully\n");
++ break;
++ }
++
++ default:
++ {
++ IOH_LOG(KERN_ERR,
++ "ioh_write_ioctl invalid command\
++ returning=%d\n",
++ -EINVAL);
++ ret_value = -EINVAL;
++ break;
++ }
++ }
++ break;
++
++ } while (0);
++ IOH_LOG(KERN_ERR, "ioh_write_ioctl returns=%d\n", ret_value);
++ return ret_value;
++}
+diff -urN linux-2.6.33-rc3/drivers/char/pch_pcieqos/pch_pcieqos.h topcliff-2.6.33-rc3/drivers/char/pch_pcieqos/pch_pcieqos.h
+--- linux-2.6.33-rc3/drivers/char/pch_pcieqos/pch_pcieqos.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/char/pch_pcieqos/pch_pcieqos.h 2010-03-08 12:14:32.000000000 +0900
+@@ -0,0 +1,196 @@
++#ifndef __IOH_PCIEQOS_H__
++#define __IOH_PCIEQOS_H__
++/*!
++ * @file ioh_pcieqos.h
++ * @brief Provides all the interfaces pertaining to the PCIEQOS module.
++ * @version 1.0.0.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 06/20/2009
++ * modified:
++ *
++ */
++
++/*! @defgroup PCIEQOS */
++/*! @defgroup PCIEQOS_Global Global
++ @ingroup PCIEQOS */
++/*! @defgroup PCIEQOS_GlobalGeneral General
++ @ingroup PCIEQOS_Global */
++/*! @defgroup PCIEQOS_GlobalResultCodes StatusCodes
++ @ingroup PCIEQOS_Global */
++/*! @defgroup PCIEQOS_InterfaceLayer InterfaceLayer
++ @ingroup PCIEQOS */
++/*! @defgroup PCIEQOS_InterfaceLayerAPI Providers
++ @ingroup PCIEQOS_InterfaceLayer
++ */
++/*! @defgroup PCIEQOS_InterfaceLayerNotifyRoutines Notifiers
++ @ingroup PCIEQOS_InterfaceLayer
++ */
++/*! @defgroup PCIEQOS_PCILayer PCILayer
++ @ingroup PCIEQOS */
++/*! @defgroup PCIEQOS_PCILayerAPI Providers
++ @ingroup PCIEQOS_PCILayer
++ */
++/*! @defgroup PCIEQOS_PCILayerFacilitators Facilitators
++ @ingroup PCIEQOS_PCILayer
++ */
++/*! @defgroup PCIEQOS_HALLayer HALLayer
++ @ingroup PCIEQOS */
++/*! @defgroup PCIEQOS_HALLayerAPI Providers
++ @ingroup PCIEQOS_HALLayer
++ */
++/*! @defgroup PCIEQOS_HALLayerFacilitators Facilitators
++ @ingroup PCIEQOS_HALLayer
++ */
++/*! @defgroup PCIEQOS_Utilities Utilities
++ @ingroup PCIEQOS */
++/*! @defgroup PCIEQOS_UtilitiesAPI Providers
++ @ingroup PCIEQOS_Utilities
++ */
++
++/*! @ingroup PCIEQOS_InterfaceLayer
++ @def PCIEQOS_IOCTL_MAGIC
++ @brief Outlines the ioctl magic.
++ */
++#define PCIEQOS_IOCTL_MAGIC (0xf7)
++
++/*! @ingroup PCIEQOS_InterfaceLayer
++ @def IOCTL_PCIEQOS_READ_REG
++ @brief Outlines the read register function signature.
++ */
++#define IOCTL_PCIEQOS_READ_REG (_IOW(PCIEQOS_IOCTL_MAGIC, 1, unsigned long))
++
++/*! @ingroup PCIEQOS_InterfaceLayer
++ @def IOCTL_PCIEQOS_WRITE_REG
++ @brief Outlines the write register function signature.
++ */
++#define IOCTL_PCIEQOS_WRITE_REG (_IOW(PCIEQOS_IOCTL_MAGIC, 2, unsigned long))
++
++/*! @ingroup PCIEQOS_InterfaceLayer
++ @def IOCTL_PCIEQOS_READ_MODIFY_WRITE_REG
++ @brief Outlines the read, modify and write register function signature.
++ */
++#define IOCTL_PCIEQOS_READ_MODIFY_WRITE_REG (_IOW(PCIEQOS_IOCTL_MAGIC, 3,\
++ unsigned long))
++
++/*! @ingroup PCIEQOS_InterfaceLayer
++ @def IOCTL_PCIEQOS_READ_OROM
++ @brief Outlines the read option rom function signature.
++ */
++#define IOCTL_PCIEQOS_READ_OROM (_IOW(PCIEQOS_IOCTL_MAGIC, 4, unsigned long))
++
++/*! @ingroup PCIEQOS_InterfaceLayer
++ @def IOCTL_PCIEQOS_WRITE_OROM
++ @brief Outlines the write option rom function signature.
++ */
++#define IOCTL_PCIEQOS_WRITE_OROM (_IOW(PCIEQOS_IOCTL_MAGIC, 5, unsigned long))
++
++/*! @ingroup PCIEQOS_InterfaceLayer
++ @def IOCTL_PCIEQOS_READ_MAC_ADDR
++ @brief Outlines the read mac address function signature.
++ */
++#define IOCTL_PCIEQOS_READ_MAC_ADDR (_IOW(PCIEQOS_IOCTL_MAGIC, 6,\
++ unsigned long))
++
++/*! @ingroup PCIEQOS_InterfaceLayer
++ @def IOCTL_PCIEQOS_WRITE_MAC_ADDR
++ @brief Outlines the write mac address function signature.
++ */
++#define IOCTL_PCIEQOS_WRITE_MAC_ADDR (_IOW(PCIEQOS_IOCTL_MAGIC, 7,\
++ unsigned long))
++
++/*! @ingroup PCIEQOS_InterfaceLayer
++ @def PCIEQOS STATUS CODE
++ @brief Outlines PCIEQOS SUCCESS STATUS CODE
++ */
++#define IOH_PCIEQOS_SUCCESS (0)
++
++/*! @ingroup PCIEQOS_InterfaceLayer
++ @def PCIEQOS STATUS CODE
++ @brief Outlines PCIEQOS ERROR STATUS CODE
++ */
++#define IOH_PCIEQOS_FAIL (-1)
++
++/* Registers address offset */
++#define IOH_PCIEQOS_PHUB_ID_REG (0x0000)
++#define IOH_PCIEQOS_QUEUE_PRI_VAL_REG (0x0004)
++#define IOH_PCIEQOS_RC_QUEUE_MAXSIZE_REG (0x0008)
++#define IOH_PCIEQOS_BRI_QUEUE_MAXSIZE_REG (0x000C)
++#define IOH_PCIEQOS_COMP_RESP_TIMEOUT_REG (0x0010)
++#define IOH_PCIEQOS_BUS_SLAVE_CONTROL_REG (0x0014)
++#define IOH_PCIEQOS_DEADLOCK_AVOID_TYPE_REG (0x0018)
++#define IOH_PCIEQOS_INTPIN_REG_WPERMIT_REG0 (0x0020)
++#define IOH_PCIEQOS_INTPIN_REG_WPERMIT_REG1 (0x0024)
++#define IOH_PCIEQOS_INTPIN_REG_WPERMIT_REG2 (0x0028)
++#define IOH_PCIEQOS_INTPIN_REG_WPERMIT_REG3 (0x002C)
++#define IOH_PCIEQOS_INT_REDUCE_CONTROL_REG_BASE (0x0040)
++#define CLKCFG_REG_OFFSET (0x500)
++
++/*structures*/
++/*! @ingroup PCIEQOS_InterfaceLayer
++ @struct ioh_pcieqos_reqt
++ @brief It is a structure used for preserving information related to the
++ pcieqos request.
++ @note
++ The concerned details should be provided during the read register,
++ write register and read / modify / write register.
++ @see
++ ioh_pcieqos_ioctl
++ */
++struct ioh_pcieqos_reqt {
++ unsigned long addr_offset; /*specifies the register address
++ offset */
++ unsigned long data; /*specifies the data */
++ unsigned long mask; /*specifies the mask */
++};
++
++/* exported function prototypes */
++/*! @ingroup PCIEQOS_InterfaceLayerAPI
++ @fn int ioh_pcieqos_open( struct inode *inode,struct file *file )
++ @brief Provides the functionality of initialization of the module
++ */
++int ioh_pcieqos_open(struct inode *inode, struct file *file);
++
++/*! @ingroup PCIEQOS_InterfaceLayerAPI
++ @fn int ioh_pcieqos_release(struct inode *inode,struct file *file)
++ @brief Provides the functionality of releasing the module
++ */
++int ioh_pcieqos_release(struct inode *inode, struct file *file);
++
++/*! @ingroup PCIEQOS_InterfaceLayerAPI
++ @fn int ioh_pcieqos_ioctl(struct inode * inode,struct file * file,
++ unsigned int cmd,
++ unsigned long arg)
++ @brief Provides the functionality of invoking various functionalities of
++ the PCIEQOS.
++ */
++int ioh_pcieqos_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
++ unsigned long arg);
++
++/**global variables*/
++extern u32 ioh_pcieqos_base_address; /* base address */
++extern s32 ioh_pcieqos_suspended; /* suspend status */
++
++extern s32 ioh_pcieqos_opencount;
++extern spinlock_t ioh_pcieqos_lock;
++extern const struct file_operations ioh_pcieqos_fops;
++#endif
+diff -urN linux-2.6.33-rc3/drivers/char/pch_pcieqos/pch_pcieqos_hal.c topcliff-2.6.33-rc3/drivers/char/pch_pcieqos/pch_pcieqos_hal.c
+--- linux-2.6.33-rc3/drivers/char/pch_pcieqos/pch_pcieqos_hal.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/char/pch_pcieqos/pch_pcieqos_hal.c 2010-03-08 17:51:33.000000000 +0900
+@@ -0,0 +1,550 @@
++/*!
++ * @file ioh_pcieqos_hal.c
++ * @brief Provides all the implementation of the interfaces pertaining to the
++ * HAL.
++ * @version 1.0.0.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 06/20/2009
++ * modified:
++ *
++ */
++
++/*includes*/
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/io.h>
++#include <linux/delay.h>
++#include "pch_common.h"
++#include "pch_debug.h"
++#include "pch_pcieqos.h"
++#include "pch_pcieqos_hal.h"
++
++/* Status Register offset */
++#define PCIEQOS_STATUS (0x00)
++
++/* Control Register offset */
++#define PCIEQOS_CONTROL (0x04)
++
++/* Time out value for Status Register */
++#define PCIEQOS_TIMEOUT (0x05)
++
++/* Enabling for writing ROM */
++#define IOH_PCIEQOS_ROM_WRITE_ENABLE (0x01)
++
++/* Disabling for writing ROM */
++#define IOH_PCIEQOS_ROM_WRITE_DISABLE (0x00)
++
++/* ROM data area start address offset */
++#define IOH_PCIEQOS_ROM_START_ADDR (0x14)
++
++/* MAX number of INT_REDUCE_CONTROL registers */
++#define MAX_NUM_INT_REDUCE_CONTROL_REG (128)
++
++/* global variables */
++struct ioh_pcieqos_reg {
++ u32 phub_id_reg; /* PHUB_ID register val */
++ u32 q_pri_val_reg; /* QUEUE_PRI_VAL register val */
++ u32 rc_q_maxsize_reg; /* RC_QUEUE_MAXSIZE register val */
++ u32 bri_q_maxsize_reg; /* BRI_QUEUE_MAXSIZE register val */
++ u32 comp_resp_timeout_reg; /* COMP_RESP_TIMEOUT register val */
++ u32 bus_slave_control_reg; /* BUS_SLAVE_CONTROL_REG register val */
++ u32 deadlock_avoid_type_reg; /* DEADLOCK_AVOID_TYPE register val */
++ u32 intpin_reg_wpermit_reg0; /* INTPIN_REG_WPERMIT register 0 val */
++ u32 intpin_reg_wpermit_reg1; /* INTPIN_REG_WPERMIT register 1 val */
++ u32 intpin_reg_wpermit_reg2; /* INTPIN_REG_WPERMIT register 2 val */
++ u32 intpin_reg_wpermit_reg3; /* INTPIN_REG_WPERMIT register 3 val */
++ /* INT_REDUCE_CONTROL registers val */
++ u32 int_reduce_control_reg[MAX_NUM_INT_REDUCE_CONTROL_REG];
++#ifdef IOH_CAN_PCLK_50MHZ
++ u32 clkcfg_reg; /* CLK CFG register val */
++#endif
++} g_ioh_pcieqos_reg;
++
++/*functions implementations*/
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn void ioh_pcieqos_read_reg(unsigned long reg_addr_offset,
++ unsigned long *data)
++ @remarks Implements the functionality of reading register.
++ @param reg_addr_offset [@ref IN] Contains the register offset address value
++ @param *data [@ref INOUT] Contains the register value
++ @retval NONE
++ @see
++ */
++void ioh_pcieqos_read_reg(unsigned long reg_addr_offset, unsigned long *data)
++{
++ unsigned long reg_addr = ioh_pcieqos_base_address + reg_addr_offset;
++ *data = IOH_READ32(reg_addr);
++
++ return;
++}
++
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn void ioh_pcieqos_write_reg(unsigned long reg_addr_offset,
++ unsigned long data)
++ @remarks Implements the functionality of writing register.
++ @param reg_addr_offset [@ref IN] Contains the register offset address value
++ @param data [@ref IN] Contains the writing value
++ @retval NONE
++ @see
++ */
++void ioh_pcieqos_write_reg(unsigned long reg_addr_offset, unsigned long data)
++{
++ unsigned long reg_addr = ioh_pcieqos_base_address + reg_addr_offset;
++ IOH_WRITE32(data, reg_addr);
++
++ return;
++}
++
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn void ioh_pcieqos_read_modify_write_reg(unsigned long reg_addr_offset,
++ unsigned long data, unsigned long mask)
++ @remarks Implements the functionality of reading, modifying and writing
++ register.
++ @param reg_addr_offset [@ref IN] Contains the register offset address value
++ @param data [@ref IN] Contains the writing value
++ @param mask [@ref IN] Contains the mask value
++ @retval NONE
++ @see
++ */
++void ioh_pcieqos_read_modify_write_reg(unsigned long reg_addr_offset,
++ unsigned long data, unsigned long mask)
++{
++ unsigned long reg_addr = ioh_pcieqos_base_address + reg_addr_offset;
++ IOH_WRITE32(((IOH_READ32(reg_addr) & ~mask)) | data, reg_addr);
++
++ return;
++}
++
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn int ioh_pcieqos_read_gbe_mac_addr(unsigned long offset_address,
++ unsigned char *data)
++ @param unsigned long offset_address [@ref IN] Contains the Gigabit
++ Ethernet MAC address offset value
++ @param *data [@ref INOUT] Contains the Gigabit
++ Ethernet MAC address value
++ @retval return value [@ref OUT] contains the result
++ for the reading Gigabit Ethernet MAC address attempt
++ @see
++ */
++int ioh_pcieqos_read_gbe_mac_addr(unsigned long offset_address,
++ unsigned char *data)
++{
++ int retval = IOH_PCIEQOS_SUCCESS;
++
++ retval = ioh_pcieqos_read_serial_rom_val(offset_address, data);
++
++ return retval;
++}
++EXPORT_SYMBOL(ioh_pcieqos_read_gbe_mac_addr);
++
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn int ioh_pcieqos_write_gbe_mac_addr(unsigned long offset_address,
++ unsigned char data)
++ @param unsigned long offset_address [@ref IN] Contains the Gigabit
++ Ethernet MAC address offset value
++ @param data [@ref IN] Contains the Gigabit Ethernet
++ MAC address value
++ @retval return value [@ref OUT] contains the result for the
++ writing Gigabit Ethernet MAC address attempt
++ @see
++ */
++int ioh_pcieqos_write_gbe_mac_addr(unsigned long offset_address,
++ unsigned char data)
++{
++ int retval = IOH_PCIEQOS_SUCCESS;
++
++ retval = ioh_pcieqos_gbe_serial_rom_conf();
++ retval |= ioh_pcieqos_write_serial_rom_val(offset_address, data);
++
++ return retval;
++}
++EXPORT_SYMBOL(ioh_pcieqos_write_gbe_mac_addr);
++
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn void ioh_pcieqos_save_reg_conf(void)
++ @remarks saves register configuration
++ @param NONE
++ @retval NONE
++ @see
++ ioh_pcieqos_suspend
++ */
++void ioh_pcieqos_save_reg_conf(void)
++{
++ u32 base_addr = ioh_pcieqos_base_address;
++ u32 i = 0;
++
++ IOH_DEBUG("ioh_pcieqos_save_reg_conf ENTRY\n");
++ /* to store contents of PHUB_ID register */
++ g_ioh_pcieqos_reg.phub_id_reg =
++ IOH_READ32(base_addr + IOH_PCIEQOS_PHUB_ID_REG);
++ /* to store contents of QUEUE_PRI_VAL register */
++ g_ioh_pcieqos_reg.q_pri_val_reg =
++ IOH_READ32(base_addr + IOH_PCIEQOS_QUEUE_PRI_VAL_REG);
++ /* to store contents of RC_QUEUE_MAXSIZE register */
++ g_ioh_pcieqos_reg.rc_q_maxsize_reg =
++ IOH_READ32(base_addr + IOH_PCIEQOS_RC_QUEUE_MAXSIZE_REG);
++ /* to store contents of BRI_QUEUE_MAXSIZE register */
++ g_ioh_pcieqos_reg.bri_q_maxsize_reg =
++ IOH_READ32(base_addr + IOH_PCIEQOS_BRI_QUEUE_MAXSIZE_REG);
++ /* to store contents of COMP_RESP_TIMEOUT register */
++ g_ioh_pcieqos_reg.comp_resp_timeout_reg =
++ IOH_READ32(base_addr + IOH_PCIEQOS_COMP_RESP_TIMEOUT_REG);
++ /* to store contents of BUS_SLAVE_CONTROL_REG register */
++ g_ioh_pcieqos_reg.bus_slave_control_reg =
++ IOH_READ32(base_addr + IOH_PCIEQOS_BUS_SLAVE_CONTROL_REG);
++ /* to store contents of DEADLOCK_AVOID_TYPE register */
++ g_ioh_pcieqos_reg.deadlock_avoid_type_reg =
++ IOH_READ32(base_addr + IOH_PCIEQOS_DEADLOCK_AVOID_TYPE_REG);
++ /* to store contents of INTPIN_REG_WPERMIT register 0 */
++ g_ioh_pcieqos_reg.intpin_reg_wpermit_reg0 =
++ IOH_READ32(base_addr + IOH_PCIEQOS_INTPIN_REG_WPERMIT_REG0);
++ /* to store contents of INTPIN_REG_WPERMIT register 1 */
++ g_ioh_pcieqos_reg.intpin_reg_wpermit_reg1 =
++ IOH_READ32(base_addr + IOH_PCIEQOS_INTPIN_REG_WPERMIT_REG1);
++ /* to store contents of INTPIN_REG_WPERMIT register 2 */
++ g_ioh_pcieqos_reg.intpin_reg_wpermit_reg2 =
++ IOH_READ32(base_addr + IOH_PCIEQOS_INTPIN_REG_WPERMIT_REG2);
++ /* to store contents of INTPIN_REG_WPERMIT register 3 */
++ g_ioh_pcieqos_reg.intpin_reg_wpermit_reg3 =
++ IOH_READ32(base_addr + IOH_PCIEQOS_INTPIN_REG_WPERMIT_REG3);
++ IOH_DEBUG
++ ("ioh_pcieqos_save_reg_conf : g_ioh_pcieqos_reg.phub_id_reg=%x, \
++ g_ioh_pcieqos_reg.q_pri_val_reg=%x, \
++ g_ioh_pcieqos_reg.rc_q_maxsize_reg=%x, \
++ g_ioh_pcieqos_reg.bri_q_maxsize_reg=%x, \
++ g_ioh_pcieqos_reg.comp_resp_timeout_reg=%x, \
++ g_ioh_pcieqos_reg.bus_slave_control_reg=%x, \
++ g_ioh_pcieqos_reg.deadlock_avoid_type_reg=%x, \
++ g_ioh_pcieqos_reg.intpin_reg_wpermit_reg0=%x, \
++ g_ioh_pcieqos_reg.intpin_reg_wpermit_reg1=%x, \
++ g_ioh_pcieqos_reg.intpin_reg_wpermit_reg2=%x, \
++ g_ioh_pcieqos_reg.intpin_reg_wpermit_reg3=%x\n",
++ g_ioh_pcieqos_reg.phub_id_reg,
++ g_ioh_pcieqos_reg.q_pri_val_reg,
++ g_ioh_pcieqos_reg.rc_q_maxsize_reg,
++ g_ioh_pcieqos_reg.bri_q_maxsize_reg,
++ g_ioh_pcieqos_reg.comp_resp_timeout_reg,
++ g_ioh_pcieqos_reg.bus_slave_control_reg,
++ g_ioh_pcieqos_reg.deadlock_avoid_type_reg,
++ g_ioh_pcieqos_reg.intpin_reg_wpermit_reg0,
++ g_ioh_pcieqos_reg.intpin_reg_wpermit_reg1,
++ g_ioh_pcieqos_reg.intpin_reg_wpermit_reg2,
++ g_ioh_pcieqos_reg.intpin_reg_wpermit_reg3);
++ /* to store contents of INT_REDUCE_CONTROL registers */
++ for (i = 0; i < MAX_NUM_INT_REDUCE_CONTROL_REG; i++) {
++ g_ioh_pcieqos_reg.int_reduce_control_reg[i] =
++ IOH_READ32(base_addr +
++ IOH_PCIEQOS_INT_REDUCE_CONTROL_REG_BASE + 4 * i);
++ IOH_DEBUG
++ ("ioh_pcieqos_save_reg_conf : \
++ g_ioh_pcieqos_reg.int_reduce_control_reg[%d]=%x\n",
++ i, g_ioh_pcieqos_reg.int_reduce_control_reg[i]);
++ }
++#ifdef IOH_CAN_PCLK_50MHZ
++ /* save clk cfg register */
++ g_ioh_pcieqos_reg.clkcfg_reg =
++ IOH_READ32(base_addr + CLKCFG_REG_OFFSET);
++#endif
++ return;
++}
++
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn void ioh_pcieqos_restore_reg_conf(void)
++ @remarks restore register configuration
++ @param NONE
++ @retval NONE
++ @see
++ ioh_pcieqos_resume
++ */
++void ioh_pcieqos_restore_reg_conf(void)
++{
++ u32 base_addr = ioh_pcieqos_base_address;
++ u32 i = 0;
++
++ IOH_DEBUG("ioh_pcieqos_restore_reg_conf ENTRY\n");
++ /* to store contents of PHUB_ID register */
++ IOH_WRITE32(g_ioh_pcieqos_reg.phub_id_reg,
++ base_addr + IOH_PCIEQOS_PHUB_ID_REG);
++ /* to store contents of QUEUE_PRI_VAL register */
++ IOH_WRITE32(g_ioh_pcieqos_reg.q_pri_val_reg,
++ base_addr + IOH_PCIEQOS_QUEUE_PRI_VAL_REG);
++ /* to store contents of RC_QUEUE_MAXSIZE register */
++ IOH_WRITE32(g_ioh_pcieqos_reg.rc_q_maxsize_reg,
++ base_addr + IOH_PCIEQOS_RC_QUEUE_MAXSIZE_REG);
++ /* to store contents of BRI_QUEUE_MAXSIZE register */
++ IOH_WRITE32(g_ioh_pcieqos_reg.bri_q_maxsize_reg,
++ base_addr + IOH_PCIEQOS_BRI_QUEUE_MAXSIZE_REG);
++ /* to store contents of COMP_RESP_TIMEOUT register */
++ IOH_WRITE32(g_ioh_pcieqos_reg.comp_resp_timeout_reg,
++ base_addr + IOH_PCIEQOS_COMP_RESP_TIMEOUT_REG);
++ /* to store contents of BUS_SLAVE_CONTROL_REG register */
++ IOH_WRITE32(g_ioh_pcieqos_reg.bus_slave_control_reg,
++ base_addr + IOH_PCIEQOS_BUS_SLAVE_CONTROL_REG);
++ /* to store contents of DEADLOCK_AVOID_TYPE register */
++ IOH_WRITE32(g_ioh_pcieqos_reg.deadlock_avoid_type_reg,
++ base_addr + IOH_PCIEQOS_DEADLOCK_AVOID_TYPE_REG);
++ /* to store contents of INTPIN_REG_WPERMIT register 0 */
++ IOH_WRITE32(g_ioh_pcieqos_reg.intpin_reg_wpermit_reg0,
++ base_addr + IOH_PCIEQOS_INTPIN_REG_WPERMIT_REG0);
++ /* to store contents of INTPIN_REG_WPERMIT register 1 */
++ IOH_WRITE32(g_ioh_pcieqos_reg.intpin_reg_wpermit_reg1,
++ base_addr + IOH_PCIEQOS_INTPIN_REG_WPERMIT_REG1);
++ /* to store contents of INTPIN_REG_WPERMIT register 2 */
++ IOH_WRITE32(g_ioh_pcieqos_reg.intpin_reg_wpermit_reg2,
++ base_addr + IOH_PCIEQOS_INTPIN_REG_WPERMIT_REG2);
++ /* to store contents of INTPIN_REG_WPERMIT register 3 */
++ IOH_WRITE32(g_ioh_pcieqos_reg.intpin_reg_wpermit_reg3,
++ base_addr + IOH_PCIEQOS_INTPIN_REG_WPERMIT_REG3);
++ IOH_DEBUG
++ ("ioh_pcieqos_save_reg_conf : g_ioh_pcieqos_reg.phub_id_reg=%x, \
++ g_ioh_pcieqos_reg.q_pri_val_reg=%x, \
++ g_ioh_pcieqos_reg.rc_q_maxsize_reg=%x, \
++ g_ioh_pcieqos_reg.bri_q_maxsize_reg=%x, \
++ g_ioh_pcieqos_reg.comp_resp_timeout_reg=%x, \
++ g_ioh_pcieqos_reg.bus_slave_control_reg=%x, \
++ g_ioh_pcieqos_reg.deadlock_avoid_type_reg=%x, \
++ g_ioh_pcieqos_reg.intpin_reg_wpermit_reg0=%x, \
++ g_ioh_pcieqos_reg.intpin_reg_wpermit_reg1=%x, \
++ g_ioh_pcieqos_reg.intpin_reg_wpermit_reg2=%x, \
++ g_ioh_pcieqos_reg.intpin_reg_wpermit_reg3=%x\n",
++ g_ioh_pcieqos_reg.phub_id_reg, g_ioh_pcieqos_reg.q_pri_val_reg,
++ g_ioh_pcieqos_reg.rc_q_maxsize_reg,
++ g_ioh_pcieqos_reg.bri_q_maxsize_reg,
++ g_ioh_pcieqos_reg.comp_resp_timeout_reg,
++ g_ioh_pcieqos_reg.bus_slave_control_reg,
++ g_ioh_pcieqos_reg.deadlock_avoid_type_reg,
++ g_ioh_pcieqos_reg.intpin_reg_wpermit_reg0,
++ g_ioh_pcieqos_reg.intpin_reg_wpermit_reg1,
++ g_ioh_pcieqos_reg.intpin_reg_wpermit_reg2,
++ g_ioh_pcieqos_reg.intpin_reg_wpermit_reg3);
++ /* to store contents of INT_REDUCE_CONTROL register */
++ for (i = 0; i < MAX_NUM_INT_REDUCE_CONTROL_REG; i++) {
++ IOH_WRITE32(g_ioh_pcieqos_reg.int_reduce_control_reg[i],
++ base_addr +
++ IOH_PCIEQOS_INT_REDUCE_CONTROL_REG_BASE + 4 * i);
++ IOH_DEBUG
++ ("ioh_pcieqos_save_reg_conf : \
++ g_ioh_pcieqos_reg.int_reduce_control_reg[%d]=%x\n",
++ i, g_ioh_pcieqos_reg.int_reduce_control_reg[i]);
++ }
++
++#ifdef IOH_CAN_PCLK_50MHZ
++ /*restore the clock config reg */
++ IOH_WRITE32(g_ioh_pcieqos_reg.clkcfg_reg,
++ base_addr + CLKCFG_REG_OFFSET);
++#endif
++
++ return;
++}
++
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn void ioh_pcieqos_read_serial_rom(unsigned long offset_address,
++ unsigned char *data)
++ @remarks Implements the functionality of reading Serial ROM.
++ @param unsigned long offset_address [@ref IN] Contains the Serial ROM
++ address offset value
++ @param *data [@ref INOUT] Contains the Serial
++ ROM value
++ @retval returnvalue [@ref OUT] contains the result for the reading Serial
++ ROM attempt
++ @see
++ */
++int ioh_pcieqos_read_serial_rom(unsigned long offset_address,
++ unsigned char *data)
++{
++ unsigned long mem_addr =
++ ioh_pcieqos_extrom_base_address + offset_address;
++
++ IOH_DEBUG("ioh_pcieqos_read_serial_rom:mem_addr=0x%08x\n", mem_addr);
++ *data = IOH_READ8(mem_addr);
++
++ return IOH_PCIEQOS_SUCCESS;
++}
++
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn void ioh_pcieqos_write_serial_rom(unsigned long offset_address,
++ unsigned char data)
++ @remarks Implements the functionality of writing Serial ROM.
++ @param unsigned long offset_address [@ref IN] Contains the Serial ROM
++ address offset value
++ @param data [@ref IN] Contains the Serial ROM value
++ @retval returnvalue [@ref OUT] contains the result for the writing Serial
++ ROM attempt
++ @see
++ */
++int ioh_pcieqos_write_serial_rom(unsigned long offset_address,
++ unsigned char data)
++{
++ int retval = IOH_PCIEQOS_SUCCESS;
++ unsigned long mem_addr =
++ ioh_pcieqos_extrom_base_address + offset_address;
++ int i = 0;
++ unsigned long word_data = 0;
++
++ IOH_DEBUG("ioh_pcieqos_write_serial_rom:mem_addr=0x%08x\n", mem_addr);
++ IOH_WRITE32(IOH_PCIEQOS_ROM_WRITE_ENABLE,
++ ioh_pcieqos_extrom_base_address + PCIEQOS_CONTROL);
++
++ word_data = IOH_READ32((mem_addr & 0xFFFFFFFC));
++ IOH_DEBUG("word_data=0x%08x\n", word_data);
++ IOH_DEBUG("data=0x%02x\n", data);
++ switch (mem_addr % 4) {
++ case 0:
++ {
++ word_data &= 0xFFFFFF00;
++ IOH_WRITE32((word_data | (unsigned long)data),
++ (mem_addr & 0xFFFFFFFC));
++ }
++ case 1:
++ {
++ word_data &= 0xFFFF00FF;
++ IOH_WRITE32((word_data | ((unsigned long)data << 8)),
++ (mem_addr & 0xFFFFFFFC));
++ }
++ case 2:
++ {
++ word_data &= 0xFF00FFFF;
++ IOH_WRITE32((word_data | ((unsigned long)data << 16)),
++ (mem_addr & 0xFFFFFFFC));
++ }
++ case 3:
++ {
++ word_data &= 0x00FFFFFF;
++ IOH_WRITE32((word_data | ((unsigned long)data << 24)),
++ (mem_addr & 0xFFFFFFFC));
++ }
++ }
++ while (0x00 !=
++ IOH_READ8(ioh_pcieqos_extrom_base_address + PCIEQOS_STATUS)) {
++ msleep(1);
++ if (PCIEQOS_TIMEOUT == i) {
++ retval = IOH_PCIEQOS_FAIL;
++ break;
++ }
++ i++;
++ }
++
++ IOH_WRITE32(IOH_PCIEQOS_ROM_WRITE_DISABLE,
++ ioh_pcieqos_extrom_base_address + PCIEQOS_CONTROL);
++
++ return retval;
++}
++
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn void ioh_pcieqos_read_serial_rom_val(unsigned long offset_address,
++ unsigned char *data)
++ @remarks Implements the functionality of reading Serial ROM value.
++ @param unsigned long offset_address [@ref IN] Contains the Serial ROM
++ address offset value
++ @param *data [@ref INOUT] Contains the Serial
++ ROM value
++ @retval returnvalue [@ref OUT] contains the result for the reading Serial
++ ROM attempt
++ @see
++ */
++int ioh_pcieqos_read_serial_rom_val(unsigned long offset_address,
++ unsigned char *data)
++{
++ int retval = IOH_PCIEQOS_SUCCESS;
++ unsigned long mem_addr;
++
++ mem_addr =
++ (offset_address / 4 * 8) + 3 - (offset_address % 4) +
++ IOH_PCIEQOS_ROM_START_ADDR;
++ retval = ioh_pcieqos_read_serial_rom(mem_addr, data);
++
++ return retval;
++}
++
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn void ioh_pcieqos_write_serial_rom_val(unsigned long offset_address,
++ unsigned char data)
++ @remarks Implements the functionality of writing Serial ROM value.
++ @param unsigned long offset_address [@ref IN] Contains the Serial ROM
++ address offset value
++ @param data [@ref IN] Contains the Serial ROM value
++ @retval returnvalue [@ref OUT] contains the result for the writing Serial
++ ROM attempt
++ @see
++ */
++int ioh_pcieqos_write_serial_rom_val(unsigned long offset_address,
++ unsigned char data)
++{
++ int retval = IOH_PCIEQOS_SUCCESS;
++ unsigned long mem_addr;
++
++ mem_addr =
++ (offset_address / 4 * 8) + 3 - (offset_address % 4) +
++ IOH_PCIEQOS_ROM_START_ADDR;
++ retval = ioh_pcieqos_write_serial_rom(mem_addr, data);
++
++ return retval;
++}
++
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn int ioh_pcieqos_gbe_serial_rom_conf(void)
++ @remarks makes Serial ROM header format configuration for Gigabit Ethernet
++ MAC address
++ @param NONE
++ @retval returnvalue [@ref OUT] contains the result for the writing Serial
++ ROM attempt
++ @see
++ */
++int ioh_pcieqos_gbe_serial_rom_conf(void)
++{
++ int retval = IOH_PCIEQOS_SUCCESS;
++
++ retval |= ioh_pcieqos_write_serial_rom(0x0b, 0xbc);
++ retval |= ioh_pcieqos_write_serial_rom(0x0a, 0x10);
++ retval |= ioh_pcieqos_write_serial_rom(0x09, 0x01);
++ retval |= ioh_pcieqos_write_serial_rom(0x08, 0x02);
++
++ retval |= ioh_pcieqos_write_serial_rom(0x0f, 0x00);
++ retval |= ioh_pcieqos_write_serial_rom(0x0e, 0x00);
++ retval |= ioh_pcieqos_write_serial_rom(0x0d, 0x00);
++ retval |= ioh_pcieqos_write_serial_rom(0x0c, 0x80);
++
++ retval |= ioh_pcieqos_write_serial_rom(0x13, 0xbc);
++ retval |= ioh_pcieqos_write_serial_rom(0x12, 0x10);
++ retval |= ioh_pcieqos_write_serial_rom(0x11, 0x01);
++ retval |= ioh_pcieqos_write_serial_rom(0x10, 0x18);
++
++ retval |= ioh_pcieqos_write_serial_rom(0x1b, 0xbc);
++ retval |= ioh_pcieqos_write_serial_rom(0x1a, 0x10);
++ retval |= ioh_pcieqos_write_serial_rom(0x19, 0x01);
++ retval |= ioh_pcieqos_write_serial_rom(0x18, 0x19);
++
++ retval |= ioh_pcieqos_write_serial_rom(0x23, 0xbc);
++ retval |= ioh_pcieqos_write_serial_rom(0x22, 0x10);
++ retval |= ioh_pcieqos_write_serial_rom(0x21, 0x01);
++ retval |= ioh_pcieqos_write_serial_rom(0x20, 0x3a);
++
++ retval |= ioh_pcieqos_write_serial_rom(0x27, 0x01);
++ retval |= ioh_pcieqos_write_serial_rom(0x26, 0x00);
++ retval |= ioh_pcieqos_write_serial_rom(0x25, 0x00);
++ retval |= ioh_pcieqos_write_serial_rom(0x24, 0x00);
++
++ return retval;
++}
++
+diff -urN linux-2.6.33-rc3/drivers/char/pch_pcieqos/pch_pcieqos_hal.h topcliff-2.6.33-rc3/drivers/char/pch_pcieqos/pch_pcieqos_hal.h
+--- linux-2.6.33-rc3/drivers/char/pch_pcieqos/pch_pcieqos_hal.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/char/pch_pcieqos/pch_pcieqos_hal.h 2010-03-08 12:02:57.000000000 +0900
+@@ -0,0 +1,125 @@
++#ifndef __IOH_PCIEQOS_HAL_H__
++#define __IOH_PCIEQOS_HAL_H__
++/*!
++ * @file ioh_pcieqos_hal.h
++ * @brief Provides all the interfaces pertaining to the HAL.
++ * @version 1.0.0.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 06/20/2009
++ * modified:
++ *
++ */
++
++/* exported function prototypes */
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn void ioh_pcieqos_read_reg(unsigned long reg_addr_offset,
++ unsigned long *data)
++ @brief Provides the functionality of reading register
++ */
++void ioh_pcieqos_read_reg(unsigned long reg_addr_offset, unsigned long *data);
++
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn ioh_pcieqos_write_reg(unsigned long reg_addr_offset, unsigned long data)
++ @brief Provides the functionality of writing register
++ */
++void ioh_pcieqos_write_reg(unsigned long reg_addr_offset, unsigned long data);
++
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn ioh_pcieqos_read_modify_write_reg(unsigned long reg_addr_offset,
++ unsigned long data, unsigned long mask)
++ @brief Provides the functionality of reading, modifying and writing register
++ */
++void ioh_pcieqos_read_modify_write_reg(unsigned long reg_addr_offset,
++ unsigned long data, unsigned long mask);
++
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn int ioh_pcieqos_read_gbe_mac_addr(unsigned long offset_address,
++ unsigned char *data)
++ @brief Provides the functionality of reading Gigabit Ethernet MAC address
++ */
++int ioh_pcieqos_read_gbe_mac_addr(unsigned long offset_address,
++ unsigned char *data);
++
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn int ioh_pcieqos_write_gbe_mac_addr(unsigned long offset_address,
++ unsigned char data)
++ @brief Provides the functionality of writing Gigabit Ethernet MAC address
++ */
++int ioh_pcieqos_write_gbe_mac_addr(unsigned long offset_address,
++ unsigned char data);
++
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn void ioh_pcieqos_save_reg_conf(void)
++ @brief saves register configuration
++ */
++void ioh_pcieqos_save_reg_conf(void);
++
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn void ioh_pcieqos_restore_reg_conf(void)
++ @brief restores register configuration
++ */
++void ioh_pcieqos_restore_reg_conf(void);
++
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn int ioh_pcieqos_read_serial_rom(unsigned long offset_address,
++ unsigned char *data)
++ @brief Provides the functionality of reading Serial ROM
++ */
++int ioh_pcieqos_read_serial_rom(unsigned long offset_address,
++ unsigned char *data);
++
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn int ioh_pcieqos_write_serial_rom(unsigned long offset_address,
++ unsigned char data)
++ @brief Provides the functionality of writing Serial ROM
++ */
++int ioh_pcieqos_write_serial_rom(unsigned long offset_address,
++ unsigned char data);
++
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn int ioh_pcieqos_read_serial_rom_val(unsigned long offset_address,
++ unsigned char *data)
++ @brief Provides the functionality of reading Serial ROM value
++ */
++int ioh_pcieqos_read_serial_rom_val(unsigned long offset_address,
++ unsigned char *data);
++
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn int ioh_pcieqos_write_serial_rom_val(unsigned long offset_address,
++ unsigned char data)
++ @brief Provides the functionality of writing Serial ROM value
++ */
++int ioh_pcieqos_write_serial_rom_val(unsigned long offset_address,
++ unsigned char data);
++
++/*! @ingroup PCIEQOS_HALLayerAPI
++ @fn int ioh_pcieqos_gbe_serial_rom_conf(void)
++ @brief makes Serial ROM data format configuration for Gigabit Ethernet
++ MAC address
++ */
++int ioh_pcieqos_gbe_serial_rom_conf(void);
++
++/* global variables */
++extern u32 ioh_pcieqos_base_address;
++extern u32 ioh_pcieqos_extrom_base_address;
++#endif
+diff -urN linux-2.6.33-rc3/drivers/char/pch_pcieqos/pch_pcieqos_pci.c topcliff-2.6.33-rc3/drivers/char/pch_pcieqos/pch_pcieqos_pci.c
+--- linux-2.6.33-rc3/drivers/char/pch_pcieqos/pch_pcieqos_pci.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/char/pch_pcieqos/pch_pcieqos_pci.c 2010-03-08 17:51:26.000000000 +0900
+@@ -0,0 +1,523 @@
++/*!
++ * @file ioh_pcieqos_pci.c
++ * @brief Provides all the implementation of the interfaces pertaining to the
++ * pci and gpic registrations.
++ * @version 1.0.0.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 06/20/2009
++ * modified:
++ *
++ */
++/*includes*/
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include <linux/fs.h>
++#include <linux/cdev.h>
++#include <linux/string.h>
++
++#include "pch_common.h"
++#include "pch_debug.h"
++#include "pch_pcieqos.h"
++#include "pch_pcieqos_hal.h"
++
++/*macros*/
++
++/*! @ingroup PCIEQOS_PCILayer
++ @def PCI_DEVICE_ID_IOH1_PCIEQOS
++ @brief Outlines the PCI Device ID.
++ */
++#define PCI_DEVICE_ID_IOH1_PCIEQOS (0x8801)
++
++/*! @ingroup PCIEQOS_PCILayer
++ @def IOH_MINOR_NOS
++ @brief Outlines the PCIEQOS minor numbers limit.
++ */
++#define IOH_MINOR_NOS (1)
++
++/*values for configuring CLKCFG reg
++ * for CAN clock of 50Mhz*/
++
++/*! @ingroup PCIEQOS_PCILayer
++ @def CLKCFG_CAN_50MHZ
++ @brief CLKCFG register setting for CAN clock of 50Mhz.
++ */
++#define CLKCFG_CAN_50MHZ (0x12000000)
++
++/*! @ingroup PCIEQOS_PCILayer
++ @def CLKCFG_CANCLK_MASK
++ @brief Bit mask for bit fields in CLKCFG register
++ to set CAN clock to 50Mhz.
++ */
++#define CLKCFG_CANCLK_MASK (0xFF000000)
++
++/**global variables*/
++u32 ioh_pcieqos_base_address;
++u32 ioh_pcieqos_extrom_base_address;
++s32 ioh_pcieqos_suspended;
++
++/* ToDo: major number allocation via module parameter */
++static dev_t ioh_pcieqos_dev_no;
++static int ioh_pcieqos_major_no;
++
++static struct cdev ioh_pcieqos_dev;
++
++/*! @ingroup PCIEQOS_PCILayerAPI
++ @fn static int __devinit ioh_pcieqos_probe(struct pci_dev* ioh_pci_dev,
++ const struct pci_device_id* pci_id)
++ @brief Provides the functionality of probing the module
++ */
++static int __devinit ioh_pcieqos_probe(struct pci_dev *pdev, const
++ struct pci_device_id *id);
++
++/*! @ingroup PCIEQOS_PCILayerAPI
++ @fn static void __devexit ioh_pcieqos_remove(struct pci_dev * ioh_pci_dev)
++ @brief Provides the functionality of removing the module
++ */
++static void __devexit ioh_pcieqos_remove(struct pci_dev *pdev);
++
++/*! @ingroup PCIEQOS_PCILayerAPI
++ @fn static int ioh_pcieqos_suspend(struct pci_dev* pDev,pm_message_t state)
++ @brief Provides the functionality of suspending the module
++ */
++static int ioh_pcieqos_suspend(struct pci_dev *pdev, pm_message_t state);
++
++/*! @ingroup PCIEQOS_PCILayerAPI
++ @fn static int ioh_pcieqos_resume(struct pci_dev* pDev)
++ @brief Provides the functionality of resuming the module
++ */
++static int ioh_pcieqos_resume(struct pci_dev *pdev);
++
++/*structures*/
++/*! @ingroup PCIEQOS_PCILayerFacilitators
++ @static struct pci_device_id
++ @brief It is a structure used for perserving information related to the
++ device id.
++ @note
++ The concerned details should be provided as a reference in the pci driver
++ structure.
++ */
++static struct pci_device_id ioh_pcieqos_pcidev_id[] = {
++
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_IOH1_PCIEQOS)},
++ {0,}
++};
++
++/*! @ingroup PCIEQOS_PCILayerFacilitators
++ @static struct ioh_pcieqos_driver
++ @brief It is a structure used for perserving information related to the
++ pcieqos device and preserves function signatures to manipulate the device.
++ @note
++ The structure contains the various interfaces aspects
++ provided to the pci layer.
++ @see
++ ioh_pcieqos_probe
++ ioh_pcieqos_suspend
++ ioh_pcieqos_resume
++ ioh_pcieqos_remove
++ */
++static struct pci_driver ioh_pcieqos_driver = {
++ .name = "ioh_pcieqos",
++ .id_table = ioh_pcieqos_pcidev_id,
++ .probe = ioh_pcieqos_probe,
++ .remove = __devexit_p(ioh_pcieqos_remove),
++#ifdef CONFIG_PM
++ .suspend = ioh_pcieqos_suspend,
++ .resume = ioh_pcieqos_resume
++#endif
++};
++
++/*! @ingroup PCIEQOS_PCILayerAPI
++ * @fn static int __init ioh_pcieqos_pci_init(void)
++ * @brief Provides the functionality of initializing the module
++ * */
++static int __init ioh_pcieqos_pci_init(void);
++/*! @ingroup PCIEQOS_PCILayerAPI
++ * @fn static void __exit ioh_pcieqos_pci_exit(void)
++ * @brief Provides the functionality of exiting the module
++ * */
++static void __exit ioh_pcieqos_pci_exit(void);
++
++MODULE_DESCRIPTION("IOH PCIEQOS PCI Driver");
++MODULE_LICENSE("GPL");
++module_init(ioh_pcieqos_pci_init);
++module_exit(ioh_pcieqos_pci_exit);
++module_param(ioh_pcieqos_major_no, int, S_IRUSR | S_IWUSR);
++
++/*function implementations*/
++
++/*! @ingroup PCIEQOS_PCILayerAPI
++ @fn static int __init ioh_pcieqos_pci_init(void)
++ @remarks Implements the initialization functionality of the module.
++ @param NONE
++ @retval returnvalue [@ref OUT] contains the result for the concerned
++ attempt.
++ The result would generally comprise of success code
++ or failure code. The failure code will indicate reason for
++ failure.
++ @see
++ ioh_pcieqos_pci_exit
++ */
++static int __init ioh_pcieqos_pci_init(void)
++{
++ s32 ret;
++ ret = pci_register_driver(&ioh_pcieqos_driver);
++ IOH_DEBUG
++ ("ioh_pcieqos_pci_init : Invoked pci_register_driver\
++ successfully\n");
++ IOH_DEBUG("ioh_pcieqos_pci_init returns %d\n", ret);
++ return ret;
++}
++
++/*! @ingroup PCIEQOS_PCILayerAPI
++ @fn static void __exit ioh_pcieqos_pci_exit(void)
++ @remarks Implements the exit functionality of the module.
++ @param NONE
++ @retval returnvalue [@ref OUT] contains the result for the concerned
++ attempt.
++ The result would generally comprise of success code
++ or failure code. The failure code will indicate reason for
++ failure.
++ @see
++ ioh_pcieqos_pci_init
++ */
++static void __exit ioh_pcieqos_pci_exit(void)
++{
++ pci_unregister_driver(&ioh_pcieqos_driver);
++ IOH_DEBUG
++ ("ioh_pcieqos_pci_exit : Invoked pci_unregister_driver\
++ successfully\n");
++}
++
++/*! @ingroup PCIEQOS_PCILayerAPI
++ @fn static int __devinit ioh_pcieqos_probe(struct pci_dev* pdev,
++ const struct pci_device_id* id)
++ @remarks Implements the probe functionality of the module.
++ @param pdev [@ref INOUT] Contains the reference of the pci_dev structure
++ @param id [@ref INOUT] Contains the reference of the pci_device_id structure
++ @retval returnvalue [@ref OUT] contains the result for the concerned
++ attempt.
++ The result would generally comprise of success code
++ or failure code. The failure code will indicate reason for
++ failure.
++ @see
++ ioh_pcieqos_pci_init
++ */
++static int __devinit ioh_pcieqos_probe(struct pci_dev *pdev,
++ const struct pci_device_id *id)
++{
++
++ char *DRIVER_NAME = "ioh_pcieqos";
++ int ret;
++ unsigned int rom_size;
++
++ ioh_pcieqos_major_no = (ioh_pcieqos_major_no < 0
++ || ioh_pcieqos_major_no >
++ 254) ? 0 : ioh_pcieqos_major_no;
++
++ do {
++
++ ret = pci_enable_device(pdev);
++ if (ret) {
++ IOH_LOG(KERN_ERR,
++ "\nioh_pcieqos_probe : pci_enable_device\
++ FAILED");
++ break;
++ }
++ IOH_DEBUG("ioh_pcieqos_probe : pci_enable_device returns %d\n",
++ ret);
++
++ ret = pci_request_regions(pdev, DRIVER_NAME);
++ if (ret) {
++ IOH_LOG(KERN_ERR,
++ "ioh_pcieqos_probe : pci_request_regions\
++ FAILED");
++ pci_disable_device(pdev);
++ break;
++ }
++ IOH_DEBUG
++ ("ioh_pcieqos_probe : pci_request_regions returns %d\n",
++ ret);
++
++ ioh_pcieqos_base_address = (unsigned long)pci_iomap(pdev, 1, 0);
++
++ if (ioh_pcieqos_base_address == 0) {
++ IOH_LOG(KERN_ERR,
++ "ioh_pcieqos_probe : pci_iomap FAILED");
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
++ ret = -ENOMEM;
++ break;
++ }
++ IOH_DEBUG
++ ("ioh_pcieqos_probe : pci_iomap SUCCESS and value in\
++ ioh_pcieqos_base_address variable is 0x%08x\n",
++ ioh_pcieqos_base_address);
++
++ ioh_pcieqos_extrom_base_address =
++ (unsigned long)pci_map_rom(pdev, &rom_size);
++ if (ioh_pcieqos_extrom_base_address == 0) {
++ IOH_LOG(KERN_ERR,
++ "ioh_pcieqos_probe : pci_map_rom FAILED");
++ pci_iounmap(pdev, (void *)ioh_pcieqos_base_address);
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
++ ret = -ENOMEM;
++ break;
++ }
++ IOH_DEBUG
++ ("ioh_pcieqos_probe : pci_map_rom SUCCESS and value in\
++ ioh_pcieqos_extrom_base_address variable is 0x%08x\n",
++ ioh_pcieqos_extrom_base_address);
++
++ if (ioh_pcieqos_major_no) {
++ ioh_pcieqos_dev_no = MKDEV(ioh_pcieqos_major_no, 0);
++ ret =
++ register_chrdev_region(ioh_pcieqos_dev_no,
++ IOH_MINOR_NOS, DRIVER_NAME);
++ if (ret) {
++ IOH_LOG(KERN_ERR,
++ "ioh_pcieqos_probe :\
++ register_chrdev_region FAILED");
++ pci_unmap_rom(pdev,
++ (void *)
++ ioh_pcieqos_extrom_base_address);
++ pci_iounmap(pdev,
++ (void *)ioh_pcieqos_base_address);
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
++ break;
++ }
++ IOH_DEBUG
++ ("ioh_pcieqos_probe :\
++ register_chrdev_region returns %d\n",
++ ret);
++ } else {
++ ret =
++ alloc_chrdev_region(&ioh_pcieqos_dev_no, 0,
++ IOH_MINOR_NOS, DRIVER_NAME);
++ if (ret) {
++ IOH_LOG(KERN_ERR,
++ "ioh_pcieqos_probe :\
++ alloc_chrdev_region FAILED");
++ pci_unmap_rom(pdev,
++ (void *)
++ ioh_pcieqos_extrom_base_address);
++ pci_iounmap(pdev,
++ (void *)ioh_pcieqos_base_address);
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
++ break;
++ }
++ IOH_DEBUG
++ ("ioh_pcieqos_probe :\
++ alloc_chrdev_region returns %d\n",
++ ret);
++ }
++
++ cdev_init(&ioh_pcieqos_dev, &ioh_pcieqos_fops);
++ IOH_DEBUG
++ ("ioh_pcieqos_probe : cdev_init invoked successfully\n");
++
++ ioh_pcieqos_dev.owner = THIS_MODULE;
++ ioh_pcieqos_dev.ops = &ioh_pcieqos_fops;
++
++ ret =
++ cdev_add(&ioh_pcieqos_dev, ioh_pcieqos_dev_no,
++ IOH_MINOR_NOS);
++ if (ret) {
++ IOH_LOG(KERN_ERR,
++ "ioh_pcieqos_probe : cdev_add FAILED");
++ unregister_chrdev_region(ioh_pcieqos_dev_no,
++ IOH_MINOR_NOS);
++ pci_unmap_rom(pdev,
++ (void *)ioh_pcieqos_extrom_base_address);
++ pci_iounmap(pdev, (void *)ioh_pcieqos_base_address);
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
++ break;
++ }
++ IOH_DEBUG("ioh_pcieqos_probe : cdev_add returns %d\n", ret);
++
++#ifdef IOH_CAN_PCLK_50MHZ
++ /*set the clock config reg if CAN clock is 50Mhz */
++ IOH_DEBUG
++ ("ioh_pcieqos_probe : invoking\
++ ioh_pcieqos_read_modify_write_reg to set CLKCFG reg\
++ for CAN clk 50Mhz\n");
++ ioh_pcieqos_read_modify_write_reg(CLKCFG_REG_OFFSET,
++ CLKCFG_CAN_50MHZ,
++ CLKCFG_CANCLK_MASK);
++#endif
++ return IOH_PCIEQOS_SUCCESS;
++ } while (0);
++ IOH_DEBUG("ioh_pcieqos_probe returns %d\n", ret);
++ return ret;
++}
++
++/*! @ingroup PCIEQOS_PCILayerAPI
++ @fn static void __devexit ioh_pcieqos_remove(struct pci_dev * pdev)
++ @remarks Implements the remove functionality of the module.
++ @param pdev [@ref INOUT] Contains the reference of the pci_dev structure
++ @retval returnvalue [@ref OUT] contains the result for the concerned
++ attempt.
++ The result would generally comprise of success code
++ or failure code. The failure code will indicate reason for
++ failure.
++ @see
++ ioh_pcieqos_pci_init
++ */
++static void __devexit ioh_pcieqos_remove(struct pci_dev *pdev)
++{
++
++ cdev_del(&ioh_pcieqos_dev);
++ IOH_DEBUG("ioh_pcieqos_remove - cdev_del Invoked successfully\n");
++
++ unregister_chrdev_region(ioh_pcieqos_dev_no, IOH_MINOR_NOS);
++ IOH_DEBUG
++ ("ioh_pcieqos_remove - unregister_chrdev_region Invoked\
++ successfully\n");
++
++ pci_unmap_rom(pdev, (void *)ioh_pcieqos_extrom_base_address);
++
++ pci_iounmap(pdev, (void *)ioh_pcieqos_base_address);
++
++ IOH_DEBUG("ioh_pcieqos_remove - pci_iounmap Invoked successfully\n");
++
++ pci_release_regions(pdev);
++ IOH_DEBUG
++ ("ioh_pcieqos_remove - pci_release_regions Invoked successfully\n");
++
++ pci_disable_device(pdev);
++ IOH_DEBUG
++ ("ioh_pcieqos_remove - pci_disable_device Invoked successfully\n");
++
++}
++
++#ifdef CONFIG_PM
++
++/*! @ingroup PCIEQOS_PCILayerAPI
++ @fn static int ioh_pcieqos_suspend(struct pci_dev* pdev,pm_message_t state)
++ @remarks Implements the suspend functionality of the module.
++ @param pdev [@ref INOUT] Contains the reference of the pci_dev structure
++ @param state [@ref INOUT] Contains the reference of the pm_message_t
++ structure
++ @retval returnvalue [@ref OUT] contains the result for the concerned
++ attempt.
++ The result would generally comprise of success code
++ or failure code. The failure code will indicate reason for
++ failure.
++ @see
++ ioh_pcieqos_pci_init
++ ioh_pcieqos_resume
++ */
++static int ioh_pcieqos_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ int ret;
++
++ ioh_pcieqos_suspended = true; /* For blocking further IOCTLs */
++
++ ioh_pcieqos_save_reg_conf();
++ IOH_DEBUG
++ ("ioh_pcieqos_suspend - ioh_pcieqos_save_reg_conf Invoked\
++ successfully\n");
++
++ ret = pci_save_state(pdev);
++ if (ret) {
++ IOH_LOG(KERN_ERR,
++ " ioh_pcieqos_suspend -pci_save_state returns-%d\n",
++ ret);
++ return ret;
++ }
++
++ pci_enable_wake(pdev, PCI_D3hot, 0);
++ IOH_DEBUG
++ ("ioh_pcieqos_suspend - pci_enable_wake Invoked successfully\n");
++
++ IOH_DEBUG("ioh_pcieqos_suspend - pci_save_state returns %d\n", ret);
++
++ pci_disable_device(pdev);
++ IOH_DEBUG
++ ("ioh_pcieqos_suspend - pci_disable_device Invoked successfully\n");
++
++ pci_set_power_state(pdev, pci_choose_state(pdev, state));
++ IOH_DEBUG
++ ("ioh_pcieqos_suspend - pci_set_power_state Invoked\
++ successfully\n");
++
++ IOH_DEBUG("ioh_pcieqos_suspend - return %d\n", IOH_PCIEQOS_SUCCESS);
++
++ return IOH_PCIEQOS_SUCCESS;
++}
++
++/*! @ingroup PCIEQOS_PCILayerAPI
++ @fn static int ioh_pcieqos_resume(struct pci_dev* pdev)
++ @remarks Implements the resume functionality of the module.
++ @param pdev [@ref INOUT] Contains the reference of the pci_dev structure
++ @retval returnvalue [@ref OUT] contains the result for the concerned\
++ attempt.
++ The result would generally comprise of success code
++ or failure code. The failure code will indicate reason for
++ failure.
++ @see
++ ioh_pcieqos_pci_init
++ ioh_pcieqos_suspend
++ */
++static int ioh_pcieqos_resume(struct pci_dev *pdev)
++{
++
++ int ret;
++
++ pci_set_power_state(pdev, PCI_D0);
++ IOH_DEBUG
++ ("ioh_pcieqos_resume - pci_set_power_state Invoked successfully\n");
++
++ pci_restore_state(pdev);
++ IOH_DEBUG
++ ("ioh_pcieqos_resume - pci_restore_state Invoked successfully\n");
++
++ ret = pci_enable_device(pdev);
++ if (ret) {
++ IOH_LOG(KERN_ERR,
++ "ioh_pcieqos_resume-pci_enable_device failed ");
++ return ret;
++ }
++
++ IOH_DEBUG("ioh_pcieqos_resume - pci_enable_device returns -%d\n", ret);
++
++ pci_enable_wake(pdev, PCI_D3hot, 0);
++ IOH_DEBUG
++ ("ioh_pcieqos_resume - pci_enable_wake Invoked successfully\n");
++
++ ioh_pcieqos_restore_reg_conf();
++ IOH_DEBUG
++ ("ioh_pcieqos_resume - ioh_pcieqos_restore_reg_conf Invoked\
++ successfully\n");
++
++ ioh_pcieqos_suspended = false;
++
++ IOH_DEBUG("ioh_pcieqos_resume returns- %d\n", IOH_PCIEQOS_SUCCESS);
++ return IOH_PCIEQOS_SUCCESS;
++}
++
++#endif
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-spi.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-spi.patch
new file mode 100644
index 0000000..43129c7
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-spi.patch
@@ -0,0 +1,4377 @@
+From: Masayuki Ohtake <masa-korg@dsn.okisemi.com>
+Subject: OKI Semiconductor PCH SPI driver
+
+This driver implements SPI controls for PCH.
+
+Signed-off-by: Masayuki Ohtake <masa-korg@dsn.okisemi.com>
+Acked-by: Wang Qi <qi.wang@intel.com>
+
+---
+ drivers/spi/Kconfig | 19 ++
+ drivers/spi/Makefile | 3
+ drivers/spi/pch_common.h | 146
+ drivers/spi/pch_spi.h | 389
+ drivers/spi/pch_spi_hal.h | 298
+ drivers/spi/pch_spi_pci.c | 812
+ drivers/spi/pch_debug.h | 60
+ drivers/spi/pch_spi_hal.c | 1208
+ drivers/spi/pch_spi_main.c | 1323
+ drivers/spi/pch_spi_platform_devices.c | 50
+
++++++++++++++++++++++++++++++++ 10 files changed, yyy insertions(+)
+diff -urN linux-2.6.33-rc3/drivers/spi/Kconfig topcliff-2.6.33-rc3/drivers/spi/Kconfig
+--- linux-2.6.33-rc3/drivers/spi/Kconfig 2010-01-06 09:02:46.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/spi/Kconfig 2010-03-06 07:48:16.000000000 +0900
+@@ -53,6 +53,25 @@
+
+ comment "SPI Master Controller Drivers"
+
++config PCH_SPI_PLATFORM_DEVICE
++ bool "PCH SPI Device"
++# depends on PCH_SPI
++ help
++ This registers SPI devices for using with PCH SPI controllers.
++
++config PCH_SPI_PLATFORM_DEVICE_COUNT
++ int "PCH SPI Bus count"
++ range 1 2
++ depends on PCH_SPI_PLATFORM_DEVICE
++ help
++ The number of SPI buses/channels supported by the PCH SPI controller.
++
++config PCH_SPI
++ tristate "PCH SPI Controller"
++ depends on (PCI) && PCH_SPI_PLATFORM_DEVICE
++ help
++ This selects a driver for the PCH SPI Controller
++
+ config SPI_ATMEL
+ tristate "Atmel SPI Controller"
+ depends on (ARCH_AT91 || AVR32)
+diff -urN linux-2.6.33-rc3/drivers/spi/Makefile topcliff-2.6.33-rc3/drivers/spi/Makefile
+--- linux-2.6.33-rc3/drivers/spi/Makefile 2010-01-06 09:02:46.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/spi/Makefile 2010-03-06 01:52:28.000000000 +0900
+@@ -59,3 +59,6 @@
+
+ # SPI slave drivers (protocol for that link)
+ # ... add above this line ...
++obj-$(CONFIG_PCH_SPI) += pch_spi.o
++pch_spi-objs := pch_spi_pci.o pch_spi_hal.o pch_spi_main.o
++obj-$(CONFIG_PCH_SPI_PLATFORM_DEVICE) +=pch_spi_platform_devices.o
+diff -urN linux-2.6.33-rc3/drivers/spi/pch_common.h topcliff-2.6.33-rc3/drivers/spi/pch_common.h
+--- linux-2.6.33-rc3/drivers/spi/pch_common.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/spi/pch_common.h 2010-03-09 05:56:11.000000000 +0900
+@@ -0,0 +1,146 @@
++/*!
++ * @file ioh_common.h
++ * @brief Provides the macro definitions used by all files.
++ * @version 1.0.0.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ */
++
++#ifndef __IOH_COMMON_H__
++#define __IOH_COMMON_H__
++
++/*! @ingroup Global
++@def IOH_WRITE8
++@brief Macro for writing 8 bit data to an io/mem address
++*/
++#define IOH_WRITE8(val, addr) iowrite8((val), (void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_WRITE16
++@brief Macro for writing 16 bit data to an io/mem address
++*/
++#define IOH_WRITE16(val, addr) iowrite16((val), (void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_WRITE32
++@brief Macro for writing 32 bit data to an io/mem address
++*/
++#define IOH_WRITE32(val, addr) iowrite32((val), (void __iomem *)(addr))
++
++/*! @ingroup Global
++@def IOH_READ8
++@brief Macro for reading 8 bit data from an io/mem address
++*/
++#define IOH_READ8(addr) ioread8((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_READ16
++@brief Macro for reading 16 bit data from an io/mem address
++*/
++#define IOH_READ16(addr) ioread16((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_READ32
++@brief Macro for reading 32 bit data from an io/mem address
++*/
++#define IOH_READ32(addr) ioread32((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_WRITE32_F
++@brief Macro for writing 32 bit data to an io/mem address
++*/
++#define IOH_WRITE32_F(val, addr) do \
++ { IOH_WRITE32((val), (addr)); (void)IOH_READ32((addr)); } while (0);
++
++/*! @ingroup Global
++@def IOH_WRITE_BYTE
++@brief Macro for writing 1 byte data to an io/mem address
++*/
++#define IOH_WRITE_BYTE IOH_WRITE8
++/*! @ingroup Global
++@def IOH_WRITE_WORD
++@brief Macro for writing 1 word data to an io/mem address
++*/
++#define IOH_WRITE_WORD IOH_WRITE16
++/*! @ingroup Global
++@def IOH_WRITE_LONG
++@brief Macro for writing long data to an io/mem address
++*/
++#define IOH_WRITE_LONG IOH_WRITE32
++
++/*! @ingroup Global
++@def IOH_READ_BYTE
++@brief Macro for reading 1 byte data from an io/mem address
++*/
++#define IOH_READ_BYTE IOH_READ8
++/*! @ingroup Global
++@def IOH_READ_WORD
++@brief Macro for reading 1 word data from an io/mem address
++*/
++#define IOH_READ_WORD IOH_READ16
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief Macro for reading long data from an io/mem address
++*/
++#define IOH_READ_LONG IOH_READ32
++
++/* Bit Manipulation Macros */
++
++/*! @ingroup Global
++@def IOH_SET_ADDR_BIT
++@brief macro to set a specified bit(mask) at the
++ specified address
++*/
++#define IOH_SET_ADDR_BIT(addr, bitmask) IOH_WRITE_LONG((IOH_READ_LONG(addr) |\
++ (bitmask)), (addr))
++
++/*! @ingroup Global
++@def IOH_CLR_ADDR_BIT
++@brief macro to clear a specified bit(mask) at the specified address
++*/
++#define IOH_CLR_ADDR_BIT(addr, bitmask) IOH_WRITE_LONG((IOH_READ_LONG(addr) &\
++ ~(bitmask)), (addr))
++
++/*! @ingroup Global
++@def IOH_SET_BITMSK
++@brief macro to set a specified bitmask for a variable
++*/
++#define IOH_SET_BITMSK(var, bitmask) ((var) |= (bitmask))
++
++/*! @ingroup Global
++@def IOH_CLR_BITMSK
++@brief macro to clear a specified bitmask for a variable
++*/
++#define IOH_CLR_BITMSK(var, bitmask) ((var) &= (~(bitmask)))
++
++/*! @ingroup Global
++@def IOH_SET_BIT
++@brief macro to set a specified bit for a variable
++*/
++#define IOH_SET_BIT(var, bit) ((var) |= (1<<(bit)))
++
++/*! @ingroup Global
++@def IOH_CLR_BIT
++@brief macro to clear a specified bit for a variable
++*/
++#define IOH_CLR_BIT(var, bit) ((var) &= ~(1<<(bit)))
++
++#endif
+diff -urN linux-2.6.33-rc3/drivers/spi/pch_debug.h topcliff-2.6.33-rc3/drivers/spi/pch_debug.h
+--- linux-2.6.33-rc3/drivers/spi/pch_debug.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/spi/pch_debug.h 2010-03-09 05:37:47.000000000 +0900
+@@ -0,0 +1,60 @@
++/*!
++ * @file ioh_debug.h
++ * @brief Provides the macro definitions used for debugging.
++ * @version 1.0.0.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ */
++
++#ifndef __IOH_DEBUG_H__
++#define __IOH_DEBUG_H__
++
++#ifdef MODULE
++#define IOH_LOG(level, fmt, args...) printk(level "%s:" fmt "\n",\
++ THIS_MODULE->name, ##args)
++#else
++#define IOH_LOG(level, fmt, args...) printk(level "%s:" fmt "\n" ,\
++ __FILE__, ##args)
++#endif
++
++
++#ifdef DEBUG
++ #define IOH_DEBUG(fmt, args...) IOH_LOG(KERN_DEBUG, fmt, ##args)
++#else
++ #define IOH_DEBUG(fmt, args...)
++#endif
++
++#ifdef IOH_TRACE_ENABLED
++ #define IOH_TRACE IOH_DEBUG
++#else
++ #define IOH_TRACE(fmt, args...)
++#endif
++
++#define IOH_TRACE_ENTER IOH_TRACE("Enter %s", __func__)
++#define IOH_TRACE_EXIT IOH_TRACE("Exit %s", __func__)
++
++
++#endif
+diff -urN linux-2.6.33-rc3/drivers/spi/pch_spi.h topcliff-2.6.33-rc3/drivers/spi/pch_spi.h
+--- linux-2.6.33-rc3/drivers/spi/pch_spi.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/spi/pch_spi.h 2010-03-06 09:01:42.000000000 +0900
+@@ -0,0 +1,389 @@
++#ifndef __IOH_SPI_H__
++#define __IOH_SPI_H__
++/**
++ * @file ioh_spi.h
++ *
++ * @brief This header file contains all macro,structure and function
++ * declarations
++ * for IOH SPI driver.
++ * @version 0.94
++ *
++ * @par
++ * -- Copyright Notice --
++ *
++ * @par
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * @par
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * @par
++ * -- End of Copyright Notice --
++ */
++
++/*! @defgroup SPI */
++
++/*! @defgroup SPI_Global
++@ingroup SPI
++@brief This group describes the global entities within
++ the module.
++@remarks This group includes all the global data structures
++ used within the modules. These are mainly used to
++ store the device related information, so that it can
++ be used by other functions of the modules.
++<hr>
++*/
++
++/*! @defgroup SPI_PCILayer
++@ingroup SPI
++@brief This group describes the PCI layer interface
++ functionalities.
++@remarks This group contains the functions and data structures
++ that are used to interface the module with PCI Layer
++ subsystem of the Kernel.
++<hr>
++*/
++
++/*! @defgroup SPI_InterfaceLayer
++@ingroup SPI
++@brief This group describes the Driver interface functionalities.
++@remarks This group contains the data structures and functions used
++ to interface the module driver with the kernel subsystem.
++<hr>
++*/
++
++/*! @defgroup SPI_HALLayer
++@ingroup SPI
++@brief This group describes the hardware specific functionalities.
++@remarks This group contains the functions and data structures used
++ by the module to communicate with the hardware. These
++ functions are device specific and designed according to the
++ device specifications.
++<hr>
++*/
++
++/*! @defgroup SPI_Utilities
++@ingroup SPI
++@brief This group describes the utility functionalities.
++@remarks This group contains the functions and data structures used
++ to assist the other functionalities in their operations.
++<hr>
++*/
++
++/*! @defgroup SPI_PCILayerAPI
++@ingroup SPI_PCILayer
++@brief This group contains the API(functions) used as the PCI
++ interface between the Kernel subsystem and the module.
++<hr>
++*/
++
++/*! @defgroup SPI_PCILayerFacilitators
++@ingroup SPI_PCILayer
++@brief This group contains the data structures used by the PCI
++ Layer APIs for their functionalities.
++<hr>
++*/
++
++/*! @defgroup SPI_InterfaceLayerAPI
++@ingroup SPI_InterfaceLayer
++@brief This group contains the API(functions) used as the Driver
++ interface between the Kernel subsystem and the module.
++<hr>
++*/
++
++/*! @defgroup SPI_InterfaceLayerFacilitators
++@ingroup SPI_InterfaceLayer
++@brief This group contains the data structures used by the Driver
++ interface APIs for their functionalities.
++<hr>
++*/
++
++/*! @defgroup SPI_HALLayerAPI
++@ingroup SPI_HALLayer
++@brief This group contains the APIs(functions) used to interact with
++ the hardware. These APIs act as an interface between the
++ hardware and the other driver functions.
++<hr>
++*/
++
++/*! @defgroup SPI_UtilitiesAPI
++@ingroup SPI_Utilities
++@brief This group contains the APIs(functions) used by other functions
++ in their operations.
++<hr>
++*/
++
++#include <linux/wait.h>
++#include <linux/device.h>
++#include <linux/pci.h>
++#include <linux/spi/spi.h>
++#include <linux/workqueue.h>
++
++/*! @ingroup SPI_Global
++
++@def STATUS_RUNNING
++
++@brief SPI channel is running
++
++@note The status of SPI channel is set to STATUS_RUNNING,
++ once all resources are acquired and initialized from
++ @ref ioh_spi_get_resources
++
++@see
++ - ioh_spi_get_resources
++
++<hr>
++*/
++#define STATUS_RUNNING (1)
++
++/*! @ingroup SPI_Global
++
++@def STATUS_EXITING
++
++@brief SPI device is being removed
++
++@note The status of SPI channel is set to STATUS_EXITING,
++ when SPI device is being removed.
++
++@see
++ - ioh_spi_process_messages
++ - ioh_spi_check_request_pending
++
++<hr>
++*/
++#define STATUS_EXITING (2)
++
++/*! @ingroup SPI_Global
++
++@def DRIVER_NAME
++
++@brief Name identifier for IOH SPI driver
++
++@note This name is used while printing debug logs
++
++<hr>
++*/
++#define DRIVER_NAME "ioh_spi"
++
++/*! @ingroup SPI_Global
++
++@def IOH_SPI_SLEEP_TIME
++
++@brief Sleep time used in @ref ioh_spi_check_request_pending
++
++@see
++ - ioh_spi_check_request_pending
++
++<hr>
++*/
++#define IOH_SPI_SLEEP_TIME (10)
++
++/*! @ingroup SPI_Global
++
++@def IOH_SPI_MAX_DEV
++
++@brief Denotes Maximum number of SPI channels
++
++@note This needs to be edited if number of SPI channels
++ change.
++
++@see
++ - ioh_spi_get_resources
++ - ioh_spi_free_resources
++ - ioh_spi_handler
++ - ioh_spi_check_request_pending
++ - ioh_spi_probe
++ - ioh_spi_suspend
++ - ioh_spi_resume
++ - ioh_spi_remove
++
++<hr>
++*/
++#ifdef IOH_DEVICE_GE
++#define IOH_SPI_MAX_DEV (1)
++#else
++#define IOH_SPI_MAX_DEV (1)
++#endif
++
++/*! @ingroup SPI_Global
++
++@def IOH_SPI_ADDRESS_SIZE
++
++@brief Denotes the address range used by one SPI channel.
++
++@note The base address of a subsequent SPI channel will be
++ (base address of the previous SPI channel) + (IOH_SPI_ADDRESS_SIZE)
++ This needs to be recalculated if any new register is added to a SPI
++ channel.
++
++@see
++ - ioh_spi_get_resources
++
++<hr>
++*/
++#define IOH_SPI_ADDRESS_SIZE (0x20)
++
++/*structures*/
++
++/*! @ingroup SPI_Global
++@struct ioh_spi_data
++@brief Holds the SPI channel specific details
++
++ This structure holds all the details related to a SPI channel
++
++ The status of SPI data transfer,the base address are all
++ stored in this structure.The reference to the work queue handler,
++ the SPI message and transmit and receive indices are also stored
++ in this structure.
++
++@see
++ - ioh_spi_board_data
++ - ioh_spi_select_chip
++ - ioh_spi_deselect_chip
++ - ioh_spi_transfer
++ - ioh_spi_process_messages
++<hr>
++*/
++
++struct ioh_spi_data {
++
++ u32 IORemapAddress; /**< The remapped PCI base address.*/
++
++ /**< The SPI master structure that has been registered
++ with the Kernel.*/
++ struct spi_master *pMaster;
++
++ struct work_struct Work; /**< Reference to work queue handler*/
++
++ /**< Workqueue for carrying out execution of the requests*/
++ struct workqueue_struct *pWorkQueue;
++
++ /**< Wait queue for waking up upon receiving an interrupt.*/
++ wait_queue_head_t Wait;
++
++ u8 bTransferComplete; /**< Status of SPI Transfer*/
++ u8 bCurrent_msg_processing; /**< Status flag for message processing*/
++
++ spinlock_t Lock; /**< Lock for protecting this structure*/
++
++ struct list_head Queue; /**< SPI Message queue*/
++ u8 Status; /**< Status of the SPI driver.*/
++
++ u32 lengthInBpw;/**< Length of data to be transferred in bits per word*/
++ s8 bTransferActive; /**< Flag showing active transfer*/
++ u32 TxIndex;/**< Transmit data count; for bookkeeping during transfer*/
++ u32 RxIndex;/**< Receive data count; for bookkeeping during transfer*/
++ u16 *pU16TxBuffer; /**< Data to be transmitted*/
++ u16 *pU16RxBuffer; /**< Received data*/
++
++/**< The chip number that this SPI driver currently operates on*/
++ u8 nCurrentChip;
++
++ /**< Reference to the current chip that this SPI driver currently
++ operates on*/
++ struct spi_device *pCurrentChip;
++
++ /**< The current message that this SPI driver is handling*/
++ struct spi_message *pCurMsg;
++
++ /**< The current transfer that this SPI driver is handling*/
++ struct spi_transfer *pCurTransfer;
++
++ /**< Reference to the SPI device data structure*/
++ struct ioh_spi_board_data *pBoardData;
++};
++
++/*! @ingroup SPI_Global
++@struct ioh_spi_board_data
++@brief Holds the SPI device specific details
++
++ This structure holds all the details related to a SPI device.
++
++ The reference to the pci_dev structure,status of request_irq,
++ pci_request_regions and device suspend are all stored in this structure.
++
++ This structure also has an array of pointers to ioh_spi_data structures,
++ with each pointer holding the details of one spi channel.
++
++@see
++ - ioh_spi_data
++ - ioh_spi_check_request_pending
++ - ioh_spi_get_resources
++ - ioh_spi_free_resources
++ - ioh_spi_remove
++ - ioh_spi_suspend
++ - ioh_spi_resume
++ - ioh_spi_probe
++ - ioh_spi_handler
++<hr>
++*/
++
++struct ioh_spi_board_data {
++
++ struct pci_dev *pDev; /**< Reference to the PCI device*/
++ u8 bIrqRegistered; /**< Status of IRQ registration*/
++ u8 bRegionRequested; /**< Status of pci_request_regions*/
++ u8 bSuspended; /**< Status of suspend*/
++
++ /**< Reference to SPI channel data structure*/
++ struct ioh_spi_data *pCtrlData[IOH_SPI_MAX_DEV];
++};
++
++/*function prototypes*/
++
++/*! @ingroup SPI_UtilitiesAPI
++@fn ioh_spi_callback( struct ioh_spi_data* pCtrlData)
++@brief Callback function
++*/
++void ioh_spi_callback(struct ioh_spi_data *pCtrlData);
++
++/*! @ingroup SPI_UtilitiesAPI
++@fn ioh_spi_free_resources(struct ioh_spi_board_data* pBoardData)
++@brief Frees the resources acquired by IOH SPI driver
++*/
++void ioh_spi_free_resources(struct ioh_spi_board_data *pBoardData);
++
++/*! @ingroup SPI_UtilitiesAPI
++@fn ioh_spi_check_request_pending(struct ioh_spi_board_data* pBoardData)
++@brief Checks for any pending SPI transfer request in the queue of pending
++ transfers
++*/
++int ioh_spi_check_request_pending(struct ioh_spi_board_data *pBoardData);
++
++/*! @ingroup SPI_UtilitiesAPI
++@fn ioh_spi_get_resources(struct ioh_spi_board_data* pBoardData)
++@brief Acquires the resources for IOH SPI driver
++*/
++int ioh_spi_get_resources(struct ioh_spi_board_data *pBoardData);
++
++/*! @ingroup SPI_InterfaceLayerAPI
++@fn ioh_spi_setup(struct spi_device* pSpi)
++@brief Implements the setup routine for IOH SPI driver
++*/
++int ioh_spi_setup(struct spi_device *pSpi);
++
++/*! @ingroup SPI_InterfaceLayerAPI
++@fn ioh_spi_transfer(struct spi_device* pSpi,struct spi_message* pMsg)
++@brief Implements the transfer routine for IOH SPI driver
++*/
++int ioh_spi_transfer(struct spi_device *pSpi, struct spi_message *pMsg);
++
++/*! @ingroup SPI_InterfaceLayerAPI
++@fn ioh_spi_cleanup(struct spi_device* pSpi)
++@brief Implements the cleanup routine for IOH SPI driver
++*/
++void ioh_spi_cleanup(struct spi_device *pSpi);
++
++#endif
+diff -urN linux-2.6.33-rc3/drivers/spi/pch_spi_hal.c topcliff-2.6.33-rc3/drivers/spi/pch_spi_hal.c
+--- linux-2.6.33-rc3/drivers/spi/pch_spi_hal.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/spi/pch_spi_hal.c 2010-03-09 00:41:44.000000000 +0900
+@@ -0,0 +1,1208 @@
++/**
++ * @file ioh_spi_hal.c
++ *
++ * @brief This file defines the HAL methods .
++ *
++ * @version 0.94
++ *
++ * @par
++ * -- Copyright Notice --
++ *
++ * @par
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * @par
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * @par
++ * -- End of Copyright Notice --
++ */
++
++#include <linux/io.h>
++#include <linux/interrupt.h>
++#include "pch_common.h"
++#include "pch_debug.h"
++#include "pch_spi.h"
++#include "pch_spi_hal.h"
++
++/*bit positions in SPCR*/
++
++/*! @ingroup SPI_HALLayer
++@def SPCR_SPE_BIT
++@brief SPE bit position in SPCR
++@see
++ - ioh_spi_set_enable
++*/
++#define SPCR_SPE_BIT (1 << 0)
++
++/*! @ingroup SPI_HALLayer
++@def SPCR_MSTR_BIT
++@brief MSTR bit position in SPCR
++@see
++ - ioh_spi_set_master_mode
++*/
++#define SPCR_MSTR_BIT (1 << 1)
++
++/*! @ingroup SPI_HALLayer
++@def SPCR_LSBF_BIT
++@brief LSBF bit position in SPCR
++@see
++ - ioh_spi_setup_transfer
++*/
++#define SPCR_LSBF_BIT (1 << 4)
++
++/*! @ingroup SPI_HALLayer
++@def SPCR_CPHA_BIT
++@brief CPHA bit position in SPCR
++@see
++ - ioh_spi_setup_transfer
++*/
++#define SPCR_CPHA_BIT (1 << 5)
++
++/*! @ingroup SPI_HALLayer
++@def SPCR_CPOL_BIT
++@brief CPOL bit position in SPCR
++@see
++ - ioh_spi_setup_transfer
++*/
++#define SPCR_CPOL_BIT (1 << 6)
++
++/*! @ingroup SPI_HALLayer
++@def SPCR_TFIE_BIT
++@brief TFIE bit position in SPCR
++@see
++ - ioh_spi_enable_interrupts
++ - ioh_spi_disable_interrupts
++*/
++#define SPCR_TFIE_BIT (1 << 8)
++
++/*! @ingroup SPI_HALLayer
++@def SPCR_RFIE_BIT
++@brief RFIE bit position in SPCR
++@see
++ - ioh_spi_enable_interrupts
++ - ioh_spi_disable_interrupts
++*/
++#define SPCR_RFIE_BIT (1 << 9)
++
++/*! @ingroup SPI_HALLayer
++@def SPCR_FIE_BIT
++@brief FIE bit position in SPCR
++@see
++ - ioh_spi_enable_interrupts
++ - ioh_spi_disable_interrupts
++*/
++#define SPCR_FIE_BIT (1 << 10)
++
++/*! @ingroup SPI_HALLayer
++@def SPCR_ORIE_BIT
++@brief ORIE bit position in SPCR
++@see
++ - ioh_spi_enable_interrupts
++ - ioh_spi_disable_interrupts
++*/
++#define SPCR_ORIE_BIT (1 << 11)
++
++/*! @ingroup SPI_HALLayer
++@def SPCR_MDFIE_BIT
++@brief MDFIE bit position in SPCR
++@see
++ - ioh_spi_enable_interrupts
++ - ioh_spi_disable_interrupts
++*/
++#define SPCR_MDFIE_BIT (1 << 12)
++
++/*! @ingroup SPI_HALLayer
++@def SPCR_FICLR_BIT
++@brief FICLR bit position in SPCR
++@see
++ - ioh_spi_clear_fifo
++*/
++#define SPCR_FICLR_BIT (1 << 24)
++
++/*bit positions in SPSR*/
++
++/*! @ingroup SPI_HALLayer
++@def SPSR_TFI_BIT
++@brief TFI bit position in SPCR
++*/
++#define SPSR_TFI_BIT (1 << 0)
++
++/*! @ingroup SPI_HALLayer
++@def SPSR_RFI_BIT
++@brief RFI bit position in SPCR
++@see
++ - ioh_spi_handler
++*/
++#define SPSR_RFI_BIT (1 << 1)
++
++/*! @ingroup SPI_HALLayer
++@def SPSR_FI_BIT
++@brief FI bit position in SPCR
++@see
++ - ioh_spi_handler
++*/
++#define SPSR_FI_BIT (1 << 2)
++
++/*bit positions in SPBRR*/
++
++/*! @ingroup SPI_HALLayer
++@def SPBRR_SIZE_BIT
++@brief SIZE bit position in SPCR
++@see
++ - ioh_spi_set_bits_per_word
++*/
++#define SPBRR_SIZE_BIT (1 << 10)
++
++/*! @ingroup SPI_HALLayer
++@def SPCR_RFIC_FIELD
++@brief RFIC field in SPCR
++@see
++ - ioh_spi_set_threshold
++*/
++#define SPCR_RFIC_FIELD (20)
++
++/*! @ingroup SPI_HALLayer
++@def SPCR_TFIC_FIELD
++@brief TFIC field in SPCR
++@see
++ - ioh_spi_set_threshold
++*/
++#define SPCR_TFIC_FIELD (16)
++
++/*! @ingroup SPI_HALLayer
++@def SPSR_INT_BITS
++@brief Mask for all interrupt bits in SPSR
++@see
++ - ioh_spi_reset
++*/
++#define SPSR_INT_BITS (0x1F)
++
++/*! @ingroup SPI_HALLayer
++@def MASK_SPBRR_SPBR_BITS
++@brief Mask for clearing SPBR in SPBRR
++@see
++ - ioh_spi_set_baud_rate
++*/
++#define MASK_SPBRR_SPBR_BITS (0xFFFFFC00)
++
++/*! @ingroup SPI_HALLayer
++@def MASK_RFIC_SPCR_BITS
++@brief Mask for Rx threshold in SPCR
++@see
++ - ioh_spi_set_threshold
++*/
++#define MASK_RFIC_SPCR_BITS (0xFF0FFFFF)
++
++/*! @ingroup SPI_HALLayer
++@def MASK_TFIC_SPCR_BITS
++@brief Mask for Tx threshold in SPCR
++@see
++ - ioh_spi_set_threshold
++*/
++#define MASK_TFIC_SPCR_BITS (0xFFF0FFF)
++
++/*! @ingroup SPI_HALLayer
++@def IOH_CLOCK_HZ
++@brief Pclock Frequency
++@see
++ - ioh_spi_set_baud_rate
++*/
++#ifndef FPGA
++ /*LSI*/
++#define IOH_CLOCK_HZ (50000000)
++#else
++ /*FPGA*/
++#define IOH_CLOCK_HZ (62500000)
++#endif
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_MAX_SPBR
++@brief Maximum value possible for SPBR in SPBRR
++@see
++ - ioh_spi_set_baud_rate
++*/
++#define IOH_SPI_MAX_SPBR (1023)
++/*global*/
++/*! @ingroup SPI_HALLayer
++
++@var ioh_spi_gcbptr
++
++@brief SPI_Global function pointer to store reference of
++ callback function @ref
++ ioh_spi_callback
++
++@note The reference of callback function is assigned to this
++ pointer
++ from @ref ioh_spi_probe function by invoking
++ the function @ref ioh_spi_entcb.
++ This global variable is used by the function
++	@ref ioh_spi_handler
++ to invoke the callback function.
++
++@see
++ - ioh_spi_entcb
++ - ioh_spi_handler
++
++<hr>
++
++*/
++static void (*ioh_spi_gcbptr) (struct ioh_spi_data *);
++
++/*! @ingroup SPI_HALLayerAPI
++
++@fn ioh_spi_set_master_mode( struct spi_master *master)
++
++@remarks Sets the MSTR bit in SPCR
++
++ The main task performed by this method:
++ - Read the content of SPCR register
++ - Set the MSTR bit
++ - Write back the value to SPCR
++
++@note This function is invoked from @ref ioh_spi_probe to put the IOH SPI
++ device into master mode.
++
++@param master [@ref IN] Contains reference to struct spi_master
++
++@retval None
++
++@see
++ - ioh_spi_probe
++
++<hr>
++
++*/
++void ioh_spi_set_master_mode(struct spi_master *master)
++{
++ u32 reg_spcr_val;
++ reg_spcr_val = ioh_spi_readreg(master, IOH_SPI_SPCR);
++ IOH_DEBUG("ioh_spi_set_master_mode SPCR content=%x\n", reg_spcr_val);
++
++ /*sets the second bit of SPCR to 1:master mode */
++ IOH_SET_BITMSK(reg_spcr_val, SPCR_MSTR_BIT);
++
++ /*write the value to SPCR register */
++ ioh_spi_writereg(master, IOH_SPI_SPCR, reg_spcr_val);
++ IOH_DEBUG("ioh_spi_set_master_mode SPCR after setting MSTR bit=%x\n",
++ reg_spcr_val);
++}
++
++/*! @ingroup SPI_HALLayerAPI
++
++@fn ioh_spi_set_enable(const struct spi_device *spi, u8 enable)
++
++@remarks Sets/Resets the SPE bit in SPCR
++
++ The main tasks performed by this method are:
++ - Read the content of SPCR.
++ - If the enable parameter is true , set the SPE bit.
++ - If the enable parameter is false , clear the SPE bit.
++ - Write back the value to SPCR.
++
++@note This function is invoked by @ref ioh_spi_process_messages to enable SPI
++ transfer before start of SPI data transfer and to disable SPI data
++ transfer
++ after completion of SPI data transfer.
++
++@param spi [@ref IN] Contains reference to struct spi_device
++
++@param enable [@ref IN]
++ To enable SPI transfer enable = true
++ To disable SPI transfer enable = false
++
++@retval None
++
++@see
++ - ioh_spi_process_messages
++
++<hr>
++
++*/
++void ioh_spi_set_enable(const struct spi_device *spi, u8 enable)
++{
++ u32 reg_spcr_val;
++
++ reg_spcr_val = ioh_spi_readreg(spi->master, IOH_SPI_SPCR);
++ IOH_DEBUG("ioh_spi_set_enable SPCR content=%x\n", reg_spcr_val);
++
++ if (enable == true) {
++ IOH_DEBUG("ioh_spi_set_enable enable==true\n");
++ IOH_SET_BITMSK(reg_spcr_val, SPCR_SPE_BIT);
++ } else {
++ IOH_DEBUG("ioh_spi_set_enable enable==false\n");
++ IOH_CLR_BITMSK(reg_spcr_val, SPCR_SPE_BIT);
++ }
++
++ ioh_spi_writereg(spi->master, IOH_SPI_SPCR, reg_spcr_val);
++
++ IOH_DEBUG("ioh_spi_set_enable SPCR content after modifying SPE=%x\n",
++ reg_spcr_val);
++}
++
++/*! @ingroup SPI_HALLayerAPI
++
++@fn ioh_spi_handler(int irq, void* dev_id)
++
++@remarks Interrupt handler
++
++The main tasks performed by this method are:
++- Check if Corresponding interrupt bits are set in SPSR register.
++- If no, return IRQ_NONE.
++- If yes, read the number of bytes received and write required number of bytes
++according to space available.
++- Update all bookkeeping variables.
++- If bytes/words to be received is less than 16bytes/words,then disable RFI
++and set Rx threshold to 16 bytes/words.
++- If SPI data transfer is completed, invoke the callback function
++@ref ioh_spi_callback to inform the status to @ref ioh_spi_process_messages.
++- Repeat for all SPI channels.
++
++@note
++This is the interrupt handler for IOH SPI controller driver.This function is
++invoked by the kernel when any interrupt occurs on the interrupt line shared by
++IOH SPI device. The SPI data transfer is initiated by @ref ioh_spi_process_
++messages,but is carried on by this function.For optimised operation,the HAL
++functions to read and write registers are not used in this function.
++Also register
++address calculation is done once at the beginning to avoid the calculation each
++time while accessing registers.
++
++@param irq [@ref IN] The interrupt number
++
++@param dev_id [@ref IN] Contains reference to struct ioh_spi_board_data
++
++@retval irqreturn_t
++ - IRQ_NONE The interrupt is not ours
++ - IRQ_HANDLED The interrupt has been serviced
++
++@see
++ - ioh_spi_get_resources
++ - ioh_spi_free_resources
++ - ioh_spi_suspend
++ - ioh_spi_resume
++
++<hr>
++
++*/
++irqreturn_t ioh_spi_handler(int irq, void *dev_id)
++{
++ /*channel & read/write indices */
++ int dev, readcnt;
++
++ /*SPSR content */
++ u32 reg_spsr_val, reg_spcr_val;
++
++ /*book keeping variables */
++ u32 nReadable, TxIndex, RxIndex, lengthInBpw;
++
++ /*to hold channel data */
++
++ struct ioh_spi_data *pCtrlData;
++
++ /*buffer to store rx/tx data */
++ u16 *pU16RxBuffer, *pU16TxBuffer;
++
++ /*register addresses */
++ u32 SPSR, SPDRR, SPDWR;
++
++ /*remapped pci base address */
++ u32 IORemapAddress;
++
++ irqreturn_t tRetVal = IRQ_NONE;
++
++ struct ioh_spi_board_data *pBoardData =
++ (struct ioh_spi_board_data *)dev_id;
++
++ if (pBoardData->bSuspended == true) {
++ IOH_DEBUG("ioh_spi_handler returning due to suspend\n");
++ } else {
++ for (dev = 0; dev < IOH_SPI_MAX_DEV; dev++) {
++ pCtrlData = pBoardData->pCtrlData[dev];
++ IORemapAddress = pCtrlData->IORemapAddress;
++ SPSR = IORemapAddress + IOH_SPI_SPSR;
++
++ reg_spsr_val = IOH_READ_LONG(SPSR);
++
++ /*Check if the interrupt is for SPI device */
++
++ if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
++ IOH_DEBUG("SPSR in ioh_spi_handler=%x\n",
++ reg_spsr_val);
++ /*clear interrupt */
++ IOH_WRITE_LONG(reg_spsr_val, SPSR);
++
++ if (pCtrlData->bTransferActive == true) {
++ RxIndex = pCtrlData->RxIndex;
++ TxIndex = pCtrlData->TxIndex;
++ lengthInBpw = pCtrlData->lengthInBpw;
++ pU16RxBuffer = pCtrlData->pU16RxBuffer;
++ pU16TxBuffer = pCtrlData->pU16TxBuffer;
++
++ SPDRR = IORemapAddress + IOH_SPI_SPDRR;
++ SPDWR = IORemapAddress + IOH_SPI_SPDWR;
++
++ nReadable =
++ IOH_SPI_READABLE(reg_spsr_val);
++
++ for (readcnt = 0; (readcnt < nReadable);
++ readcnt++) {
++ /*read data */
++ pU16RxBuffer[RxIndex++] =
++ IOH_READ_LONG(SPDRR);
++ /*write data */
++
++ if (TxIndex < lengthInBpw) {
++ IOH_WRITE_LONG
++ (pU16TxBuffer
++ [TxIndex++],
++ SPDWR);
++ }
++ }
++
++ /*disable RFI if not needed */
++ if ((lengthInBpw - RxIndex) <=
++ IOH_SPI_MAX_FIFO_DEPTH) {
++ IOH_DEBUG
++ ("ioh_spi_handler disabling\
++ RFI as data remaining=%d\n",
++ (lengthInBpw - RxIndex));
++
++ reg_spcr_val =
++ IOH_READ_LONG(IORemapAddress
++ +
++ IOH_SPI_SPCR);
++
++ /*disable RFI */
++ IOH_CLR_BITMSK(reg_spcr_val,
++ SPCR_RFIE_BIT);
++
++ /*reset rx threshold */
++ reg_spcr_val &=
++ MASK_RFIC_SPCR_BITS;
++ reg_spcr_val |=
++ (IOH_SPI_RX_THOLD_MAX <<
++ SPCR_RFIC_FIELD);
++
++ IOH_WRITE_LONG(IOH_CLR_BITMSK
++ (reg_spcr_val,
++ SPCR_RFIE_BIT),
++ (IORemapAddress +
++ IOH_SPI_SPCR));
++ }
++
++ /*update counts */
++ pCtrlData->TxIndex = TxIndex;
++
++ pCtrlData->RxIndex = RxIndex;
++
++ IOH_DEBUG
++ ("ioh_spi_handler RxIndex=%d\n",
++ RxIndex);
++
++ IOH_DEBUG
++ ("ioh_spi_handler TxIndex=%d\n",
++ TxIndex);
++
++ IOH_DEBUG
++ ("ioh_spi_handler nWritable=%d\n",
++ (16 -
++ (IOH_SPI_WRITABLE
++ (reg_spsr_val))));
++
++ IOH_DEBUG
++ ("ioh_spi_handler nReadable=%d\n",
++ nReadable);
++ }
++
++ /*if transfer complete interrupt */
++ if (reg_spsr_val & SPSR_FI_BIT) {
++ IOH_DEBUG
++ ("ioh_spi_handler FI bit in SPSR\
++ set\n");
++
++ /*disable FI & RFI interrupts */
++ ioh_spi_disable_interrupts(pCtrlData->
++ pMaster,
++ IOH_SPI_FI |
++ IOH_SPI_RFI);
++
++ /*transfer is completed;inform
++ ioh_spi_process_messages */
++
++ if (ioh_spi_gcbptr != NULL) {
++ IOH_DEBUG
++ ("ioh_spi_handler invoking\
++ callback\n");
++ (*ioh_spi_gcbptr) (pCtrlData);
++ }
++ }
++
++ tRetVal = IRQ_HANDLED;
++ }
++ }
++ }
++
++ IOH_DEBUG("ioh_spi_handler EXIT return value=%d\n", tRetVal);
++
++ return tRetVal;
++}
++
++/*! @ingroup SPI_HALLayerAPI
++
++@fn ioh_spi_entcb (void (*ioh_spi_cb)( struct ioh_spi_data* ))
++
++@remarks Registers the callback function
++
++ The major tasks performed by this method are:
++ - Validate ioh_spi_cb
++ - Assign it to global pointer @ref ioh_spi_gcbptr
++
++@note This function is invoked from @ref ioh_spi_probe function
++ This function should always be invoked before the interrupt
++ handler is registered.
++
++@param ioh_spi_cb [@ref IN]
++ Contains reference to callback function pointer
++
++@retval None
++
++@see
++ - ioh_spi_probe
++
++<hr>
++
++*/
++void ioh_spi_entcb(void (*ioh_spi_cb) (struct ioh_spi_data *))
++{
++ if (ioh_spi_cb != NULL) {
++ /*Assign the above value to a global pointer */
++ ioh_spi_gcbptr = ioh_spi_cb;
++ IOH_DEBUG("ioh_spi_entcb ioh_spi_cb ptr not NULL\n");
++ IOH_DEBUG
++ ("ioh_spi_entcb ioh_spi_cb ptr saved in ioh_spi_gcbptr\n");
++ } else {
++ IOH_LOG(KERN_ERR, "ioh_spi_entcb ioh_spi_cb ptr NULL\n");
++ }
++}
++
++/*! @ingroup SPI_HALLayerAPI
++
++@fn ioh_spi_setup_transfer(struct spi_device *spi)
++
++@remarks Configures the IOH SPI hardware for transfer
++
++ The major tasks performed by this method are:
++ - Invoke @ref ioh_spi_set_baud_rate to set the baud rate.
++ - Invoke @ref ioh_spi_set_bits_per_word to set the bits per word.
++ - Set the bit justification in SPCR.
++ - Set the Clock Polarity and Clock Phase in SPCR.
++ - Clear the Rx and Tx FIFO by toggling FICLR bit in SPCR.
++
++@note This function configures the IOH SPI hardware according to the
++ configurations specified by the user.
++
++@param spi [@ref IN] Contains reference to struct spi_device
++
++@retval int
++ @ref IOH_SPI_SUCCESS All hardware configurations have been done
++
++@see
++ - ioh_spi_select_chip
++
++<hr>
++
++*/
++s8 ioh_spi_setup_transfer(struct spi_device *spi)
++{
++ u32 reg_spcr_val;
++
++ IOH_DEBUG("ioh_spi_setup_transfer SPBRR content =%x\n",
++ ioh_spi_readreg(spi->master, IOH_SPI_SPBRR));
++
++ /*set baud rate */
++ IOH_DEBUG("ioh_spi_setup_transfer :setting baud rate=%d\n",
++ spi->max_speed_hz);
++ ioh_spi_set_baud_rate(spi->master, spi->max_speed_hz);
++
++ /*set bits per word */
++ IOH_DEBUG("ioh_spi_setup_transfer :setting bits_per_word=%d\n",
++ spi->bits_per_word);
++ ioh_spi_set_bits_per_word(spi->master, spi->bits_per_word);
++
++ IOH_DEBUG
++ ("ioh_spi_setup_transfer SPBRR content after setting baud\
++ rate & bits per word=%x\n",
++ ioh_spi_readreg(spi->master, IOH_SPI_SPBRR));
++
++ reg_spcr_val = ioh_spi_readreg(spi->master, IOH_SPI_SPCR);
++ IOH_DEBUG("ioh_spi_setup_transfer SPCR content = %x\n", reg_spcr_val);
++
++ /*set bit justification */
++
++ if ((spi->mode & SPI_LSB_FIRST) != 0) {
++ /*LSB first */
++ IOH_CLR_BITMSK(reg_spcr_val, SPCR_LSBF_BIT);
++ IOH_DEBUG("ioh_spi_setup_transfer :setting LSBF bit to 0\n");
++ } else {
++ /*MSB first */
++ IOH_SET_BITMSK(reg_spcr_val, SPCR_LSBF_BIT);
++ IOH_DEBUG("ioh_spi_setup_transfer :setting LSBF bit to 1\n");
++ }
++
++ /*set clock polarity */
++ if ((spi->mode & SPI_CPOL) != 0) {
++ IOH_SET_BITMSK(reg_spcr_val, SPCR_CPOL_BIT);
++ IOH_DEBUG("ioh_spi_setup_transfer clock polarity = 1\n");
++ } else {
++ IOH_CLR_BITMSK(reg_spcr_val, SPCR_CPOL_BIT);
++ IOH_DEBUG("ioh_spi_setup_transfer clock polarity = 0\n");
++ }
++
++ /*set the clock phase */
++ if ((spi->mode & SPI_CPHA) != 0) {
++ IOH_SET_BITMSK(reg_spcr_val, SPCR_CPHA_BIT);
++ IOH_DEBUG("ioh_spi_setup_transfer clock phase = 1\n");
++ } else {
++ IOH_CLR_BITMSK(reg_spcr_val, SPCR_CPHA_BIT);
++ IOH_DEBUG("ioh_spi_setup_transfer clock phase = 0\n");
++ }
++
++ /*write SPCR SPCR register */
++ ioh_spi_writereg(spi->master, IOH_SPI_SPCR, reg_spcr_val);
++
++ IOH_DEBUG
++ ("ioh_spi_setup_transfer SPCR content after setting LSB/MSB\
++ and MODE= %x\n",
++ reg_spcr_val);
++
++ /*Clear the FIFO by toggling FICLR to 1 and back to 0 */
++ ioh_spi_clear_fifo(spi->master);
++
++ IOH_DEBUG("ioh_spi_setup_transfer Return=%d\n", IOH_SPI_SUCCESS);
++
++ return IOH_SPI_SUCCESS;
++}
++
++/*! @ingroup SPI_HALLayerAPI
++
++@fn ioh_spi_writereg(struct spi_master *master,int idx, u32 val)
++
++@remarks Performs register writes
++
++ The major tasks performed by this method are:
++ - Obtain the SPI channel data structure from master.
++ - Calculate the register address as offset + base address
++ from SPI channel data structure.
++ - Write the value specified by val to register the address calculated.
++
++@note This function is inline.
++
++@param master [@ref IN] Contains reference to struct spi_master
++
++@param idx [@ref IN] Contains register offset
++
++@param val [@ref IN] Contains value to be written to register
++
++@retval None
++
++@see
++ - ioh_spi_setup_transfer
++ - ioh_spi_enable_interrupts
++ - ioh_spi_disable_interrupts
++ - ioh_spi_set_enable
++ - ioh_spi_set_master_mode
++ - ioh_spi_set_baud_rate
++ - ioh_spi_set_bits_per_word
++ - ioh_spi_reset
++ - ioh_spi_set_threshold
++ - ioh_spi_clear_fifo
++ - ioh_spi_process_messages
++
++<hr>
++
++*/
++inline void ioh_spi_writereg(struct spi_master *master, int idx, u32 val)
++{
++
++ struct ioh_spi_data *pCtrlData = spi_master_get_devdata(master);
++
++ IOH_WRITE_LONG(val, (pCtrlData->IORemapAddress + idx));
++
++ IOH_DEBUG("ioh_spi_writereg Offset=%x\n", idx);
++ IOH_DEBUG("ioh_spi_writereg Value=%x\n", val);
++}
++
++/*! @ingroup SPI_HALLayerAPI
++
++@fn ioh_spi_readreg(struct spi_master *master,int idx)
++
++@remarks Performs register reads
++
++ The major tasks performed by this method are:
++ - Obtain the SPI channel data structure from master.
++ - Calculate the register address as offset + base address
++ from SPI channel data structure.
++ - Read the content of the register at the address calculated.
++
++@note This function is inline
++
++@param master [@ref IN] Contains reference to struct spi_master
++
++@param idx [@ref IN] Contains register offset
++
++@retval u32
++ The content of the register at offset idx
++
++@see
++ - ioh_spi_setup_transfer
++ - ioh_spi_enable_interrupts
++ - ioh_spi_disable_interrupts
++ - ioh_spi_set_enable
++ - ioh_spi_set_master_mode
++ - ioh_spi_set_baud_rate
++ - ioh_spi_set_bits_per_word
++ - ioh_spi_set_threshold
++ - ioh_spi_clear_fifo
++
++<hr>
++*/
++inline u32 ioh_spi_readreg(struct spi_master *master, int idx)
++{
++ u32 reg_data;
++
++ struct ioh_spi_data *pCtrlData = spi_master_get_devdata(master);
++
++ IOH_DEBUG("ioh_spi_readreg Offset=%x\n", idx);
++ reg_data = IOH_READ_LONG((pCtrlData->IORemapAddress + idx));
++
++ IOH_DEBUG("ioh_spi_readreg Content=%x\n", reg_data);
++ return reg_data;
++}
++
++/*! @ingroup SPI_HALLayerAPI
++
++@fn ioh_spi_enable_interrupts (struct spi_master *master, u8 interrupt)
++
++@remarks Enables specified interrupts
++
++ The major tasks performed by this method are:
++ - Read the content of SPCR.
++ - Based on interrupt ,set corresponding bits in SPCR content.
++ - Write the value back to SPCR.
++
++@note This function is invoked from @ref ioh_spi_process_messages before
++ starting SPI data transfer.As of now only FI and RFI interrupts are
++ used.
++
++@param master [@ref IN] Contains reference to struct spi_master
++
++@param	interrupt	[@ref IN]	Interrupts to be enabled. This parameter
++ is a u8 value with five least significant bits representing
++ each of the interrupts FI,RFI,TFI,ORI and MDFI.
++
++@retval None
++
++@see
++ - ioh_spi_process_messages
++
++<hr>
++
++*/
++void ioh_spi_enable_interrupts(struct spi_master *master, u8 interrupt)
++{
++ u32 reg_val_spcr;
++
++ reg_val_spcr = ioh_spi_readreg(master, IOH_SPI_SPCR);
++
++ IOH_DEBUG("ioh_spi_enable_interrupts SPCR content=%x\n", reg_val_spcr);
++
++ if ((interrupt & IOH_SPI_RFI) != 0) {
++ /*set RFIE bit in SPCR */
++ IOH_DEBUG("setting RFI in ioh_spi_enable_interrupts\n");
++ IOH_SET_BITMSK(reg_val_spcr, SPCR_RFIE_BIT);
++ }
++
++ if ((interrupt & IOH_SPI_TFI) != 0) {
++ /*set TFIE bit in SPCR */
++ IOH_DEBUG("setting TFI in ioh_spi_enable_interrupts\n");
++ IOH_SET_BITMSK(reg_val_spcr, SPCR_TFIE_BIT);
++ }
++
++ if ((interrupt & IOH_SPI_FI) != 0) {
++ /*set FIE bit in SPCR */
++ IOH_DEBUG("setting FI in ioh_spi_enable_interrupts\n");
++ IOH_SET_BITMSK(reg_val_spcr, SPCR_FIE_BIT);
++ }
++
++ if ((interrupt & IOH_SPI_ORI) != 0) {
++ /*set ORIE bit in SPCR */
++ IOH_DEBUG("setting ORI in ioh_spi_enable_interrupts\n");
++ IOH_SET_BITMSK(reg_val_spcr, SPCR_ORIE_BIT);
++ }
++
++ if ((interrupt & IOH_SPI_MDFI) != 0) {
++ /*set MODFIE bit in SPCR */
++ IOH_DEBUG("setting MDFI in ioh_spi_enable_interrupts\n");
++ IOH_SET_BITMSK(reg_val_spcr, SPCR_MDFIE_BIT);
++ }
++
++ ioh_spi_writereg(master, IOH_SPI_SPCR, reg_val_spcr);
++
++ IOH_DEBUG
++ ("ioh_spi_enable_interrupts SPCR content after enabling interrupt\
++ =%x\n",
++ reg_val_spcr);
++}
++
++/*! @ingroup SPI_HALLayerAPI
++
++@fn ioh_spi_disable_interrupts (struct spi_master *master, u8 interrupt)
++
++@remarks Disables specified interrupts
++
++ The major tasks performed by this method are:
++ - Read the content of SPCR.
++ - Based on interrupt ,clear corresponding bits in SPCR content.
++ - Write the value back to SPCR.
++
++@param master [@ref IN] Contains reference to struct spi_master
++
++@param	interrupt	[@ref IN]	Interrupts to be disabled. This parameter
++ is a u8 value with five least significant bits representing
++ each of the interrupts FI,RFI,TFI,ORI and MDFI.
++
++@retval None
++
++@see
++ - ioh_spi_process_messages
++ - ioh_spi_handler
++ - ioh_spi_suspend
++ - ioh_spi_free_resources
++
++<hr>
++
++*/
++void ioh_spi_disable_interrupts(struct spi_master *master, u8 interrupt)
++{
++ u32 reg_val_spcr;
++
++ reg_val_spcr = ioh_spi_readreg(master, IOH_SPI_SPCR);
++
++ IOH_DEBUG("ioh_spi_disable_interrupts SPCR content =%x\n",
++ reg_val_spcr);
++
++ if ((interrupt & IOH_SPI_RFI) != 0) {
++ /*clear RFIE bit in SPCR */
++ IOH_DEBUG("clearing RFI in ioh_spi_disable_interrupts\n");
++ IOH_CLR_BITMSK(reg_val_spcr, SPCR_RFIE_BIT);
++ }
++
++ if ((interrupt & IOH_SPI_TFI) != 0) {
++ /*clear TFIE bit in SPCR */
++ IOH_DEBUG("clearing TFI in ioh_spi_disable_interrupts\n");
++ IOH_CLR_BITMSK(reg_val_spcr, SPCR_TFIE_BIT);
++ }
++
++ if ((interrupt & IOH_SPI_FI) != 0) {
++ /*clear FIE bit in SPCR */
++ IOH_DEBUG("clearing FI in ioh_spi_disable_interrupts\n");
++ IOH_CLR_BITMSK(reg_val_spcr, SPCR_FIE_BIT);
++ }
++
++ if ((interrupt & IOH_SPI_ORI) != 0) {
++ /*clear ORIE bit in SPCR */
++ IOH_DEBUG("clearing ORI in ioh_spi_disable_interrupts\n");
++ IOH_CLR_BITMSK(reg_val_spcr, SPCR_ORIE_BIT);
++ }
++
++ if ((interrupt & IOH_SPI_MDFI) != 0) {
++ /*clear MODFIE bit in SPCR */
++ IOH_DEBUG("clearing MDFI in ioh_spi_disable_interrupts\n");
++ IOH_CLR_BITMSK(reg_val_spcr, SPCR_MDFIE_BIT);
++ }
++
++ ioh_spi_writereg(master, IOH_SPI_SPCR, reg_val_spcr);
++
++ IOH_DEBUG
++ ("ioh_spi_disable_interrupts SPCR after disabling interrupts =%x\n",
++ reg_val_spcr);
++}
++
++/*! @ingroup SPI_HALLayerAPI
++
++@fn ioh_spi_set_threshold(struct spi_device *spi, u32 threshold, u8 dir)
++
++@remarks Sets Tx/Rx FIFO thresholds
++
++The major tasks performed by this function are:
++- Read the content of SPCR.
++- If the dir is @ref IOH_SPI_RX ,set the Rx threshold bits in SPCR content.
++- If the dir is @ref IOH_SPI_TX ,set the Tx threshold bits in SPCR content.
++- Write back the value to SPCR.
++
++@note This function is invoked from ioh_spi_process_messages to set the Receive
++threshold level.As of now, when the length of data to be transferred is greater
++than FIFO depth of 16 bytes/words ,the Receive FIFO threshold is set at
++ 8 bytes/words.
++If the length of data to be transferred is less than FIFO depth,the Receive FIFO
++threshold is set at 16 bytes/words.
++
++@param spi [@ref IN] Contains reference to struct spi_device
++
++@param threshold [@ref IN] Threshold value to be set
++
++@param dir [@ref IN] Rx or Tx threshold to be set
++ - dir = @ref IOH_SPI_RX implies Receive FIFO threshold needs to be set.
++ - dir = @ref IOH_SPI_TX implies Transmit FIFO threshold needs to be set.
++
++@retval None
++
++@see
++ - ioh_spi_process_messages
++
++<hr>
++*/
++void ioh_spi_set_threshold(struct spi_device *spi, u32 threshold, u8 dir)
++{
++ u32 reg_val_spcr;
++
++ reg_val_spcr = ioh_spi_readreg(spi->master, IOH_SPI_SPCR);
++ IOH_DEBUG("ioh_spi_set_threshold SPCR before modifying =%x\n",
++ reg_val_spcr);
++ IOH_DEBUG("ioh_spi_set_threshold threshold=%d\n", (threshold + 1));
++
++ if (dir == IOH_SPI_RX) {
++ IOH_DEBUG("ioh_spi_set_threshold setting Rx threshold\n");
++ reg_val_spcr &= MASK_RFIC_SPCR_BITS;
++ reg_val_spcr |= (threshold << SPCR_RFIC_FIELD);
++ } else if (dir == IOH_SPI_TX) {
++ IOH_DEBUG("ioh_spi_set_threshold setting Tx threshold\n");
++ reg_val_spcr &= MASK_TFIC_SPCR_BITS;
++ reg_val_spcr |= (threshold << SPCR_TFIC_FIELD);
++ }
++
++ ioh_spi_writereg(spi->master, IOH_SPI_SPCR, reg_val_spcr);
++
++ IOH_DEBUG("ioh_spi_set_threshold SPCR after modifying =%x\n",
++ reg_val_spcr);
++}
++
++/*! @ingroup SPI_HALLayerAPI
++
++@fn ioh_spi_reset(struct spi_master* master)
++
++@remarks Clears SPI registers
++
++ The major tasks performed by this method are:
++ - Clear all R/W bits of SPCR.
++ - Clear Receive and Transmit FIFOs by invoking @ref ioh_spi_clear_fifo
++ - Clear all R/W bits of SPBRR.
++ - Clear all interrupts in SPSR.
++ - If the device has SRST [reset register],then instead of the
++ above steps,first 1 is written to SRST to reset SPI and then
++ 0 is written to SRST to clear reset.
++
++@note This function is invoked to bring the IOH SPI device to an
++ initialized state.After this function is invoked all the SPI
++ registers need to be configured again.
++
++@param master [@ref IN] Contains reference to struct spi_master
++
++@retval None
++
++@see
++ - ioh_spi_get_resources
++ - ioh_spi_suspend
++ - ioh_spi_resume
++
++<hr>
++
++*/
++void ioh_spi_reset(struct spi_master *master)
++{
++#ifndef FPGA
++ /*LSI*/
++ /*write 1 to reset SPI */
++ ioh_spi_writereg(master, IOH_SPI_SRST, 0x1);
++ /*clear reset */
++ ioh_spi_writereg(master, IOH_SPI_SRST, 0x0);
++#else
++ /*FPGA*/
++ /*write 0 to SPCR */
++ ioh_spi_writereg(master, IOH_SPI_SPCR, 0x0);
++ IOH_DEBUG("ioh_spi_reset SPCR content after reset=%x\n",
++ ioh_spi_readreg(master, IOH_SPI_SPCR));
++ /*Clear the FIFO */
++ ioh_spi_clear_fifo(master);
++
++ /*write 0 to SPBRR */
++ ioh_spi_writereg(master, IOH_SPI_SPBRR, 0x0);
++ IOH_DEBUG("ioh_spi_reset SPBRR content after reset=%x\n",
++ ioh_spi_readreg(master, IOH_SPI_SPBRR));
++
++ /*clear interrupts in SPSR */
++ ioh_spi_writereg(master, IOH_SPI_SPSR, SPSR_INT_BITS);
++ IOH_DEBUG("ioh_spi_reset SPSR content after reset=%x\n",
++ ioh_spi_readreg(master, IOH_SPI_SPSR));
++#endif
++}
++
++/*! @ingroup SPI_HALLayerAPI
++
++@fn ioh_spi_set_baud_rate(struct spi_master* master,u32 speed_hz)
++
++@remarks Sets SPBR field in SPBRR
++
++ The major tasks performed by this method are:
++ - Read the content of SPBRR register.
++ - Calculate the value for SPBR field according to the baud rate.
++ - Set the SPBR field using the calculated value.
++	- Write the content back to SPBRR.
++
++@note The SPBR value is calculated from the baud rate using the formula
++ SPBR = clock frequency / baud rate.
++
++@param master [@ref IN] Contains reference to struct spi_master
++
++@param speed_hz [@ref IN] Baud rate to be set
++
++@retval None
++
++@see
++ - ioh_spi_setup_transfer
++ - ioh_spi_process_messages
++
++<hr>
++
++*/
++void ioh_spi_set_baud_rate(struct spi_master *master, u32 speed_hz)
++{
++ u32 nSpbr, reg_spbrr_val;
++
++ nSpbr = IOH_CLOCK_HZ / (speed_hz * 2);
++
++ /*if baud rate is less than we can support
++ limit it */
++
++ if (nSpbr > IOH_SPI_MAX_SPBR)
++ nSpbr = IOH_SPI_MAX_SPBR;
++
++
++ reg_spbrr_val = ioh_spi_readreg(master, IOH_SPI_SPBRR);
++
++ IOH_DEBUG("ioh_spi_set_baud_rate SPBRR content=%x\n", reg_spbrr_val);
++
++ IOH_DEBUG("ioh_spi_set_baud_rate SPBR in SPBRR=%d\n", nSpbr);
++
++ /*clear SPBRR */
++ reg_spbrr_val &= MASK_SPBRR_SPBR_BITS;
++
++ /*set the new value */
++ reg_spbrr_val |= nSpbr;
++
++ /*write the new value */
++ ioh_spi_writereg(master, IOH_SPI_SPBRR, reg_spbrr_val);
++ IOH_DEBUG("ioh_spi_set_baud_rate SPBRR content after setting SPBR=%x\n",
++ reg_spbrr_val);
++}
++
++/*! @ingroup SPI_HALLayerAPI
++
++@fn ioh_spi_set_bits_per_word(struct spi_master* master,u8 bits_per_word)
++
++@remarks Sets SIZE field in SPBRR
++
++ The major tasks performed by this method are:
++ - Read the content of SPBRR register.
++ - Set the SIZE field in SPBRR according to bits per word.
++ - Write back the value to SPBRR.
++
++@note The allowed bits per word settings are 8 and 16.The SIZE bit in SPBRR is
++ 0 denotes bits per word of 8 and SIZE bit 1 denotes bits per word of 16.
++
++@param master [@ref IN] Contains reference to struct spi_master
++
++@param bits_per_word [@ref IN] Bits per word for SPI transfer
++
++@retval None
++
++@see
++ - ioh_spi_setup_transfer
++ - ioh_spi_process_messages
++
++<hr>
++
++*/
++void ioh_spi_set_bits_per_word(struct spi_master *master, u8 bits_per_word)
++{
++ u32 reg_spbrr_val = ioh_spi_readreg(master, IOH_SPI_SPBRR);
++ IOH_DEBUG("ioh_spi_set_bits_per_word SPBRR content=%x\n",
++ reg_spbrr_val);
++
++ if (bits_per_word == IOH_SPI_8_BPW) {
++ IOH_CLR_BITMSK(reg_spbrr_val, SPBRR_SIZE_BIT);
++ IOH_DEBUG("ioh_spi_set_bits_per_word 8\n");
++ } else {
++ IOH_SET_BITMSK(reg_spbrr_val, SPBRR_SIZE_BIT);
++ IOH_DEBUG("ioh_spi_set_bits_per_word 16\n");
++ }
++
++ ioh_spi_writereg(master, IOH_SPI_SPBRR, reg_spbrr_val);
++
++ IOH_DEBUG
++ ("ioh_spi_set_bits_per_word SPBRR after setting bits per word=%x\n",
++ reg_spbrr_val);
++}
++
++/*! @ingroup SPI_HALLayerAPI
++
++@fn ioh_spi_clear_fifo(struct spi_master *master)
++
++@remarks Clears the Transmit and Receive FIFOs
++
++ The major tasks performed by this method are:
++ - Read the content of SPCR.
++ - Set FICLR bit to 1.
++ - Write back the content to SPCR.
++ - Set the FICLR bit to 0.
++ - Write back the content to SPCR.
++
++@param master [@ref IN] Contains reference to struct spi_master
++
++@retval None
++
++@see
++ - ioh_spi_setup_transfer
++ - ioh_spi_process_messages
++
++<hr>
++
++*/
++void ioh_spi_clear_fifo(struct spi_master *master)
++{
++ u32 reg_spcr_val = ioh_spi_readreg(master, IOH_SPI_SPCR);
++
++ IOH_SET_BITMSK(reg_spcr_val, SPCR_FICLR_BIT);
++ ioh_spi_writereg(master, IOH_SPI_SPCR, reg_spcr_val);
++ IOH_DEBUG("ioh_spi_clear_fifo SPCR content after setting FICLR = %x\n",
++ reg_spcr_val);
++
++ IOH_CLR_BITMSK(reg_spcr_val, SPCR_FICLR_BIT);
++ ioh_spi_writereg(master, IOH_SPI_SPCR, reg_spcr_val);
++
++ IOH_DEBUG
++ ("ioh_spi_clear_fifo SPCR content after resetting FICLR = %x\n",
++ reg_spcr_val);
++}
+diff -urN linux-2.6.33-rc3/drivers/spi/pch_spi_hal.h topcliff-2.6.33-rc3/drivers/spi/pch_spi_hal.h
+--- linux-2.6.33-rc3/drivers/spi/pch_spi_hal.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/spi/pch_spi_hal.h 2010-03-06 09:02:20.000000000 +0900
+@@ -0,0 +1,298 @@
++/**
++ * @file ioh_spi_hal.h
++ *
++ * @brief This header file contains macro definitions and function declarations
++ * for HAL layer APIs.
++ * @version 0.94
++ *
++ * @par
++ * -- Copyright Notice --
++ *
++ * @par
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * @par
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * @par
++ * -- End of Copyright Notice --
++ */
++#ifndef __IOH_SPI_HAL__
++#define __IOH_SPI_HAL__
++
++/*Register offsets*/
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_SPCR
++@brief SPCR register offset
++*/
++#define IOH_SPI_SPCR (0x00) /*SPI control register */
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_SPBRR
++@brief SPBRR register offset
++*/
++#define IOH_SPI_SPBRR (0x04) /*SPI baud rate register */
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_SPSR
++@brief SPSR register offset
++*/
++#define IOH_SPI_SPSR (0x08) /*SPI status register */
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_SPDWR
++@brief SPDWR register offset
++*/
++#define IOH_SPI_SPDWR (0x0C) /*SPI write data register */
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_SPDRR
++@brief SPDRR register offset
++*/
++#define IOH_SPI_SPDRR (0x10) /*SPI read data register */
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_SSNXCR
++@brief SSNXCR register offset
++*/
++#define IOH_SPI_SSNXCR (0x18)/* SSN Expand Control Register */
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_SRST
++@brief SRST register offset
++*/
++#define IOH_SPI_SRST (0x1C) /*SPI reset register */
++
++/* valid bits per word settings*/
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_8_BPW
++@brief Macro to denote 8 Bits per word transfer
++*/
++#define IOH_SPI_8_BPW (8)
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_16_BPW
++@brief Macro to denote 16 Bits per word transfer
++*/
++#define IOH_SPI_16_BPW (16)
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_SPSR_TFD
++@brief Mask to obtaining TFD bits from SPSR
++*/
++#define IOH_SPI_SPSR_TFD (0x000007C0)
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_SPSR_RFD
++@brief Mask to obtaining RFD bits from SPSR
++*/
++#define IOH_SPI_SPSR_RFD (0x0000F800)
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_READABLE(x)
++@brief Macro to obtain number of bytes received in Rx FIFO
++@note x is the content of SPSR register
++*/
++#define IOH_SPI_READABLE(x) (((x) & IOH_SPI_SPSR_RFD)>>11)
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_WRITABLE(x)
++@brief Macro to obtain number of bytes to be transmitted in Tx FIFO
++@note x is the content of SPSR register
++*/
++#define IOH_SPI_WRITABLE(x) (((x) & IOH_SPI_SPSR_TFD)>>6)
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_RX_THOLD
++@brief Macro to denote Rx interrupt threshold
++@note Currently set to interrupt when 8 bytes are received
++*/
++/*set to interrupt when 8 bytes have been received */
++#define IOH_SPI_RX_THOLD (7)
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_RX_THOLD_MAX
++@brief Macro to denote Rx interrupt threshold when Rx FIFO is full
++*/
++/*set to interrupt when 16 bytes have been received */
++#define IOH_SPI_RX_THOLD_MAX (15)
++
++/*direction for interrupts*/
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_RX
++@brief Macro to indicate Receive
++*/
++#define IOH_SPI_RX (1)
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_TX
++@brief Macro to indicate Transmit
++*/
++#define IOH_SPI_TX (2)
++
++/*various interrupts*/
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_TFI
++@brief Transmit interrupt
++*/
++#define IOH_SPI_TFI (0x1)
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_RFI
++@brief Receive interrupt
++*/
++#define IOH_SPI_RFI (0x2)
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_FI
++@brief Transfer complete interrupt
++*/
++#define IOH_SPI_FI (0x4)
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_ORI
++@brief Overflow interrupt
++*/
++#define IOH_SPI_ORI (0x8)
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_MDFI
++@brief Modefault interrupt
++*/
++#define IOH_SPI_MDFI (0x10)
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_ALL
++@brief Macro to denote all interrupts
++*/
++#define IOH_SPI_ALL \
++ (IOH_SPI_TFI|IOH_SPI_RFI|IOH_SPI_FI|IOH_SPI_ORI|IOH_SPI_MDFI)
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_MAX_BAUDRATE
++@brief Macro to denote maximum possible baud rate in bits per second
++*/
++#define IOH_SPI_MAX_BAUDRATE (5000000)
++
++/*! @ingroup SPI_HALLayer
++@def IOH_SPI_MAX_FIFO_DEPTH
++@brief Macro to denote maximum FIFO depth(16)
++*/
++#define IOH_SPI_MAX_FIFO_DEPTH (16)
++
++/*status codes*/
++
++/*! @ingroup SPI_Global
++@def IOH_SPI_SUCCESS
++@brief Success status code
++*/
++#define IOH_SPI_SUCCESS (0)
++
++/*! @ingroup SPI_Global
++@def IOH_SPI_FAIL
++@brief Failure status code
++*/
++#define IOH_SPI_FAIL (-1)
++
++/* hal function prototypes */
++
++/*! @ingroup SPI_HALLayerAPI
++@fn ioh_spi_setup_transfer(struct spi_device *spi)
++@brief Configures the IOH SPI hardware for SPI transfer
++*/
++s8 ioh_spi_setup_transfer(struct spi_device *spi);
++
++/*! @ingroup SPI_HALLayerAPI
++@fn ioh_spi_set_enable(const struct spi_device *spi, u8 enable)
++@brief Sets/Resets SPE bit in SPCR based on enable parameter
++*/
++void ioh_spi_set_enable(const struct spi_device *spi, u8 enable);
++
++/*! @ingroup SPI_HALLayerAPI
++@fn ioh_spi_set_master_mode( struct spi_master *master)
++@brief Sets MSTR bit in SPCR
++*/
++void ioh_spi_set_master_mode(struct spi_master *master);
++
++/*! @ingroup SPI_HALLayerAPI
++@fn ioh_spi_writereg(struct spi_master *master,int idx, u32 val)
++@brief Performs register writes
++*/
++inline void ioh_spi_writereg(struct spi_master *master, int idx, u32 val);
++
++/*! @ingroup SPI_HALLayerAPI
++@fn ioh_spi_readreg(struct spi_master *master,int idx)
++@brief Performs register reads
++*/
++inline u32 ioh_spi_readreg(struct spi_master *master, int idx);
++
++/*! @ingroup SPI_HALLayerAPI
++@fn ioh_spi_handler (int irq, void* dev_id)
++@brief The interrupt handler
++*/
++irqreturn_t ioh_spi_handler(int irq, void *dev_id);
++
++/*! @ingroup SPI_HALLayerAPI
++@fn ioh_spi_entcb (void (*ioh_spi_cb)( struct ioh_spi_data* ))
++@brief Registers the Callback function
++*/
++void ioh_spi_entcb(void (*ioh_spi_cb) (struct ioh_spi_data *));
++
++/*! @ingroup SPI_HALLayerAPI
++@fn ioh_spi_enable_interrupts (struct spi_master *master ,u8 interrupt)
++@brief Enables specified interrupts in SPCR
++*/
++void ioh_spi_enable_interrupts(struct spi_master *master, u8 interrupt);
++
++/*! @ingroup SPI_HALLayerAPI
++@fn ioh_spi_disable_interrupts (struct spi_master *master ,u8 interrupt)
++@brief Disables specified interrupts in SPCR
++*/
++void ioh_spi_disable_interrupts(struct spi_master *master, u8 interrupt);
++
++/*! @ingroup SPI_HALLayerAPI
++@fn ioh_spi_set_threshold(struct spi_device *spi,u32 threshold, u8 dir)
++@brief Sets RFIC/TFIC fields in SPCR based on threshold and dir
++*/
++void ioh_spi_set_threshold(struct spi_device *spi, u32 threshold, u8 dir);
++
++/*! @ingroup SPI_HALLayerAPI
++@fn ioh_spi_reset(struct spi_master *master)
++@brief Resets IOH SPI register settings
++*/
++void ioh_spi_reset(struct spi_master *master);
++
++/*! @ingroup SPI_HALLayerAPI
++@fn ioh_spi_set_baud_rate(struct spi_master *master,u32 speed_hz)
++@brief Sets SPBR field in SPBRR
++*/
++void ioh_spi_set_baud_rate(struct spi_master *master, u32 speed_hz);
++
++/*! @ingroup SPI_HALLayerAPI
++@fn ioh_spi_set_bits_per_word(struct spi_master *master,u8 bits_per_word)
++@brief Sets SIZE field in SPBRR
++*/
++void ioh_spi_set_bits_per_word(struct spi_master *master, u8 bits_per_word);
++
++/*! @ingroup SPI_HALLayerAPI
++@fn ioh_spi_clear_fifo(struct spi_master *master)
++@brief Clears Tx/Rx FIFOs by toggling FICLR bit in SPCR
++*/
++void ioh_spi_clear_fifo(struct spi_master *master);
++#endif
+diff -urN linux-2.6.33-rc3/drivers/spi/pch_spi_main.c topcliff-2.6.33-rc3/drivers/spi/pch_spi_main.c
+--- linux-2.6.33-rc3/drivers/spi/pch_spi_main.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/spi/pch_spi_main.c 2010-03-09 00:40:52.000000000 +0900
+@@ -0,0 +1,1323 @@
++/**
++ * @file ioh_spi_main.c
++ *
++ * @brief This file defines the SPI_InterfaceLayer APIs of the IOH SPI
++ * controller
++ * driver.
++ *
++ * @version 0.94
++ *
++ * @par
++ * -- Copyright Notice --
++ *
++ * @par
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * @par
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * @par
++ * -- End of Copyright Notice --
++ */
++
++#include <linux/pci.h>
++#include <linux/wait.h>
++#include <linux/spi/spi.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include "pch_debug.h"
++#include "pch_spi.h"
++#include "pch_spi_hal.h"
++
++/*! @ingroup SPI_HALLayer
++@def SSN_LOW
++@brief SSNXCR register value to pull down SSN
++*/
++#define SSN_LOW (0x02U)
++
++/*! @ingroup SPI_HALLayer
++@def SSN_NO_CONTROL
++@brief SSNXCR register value to relinquish control over SSN
++*/
++#define SSN_NO_CONTROL (0x00U)
++
++/*function prototypes*/
++
++/*! @ingroup SPI_UtilitiesAPI
++@fn ioh_spi_deselect_chip(struct ioh_spi_data* pCtrlData)
++@brief Clears the details of the current slave from the SPI channel
++ data structure
++*/
++static inline void ioh_spi_deselect_chip(struct ioh_spi_data *pCtrlData);
++
++/*! @ingroup SPI_UtilitiesAPI
++@fn ioh_spi_select_chip(struct ioh_spi_data* pCtrlData,struct spi_device* pSpi)
++@brief Update the slave device details in the SPI channel data structure
++*/
++static inline void ioh_spi_select_chip(struct ioh_spi_data *pCtrlData,
++ struct spi_device *pSpi);
++
++/*! @ingroup SPI_UtilitiesAPI
++@fn ioh_spi_process_messages(struct work_struct* pWork)
++@brief Work Queue handler to handle SPI data transfers
++*/
++static void ioh_spi_process_messages(struct work_struct *pWork);
++
++/*! @ingroup SPI_UtilitiesAPI
++
++@fn ioh_spi_get_resources(struct ioh_spi_board_data* pBoardData)
++
++@remarks Acquires the resources needed by IOH SPI driver
++
++ The major tasks performed by this method are:
++ - Initialize the spin lock of all SPI channels.
++ - Initialize queue to hold pending SPI messages of all SPI channels.
++ - Initialize wait queue head of all SPI channels.
++ - Create the work structure for all SPI channels.
++ - Create the work queues for all SPI channels.
++ - Allocate PCI regions.
++ - Get PCI memory mapped address and base addresses for all SPI channels.
++ - Reset the IOH SPI hardware for all SPI channels.
++ - Register the interrupt handler.
++
++@note This function is invoked by ioh_spi_probe to acquire
++ the various resources needed by IOH SPI driver.If any of the actions
++ performed by ioh_spi_get_resources fails,@ref ioh_spi_free_resources
++ is invoked to perform the necessary cleanups.
++
++@param pBoardData [@ref INOUT]
++ Contains the reference to struct ioh_spi_board_data
++
++@retval int
++- @ref IOH_SPI_SUCCESS The function terminates normally after all
++ required resources are acquired.
++- -EBUSY create_singlethread_workqueue fails.
++ pci_request_regions fails.
++ request_irq fails.
++- -EINVAL request_irq fails.
++- -ENOSYS request_irq_fails.
++- -ENOMEM pci_iomap_fails.
++ request_irq fails.
++
++@see
++ - ioh_spi_probe
++
++<hr>
++*/
++int ioh_spi_get_resources(struct ioh_spi_board_data *pBoardData)
++{
++ int i;
++ long IORemapAddress;
++ s32 iRetVal = IOH_SPI_SUCCESS;
++ IOH_DEBUG("ioh_spi_get_resources ENTRY\n");
++
++ /*initialize resources */
++
++ for (i = 0; i < IOH_SPI_MAX_DEV; i++) {
++		/*initialize queue of pending messages */
++ INIT_LIST_HEAD(&(pBoardData->pCtrlData[i]->Queue));
++ IOH_DEBUG
++ ("ioh_spi_get_resources pCtrlData[i]->Queue initialized using"
++ "INIT_LIST_HEAD\n");
++
++ /*initialize spin locks */
++ spin_lock_init(&(pBoardData->pCtrlData[i]->Lock));
++ IOH_DEBUG
++ ("ioh_spi_get_resources pCtrlData[i]->Lock initialized using"
++ "spin_lock_init\n");
++
++ /*set channel status */
++ pBoardData->pCtrlData[i]->Status = STATUS_RUNNING;
++ IOH_DEBUG
++ ("ioh_spi_get_resources pCtrlData[i]->Status\
++ = STATUS_RUNNING\n");
++
++ /*initialize work structure */
++ INIT_WORK(&(pBoardData->pCtrlData[i]->Work),
++ ioh_spi_process_messages);
++ IOH_DEBUG
++ ("ioh_spi_get_resources pCtrlData[i]->Work initialized\
++ using INIT_WORK\n");
++
++ /*initialize wait queues */
++ init_waitqueue_head(&(pBoardData->pCtrlData[i]->Wait));
++ IOH_DEBUG
++ ("ioh_spi_get_resources pCtrlData[i]->Wait initialized\
++ using init_waitqueue_head\n");
++ }
++
++ do {
++ for (i = 0; i < IOH_SPI_MAX_DEV; i++) {
++ /*create workqueue */
++ pBoardData->pCtrlData[i]->pWorkQueue =
++ create_singlethread_workqueue(DRIVER_NAME);
++
++ if ((pBoardData->pCtrlData[i]->pWorkQueue) == NULL) {
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_get_resources create_singlet\
++ hread_workqueue failed\n");
++ iRetVal = -EBUSY;
++ break;
++ }
++ }
++
++ if (iRetVal != 0)
++ break;
++
++
++ IOH_DEBUG
++ ("ioh_spi_get_resources create_singlethread_workqueue\
++ success\n");
++ iRetVal = pci_request_regions(pBoardData->pDev, DRIVER_NAME);
++ if (iRetVal != 0) {
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_get_resources request_region failed\n");
++ break;
++ }
++
++ IOH_DEBUG("ioh_spi_get_resources request_region returned=%d\n",
++ iRetVal);
++
++ pBoardData->bRegionRequested = true;
++ IOH_DEBUG
++ ("ioh_spi_get_resources pCtrlData->bRegionRequested = true\n");
++
++ /* Wipro 1/13/2010 Use Mem BAR */
++ IORemapAddress =
++ (unsigned long)pci_iomap(pBoardData->pDev, 1, 0);
++
++ if (IORemapAddress == 0) {
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_get_resources pci_iomap failed\n");
++ iRetVal = -ENOMEM;
++ break;
++ }
++
++ IOH_DEBUG
++ ("ioh_spi_get_resources pci_iomap success PCI Base\
++ address=%x\n",
++ (IORemapAddress));
++
++ /*calculate base address for all channels */
++
++ for (i = 0; i < IOH_SPI_MAX_DEV; i++) {
++ pBoardData->pCtrlData[i]->IORemapAddress =
++ IORemapAddress + (IOH_SPI_ADDRESS_SIZE * i);
++ IOH_DEBUG
++ ("ioh_spi_get_resources Base address for\
++ channel %d= %x\n",
++ i, (pBoardData->pCtrlData[i]->IORemapAddress));
++ }
++
++ /*reset IOH SPI h/w */
++ for (i = 0; i < IOH_SPI_MAX_DEV; i++) {
++ ioh_spi_reset(pBoardData->pCtrlData[i]->pMaster);
++ IOH_DEBUG
++ ("ioh_spi_get_resources ioh_spi_reset invoked\
++ successfully \n");
++ }
++
++ /*register IRQ */
++ iRetVal = request_irq(pBoardData->pDev->irq, ioh_spi_handler,
++ IRQF_SHARED, DRIVER_NAME,
++ (void *)pBoardData);
++ if (iRetVal != 0) {
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_get_resources request_irq failed\n");
++ break;
++ }
++
++ IOH_DEBUG("ioh_spi_get_resources request_irq returned=%d\n",
++ iRetVal);
++
++ pBoardData->bIrqRegistered = true;
++ IOH_DEBUG
++ ("ioh_spi_get_resources pCtrlData->bIrqRegistered=true\n");
++ } while (0);
++
++ if (iRetVal != IOH_SPI_SUCCESS) {
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_get_resources FAIL:invoking\
++ ioh_spi_free_resources\n");
++ ioh_spi_free_resources(pBoardData);
++ }
++
++ IOH_DEBUG("ioh_spi_get_resources Return=%d\n", iRetVal);
++
++ return iRetVal;
++}
++
++/*! @ingroup SPI_UtilitiesAPI
++
++@fn ioh_spi_free_resources(struct ioh_spi_board_data* pBoardData)
++
++@remarks Frees the resources acquired by IOH SPI driver
++
++ The main tasks performed by this method are:
++ - Destroy the workqueus created for all SPI channels.
++ - Disables interrupts and unregisters the interrupt handler.
++ - Unmaps the PCI base address.
++ - Releases PCI regions.
++
++@note This function is invoked from ioh_spi_remove when the SPI device is
++ being removed from the system or when the IOH SPI driver is being
++ unloaded from the system using "rmmod" command.
++
++@param pBoardData [@ref INOUT] Contains the reference to struct
++ ioh_spi_board_data
++
++@retval None
++
++@see
++ - ioh_spi_remove
++
++<hr>
++*/
++void ioh_spi_free_resources(struct ioh_spi_board_data *pBoardData)
++{
++ int i;
++
++ IOH_DEBUG("ioh_spi_free_resources ENTRY\n");
++
++ /*free workqueue */
++
++ for (i = 0; i < IOH_SPI_MAX_DEV; i++) {
++ if (pBoardData->pCtrlData[i]->pWorkQueue != NULL) {
++ destroy_workqueue(pBoardData->pCtrlData[i]->pWorkQueue);
++ pBoardData->pCtrlData[i]->pWorkQueue = NULL;
++ IOH_DEBUG
++ ("ioh_spi_free_resources destroy_workqueue invoked\
++ successfully\n");
++ }
++ }
++
++ /*disable interrupts & free IRQ */
++ if (pBoardData->bIrqRegistered == true) {
++ /* disable interrupts */
++ for (i = 0; i < IOH_SPI_MAX_DEV; i++) {
++ ioh_spi_disable_interrupts(pBoardData->pCtrlData[i]->
++ pMaster, IOH_SPI_ALL);
++ IOH_DEBUG
++ ("ioh_spi_free_resources ioh_spi_disable_interrupts\
++ invoked successfully\n");
++ }
++
++ /*free IRQ */
++ free_irq(pBoardData->pDev->irq, (void *)pBoardData);
++
++ IOH_DEBUG
++ ("ioh_spi_free_resources free_irq invoked successfully\n");
++
++ pBoardData->bIrqRegistered = false;
++ }
++
++ /*unmap PCI base address */
++ if ((pBoardData->pCtrlData[0]->IORemapAddress) != 0) {
++ pci_iounmap(pBoardData->pDev,
++ (void *)(pBoardData->pCtrlData[0]->IORemapAddress));
++
++ for (i = 0; i < IOH_SPI_MAX_DEV; i++)
++ pBoardData->pCtrlData[i]->IORemapAddress = 0;
++
++
++ IOH_DEBUG
++ ("ioh_spi_free_resources pci_iounmap invoked\
++ successfully\n");
++ }
++
++ /*release PCI region */
++ if (pBoardData->bRegionRequested == true) {
++ pci_release_regions(pBoardData->pDev);
++ IOH_DEBUG
++ ("ioh_spi_free_resources pci_release_regions invoked\
++ successfully\n");
++ pBoardData->bRegionRequested = false;
++ }
++}
++
++/*! @ingroup SPI_UtilitiesAPI
++
++@fn ioh_spi_process_messages(struct work_struct* pWork)
++
++@remarks Work Queue handler to handle SPI data transfers
++
++The main tasks performed by this method are:
++- If system is suspended,then flush the queue of pending transfers and return.
++- Retrieve the SPI message to be processed from the queue of pending messages.
++- Invoke @ref ioh_spi_select_chip to configure the SPI channel.
++- Retrieve the 1st or the subsequent transfer structure from SPI message
++ structure.
++- Update baud rate and bits per word,if user has specified new values.
++- Allocate memory for Transmit and Receive buffers.
++- Copy transmit data from transfer structure to Transmit buffer.
++- Pull down SSN by writing 0x2 to SSNXCR register.
++- Write transmit data to Transmit FIFO.
++- Enable required interrupts.
++- Enable SPI transfer by invoking @ref ioh_spi_set_enable.
++- Wait till SPI data transfer is completed.
++- Relinquish control over SSN by writing 0x0 to SSNXCR register.
++- Disable SPI transfer by invoking @ref ioh_spi_set_enable.
++- Clear Transmit & Receive FIFOs by invoking @ref ioh_spi_clear_fifo.
++- Copy received data from Receive buffer to transfer structure.
++- Free memory allocated for Transmit and Receive buffers.
++- Update data count in transfer structure.
++- If the SPI message has any more transfers , process them same as above.
++- If system is suspended,then flush the queue of pending transfers and return.
++- Again schedule the work queue handler to run if there are pending messages in
++queue of pending messages.
++
++@note Work Queue handler is scheduled by @ref ioh_spi_transfer after
++the SPI message to be processed is pushed into the queue of pending
++transfers.This function will write the first set of data to Tx FIFO and sleeps
++till all SPI data transfer is over.The data transfer is handled by
++the interrupt handler ioh_spi_handler function.
++
++@param pWork [@ref IN] contains reference to struct work_struct
++
++@retval None
++
++@see
++ - ioh_spi_transfer
++
++<hr>
++*/
++static void ioh_spi_process_messages(struct work_struct *pWork)
++{
++ int j;
++ u32 nWrites;
++
++ struct spi_message *pMsg;
++ int bMemFail, size;
++ int bpw;
++
++ struct ioh_spi_data *pCtrlData =
++ container_of(pWork, struct ioh_spi_data, Work);
++ IOH_DEBUG("ioh_spi_process_messages pCtrlData initialized\n");
++
++ spin_lock(&pCtrlData->Lock);
++
++ /*check if suspend has been initiated;if yes flush queue */
++
++ if ((pCtrlData->pBoardData->bSuspended == true)
++ || (pCtrlData->Status == STATUS_EXITING)) {
++ IOH_DEBUG
++ ("ioh_spi_process_messages suspend/remove initiated,\
++ flushing queue\n");
++ list_for_each_entry(pMsg, pCtrlData->Queue.next, queue) {
++ pMsg->status = -EIO;
++
++ if (pMsg->complete != 0)
++ pMsg->complete(pMsg->context);
++
++
++ /*delete from queue */
++ list_del_init(&pMsg->queue);
++ }
++
++ spin_unlock(&pCtrlData->Lock);
++ } else {
++ pCtrlData->bCurrent_msg_processing = true;
++ IOH_DEBUG
++ ("ioh_spi_process_messages set pCtrlData->\
++ bCurrent_msg_processing"
++ "= true\n");
++
++ /*Get the message from the queue and delete it from there. */
++ pCtrlData->pCurMsg =
++ list_entry(pCtrlData->Queue.next, struct spi_message,
++ queue);
++ IOH_DEBUG
++ ("ioh_spi_process_messages :Got new message from queue \n");
++ list_del_init(&pCtrlData->pCurMsg->queue);
++
++ pCtrlData->pCurMsg->status = 0;
++
++ IOH_DEBUG
++ ("ioh_spi_process_messages :Invoking ioh_spi_select_chip\n");
++ ioh_spi_select_chip(pCtrlData, pCtrlData->pCurMsg->spi);
++
++ spin_unlock(&pCtrlData->Lock);
++
++ do {
++ /*If we are already processing a message get the next
++ transfer
++ structure from the message otherwise retrieve the
++ 1st transfer
++ request from the message. */
++ spin_lock(&pCtrlData->Lock);
++
++ if (pCtrlData->pCurTransfer == NULL) {
++ pCtrlData->pCurTransfer =
++ list_entry(pCtrlData->pCurMsg->transfers.
++ next, struct spi_transfer,
++ transfer_list);
++ IOH_DEBUG
++ ("ioh_spi_process_messages :Getting 1st\
++ transfer structure"
++ "for this message\n");
++ } else {
++ pCtrlData->pCurTransfer =
++ list_entry(pCtrlData->pCurTransfer->
++ transfer_list.next,
++ struct spi_transfer,
++ transfer_list);
++ IOH_DEBUG
++ ("ioh_spi_process_messages :Getting next\
++ transfer structure"
++ "for this message\n");
++ }
++
++ spin_unlock(&pCtrlData->Lock);
++
++ /*set baud rate if needed */
++
++ if (pCtrlData->pCurTransfer->speed_hz) {
++ IOH_DEBUG
++ ("ioh_spi_process_messages:setting\
++ baud rate\n");
++ ioh_spi_set_baud_rate(pCtrlData->pMaster,
++ (pCtrlData->pCurTransfer->
++ speed_hz));
++ }
++
++ /*set bits per word if needed */
++ if ((pCtrlData->pCurTransfer->bits_per_word) &&
++ ((pCtrlData->pCurMsg->spi->bits_per_word) !=
++ (pCtrlData->pCurTransfer->bits_per_word))) {
++ IOH_DEBUG
++ ("ioh_spi_process_messages:setting bits\
++ per word\n");
++ ioh_spi_set_bits_per_word(pCtrlData->pMaster,
++ (pCtrlData->
++ pCurTransfer->
++ bits_per_word));
++ bpw = pCtrlData->pCurTransfer->bits_per_word;
++ } else {
++ bpw = pCtrlData->pCurMsg->spi->bits_per_word;
++ }
++
++ /*reset Tx/Rx index */
++ pCtrlData->TxIndex = 0;
++
++ pCtrlData->RxIndex = 0;
++
++ if (IOH_SPI_8_BPW == bpw) {
++ /*8 bits per word */
++ pCtrlData->lengthInBpw =
++ pCtrlData->pCurTransfer->len;
++ } else {
++ /*16 bits per word */
++ pCtrlData->lengthInBpw =
++ (pCtrlData->pCurTransfer->len) / 2;
++ }
++
++ bMemFail = false;
++
++ /*find alloc size */
++ size =
++ (pCtrlData->pCurTransfer->len) *
++ (sizeof(*(pCtrlData->pU16TxBuffer)));
++ /*allocate memory for pU16TxBuffer & pU16RxBuffer */
++ pCtrlData->pU16TxBuffer =
++/* (u16 *) kzalloc(size, GFP_KERNEL);*/
++ kzalloc(size, GFP_KERNEL);
++ if (pCtrlData->pU16TxBuffer != NULL) {
++ pCtrlData->pU16RxBuffer =
++/* (u16 *) kzalloc(size, GFP_KERNEL);*/
++ kzalloc(size, GFP_KERNEL);
++ if (pCtrlData->pU16RxBuffer == NULL) {
++ bMemFail = true;
++ kfree(pCtrlData->pU16TxBuffer);
++ }
++ } else {
++ bMemFail = true;
++ }
++
++ if (bMemFail) {
++ /*flush queue and set status of all transfers
++ to -ENOMEM */
++ IOH_LOG(KERN_ERR,
++ "Kzalloc fail in\
++ ioh_spi_process_messages\n");
++ list_for_each_entry(pMsg, pCtrlData->Queue.next,
++ queue) {
++ pMsg->status = -ENOMEM;
++
++ if (pMsg->complete != 0)
++ pMsg->complete(pMsg->context);
++
++
++ /*delete from queue */
++ list_del_init(&pMsg->queue);
++ }
++
++ return;
++ }
++
++ /*copy Tx Data */
++ if ((pCtrlData->pCurTransfer->tx_buf) != NULL) {
++ for (j = 0; j < (pCtrlData->lengthInBpw); j++) {
++ if (IOH_SPI_8_BPW == bpw) {
++ pCtrlData->pU16TxBuffer[j] =
++ (((u8 *) (pCtrlData->
++ pCurTransfer->
++ tx_buf))[j]);
++ IOH_DEBUG
++ ("xmt data in\
++ ioh_spi_process_messages=%x\n",
++ (pCtrlData->
++ pU16TxBuffer[j]));
++ } else {
++ pCtrlData->pU16TxBuffer[j] =
++ ((u16 *) (pCtrlData->
++ pCurTransfer->
++ tx_buf))[j];
++ IOH_DEBUG
++ ("xmt data ioh_spi_pro\
++ cess_messages%x\n",
++ (pCtrlData->
++ pU16TxBuffer[j]));
++ }
++ }
++ }
++
++ /*if len greater than IOH_SPI_MAX_FIFO_DEPTH,
++ write 16,else len bytes */
++ if ((pCtrlData->lengthInBpw) > IOH_SPI_MAX_FIFO_DEPTH)
++ nWrites = IOH_SPI_MAX_FIFO_DEPTH;
++ else
++ nWrites = (pCtrlData->lengthInBpw);
++
++
++#ifndef FPGA
++ /*LSI*/
++ IOH_DEBUG
++ ("\nioh_spi_process_messages:Pulling down SSN low\
++ - writing 0x2 to SSNXCR\n");
++ ioh_spi_writereg(pCtrlData->pMaster, IOH_SPI_SSNXCR,
++ SSN_LOW);
++#endif
++ IOH_DEBUG
++ ("\nioh_spi_process_messages:Writing %u items\n",
++ nWrites);
++
++ for (j = 0; j < nWrites; j++) {
++ ioh_spi_writereg(pCtrlData->pMaster,
++ IOH_SPI_SPDWR,
++ pCtrlData->pU16TxBuffer[j]);
++ }
++
++ /*update TxIndex */
++ pCtrlData->TxIndex = j;
++
++ IOH_DEBUG
++ ("ioh_spi_process_messages:enabling interrupts\n");
++
++ /*reset transfer complete flag */
++ pCtrlData->bTransferComplete = false;
++
++ pCtrlData->bTransferActive = true;
++
++ IOH_DEBUG
++ ("ioh_spi_process_messages set pCtrlData->\
++ bTransferActive = true\n");
++
++ /*enable interrupts */
++ if ((pCtrlData->lengthInBpw) > IOH_SPI_MAX_FIFO_DEPTH) {
++				/*set receive threshold to IOH_SPI_RX_THOLD */
++ ioh_spi_set_threshold(pCtrlData->pCurrentChip,
++ IOH_SPI_RX_THOLD,
++ IOH_SPI_RX);
++ /*enable FI and RFI interrupts */
++ ioh_spi_enable_interrupts(pCtrlData->pMaster,
++ IOH_SPI_RFI |
++ IOH_SPI_FI);
++ } else {
++				/*set receive threshold to maximum */
++ ioh_spi_set_threshold(pCtrlData->pCurrentChip,
++ IOH_SPI_RX_THOLD_MAX,
++ IOH_SPI_RX);
++ /*enable FI interrupt */
++ ioh_spi_enable_interrupts(pCtrlData->pMaster,
++ IOH_SPI_FI);
++ }
++
++ IOH_DEBUG
++ ("ioh_spi_process_messages:invoking\
++ ioh_spi_set_enable to enable SPI\n");
++
++ ioh_spi_set_enable((pCtrlData->pCurrentChip), true);
++
++ /*Wait until the transfer completes; go to sleep
++ after initiating the transfer. */
++ IOH_DEBUG
++ ("ioh_spi_process_messages:waiting for transfer\
++ to get over\n");
++
++ wait_event_interruptible(pCtrlData->Wait,
++ false !=
++ pCtrlData->bTransferComplete);
++#ifndef FPGA
++ /*LSI*/
++ ioh_spi_writereg(pCtrlData->pMaster, IOH_SPI_SSNXCR,
++ SSN_NO_CONTROL);
++ IOH_DEBUG
++ ("\n ioh_spi_process_messages:no more control over\
++ SSN-writing 0x0 to SSNXCR");
++#endif
++ IOH_DEBUG("ioh_spi_process_messages:transmit over\n");
++
++ pCtrlData->bTransferActive = false;
++ IOH_DEBUG
++ ("ioh_spi_process_messages set pCtrlData->\
++ bTransferActive = false\n");
++
++ /*clear all interrupts */
++ ioh_spi_writereg(pCtrlData->pMaster, IOH_SPI_SPSR,
++ (ioh_spi_readreg
++ (pCtrlData->pMaster, IOH_SPI_SPSR)));
++ /*disable interrupts */
++ ioh_spi_disable_interrupts(pCtrlData->pMaster,
++ IOH_SPI_ALL);
++
++ /*Disable SPI transfer */
++ IOH_DEBUG
++ ("ioh_spi_process_messages:invoking\
++ ioh_spi_set_enable to disable"
++ "spi transfer\n");
++ ioh_spi_set_enable((pCtrlData->pCurrentChip), false);
++
++ /*clear FIFO */
++ IOH_DEBUG
++ ("ioh_spi_process_messages:invoking\
++ ioh_spi_clear_fifo to "
++ "clear fifo\n");
++ ioh_spi_clear_fifo(pCtrlData->pMaster);
++
++ /*copy Rx Data */
++
++ if ((pCtrlData->pCurTransfer->rx_buf) != NULL) {
++ for (j = 0; j < (pCtrlData->lengthInBpw); j++) {
++ if (IOH_SPI_8_BPW == bpw) {
++ ((u8 *) (pCtrlData->
++ pCurTransfer->
++ rx_buf))[j] =
++ (u8) ((pCtrlData->
++ pU16RxBuffer[j]) &
++ 0xFF);
++
++ IOH_DEBUG
++ ("rcv data in ioh_spi_proc\
++ ess_messages=%x\n",
++ (pCtrlData->
++ pU16RxBuffer[j]));
++
++ } else {
++ ((u16 *) (pCtrlData->
++ pCurTransfer->
++ rx_buf))[j] =
++ (u16) (pCtrlData->
++ pU16RxBuffer[j]);
++ IOH_DEBUG
++ ("rcv data in ioh_spi_proce\
++ ss_messages=%x\n",
++ (pCtrlData->
++ pU16RxBuffer[j]));
++ }
++ }
++ }
++
++ /*free memory */
++ kfree(pCtrlData->pU16RxBuffer);
++ pCtrlData->pU16RxBuffer = NULL;
++
++
++ kfree(pCtrlData->pU16TxBuffer);
++ pCtrlData->pU16TxBuffer = NULL;
++
++
++ /*increment message count */
++ pCtrlData->pCurMsg->actual_length +=
++ pCtrlData->pCurTransfer->len;
++
++ IOH_DEBUG
++ ("ioh_spi_process_messages:pCtrlData->pCurMsg->\
++ actual_length=%d\n",
++ pCtrlData->pCurMsg->actual_length);
++
++ /*check for delay */
++ if (pCtrlData->pCurTransfer->delay_usecs) {
++ IOH_DEBUG
++ ("ioh_spi_process_messages:delay in usec=%d\n",
++ pCtrlData->pCurTransfer->delay_usecs);
++ udelay(pCtrlData->pCurTransfer->delay_usecs);
++ }
++
++ spin_lock(&pCtrlData->Lock);
++
++ /*No more transfer in this message. */
++
++ if ((pCtrlData->pCurTransfer->transfer_list.next) ==
++ &(pCtrlData->pCurMsg->transfers)) {
++ IOH_DEBUG
++ ("ioh_spi_process_messages:no more\
++ transfers in this message\n");
++ /*Invoke complete callback
++ [To the spi core..indicating
++ end of transfer] */
++ pCtrlData->pCurMsg->status = 0;
++
++ if ((pCtrlData->pCurMsg->complete) != 0) {
++ IOH_DEBUG
++ ("ioh_spi_process_messages:Invoking\
++ callback of SPI core\n");
++ pCtrlData->pCurMsg->complete(pCtrlData->
++ pCurMsg->
++ context);
++ }
++
++ /*update status in global variable */
++ pCtrlData->bCurrent_msg_processing = false;
++
++ IOH_DEBUG
++ ("ioh_spi_process_messages:pCtrlData->\
++ bCurrent_msg_processing"
++ "set to false\n");
++
++ pCtrlData->pCurMsg = NULL;
++
++ pCtrlData->pCurTransfer = NULL;
++
++ /*check if we have items in list and not
++ suspending */
++ /*return 1 if list empty */
++ if ((list_empty(&pCtrlData->Queue) == 0) &&
++ (pCtrlData->pBoardData->bSuspended == false)
++ && (pCtrlData->Status != STATUS_EXITING)) {
++ /*We have some more work to do
++ (either there is more transfer
++ requests in the current message or
++ there are more messages) */
++ IOH_DEBUG
++ ("ioh_spi_process_messages:we\
++ have pending messages"
++ "-Invoking queue_work\n");
++ queue_work(pCtrlData->pWorkQueue,
++ &pCtrlData->Work);
++ }
++
++ /*check if suspend has been initiated;if yes
++ flush queue */
++ else if ((pCtrlData->pBoardData->bSuspended ==
++ true)
++ || (pCtrlData->Status ==
++ STATUS_EXITING)) {
++ IOH_DEBUG
++ ("ioh_spi_process_messages\
++ suspend/remove initiated,"
++ "flushing queue\n");
++ list_for_each_entry(pMsg,
++ pCtrlData->Queue.
++ next, queue) {
++ pMsg->status = -EIO;
++
++ if (pMsg->complete != 0) {
++ pMsg->complete(pMsg->
++ context);
++ }
++
++ /*delete from queue */
++ list_del_init(&pMsg->queue);
++ }
++ }
++ }
++
++ spin_unlock(&pCtrlData->Lock);
++
++ } while ((pCtrlData->pCurTransfer) != NULL);
++ }
++}
++
++/*! @ingroup SPI_UtilitiesAPI
++
++@fn ioh_spi_select_chip(struct ioh_spi_data* pCtrlData,struct spi_device* pSpi)
++
++@remarks Update the SPI device details in the SPI channel data structure
++
++The main tasks performed by this method are:
++- Check whether the active SPI device is different from the device to
++ which the previous data transfer occurred.
++- If yes invoke @ref ioh_spi_deselect_chip to clear details of old device
++ from pCtrlData.
++- Update the details of the new device in pCtrlData
++- Invoke @ref ioh_spi_setup_transfer to configure the SPI channel.
++
++@note This function is invoked by @ref ioh_spi_process_messages before
++ processing
++ each SPI message.
++
++@param pCtrlData [@ref INOUT] contains reference to struct ioh_spi_data
++
++@param pSpi [@ref IN] contains reference to struct spi_device
++
++@retval None
++
++@see
++ - ioh_spi_process_messages
++
++<hr>
++*/
++static inline void ioh_spi_select_chip(struct ioh_spi_data *pCtrlData,
++ struct spi_device *pSpi)
++{
++ if ((pCtrlData->pCurrentChip) != NULL) {
++ if ((pSpi->chip_select) != (pCtrlData->nCurrentChip)) {
++ IOH_DEBUG
++ ("ioh_spi_select_chip : different slave-Invoking"
++ "ioh_spi_deselect_chip\n");
++ ioh_spi_deselect_chip(pCtrlData);
++ }
++ }
++
++ pCtrlData->pCurrentChip = pSpi;
++
++ pCtrlData->nCurrentChip = pCtrlData->pCurrentChip->chip_select;
++
++ IOH_DEBUG("ioh_spi_select_chip :Invoking ioh_spi_setup_transfer\n");
++ ioh_spi_setup_transfer(pSpi);
++}
++
++/*! @ingroup SPI_UtilitiesAPI
++
++@fn ioh_spi_deselect_chip(struct ioh_spi_data* pCtrlData)
++
++@remarks Clear the SPI device details from the SPI channel data structure
++
++ The main tasks performed by this method are:
++ - Clear the details of SPI device from SPI channel data structure.
++
++@note This function is invoked from @ref ioh_spi_select_chip
++
++@param pCtrlData [@ref INOUT] Contains reference to struct ioh_spi_data
++
++@retval None
++
++@see
++ - ioh_spi_select_chip
++
++<hr>
++*/
++static inline void ioh_spi_deselect_chip(struct ioh_spi_data *pCtrlData)
++{
++ if (pCtrlData->pCurrentChip != NULL) {
++ IOH_DEBUG
++ ("ioh_spi_deselect_chip :clearing pCurrentChip data\n");
++ pCtrlData->pCurrentChip = NULL;
++ }
++}
++
++/*! @ingroup SPI_UtilitiesAPI
++
++@fn ioh_spi_check_request_pending(struct ioh_spi_board_data* pBoardData)
++
++@remarks Checks for any pending SPI transfer request in the queue of
++ pending transfers
++
++ The main tasks performed by this method are:
++ - If the message queue is empty return IOH_SPI_SUCCESS.
++ - Sleep for 100ms and again check if message queue is empty,if yes
++ return IOH_SPI_SUCCESS.
++ - Repeat 500 times.
++ - If queue is still not empty return -EBUSY.
++
++@note This function is invoked by @ref ioh_spi_remove
++
++@param pBoardData [@ref INOUT] Contains reference to struct
++ ioh_spi_board_data
++
++@retval int
++ - @ref IOH_SPI_SUCCESS Message queue is empty
++ - -EBUSY Queue is not empty
++
++@see
++ - ioh_spi_remove
++
++<hr>
++*/
++int ioh_spi_check_request_pending(struct ioh_spi_board_data *pBoardData)
++{
++ int i;
++ int iStatus = IOH_SPI_SUCCESS;
++ u16 count;
++
++ for (i = 0; i < IOH_SPI_MAX_DEV; i++) {
++ count = 500;
++ spin_lock(&(pBoardData->pCtrlData[i]->Lock));
++ pBoardData->pCtrlData[i]->Status = STATUS_EXITING;
++
++ while ((list_empty(&(pBoardData->pCtrlData[i]->Queue)) == 0) &&
++ (--count)) {
++ IOH_DEBUG
++ ("ioh_spi_check_request_pending :Queue not empty\n");
++ spin_unlock(&(pBoardData->pCtrlData[i]->Lock));
++ msleep(IOH_SPI_SLEEP_TIME);
++ spin_lock(&(pBoardData->pCtrlData[i]->Lock));
++ }
++
++ spin_unlock(&(pBoardData->pCtrlData[i]->Lock));
++
++ if (count) {
++ IOH_DEBUG
++ ("ioh_spi_check_request_pending :Queue empty\n");
++ } else {
++ iStatus = -EBUSY;
++ }
++ }
++
++ IOH_DEBUG("ioh_spi_check_request_pending : EXIT=%d\n", iStatus);
++
++ return iStatus;
++}
++
++/*! @ingroup SPI_InterfaceLayerAPI
++
++@fn int ioh_spi_setup(struct spi_device* pSpi)
++
++@remarks Validates the SPI device configuration paramters specified by user
++
++The main tasks performed by this method are:
++- Validate the bits per word paramter (should be either 8 or 16).
++- Validate the maximum baud rate parameter (should not be greater than 5Mbps).
++
++@note This function is registered with the SPI core as the setup routine of
++IOH SPI controller driver.This function is invoked by the kernel SPI
++component when user invokes any of spidev's IOCTLs to configure the
++SPI device setting.In this function no hardware settings are modified
++as this can affect any ongoing SPI data transfers.So the setting passed
++by the user is validated and the function returns.Hardware settings are
++updated in the function @ref ioh_spi_setup_transfer, which is invoked from
++@ref ioh_spi_process_messages before initiating a SPI data transfer.
++
++@param pSpi [@ref IN] Contains reference to structure spi_device
++
++@retval int
++ - IOH_SPI_SUCCESS All paramters are valid
++ - -EINVAL Any of the paramter is invalid
++
++@see
++ - ioh_spi_probe
++
++<hr>
++*/
++int ioh_spi_setup(struct spi_device *pSpi)
++{
++ int iRetVal = IOH_SPI_SUCCESS;
++
++ /*check bits per word */
++
++ if ((pSpi->bits_per_word) == 0) {
++ pSpi->bits_per_word = IOH_SPI_8_BPW;
++ IOH_DEBUG("ioh_spi_setup 8 bits per word \n");
++ }
++
++ if (((pSpi->bits_per_word) != IOH_SPI_8_BPW) &&
++ ((pSpi->bits_per_word != IOH_SPI_16_BPW))) {
++ IOH_LOG(KERN_ERR, "ioh_spi_setup Invalid bits per word\n");
++ iRetVal = -EINVAL;
++ }
++
++ /*Check baud rate setting */
++ /*if baud rate of chip is greater than
++ max we can support,return error */
++ if ((pSpi->max_speed_hz) > IOH_SPI_MAX_BAUDRATE) {
++ iRetVal = -EINVAL;
++ IOH_LOG(KERN_ERR, "ioh_spi_setup Invalid Baud rate\n");
++ }
++
++ IOH_DEBUG(KERN_ERR, "ioh_spi_setup MODE = %x\n",
++ ((pSpi->mode) & (SPI_CPOL | SPI_CPHA)));
++
++ if (((pSpi->mode) & SPI_LSB_FIRST) != 0)
++ IOH_DEBUG("ioh_spi_setup LSB_FIRST\n");
++ else
++ IOH_DEBUG("ioh_spi_setup MSB_FIRST\n");
++
++
++ IOH_DEBUG("ioh_spi_setup Return=%d\n", iRetVal);
++
++ return iRetVal;
++}
++
++/*! @ingroup SPI_InterfaceLayerAPI
++
++@fn ioh_spi_transfer(struct spi_device* pSpi,struct spi_message* pMsg)
++
++@remarks Validates the SPI message and pushes it onto queue of pending
++ transfers
++
++ The main tasks performed by this method are:
++ - If the list of transfers is empty return -EINVAL.
++ - If the maximum baud rate is zero return -EINVAL.
++ - If Tranmit buffer and Receive buffer both are invalid for
++ any transfer return -EINVAL.
++ - If the length of transfer is zero for any transfer return -EINVAL.
++ - If maximum baud rate and bits per word are invalid return -EINVAL.
++ - If status of SPI channel is STATUS_EXITING return -ESHUTDOWN.
++ - If device is suspended return -EINVAL.
++ - Add the SPI message to queue of pending SPI messages.
++ - Schedule work queue handler to run.
++
++@note ioh_spi_transfer is registered by IOH SPI controller driver
++ with SPI core as
++ its transfer routine from the function @ref ioh_spi_probe.It
++ is invoked by the kernel's SPI component when user invokes
++ read,write or SPI_IOC_MESSAGE ioctl.
++
++@param pSpi [@ref IN] Contains reference to struct spi_device
++
++@param pMsg [@ref IN] Contains reference to struct spi_message
++
++@retval int
++- @ref IOH_SPI_SUCCESS The function exists normally after adding the SPI message
++ to queue of pending SPI messages and schedules work queue
++ handler to run.
++- -EINVAL Any of the paramters are found to be invalid or the system is
++ suspended.
++- -ESHUTDOWN When the status of the SPI channel is STATUS_EXITING
++ The status STATUS_EXITING is set when ioh_spi_remove
++ is invoked.
++
++@see
++ - ioh_spi_probe
++
++<hr>
++*/
++int ioh_spi_transfer(struct spi_device *pSpi, struct spi_message *pMsg)
++{
++
++ struct spi_transfer *pTransfer;
++
++ struct ioh_spi_data *pCtrlData = spi_master_get_devdata(pSpi->master);
++ int iRetVal = IOH_SPI_SUCCESS;
++
++ do {
++ /*validate spi message and baud rate */
++ if (unlikely((list_empty(&pMsg->transfers) == 1) ||
++ ((pSpi->max_speed_hz) == 0))) {
++ if (list_empty(&pMsg->transfers) == 1) {
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_transfer list empty\n");
++ }
++
++ if ((pSpi->max_speed_hz) == 0) {
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_tranfer maxspeed=%d\n",
++ (pSpi->max_speed_hz));
++ }
++
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_transfer returning EINVAL\n");
++
++ iRetVal = -EINVAL;
++ break;
++ }
++
++ IOH_DEBUG("ioh_spi_transfer Transfer List not empty\n");
++
++ IOH_DEBUG("ioh_spi_transfer Transfer Speed is set\n");
++
++ /*validate Tx/Rx buffers and Transfer length */
++ list_for_each_entry(pTransfer, &pMsg->transfers,
++ transfer_list) {
++ if ((((pTransfer->tx_buf) == NULL)
++ && ((pTransfer->rx_buf) == NULL))
++ || (pTransfer->len == 0)) {
++ if (((pTransfer->tx_buf) == NULL)
++ && ((pTransfer->rx_buf) == NULL)) {
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_transfer Tx and Rx\
++ buffer NULL\n");
++ }
++
++ if (pTransfer->len == 0) {
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_transfer Transfer\
++ length invalid\n");
++ }
++
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_transfer returning EINVAL\n");
++
++ iRetVal = -EINVAL;
++ break;
++ }
++
++ IOH_DEBUG("ioh_spi_transfer Tx/Rx buffer valid\n");
++
++ IOH_DEBUG("ioh_spi_transfer Transfer length valid\n");
++
++ /*if baud rate hs been specified validate the same */
++
++ if (pTransfer->speed_hz) {
++ if ((pTransfer->speed_hz) >
++ IOH_SPI_MAX_BAUDRATE) {
++ iRetVal = -EINVAL;
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_transfer Invalid\
++ Baud rate\n");
++ }
++ }
++
++ /*if bits per word has been specified validate
++ the same */
++ if (pTransfer->bits_per_word) {
++ if ((pTransfer->bits_per_word != IOH_SPI_8_BPW)
++ && (pTransfer->bits_per_word !=
++ IOH_SPI_16_BPW)) {
++ iRetVal = -EINVAL;
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_transfer Invalid bits\
++ per word\n");
++ break;
++ }
++ }
++ }
++
++ if (iRetVal == -EINVAL)
++ break;
++
++
++ spin_lock(&pCtrlData->Lock);
++
++ /*We won't process any messages if we have been asked
++ to terminate */
++
++ if (STATUS_EXITING == (pCtrlData->Status)) {
++ spin_unlock(&pCtrlData->Lock);
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_transfer -pCtrlData->Status\
++ = STATUS_EXITING"
++ "returning ESHUTDOWN\n");
++ iRetVal = -ESHUTDOWN;
++ break;
++ }
++
++ /*If suspended ,return -EINVAL */
++ if (pCtrlData->pBoardData->bSuspended == true) {
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_transfer pCtrlData->\
++ pBoardData->bSuspending"
++ "= true returning EINVAL\n");
++ spin_unlock(&pCtrlData->Lock);
++ iRetVal = -EINVAL;
++ break;
++ }
++
++ /*set status of message */
++ pMsg->actual_length = 0;
++
++ IOH_DEBUG
++ ("ioh_spi_transfer - setting pMsg->status = -EINPROGRESS\n");
++
++ pMsg->status = -EINPROGRESS;
++
++ /*add message to queue */
++ list_add_tail(&pMsg->queue, &pCtrlData->Queue);
++
++ IOH_DEBUG("ioh_spi_transfer - Invoked list_add_tail\n");
++
++ /*schedule work queue to run */
++ queue_work(pCtrlData->pWorkQueue, &pCtrlData->Work);
++
++ IOH_DEBUG("ioh_spi_transfer - Invoked Queue Work\n");
++
++ spin_unlock(&pCtrlData->Lock);
++
++ } while (0);
++
++ IOH_DEBUG("ioh_spi_transfer RETURN=%d\n", iRetVal);
++
++ return iRetVal;
++}
++
++/*! @ingroup SPI_InterfaceLayerAPI
++
++@fn ioh_spi_cleanup(struct spi_device* pSpi)
++
++@remarks Provides the Cleanup routine for IOH SPI driver
++
++@note This is a dummy function.
++ It is not mandatory to have a cleanup function.
++ If SPI master provides a cleanup function while they register
++ with the SPI core, then SPI core invokes the cleanup function
++ when SPI master calls spi_unregister_master function.This
++ driver invokes spi_unregister_master from ioh_spi_remove
++ function. Before invoking spi_unregister_master all the resources
++ used is freed i.e. cleanup activities are handled in the
++ @ref ioh_spi_remove function itself.This function is registered
++ as the cleanup routine for this SPI controller driver from
++ the @ref ioh_spi_probe function.
++
++@param pSpi [@ref IN] Contains reference to struct spi_device
++
++@retval None
++
++@see
++ - ioh_spi_probe
++
++<hr>
++
++*/
++void ioh_spi_cleanup(struct spi_device *pSpi)
++{
++ IOH_DEBUG("spi_cleanup\n");
++}
++
++/*! @ingroup SPI_UtilitiesAPI
++
++@fn ioh_spi_callback( struct ioh_spi_data* pCtrlData)
++
++@remarks Informs ioh_spi_process_messages that SPI data transfer is complete
++
++ The main tasks performed by this method are:
++ - Set transfer status of the SPI channel to completed.
++ - Inform this to @ref ioh_spi_process_messages.
++
++@note The reference to this callback function is saved in a global pointer
++by the function @ref ioh_spi_entcb invoked from @ref ioh_spi_probe function.
++This function is invoked by the interrupt handler ioh_spi_handler
++after transfer complete interrupt is received indicating the end of
++SPI data transfer.ioh_spi_callback wakes up ioh_spi_process_messages
++which blocks till SPI data transfer is completed.
++
++@param pCtrldata [@ref IN] Contains reference to struct ioh_spi_data
++
++@retval None
++
++@see
++ - ioh_spi_handler
++ - ioh_spi_probe
++
++<hr>
++*/
++void ioh_spi_callback(struct ioh_spi_data *pCtrlData)
++{
++ IOH_DEBUG("ioh_ spi _callback waking up process\n");
++ spin_lock(&pCtrlData->Lock);
++ pCtrlData->bTransferComplete = true;
++ wake_up(&pCtrlData->Wait);
++ IOH_DEBUG("ioh_ spi _callback invoked wake_up\n");
++ spin_unlock(&pCtrlData->Lock);
++}
+diff -urN linux-2.6.33-rc3/drivers/spi/pch_spi_pci.c topcliff-2.6.33-rc3/drivers/spi/pch_spi_pci.c
+--- linux-2.6.33-rc3/drivers/spi/pch_spi_pci.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/spi/pch_spi_pci.c 2010-03-17 20:05:19.000000000 +0900
+@@ -0,0 +1,811 @@
++/**
++ * @file ioh_spi_pci.c
++ *
++ * @brief This file contains the function definition for the PCI Layer APIs
++ *
++ * @version 0.94
++ *
++ * @par
++ * -- Copyright Notice --
++ *
++ * @par
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * @par
++ * -- End of Copyright Notice --
++ */
++
++/*includes*/
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/device.h>
++#include <linux/spi/spi.h>
++#include <linux/workqueue.h>
++#include <linux/delay.h>
++#include <linux/ioport.h>
++#include <linux/interrupt.h>
++/*#include <asm/io.h> modify by checkpatch.pl*/
++#include <linux/io.h>
++#include "pch_spi.h"
++#include "pch_spi_hal.h"
++#include "pch_debug.h"
++
++/*! @ingroup SPI_PCILayer
++
++@def IOH_SPI_MAX_CS
++
++@brief Denotes the maximum chip select number possible.
++
++@note Currently this is just used to set the number of chip selects in
++ spi_master structure in @ref ioh_spi_probe function.
++
++@see ioh_spi_probe
++
++<hr>
++
++*/
++#define IOH_SPI_MAX_CS (0xFF)
++
++/*pci device ids*/
++
++/*! @ingroup SPI_PCILayer
++
++@brief Denotes the PCI device ID of the supported device.
++
++@see ioh_spi_pcidev_id
++
++<hr>
++
++*/
++#ifndef FPGA
++#define PCI_DEVICE_ID_IOH_SPI (0x8816) /*LSI*/
++#else
++#define PCI_DEVICE_ID_IOH_SPI (0x8005) /*FPGA*/
++#endif
++/*! @ingroup SPI_PCILayerAPI
++
++@fn ioh_spi_probe(struct pci_dev *pDev, const struct pci_device_id *id)
++
++@brief Implements the Probe functionality for IOH SPI driver
++
++@remarks Implements the Probe functionality for IOH SPI driver
++
++ The major tasks performed by this method are:
++ - Register the callback function.
++ - Enable the PCI device.
++ - Allocate memory for SPI master.
++ - Initialize members of SPI master structure.
++ - Register the SPI master.
++ - Invoke @ref ioh_spi_get_resources to acquire and initialize
++ other resources needed by the driver.
++
++@note This function is invoked by the kernel when it detects
++ a SPI device matching the vendor ID and device ID specified
++ by this driver.
++
++@param pDev [@ref INOUT] contains reference to struct pci_dev
++
++@param id [@ref IN] contains reference to struct pci_device_id
++
++@retval int
++- @ref IOH_SPI_SUCCESS The function exists successfully
++- -ENOMEM spi_alloc_master API fails/kmalloc fails
++- -EINVAL pci_enable_device fails/spi_register_master fails
++ /ioh_spi_get_resources fails
++- -EIO pci_enable_device fails
++- -ENODEV spi_register_master API fails
++- -ENOSYS ioh_spi_get_resources fails
++- -ENOMEM ioh_spi_get_resources fails
++
++@see ioh_spi_pcidev
++
++<hr>
++
++*/
++static int ioh_spi_probe(struct pci_dev *pDev, const struct pci_device_id *id)
++{
++
++ struct spi_master *pMaster[IOH_SPI_MAX_DEV];
++
++ struct ioh_spi_board_data *pBoardData;
++ int iRetVal, i, j;
++
++ IOH_DEBUG("ioh_spi_probe ENTRY\n");
++ /*initialize the call back function */
++ ioh_spi_entcb(ioh_spi_callback);
++ IOH_DEBUG("ioh_spi_probe invoked ioh_spi_entcb\n");
++
++ do {
++ /*allocate memory for private data */
++ pBoardData =
++ kmalloc(sizeof(struct ioh_spi_board_data), GFP_KERNEL);
++
++ if (pBoardData == NULL) {
++ IOH_LOG(KERN_ERR,
++ " ioh_spi_probe memory allocation for private\
++ data failed\n");
++ iRetVal = -ENOMEM;
++ break;
++ }
++
++ IOH_DEBUG
++ (" ioh_spi_probe memory allocation for private data\
++ success\n");
++
++ /*enable PCI device */
++ iRetVal = pci_enable_device(pDev);
++ if (iRetVal != 0) {
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_probe pci_enable_device FAILED\n");
++
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_probe invoked kfree to free memory\
++ allocated for pBoardData\n");
++ kfree(pBoardData);
++ break;
++ }
++
++ IOH_DEBUG("ioh_spi_probe pci_enable_device returned=%d\n",
++ iRetVal);
++
++ pBoardData->pDev = pDev;
++
++ /*alllocate memory for SPI master */
++ i = 0;
++
++ do {
++ pMaster[i] =
++ spi_alloc_master(&pDev->dev,
++ sizeof(struct ioh_spi_data));
++
++ if (pMaster[i] == NULL) {
++ iRetVal = -ENOMEM;
++
++ if (i > 0) {
++ j = 0;
++
++ do {
++ spi_master_put(pMaster[j]);
++ j++;
++ IOH_DEBUG
++ ("ioh_spi_probe invoked\
++ spi_master_put\n");
++ } while (j < i);
++ }
++
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_probe spi_alloc_master\
++ failed\n");
++
++ break;
++ }
++
++ i++;
++ } while (i < IOH_SPI_MAX_DEV);
++
++ IOH_DEBUG("ioh_spi_probe spi_alloc_master returned non NULL\n");
++
++ if (iRetVal != 0) {
++ kfree(pBoardData);
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_probe invoked kfree to free memory\
++ allocated for pBoardData\n");
++ pci_disable_device(pDev);
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_probe Invoked pci_disable_device\n");
++ break;
++ }
++
++ /*initialize members of SPI master */
++ for (i = 0; i < IOH_SPI_MAX_DEV; i++) {
++ pMaster[i]->bus_num = i;
++ pMaster[i]->num_chipselect = IOH_SPI_MAX_CS;
++ pMaster[i]->setup = ioh_spi_setup;
++ IOH_DEBUG
++ ("ioh_spi_probe setup member of SPI master\
++ initialized\n");
++ pMaster[i]->transfer = ioh_spi_transfer;
++ IOH_DEBUG
++ ("ioh_spi_probe transfer member of SPI master\
++ initialized\n");
++ pMaster[i]->cleanup = ioh_spi_cleanup;
++ IOH_DEBUG
++ ("ioh_spi_probe cleanup member of SPI master\
++ initialized\n");
++
++ pBoardData->pCtrlData[i] =
++ spi_master_get_devdata(pMaster[i]);
++
++ pBoardData->pCtrlData[i]->pMaster = pMaster[i];
++ pBoardData->pCtrlData[i]->nCurrentChip = 255;
++ pBoardData->pCtrlData[i]->pCurrentChip = NULL;
++ pBoardData->pCtrlData[i]->bTransferComplete = false;
++ pBoardData->pCtrlData[i]->pU16TxBuffer = NULL;
++ pBoardData->pCtrlData[i]->pU16RxBuffer = NULL;
++ pBoardData->pCtrlData[i]->TxIndex = 0;
++ pBoardData->pCtrlData[i]->RxIndex = 0;
++ pBoardData->pCtrlData[i]->bTransferActive = false;
++ pBoardData->pCtrlData[i]->pBoardData = pBoardData;
++
++ /*Register the controller with the SPI core. */
++ iRetVal = spi_register_master(pMaster[i]);
++ if (iRetVal != 0) {
++ spi_master_put(pMaster[i]);
++ IOH_DEBUG
++ ("ioh_spi_probe invoked spi_master_put\n");
++ /*unregister master for any channel that has
++ registered master */
++
++ if (i > 0) {
++#if 0
++ for (j = 0; j < i; j++) {
++ spi_unregister_master(pMaster
++ [j]);
++ IOH_DEBUG
++ ("ioh_spi_probe invoked\
++ spi_unregister_master\n");
++ }
++#else
++ spi_unregister_master(pMaster[0]);
++ IOH_DEBUG
++ ("ioh_spi_probe invoked\
++ spi_unregister_master\n");
++#endif
++ }
++
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_probe spi_register_\
++ master FAILED\n");
++
++ break;
++ }
++
++ IOH_DEBUG
++ ("ioh_spi_probe spi_register_master\
++ returned=%d\n",
++ iRetVal);
++ }
++
++ if (iRetVal != 0) {
++ kfree(pBoardData);
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_probe invoked kfree to free memory\
++ allocated for pBoardData\n");
++ pci_disable_device(pDev);
++ IOH_DEBUG("ioh_spi_probe invoked pci_disable\n");
++ break;
++ }
++
++ /*allocate resources for IOH SPI */
++ iRetVal = ioh_spi_get_resources(pBoardData);
++ if (iRetVal != IOH_SPI_SUCCESS) {
++ /*
++ for (i = 0; i < IOH_SPI_MAX_DEV; i++) {
++ spi_unregister_master(pMaster[i]);
++ IOH_DEBUG
++ ("ioh_spi_probe invoked\
++ spi_unregister_master\n");
++ }
++ */
++ spi_unregister_master(pMaster[0]);
++ IOH_DEBUG
++ ("ioh_spi_probe invoked spi_unregister_master\n");
++
++
++ kfree(pBoardData);
++
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_probe invoked kfree to free memory\
++ allocated for pBoardData\n");
++ pci_disable_device(pDev);
++ IOH_DEBUG("ioh_spi_probe invoked pci_disable\n");
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_probe get_resources FAILED\n");
++ break;
++ }
++
++ IOH_DEBUG("ioh_spi_probe ioh_spi_get_resources returned=%d\n",
++ iRetVal);
++
++ /*save private data in dev */
++ pci_set_drvdata(pDev, (void *)pBoardData);
++ IOH_DEBUG("ioh_spi_probe invoked pci_set_drvdata\n");
++
++ /*set master mode */
++
++ for (i = 0; i < IOH_SPI_MAX_DEV; i++) {
++ ioh_spi_set_master_mode(pMaster[i]);
++ IOH_DEBUG
++ ("ioh_spi_probe invoked ioh_spi_set_master_mode\n");
++ }
++
++ iRetVal = IOH_SPI_SUCCESS;
++
++ } while (false);
++
++ IOH_DEBUG("ioh_spi_probe Return=%d\n", iRetVal);
++
++ return iRetVal;
++}
++
++/*! @ingroup SPI_PCILayerAPI
++
++@fn ioh_spi_remove(struct pci_dev *pDev)
++
++@brief Implements the remove routine for IOH SPI driver
++
++@remarks Implements the remove routine for IOH SPI driver
++
++ The major tasks performed by this method are:
++ - Invoke @ref ioh_spi_check_request_pending function to find
++ out if there are any pending requests.
++ - Free the allocated resources by invoking @ref ioh_spi_free_resources.
++ - Unregister SPI master.
++ - Disable PCI device.
++
++@note This function is invoked when the IOH SPI controller driver module
++ is removed from the system using "rmmod" command OR when the SPI
++ device is removed from the system.
++
++@param pDev [@ref INOUT] contains reference to struct pci_dev
++
++@retval None
++
++@see ioh_spi_pcidev
++
++<hr>
++
++*/
++static void ioh_spi_remove(struct pci_dev *pDev)
++{
++ struct ioh_spi_board_data *pBoardData = pci_get_drvdata(pDev);
++
++ IOH_DEBUG("ioh_spi_remove ENTRY\n");
++
++ if (pBoardData != NULL) {
++ IOH_DEBUG("ioh_spi_remove invoked pci_get_drvdata\n");
++
++ /*check for any pending messages */
++
++ if ((-EBUSY) == ioh_spi_check_request_pending(pBoardData)) {
++ IOH_DEBUG
++ ("ioh_spi_remove ioh_spi_check_request_pending\
++ returned EBUSY\n");
++ /*no need to take any particular action;proceed with
++ remove even
++ though queue is not empty */
++ }
++
++ IOH_DEBUG
++ ("ioh_spi_remove ioh_spi_check_request_pending invoked\n");
++
++ /*Free resources allocated for IOH SPI */
++ ioh_spi_free_resources(pBoardData);
++ IOH_DEBUG("ioh_spi_remove invoked ioh_spi_free_resources\n");
++
++ /*Unregister SPI master */
++
++#if 0
++ int i;
++ for (i = 0; i < IOH_SPI_MAX_DEV; i++) {
++ spi_unregister_master(pBoardData->pCtrlData[i]->
++ pMaster);
++ IOH_DEBUG
++ ("ioh_spi_remove invoked spi_unregister_master\n");
++ }
++#else
++ spi_unregister_master(pBoardData->pCtrlData[0]->pMaster);
++ IOH_DEBUG("ioh_spi_remove invoked spi_unregister_master\n");
++
++#endif
++
++ /*free memory for private data */
++ kfree(pBoardData);
++
++ pci_set_drvdata(pDev, NULL);
++
++ IOH_DEBUG("ioh_spi_remove memory for private data freed\n");
++
++ /*disable PCI device */
++ pci_disable_device(pDev);
++
++ IOH_DEBUG("ioh_spi_remove invoked pci_disable_device\n");
++
++ } else {
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_remove pci_get_drvdata returned NULL\n");
++ }
++}
++
++/*! @ingroup SPI_PCILayerAPI
++
++@fn ioh_spi_suspend(struct pci_dev *pDev,pm_message_t state)
++
++@brief Implements the suspend routine for IOH SPI driver
++
++@remarks Implements the suspend routine for IOH SPI driver
++
++ The major tasks performed by this method are:
++ - Wait till current message is processed.
++ - Disable interrupts by invoking @ref ioh_spi_disable_interrupts.
++ - Unregister the interrupt handler.
++ - Save current state.
++ - Disable PM notifications.
++ - Disable PCI device.
++ - Move the device to D3Hot power state.
++
++@note This function is invoked by the kernel when the system transitions
++ to low power state.
++
++@param pDev [@ref INOUT] contains reference to struct pci_dev
++
++@param state [@ref IN] contains new PM state to which to transition to.
++
++@retval int
++ - @ref IOH_SPI_SUCCESS The function returns successfully
++ - -ENOMEM pci_save_state fails
++
++@see ioh_spi_pcidev
++
++<hr>
++
++*/
++#ifdef CONFIG_PM
++static int ioh_spi_suspend(struct pci_dev *pDev, pm_message_t state)
++{
++ int i;
++ u8 count;
++ s32 iRetVal = IOH_SPI_SUCCESS;
++
++ struct ioh_spi_board_data *pBoardData = pci_get_drvdata(pDev);
++
++ IOH_DEBUG("ioh_spi_suspend ENTRY\n");
++
++ if (pBoardData == NULL) {
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_suspend pci_get_drvdata returned NULL\n");
++ iRetVal = -EFAULT;
++ } else {
++ IOH_DEBUG
++ ("ioh_spi_suspend pci_get_drvdata invoked successfully\n");
++ pBoardData->bSuspended = true;
++ IOH_DEBUG
++ ("ioh_spi_suspend pBoardData->bSuspending set to true\n");
++
++ /*check if the current message is processed:
++ Only after thats done the transfer will be suspended */
++
++ for (i = 0; i < IOH_SPI_MAX_DEV; i++) {
++ count = 255;
++
++ while ((--count) > 0) {
++ if (pBoardData->pCtrlData[i]->
++ bCurrent_msg_processing == false) {
++ IOH_DEBUG
++ ("ioh_spi_suspend pBoardData\
++ ->pCtrlData->"
++ "bCurrent_msg_processing\
++ = false\n");
++ break;
++ } else {
++ IOH_DEBUG
++ ("ioh_spi_suspend pBoardData\
++ ->pCtrlData->"
++ "bCurrent_msg_processing = true\n");
++ }
++
++ msleep(IOH_SPI_SLEEP_TIME);
++ }
++ }
++
++ /*Free IRQ */
++ if (pBoardData->bIrqRegistered == true) {
++ /*disable all interrupts */
++ for (i = 0; i < IOH_SPI_MAX_DEV; i++) {
++ ioh_spi_disable_interrupts(pBoardData->
++ pCtrlData[i]->
++ pMaster,
++ IOH_SPI_ALL);
++ ioh_spi_reset(pBoardData->pCtrlData[i]->
++ pMaster);
++ IOH_DEBUG
++ ("ioh_spi_suspend ioh_spi_\
++ disable_interrupts invoked"
++ "successfully\n");
++ }
++
++ free_irq(pBoardData->pDev->irq, (void *)pBoardData);
++
++ pBoardData->bIrqRegistered = false;
++ IOH_DEBUG
++ ("ioh_spi_suspend free_irq invoked successfully\n");
++ IOH_DEBUG
++ ("ioh_spi_suspend pCtrlData->bIrqRegistered\
++ = false\n");
++ }
++
++ /*save config space */
++ iRetVal = pci_save_state(pDev);
++
++ if (iRetVal == 0) {
++ IOH_DEBUG
++ ("ioh_spi_suspend pci_save_state returned=%d\n",
++ iRetVal);
++ /*disable PM notifications */
++ pci_enable_wake(pDev, PCI_D3hot, 0);
++ IOH_DEBUG
++ ("ioh_spi_suspend pci_enable_wake invoked\
++ successfully\n");
++ /*disable PCI device */
++ pci_disable_device(pDev);
++ IOH_DEBUG
++ ("ioh_spi_suspend pci_disable_device invoked\
++ successfully\n");
++ /*move device to D3hot state */
++ pci_set_power_state(pDev, PCI_D3hot);
++ IOH_DEBUG
++ ("ioh_spi_suspend pci_set_power_state invoked\
++ successfully\n");
++ } else {
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_suspend pci_save_state failed\n");
++ }
++ }
++
++ IOH_DEBUG("ioh_spi_suspend return=%d\n", iRetVal);
++
++ return iRetVal;
++}
++
++#endif
++/*! @ingroup SPI_PCILayerAPI
++
++@fn ioh_spi_resume(struct pci_dev *pDev)
++
++@brief Implements the resume routine for IOH SPI driver
++
++@remarks Implements the resume routine for IOH SPI driver
++
++ The major tasks performed by this method are:
++ - Move the device to D0 power state.
++ - Restore the saved state.
++ - Enable the PCI device.
++ - Disable PM notifications.
++ - Register interrupt handler.
++ - Reset IOH SPI hardware.
++ - Set IOH SPI hardware in master mode.
++
++@note This function is invoked by the kernel when the system is being
++ resumed from suspend.
++
++@param pDev [@ref INOUT] contains reference to struct pci_dev
++
++@retval int
++ - @ref IOH_SPI_SUCCESS The function returns successfully
++ - -EINVAL request_irq fails
++ - -ENOMEM request_irq fails
++ - -ENOSYS request_irq fails
++ - -EBUSY request_irq fails
++ - -EIO pci_enable_device fails
++
++@see ioh_spi_pcidev
++
++<hr>
++
++*/
++#ifdef CONFIG_PM
++static int ioh_spi_resume(struct pci_dev *pDev)
++{
++ int i;
++ s32 iRetVal = IOH_SPI_SUCCESS;
++
++ struct ioh_spi_board_data *pBoardData = pci_get_drvdata(pDev);
++ IOH_DEBUG("ioh_spi_resume ENTRY\n");
++
++ if (pBoardData == NULL) {
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_resume pci_get_drvdata returned NULL\n");
++ iRetVal = -EFAULT;
++ } else {
++ /*move device to DO power state */
++ pci_set_power_state(pDev, PCI_D0);
++ IOH_DEBUG
++ ("ioh_spi_resume pci_set_power_state invoked successfully\n");
++
++ /*restore state */
++ pci_restore_state(pDev);
++ IOH_DEBUG
++ ("ioh_spi_resume pci_restore_state invoked successfully\n");
++ iRetVal = pci_enable_device(pDev);
++ if (iRetVal < 0) {
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_resume pci_enable_device failed\n");
++ } else {
++ IOH_DEBUG
++ ("ioh_spi_resume pci_enable_device returned=%d\n",
++ iRetVal);
++
++ /*disable PM notifications */
++ pci_enable_wake(pDev, PCI_D3hot, 0);
++ IOH_DEBUG
++ ("ioh_spi_resume pci_enable_wake invoked\
++ successfully\n");
++
++ /*register IRQ handler */
++
++ if ((pBoardData->bIrqRegistered) != true) {
++ /*register IRQ */
++ iRetVal = request_irq(pBoardData->pDev->irq,
++ ioh_spi_handler, IRQF_SHARED,
++ DRIVER_NAME,
++ pBoardData);
++ if (iRetVal < 0) {
++ IOH_LOG(KERN_ERR,
++ "ioh_spi_resume\
++ request_irq failed\n");
++ } else {
++ IOH_DEBUG
++ ("ioh_spi_resume request_irq\
++ returned=%d\n",
++ iRetVal);
++ pBoardData->bIrqRegistered = true;
++
++ /*reset IOH SPI h/w */
++
++ for (i = 0; i < IOH_SPI_MAX_DEV; i++) {
++ ioh_spi_reset(pBoardData->
++ pCtrlData[i]->
++ pMaster);
++ IOH_DEBUG
++ ("ioh_spi_resume\
++ ioh_spi_reset invoked "
++ "successfully \n");
++ ioh_spi_set_master_mode
++ (pBoardData->pCtrlData[i]->
++ pMaster);
++ IOH_DEBUG
++ ("ioh_spi_resume\
++ ioh_spi_set_master_mode invoked"
++ "successfully \n");
++ }
++
++ /*set suspend status to false */
++ pBoardData->bSuspended = false;
++
++ IOH_DEBUG
++ ("ioh_spi_resume set pBoardData->\
++ bSuspending = false\n");
++ }
++ }
++ }
++ }
++
++ IOH_DEBUG("ioh_spi_resume returning=%d\n", iRetVal);
++
++ return iRetVal;
++}
++
++#endif
++/*! @ingroup SPI_PCILayerFacilitators
++
++@struct ioh_spi_pcidev_id
++
++@brief Store information of supported PCI devices
++
++@see ioh_spi_pcidev
++
++<hr>
++
++*/
++
++static struct pci_device_id ioh_spi_pcidev_id[] = {
++ /*LSI*/ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_IOH_SPI)},
++ {0,}
++
++};
++
++/*! @ingroup SPI_PCILayerFacilitators
++
++@struct ioh_spi_pcidev
++
++@brief Store the references of PCI driver interfaces to kernel
++
++@note This structure is registerd with the kernel via the call
++ pci_register_driver from @ref ioh_spi_init
++
++@see
++ - ioh_spi_init
++ - ioh_spi_exit
++
++<hr>
++
++*/
++
++static struct pci_driver ioh_spi_pcidev = {
++ .name = "ioh_spi",
++ .id_table = ioh_spi_pcidev_id,
++ .probe = ioh_spi_probe,
++ .remove = ioh_spi_remove,
++#ifdef CONFIG_PM
++ .suspend = ioh_spi_suspend,
++ .resume = ioh_spi_resume,
++#endif
++
++};
++
++/*! @ingroup SPI_InterfaceLayerAPI
++
++@fn ioh_spi_init(void)
++
++@brief Entry point function for this module.
++
++@remarks Init function for IOH SPI driver module
++
++@param None
++
++@retval int
++ - 0 Function exits successfully
++ - -EEXIST pci_register_driver fails
++ - -EINVAL pci_register_driver fails
++ - -ENOMEM pci_register_driver fails
++
++<hr>
++
++*/
++static int __init ioh_spi_init(void)
++{
++ s32 iRetVal;
++
++ iRetVal = pci_register_driver(&ioh_spi_pcidev);
++ if (iRetVal == 0) {
++ IOH_DEBUG
++ ("ioh_spi_init pci_register_driver invoked successfully\n");
++ } else {
++ IOH_LOG(KERN_ERR, "ioh_spi_init pci_register_driver failed\n");
++ }
++
++ IOH_DEBUG("ioh_spi_init returning=%d\n", iRetVal);
++
++ return iRetVal;
++}
++
++/*! @ingroup SPI_InterfaceLayerAPI
++
++@fn ioh_spi_exit(void)
++
++@brief Exit point function for this module.
++
++@remarks Function invoked when module is removed
++
++@param None
++
++@retval None
++
++<hr>
++
++*/
++static void __exit ioh_spi_exit(void)
++{
++ IOH_DEBUG("ioh_spi_exit Invoking pci_unregister_driver\n");
++ pci_unregister_driver(&ioh_spi_pcidev);
++}
++
++MODULE_DESCRIPTION("IOH SPI PCI Driver");
++MODULE_LICENSE("GPL");
++module_init(ioh_spi_init);
++module_exit(ioh_spi_exit);
+diff -urN linux-2.6.33-rc3/drivers/spi/pch_spi_platform_devices.c topcliff-2.6.33-rc3/drivers/spi/pch_spi_platform_devices.c
+--- linux-2.6.33-rc3/drivers/spi/pch_spi_platform_devices.c 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33-rc3/drivers/spi/pch_spi_platform_devices.c 2010-03-06 07:44:02.000000000 +0900
+@@ -0,0 +1,50 @@
++#include <linux/module.h>
++#include <linux/spi/spidev.h>
++#include <linux/device.h>
++#include <linux/spi/spi.h>
++
++static struct spi_board_info ioh_spi_slaves[] = {
++ {
++ .modalias = "spidev", /* Name of spi_driver for this device*/
++ .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ*/
++ .bus_num = 0, /* Framework bus number*/
++ .chip_select = 0, /* Framework chip select.*/
++ .platform_data = NULL,
++ .mode = SPI_MODE_0,
++ },
++#if (CONFIG_PCH_SPI_PLATFORM_DEVICE_COUNT - 1)
++ {
++ .modalias = "spidev", /* Name of spi_driver for this device*/
++ .max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ*/
++ .bus_num = 1, /* Framework bus number*/
++ .chip_select = 0, /* Framework chip select.*/
++ .platform_data = NULL,
++ .mode = SPI_MODE_0,
++ },
++#endif
++};
++
++static __init int Load(void)
++{
++ int iRetVal = -1;
++
++ printk(KERN_INFO "Registering IOH SPI devices... \n");
++
++ if (!spi_register_board_info
++ (ioh_spi_slaves, ARRAY_SIZE(ioh_spi_slaves)))
++ iRetVal = 0;
++ else
++ printk(KERN_ERR "Registering IOH SPI devices failed\n");
++
++ return iRetVal;
++}
++
++/*
++ * static __exit void Unload()
++ * {
++ *
++ * }
++ * */
++
++module_init(Load);
++MODULE_LICENSE("GPL");
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-uart.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-uart.patch
new file mode 100644
index 0000000..b1e1f48
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-uart.patch
@@ -0,0 +1,1589 @@
+
+
+From: Masayuki Ohtake <masa-korg@dsn.okisemi.com>
+Subject: OKI Semiconductor PCH UART driver
+
+This driver implements UART controls for PCH.
+
+Signed-off-by: Masayuki Ohtake <masa-korg@dsn.okisemi.com>
+Acked-by: Wang Qi <qi.wang@intel.com>
+
+---
+ drivers/serial/8250.c | 8
+ drivers/serial/8250_pch.h | 57
+ drivers/serial/8250_pci.c |
+ drivers/serial/Kconfig | 15
+ drivers/serial/Makefile | 11
++++++++++++++++++++++++++++++++ 5 files changed, zz insertions(+)
+diff -urN linux-2.6.33.1/drivers/serial/8250.c topcliff-2.6.33.1/drivers/serial/8250.c
+--- linux-2.6.33.1/drivers/serial/8250.c 2010-03-16 01:09:39.000000000 +0900
++++ topcliff-2.6.33.1/drivers/serial/8250.c 2010-03-23 10:34:44.000000000 +0900
+@@ -39,11 +39,19 @@
+ #include <linux/nmi.h>
+ #include <linux/mutex.h>
+
++#if defined(ENABLE_SERIAL_8250_PCH) && defined(ENABLE_PCH_DMA_FEATURE)
++/* For using DMA features. */
++#include <linux/pci.h>
++#include <linux/pci_ids.h>
++#include <linux/dma-mapping.h>
++#include "pch_dma_main.h"
++#endif
++
+ #include <asm/io.h>
+ #include <asm/irq.h>
+
+ #include "8250.h"
+-
++#include "8250_pch.h"
+ #ifdef CONFIG_SPARC
+ #include "suncore.h"
+ #endif
+@@ -129,6 +137,18 @@
+ static unsigned int probe_rsa_count;
+ #endif /* CONFIG_SERIAL_8250_RSA */
+
++#if defined(ENABLE_SERIAL_8250_PCH) && defined(ENABLE_PCH_DMA_FEATURE)
++/* Structure for storing the DMA channel related information. */
++struct ioh_dma_feature {
++ u32 buf;
++ u32 phy_addr;
++ s32 channel;
++ u32 size;
++};
++#endif
++
++
++
+ struct uart_8250_port {
+ struct uart_port port;
+ struct timer_list timer; /* "no irq" timer */
+@@ -159,6 +179,17 @@
+ */
+ void (*pm)(struct uart_port *port,
+ unsigned int state, unsigned int old);
++
++#if defined(ENABLE_SERIAL_8250_PCH) && defined(ENABLE_PCH_DMA_FEATURE)
++ struct ioh_dma_feature rx_dma; /* DMA operation for Receive. */
++ struct ioh_dma_feature tx_dma; /* DMA operation for Transmit. */
++ unsigned int buffer; /* The buffer for DMA descriptors.*/
++ unsigned int buffer_phy; /* The physical address of the buffer.*/
++ unsigned int dma_flag;/* DMA flag variable for enabling DMA transfer. */
++ unsigned int rx_fifo_size; /* The UART Rx fifo size.*/
++ unsigned int dma_progress; /* The DMA in progress flag.*/
++ unsigned int dma_enabled; /* The DMA enable flag. */
++#endif
+ };
+
+ struct irq_info {
+@@ -299,6 +330,25 @@
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00,
+ .flags = UART_CAP_FIFO | UART_CAP_AFE,
+ },
++#if defined(ENABLE_SERIAL_8250_PCH)
++ [PORT_IOH_256FIFO] = {
++ .name = "IOH_256FIFO",
++ .fifo_size = 256,
++ .tx_loadsz = 256,
++ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
++ UART_FCR7_256BYTE,
++ .flags = UART_CAP_FIFO | UART_CAP_AFE,
++ },
++
++ [PORT_IOH_64FIFO] = {
++ .name = "IOH_64FIFO",
++ .fifo_size = 64,
++ .tx_loadsz = 64,
++ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
++ UART_FCR7_64BYTE,
++ .flags = UART_CAP_FIFO | UART_BUG_NOMSR,
++ },
++#endif
+ };
+
+ #if defined (CONFIG_SERIAL_8250_AU1X00)
+@@ -383,6 +433,78 @@
+
+ #endif
+
++
++#if defined(ENABLE_SERIAL_8250_PCH) && defined(ENABLE_PCH_DMA_FEATURE)
++
++/* Function for calculating the Rx FIFO size of the IOH UART. */
++void get_rx_fifo_size(struct uart_8250_port *up, u8 fcr_value)
++{
++ unsigned fifo_size;
++
++#ifdef DEBUG
++ printk(KERN_DEBUG"get_rx_fifo -> The FCR register value: %x.\n",
++ fcr_value);
++#endif
++/* check if the UART is a 64 byte FIFO UART */
++ if ((up->port.flags & UPF_IOH_UART_64_FIFO) != 0) {
++ switch ((fcr_value & 0xC0)) {
++ case 0:
++ fifo_size = 1;
++ break;
++
++ case 0x40:
++ fifo_size = 16;
++ break;
++
++ case 0x80:
++ fifo_size = 32;
++ break;
++
++ case 0xC0:
++ fifo_size = 56;
++ break;
++
++ default:
++ fifo_size = 1;
++ break;
++ }
++ } else {
++/* UART is 256 byte byte FIFO UART */
++ switch ((fcr_value & 0xC0)) {
++ case 0:
++ fifo_size = 1;
++ break;
++
++ case 0x40:
++ fifo_size = 64;
++ break;
++
++ case 0x80:
++ fifo_size = 128;
++ break;
++
++ case 0xC0:
++ fifo_size = 224;
++ break;
++
++ default:
++ fifo_size = 1;
++ break;
++ }
++ }
++/* save the fifo size for reference */
++ up->rx_fifo_size = fifo_size;
++#ifdef DEBUG
++ printk(KERN_DEBUG"Function get_rx_fifo_size stores fifo_size as: %u.\n",
++ fifo_size);
++#endif
++}
++
++#endif
++
++
++
++
+ static unsigned int hub6_serial_in(struct uart_port *p, int offset)
+ {
+ offset = map_8250_in_reg(p, offset) << p->regshift;
+@@ -550,15 +672,285 @@
+ (up->port.serial_in(&(up)->port, (offset)))
+ #define serial_out(up, offset, value) \
+ (up->port.serial_out(&(up)->port, (offset), (value)))
++
+ /*
+ * We used to support using pause I/O for certain machines. We
+ * haven't supported this for a while, but just in case it's badly
+ * needed for certain old 386 machines, I've left these #define's
+ * in....
+ */
++
+ #define serial_inp(up, offset) serial_in(up, offset)
+ #define serial_outp(up, offset, value) serial_out(up, offset, value)
+
++
++#if defined(ENABLE_SERIAL_8250_PCH) && defined(ENABLE_PCH_DMA_FEATURE)
++
++/* DMA TX callback function */
++void ioh_dma_tx_callback(int status, unsigned long data)
++{
++ struct uart_8250_port *up = (struct uart_8250_port *)data;
++/* struct circ_buf *xmit = &up->port.info->xmit;*/
++ struct circ_buf *xmit = &up->port.state->xmit;/*for 2.6.33-rc3*/
++ u8 value;
++
++#ifdef DEBUG
++ if (status == IOH_DMA_END) {
++ printk(KERN_DEBUG"ioh_dma_tx_callback -> DMA END interrupt\
++ obtained " \
++ "for transmission.\n");
++
++ }
++#endif
++ if (status == IOH_DMA_ABORT) {
++ printk(KERN_ERR"ioh_dma_tx_callback -> DMA ABORT interrupt\
++ obtained " \
++ "for transmission.\n");
++ }
++
++ /* Un-mapping the DMA buffer. */
++ if (up->tx_dma.phy_addr > 0)
++ dma_unmap_single(up->port.dev, up->tx_dma.phy_addr,
++ up->tx_dma.size, DMA_TO_DEVICE);
++
++ dma_unmap_single(up->port.dev, up->buffer_phy,
++ PAGE_SIZE, DMA_TO_DEVICE);
++
++ /*Enable TX interrupt.*/
++ if (uart_circ_chars_pending(xmit)) {
++ value = (u8)serial_in(up, UART_IER);
++ serial_out(up, UART_IER, (value | 0x02));
++ up->ier = serial_in(up, UART_IER);
++ }
++#ifdef DEBUG
++ printk(KERN_DEBUG"Function ioh_dma_tx_callback invoked.\n");
++#endif
++}
++
++/* Function for DMA setting for Scatter Gather Mode. */
++void set_scatter_gather_dma_mode(struct uart_8250_port *up, unsigned count)
++{
++ u32 in_address;
++ u32 out_address;
++ u32 desc_address;
++ u32 total_desc;
++ u32 i, j;
++ u8 value;
++ struct ioh_dma_desc *desc;
++ int channel = up->tx_dma.channel;
++ struct ioh_dma_mode_param mode = {
++ .TransferDirection = IOH_DMA_DIR_OUT_TO_IN,
++ .DMASizeType = IOH_DMA_SIZE_TYPE_8BIT,
++ .DMATransferMode = DMA_SCATTER_GATHER_MODE
++ };
++
++ desc = (struct ioh_dma_desc *)up->tx_dma.buf;
++
++ /* Mapping the DMA buffer for transfer. */
++ out_address = dma_map_single(up->port.dev, (void *)up->buffer,
++ PAGE_SIZE, DMA_TO_DEVICE);
++ in_address = up->port.mapbase + (map_8250_in_reg(up, UART_TX));
++ desc_address = dma_map_single(up->port.dev, (void *)up->tx_dma.buf,
++ up->tx_dma.size, DMA_TO_DEVICE);
++ up->buffer_phy = out_address;
++ up->tx_dma.phy_addr = desc_address;
++
++ /* Disable Transmit hardware interrupt.*/
++ value = (u8)serial_in(up, UART_IER);
++ serial_out(up, UART_IER, (value & 0xFD));
++ up->ier = serial_in(up, UART_IER);
++
++ total_desc = count/(up->tx_loadsz);
++
++ if ((count % (up->tx_loadsz)) > 0)
++ total_desc++;
++
++ dma_sync_single_for_cpu(up->port.dev, desc_address, up->tx_dma.size,
++ DMA_TO_DEVICE);
++
++ /* Organising the DMA descriptors. */
++ for (i = 0, j = 0; (i < total_desc && count > 0); i++) {
++ desc[i].insideAddress = in_address;
++ desc[i].outsideAddress = (out_address + j);
++
++ if ((int)(count - (up->tx_loadsz)) > 0) {
++ desc[i].size = up->tx_loadsz | IOH_DMA_SIZE_TYPE_8BIT;
++ count = count - (up->tx_loadsz);
++ j += (up->tx_loadsz);
++ } else {
++ desc[i].size = count | IOH_DMA_SIZE_TYPE_8BIT;
++ j += count;
++ count = 0;
++ }
++
++ desc[i].nextDesc = ((((u32)((desc_address +
++ ((i + 1)*(sizeof(struct ioh_dma_desc)))))) & 0xFFFFFFFC) |
++ DMA_DESC_FOLLOW_WITHOUT_INTERRUPT);
++ }
++
++ desc[i - 1].nextDesc = (DMA_DESC_END_WITH_INTERRUPT);
++
++ dma_sync_single_for_device(up->port.dev, desc_address, up->tx_dma.size,
++ DMA_TO_DEVICE);
++
++ /* Initiating the DMA transfer. */
++ ioh_set_dma_mode(channel, mode);
++ ioh_set_dma_desc(channel, (struct ioh_dma_desc *)((desc_address &
++ 0xFFFFFFFC) | DMA_DESC_FOLLOW_WITHOUT_INTERRUPT),\
++ (((struct ioh_dma_desc *)desc_address) + (total_desc - 1)));
++ ioh_dma_set_callback(channel, ioh_dma_tx_callback, (u32)up);
++ ioh_enable_dma(channel);
++
++#ifdef DEBUG
++ printk(KERN_DEBUG"Function set_scatter_gather_dma_mode invoked.\n");
++#endif
++}
++
++/* Function for DMA settings for ONE SHOT mode. */
++void set_one_shot_dma_mode(struct uart_8250_port *up, unsigned count)
++{
++ u32 in_address;
++ u32 out_address;
++ u8 value;
++ int channel = up->tx_dma.channel;
++ struct ioh_dma_mode_param mode = {
++ .TransferDirection = IOH_DMA_DIR_OUT_TO_IN,
++ .DMASizeType = IOH_DMA_SIZE_TYPE_8BIT,
++ .DMATransferMode = DMA_ONE_SHOT_MODE
++ };
++
++ /* Disable Receive hardware interrupt.*/
++ value = (u8)serial_in(up, UART_IER);
++ serial_out(up, UART_IER, (value & 0xFD));
++ up->ier = serial_in(up, UART_IER);
++
++ /* Mapping the DMA buffer for transfer. */
++ out_address = dma_map_single(up->port.dev, (void *)up->buffer,
++ PAGE_SIZE, DMA_TO_DEVICE);
++ in_address = up->port.mapbase + (map_8250_in_reg(up, UART_TX));
++ up->buffer_phy = out_address;
++ up->tx_dma.phy_addr = 0;
++
++ /* Initiating the DMA transfer. */
++ ioh_set_dma_mode(channel, mode);
++ ioh_set_dma_addr(channel, in_address, out_address);
++ ioh_set_dma_count(channel, count);
++ ioh_dma_set_callback(channel, ioh_dma_tx_callback, (u32)up);
++ ioh_enable_dma(channel);
++
++#ifdef DEBUG
++ printk(KERN_DEBUG"Function set_one_shot_dma_mode invoked.\n");
++#endif
++}
++
++/* Function for pushing the received characters to tty buffer. */
++/* At high baud rates tty buffer does not get emptied sufficiently fast
++ and hence multiple retries are required to push the data into the buffer */
++
++static int push_rx(struct tty_struct *tty, const unsigned char *buf, int size)
++{
++ u32 sz, i, j;
++ u32 loop;
++ u32 pushed;
++
++ for (pushed = 0, i = 0, loop = 1; (pushed < size) && loop;
++ pushed += sz, i++) {
++ sz = tty_insert_flip_string(tty, &buf[pushed], size - pushed);
++
++ for (j = 0; (j < 100000) && (sz == 0); j++) {
++ tty_flip_buffer_push(tty);
++ sz = tty_insert_flip_string(tty, &buf[pushed],
++ size - pushed);
++ }
++
++ if (sz == 0)
++ loop = 0;
++
++ }
++
++ tty_flip_buffer_push(tty);
++
++#ifdef DEBUG
++ printk(KERN_DEBUG"push_rx -> %d characters pushed. Remained " \
++ "%d characters.\n", pushed, size - pushed);
++ printk(KERN_DEBUG"Function push_rx return %u.\n", pushed);
++#endif
++
++ return pushed;
++}
++
++/* The DMA reception callback function. */
++void ioh_dma_rx_callback(int status, unsigned long data)
++{
++ struct uart_8250_port *up = (struct uart_8250_port *)data;
++ unsigned fifo_size;
++ unsigned long flags;
++ u8 value;
++
++ spin_lock_irqsave(&up->port.lock, flags);
++
++ /* Normal end. */
++ if (status == IOH_DMA_END) {
++ /* Preparing the DMA buffer to be accessed by the CPU*/
++ dma_sync_single_for_cpu(up->port.dev, up->rx_dma.phy_addr,
++ up->rx_dma.size, DMA_FROM_DEVICE);
++
++#ifdef DEBUG
++ printk(KERN_DEBUG"ioh_dma_rx_callback -> DMA END interrupt\
++ obtained for reception.\n");
++#endif
++ fifo_size = up->rx_fifo_size;
++/* push_rx(up->port.info->port.tty, (char *)up->rx_dma.buf,
++ fifo_size);*/
++ push_rx(up->port.state->port.tty, (char *)up->rx_dma.buf,
++ fifo_size); /*for 2.6.33-rc3 */
++
++ } else if (status == IOH_DMA_ABORT) { /* DMA abort. */
++ printk(KERN_ERR"ioh_dma_rx_callback -> DMA ABORT interrupt\
++ obtained for reception.\n");
++ }
++
++ /* Unmapping the buffer from DMA accesible area. */
++ dma_unmap_single(up->port.dev, up->rx_dma.phy_addr, up->rx_dma.size,
++ DMA_FROM_DEVICE);
++
++ /*Enable hardware interrupt.*/
++ value = (u8)serial_in(up, UART_IER);
++ serial_out(up, UART_IER, (value | 0x01));
++ up->ier = serial_in(up, UART_IER);
++ up->dma_progress = 0;
++
++ spin_unlock_irqrestore(&up->port.lock, flags);
++
++#ifdef DEBUG
++ printk(KERN_DEBUG"ioh_dma_rx_callback -> Function ioh_dma_rx_callback\
++ is invoked.\n");
++#endif
++}
++
++/* For initiating the DMA operation.*/
++void handle_dma_operation(struct uart_8250_port *up)
++{
++ u8 value;
++ int channel = up->rx_dma.channel;
++
++ /* Disable Receive hardware interrupt.*/
++ value = (u8)serial_in(up, UART_IER);
++ serial_out(up, UART_IER, (value & 0xFE));
++ up->ier = serial_in(up, UART_IER);
++
++ /* Enabling the DMA transfer. */
++ ioh_enable_dma(channel);
++ up->dma_progress = 1;
++
++#ifdef DEBUG
++ printk(KERN_DEBUG"handle_dma_operation -> DMA settings for reception\
++ completed.\n");
++ printk(KERN_DEBUG"Function handle_dma_operation invoked.\n");
++#endif
++}
++#endif /*for 2.6.33-rc3 */
++
+ /* Uart divisor latch read */
+ static inline int _serial_dl_read(struct uart_8250_port *up)
+ {
+@@ -725,7 +1117,8 @@
+ result = !(mode & UART_RSA_MSR_FIFO);
+
+ if (!result) {
+- serial_outp(up, UART_RSA_MSR, mode & ~UART_RSA_MSR_FIFO);
++ serial_outp(up, UART_RSA_MSR,
++ mode & ~UART_RSA_MSR_FIFO);
+ mode = serial_inp(up, UART_RSA_MSR);
+ result = !(mode & UART_RSA_MSR_FIFO);
+ }
+@@ -1040,6 +1433,18 @@
+ return;
+ }
+
++#if defined(ENABLE_SERIAL_8250_PCH)
++ if ((up->port.flags & UPF_IOH_UART) != 0) {
++ if ((up->port.flags & UPF_IOH_UART_64_FIFO) != 0)
++ /* IOH 2 Line 64 FIFO UART */
++ up->port.type = PORT_IOH_64FIFO;
++ else
++ /* IOH 8 Line 256 FIFO UART */
++ up->port.type = PORT_IOH_256FIFO;
++
++ }
++#endif
++
+ /*
+ * Try writing and reading the UART_IER_UUE bit (b6).
+ * If it works, this is probably one of the Xscale platform's
+@@ -1093,7 +1498,7 @@
+ return;
+
+ DEBUG_AUTOCONF("ttyS%d: autoconf (0x%04lx, 0x%p): ",
+- serial_index(&up->port), up->port.iobase, up->port.membase);
++ serial_index(&up->port), up->port.iobase, up->port.membase);
+
+ /*
+ * We really do need global IRQs disabled here - we're going to
+@@ -1196,7 +1601,34 @@
+ up->port.type = PORT_16550;
+ break;
+ case 3:
+- autoconfig_16550a(up);
++#ifdef ENABLE_SERIAL_8250_PCH
++ if ((up->port.type != PORT_IOH_256FIFO) &&
++ (up->port.type != PORT_IOH_64FIFO)) {
++#endif
++
++#ifdef DEBUG
++ printk(KERN_DEBUG
++ "IOH UART LOG:function autoconfig->autoconfig_16550a\
++ invoked "
++ "for port %d\n", up->port.type);
++#endif
++ autoconfig_16550a(up);
++
++#ifdef ENABLE_SERIAL_8250_PCH
++ }
++#endif
++
++#ifdef ENABLE_SERIAL_8250_PCH
++ else {
++
++#ifdef DEBUG
++ printk(KERN_DEBUG
++ "IOH UART LOG:function autoconfig->autoconfig_16550a\
++ not "
++ "invoked for IOH UART port %d\n", up->port.type);
++#endif
++ }
++#endif
+ break;
+ }
+
+@@ -1224,18 +1656,40 @@
+ #endif
+
+ serial_outp(up, UART_LCR, save_lcr);
++#ifdef ENABLE_SERIAL_8250_PCH
++ if ((up->port.type != PORT_IOH_256FIFO) &&
++ (up->port.type != PORT_IOH_64FIFO)) {
++ /* autoconfig is not done for ioh uarts.
++ hence do not report any kernel warning */
++#endif
+
+- if (up->capabilities != uart_config[up->port.type].flags) {
+- printk(KERN_WARNING
+- "ttyS%d: detected caps %08x should be %08x\n",
+- serial_index(&up->port), up->capabilities,
+- uart_config[up->port.type].flags);
++ if (up->capabilities != uart_config[up->port.type].flags) {
++ printk(KERN_WARNING "ttyS%d: detected\
++ caps %08x should be %08x\n",
++ up->port.line, up->capabilities,
++ uart_config[up->port.type].flags);
++ }
++
++#ifdef ENABLE_SERIAL_8250_PCH
+ }
++#endif
+
+ up->port.fifosize = uart_config[up->port.type].fifo_size;
+ up->capabilities = uart_config[up->port.type].flags;
+ up->tx_loadsz = uart_config[up->port.type].tx_loadsz;
+
++#ifdef DEBUG
++ printk(KERN_DEBUG
++ "IOH UART LOG:autoconfig: up->port.type = %d, up->port.fifosize =%d,"
++ "up->capabilities = %x, up->tx_loadsz = %d\n", up->port.type,
++ up->port.fifosize, up->capabilities, up->tx_loadsz);
++
++ printk(KERN_DEBUG
++ "IOH UART LOG:autoconfig: port.name = %s, port.fcr = %x\n",
++ uart_config[up->port.type].name, uart_config[up->port.type].fcr);
++
++#endif
++
+ if (up->port.type == PORT_UNKNOWN)
+ goto out;
+
+@@ -1461,7 +1915,11 @@
+ static void transmit_chars(struct uart_8250_port *up)
+ {
+ struct circ_buf *xmit = &up->port.state->xmit;
+- int count;
++ int count = 0;
++#if defined(ENABLE_SERIAL_8250_PCH) && defined(ENABLE_PCH_DMA_FEATURE)
++ unsigned limit = 0;
++ unsigned size = 0;
++#endif
+
+ if (up->port.x_char) {
+ serial_outp(up, UART_TX, up->port.x_char);
+@@ -1478,15 +1936,53 @@
+ return;
+ }
+
++#if defined(ENABLE_SERIAL_8250_PCH) && defined(ENABLE_PCH_DMA_FEATURE)
++ if (((up->port.flags & UPF_IOH_UART) != 0)) {
++ size = uart_circ_chars_pending(xmit);
++
++ if (size > PAGE_SIZE)
++ size = PAGE_SIZE;
++
++ count = size;
++ }
++
++ else
++#endif
+ count = up->tx_loadsz;
+ do {
++#if defined(ENABLE_SERIAL_8250_PCH) && defined(ENABLE_PCH_DMA_FEATURE)
++ if ((((up->port.flags & UPF_IOH_UART) != 0)) && (size > 0)) {
++ ((char *)(up->buffer))[limit] = xmit->buf[xmit->tail];
++ limit++;
++
++ } else
++#endif
+ serial_out(up, UART_TX, xmit->buf[xmit->tail]);
++
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ up->port.icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (--count > 0);
+
++#if defined(ENABLE_SERIAL_8250_PCH) && defined(ENABLE_PCH_DMA_FEATURE)
++ if (limit > 0) {
++ if (limit > up->tx_loadsz) {
++ set_scatter_gather_dma_mode(up, limit);
++#ifdef DEBUG
++ printk(KERN_DEBUG"transmit_chars -> Function\
++ set_scatter_gather_dma_mode invoked.\n");
++#endif
++ } else {
++ set_one_shot_dma_mode(up, limit);
++#ifdef DEBUG
++ printk(KERN_DEBUG"transmit_chars -> Function\
++ set_one_shot_dma_mode invoked.\n");
++#endif
++ }
++ }
++#endif
++
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+
+@@ -1494,6 +1990,10 @@
+
+ if (uart_circ_empty(xmit))
+ __stop_tx(up);
++
++#ifdef DEBUG
++ printk(KERN_DEBUG"Function transmit_chars invoked.\n");
++#endif
+ }
+
+ static unsigned int check_modem_status(struct uart_8250_port *up)
+@@ -1509,9 +2009,11 @@
+ if (status & UART_MSR_DDSR)
+ up->port.icount.dsr++;
+ if (status & UART_MSR_DDCD)
+- uart_handle_dcd_change(&up->port, status & UART_MSR_DCD);
++ uart_handle_dcd_change(&up->port,
++ status & UART_MSR_DCD);
+ if (status & UART_MSR_DCTS)
+- uart_handle_cts_change(&up->port, status & UART_MSR_CTS);
++ uart_handle_cts_change(&up->port,
++ status & UART_MSR_CTS);
+
+ wake_up_interruptible(&up->port.state->port.delta_msr_wait);
+ }
+@@ -1533,13 +2035,36 @@
+
+ DEBUG_INTR("status = %x...", status);
+
+- if (status & (UART_LSR_DR | UART_LSR_BI))
+- receive_chars(up, &status);
++#if defined(ENABLE_SERIAL_8250_PCH) && defined(ENABLE_PCH_DMA_FEATURE)
++ if ((up->dma_flag) && (up->dma_enabled)) {
++ /* If reception has to be done through DMA. */
++#ifdef DEBUG
++ printk(KERN_DEBUG"serial8250_handle_port ->\
++ Proceeding to handle reception " \
++ "interrupt through DMA operation.\n");
++#endif
++ handle_dma_operation(up);
++ } else {
++#endif
++
++ if ((status & (UART_LSR_DR | UART_LSR_BI))
++#if defined(ENABLE_SERIAL_8250_PCH) && defined(ENABLE_PCH_DMA_FEATURE)
++ && (!up->dma_progress)
++#endif
++ )
++ receive_chars(up, &status);
++#if defined(ENABLE_SERIAL_8250_PCH) && defined(ENABLE_PCH_DMA_FEATURE)
++ }
++#endif
+ check_modem_status(up);
+ if (status & UART_LSR_THRE)
+ transmit_chars(up);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
++
++#ifdef DEBUG
++ printk(KERN_DEBUG"serial8250_handle_port invoked.\n");
++#endif
+ }
+
+ /*
+@@ -1575,6 +2100,26 @@
+
+ iir = serial_in(up, UART_IIR);
+ if (!(iir & UART_IIR_NO_INT)) {
++#if defined(ENABLE_SERIAL_8250_PCH) && defined(ENABLE_PCH_DMA_FEATURE)
++ /* Determing whether the receive FIFO is full. */
++ if ((iir & UART_IIR_RDI) && !(iir & 0x8) &&
++ ((up->port.flags & UPF_IOH_UART) != 0)) {
++
++ up->dma_flag = 1;
++
++#ifdef DEBUG
++ printk(KERN_DEBUG"serial8250_interrupt ->\
++ DMA Mode enabled for reception.\n");
++#endif
++ } else {
++ up->dma_flag = 0;
++#ifdef DEBUG
++ printk(KERN_DEBUG"serial8250_interrupt ->\
++ DMA Mode disabled for reception.\n");
++#endif
++ }
++#endif
++
+ serial8250_handle_port(up);
+
+ handled = 1;
+@@ -1946,6 +2491,167 @@
+ unsigned char lsr, iir;
+ int retval;
+
++#if defined(ENABLE_SERIAL_8250_PCH) && defined(ENABLE_PCH_DMA_FEATURE)
++ /* Initialising the device for DMA support. */
++ int dma_flag = 0;
++ up->dma_progress = 0;
++ up->dma_enabled = 0;
++
++ if ((up->port.flags & UPF_IOH_UART) != 0) {
++ struct pci_dev pdev;
++
++/* switch((up->port.flags & 0xE000000))*/
++ switch ((up->port.flags & (UPF_IOH_UART | UPF_IOH_UART_BIT0 |
++ UPF_IOH_UART_BIT1))) {
++ case UPF_IOH_UART0:
++#ifdef DEBUG
++ printk(KERN_DEBUG"serial8250_startup ->\
++ UART0 detected.\n");
++#endif
++ pdev.device = PCI_DEVICE_ID_IOH_UART0;
++ up->port.mctrl |= TIOCM_RTS;
++ break;
++
++ case UPF_IOH_UART1:
++#ifdef DEBUG
++ printk(KERN_DEBUG"serial8250_startup ->\
++ UART1 detected.\n");
++#endif
++ pdev.device = PCI_DEVICE_ID_IOH_UART1;
++ break;
++
++ case UPF_IOH_UART2:
++#ifdef DEBUG
++ printk(KERN_DEBUG"serial8250_startup ->\
++ UART2 detected.\n");
++#endif
++ pdev.device = PCI_DEVICE_ID_IOH_UART2;
++ break;
++
++ case UPF_IOH_UART3:
++#ifdef DEBUG
++ printk(KERN_DEBUG"serial8250_startup ->\
++ UART3 detected.\n");
++#endif
++ pdev.device = PCI_DEVICE_ID_IOH_UART3;
++ break;
++
++ default:
++ break;
++ }
++
++ /* Allocating space for DMA buffer. */
++ up->rx_dma.buf = (u32)__get_free_page(GFP_KERNEL|GFP_DMA);
++ if (!(up->rx_dma.buf)) {
++ printk(KERN_ERR"serial8250_startup -> DMA buffer\
++ allocation " \
++ "failed for Rx DMA buffer.\n");
++ return -ENOMEM;
++ }
++
++ /* For transmission process. */
++ up->tx_dma.buf = (u32)__get_free_page(GFP_KERNEL|GFP_DMA);
++ if (!(up->tx_dma.buf)) {
++ free_page(up->rx_dma.buf);
++ printk(KERN_ERR"serial8250_startup -> DMA buffer\
++ allocation " \
++ "failed for TX DMA buffer.\n");
++ return -ENOMEM;
++ }
++
++ /* For copying of transmit data. */
++ up->buffer = (u32)__get_free_page(GFP_KERNEL|GFP_DMA);
++ if (!(up->buffer)) {
++ free_page(up->rx_dma.buf);
++ free_page(up->tx_dma.buf);
++ printk(KERN_ERR"serial8250_startup -> DMA buffer\
++ allocation " \
++ "failed for Buffer.\n");
++ return -ENOMEM;
++ }
++
++ up->rx_dma.size = PAGE_SIZE;
++ up->tx_dma.size = PAGE_SIZE;
++
++ /* Requesting for DMA channel for reception. */
++ up->rx_dma.channel = ioh_request_dma(&pdev,
++ IOH_DMA_RX_DATA_REQ0);
++ if (up->rx_dma.channel < 0) {
++ free_page(up->rx_dma.buf);
++ free_page(up->tx_dma.buf);
++ free_page(up->buffer);
++ up->rx_dma.buf = 0;
++ up->tx_dma.buf = 0;
++ up->buffer = 0;
++
++ printk(KERN_ERR"serial8250_startup -> DMA channel\
++ allocation for " \
++ "reception failed.\n");
++ return -EIO;
++ }
++
++ /* Requesting DMA channel for transmission. */
++ up->tx_dma.channel = ioh_request_dma(&pdev,
++ IOH_DMA_TX_DATA_REQ0);
++ if (up->tx_dma.channel < 0) {
++ free_page(up->rx_dma.buf);
++ free_page(up->tx_dma.buf);
++ free_page(up->buffer);
++ up->rx_dma.buf = 0;
++ up->tx_dma.buf = 0;
++ up->buffer = 0;
++ ioh_free_dma(up->rx_dma.channel);
++
++ printk(KERN_ERR"serial8250_startup -> DMA channel\
++ allocation for " \
++ "transmission failed.\n");
++ return -EIO;
++ }
++
++ /* Performing DMA settings for reception. */
++ {
++ u32 in_address;
++ u32 out_address;
++ u32 size;
++ int channel = up->rx_dma.channel;
++ struct ioh_dma_mode_param mode = {
++ .TransferDirection = IOH_DMA_DIR_IN_TO_OUT,
++ .DMASizeType = IOH_DMA_SIZE_TYPE_8BIT,
++ .DMATransferMode = DMA_ONE_SHOT_MODE
++ };
++
++ /* Mapping the DMA buffer to DMA accessible area and
++ obtaining its base address. */
++ out_address = dma_map_single(up->port.dev,
++ (void *)up->rx_dma.buf,
++ up->rx_dma.size,
++ DMA_FROM_DEVICE);
++ in_address = up->port.mapbase +
++ (map_8250_in_reg(up, UART_RX));
++ size = up->rx_fifo_size;
++ up->rx_dma.phy_addr = out_address;
++
++ /* Setting the DMA settings. */
++ (void)ioh_set_dma_mode(channel, mode);
++ (void)ioh_set_dma_addr(channel, in_address,
++ out_address);
++ (void)ioh_set_dma_count(channel, size);
++ (void)ioh_dma_set_callback(channel, ioh_dma_rx_callback,
++ (u32)up);
++ }
++
++ dma_flag = 1;
++
++#ifdef DEBUG
++ printk(KERN_DEBUG"serial8250_startup -> Buffer Allocation\
++ successful and DMA " \
++ "channels obtained are Reception: %d\
++ Transmission: %d.\n", \
++ up->rx_dma.channel, up->tx_dma.channel);
++#endif
++ }
++#endif
++
+ up->capabilities = uart_config[up->port.type].flags;
+ up->mcr = 0;
+
+@@ -1996,6 +2702,21 @@
+ (serial_inp(up, UART_LSR) == 0xff)) {
+ printk(KERN_INFO "ttyS%d: LSR safety check engaged!\n",
+ serial_index(&up->port));
++
++#if defined(ENABLE_SERIAL_8250_PCH) && defined(ENABLE_PCH_DMA_FEATURE)
++ /* Releasing the DMA resources on failure.*/
++ if (dma_flag == 1) {
++ ioh_free_dma(up->rx_dma.channel);
++ ioh_free_dma(up->tx_dma.channel);
++ free_page(up->rx_dma.buf);
++ free_page(up->tx_dma.buf);
++ free_page(up->buffer);
++ up->rx_dma.buf = 0;
++ up->tx_dma.buf = 0;
++ up->buffer = 0;
++ }
++#endif
++
+ return -ENODEV;
+ }
+
+@@ -2008,9 +2729,11 @@
+ serial_outp(up, UART_LCR, 0xbf);
+
+ fctr = serial_inp(up, UART_FCTR) & ~(UART_FCTR_RX|UART_FCTR_TX);
+- serial_outp(up, UART_FCTR, fctr | UART_FCTR_TRGD | UART_FCTR_RX);
++ serial_outp(up, UART_FCTR, fctr | UART_FCTR_TRGD |
++ UART_FCTR_RX);
+ serial_outp(up, UART_TRG, UART_TRG_96);
+- serial_outp(up, UART_FCTR, fctr | UART_FCTR_TRGD | UART_FCTR_TX);
++ serial_outp(up, UART_FCTR, fctr | UART_FCTR_TRGD |
++ UART_FCTR_TX);
+ serial_outp(up, UART_TRG, UART_TRG_96);
+
+ serial_outp(up, UART_LCR, 0);
+@@ -2076,8 +2799,22 @@
+ mod_timer(&up->timer, jiffies + poll_timeout(up->port.timeout));
+ } else {
+ retval = serial_link_irq_chain(up);
+- if (retval)
++ if (retval) {
++#if defined(ENABLE_SERIAL_8250_PCH) && defined(ENABLE_PCH_DMA_FEATURE)
++ /* Releasing the DMA resources on failure.*/
++ if (dma_flag == 1) {
++ ioh_free_dma(up->rx_dma.channel);
++ ioh_free_dma(up->tx_dma.channel);
++ free_page(up->rx_dma.buf);
++ free_page(up->tx_dma.buf);
++ free_page(up->buffer);
++ up->rx_dma.buf = 0;
++ up->tx_dma.buf = 0;
++ up->buffer = 0;
++ }
++ #endif
+ return retval;
++ }
+ }
+
+ /*
+@@ -2124,7 +2861,8 @@
+ if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) {
+ if (!(up->bugs & UART_BUG_TXEN)) {
+ up->bugs |= UART_BUG_TXEN;
+- pr_debug("ttyS%d - enabling bad tx status workarounds\n",
++ pr_debug("ttyS%d - enabling bad tx status\
++ workarounds\n",
+ serial_index(port));
+ }
+ } else {
+@@ -2172,6 +2910,31 @@
+ struct uart_8250_port *up = (struct uart_8250_port *)port;
+ unsigned long flags;
+
++#if defined(ENABLE_SERIAL_8250_PCH) && defined(ENABLE_PCH_DMA_FEATURE)
++ /* Releasing the DMA resources on exit.*/
++ if ((up->port.flags & UPF_IOH_UART) != 0) {
++ if (up->rx_dma.channel >= 0)
++ ioh_free_dma(up->rx_dma.channel);
++ if (up->tx_dma.channel >= 0)
++ ioh_free_dma(up->tx_dma.channel);
++
++ if (up->rx_dma.buf)
++ free_page(up->rx_dma.buf);
++ if (up->tx_dma.buf)
++ free_page(up->tx_dma.buf);
++ if (up->buffer)
++ free_page(up->buffer);
++
++ up->rx_dma.buf = 0;
++ up->tx_dma.buf = 0;
++ up->buffer = 0;
++
++#ifdef DEBUG
++ printk(KERN_DEBUG"serial8250_shutdown -> DMA buffers and\
++ channels released.\n");
++#endif
++ }
++#endif
+ /*
+ * Disable interrupts from this port
+ */
+@@ -2214,7 +2977,8 @@
+ serial_unlink_irq_chain(up);
+ }
+
+-static unsigned int serial8250_get_divisor(struct uart_port *port, unsigned int baud)
++static unsigned int serial8250_get_divisor(struct uart_port *port,
++ unsigned int baud)
+ {
+ unsigned int quot;
+
+@@ -2242,6 +3006,7 @@
+ unsigned char cval, fcr = 0;
+ unsigned long flags;
+ unsigned int baud, quot;
++ unsigned int bdrate;
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+@@ -2278,6 +3043,11 @@
+ port->uartclk / 16);
+ quot = serial8250_get_divisor(port, baud);
+
++#ifdef DEBUG
++ printk(KERN_DEBUG "IOH UART LOG:max_baud: %d\n,baud :%d\n quot:%d\n"
++ , max_baud, baud, quot);
++#endif
++
+ /*
+ * Oxford Semi 952 rev B workaround
+ */
+@@ -2285,12 +3055,37 @@
+ quot++;
+
+ if (up->capabilities & UART_CAP_FIFO && up->port.fifosize > 1) {
+- if (baud < 2400)
++ if (baud < 2400) {
+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_1;
+- else
+- fcr = uart_config[up->port.type].fcr;
++
++#ifdef ENABLE_SERIAL_8250_PCH
++ if ((up->port.flags & UPF_IOH_UART) != 0)
++ /*This enables 256 byte FIFO
++ for UART 0.*/
++ fcr |= UART_FCR7_64BYTE;
++
++#endif
++ } else
++ fcr = uart_config[up->port.type].fcr;
+ }
+
++#if defined(ENABLE_SERIAL_8250_PCH) && defined(ENABLE_PCH_DMA_FEATURE)
++ /* Deciding whether to use DMA feature or not.*/
++ if ((baud >= 38400) && ((up->port.flags & UPF_IOH_UART) != 0))
++ up->dma_enabled = 1;
++ else
++ up->dma_enabled = 0;
++
++
++ get_rx_fifo_size(up, fcr);
++
++#ifdef DEBUG
++ printk(KERN_DEBUG"serial8250_set_termios -> The Rx fifo size is: %u\n",
++ up->rx_fifo_size);
++#endif
++
++#endif
++
+ /*
+ * MCR-based auto flow control. When AFE is enabled, RTS will be
+ * deasserted when the receive FIFO contains more characters than
+@@ -2409,8 +3204,22 @@
+ serial8250_set_mctrl(&up->port, up->port.mctrl);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ /* Don't rewrite B0 */
+- if (tty_termios_baud_rate(termios))
++
++ bdrate = tty_termios_baud_rate(termios);
++
++#ifdef DEBUG
++ printk(KERN_DEBUG "tty_termios_baud_rate value:%d\n", bdrate);
++#endif
++
++ if (bdrate) {
+ tty_termios_encode_baud_rate(termios, baud, baud);
++
++#ifdef DEBUG
++ printk(KERN_DEBUG "termios->c_ispeed:%d\n,\
++ termios->c_ospeed:%d\n "
++ , termios->c_ispeed, termios->c_ospeed);
++#endif
++ }
+ }
+
+ static void
+@@ -2580,18 +3389,24 @@
+ if (ret < 0)
+ probeflags &= ~PROBE_RSA;
+
++
+ if (up->port.iotype != up->cur_iotype)
+ set_io_from_upio(port);
+
++
+ if (flags & UART_CONFIG_TYPE)
+ autoconfig(up, probeflags);
++
+ if (up->port.type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
+ autoconfig_irq(up);
+
++
+ if (up->port.type != PORT_RSA && probeflags & PROBE_RSA)
+ serial8250_release_rsa_resource(up);
++
+ if (up->port.type == PORT_UNKNOWN)
+ serial8250_release_std_resource(up);
++
+ }
+
+ static int
+@@ -2686,6 +3501,7 @@
+ up->port.regshift = old_serial_port[i].iomem_reg_shift;
+ set_io_from_upio(&up->port);
+ up->port.irqflags |= irqflag;
++
+ }
+ }
+
+@@ -2967,7 +3783,8 @@
+ port.irqflags |= irqflag;
+ ret = serial8250_register_port(&port);
+ if (ret < 0) {
+- dev_err(&dev->dev, "unable to register port at index %d "
++ dev_err(&dev->dev, "unable to register port at\
++ index %d "
+ "(IO%lx MEM%llx IRQ%d): %d\n", i,
+ p->iobase, (unsigned long long)p->mapbase,
+ p->irq, ret);
+@@ -3044,7 +3861,8 @@
+ */
+ static DEFINE_MUTEX(serial_mutex);
+
+-static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *port)
++static
++struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *port)
+ {
+ int i;
+
+@@ -3251,7 +4069,8 @@
+ " (unsafe)");
+
+ module_param(nr_uarts, uint, 0644);
+-MODULE_PARM_DESC(nr_uarts, "Maximum number of UARTs supported. (1-" __MODULE_STRING(CONFIG_SERIAL_8250_NR_UARTS) ")");
++MODULE_PARM_DESC(nr_uarts, "Maximum number of UARTs supported. \
++ (1-" __MODULE_STRING(CONFIG_SERIAL_8250_NR_UARTS) ")");
+
+ module_param(skip_txen_test, uint, 0644);
+ MODULE_PARM_DESC(skip_txen_test, "Skip checking for the TXEN bug at init time");
+diff -urN linux-2.6.33.1/drivers/serial/8250_pch.h topcliff-2.6.33.1/drivers/serial/8250_pch.h
+--- linux-2.6.33.1/drivers/serial/8250_pch.h 1970-01-01 09:00:00.000000000 +0900
++++ topcliff-2.6.33.1/drivers/serial/8250_pch.h 2010-03-23 10:34:44.000000000 +0900
+@@ -0,0 +1,57 @@
++/*!
++ * @file 8250_pch.h
++ * @brief Provides the macro definitions used by all files.
++ * @version 1.0.0.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKISEMI 03/16/2010
++ *
++ */
++
++#ifndef __IOH_8250_PCH_H__
++#define __IOH_8250_PCH_H__
++
++#define PORT_IOH_256FIFO 128 /* IOH UART with 256 byte FIFO */
++#define PORT_IOH_64FIFO 129 /* IOH UART with 64 byte FIFO */
++
++/* flags for IOH port detection */
++/* The below fields are used to identify the IOH UART port 0 to 3 */
++#define UPF_IOH_UART_BIT0 ((__force upf_t) (1 << 17))
++#define UPF_IOH_UART_BIT1 ((__force upf_t) (1 << 18))
++
++#define UPF_IOH_UART ((__force upf_t) (1 << 19))
++#define UPF_IOH_UART0 ((UPF_IOH_UART) | (0))
++#define UPF_IOH_UART1 ((UPF_IOH_UART) | ((__force upf_t) (UPF_IOH_UART_BIT0)))
++#define UPF_IOH_UART2 ((UPF_IOH_UART) | ((__force upf_t) (UPF_IOH_UART_BIT1)))
++#define UPF_IOH_UART3 ((UPF_IOH_UART) | ((__force upf_t) (UPF_IOH_UART_BIT0 |\
++ UPF_IOH_UART_BIT1)))
++#define UPF_IOH_UART_64_FIFO ((__force upf_t) (UPF_IOH_UART3))
++
++#define UART_FCR7_256BYTE 0x20 /* Go into 256 byte FIFO mode (IOH UART) */
++
++/* Intel IOH GE UART PCI device IDs */
++#define PCI_DEVICE_ID_IOH_UART0 (0x8811)
++#define PCI_DEVICE_ID_IOH_UART1 (0x8812)
++#define PCI_DEVICE_ID_IOH_UART2 (0x8813)
++#define PCI_DEVICE_ID_IOH_UART3 (0x8814)
++
++#endif
+diff -urN linux-2.6.33.1/drivers/serial/8250_pci.c topcliff-2.6.33.1/drivers/serial/8250_pci.c
+--- linux-2.6.33.1/drivers/serial/8250_pci.c 2010-03-16 01:09:39.000000000 +0900
++++ topcliff-2.6.33.1/drivers/serial/8250_pci.c 2010-03-23 10:34:44.000000000 +0900
+@@ -14,6 +14,7 @@
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/pci.h>
++#include <linux/pci_ids.h>
+ #include <linux/string.h>
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+@@ -27,7 +28,7 @@
+ #include <asm/io.h>
+
+ #include "8250.h"
+-
++#include "8250_pch.h"
+ #undef SERIAL_DEBUG_PCI
+
+ /*
+@@ -726,6 +727,87 @@
+ #define NI8430_PORTCON 0x0f
+ #define NI8430_PORTCON_TXVR_ENABLE (1 << 3)
+
++#if defined(ENABLE_SERIAL_8250_PCH)
++
++static int
++pci_ioh_init(struct pci_dev *dev)
++{
++ int retval = 0;
++
++#ifdef DEBUG
++ printk(KERN_DEBUG "IOH UART LOG:function pci_ioh_init invoked \n");
++
++ printk(KERN_DEBUG "IOH UART LOG:function pci_ioh_init->pci_enable_wake invoked \n");
++#endif
++
++ /* disable Wake on UART */
++ pci_enable_wake(dev, PCI_D3hot, 0);
++
++#ifdef DEBUG
++ printk(KERN_DEBUG "IOH UART LOG:function pci_ioh_init return = %d\n",
++ retval);
++#endif
++
++ return retval;
++}
++
++static int
++pci_ioh_setup(struct serial_private *priv, const struct pciserial_board *board,
++ struct uart_port *port, int idx)
++{
++ int retval = 1 ;
++ unsigned int bar = 0;
++ unsigned int offset = 0;
++
++ if (idx == 0) {
++ /* IOH UART has only 1 channel per device */
++ switch (priv->dev->device) {
++ case PCI_DEVICE_ID_IOH_UART0:
++ port->flags |= UPF_IOH_UART0;
++ break;
++
++ case PCI_DEVICE_ID_IOH_UART1:
++ port->flags |= UPF_IOH_UART1;
++ break;
++
++ case PCI_DEVICE_ID_IOH_UART2:
++ port->flags |= UPF_IOH_UART2;
++ break;
++
++ case PCI_DEVICE_ID_IOH_UART3:
++ port->flags |= UPF_IOH_UART3;
++ break;
++
++ default:
++ break;
++ }
++
++ retval = setup_port(priv, port, bar, offset, board->reg_shift);
++
++ #ifdef ENABLE_PCH_DMA_FEATURE
++ /* Obtaing the Memory Map base for DMA operations. */
++ port->mapbase = pci_resource_start(priv->dev, 1);
++ #ifdef DEBUG
++ printk(KERN_DEBUG"pci_ioh_setup -> The Map Base has been obtained.\n");
++ #endif
++ #endif
++ }
++
++
++#ifdef DEBUG
++ printk(KERN_DEBUG "pci_ioh_setup -> Function pci_ioh_setup invoked \n");
++ printk(KERN_DEBUG "pci_ioh_setup -> board.base_baud = %d, flags = %d,\
++ num_ports = %d,reg_shift = %d\n",
++ board->base_baud, board->flags,
++ board->num_ports, board->reg_shift);
++ printk(KERN_DEBUG "pci_ioh_setup -> port->flags =%x\n", port->flags);
++ printk(KERN_DEBUG "Function pci_ioh_setup return = %d\n", retval);
++
++#endif
++ return retval;
++}
++#endif
++
+ static int
+ pci_ni8430_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+@@ -918,7 +1000,8 @@
+ (dev->device & 0xF000) != 0xC000)
+ return 0;
+
+- p = pci_iomap(dev, 0, 5);
++ /*p = pci_iomap(dev, 0, 5);*/
++ p = pci_iomap(dev, 1, 5);
+ if (p == NULL)
+ return -ENOMEM;
+
+@@ -1393,6 +1476,43 @@
+ .setup = pci_default_setup,
+ },
+ /*
++ * IOH UART
++ */
++#if defined(ENABLE_SERIAL_8250_PCH)
++ {
++ .vendor = PCI_VENDOR_ID_INTEL,
++ .device = PCI_DEVICE_ID_IOH_UART0,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_ioh_init,
++ .setup = pci_ioh_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTEL,
++ .device = PCI_DEVICE_ID_IOH_UART1,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_ioh_init,
++ .setup = pci_ioh_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTEL,
++ .device = PCI_DEVICE_ID_IOH_UART2,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_ioh_init,
++ .setup = pci_ioh_setup,
++ },
++ {
++ .vendor = PCI_VENDOR_ID_INTEL,
++ .device = PCI_DEVICE_ID_IOH_UART3,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .init = pci_ioh_init,
++ .setup = pci_ioh_setup,
++ },
++#endif
++ /*
+ * Default "match everything" terminator entry
+ */
+ {
+@@ -1571,6 +1691,10 @@
+ pbn_ADDIDATA_PCIe_2_3906250,
+ pbn_ADDIDATA_PCIe_4_3906250,
+ pbn_ADDIDATA_PCIe_8_3906250,
++#if defined(ENABLE_SERIAL_8250_PCH)
++ pbn_ioh_uart_8L_256FIFO, /* ioh 8 Line UART with 256 byte FIFO */
++ pbn_ioh_uart_2L_64FIFO /* ioh 2 Line UART with 64 byte FIFO */
++#endif
+ };
+
+ /*
+@@ -2228,6 +2352,27 @@
+ .uart_offset = 0x200,
+ .first_offset = 0x1000,
+ },
++
++#if defined(ENABLE_SERIAL_8250_PCH)
++
++ /*
++ * IOH UART
++ */
++ [pbn_ioh_uart_8L_256FIFO] = {
++ .flags = FL_BASE0,
++ .num_ports = 1,
++ .base_baud = 115200, /* OKISEMI For LSI */
++ .reg_shift = 0,
++ },
++
++ [pbn_ioh_uart_2L_64FIFO] = {
++ .flags = FL_BASE0,
++ .num_ports = 1,
++ .base_baud = 115200, /* OKISEMI For LSI*/
++ .reg_shift = 0,
++ },
++#endif
++
+ };
+
+ static const struct pci_device_id softmodem_blacklist[] = {
+@@ -2473,8 +2618,20 @@
+ return -EINVAL;
+ }
+
++#ifdef DEBUG
++ printk(KERN_DEBUG "IOH UART LOG:function pciserial_init_one ent->vendor"
++ " = %x\n, ent->device = %x, ent->driver_data = %ld\n ",
++ ent->vendor, ent->device, ent->driver_data);
++#endif
++
+ board = &pci_boards[ent->driver_data];
+
++#ifdef DEBUG
++ printk(KERN_DEBUG "IOH UART LOG:function pciserial_init_one board->"
++ "base_baud = %u\n, board->flags = %d, board->num_ports = %d\n "
++ , board->base_baud, board->flags, board->num_ports);
++#endif
++
+ rc = pci_enable_device(dev);
+ if (rc)
+ return rc;
+@@ -2540,6 +2697,17 @@
+ if (priv)
+ pciserial_suspend_ports(priv);
+
++#if defined(ENABLE_SERIAL_8250_PCH)
++
++#ifdef DEBUG
++ printk(KERN_DEBUG "IOH UART LOG:pciserial_suspend_one->pci_enable_wake"
++ "invoked \n");
++#endif
++
++
++ pci_enable_wake(dev, PCI_D3hot, 1);
++#endif
++
+ pci_save_state(dev);
+ pci_set_power_state(dev, pci_choose_state(dev, state));
+ return 0;
+@@ -2561,6 +2729,17 @@
+ /* FIXME: We cannot simply error out here */
+ if (err)
+ printk(KERN_ERR "pciserial: Unable to re-enable ports, trying to continue.\n");
++
++#if defined(ENABLE_SERIAL_8250_PCH)
++
++#ifdef DEBUG
++ printk(KERN_DEBUG "IOH UART LOG:pciserial_resume_one->pci_enable_wake"
++ "invoked \n");
++#endif
++
++ pci_enable_wake(dev, PCI_D3hot, 0);
++#endif
++
+ pciserial_resume_ports(priv);
+ }
+ return 0;
+@@ -3649,6 +3828,48 @@
+ 0, 0, pbn_b0_1_115200 },
+
+ /*
++ * IOH UART
++ */
++#if defined(ENABLE_SERIAL_8250_PCH)
++
++ { PCI_VENDOR_ID_INTEL,
++ /*device id for ioh uart with 8 i/o lines and 256 byte fifo. */
++ PCI_DEVICE_ID_IOH_UART0,
++ PCI_ANY_ID,
++ PCI_ANY_ID,
++ 0,
++ 0,
++ pbn_ioh_uart_8L_256FIFO },
++
++ { PCI_VENDOR_ID_INTEL,
++ /*device id for ioh uart with 2 i/o lines and 256 byte fifo. */
++ PCI_DEVICE_ID_IOH_UART1,
++ PCI_ANY_ID,
++ PCI_ANY_ID,
++ 0,
++ 0,
++ pbn_ioh_uart_2L_64FIFO },
++
++ { PCI_VENDOR_ID_INTEL,
++ /*device id for ioh uart with 8 i/o lines and 64 byte fifo. */
++ PCI_DEVICE_ID_IOH_UART2,
++ PCI_ANY_ID,
++ PCI_ANY_ID,
++ 0,
++ 0,
++ pbn_ioh_uart_2L_64FIFO },
++
++ { PCI_VENDOR_ID_INTEL,
++ /*device id for ioh uart with 2 i/o lines and 64 byte fifo. */
++ PCI_DEVICE_ID_IOH_UART3,
++ PCI_ANY_ID,
++ PCI_ANY_ID,
++ 0,
++ 0,
++ pbn_ioh_uart_2L_64FIFO },
++#endif
++
++ /*
+ * These entries match devices with class COMMUNICATION_SERIAL,
+ * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
+ */
+diff -urN linux-2.6.33.1/drivers/serial/Kconfig topcliff-2.6.33.1/drivers/serial/Kconfig
+--- linux-2.6.33.1/drivers/serial/Kconfig 2010-03-16 01:09:39.000000000 +0900
++++ topcliff-2.6.33.1/drivers/serial/Kconfig 2010-03-23 10:34:44.000000000 +0900
+@@ -89,6 +89,21 @@
+ disable this feature if you only need legacy serial support.
+ Saves about 9K.
+
++config SERIAL_8250_PCH
++ tristate "PCH PCI serial device support"
++ depends on SERIAL_8250 && PCI && SERIAL_8250_PCI
++ default SERIAL_8250_PCI
++ help
++ This makes the PCH PCI serial driver to support high speed PCH serial ports.
++
++config SERIAL_8250_PCH_DMA
++ bool "Enable DMA mode of PCH PCI serial device"
++ depends on SERIAL_8250_PCH
++ select PCH_UART_DMA
++ default y
++ help
++ This makes the PCH PCI serial driver with DMA mode.
++
+ config SERIAL_8250_PNP
+ tristate "8250/16550 PNP device support" if EMBEDDED
+ depends on SERIAL_8250 && PNP
+diff -urN linux-2.6.33.1/drivers/serial/Makefile topcliff-2.6.33.1/drivers/serial/Makefile
+--- linux-2.6.33.1/drivers/serial/Makefile 2010-03-16 01:09:39.000000000 +0900
++++ topcliff-2.6.33.1/drivers/serial/Makefile 2010-03-23 10:34:44.000000000 +0900
+@@ -1,6 +1,17 @@
+ #
+ # Makefile for the kernel serial device drivers.
+ #
++#
++#This is needed to enable ioh dma#
++#EXTRA_CFLAGS +=-DENABLE_IOH_DMA_FEATURE
++ifdef CONFIG_SERIAL_8250_PCH
++EXTRA_CFLAGS +=-DENABLE_SERIAL_8250_PCH
++endif
++
++ifdef CONFIG_PCH_UART_DMA
++EXTRA_CFLAGS +=-DENABLE_PCH_DMA_FEATURE
++EXTRA_CFLAGS +=-Idrivers/dma/pch_dma/
++endif
+
+ obj-$(CONFIG_SERIAL_CORE) += serial_core.o
+ obj-$(CONFIG_SERIAL_21285) += 21285.o
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-usbdev.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-usbdev.patch
new file mode 100644
index 0000000..f9ea573
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-pch-usbdev.patch
@@ -0,0 +1,7018 @@
+From: Masayuki Ohtake <masa-korg@dsn.okisemi.com>
+Subject: OKI Semiconductor PCH USB Gadget driver
+
+This driver implements PCH Gadget controls for PCH.
+
+Signed-off-by: Masayuki Ohtake <masa-korg@dsn.okisemi.com>
+Acked-by: Wang Qi <qi.wang@intel.com>
+
+---
+ drivers/usb/gadget/gadget_chips.h | 8 ++
+ drivers/usb/gadget/Kconfig | 16
+ drivers/usb/gadget/Makefile | 6
+ drivers/usb/gadget/pch_common.h | 146
+ drivers/usb/gadget/pch_debug.h | 160
+ drivers/usb/gadget/pch_udc.c | 2530
+ drivers/usb/gadget/pch_udc.h | 172
+ drivers/usb/gadget/pch_udc_hal.c | 1110
+ drivers/usb/gadget/pch_udc_hal.h | 1829
+ drivers/usb/gadget/pch_udc_intr.c | 396
+ drivers/usb/gadget/pch_udc_pci.c | 549
+ drivers/usb/gadget/pch_udc_pci.h | 97
++++++++++++++++++++++++++++++++ 12 files changed, zz insertions(+)
+---
+ drivers/usb/gadget/Kconfig | 16
+ drivers/usb/gadget/Makefile | 6
+ drivers/usb/gadget/gadget_chips.h | 12
+ drivers/usb/gadget/pch_common.h | 146 ++
+ drivers/usb/gadget/pch_debug.h | 60
+ drivers/usb/gadget/pch_udc.c | 2530 ++++++++++++++++++++++++++++++++++++++
+ drivers/usb/gadget/pch_udc.h | 172 ++
+ drivers/usb/gadget/pch_udc_hal.c | 1110 ++++++++++++++++
+ drivers/usb/gadget/pch_udc_hal.h | 1829 +++++++++++++++++++++++++++
+ drivers/usb/gadget/pch_udc_intr.c | 396 +++++
+ drivers/usb/gadget/pch_udc_pci.c | 549 ++++++++
+ drivers/usb/gadget/pch_udc_pci.h | 97 +
+ 12 files changed, 6923 insertions(+)
+
+--- a/drivers/usb/gadget/Kconfig
++++ b/drivers/usb/gadget/Kconfig
+@@ -220,6 +220,22 @@ config USB_OTG
+
+ Select this only if your OMAP board has a Mini-AB connector.
+
++config USB_GADGET_PCH
++ boolean "PCH USB Dev"
++ depends on PCI
++ select USB_GADGET_DUALSPEED
++ help
++ PCH USB device is a PCI based USB peripheral controller which
++ supports both full and high speed USB 2.0 data transfers.
++
++config PCH_USBDEV
++ tristate
++ depends on USB_GADGET_PCH
++ default USB_GADGET
++ select USB_GADGET_SELECTED
++
++
++
+ config USB_GADGET_PXA25X
+ boolean "PXA 25x or IXP 4xx"
+ depends on (ARCH_PXA && PXA25x) || ARCH_IXP4XX
+--- a/drivers/usb/gadget/Makefile
++++ b/drivers/usb/gadget/Makefile
+@@ -58,3 +58,9 @@ obj-$(CONFIG_USB_CDC_COMPOSITE) += g_cdc
+ obj-$(CONFIG_USB_G_MULTI) += g_multi.o
+ obj-$(CONFIG_USB_STILL_IMAGE) += g_still_image.o
+
++ifeq ($(CONFIG_USB_GADGET_DEBUG),y)
++ EXTRA_CFLAGS += -DDMA_PPB_MODE
++endif
++obj-$(CONFIG_PCH_USBDEV) += pch_usbdev.o
++pch_usbdev-objs := pch_udc_pci.o pch_udc.o pch_udc_hal.o pch_udc_intr.o
++
+--- a/drivers/usb/gadget/gadget_chips.h
++++ b/drivers/usb/gadget/gadget_chips.h
+@@ -180,6 +180,14 @@
+ #endif
+
+
++#ifdef CONFIG_USB_GADGET_PCH
++#define gadget_is_ioh(g) (!strcmp("ioh_udc", (g)->name))
++#else
++#define gadget_is_ioh(g) 0
++#endif
++
++
++
+ /**
+ * usb_gadget_controller_number - support bcdDevice id convention
+ * @gadget: the controller being driven
+@@ -247,6 +255,10 @@ static inline int usb_gadget_controller_
+ return 0x24;
+ else if (gadget_is_r8a66597(gadget))
+ return 0x25;
++
++ else if (gadget_is_ioh(gadget))
++ return 0x26;
++
+ return -ENOENT;
+ }
+
+--- /dev/null
++++ b/drivers/usb/gadget/pch_common.h
+@@ -0,0 +1,146 @@
++/*!
++ * @file ioh_common.h
++ * @brief Provides the macro definitions used by all files.
++ * @version 1.0.0.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ */
++
++#ifndef __IOH_COMMON_H__
++#define __IOH_COMMON_H__
++
++/*! @ingroup Global
++@def IOH_WRITE8
++@brief Macro for writing 8 bit data to an io/mem address
++*/
++#define IOH_WRITE8(val, addr) iowrite8((val), (void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_LOG
++@brief Macro for writing 16 bit data to an io/mem address
++*/
++#define IOH_WRITE16(val, addr) iowrite16((val), (void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_LOG
++@brief Macro for writing 32 bit data to an io/mem address
++*/
++#define IOH_WRITE32(val, addr) iowrite32((val), (void __iomem *)(addr))
++
++/*! @ingroup Global
++@def IOH_READ8
++@brief Macro for reading 8 bit data from an io/mem address
++*/
++#define IOH_READ8(addr) ioread8((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_READ16
++@brief Macro for reading 16 bit data from an io/mem address
++*/
++#define IOH_READ16(addr) ioread16((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_READ32
++@brief Macro for reading 32 bit data from an io/mem address
++*/
++#define IOH_READ32(addr) ioread32((void __iomem *)(addr))
++/*! @ingroup Global
++@def IOH_WRITE32_F
++@brief Macro for writing 32 bit data to an io/mem address
++*/
++#define IOH_WRITE32_F(val, addr) do \
++ { IOH_WRITE32((val), (addr)); (void)IOH_READ32((addr)); } while (0);
++
++/*! @ingroup Global
++@def IOH_WRITE_BYTE
++@brief Macro for writing 1 byte data to an io/mem address
++*/
++#define IOH_WRITE_BYTE IOH_WRITE8
++/*! @ingroup Global
++@def IOH_WRITE_WORD
++@brief Macro for writing 1 word data to an io/mem address
++*/
++#define IOH_WRITE_WORD IOH_WRITE16
++/*! @ingroup Global
++@def IOH_WRITE_LONG
++@brief Macro for writing long data to an io/mem address
++*/
++#define IOH_WRITE_LONG IOH_WRITE32
++
++/*! @ingroup Global
++@def IOH_READ_BYTE
++@brief Macro for reading 1 byte data from an io/mem address
++*/
++#define IOH_READ_BYTE IOH_READ8
++/*! @ingroup Global
++@def IOH_READ_WORD
++@brief Macro for reading 1 word data from an io/mem address
++*/
++#define IOH_READ_WORD IOH_READ16
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief Macro for reading long data from an io/mem address
++*/
++#define IOH_READ_LONG IOH_READ32
++
++/* Bit Manipulation Macros */
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to set a specified bit(mask) at the
++ specified address
++*/
++#define IOH_SET_ADDR_BIT(addr, bitmask) IOH_WRITE_LONG((IOH_READ_LONG(addr) |\
++ (bitmask)), (addr))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to clear a specified bit(mask) at the specified address
++*/
++#define IOH_CLR_ADDR_BIT(addr, bitmask) IOH_WRITE_LONG((IOH_READ_LONG(addr) &\
++ ~(bitmask)), (addr))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to set a specified bitmask for a variable
++*/
++#define IOH_SET_BITMSK(var, bitmask) ((var) |= (bitmask))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to clear a specified bitmask for a variable
++*/
++#define IOH_CLR_BITMSK(var, bitmask) ((var) &= (~(bitmask)))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to set a specified bit for a variable
++*/
++#define IOH_SET_BIT(var, bit) ((var) |= (1<<(bit)))
++
++/*! @ingroup Global
++@def IOH_READ_LONG
++@brief macro to clear a specified bit for a variable
++*/
++#define IOH_CLR_BIT(var, bit) ((var) &= ~(1<<(bit)))
++
++#endif
+--- /dev/null
++++ b/drivers/usb/gadget/pch_debug.h
+@@ -0,0 +1,60 @@
++/*!
++ * @file ioh_debug.h
++ * @brief Provides the macro definitions used for debugging.
++ * @version 1.0.0.0
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++/*
++ * History:
++ * Copyright (C) 2008 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * WIPRO 03/07/2009
++ * modified:
++ * WIPRO 05/08/2009
++ *
++ */
++
++#ifndef __IOH_DEBUG_H__
++#define __IOH_DEBUG_H__
++
++#ifdef MODULE
++#define IOH_LOG(level, fmt, args...) printk(level "%s:" fmt "\n",\
++ THIS_MODULE->name, ##args)
++#else
++#define IOH_LOG(level, fmt, args...) printk(level "%s:" fmt "\n" ,\
++ __FILE__, ##args)
++#endif
++
++
++#ifdef DEBUG
++ #define IOH_DEBUG(fmt, args...) IOH_LOG(KERN_DEBUG, fmt, ##args)
++#else
++ #define IOH_DEBUG(fmt, args...)
++#endif
++
++#ifdef IOH_TRACE_ENABLED
++ #define IOH_TRACE IOH_DEBUG
++#else
++ #define IOH_TRACE(fmt, args...)
++#endif
++
++#define IOH_TRACE_ENTER IOH_TRACE("Enter %s", __func__)
++#define IOH_TRACE_EXIT IOH_TRACE("Exit %s", __func__)
++
++
++#endif
+--- /dev/null
++++ b/drivers/usb/gadget/pch_udc.c
+@@ -0,0 +1,2530 @@
++ /*!
++ *@file ioh_udc.c
++ *@brief This file contains the definitions for IOH UDC driver APIs.
++ *
++ *The IOH UDC is a USB High speed DMA capable USB device controller.
++ *It provides 4 IN and 4 OUT endpoints (control, bulk isochronous or interrupt
++ * type).
++ *
++ *The IOH USB device controller driver provides required interface
++ *to the USB gadget framework for accessing the IOH USB device hardware.
++ *
++ *@version 0.96
++ *
++ *@section
++ *This program is free software; you can redistribute it and/or modify
++ *it under the terms of the GNU General Public License as published by
++ *the Free Software Foundation; version 2 of the License.
++ *
++ *This program is distributed in the hope that it will be useful,
++ *but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ *GNU General Public License for more details.
++ *
++ *You should have received a copy of the GNU General Public License
++ *along with this program; if not, write to the Free Software
++ *Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++ /*
++ *History:
++ *Copyright (C) 2009 OKI SEMICONDUCTOR Co., LTD.
++ *All rights reserved.
++ *
++ *created:
++ * OKI SEMICONDUCTOR 2/26/2010
++ *modified:
++ *
++ */
++
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <linux/list.h>
++#include <linux/smp_lock.h>
++#include <linux/device.h>
++#include <linux/pci.h>
++#include <linux/usb/ch9.h>
++#include <linux/usb/gadget.h>
++
++#include "pch_common.h"
++#include "pch_debug.h"
++
++#include "pch_udc_hal.h"
++#include "pch_udc.h"
++
++#if 0 /* statis as static */
++#define static
++#endif
++
++/* stall spin lock */
++static DEFINE_SPINLOCK(udc_stall_spinlock);
++
++/* function prototypes */
++static int ioh_udc_pcd_get_frame(struct usb_gadget *gadget);
++static int ioh_udc_pcd_wakeup(struct usb_gadget *gadget);
++static int ioh_udc_pcd_selfpowered(struct usb_gadget *gadget, int value);
++static int ioh_udc_pcd_pullup(struct usb_gadget *gadget, int is_on);
++static int ioh_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active);
++static int ioh_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA);
++static void ioh_udc_pcd_reinit(struct ioh_udc_dev *dev);
++static void complete_req(struct ioh_udc_ep *ep, struct ioh_udc_request *req,
++ int status);
++static int ioh_udc_pcd_ep_enable(struct usb_ep *usbep,
++ const struct usb_endpoint_descriptor *desc);
++static int ioh_udc_pcd_ep_disable(struct usb_ep *usbep);
++static struct usb_request *ioh_udc_alloc_request(struct usb_ep *uep, gfp_t gfp);
++static int ioh_udc_free_dma_chain(struct ioh_udc_dev *dev,
++ struct ioh_udc_request *req);
++static int ioh_udc_create_dma_chain(struct ioh_udc_ep *ep,
++ struct ioh_udc_request *req,
++ unsigned long buf_len, gfp_t gfp_flags);
++static void ioh_udc_free_request(struct usb_ep *uep, struct usb_request *req);
++static int prepare_dma(struct ioh_udc_ep *ep,
++ struct ioh_udc_request *req, gfp_t gfp);
++static void process_zlp(struct ioh_udc_ep *ep, struct ioh_udc_request *req);
++static int ioh_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
++ gfp_t gfp);
++static int ioh_udc_pcd_dequeue(struct usb_ep *usbep,
++ struct usb_request *usbreq);
++static int ioh_udc_pcd_set_halt(struct usb_ep *usbep, int halt);
++static int ioh_udc_pcd_set_wedge(struct usb_ep *usbep);
++static void ioh_udc_pcd_fifo_flush(struct usb_ep *usbep);
++static void ioh_udc_svc_data_out(struct ioh_udc_dev *dev, int ep_num);
++static void ioh_udc_svc_control_in(struct ioh_udc_dev *dev);
++static void ioh_udc_svc_control_out(struct ioh_udc_dev *dev);
++static void ioh_udc_svc_data_in(struct ioh_udc_dev *dev, int ep_num);
++static void ioh_udc_read_all_epstatus(struct ioh_udc_dev *dev, u32 ep_intr);
++static void ioh_udc_setup_ep0(struct ioh_udc_dev *dev);
++static void ioh_udc_init_setup_buff(struct ioh_udc_stp_dma_desc *td_stp);
++static void ioh_udc_complete_transfer(struct ioh_udc_ep *ep);
++static void ioh_udc_complete_receiver(struct ioh_udc_ep *ep);
++static void ioh_udc_start_next_txrequest(struct ioh_udc_ep *ep);
++static void ioh_udc_start_rxrequest(struct ioh_udc_ep *ep,
++ struct ioh_udc_request *req);
++static void ioh_udc_postsvc_epinters(struct ioh_udc_dev *dev, int ep_num);
++
++/* gadget operations */
++/*!@ingroup UDC_InterfaceLayer
++ *@struct ioh_udc_ops
++ *@brief specifies gadget operations possible
++ */
++const struct usb_gadget_ops ioh_udc_ops = {
++ .get_frame = ioh_udc_pcd_get_frame,
++ .wakeup = ioh_udc_pcd_wakeup,
++ .set_selfpowered = ioh_udc_pcd_selfpowered,
++ .pullup = ioh_udc_pcd_pullup,
++ .vbus_session = ioh_udc_pcd_vbus_session,
++ .vbus_draw = ioh_udc_pcd_vbus_draw,
++};
++
++
++/* endpoint interface */
++/*!@ingroup UDC_InterfaceLayer
++ *@struct usb_ep_ops ioh_udc_ep_ops
++ *@brief specifies endpoint operations possible
++ */
++static const struct usb_ep_ops ioh_udc_ep_ops = {
++ .enable = ioh_udc_pcd_ep_enable,
++ .disable = ioh_udc_pcd_ep_disable,
++ .alloc_request = ioh_udc_alloc_request,
++ .free_request = ioh_udc_free_request,
++ .queue = ioh_udc_pcd_queue,
++ .dequeue = ioh_udc_pcd_dequeue,
++ .set_halt = ioh_udc_pcd_set_halt,
++ .set_wedge = ioh_udc_pcd_set_wedge,
++ .fifo_status = NULL,
++ .fifo_flush = ioh_udc_pcd_fifo_flush,
++};
++
++/* received setup data */
++static union ioh_udc_setup_data setup_data;
++static unsigned long ep0out_buf[64];
++static dma_addr_t dma_addr;
++
++
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn static int ioh_udc_pcd_get_frame(struct usb_gadget *gadget)
++ *@brief This API is invoked to get the current frame number
++ *@remarks The following actions are performed:
++ * - If the argument is NULL, return -EINVAL
++ * - Return the frame number by invoking HAL API
++ * ioh_udc_get_frame
++ *@param gadget Reference to the gadget driver
++ *@return int [ the frame number ] -EINVAL [Invalid Arguments]
++ *@see
++ * - ioh_udc_get_frame
++ */
++static int ioh_udc_pcd_get_frame(struct usb_gadget *gadget)
++{
++ struct ioh_udc_dev *dev;
++
++ IOH_DEBUG("ioh_udc_pcd_get_frame: enter");
++ if (gadget == NULL) {
++ IOH_DEBUG("ioh_udc_pcd_get_frame: exit -EINVAL");
++ return -EINVAL;
++ }
++
++ dev = container_of(gadget, struct ioh_udc_dev, gadget);
++
++ IOH_DEBUG("ioh_udc_pcd_get_frame: exit");
++ return ioh_udc_get_frame(dev->regs);
++}
++
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn static int ioh_udc_pcd_wakeup(struct usb_gadget *gadget)
++ *@brief This API is invoked to initiate a remote wakeup
++ *@remarks The following actions are performed:
++ * - If the argument is NULL, return -EINVAL
++ * - Invoke HAL API ioh_udc_rmt_wakeup to start
++ * remote signaling
++ *@param gadget Reference to the gadget driver
++ *@return int [ 0 on success and linux error number on failure ]
++ *@see
++ * - ioh_udc_rmt_wakeup
++ */
++static int ioh_udc_pcd_wakeup(struct usb_gadget *gadget)
++{
++ struct ioh_udc_dev *dev;
++ unsigned long flags;
++
++ IOH_DEBUG("ioh_udc_pcd_wakeup: enter");
++ if (gadget == NULL) {
++ IOH_DEBUG("ioh_udc_pcd_wakeup: exit -EINVAL");
++ return -EINVAL;
++ }
++
++ dev = container_of(gadget, struct ioh_udc_dev, gadget);
++
++ IOH_DEBUG("ioh_udc_pcd_wakeup: initiate remote wakeup");
++ spin_lock_irqsave(&dev->lock, flags);
++ ioh_udc_rmt_wakeup(dev->regs);
++ spin_unlock_irqrestore(&dev->lock, flags);
++
++ IOH_DEBUG("ioh_udc_pcd_wakeup: exit");
++ return 0;
++}
++
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn static int ioh_udc_pcd_selfpowered (struct usb_gadget *gadget,
++ * int value)
++ *@brief This API is invoked to specify whether the device is self
++ * powered or not
++ *@remarks The following actions are performed:
++ *- if value is 0, invoke HAL API ioh_udc_clear_selfpowered to clear the self
++ * powered feature
++ *- otherwise, invoke ioh_udc_set_selfpowered to set the self powered
++ * feature for the device
++ *@param gadget Reference to the gadget driver
++ *@param value specifies self powered or not
++ *@return int [ 0 on success and linux error number on failure ]
++ *@see
++ * - ioh_udc_set_selfpowered
++ * - ioh_udc_clear_selfpowered
++ */
++static int ioh_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
++{
++ struct ioh_udc_dev *dev;
++
++ IOH_DEBUG("ioh_udc_pcd_selfpowered: enter");
++ if (gadget == NULL) {
++ IOH_DEBUG("ioh_udc_pcd_selfpowered: exit -EINVAL");
++ return -EINVAL;
++ }
++
++ dev = container_of(gadget, struct ioh_udc_dev, gadget);
++ if (value == 0)
++ ioh_udc_clear_selfpowered(dev->regs);
++ else
++ ioh_udc_set_selfpowered(dev->regs);
++
++ IOH_DEBUG("ioh_udc_pcd_selfpowered: exit value=%d", value);
++ return 0;
++}
++
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn static int ioh_udc_pcd_pullup (struct usb_gadget *gadget,
++ * int is_on)
++ *@brief This API is invoked to make the device visible/invisible to the host
++ *@remarks The following actions are performed:
++ * - If gadget passed is NULL, return -EINVAL
++ * - If is_on is TRUE, call ioh_udc_clear_disconnect()
++ * to make the device visible to the host
++ * - Otherwise call the HAL API ioh_udc_set_disconnect()
++ * to make the device unavailable to the host
++ *@param gadget Reference to the gadget driver
++ *@param is_on specifies whether the pull up is made active
++ * or inactive
++ *@return int [ 0 on success and linux error number on failure ]
++ *@see
++ * - ioh_udc_clear_disconnect
++ * - ioh_udc_set_disconnect
++ */
++static int ioh_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
++{
++ struct ioh_udc_dev *dev;
++
++ IOH_DEBUG("ioh_udc_pcd_pullup: enter");
++ if (gadget == NULL) {
++ IOH_DEBUG("ioh_udc_pcd_pullup: exit -EINVAL");
++ return -EINVAL;
++ }
++
++ dev = container_of(gadget, struct ioh_udc_dev, gadget);
++ if (is_on == 0)
++ ioh_udc_set_disconnect(dev->regs);
++ else
++ ioh_udc_clear_disconnect(dev->regs);
++
++ IOH_DEBUG("ioh_udc_pcd_pullup: exit is_on=%d", is_on);
++ return 0;
++}
++
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn static int ioh_udc_pcd_vbus_session (struct usb_gadget *gadget,
++ * int is_active)
++ *@brief This API is used by a driver for an external transceiver
++ * (or GPIO) that
++ * detects a VBUS power session starting/ending
++ *@remarks The following actions are performed:
++ * - If the gadget passed is NULL, return -EINVAL
++ * - Invoke the HAL API ioh_udc_vbus_session to notify
++ * the start/end of the vbus power
++ *@param gadget Reference to the gadget driver
++ *@param is_active specifies whether the session is starting or ending
++ *@return int [ 0 on success and linux error number on failure ]
++ *@see
++ * - ioh_udc_vbus_session
++ */
++static int ioh_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
++{
++ struct ioh_udc_dev *dev;
++
++ IOH_DEBUG("ioh_udc_pcd_vbus_session: enter");
++ if (gadget == NULL) {
++ IOH_DEBUG("ioh_udc_pcd_vbus_session: exit -EINVAL");
++ return -EINVAL;
++ }
++ dev = container_of(gadget, struct ioh_udc_dev, gadget);
++
++ ioh_udc_vbus_session(dev->regs, is_active);
++ IOH_DEBUG("ioh_udc_pcd_vbus_session: exit");
++ return 0;
++}
++
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn static int ioh_udc_pcd_vbus_draw (struct usb_gadget *gadget,
++ * unsigned int mA)
++ *@brief This API is used by gadget drivers during SET_CONFIGURATION
++ * calls to
++ * specify how much power the device can consume
++ *@remarks The following actions are performed:
++ * - If the gadget passed is NULL, return -EINVAL
++ * - Return -EOPNOTSUPP
++ *@param gadget Reference to the gadget driver
++ *@param mA specifies the current limit in 2mA unit
++ *@return int [ 0 on success and linux error number on failure ]
++ */
++static int ioh_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
++{
++ IOH_DEBUG("ioh_udc_pcd_vbus_draw: enter");
++ if ((gadget == NULL) || (mA > 250)) { /* Max is 250 in 2mA unit */
++ IOH_DEBUG("ioh_udc_pcd_vbus_draw: exit -EINVAL");
++ return -EINVAL;
++ }
++ IOH_DEBUG("ioh_udc_pcd_vbus_draw: exit -EOPNOTSUPP");
++
++ /* Could not find any regs where we can set the limit */
++ return -EOPNOTSUPP;
++}
++
++const char ep0_string[] = "ep0in";
++/*!@ingroup UDC_UtilitiesAPI
++ *@fn static void ioh_udc_pcd_reinit(struct ioh_udc_dev *dev)
++ *@brief This API initializes the endpoint structures
++ *@remarks The following actions are performed:
++ *- Initialize gadget's speed as unknown
++ *- Initialize the gadget endpoint list (ep_list)
++ *- Assign a name for each endpoint and set their reference to endpoint operations
++ * to ioh_udc_ep_ops
++ *- Add all endpoints other than ep0 IN and ep0 OUT to the gadget ep_list
++ *@param dev Reference to the driver structure
++ *@return none
++ */
++static void ioh_udc_pcd_reinit(struct ioh_udc_dev *dev)
++{
++ static const char *ep_string[] = {
++ ep0_string, "ep0out",
++ "ep1in", "ep1out",
++ "ep2in", "ep2out",
++ "ep3in", "ep3out",
++ "ep4in", "ep4out",
++ "ep5in", "ep5out",
++ "ep6in", "ep6out",
++ "ep7in", "ep7out",
++ "ep8in", "ep8out",
++ "ep9in", "ep9out",
++ "ep10in", "ep10out",
++ "ep11in", "ep11out",
++ "ep12in", "ep12out",
++ "ep13in", "ep13out",
++ "ep14in", "ep14out",
++ "ep15in", "ep15out",
++ };
++ int i;
++
++ IOH_DEBUG("ioh_udc_pcd_reinit: enter");
++
++ dev->gadget.speed = USB_SPEED_UNKNOWN;
++ INIT_LIST_HEAD(&dev->gadget.ep_list);
++
++ /* Initialize the endpoints structures */
++ for (i = 0; i < IOH_UDC_EP_NUM; i++) {
++ struct ioh_udc_ep *ep = &dev->ep[i];
++ memset(ep, 0, sizeof(*ep));
++
++ ep->desc = NULL;
++ ep->dev = dev;
++ ep->halted = 1;
++ ep->num = i / 2;
++ ep->in = ((i & 1) == 0) ? 1 : 0;
++
++ ep->ep.name = ep_string[i];
++ ep->ep.ops = &ioh_udc_ep_ops;
++ if (ep->in)
++ ep->regs = (struct ioh_udc_ep_regs *)\
++ ((int)dev->ep_regs + ep->num * UDC_EP_REG_OFS);
++ else
++ ep->regs = (struct ioh_udc_ep_regs *)\
++ ((int)dev->ep_regs + \
++ (UDC_EPINT_OUT_EP0 + ep->num) * UDC_EP_REG_OFS);
++
++ ep->dma = &ep->regs->epctl;
++ /* need to set ep->ep.maxpacket and set Default Configuration?*/
++ ep->ep.maxpacket = UDC_BULK_MAX_PKT_SIZE;
++ list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
++ INIT_LIST_HEAD(&ep->queue);
++ }
++ dev->ep[UDC_EP0IN_IDX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
++ dev->ep[UDC_EP0OUT_IDX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
++
++ dma_addr = pci_map_single(dev->pdev, ep0out_buf,
++ 256, PCI_DMA_FROMDEVICE);
++
++ /* remove ep0 in and out from the list. They have own pointer */
++ list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
++ list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
++
++ dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
++ INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
++ IOH_DEBUG("ioh_udc_pcd_reinit: exit");
++}
++
++/*!@ingroup UDC_UtilitiesAPI
++ *@fn int ioh_udc_pcd_init(struct ioh_udc_dev *dev)
++ *@brief This API initializes the driver structure
++ *@remarks The following actions are performed:
++ *- Set udc csr register base in dev->csr
++ *- Set Device Configuration Register base in dev->regs
++ *- Set Endpoint-specific CSR base in dev->ep_regs
++ *- Invoke ioh_udc_init to initialize registers, interrupts
++ *- Invoke ioh_udc_pcd_reinit to perform initialization of the endpoint
++ * structures
++ *- Return Success (0)
++ *@param dev Reference to the driver structure
++ *@return success
++ *@see
++ * - ioh_udc_init
++ * - ioh_udc_pcd_reinit
++ */
++int ioh_udc_pcd_init(struct ioh_udc_dev *dev)
++{
++ IOH_DEBUG("ioh_udc_pcd_init: enter");
++
++ /* udc csr registers base */
++ dev->csr = dev->virt_addr + UDC_CSR_ADDR;
++ /* dev registers base */
++ dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
++ /* ep registers base */
++ dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
++
++ /* init registers, interrupts, ... */
++ ioh_udc_init(dev->regs);
++#ifdef IOH_PRINT_REG
++ ioh_udc_print_regs((u32)dev->virt_addr);
++#endif
++ ioh_udc_pcd_reinit(dev);
++ IOH_DEBUG("ioh_udc_pcd_init: exit");
++ return 0;
++}
++
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn int usb_gadget_register_driver(struct usb_gadget_driver *driver)
++ *@brief This API is used to make this driver available to the Linux USB
++ * gadget framework
++ *@remarks The following actions are performed:
++ *- If the argument is NULL or if it doesn't have necessary callbacks,
++ * return -EINVAL
++ *- If any class driver is already bound, return -EBUSY
++ *- If the device structure is not allocated, return -ENODEV
++ *- Hook up the gadget driver by invoking the driver's bind routine
++ *- Invoke ioh_udc_setup_ep0 to get ready for endpoint 0 traffic
++ *- Enable the host detection and resume signaling on USB
++ *@param driver Reference to the USB gadget driver structure
++ *@return int [ 0 on success and linux error number on failure ]
++ *@see
++ * - ioh_udc_setup_ep0
++ * - ioh_udc_clear_disconnect
++ */
++int usb_gadget_register_driver(struct usb_gadget_driver *driver)
++{
++ struct ioh_udc_dev *dev = ioh_udc;
++ int retval;
++
++ IOH_DEBUG("usb_gadget_register_driver: enter");
++ if ((driver == NULL) || (driver->speed == USB_SPEED_UNKNOWN) ||
++ (driver->bind == NULL) || (driver->setup == NULL) ||
++ (driver->unbind == NULL) || (driver->disconnect == NULL)) {
++ IOH_LOG(KERN_ERR, "usb_gadget_register_driver: invalid\
++ driver parameter");
++ return -EINVAL;
++ }
++
++ if (dev == NULL)
++ return -ENODEV;
++
++ if (dev->driver != NULL) {
++ IOH_LOG(KERN_ERR, "usb_gadget_register_driver: already bound");
++ return -EBUSY;
++ }
++ driver->driver.bus = NULL;
++ dev->driver = driver;
++ dev->gadget.dev.driver = &driver->driver;
++
++ /* Invoke the bind routine of the gadget driver */
++ retval = driver->bind(&dev->gadget);
++
++ if (retval != 0) {
++ IOH_LOG(KERN_ERR, "usb_gadget_register_driver: binding to\
++ %s returning %d",
++ driver->driver.name, retval);
++ dev->driver = NULL;
++ dev->gadget.dev.driver = NULL;
++ return retval;
++ }
++ /* get ready for ep0 traffic */
++ ioh_udc_setup_ep0(dev);
++
++ /* clear SD */
++ ioh_udc_clear_disconnect(dev->regs);
++
++ dev->connected = 1;
++
++ IOH_DEBUG("usb_gadget_register_driver: exit");
++ return 0;
++}
++EXPORT_SYMBOL(usb_gadget_register_driver);
++
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
++ *@brief This API is used to make this driver unavailable to the Linux
++ * USB gadget framework
++ *@remarks The following actions are performed:
++ * - If the argument is NULL or if it doesn't match
++ * with the driver already bound, return -EINVAL
++ * - Call the gadget driver's unbind method
++ * - Disable the host detection and stop signaling
++ *@param driver Reference to the USB gadget driver structure
++ *@return int [ 0 on success and linux error number on failure ]
++ *@see
++ * - ioh_udc_set_disconnect
++ */
++int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
++{
++ struct ioh_udc_dev *dev = ioh_udc;
++
++ IOH_DEBUG("usb_gadget_unregister_driver: enter");
++ if (dev == NULL)
++ return -ENODEV;
++
++ if ((driver == NULL) || (driver != dev->driver)) {
++ IOH_LOG(KERN_ERR, "usb_gadget_unregister_driver: invalid\
++ driver parameter");
++ return -EINVAL;
++ }
++
++ ioh_udc_disable_interrupts(dev->regs, UDC_DEVINT_MSK);
++
++ /* Assumes that there are no pending requests with this driver */
++ driver->unbind(&dev->gadget);
++ dev->gadget.dev.driver = NULL;
++ dev->driver = NULL;
++ dev->connected = 0;
++
++ /* set SD */
++ ioh_udc_set_disconnect(dev->regs);
++
++ IOH_DEBUG("usb_gadget_unregister_driver: %s: unregistered",
++ driver->driver.name);
++
++ IOH_DEBUG("usb_gadget_unregister_driver: exit");
++ return 0;
++}
++EXPORT_SYMBOL(usb_gadget_unregister_driver);
++
++/* Completes request packet ... caller MUST hold lock */
++/*!@ingroup UDC_UtilitiesAPI
++ *@fn static void complete_req(struct ioh_udc_ep *ep, struct
++ * ioh_udc_request *req, int status)
++ *@brief This API is invoked from the driver when processing of a
++ * request is complete
++ *@remarks The main functions performed by this function include:
++ - Delete the request from the endpoint's request queue
++ - Update the request's status with the status passed
++ - Unmap the request's buffer
++ - Reset DMA mapping status
++ - Set endpoint halted status
++ - Invoke the request's completion handler specified by the gadget driver
++ *@param ep Reference to the endpoint structure
++ *@param req Reference to the request structure
++ *@param status indicates the success/failure of completion
++ *@return none
++ */
++static void complete_req(struct ioh_udc_ep *ep, struct ioh_udc_request *req,
++ int status)
++{
++ struct ioh_udc_dev *dev;
++ unsigned halted = ep->halted;
++
++ list_del_init(&req->queue);
++
++ /* set new status if pending */
++ if (req->req.status == -EINPROGRESS)
++ req->req.status = status;
++ else
++ status = req->req.status;
++
++
++ dev = ep->dev;
++ if (req->dma_mapped) {
++ if (ep->in) {
++ pci_unmap_single(dev->pdev, req->req.dma,
++ req->req.length,
++ PCI_DMA_TODEVICE);
++ } else {
++ pci_unmap_single(dev->pdev, req->req.dma,
++ req->req.length,
++ PCI_DMA_FROMDEVICE);
++ }
++ req->dma_mapped = 0;
++ req->req.dma = DMA_ADDR_INVALID;
++ }
++ ep->halted = 1;
++
++ IOH_DEBUG("complete %s req %p status %d len %u",
++ ep->ep.name, &req->req, status, req->req.length);
++ IOH_DEBUG("complete %s ioh-req 0x%08x req->queue 0x%08x",
++ ep->ep.name, (u32)req, (u32)(&(req->queue)));
++ spin_unlock(&dev->lock);
++ if (!ep->in)
++ ioh_udc_ep_clear_rrdy(ep->regs);
++
++ req->req.complete(&ep->ep, &req->req);
++
++ spin_lock(&dev->lock);
++ ep->halted = halted;
++}
++
++/* Empty request queue of an endpoint; caller holds spinlock */
++/*!@ingroup UDC_UtilitiesAPI
++ *@fn void empty_req_queue(struct ioh_udc_ep *ep)
++ *@brief This API empties the request queue of an endpoint
++ *@remarks The following actions are performed:
++ * - Set endpoint halted status as 1 in ep->halted
++ * - For each request in the endpoint's queue, invoke
++ * the completion handler specified by the gadget driver
++ *@param ep Reference to the endpoint structure
++ *@return none
++ *@see
++ * - complete_req
++ */
++void empty_req_queue(struct ioh_udc_ep *ep)
++{
++ struct ioh_udc_request *req;
++
++ ep->halted = 1;
++ while (!list_empty(&ep->queue)) {
++ req = list_entry(ep->queue.next, struct ioh_udc_request, queue);
++ IOH_DEBUG("empty_req_queue: complete_req ep%d%s", ep->num,
++ (ep->in ? "in" : "out"));
++ complete_req(ep, req, -ESHUTDOWN);
++ }
++}
++
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn static int ioh_udc_pcd_ep_enable(struct usb_ep *usbep,
++ * const struct usb_endpoint_descriptor *desc)
++ *@brief This API enables the endpoint. It is called from gadget driver
++ *@remarks The following actions are performed:
++ * - If usbep or descriptor is NULL or the endpoint is 0,
++ * return error -EINVAL
++ * - If packet size not specified in the descriptor,
++ * return -ERANGE
++ * - Configure the endpoint by invoking ioh_udc_ep_enable()
++ * - Enable Endpoint interrupts by invoking
++ * ioh_udc_enable_ep_interrupts
++ *@param usbep Reference to the USB endpoint structure
++ *@param desc Reference to the USB endpoint descriptor structure
++ *@return int [ 0 on success and linux error number on failure ]
++ *@see
++ * - ioh_udc_ep_enable
++ * - ioh_udc_enable_ep_interrupts
++ */
++static int ioh_udc_pcd_ep_enable(struct usb_ep *usbep,
++ const struct usb_endpoint_descriptor *desc)
++{
++ struct ioh_udc_ep *ep;
++ struct ioh_udc_dev *dev;
++ unsigned long iflags;
++
++ if ((usbep == NULL) || (usbep->name == ep0_string) || (desc == NULL) ||
++ (desc->bDescriptorType != USB_DT_ENDPOINT) ||
++ (desc->wMaxPacketSize == 0)) {
++ return -EINVAL;
++ }
++
++ ep = container_of(usbep, struct ioh_udc_ep, ep);
++ dev = ep->dev;
++
++ IOH_DEBUG("ioh_udc_pcd_ep_enable ep %d", ep->num);
++ if ((dev->driver == NULL) || (dev->gadget.speed == USB_SPEED_UNKNOWN))
++ return -ESHUTDOWN;
++
++
++ spin_lock_irqsave(&dev->lock, iflags);
++ ep->desc = desc;
++ ep->halted = 0;
++ ioh_udc_ep_enable(ep->regs, &ep->dev->cfg_data, desc);
++ ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
++ ioh_udc_enable_ep_interrupts(ep->dev->regs,
++ 1 << (ep->in ? ep->num : ep->num + UDC_EPINT_OUT_EP0));
++
++ IOH_DEBUG("ioh_udc_pcd_ep_enable: %s enabled", usbep->name);
++
++ spin_unlock_irqrestore(&dev->lock, iflags);
++ return 0;
++}
++
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn static int ioh_udc_pcd_ep_disable(struct usb_ep *usbep)
++ *@brief This API disables endpoint and is called from gadget driver
++ *@remarks The following actions are performed:
++ *- If usbep or the endpoint's descriptor is NULL, return error -EINVAL
++ *- Empty request queue using empty_req_queue API
++ *- Un-configure the endpoint by invoking the HAL API ioh_udc_ep_disable API
++ *- Disable interrupts using ioh_udc_disable_ep_interrupts API
++ *- Set usb endpoint descriptor for the endpoint to NULL
++ *- Set ops method for endpoint request to ioh_udc_ep_ops
++ *- Initialize endpoint queue head using INIT_LIST_HEAD API
++ *@param usbep Reference to the USB endpoint structure
++ *@return int [ 0 on success and linux error number on failure ]
++ *@see
++ * - empty_req_queue
++ * - ioh_udc_ep_disable
++ * - ioh_udc_disable_ep_interrupts
++ */
++static int ioh_udc_pcd_ep_disable(struct usb_ep *usbep)
++{
++ struct ioh_udc_ep *ep = NULL;
++ unsigned long iflags;
++
++ if (usbep == NULL)
++ return -EINVAL;
++
++
++ ep = container_of(usbep, struct ioh_udc_ep, ep);
++ if ((usbep->name == ep0_string) || (ep->desc == NULL))
++ return -EINVAL;
++
++
++ IOH_DEBUG("ioh_udc_pcd_ep_disable: ep%d%s", ep->num,
++ (ep->in ? "in" : "out"));
++ spin_lock_irqsave(&ep->dev->lock, iflags);
++ empty_req_queue(ep);
++ ep->halted = 1;
++ ioh_udc_ep_disable(ep->regs);
++
++ /* disable interrupt */
++ ioh_udc_disable_ep_interrupts(ep->dev->regs,
++ 1 << (ep->in ? ep->num : ep->num + UDC_EPINT_OUT_EP0));
++ ep->desc = NULL;
++ ep->ep.ops = &ioh_udc_ep_ops;
++ INIT_LIST_HEAD(&ep->queue);
++
++ spin_unlock_irqrestore(&ep->dev->lock, iflags);
++
++ return 0;
++}
++
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn static struct usb_request *ioh_udc_alloc_request
++ * (struct usb_ep *usbep, gfp_t gfp)
++ *@brief This function allocates request structure. It is called by gadget driver
++ *@remarks The following actions are performed:
++ *- If usbep is NULL, return NULL
++ *- Allocate and initialize memory for request structure
++ *- If allocation fails, return NULL
++ *- Initialize the linked list usbep->queue
++ *- If DMA mode is enabled, create data pool for EP0 IN requests. If it fails,
++ * release memory allocated for request structure
++ *- Set DMA descriptor status as HOST BUSY to prevent its usage
++ *@param usbep Reference to the USB endpoint structure
++ *@param gfp Flag to be used while allocating memory
++ *@return struct usb_request *
++ * [ NULL on failure and allocated address on success ]
++ *@see
++ * - pci_pool_alloc
++ */
++static struct usb_request *ioh_udc_alloc_request(struct usb_ep *usbep,
++ gfp_t gfp)
++{
++ struct ioh_udc_request *req;
++ struct ioh_udc_ep *ep;
++
++ if (usbep == NULL)
++ return NULL;
++
++ ep = container_of(usbep, struct ioh_udc_ep, ep);
++ IOH_DEBUG("ioh_udc_alloc_request: ep %s", usbep->name);
++ req = kzalloc(sizeof(struct ioh_udc_request), gfp);
++ if (req == NULL) {
++ IOH_DEBUG("ioh_udc_alloc_request: no memory for request");
++ return NULL;
++ }
++ memset(req, 0, sizeof(struct ioh_udc_request));
++ req->req.dma = DMA_ADDR_INVALID;
++ INIT_LIST_HEAD(&req->queue);
++
++ if (ep->dma != NULL) {
++ struct ioh_udc_data_dma_desc *dma_desc;
++
++ /* ep0 in requests are allocated from data pool here */
++ dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
++ &req->td_data_phys);
++ if (NULL == dma_desc) {
++ kfree(req);
++ return NULL;
++ }
++
++ IOH_DEBUG("ioh_udc_alloc_request: req = 0x%p dma_desc = 0x%p, "
++ "td_phys = 0x%08lx",
++ req, dma_desc, (unsigned long)req->td_data_phys);
++
++ /* prevent from using desc. - set HOST BUSY */
++ dma_desc->status |= IOH_UDC_BS_HST_BSY;
++ dma_desc->dataptr = __constant_cpu_to_le32(DMA_ADDR_INVALID);
++ req->td_data = dma_desc;
++ req->td_data_last = dma_desc;
++ req->chain_len = 1;
++ }
++
++ return &req->req;
++}
++
++/* frees pci pool descriptors of a DMA chain */
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn static int ioh_udc_free_dma_chain(struct ioh_udc_dev *dev,
++ * struct ioh_udc_request *req)
++ *@brief This function frees the DMA chain created for the request
++ *@remarks The following actions are performed:
++ *- Get the virtual address of second DMA descriptor,
++ * of the chain using phys_to_virt
++ *- Release the allocated block back into PCI pool using pci_pool_free
++ *- Get the virtual address of next DMA descriptor,
++ * of the chain using phys_to_virt
++ *- Repeat steps 2, 3 for the chain length
++ *@param dev Reference to the driver structure
++ *@param req Reference to the request to be freed
++ *@return success
++ */
++static int ioh_udc_free_dma_chain(struct ioh_udc_dev *dev,
++ struct ioh_udc_request *req)
++{
++ int ret_val = 0;
++ struct ioh_udc_data_dma_desc *td;
++ struct ioh_udc_data_dma_desc *td_last = NULL;
++ unsigned int i;
++
++ /* do not free first desc., will be done by free for request */
++ td_last = req->td_data;
++ td = phys_to_virt(td_last->next);
++
++ for (i = 1; i < req->chain_len; i++) {
++ pci_pool_free(dev->data_requests, td,
++ (dma_addr_t) td_last->next);
++ td_last = td;
++ td = phys_to_virt(td_last->next);
++ }
++
++ return ret_val;
++}
++
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn static int ioh_udc_create_dma_chain(struct ioh_udc_ep *ep,
++ * struct ioh_udc_request *req, unsigned long buf_len, gfp_t gfp_flags)
++ *@brief This function creates or reinitializes a DMA chain
++ *@remarks The following actions are performed:
++ *- If the endpoint is output, set the first descriptor status as HOST_BUSY
++ *- Calculate the new chain length from the requested length and the maximum
++ * packet size available on the end point
++ *- Check if a shorter chain is already allocated (req->chain_len > 1).
++ * If TRUE, invoke ioh_udc_free_dma_chain to free the DMA chain.
++ *- Generate the required number of buffers and descriptors as follows:
++ *- Invoke pci_pool_alloc with the following arguments:
++ * DMA pool for data requests (ep->dev->data_requests),
++ * gfp_flags and a pointer to the DMA address.
++ *- If pci_pool_alloc fails, return -ENOMEM.
++ *- If the descriptor is valid, assign the DMA address corresponding
++ * to 'buf' in the current descriptor's dataptr field (buffer descriptor).
++ *- Link the descriptor and assign the bytes to be transferred.
++ *- Set the byte count as well as the status (HOST_BUSY) for
++ * input endpoints. For Output endpoint, set the status alone.
++ *- For the last descriptor, set the last bit in the status field
++ * and update the next field to point to itself.
++ *- Return with IOH_UDC_SUCCESS
++ *@param ep Reference to the endpoint structure
++ *@param req Reference to the request
++ *@param buf_len The buffer length
++ *@param gfp_flags Flags to be used while mapping the data buffer
++ *@return success, -ENOMEM (pci_pool_alloc invocation fails)
++ *@see
++ * - ioh_udc_free_dma_chain
++ */
++static int ioh_udc_create_dma_chain(struct ioh_udc_ep *ep,
++ struct ioh_udc_request *req,
++ unsigned long buf_len, gfp_t gfp_flags)
++{
++ unsigned long bytes = req->req.length;
++ unsigned int i;
++ dma_addr_t dma_addr;
++ struct ioh_udc_data_dma_desc *td = NULL;
++ struct ioh_udc_data_dma_desc *last = NULL;
++ unsigned long txbytes;
++ unsigned len;
++
++ IOH_DEBUG("ioh_udc_create_dma_chain: bytes = %ld buf_len = %ld",
++ bytes, buf_len);
++ /* unset L bit in first desc for OUT */
++ if (!ep->in)
++ req->td_data->status = IOH_UDC_BS_HST_BSY;
++
++
++ /* alloc only new desc's if not already available */
++ len = req->req.length / buf_len;
++ if (req->req.length % buf_len)
++ len++;
++
++ /* shorter chain already allocated before */
++ if (req->chain_len > 1)
++ ioh_udc_free_dma_chain(ep->dev, req);
++
++ req->chain_len = len;
++
++ td = req->td_data;
++ /* gen. required number of descriptors and buffers */
++ for (i = buf_len; i < bytes; i += buf_len) {
++ dma_addr = DMA_ADDR_INVALID;
++ /* create or determine next desc. */
++ td = pci_pool_alloc(ep->dev->data_requests, gfp_flags,
++ &dma_addr);
++ if (td == NULL)
++ return -ENOMEM;
++
++ td->status = 0;
++ td->dataptr = req->req.dma + i; /* assign buffer */
++
++ if ((bytes - i) >= buf_len) {
++ txbytes = buf_len;
++ } else { /* short packet */
++ txbytes = bytes - i;
++ }
++ /* link td and assign tx bytes */
++ if (i == buf_len) {
++ req->td_data->next = dma_addr;
++ /* set the count bytes */
++ if (ep->in) {
++ req->td_data->status = IOH_UDC_BS_HST_BSY |
++ buf_len;
++ /* second desc */
++ td->status = IOH_UDC_BS_HST_BSY | txbytes;
++ } else {
++ td->status = IOH_UDC_BS_HST_BSY;
++ }
++ } else {
++ last->next = dma_addr;
++ if (ep->in)
++ td->status = IOH_UDC_BS_HST_BSY | txbytes;
++ else
++ td->status = IOH_UDC_BS_HST_BSY;
++
++ }
++ last = td;
++ }
++ /* set last bit */
++ if (td) {
++ td->status |= IOH_UDC_DMA_LAST;
++ /* last desc. points to itself */
++ req->td_data_last = td;
++ td->next = req->td_data_phys;
++ }
++ return 0;
++}
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn static void ioh_udc_free_request(struct usb_ep *usbep,
++ * struct usb_request *usbreq)
++ *@brief This function frees request structure. It is called by gadget driver
++ *@remarks The following actions are performed:
++ * - If any of the argument is NULL, return
++ * - Warn if req->queue is empty
++ * - Otherwise, if the chain length is greater than 1,
++ * invoke ioh_udc_free_dma_chain to free the DMA chain.
++ * - If the chain length is less than 1,
++ * release memory allocated for request structure and data pool
++ * by invoking pci_pool_free
++ *@param usbep Reference to the USB endpoint structure
++ *@param usbreq Reference to the USB request
++ *@return int [ 0 on success and linux error number on failure ]
++ *@see
++ * - ioh_udc_free_dma_chain
++ */
++static void ioh_udc_free_request(struct usb_ep *usbep,
++ struct usb_request *usbreq)
++{
++ struct ioh_udc_ep *ep;
++ struct ioh_udc_request *req;
++
++ if ((usbep == NULL) || (usbreq == NULL))
++ return;
++
++
++ ep = container_of(usbep, struct ioh_udc_ep, ep);
++ req = container_of(usbreq, struct ioh_udc_request, req);
++ IOH_DEBUG("ioh_udc_free_request: %s req = 0x%p", usbep->name, req);
++
++ if (!list_empty(&req->queue))
++ IOH_LOG(KERN_ERR, "ioh_udc_free_request: %s req = 0x%p\
++ queue not empty", usbep->name, req);
++
++ if (req->td_data != NULL) {
++ if (req->chain_len > 1)
++ ioh_udc_free_dma_chain(ep->dev, req);
++ else
++ pci_pool_free(ep->dev->data_requests, req->td_data,
++ req->td_data_phys);
++
++ }
++ kfree(req);
++}
++
++/*!@ingroup UDC_UtilitiesAPI
++ *@fn static int prepare_dma(struct ioh_udc_ep *ep,
++ * struct ioh_udc_request *req, gfp_t gfp)
++ *@brief This function creates and initializes the DMA chain for the request
++ *@remarks The following actions are performed:
++ *- Set buffer pointer in req->td_data->dataptr and last descriptor
++ * indication status in req->td_data->status.
++ *- Allocate and create a DMA chain using ioh_udc_create_dma_chain.
++ *- If the allocation fails, return -ENOMEM.
++ *- If the request is on an IN endpoint, update the count in the first
++ *descriptor's status and mark the status (req->td_data->status) as host busy
++ * - Return success
++ *@param ep Reference to the endpoint structure
++ *@param req Reference to the request
++ *@param gfp Flag to be used while mapping the data buffer
++ *@return int [ 0 on success and linux error number on failure ]
++ *@see
++ * - ioh_udc_create_dma_chain
++ */
++static int prepare_dma(struct ioh_udc_ep *ep, struct ioh_udc_request *req,
++ gfp_t gfp)
++{
++ int retval = 0;
++ IOH_DEBUG("prepare_dma: enter req->req.dma = 0x%08x", req->req.dma);
++
++ /* set buffer pointer */
++ req->td_data->dataptr = req->req.dma;
++ /* set last bit */
++ req->td_data->status |= IOH_UDC_DMA_LAST;
++
++ /* Allocate and create a DMA chain */
++ retval = ioh_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
++ if (retval != 0) {
++ if (retval == -ENOMEM)
++ IOH_LOG(KERN_ERR, "prepare_dma: Out of DMA memory");
++
++ return retval;
++ }
++ if (ep->in) {
++ if (req->req.length <= ep->ep.maxpacket) {
++ /* write tx bytes */
++ req->td_data->status = IOH_UDC_DMA_LAST |
++ IOH_UDC_BS_HST_BSY | req->req.length;
++ }
++ }
++
++ if (ep->in) {
++ /* if bytes < max packet then tx bytes must
++ *be written in packet per buffer mode
++ */
++ if ((req->req.length < ep->ep.maxpacket) || (ep->num == 0)) {
++ /* write the count */
++ req->td_data->status = (req->td_data->status &
++ ~IOH_UDC_RXTX_BYTES) |
++ req->req.length;
++ }
++ /* set HOST BUSY */
++ req->td_data->status = (req->td_data->status &
++ ~IOH_UDC_BUFF_STS) |
++ IOH_UDC_BS_HST_BSY;
++ }
++ return retval;
++}
++
++/*!@ingroup UDC_UtilitiesAPI
++ *@fn static void process_zlp(struct ioh_udc_ep *ep,
++ * struct ioh_udc_request *req)
++ *@brief This function process zero length packets from the gadget driver
++ *@remarks The following actions are performed:
++ *- Invoke the requests completion routine
++ *- If there is a set config or set interface request that is not
++ * acknowledged, set CSR_DONE using ioh_udc_set_csr_done API.
++ * Reset dev->set_cfg_not_acked to 0.
++ *- If there is a setup request waiting for acknowledgement,
++ *clear the NAK on EP0 IN using ioh_udc_ep_clear_nak API and set the
++ *naking status as 0 in dev->ep[UDC_EP0IN_IDX].naking. Reset dev->
++ * waiting_zlp_ack to 0
++ *@param ep Reference to the endpoint structure
++ *@param req Reference to the request
++ *@return none
++ *@see
++ * - complete_req
++ * - ioh_udc_set_csr_done
++ * - ioh_udc_ep_clear_nak
++ */
++static void process_zlp(struct ioh_udc_ep *ep, struct ioh_udc_request *req)
++{
++ struct ioh_udc_dev *dev = ep->dev;
++
++ IOH_DEBUG("process_zlp: enter ep%d%s",
++ ep->num, (ep->in ? "in" : "out"));
++ /* IN zlp's are handled by hardware */
++ complete_req(ep, req, 0);
++
++ /* if set_config or set_intf is waiting for ack by zlp
++ *then set CSR_DONE
++ */
++ if (dev->set_cfg_not_acked) {
++ IOH_DEBUG("process_zlp: csr done");
++ ioh_udc_set_csr_done(dev->regs);
++ dev->set_cfg_not_acked = 0;
++ }
++ /* setup command is ACK'ed now by zlp */
++ if (!dev->stall) {
++ if (dev->waiting_zlp_ack) {
++ /* clear NAK by writing CNAK in EP0_IN */
++ ioh_udc_ep_clear_nak(dev->ep[UDC_EP0IN_IDX].regs);
++ dev->waiting_zlp_ack = 0;
++ }
++ }
++ IOH_DEBUG("process_zlp: exit");
++}
++
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn static int ioh_udc_pcd_queue(struct usb_ep *usbep,
++ * struct usb_request *usbreq, gfp_t gfp)
++ *@brief This function queues a request packet. It is called by gadget driver
++ *@remarks The following actions are performed:
++ * - If any of the argument is NULL or the request has NULL buffer
++ * or NULL completion handler, return -EINVAL
++ * - Verify if the endpoint is enabled before trying to queue. if not,
++ * return -EINVAL
++ * - Verify if the request queue is non empty. If so, return -EINVAL
++ * - If the gadget is not bound or invalid, return -ESHUTDOWN
++ * - Setup a DMA mapping for the request buffer and
++ * allocate the DMA descriptors
++ * - If the usbep's request queue is empty
++ * - If length of data equals 0, call process_zlp API
++ * and return IOH_UDC_SUCCESS
++ * - if the endpoint is not an IN endpoint,
++ * - Start the receive request by invoking
++ * ioh_udc_start_rxrequest
++ * - Otherwise
++ * - Wait till STALL bit in endpoint control register
++ * is cleared
++ * - Clear NAK by invoking ioh_udc_ep_clear_nak.
++ * - Enable endpoint interrupts for out endpoint
++ * - Enable TX DMA by invoking ioh_udc_set_dma
++ * - If request queue not empty,
++ * - Add the request to the end of the usbep's queue
++ * - Return with IOH_UDC_SUCCESS
++ *@param usbep Reference to the USB endpoint structure
++ *@param usbreq Reference to the USB request
++ *@param gfp Flag to be used while mapping the data buffer
++ *@return int [ 0 on success and linux error number on failure ]
++ *@see
++ * - prepare_dma
++ * - process_zlp
++ * - ioh_udc_start_rxrequest
++ * - ioh_udc_read_ep_control
++ * - ioh_udc_ep_clear_nak
++ * - ioh_udc_enable_ep_interrupts
++ * - ioh_udc_set_dma
++ */
++static int ioh_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
++ gfp_t gfp)
++{
++ int retval = 0;
++ struct ioh_udc_ep *ep;
++ struct ioh_udc_dev *dev;
++ struct ioh_udc_request *req;
++ unsigned long iflags;
++
++ IOH_DEBUG("ioh_udc_pcd_queue: enter");
++ if ((usbep == NULL) || (usbreq == NULL) || (usbreq->complete == NULL) ||
++ (usbreq->buf == NULL)) {
++ IOH_DEBUG("ioh_udc_pcd_queue: Invalid end point OR request");
++ return -EINVAL;
++ }
++
++ ep = container_of(usbep, struct ioh_udc_ep, ep);
++ if ((ep->desc == NULL) && (ep->num != 0)) {
++ IOH_DEBUG("ioh_udc_pcd_queue: Trying to queue before before\
++ enabling the end point %d", ep->num);
++ /* Don't let non-control ep queue before enable */
++ return -EINVAL;
++ }
++ req = container_of(usbreq, struct ioh_udc_request, req);
++ IOH_DEBUG("ioh_udc_pcd_queue: ep%d%s req = 0x%08x", ep->num,
++ (ep->in ? "in" : "out"), (u32)req);
++ if (!list_empty(&req->queue)) {
++ IOH_DEBUG("list_empty error: req->queue = 0x%08x\
++ ioh-req = 0x%08x", (u32)(&(req->queue)), (u32)req);
++ return -EINVAL;
++ }
++ dev = ep->dev;
++ if ((dev->driver == NULL) || (dev->gadget.speed == USB_SPEED_UNKNOWN)) {
++ IOH_DEBUG("ioh_udc_pcd_queue: Gadget not bound/invalid");
++ IOH_DEBUG("dev->driver = 0x%p speed = 0x%x", dev->driver,
++ dev->gadget.speed);
++ return -ESHUTDOWN;
++ }
++ spin_lock_irqsave(&ep->dev->lock, iflags);
++ /* map the buffer for dma */
++ if ((usbreq->length != 0) &&
++ ((usbreq->dma == DMA_ADDR_INVALID) || (usbreq->dma == 0))) {
++ if (ep->in) {
++ usbreq->dma = pci_map_single(dev->pdev, usbreq->buf,
++ usbreq->length, PCI_DMA_TODEVICE);
++ } else {
++ usbreq->dma = pci_map_single(dev->pdev, usbreq->buf,
++ usbreq->length, PCI_DMA_FROMDEVICE);
++ }
++ req->dma_mapped = 1;
++ }
++
++ if (usbreq->length > 0) { /* setup the descriptors */
++ retval = prepare_dma(ep, req, gfp);
++ if (retval != 0) {
++ /* Need to unmap before returning? ...
++ req->dma_mapped = 1; */
++ spin_unlock_irqrestore(&dev->lock, iflags);
++ return retval;
++ }
++ }
++
++ usbreq->actual = 0;
++ usbreq->status = -EINPROGRESS;
++ req->dma_done = 0;
++
++ if (list_empty(&ep->queue) && !ep->halted) {
++ /* no pending transfer, so start this req */
++ if ((usbreq->length == 0)) {
++ process_zlp(ep, req);
++ spin_unlock_irqrestore(&dev->lock, iflags);
++ return 0;
++ }
++ if (!ep->in) {
++ ioh_udc_start_rxrequest(ep, req);
++ } else {
++ /*
++ * For IN trfr the descriptors will be programmed and
++ * P bit will be set when
++ * we get an IN token
++ */
++
++ while (ioh_udc_read_ep_control(ep->regs) &
++ (1 << UDC_EPCTL_S))
++ udelay(100);
++
++ ioh_udc_ep_clear_nak(ep->regs);
++ ioh_udc_enable_ep_interrupts(ep->dev->regs,
++ (1 << ep->num));
++ /* enable DMA */
++ ioh_udc_set_dma(dev->regs, DMA_DIR_TX);
++ }
++ }
++ IOH_DEBUG("ioh_udc_pcd_queue: desc[stat:0x%08x dptr:0x%08x\
++ next:0x%08x]",
++ req->td_data->status, req->td_data->dataptr,
++ req->td_data->next);
++ /* Now add this request to the ep's pending requests */
++ if (req != NULL)
++ list_add_tail(&req->queue, &ep->queue);
++
++#ifdef IOH_PRINT_REG
++ ioh_udc_print_regs((u32)dev->virt_addr);
++#endif
++ spin_unlock_irqrestore(&dev->lock, iflags);
++ return retval;
++}
++
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn static int ioh_udc_pcd_dequeue(struct usb_ep *usbep,
++ * struct usb_request *usbreq)
++ *@brief This function de-queues a request packet. It is called
++ * by gadget driver
++ *@remarks The following actions are performed:
++ * - If any of the argument is NULL, return -EINVAL
++ * - If the request is not in usbeps queue, return -EINVAL
++ * - Set the NAK bit
++ * - If the request is not being processed, call its\
++ * completion handler
++ *@param usbep Reference to the USB endpoint structure
++ *@param usbreq Reference to the USB request
++ *@return int [ 0 on success and linux error number on failure ]
++ *@see
++ * - ioh_udc_ep_set_nak
++ * - complete_req
++ */
++static int ioh_udc_pcd_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
++{
++ struct ioh_udc_ep *ep;
++ struct ioh_udc_request *req;
++ unsigned long flags;
++
++ ep = container_of(usbep, struct ioh_udc_ep, ep);
++ if ((usbep == NULL) || (usbreq == NULL) ||
++ ((ep->desc == NULL) && (ep->num != 0))) {
++ return -EINVAL;
++ }
++ IOH_DEBUG("ioh_udc_pcd_dequeue: enter ep%d%s", ep->num,
++ (ep->in ? "in" : "out"));
++ req = container_of(usbreq, struct ioh_udc_request, req);
++ spin_lock_irqsave(&ep->dev->lock, flags);
++ /* make sure it's still queued on this endpoint */
++ list_for_each_entry(req, &ep->queue, queue) {
++ if (&req->req == usbreq)
++ break;
++
++ }
++
++ if (&req->req != usbreq) {
++ spin_unlock_irqrestore(&ep->dev->lock, flags);
++ return -EINVAL;
++ }
++ ioh_udc_ep_set_nak(ep->regs);
++ if (!list_empty(&req->queue))
++ complete_req(ep, req, -ECONNRESET);
++
++ spin_unlock_irqrestore(&ep->dev->lock, flags);
++ return 0;
++}
++
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn static int ioh_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
++ *@brief This function sets or clears the endpoint halt feature
++ *@remarks The following actions are performed:
++ * - If the ep->desc is NULL and ep->num is 0,
++ * return -EINVAL.
++ * - If ep->dev->driver is NULL and speed is unknown,
++ * return -ESHUTDOWN
++ * - If there are pending transfers, return -EAGAIN
++ * - If halt is 0, Clear the STALL for the endpoint
++ * - Otherwise,
++ * - Set the STALL for the endpoint
++ * - Enable endpoint interrupts for out endpoint
++ *@param usbep Reference to the USB endpoint structure
++ *@param halt Specifies whether to set or clear the feature
++ *@return int [ 0 on success and linux error number on failure ]
++ *@see
++ * - ioh_udc_ep_clear_stall
++ * - ioh_udc_ep_set_stall
++ * - ioh_udc_enable_ep_interrupts
++ */
++static int ioh_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
++{
++ struct ioh_udc_ep *ep;
++ unsigned long iflags;
++
++ if (usbep == NULL)
++ return -EINVAL;
++
++
++ IOH_DEBUG("ioh_udc_pcd_set_halt: %s: halt=%d", usbep->name, halt);
++
++ ep = container_of(usbep, struct ioh_udc_ep, ep);
++
++ if ((ep->desc == NULL) && (ep->num == 0)) {
++ IOH_DEBUG("ioh_udc_pcd_set_halt: ep->desc = 0x%x:\
++ ep->num = 0x%x", (u32)(ep->desc), ep->num);
++ return -EINVAL;
++ }
++ if ((ep->dev->driver == NULL) || (ep->dev->gadget.speed\
++ == USB_SPEED_UNKNOWN)) {
++ IOH_DEBUG("ioh_udc_pcd_set_halt: ep->dev->driver = 0x%x:\
++ ep->dev->gadget.speed = 0x%x", (u32)(ep->dev->driver),
++ ep->dev->gadget.speed);
++ return -ESHUTDOWN;
++ }
++
++ spin_lock_irqsave(&udc_stall_spinlock, iflags);
++
++ if (!list_empty(&ep->queue)) {
++ IOH_DEBUG("ioh_udc_pcd_set_halt: list not empty");
++ spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
++ return -EAGAIN;
++ }
++ /* halt or clear halt */
++ if (halt == 0) {
++ ioh_udc_ep_clear_stall(ep->regs);
++ } else {
++ if (ep->num == IOH_UDC_EP0)
++ ep->dev->stall = 1;
++
++ ioh_udc_ep_set_stall(ep->regs);
++ ioh_udc_enable_ep_interrupts(ep->dev->regs,
++ 1 << (ep->in ? ep->num : ep->num + UDC_EPINT_OUT_EP0));
++ }
++ spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
++ return 0;
++}
++
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn static int ioh_udc_pcd_set_wedge(struct usb_ep *usbep)
++ *@brief This function sets the endpoint wedge feature (stall)
++ *@param usbep Reference to the USB endpoint structure
++ *@remarks Also sets the device protocol stall flag
++ *@return int [ 0 on success and linux error number on failure ]
++ */
++static int ioh_udc_pcd_set_wedge(struct usb_ep *usbep)
++{
++ struct ioh_udc_ep *ep;
++ unsigned long iflags;
++
++ if (usbep == NULL)
++ return -EINVAL;
++
++
++ IOH_DEBUG("ioh_udc_pcd_set_wedge: %s:", usbep->name);
++
++ ep = container_of(usbep, struct ioh_udc_ep, ep);
++
++ if ((ep->desc == NULL) && (ep->num == 0)) {
++ IOH_DEBUG("ioh_udc_pcd_set_wedge: ep->desc = 0x%x:\
++ ep->num = 0x%x", (u32)(ep->desc), ep->num);
++ return -EINVAL;
++ }
++ if ((ep->dev->driver == NULL) || (ep->dev->gadget.speed ==\
++ USB_SPEED_UNKNOWN)) {
++ IOH_DEBUG("ioh_udc_pcd_set_wedge: ep->dev->driver = 0x%x:\
++ ep->dev->gadget.speed = 0x%x", (u32)(ep->dev->driver),
++ ep->dev->gadget.speed);
++ return -ESHUTDOWN;
++ }
++
++ spin_lock_irqsave(&udc_stall_spinlock, iflags);
++
++ if (!list_empty(&ep->queue)) {
++ IOH_DEBUG("ioh_udc_pcd_set_wedge: list not empty");
++ spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
++ return -EAGAIN;
++ }
++ /* halt */
++ if (ep->num == IOH_UDC_EP0)
++ ep->dev->stall = 1;
++
++ ioh_udc_ep_set_stall(ep->regs);
++ ioh_udc_enable_ep_interrupts(ep->dev->regs,
++ 1 << (ep->in ? ep->num : ep->num + UDC_EPINT_OUT_EP0));
++
++ ep->dev->prot_stall = 1;
++ spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
++ return 0;
++}
++
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn static void ioh_udc_pcd_fifo_flush(struct usb_ep *usbep)
++ *@brief This function Flush the FIFO of specified endpoint
++ *@remarks The following actions are performed:
++ * - Depending on the endpoint direction, flush the TX or RX FIFO
++ *@param usbep Reference to the USB endpoint structure
++ *@return none
++ *@see
++ * - ioh_udc_ep_fifo_flush
++ */
++static void ioh_udc_pcd_fifo_flush(struct usb_ep *usbep)
++{
++ struct ioh_udc_ep *ep;
++
++ if (usbep == NULL)
++ return;
++
++
++ IOH_DEBUG("ioh_udc_pcd_fifo_flush: %s", usbep->name);
++
++ ep = container_of(usbep, struct ioh_udc_ep, ep);
++ if ((ep->desc == NULL) && (ep->num != 0))
++ return;
++
++
++ ioh_udc_ep_fifo_flush(ep->regs, ep->in);
++}
++
++/*!@ingroup UDC_UtilitiesAPI
++ *@fn static void ioh_udc_svc_data_out(struct ioh_udc_dev *dev,
++ * int ep_num)
++ *@brief Handles interrupts from OUT endpoint
++ *@remarks The following actions are performed:
++ *- Depending on the endpoint direction, flush the TX or RX FIFO
++ *- If the interrupt is BNA and request queue is not empty, get next request
++ * - If DMA state is DONE and dma_going flag is not set, start receive
++ * request.
++ *- If the interrupt is HE, log the error message
++ *- If the interrupt is RSS,
++ * - Set STALL using ioh_udc_ep_set_stall API.
++ * - Enable endpoint interrupts for out endpoint
++ *- If the interrupt is RCS,
++ * - If the protocol stall flag not set,
++ * - Clear stall using ioh_udc_ep_clear_stall API.
++ * - Otherwise
++ * - Set STALL using ioh_udc_ep_set_stall API.
++ * - Enable endpoint interrupts for out endpoint
++ *- If the interrupt is OUT_DATA,
++ * - If the protocol stall flag is set,
++ * - Set STALL using ioh_udc_ep_set_stall API.
++ * - Enable endpoint interrupts for out endpoint
++ * - Otherwise
++ * - invoke the completion routine using ioh_udc_complete_receiver
++ *- If request queue is empty,
++ * - Enable DMA using ioh_udc_set_dma API
++ *@param dev Reference to the device structure
++ *@param ep_num Endpoint that generated the interrupt
++ *@return none
++ *@see
++ * - ioh_udc_start_rxrequest
++ * - ioh_udc_ep_set_stall
++ * - ioh_udc_enable_ep_interrupts
++ * - ioh_udc_ep_clear_stall
++ * - ioh_udc_complete_receiver
++ * - ioh_udc_set_dma
++ */
++static void ioh_udc_svc_data_out(struct ioh_udc_dev *dev, int ep_num)
++{
++ u32 epsts;
++ struct ioh_udc_ep *ep;
++ struct ioh_udc_request *req = NULL;
++
++ ep = &dev->ep[2*ep_num + 1];
++ epsts = ep->epsts;
++ ep->epsts = 0;
++
++ IOH_DEBUG("ioh_udc_svc_data_out: ep%d%s status = 0x%08x", ep->num,
++ (ep->in ? "in" : "out"), epsts);
++ if (epsts & (1 << UDC_EPSTS_BNA)) { /* Just log it; only in DMA mode */
++ if (!list_empty(&ep->queue)) {
++ /* next request */
++ req = list_entry(ep->queue.next, struct ioh_udc_request,
++ queue);
++ IOH_DEBUG("BNA on ep%dout occured", ep->num);
++ if ((req->td_data_last->status & IOH_UDC_BUFF_STS) !=
++ IOH_UDC_BS_DMA_DONE) {
++ if (req->dma_going == 0)
++ ioh_udc_start_rxrequest(ep, req);
++
++ return;
++ }
++ }
++ }
++ if (epsts & (1 << UDC_EPSTS_HE)) { /* Host error - Just log it */
++ IOH_DEBUG("Host Error on ep%dout occured", ep->num);
++ return;
++ }
++ if (epsts & (1 << UDC_EPSTS_RSS)) {
++ IOH_DEBUG("ioh_udc_svc_data_out: RSS");
++ ioh_udc_ep_set_stall(ep->regs);
++ ioh_udc_enable_ep_interrupts(ep->dev->regs,
++ 1 << (ep->in ? ep->num : ep->num + UDC_EPINT_OUT_EP0));
++ }
++ if (epsts & (1 << UDC_EPSTS_RCS)) {
++ IOH_DEBUG("ioh_udc_svc_data_out: RCS prot_stall = %d",
++ dev->prot_stall);
++ if (dev->prot_stall == 0) {
++ ioh_udc_ep_clear_stall(ep->regs);
++ } else {
++ ioh_udc_ep_set_stall(ep->regs);
++ ioh_udc_enable_ep_interrupts(ep->dev->regs,
++ 1 << (ep->in ? ep->num : ep->num +
++ UDC_EPINT_OUT_EP0));
++ }
++ }
++ if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_OFS) ==
++ UDC_EPSTS_OUT_DATA) {
++ if (ep->dev->prot_stall == 1) {
++ ioh_udc_ep_set_stall(ep->regs);
++ ioh_udc_enable_ep_interrupts(ep->dev->regs,
++ 1 << (ep->in ? ep->num : ep->num +
++ UDC_EPINT_OUT_EP0));
++ } else {
++ ioh_udc_complete_receiver(ep);
++ }
++ }
++
++ if (list_empty(&ep->queue)) {
++ /* enable DMA */
++ ioh_udc_set_dma(dev->regs, DMA_DIR_RX);
++ }
++}
++/*!@ingroup UDC_UtilitiesAPI
++ *@fn static void ioh_udc_svc_control_in(struct ioh_udc_dev *dev)
++ *@brief Handle Control IN endpoint interrupts
++ *@remarks The following actions are performed:
++ * - if the interrupt is TDC [DMA completed] or XFERDONE or TXEMPTY
++ * or ISOINDONE or HE or BNA, log the status.
++ * - If there is an TDC token
++ * - Invoke the requests completion routine.
++ * - If there is an IN token
++ * - Invoke the next request start routine.
++ *@param dev Reference to the device structure
++ *@return none
++ *@see
++ * - ioh_udc_complete_transfer
++ * - ioh_udc_start_next_txrequest
++ */
++static void ioh_udc_svc_control_in(struct ioh_udc_dev *dev)
++{
++ u32 epsts;
++ struct ioh_udc_ep *ep;
++
++ ep = &dev->ep[UDC_EP0IN_IDX];
++ epsts = ep->epsts;
++ ep->epsts = 0;
++
++ IOH_DEBUG("ioh_udc_ep0_in intr status 0x%x", epsts);
++
++ if ((epsts & ((1 << UDC_EPSTS_IN) | (1 << UDC_EPSTS_BNA) |
++ (1 << UDC_EPSTS_HE) |
++ (1 << UDC_EPSTS_TDC) | (1 << UDC_EPSTS_RCS) |
++ (1 << UDC_EPSTS_TXEMPTY) |
++ (1 << UDC_EPSTS_XFERDONE))) == 0) {
++ IOH_DEBUG("Non interrupt request ep%din status %x",
++ ep->num, epsts);
++ return;
++ }
++ if ((epsts & (1 << UDC_EPSTS_BNA))) { /* Just log it */
++ IOH_DEBUG("BNA on ep%din occured", ep->num);
++ return;
++ }
++ if (epsts & (1 << UDC_EPSTS_HE)) {
++ IOH_DEBUG("Host Error on ep%din occured", ep->num);
++ return;
++ }
++ if (epsts & (1 << UDC_EPSTS_TXEMPTY)) { /* Tx empty */
++ IOH_DEBUG("ioh_udc_ep0_in intr: TXEMPTY");
++ }
++ if ((epsts & (1 << UDC_EPSTS_TDC)) && (!dev->stall)) {
++ /* DMA completed */
++ IOH_DEBUG("TDC on ep%din", ep->num);
++ ioh_udc_complete_transfer(ep);
++ }
++ /* On IN interrupt, provide data if we have any */
++ if ((epsts & (1 << UDC_EPSTS_IN)) &&
++ ((epsts & (1 << UDC_EPSTS_TDC)) == 0) &&
++ ((epsts & (1 << UDC_EPSTS_TXEMPTY)) == 0)) {
++ ioh_udc_start_next_txrequest(ep);
++ }
++}
++
++/*!@ingroup UDC_UtilitiesAPI
++ *@fn static void ioh_udc_svc_control_out(struct ioh_udc_dev *dev)
++ *@brief Routine that handle Control OUT endpoint interrupts
++ *@param dev Reference to the device structure
++ *@return none
++ *@remarks The following actions are performed:
++ * - If Buffer NOT Available, log a debug message
++ * - If setup data present
++ *- Set device stall status to False
++ *- Invoke ioh_udc_ep_set_nak API to set SNAK field of IN endpoint register
++ *- Get the data from setup data descriptor
++ * Initialize setup data descriptor using ioh_udc_fifo_read API
++ *- Disable the TX DMA using ioh_udc_clear_dma API
++ *- Make the gadget's ep0 point to appropriate endpoint
++ * structure (ep0 IN or OUT)
++ * based on the request type
++ *- If the receive data is "Mass storage Reset", clear the protocol stall flag.
++ *- Invoke the gadget driver's setup routine with data received
++ *- if the setup routine returns a value between 0 and maximum packet size
++ * (ep0 in returns data on IN phase),
++ * - Invoke ioh_udc_ep_clear_nak API to clear NAK for In endpoint 0.
++ * - Enable DMA for RX using ioh_udc_set_dma API.
++ * - Invoke ioh_udc_ep_clear_nak API to clear NAK for In endpoint 0.
++ *- If the setup routine fails (returns < 0),
++ * - Stall the device using ioh_udc_ep_set_stall API
++ * - Enable the interrupt endpoint
++ * - Enable DMA for RX using ioh_udc_set_dma API.
++ *- Otherwise
++ * - Set 'waiting for zlp ACK' status as TRUE
++ *- If data is present
++ *- If request queue is empty,
++ * - Set the descriptor status as HOST_BUSY
++ * - Enable DMA for RX using ioh_udc_set_dma API.
++ *- Otherwise
++ * - Invoke ioh_udc_svc_data_out to handle the request.
++ * - Re-program the data descriptor using ioh_udc_ep_set_ddptr API
++ * - Enable Rx DMA using ioh_udc_set_dma API
++ *@see
++ * - ioh_udc_ep_set_nak
++ * - ioh_udc_init_setup_buff
++ * - ioh_udc_clear_dma
++ * - ioh_udc_ep_fifo_flush
++ * - ioh_udc_ep_clear_nak
++ * - ioh_udc_set_dma
++ * - ioh_udc_ep_set_stall
++ * - ioh_udc_enable_ep_interrupts
++ * - ioh_udc_svc_data_out
++ * - ioh_udc_ep_set_ddptr
++ * - ioh_udc_ep_set_rrdy
++ */
++static void ioh_udc_svc_control_out(struct ioh_udc_dev *dev)
++{
++ u32 stat;
++ int setup_supported;
++ struct ioh_udc_ep *ep;
++
++ IOH_DEBUG("ioh_udc_ep0_out");
++ ep = &dev->ep[UDC_EP0OUT_IDX];
++ stat = ep->epsts;
++ ep->epsts = 0;
++
++ if (stat & (1 << UDC_EPSTS_BNA)) {
++ IOH_DEBUG("EP0: BNA");
++ /* When we get a request, we will populate the descriptors. */
++ /* Anything else to do? */
++ }
++ /* If setup data */
++ if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_OFS) ==
++ UDC_EPSTS_OUT_SETUP) {
++ dev->stall = 0;
++ dev->ep[UDC_EP0IN_IDX].halted = 0;
++ dev->ep[UDC_EP0OUT_IDX].halted = 0;
++ /* In data not ready */
++ ioh_udc_ep_set_nak(dev->ep[UDC_EP0IN_IDX].regs);
++ setup_data.data[0] = ep->td_stp->data12;
++ setup_data.data[1] = ep->td_stp->data34;
++ IOH_DEBUG("EP0 setup data12: 0x%x data34:0x%x",
++ ep->td_stp->data12, ep->td_stp->data34);
++ ioh_udc_init_setup_buff(ep->td_stp);
++ ioh_udc_clear_dma(dev->regs, DMA_DIR_TX);
++ ioh_udc_ep_fifo_flush(dev->ep[UDC_EP0IN_IDX].regs,
++ dev->ep[UDC_EP0IN_IDX].in);
++ if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
++ dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
++ } else { /* OUT */
++ dev->gadget.ep0 = &ep->ep;
++ }
++ IOH_DEBUG("EP0 setup data: 0x%x 0x%x", setup_data.data[0],
++ setup_data.data[1]);
++ spin_unlock(&dev->lock);
++ /* Mass storage Reset */
++ if ((setup_data.data[0] == 0x0000ff21) && (setup_data.data[1] ==
++ 0x00000000)) {
++ dev->prot_stall = 0;
++ IOH_DEBUG("Mass storage reset prot_stall = %d",
++ dev->prot_stall);
++ }
++ /* call gadget with setup data received */
++ setup_supported = dev->driver->setup(&dev->gadget,
++ &setup_data.request);
++ spin_lock(&dev->lock);
++
++ /* ep0 in returns data on IN phase */
++ if (setup_supported >= 0 && setup_supported <
++ UDC_EP0IN_MAX_PKT_SIZE) {
++ ioh_udc_ep_clear_nak(dev->ep[UDC_EP0IN_IDX].regs);
++ /* Gadget would have queued a request when
++ we called the setup */
++ ioh_udc_set_dma(dev->regs, DMA_DIR_RX);
++ ioh_udc_ep_clear_nak(ep->regs);
++ } else if (setup_supported < 0) {
++ /* if unsupported request, then stall */
++ IOH_DEBUG("EP0 setup unsupported: ep0_set_stall");
++ ioh_udc_ep_set_stall(dev->ep[UDC_EP0IN_IDX].regs);
++ ioh_udc_enable_ep_interrupts(ep->dev->regs,
++ 1 << (ep->in ? ep->num : ep->num +
++ UDC_EPINT_OUT_EP0));
++ dev->stall = 0;
++ ioh_udc_set_dma(dev->regs, DMA_DIR_RX);
++ } else {
++ dev->waiting_zlp_ack = 1;
++ }
++ } else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_OFS) ==
++ UDC_EPSTS_OUT_DATA) && (dev->stall == 0)) {
++ if (list_empty(&ep->queue)) {
++ IOH_LOG(KERN_ERR, "control_out: ZLP");
++ /* If no requests, reactivate */
++ ep->td_data->status =
++ (ep->td_data->status &
++ ~IOH_UDC_BUFF_STS) |
++ IOH_UDC_BS_HST_RDY;
++ /* Enable RDE */
++ ioh_udc_set_dma(dev->regs, DMA_DIR_RX);
++ } else {
++ /* control write */
++ ioh_udc_svc_data_out(dev, UDC_EP0OUT_IDX);
++ /* re-program desc. pointer for possible ZLPs */
++ ioh_udc_ep_set_ddptr(ep->regs,
++ ep->td_data_phys);
++ /* Enable RDE */
++ ioh_udc_set_dma(dev->regs, DMA_DIR_RX);
++ }
++ }
++ ioh_udc_ep_set_rrdy(ep->regs);
++}
++
++/*!@ingroup UDC_UtilitiesAPI
++ *@fn static void ioh_udc_svc_data_in(struct ioh_udc_dev *dev, int ep_num)
++ *@brief This function process endpoint interrupts for IN endpoints
++ *@param dev Reference to the device structure
++ *@param ep_num Endpoint that generated the interrupt
++ *@return none
++ *@remarks The interrupts are processed as follows:
++ *- If the interrupt is HE, log the error message
++ *- If the interrupt is RSS,
++ * - Set STALL using ioh_udc_ep_set_stall API
++ * - Enable endpoint interrupts for out endpoint
++ *- If the interrupt is RCS,
++ * - If the protocol stall flag not set,
++ * - Clear stall using ioh_udc_ep_clear_stall API
++ * - Otherwise
++ * - Set STALL using ioh_udc_ep_set_stall API.
++ * - Enable endpoint interrupts for out endpoint
++ *- If the interrupt is TDC,
++ * - Invoke the completion routine using ioh_udc_complete_receiver API
++ *- If the interrupt is IN, and is not RSS/TDC/TXEMPTY
++ * - Start the next transmit request using ioh_udc_start_next_txrequest API
++ *@see
++ * - ioh_udc_ep_set_stall
++ * - ioh_udc_enable_ep_interrupts
++ * - ioh_udc_ep_clear_stall
++ * - ioh_udc_complete_transfer
++ * - ioh_udc_start_next_txrequest
++ */
++static void ioh_udc_svc_data_in(struct ioh_udc_dev *dev, int ep_num)
++{
++ u32 epsts;
++ struct ioh_udc_ep *ep;
++ ep = &dev->ep[2*ep_num];
++ epsts = ep->epsts;
++ ep->epsts = 0;
++
++ IOH_DEBUG("ioh_udc_svc_data_in: ep%d%s status = 0x%08x", ep->num,
++ (ep->in ? "in" : "out"), epsts);
++
++ if ((epsts & ((1 << UDC_EPSTS_IN) | (1 << UDC_EPSTS_BNA) |
++ (1 << UDC_EPSTS_HE) |
++ (1 << UDC_EPSTS_TDC) | (1 << UDC_EPSTS_RCS) |
++ (1 << UDC_EPSTS_TXEMPTY) |
++ (1 << UDC_EPSTS_RSS) |
++ (1 << UDC_EPSTS_XFERDONE))) == 0) {
++ IOH_DEBUG("Non interrupt request ep%din status %x",
++ ep->num, epsts);
++ return;
++ }
++ if ((epsts & (1 << UDC_EPSTS_BNA))) { /* Just log it */
++ IOH_DEBUG("BNA on ep%din occured", ep->num);
++ return;
++ }
++ if (epsts & (1 << UDC_EPSTS_HE)) {
++ IOH_DEBUG("Host Error on ep%din occured", ep->num);
++ return;
++ }
++ if (epsts & (1 << UDC_EPSTS_RSS)) {
++ IOH_DEBUG("ioh_udc_svc_data_in: RSS");
++ ioh_udc_ep_set_stall(ep->regs);
++ ioh_udc_enable_ep_interrupts(ep->dev->regs,
++ 1 << (ep->in ? ep->num : ep->num + UDC_EPINT_OUT_EP0));
++ }
++ if (epsts & (1 << UDC_EPSTS_RCS)) {
++ IOH_DEBUG("ioh_udc_svc_data_in: RCS prot_stall = %d",
++ dev->prot_stall);
++ if (dev->prot_stall == 0) {
++ ioh_udc_ep_clear_stall(ep->regs);
++ } else {
++ ioh_udc_ep_set_stall(ep->regs);
++ ioh_udc_enable_ep_interrupts(ep->dev->regs,
++ 1 << (ep->in ? ep->num : ep->num +
++ UDC_EPINT_OUT_EP0));
++ }
++ }
++ if (epsts & (1 << UDC_EPSTS_TDC)) { /* DMA completed */
++ ioh_udc_complete_transfer(ep);
++ }
++ /* On IN interrupt, provide data if we have any */
++ if ((epsts & (1 << UDC_EPSTS_IN)) &&
++ ((epsts & (1 << UDC_EPSTS_RSS)) == 0) &&
++ ((epsts & (1 << UDC_EPSTS_TDC)) == 0) &&
++ ((epsts & (1 << UDC_EPSTS_TXEMPTY)) == 0)) {
++ ioh_udc_start_next_txrequest(ep);
++ }
++ IOH_DEBUG("ioh_udc_ep_in intr ep ctrl = 0x%x",
++ IOH_READ32((u32)&ep->regs->epctl));
++}
++
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn static void ioh_udc_complete_transfer(struct ioh_udc_ep *ep)
++ *@brief This function completes a transfer
++ *@param ep Reference to the endpoint structure
++ *@remarks The following actions are performed:
++ * - Get the request from the queue
++ * - If the request is valid, do the following
++ *- Get the last dma descriptor. if the DMA transfer is completed,
++ * - update the request length as actual
++ *- If the actual length has been updated,
++ * - update the status field of the last request
++ * - complete the request using complete_req function
++ * - set dma in progress status as false
++ * - if there are pending requests, clear NAK using ioh_udc_ep_clear_nak
++ * and enable
++ * endpoint interrupts
++ * - if there are no pending requests, set DMA in progress status
++ * as false and disable
++ * endpoint interrupts
++ *@return none
++ *@see
++ * - complete_req
++ * - ioh_udc_read_ep_control
++ * - ioh_udc_ep_clear_nak
++ * - ioh_udc_enable_ep_interrupts
++ * - ioh_udc_disable_ep_interrupts
++ */
++static void ioh_udc_complete_transfer(struct ioh_udc_ep *ep)
++{
++ struct ioh_udc_request *req;
++
++ IOH_DEBUG("ioh_udc_complete_transfer");
++
++ if (!list_empty(&ep->queue)) {
++ IOH_DEBUG("list_entry");
++ req = list_entry(ep->queue.next, struct ioh_udc_request, queue);
++ if (req && ((req->td_data_last->status & IOH_UDC_BUFF_STS) ==
++ IOH_UDC_BS_DMA_DONE)) {
++#ifdef DMA_PPB_WITH_DESC_UPDATE
++ struct ioh_udc_data_dma_desc *td_data = req->td_data;
++ for (i = 0; i < req->chain_len; i++) {
++ if ((td_data->status & IOH_UDC_RXTX_STS) !=
++ IOH_UDC_RTS_SUCC) {
++ IOH_LOG(KERN_ERR, "Invalid RXTX status\
++ (0x%08x) epstatus=0x%08x\n",
++ (td_data->status &
++ IOH_UDC_RXTX_STS),
++ (int)(ep->epsts));
++ return;
++ }
++ td_data = (struct ioh_udc_data_dma_desc *)
++ phys_to_virt(td_data->next);
++ }
++#else
++ if ((req->td_data_last->status & IOH_UDC_RXTX_STS) !=
++ IOH_UDC_RTS_SUCC) {
++ IOH_LOG(KERN_ERR, "Invalid RXTX status (0x%08x)\
++ epstatus=0x%08x\n",
++ (req->td_data_last->status &
++ IOH_UDC_RXTX_STS),
++ (int)(ep->epsts));
++ return;
++ }
++#endif
++ req->req.actual = req->req.length;
++ req->td_data_last->status = IOH_UDC_BS_HST_BSY |
++ IOH_UDC_DMA_LAST;
++ req->td_data->status = IOH_UDC_BS_HST_BSY |
++ IOH_UDC_DMA_LAST;
++ /* complete req */
++ complete_req(ep, req, 0);
++ req->dma_going = 0;
++ if (!list_empty(&ep->queue)) {
++ while (ioh_udc_read_ep_control(ep->regs) &
++ (1 << UDC_EPCTL_S))
++ udelay(100);
++
++ ioh_udc_ep_clear_nak(ep->regs);
++ ioh_udc_enable_ep_interrupts(ep->dev->regs,
++ 1 << (ep->in ? ep->num : ep->num +
++ UDC_EPINT_OUT_EP0));
++ } else {
++ ioh_udc_disable_ep_interrupts(ep->dev->regs,
++ 1 << (ep->in ? ep->num : ep->num +
++ UDC_EPINT_OUT_EP0));
++ }
++ }
++ }
++}
++
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn static void ioh_udc_complete_receiver(struct ioh_udc_ep *ep)
++ *@brief This function completes a receiver
++ *@param ep Reference to the endpoint structure
++ *@remarks The following actions are performed:
++ * - If request queue is not empty, get next request
++ * - If buffer status is DMA Done
++ * - Disable DMA using ioh_udc_clear_dma API
++ * - Get size of Rx bytes
++ * - Set the descriptor status
++ * - Complete the request using complete_req API
++ * - If there is a new/failed requests try that
++ * now using ioh_udc_start_rxrequest API
++ * - Otherwise
++ * - Set receive ready using ioh_udc_ep_set_rrdy
++ *@return none
++ *@see
++ * - ioh_udc_clear_dma
++ * - complete_req
++ * - ioh_udc_start_rxrequest
++ * - ioh_udc_ep_set_rrdy
++ */
++static void ioh_udc_complete_receiver(struct ioh_udc_ep *ep)
++{
++ struct ioh_udc_request *req;
++ unsigned int count;
++
++ IOH_DEBUG("ioh_udc_complete_receiver");
++
++ if (!list_empty(&ep->queue)) {
++ /* next request */
++ req = list_entry(ep->queue.next, struct ioh_udc_request, queue);
++ if (req && (req->td_data_last->status & IOH_UDC_BUFF_STS) ==
++ IOH_UDC_BS_DMA_DONE) {
++ IOH_DEBUG("ioh_udc_complete_receiver: ep%d%s DMA Done",
++ ep->num, (ep->in ? "in" : "out"));
++ /* Disable DMA */
++ ioh_udc_clear_dma(ep->dev->regs, DMA_DIR_RX);
++#ifdef DMA_PPB_WITH_DESC_UPDATE
++{
++ /* Get Rx bytes */
++ struct ioh_udc_data_dma_desc *td_data = req->td_data;
++ for (i = 0, count = 0; i < req->chain_len; i++) {
++ if ((td_data->status & IOH_UDC_RXTX_STS) !=
++ IOH_UDC_RTS_SUCC) {
++ IOH_LOG(KERN_ERR, "Invalid RXTX status\
++ (0x%08x) epstatus=0x%08x\n",
++ (td_data->status &
++ IOH_UDC_RXTX_STS),
++ (int)(ep->epsts));
++ return;
++ }
++ count += td_data->status & IOH_UDC_RXTX_BYTES;
++ td_data = (struct ioh_udc_data_dma_desc *)\
++ phys_to_virt(td_data->next);
++ }
++}
++#else
++ if ((req->td_data_last->status & IOH_UDC_RXTX_STS) !=
++ IOH_UDC_RTS_SUCC) {
++ IOH_LOG(KERN_ERR, "Invalid RXTX status (0x%08x)\
++ epstatus=0x%08x\n",
++ (req->td_data_last->status &
++ IOH_UDC_RXTX_STS),
++ (int)(ep->epsts));
++ return;
++ }
++ count = req->td_data_last->status & IOH_UDC_RXTX_BYTES;
++#endif
++ if ((count == 0) && (req->req.length ==
++ UDC_DMA_MAXPACKET)) {
++ /* on 64k packets the RXBYTES field is zero */
++ count = UDC_DMA_MAXPACKET;
++ }
++
++ /* Set the descriptor status */
++ req->td_data->status |= IOH_UDC_DMA_LAST;
++ req->td_data_last->status |= IOH_UDC_BS_HST_BSY;
++
++ req->dma_going = 0;
++ /* complete request */
++ req->req.actual = count;
++ complete_req(ep, req, 0);
++
++ /* If there is a new/failed requests try that now */
++ if (!list_empty(&ep->queue)) {
++ req = list_entry(ep->queue.next,
++ struct ioh_udc_request, queue);
++ ioh_udc_start_rxrequest(ep, req);
++ }
++ }
++#ifdef DMA_PPB_WITH_DESC_UPDATE
++ else {
++ IOH_DEBUG("ioh_udc_complete_receiver: ep%d%s \
++ DMA not Done", ep->num, (ep->in ? "in" : "out"));
++ ioh_udc_ep_set_rrdy(ep->regs);
++ }
++#endif
++ }
++}
++
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn static void ioh_udc_start_next_txrequest(struct ioh_udc_ep *ep)
++ *@brief This function starts the next transmit request
++ *@param ep Reference to the endpoint structure
++ *@remarks The following actions are performed:
++ *- If poll demand bit is set, return
++ *- If request queue is not empty, get next request from the queue
++ *- If the DMA going flag of next request queue is 1, invoke the following
++ * actions.
++ * - Clear the descriptor pointer
++ * - Set the all descriptor status to "Host ready"
++ * - Write the descriptor pointer
++ * - Enable the Tx DMA
++ * - Set the poll demand bit
++ * - Enable the interrupts of endpoint
++ * - Clear the status of NAK
++ *@see
++ * - ioh_udc_read_ep_control
++ * - ioh_udc_ep_set_ddptr
++ * - ioh_udc_set_dma
++ * - ioh_udc_ep_set_pd
++ * - ioh_udc_enable_ep_interrupts
++ * - ioh_udc_ep_clear_nak
++ */
++static void ioh_udc_start_next_txrequest(struct ioh_udc_ep *ep)
++{
++ struct ioh_udc_request *req;
++ IOH_DEBUG("ioh_udc_start_next_txrequest: enter");
++ if (ioh_udc_read_ep_control(ep->regs) & (1 << UDC_EPCTL_P))
++ return;
++
++ if (!list_empty(&ep->queue)) {
++ /* next request */
++ req = list_entry(ep->queue.next, struct ioh_udc_request, queue);
++ if (req && !req->dma_going) {
++ IOH_DEBUG("Set request: req=%p req->td_data=%p", req,
++ req->td_data);
++ if (req->td_data) {
++ struct ioh_udc_data_dma_desc *td_data;
++
++ while (ioh_udc_read_ep_control(ep->regs) &
++ (1 << UDC_EPCTL_S))
++ udelay(100);
++
++ req->dma_going = 1;
++ /* Clear the descriptor pointer */
++ ioh_udc_ep_set_ddptr(ep->regs, 0);
++
++ td_data = req->td_data;
++ while (1) {
++ td_data->status = (td_data->status &
++ ~IOH_UDC_BUFF_STS) |
++ IOH_UDC_BS_HST_RDY;
++ if ((td_data->status &
++ IOH_UDC_DMA_LAST) ==
++ IOH_UDC_DMA_LAST)
++ break;
++
++ td_data =
++ (struct ioh_udc_data_dma_desc *)\
++ phys_to_virt(td_data->next);
++ }
++ /* Write the descriptor pointer */
++ ioh_udc_ep_set_ddptr(ep->regs,
++ req->td_data_phys);
++ /* Enable the Tx DMA */
++ ioh_udc_set_dma(ep->dev->regs, DMA_DIR_TX);
++ /* Set the poll demand bit */
++ ioh_udc_ep_set_pd(ep->regs);
++ /* Enable the interrupts of endpoint */
++ ioh_udc_enable_ep_interrupts(ep->dev->regs,
++ 1 << (ep->in ? ep->num :\
++ ep->num + UDC_EPINT_OUT_EP0));
++ /* Clear NAK */
++ ioh_udc_ep_clear_nak(ep->regs);
++ }
++ }
++ }
++}
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn static void ioh_udc_start_rxrequest(struct ioh_udc_ep *ep,
++ * struct ioh_udc_request *req)
++ *@brief This function starts the receive request.
++ *@param ep Reference to the endpoint structure
++ *@param req Reference to the request structure
++ *@remarks The following actions are performed:
++ * - Disable the Rx DMA
++ * - Set the status bits for all descriptors
++ * - Write the descriptor pointer
++ * - Enable the interrupts of endpoint
++ * - Enable the Rx DMA
++ * - Clear NAK
++ * - Set the receive ready
++ *@see
++ * - ioh_udc_clear_dma
++ * - ioh_udc_ep_set_ddptr
++ * - ioh_udc_enable_ep_interrupts
++ * - ioh_udc_set_dma
++ * - ioh_udc_ep_clear_nak
++ * - ioh_udc_ep_set_rrdy
++ */
++static void ioh_udc_start_rxrequest(struct ioh_udc_ep *ep,
++ struct ioh_udc_request *req)
++{
++ struct ioh_udc_data_dma_desc *td_data;
++
++ IOH_DEBUG("ioh_udc_start_request: enter");
++ /* Disable Rx DMA */
++ ioh_udc_clear_dma(ep->dev->regs, DMA_DIR_RX);
++ td_data = req->td_data;
++ ep->td_data = req->td_data;
++ /* Set the status bits for all descriptors */
++ while (1) {
++ td_data->status = (td_data->status & ~IOH_UDC_BUFF_STS) |
++ IOH_UDC_BS_HST_RDY;
++ if ((td_data->status & IOH_UDC_DMA_LAST) == IOH_UDC_DMA_LAST)
++ break;
++
++ td_data = (struct ioh_udc_data_dma_desc *) \
++ phys_to_virt(td_data->next);
++ }
++ /* Write the descriptor pointer */
++ ioh_udc_ep_set_ddptr(ep->regs, req->td_data_phys);
++ req->dma_going = 1;
++ /* Enable endpoint interrupts */
++ ioh_udc_enable_ep_interrupts(ep->dev->regs, 1 <<
++ (ep->num + UDC_EPINT_OUT_EP0));
++ /* Enable Rx DMA */
++ ioh_udc_set_dma(ep->dev->regs, DMA_DIR_RX);
++ /* Clear NAK */
++ ioh_udc_ep_clear_nak(ep->regs);
++ /* Set receive ready */
++ ioh_udc_ep_set_rrdy(ep->regs);
++}
++
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn static void ioh_udc_postsvc_epinters(struct ioh_udc_dev *dev,
++ * int ep_num)
++ *@brief This function enables end point interrupts and clears NAK status
++ *@param dev Reference to the device structure
++ *@param ep_num End point number
++ *@return none
++ *@remarks The following actions are performed:
++ * - If the end point request queue is not empty,
++ * - Enable the endpoint interrupts using
++ * ioh_udc_enable_ep_interrupts
++ * - Clear Endpoint NAK status using ioh_udc_ep_clear_nak
++ *@see
++ * - ioh_udc_enable_ep_interrupts
++ * - ioh_udc_ep_clear_nak
++ */
++static void ioh_udc_postsvc_epinters(struct ioh_udc_dev *dev, int ep_num)
++{
++ struct ioh_udc_ep *ep;
++ struct ioh_udc_request *req;
++ ep = &dev->ep[2*ep_num];
++
++ if (!list_empty(&ep->queue)) {
++ req = list_entry(ep->queue.next, struct ioh_udc_request, queue);
++ ioh_udc_enable_ep_interrupts(ep->dev->regs,
++ 1 << (ep->in ? ep->num : ep->num + UDC_EPINT_OUT_EP0));
++ ioh_udc_ep_clear_nak(ep->regs);
++ }
++}
++
++/* Interrupt Service Routine */
++/*!@ingroup UDC_InterfaceLayerAPI
++ *@fn irqreturn_t ioh_udc_isr(int irq, void *pdev)
++ *@brief This function handles interrupts from the IOH USB Device
++ *@param irq Interrupt request number
++ *@param dev Reference to the device structure
++ *@return none
++ *@remarks The following actions are performed:
++ *- Read the device and endpoint interrupt registers
++ *- Clear the device and endpoint interrupts using
++ * ioh_udc_write_device_interrupts
++ * and ioh_udc_write_ep_interrupts APIs respectively
++ *- If there are no interrupts, return IRQ_NONE
++ *- If there are any device interrupt(s), invoke ioh_udc_dev_isr
++ * to process those
++ *- Invoke ioh_udc_read_all_epstatus to read endpoint interrupts status
++ *- If there is interrupt asserted on EP0 IN,
++ * invoke ioh_udc_svc_control_in and ioh_udc_postsvc_epinters.
++ *- If there is interrupt asserted on EP0 OUT, invoke ioh_udc_svc_control_out
++ *- For all other IN end points for which the interrupt bit is set,
++ * invoke ioh_udc_svc_data_in and ioh_udc_postsvc_epinters.
++ *- For all other OUT end points for which the interrupt bit is set,
++ * invoke ioh_udc_svc_data_out
++ *- Return IRQ_HANDLED
++ *@see
++ * - ioh_udc_read_device_interrupts
++ * - ioh_udc_read_ep_interrupts
++ * - ioh_udc_write_device_interrupts
++ * - ioh_udc_write_ep_interrupts
++ * - ioh_udc_dev_isr
++ * - ioh_udc_svc_control_out
++ * - ioh_udc_svc_control_in
++ * - ioh_udc_svc_data_out
++ * - ioh_udc_svc_data_in
++ * - ioh_udc_read_all_epstatus
++ * - ioh_udc_postsvc_epinters
++ */
++irqreturn_t ioh_udc_isr(int irq, void *pdev)
++{
++ struct ioh_udc_dev *dev;
++ u32 dev_intr, ep_intr;
++ int i;
++
++ IOH_DEBUG("ioh_udc_isr: enter");
++ dev = (struct ioh_udc_dev *) pdev;
++ dev_intr = ioh_udc_read_device_interrupts(dev->regs);
++ ep_intr = ioh_udc_read_ep_interrupts(dev->regs);
++
++ if (dev_intr != 0) {
++ /* Clear device interrupts */
++ ioh_udc_write_device_interrupts(dev->regs, dev_intr);
++ }
++ if (ep_intr != 0) {
++ /* Clear ep interrupts */
++ ioh_udc_write_ep_interrupts(dev->regs, ep_intr);
++ }
++ if ((dev_intr == 0) && (ep_intr == 0)) {
++ IOH_DEBUG("ioh_udc_isr: exit IRQ_NONE");
++ return IRQ_NONE;
++ }
++ spin_lock(&dev->lock);
++
++ if (dev_intr != 0) {
++ IOH_DEBUG("ioh_udc_isr: device intr 0x%x", dev_intr);
++ ioh_udc_dev_isr(dev, dev_intr);
++ }
++
++ if (ep_intr != 0) {
++ IOH_DEBUG("ioh_udc_isr: ep intr 0x%x", ep_intr);
++ ioh_udc_read_all_epstatus(dev, ep_intr);
++
++ /* Process Control In interrupts, if present */
++ if (ep_intr & (1 << UDC_EPINT_IN_EP0)) {
++ ioh_udc_svc_control_in(dev);
++ ioh_udc_postsvc_epinters(dev, 0);
++ }
++ /* Process Control Out interrupts, if present */
++ if (ep_intr & (1 << UDC_EPINT_OUT_EP0))
++ ioh_udc_svc_control_out(dev);
++
++ /* Process data in end point interrupts */
++ for (i = 1; i < IOH_UDC_USED_EP_NUM; i++) {
++ if (ep_intr & (1 << i)) {
++ ioh_udc_svc_data_in(dev, i);
++ ioh_udc_postsvc_epinters(dev, i);
++ }
++ }
++ /* Process data out end point interrupts */
++ for (i = UDC_EPINT_OUT_EP1; i < (UDC_EPINT_OUT_EP0 +
++ IOH_UDC_USED_EP_NUM); i++) {
++ if (ep_intr & (1 << i))
++ ioh_udc_svc_data_out(dev, i -
++ UDC_EPINT_OUT_EP0);
++ }
++ }
++ spin_unlock(&dev->lock);
++ IOH_DEBUG("ioh_udc_isr: exit IRQ_HANDLED");
++
++ return IRQ_HANDLED;
++}
++
++/*!@ingroup UDC_UtilitiesAPI
++ *@fn void ioh_udc_activate_control_ep(struct ioh_udc_dev *dev)
++ *@brief This function enables the control endpoints for traffic after a reset
++ *@param dev Reference to the device structure
++ *@return none
++ *@remarks The following actions are performed:
++ *- Setup IN endpoint,
++ * - Flush the TX FIFO using ioh_udc_ep_fifo_flush API
++ * - Set the buffer size for TXFIFO entries of EP0_IN using
++ * ioh_udc_ep_set_bufsz API
++ * - Set max packet size of EP0 IN using ioh_udc_ep_set_maxpkt API
++ * - Initialize the IN EP Descriptor
++ *- Setup OUT endpoint,
++ * - Flush the RX FIFO using ioh_udc_ep_fifo_flush API
++ * - Set max packet size of EP0 OUT using ioh_udc_ep_set_maxpkt.
++ * - Set max packet size of EP0 OUT UDC CSR
++ * - Initialize the SETUP buffer
++ * - Write the DMA Descriptor address
++ * - Write the Setup Descriptor address
++ * - Initialize the DMA descriptor
++ * - Clear NAK
++ *@see
++ * - ioh_udc_clear_ep_control
++ * - ioh_udc_ep_fifo_flush
++ * - ioh_udc_ep_set_bufsz
++ * - ioh_udc_ep_set_maxpkt
++ * - ioh_udc_write_csr
++ * - ioh_udc_init_setup_buff
++ * - ioh_udc_ep_set_subptr
++ * - ioh_udc_ep_set_ddptr
++ * - ioh_udc_ep_clear_nak
++ */
++void ioh_udc_activate_control_ep(struct ioh_udc_dev *dev)
++{
++ struct ioh_udc_ep *ep;
++ u32 val;
++ /* Setup IN endpoint ----------------------------- */
++ ep = &dev->ep[UDC_EP0IN_IDX];
++
++ /* Flush TX fifo */
++ ioh_udc_clear_ep_control(ep->regs);
++ ioh_udc_ep_fifo_flush(ep->regs, ep->in);
++
++ /* Set buffer size (tx fifo entries) of EP0_IN */
++ ioh_udc_ep_set_bufsz(ep->regs, UDC_EP0IN_BUFF_SIZE, ep->in);
++
++ /* Set max packet size of EP0_IN */
++ ioh_udc_ep_set_maxpkt(ep->regs, UDC_EP0IN_MAX_PKT_SIZE);
++
++ /* Initialize the IN EP Descriptor */
++ ep->td_data = NULL;
++ ep->td_stp = NULL;
++ ep->td_data_phys = 0;
++ ep->td_stp_phys = 0;
++
++ /* Setup OUT endpoint ----------------------------- */
++ ep = &dev->ep[UDC_EP0OUT_IDX];
++
++ /* Flush RX fifo */
++ ioh_udc_clear_ep_control(ep->regs);
++ ioh_udc_ep_fifo_flush(ep->regs, ep->in);
++
++ /* Set buffer size (rx fifo entries) of EP0_OUT */
++ ioh_udc_ep_set_bufsz(ep->regs, UDC_EP0OUT_BUFF_SIZE, ep->in);
++
++ /* Set max packet size of EP0_OUT */
++ ioh_udc_ep_set_maxpkt(ep->regs, UDC_EP0OUT_MAX_PKT_SIZE);
++
++ /* Set max packet size of EP0 OUT UDC CSR */
++ val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_OFS;
++ ioh_udc_write_csr(val, (u32) (&dev->csr->ne[UDC_EP0OUT_IDX]));
++
++ /* Initialize the SETUP buffer */
++ ioh_udc_init_setup_buff(ep->td_stp);
++
++ /* Write dma desc address */
++ ioh_udc_ep_set_subptr(ep->regs, ep->td_stp_phys);
++
++ /* Write Setup desc address */
++ ioh_udc_ep_set_ddptr(ep->regs, ep->td_data_phys);
++
++ /* Initialize dma descriptor */
++ ep->td_data->status = IOH_UDC_DMA_LAST;
++ ep->td_data->dataptr = dma_addr;
++ ep->td_data->next = ep->td_data_phys;
++
++ /* Clear NAK */
++ ioh_udc_ep_clear_nak(ep->regs);
++}
++
++/*!@ingroup UDC_UtilitiesAPI
++ *@fn ioh_udc_read_all_epstatus(struct ioh_udc_dev *dev, u32 ep_intr)
++ *@brief This function read all endpoint status
++ *@param dev Reference to the device structure
++ *@param ep_intr Status of endpoint interrupt
++ *@return none
++ *@remarks The following actions are performed:
++ * - For each of the used endpoints,
++ * - Read the endpoint status using
++ * ioh_udc_read_ep_status
++ * - Invoke ioh_udc_clear_ep_status to clear
++ * the status
++ *@see
++ * - ioh_udc_read_ep_status
++ * - ioh_udc_clear_ep_status
++ */
++static void ioh_udc_read_all_epstatus(struct ioh_udc_dev *dev, u32 ep_intr)
++{
++ int i;
++ struct ioh_udc_ep *ep;
++
++ for (i = 0; i < IOH_UDC_USED_EP_NUM; i++) {
++ /* IN */
++ if (ep_intr & (0x1 << i)) {
++ ep = &dev->ep[2*i];
++ ep->epsts = ioh_udc_read_ep_status(ep->regs);
++ ioh_udc_clear_ep_status(ep->regs, ep->epsts);
++ }
++ /* OUT */
++ if (ep_intr & (0x10000 << i)) {
++ ep = &dev->ep[2*i+1];
++ ep->epsts = ioh_udc_read_ep_status(ep->regs);
++ ioh_udc_clear_ep_status(ep->regs, ep->epsts);
++ }
++ }
++ return;
++}
++
++/*!@ingroup UDC_UtilitiesAPI
++ *@fn static void ioh_udc_setup_ep0(struct ioh_udc_dev *dev)
++ *@brief This function enables control endpoint for traffic
++ *@remarks The following actions are performed:
++ * - Enable ep0 interrupts using
++ * ioh_udc_enable_ep_interrupts API.
++ * - Enable device interrupts (Set configuration command
++ * interrupt,
++ * Set interface command interrupt, ENUM interrupt,
++ * USB Reset interrupt)
++ * using ioh_udc_enable_interrupts API
++ *@param dev Reference to the device structure
++ *@return none
++ *@see
++ * - ioh_udc_enable_ep_interrupts
++ * - ioh_udc_enable_interrupts
++ */
++static void ioh_udc_setup_ep0(struct ioh_udc_dev *dev)
++{
++ /* enable ep0 interrupts */
++ ioh_udc_enable_ep_interrupts(dev->regs, 1 << UDC_EPINT_IN_EP0 |
++ 1 << UDC_EPINT_OUT_EP0);
++
++ /* enable device interrupts */
++ ioh_udc_enable_interrupts(dev->regs, (1 << UDC_DEVINT_UR) |
++ (1 << UDC_DEVINT_US) | (1 << UDC_DEVINT_ES) |
++ (1 << UDC_DEVINT_ENUM) | (1 << UDC_DEVINT_SI) |
++ (1 << UDC_DEVINT_SC));
++ IOH_DEBUG("Dev intr mask set to %x",
++ IOH_READ32((u32 *)&(dev->regs->devirqmsk)));
++ IOH_DEBUG("Ep intr mask set to %x",
++ IOH_READ32((u32 *)&(dev->regs->epirqmsk)));
++}
++
++/*!@ingroup UDC_UtilitiesAPI
++ *@fn ioh_udc_init_setup_buff(struct ioh_udc_stp_dma_desc *td_stp)
++ *@brief This function initializes the SETUP buffer
++ *@param td_stp Reference to the SETP buffer structure
++ *@return none
++ */
++static void ioh_udc_init_setup_buff(struct ioh_udc_stp_dma_desc *td_stp)
++{
++ static u32 pky_marker;
++ IOH_DEBUG("ioh_udc_init_setup_buff");
++
++ if (td_stp == NULL) {
++ IOH_DEBUG("SETUP BUFF == NULL");
++ return;
++ }
++ td_stp->reserved = ++pky_marker;
++ td_stp->data12 = 0xFFFFFFFF;
++ td_stp->data34 = 0xFFFFFFFF;
++ td_stp->status = IOH_UDC_BS_HST_RDY;
++}
++
+--- /dev/null
++++ b/drivers/usb/gadget/pch_udc.h
+@@ -0,0 +1,172 @@
++ /*!
++ * @file ioh_udc.h
++ * @brief
++ * The IOH UDC is a USB High speed DMA capable USB device controller.
++ * It provides 4 IN and 4 OUT endpoints (control, bulk isochronous or
++ * interrupt type).
++ *
++ * The IOH USB device controller driver provides required interface
++ * to the USB gadget framework for accessing the IOH USB device hardware.
++ *
++ * @version 0.96
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++ /*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 2/26/2010
++ * modified:
++ *
++ */
++
++#ifndef _IOH_UDC_H_
++#include <linux/types.h>
++#include <linux/irq.h>
++
++extern const struct usb_gadget_ops ioh_udc_ops;
++extern struct ioh_udc_dev *ioh_udc;
++
++extern irqreturn_t ioh_udc_isr(int irq, void *pdev);
++extern int ioh_udc_pcd_init(struct ioh_udc_dev *dev);
++extern void ioh_udc_dev_isr(struct ioh_udc_dev *dev, u32 dev_intr);
++
++/*!@ingroup UDC_Global
++ * @struct ioh_udc_request
++ * @brief Structure holding a IOH USB device request
++ */
++struct ioh_udc_request /* request packet */
++{
++ struct usb_request req; /**< embedded ep request */
++ dma_addr_t td_data_phys; /**< phys. address */
++ struct ioh_udc_data_dma_desc *td_data;/*< first dma desc. of chain */
++ struct ioh_udc_data_dma_desc *td_data_last;/**< last dma desc.
++ of chain */
++ struct list_head queue; /**< associated queue */
++
++ /* flags */
++ unsigned dma_going:1,/**< DMA in progress for request */
++ dma_mapped:1,/**< DMA memory mapped
++ for request */
++ dma_done:1; /**< DMA completed
++ for request */
++ unsigned chain_len; /**< chain length */
++};
++
++/*!@ingroup UDC_Global
++ * @struct ioh_udc_ep
++ * @brief Structure holding a IOH USB device Endpoint information
++ */
++struct ioh_udc_ep {
++ struct usb_ep ep; /**< embedded ep request */
++ dma_addr_t td_stp_phys;/**< for setup request */
++ dma_addr_t td_data_phys;/**< for data request */
++ struct ioh_udc_stp_dma_desc *td_stp; /**< for setup request */
++ struct ioh_udc_data_dma_desc *td_data; /**< for data request */
++
++ struct ioh_udc_dev *dev; /**< reference to device struct */
++ struct ioh_udc_ep_regs __iomem *regs;
++ u32 __iomem *dma; /**< dma enabled or not */
++ const struct usb_endpoint_descriptor *desc; /**< for this ep */
++
++ /* queue for requests */
++ struct list_head queue;
++
++ unsigned num:5; /**< endpoint number */
++ unsigned in:1; /**< endpoint is IN */
++ unsigned halted; /**< endpoint halted? */
++ unsigned long epsts; /**< Endpoint status */
++};
++
++
++/*!@ingroup UDC_Global
++ * @struct ioh_udc_dev
++ * @brief Structure holding complete information of the
++ IOH USB device
++ */
++struct ioh_udc_dev {
++ struct usb_gadget gadget; /**< gadget driver data */
++ struct usb_gadget_driver *driver;/**< reference to gadget
++ driver bound */
++ struct pci_dev *pdev; /**< reference to the
++ PCI device */
++
++ /* all endpoints */
++ struct ioh_udc_ep ep[IOH_UDC_EP_NUM]; /**< array of
++ endpoints */
++ spinlock_t lock; /* protects all state */
++ /* operational flags */
++ unsigned active:1, /**< enabled the PCI device */
++ stall:1, /**< stall requested */
++ prot_stall:1, /**< protcol stall requested */
++ irq_registered:1, /**< irq registered with system */
++ mem_region:1, /**< device memory mapped */
++ registered:1, /**< driver regsitered with system */
++ suspended:1, /**< driver in suspended state */
++ connected:1, /**< gadget driver associated */
++ set_cfg_not_acked:1,/**< pending acknowledgement
++ 4 setup */
++ waiting_zlp_ack:1;/**< pending acknowledgement 4 ZLP */
++
++ /* registers */
++ struct ioh_udc_csrs __iomem *csr; /**< address of config & status
++ registers */
++ struct ioh_udc_regs __iomem *regs; /**< address of device
++ registers */
++ struct ioh_udc_ep_regs __iomem *ep_regs; /**< address of endpoint
++ registers */
++
++ /* DMA desc pools */
++ struct pci_pool *data_requests; /**< DMA pool for
++ data requests */
++ struct pci_pool *stp_requests; /**< DMA pool for
++ setup requests */
++
++ /* device data */
++ unsigned long phys_addr; /**< of device memory */
++ void __iomem *virt_addr;/**< for mapped device
++ memory */
++ unsigned irq; /**< IRQ line for the device */
++
++ struct ioh_udc_cfg_data cfg_data;/**< current cfg, intf,
++ and alt in use */
++};
++
++/*!@ingroup UDC_Global
++ * @struct ioh_udc_ep
++ * @brief Structure holding setup request data
++ */
++union ioh_udc_setup_data {
++ u32 data[2]; /**< 8 bytes of setup data */
++ struct usb_ctrlrequest request; /**< setup request for gadget driver */
++};
++
++/*!@ingroup UDC_UtilitiesAPI
++ * @fn ioh_udc_activate_control_ep(struct ioh_udc_dev *dev)
++ * @brief Performs necessary operations to enable endpoint 0
++ */
++extern void ioh_udc_activate_control_ep(struct ioh_udc_dev *dev);
++
++/*!@ingroup UDC_UtilitiesAPI
++ * @fn empty_req_queue(struct ioh_udc_ep *ep)
++ * @brief Removes all requests queued on the endpoint
++ */
++extern void empty_req_queue(struct ioh_udc_ep *ep);
++
++#endif /* _IOH_UDC_H_ */
+--- /dev/null
++++ b/drivers/usb/gadget/pch_udc_hal.c
+@@ -0,0 +1,1110 @@
++ /*!
++ * @file ioh_udc_hal.c
++ * @brief
++ * The IOH UDC is a USB High speed DMA capable USB device controller.
++ * It provides 4 IN and 4 OUT endpoints (control, bulk isochronous or
++ * interrupt type).
++ *
++ * The IOH USB device controller driver provides required interface
++ * to the USB gadget framework for accessing the IOH USB device hardware.
++ *
++ * @version 0.96
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++ /*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 2/26/2010
++ * modified:
++ *
++ */
++
++#include <linux/types.h>
++#include <linux/delay.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++/*#include <asm/io.h>*/
++#include <linux/io.h>
++#include "pch_common.h"
++#include "pch_debug.h"
++
++#include <linux/usb/ch9.h>
++#include "pch_udc_hal.h"
++
++#define INLINE inline
++static u32 ioh_udc_base;
++
++#define MAX_LOOP 200
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn inline void ioh_udc_write_csr(unsigned long val, unsigned long
++ * addr)
++ * @remarks The following actions are performed:
++ * - Wait for any ongoing access to config registers to
++ * complete, by polling the CSR busy flag.
++ * - Write the value to the CSR register.
++ * - Wait till the write gets completed by polling the
++ * CSR busy flag.
++ * @param val [IN] value to be written to CSR register
++ * @param addr [IN] address of CSR register
++ * @retval none
++ */
++INLINE void ioh_udc_write_csr(unsigned long val, unsigned long addr)
++{
++ int count = MAX_LOOP;
++
++ /* Wait till idle */
++ while ((count > 0) &&\
++ (IOH_READ32((unsigned long *)(IOH_UDC_CSR_BUSY_ADDR + ioh_udc_base)) &
++ IOH_UDC_CSR_BUSY))
++ count--;
++
++ if (count < 0)
++ IOH_DEBUG("ioh_udc_write_csr: wait error; count = %x", count);
++
++ iowrite32(val, (unsigned long *)addr);
++ /* Wait till idle */
++ count = MAX_LOOP;
++ while ((count > 0) &&
++ (IOH_READ32((unsigned long *)(IOH_UDC_CSR_BUSY_ADDR + ioh_udc_base)) &
++ IOH_UDC_CSR_BUSY))
++ count--;
++
++ if (count < 0)
++ IOH_DEBUG("ioh_udc_write_csr: wait error; count = %x", count);
++
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_read_csr(unsigned long addr)
++ * @remarks The main tasks performed by this method are:
++ * - This function waits for any access to internal
++ * USB_DEVICE registers to
++ * complete and then reads the config register
++ * @param addr [IN] address of CSR register
++ * @retval u32 - content of CSR register
++ */
++INLINE u32 ioh_udc_read_csr(unsigned long addr)
++{
++ int count = MAX_LOOP;
++
++ /* Wait till idle */
++ while ((count > 0) &&
++ (IOH_READ32((unsigned long *)(IOH_UDC_CSR_BUSY_ADDR + ioh_udc_base)) &
++ IOH_UDC_CSR_BUSY))
++ count--;
++
++ if (count < 0)
++ IOH_DEBUG("ioh_udc_read_csr: wait error; count = %x", count);
++ /* Dummy read */
++ IOH_READ32((unsigned long *)addr);
++ count = MAX_LOOP;
++ /* Wait till idle */
++ while ((count > 0) &&
++ (IOH_READ32((unsigned long *)(IOH_UDC_CSR_BUSY_ADDR + ioh_udc_base)) &
++ IOH_UDC_CSR_BUSY))
++ count--;
++ /* actual read */
++ if (count < 0)
++ IOH_DEBUG("ioh_udc_read_csr: wait error; count = %x", count);
++
++ return IOH_READ32((unsigned long *)addr);
++}
++
++/* Prints UDC device registers and endpoint irq registers */
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_print_regs(u32 base)
++ * @remarks The main tasks performed by this method are:
++ * - Read UDC device registers and endpoint registers and print them
++ * @param base [IN] the remapped base address
++ */
++void ioh_udc_print_regs(u32 base)
++{
++ struct ioh_udc_data_dma_desc *data_desc;
++ struct ioh_udc_stp_dma_desc *stp_desc;
++ u32 *regs = (u32 *) (base + UDC_DEVCFG_ADDR);
++ u32 ddp, stp;
++ int i;
++
++ IOH_DEBUG("print_regs: base %x", base);
++ IOH_DEBUG("------- Device registers ---------");
++ IOH_DEBUG("dev config = %08x",
++ IOH_READ32((u32)&((struct ioh_udc_regs *)
++ regs)->devcfg));
++ IOH_DEBUG("dev control = %08x",
++ IOH_READ32((u32)&((struct ioh_udc_regs *)
++ regs)->devctl));
++ IOH_DEBUG("dev status = %08x",
++ IOH_READ32((u32)&((struct ioh_udc_regs *)
++ regs)->devsts));
++ IOH_DEBUG("dev int's = %08x",
++ IOH_READ32((u32)&((struct ioh_udc_regs *)
++ regs)->devirqsts));
++ IOH_DEBUG("dev intmask = %08x",
++ IOH_READ32((u32)&((struct ioh_udc_regs *)
++ regs)->devirqmsk));
++ IOH_DEBUG("dev ep int's = %08x",
++ IOH_READ32((u32)&((struct ioh_udc_regs *)
++ regs)->epirqsts));
++ IOH_DEBUG("dev ep intmask = %08x",
++ IOH_READ32((u32)&((struct ioh_udc_regs *)
++ regs)->epirqmsk));
++
++ regs = (u32 *) (base + UDC_EPIN_REGS_ADDR);
++ IOH_DEBUG("------- Endpoint registers -------");
++ for (i = 0; i < IOH_UDC_USED_EP_NUM; i++) {
++ IOH_DEBUG("Endpoint IN %1d ----------", i);
++ IOH_DEBUG("Ep control = %08x",
++ IOH_READ32((u32)&((struct ioh_udc_ep_regs *)
++ regs)->epctl));
++ IOH_DEBUG("Ep status = %08x",
++ IOH_READ32((u32)&((struct ioh_udc_ep_regs *)
++ regs)->epsts));
++ IOH_DEBUG("Buffer size = %08x",
++ IOH_READ32((u32)&((struct ioh_udc_ep_regs *)
++ regs)->bufin_framenum));
++ IOH_DEBUG("Max packet = %08x",
++ IOH_READ32((u32)&((struct ioh_udc_ep_regs *)
++ regs)->bufout_maxpkt));
++ ddp = IOH_READ32((u32)&((struct ioh_udc_ep_regs *)
++ regs)->desptr);
++ IOH_DEBUG("Data Desc Ptr = %08x", ddp);
++
++ if ((ddp != 0) && (ddp != 0xFFFFFFFF)) {
++ data_desc = phys_to_virt(ddp);
++ IOH_DEBUG(" |- status = %08x", data_desc->status);
++ IOH_DEBUG(" |- reserved = %08x", data_desc->reserved);
++ IOH_DEBUG(" |- dataptr = %08x", data_desc->dataptr);
++ IOH_DEBUG(" +- next = %08x", data_desc->next);
++ }
++ regs += UDC_EP_REG_OFS / sizeof regs;
++ }
++ regs = (u32 *) (base + UDC_EPOUT_REGS_ADDR);
++ for (i = 0; i < IOH_UDC_USED_EP_NUM; i++) {
++ IOH_DEBUG("Endpoint OUT %1d ---------", i);
++ IOH_DEBUG("Ep control = %08x",
++ IOH_READ32((u32)&((struct ioh_udc_ep_regs *)
++ regs)->epctl));
++ IOH_DEBUG("Ep status = %08x",
++ IOH_READ32((u32)&((struct ioh_udc_ep_regs *)
++ regs)->epsts));
++ IOH_DEBUG("Frame Number = %08x",
++ IOH_READ32((u32)&((struct ioh_udc_ep_regs *)
++ regs)->bufin_framenum));
++ IOH_DEBUG("Buf / Max pkt = %08x",
++ IOH_READ32((u32)&((struct ioh_udc_ep_regs *)
++ regs)->bufout_maxpkt));
++ stp = IOH_READ32((u32)&((struct ioh_udc_ep_regs *)
++ regs)->subptr);
++ IOH_DEBUG("Setup buf ptr = %08x", stp);
++
++ if ((stp != 0) && (stp != 0xFFFFFFFF)) {
++ stp_desc = phys_to_virt(stp);
++ IOH_DEBUG(" |- status = %08x", stp_desc->status);
++ IOH_DEBUG(" |- reserved = %08x", stp_desc->reserved);
++ IOH_DEBUG(" |- data12 = %08x", stp_desc->data12);
++ IOH_DEBUG(" +- data34 = %08x", stp_desc->data34);
++ }
++
++ ddp = IOH_READ32((u32)&((struct ioh_udc_ep_regs *)
++ regs)->desptr);
++ IOH_DEBUG("Data Desc Ptr = %08x", ddp);
++ if ((ddp != 0) && (ddp != 0xFFFFFFFF)) {
++ data_desc = phys_to_virt(ddp);
++ IOH_DEBUG(" |- status = %08x", data_desc->status);
++ IOH_DEBUG(" |- reserved = %08x", data_desc->reserved);
++ IOH_DEBUG(" |- dataptr = %08x", data_desc->dataptr);
++ IOH_DEBUG(" +- next = %08x", data_desc->next);
++ }
++ regs += UDC_EP_REG_OFS / sizeof regs;
++ }
++ regs = (u32 *) (base + UDC_CSR_ADDR);
++ IOH_DEBUG("------- Endpoint Configuration ---");
++ for (i = 0; i < IOH_UDC_USED_EP_NUM * 2; i++) {
++ IOH_DEBUG("EP config%d = %08x", i,
++ ioh_udc_read_csr((u32)&((struct ioh_udc_csrs *)
++ regs)->ne[i]));
++ }
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_init(struct ioh_udc_regs *dev)
++ * @remarks The main tasks performed by this method are:
++ * - If argument passed is NULL, return.
++ * - Set the base address for USB device registers
++ * - Invoke soft reset and PHY reset
++ * - Disable and clear the Device and endpoint interrupts
++ * - Enable dynamic CSR programming, set self powered and configure device
++ * speed in the Device Config register
++ * - Enable TX DMA, BURST and THRESHOLD modes
++ * and set BURST and THRESHOLD length.
++ * - Put the device in soft disconnect mode
++ * @param dev [IN] reference to ioh_udc_regs structure
++ * @retval none
++ */
++void ioh_udc_init(struct ioh_udc_regs *dev)
++{
++ u32 reset_reg;
++
++ IOH_DEBUG("ioh_udc_init: enter");
++ if (NULL == dev) {
++ IOH_LOG(KERN_ERR, "ioh_udc_init: Invalid address");
++ return;
++ }
++ /* Set the UDC_Global variable */
++ ioh_udc_base = (u32) dev - UDC_DEVCFG_ADDR;
++ /* Soft Reset and Reset PHY */
++ reset_reg = (ioh_udc_base + IOH_UDC_SRST_ADDR);
++ IOH_WRITE32((1 << IOH_UDC_SRST), (u32 *)(reset_reg));
++ IOH_WRITE32((1 << IOH_UDC_SRST) | (1 << IOH_UDC_PSRST),
++ (u32 *)(reset_reg));
++ mdelay(1);
++ IOH_WRITE32((1 << IOH_UDC_SRST), (u32 *)(reset_reg));
++ IOH_WRITE32(0x00, (u32 *)(reset_reg));
++ mdelay(1);
++
++ /* mask and clear all device interrupts */
++ IOH_SET_ADDR_BIT((u32)&dev->devirqmsk, UDC_DEVINT_MSK);
++ IOH_SET_ADDR_BIT((u32)&dev->devirqsts, UDC_DEVINT_MSK);
++
++ /* mask and clear all ep interrupts */
++ IOH_SET_ADDR_BIT((u32)&dev->epirqmsk, UDC_EPINT_MSK_DISABLE_ALL);
++ IOH_SET_ADDR_BIT((u32)&dev->epirqsts, UDC_EPINT_MSK_DISABLE_ALL);
++
++ /* enable dynamic CSR programmingi, self powered and device speed */
++ if (speed_fs) {
++ IOH_SET_ADDR_BIT((u32)&dev->devcfg, (1 << UDC_DEVCFG_CSR_PRG) |
++ (1 << UDC_DEVCFG_SP) | /* set self powered */
++ UDC_DEVCFG_SPD_FS); /* program speed - full speed */
++ } else { /* defaul high speed */
++ IOH_SET_ADDR_BIT((u32)&dev->devcfg, (1 << UDC_DEVCFG_CSR_PRG) |
++ (1 << UDC_DEVCFG_SP) | /* set self powered */
++ UDC_DEVCFG_SPD_HS); /* program speed - high speed */
++ }
++#ifdef DMA_PPB_WITH_DESC_UPDATE
++ IOH_SET_ADDR_BIT((u32)&dev->devctl,
++ (IOH_UDC_THLEN << UDC_DEVCTL_THLEN_OFS) |
++ (IOH_UDC_BRLEN << UDC_DEVCTL_BRLEN_OFS) |
++ (1 << UDC_DEVCTL_MODE) | (1 << UDC_DEVCTL_BREN) |
++ (1 << UDC_DEVCTL_DU) |
++ (1 << UDC_DEVCTL_THE));
++#else
++ IOH_SET_ADDR_BIT((u32)&dev->devctl,
++ (IOH_UDC_THLEN << UDC_DEVCTL_THLEN_OFS) |
++ (IOH_UDC_BRLEN << UDC_DEVCTL_BRLEN_OFS) |
++ (1 << UDC_DEVCTL_MODE) | (1 << UDC_DEVCTL_BREN) |
++ (1 << UDC_DEVCTL_THE));
++#endif
++ IOH_DEBUG("ioh_udc_init: exit");
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_exit(struct ioh_udc_regs *dev)
++ * @remarks The main tasks performed by this method are:
++ * - Put the device in soft disconnect mode
++ * - Clear the device and endpoint interrupts
++ * and disable them
++ * @param dev [IN] Reference to ioh_udc_regs structure
++ * @retval none
++ */
++void ioh_udc_exit(struct ioh_udc_regs *dev)
++{
++ IOH_DEBUG("ioh_udc_exit: enter");
++ /* mask all device interrupts */
++ IOH_SET_ADDR_BIT((u32)&dev->devirqmsk, UDC_DEVINT_MSK);
++
++ /* mask all ep interrupts */
++ IOH_SET_ADDR_BIT((u32)&dev->epirqmsk, UDC_EPINT_MSK_DISABLE_ALL);
++
++ /* put device in disconnected state */
++ ioh_udc_set_disconnect(dev);
++ IOH_DEBUG("ioh_udc_exit: exit");
++}
++
++/* Initiates a remote wakeup */
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_rmt_wakeup(struct ioh_udc_regs *dev)
++ * @remarks The main tasks performed by this method are:
++ * - Set the RES (bit 1) of the device control register
++ * - Wait for 1 msec
++ * - Clear the RES bit
++ * @param dev [IN] Reference to ioh_udc_regs structure
++ * @retval none
++ */
++void ioh_udc_rmt_wakeup(struct ioh_udc_regs *dev)
++{
++ IOH_DEBUG("ioh_udc_rmt_wakeup: enter");
++ IOH_SET_ADDR_BIT((u32)&dev->devctl, 1 << UDC_DEVCTL_RES);
++ mdelay(1);
++ IOH_CLR_ADDR_BIT((u32)&dev->devctl, 1 << UDC_DEVCTL_RES);
++ IOH_DEBUG("ioh_udc_rmt_wakeup: exit");
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_get_frame(struct ioh_udc_regs *dev)
++ * @remarks The main tasks performed by this method are:
++ * - Retrieve the current frame from device status register
++ * @param dev [IN] Reference to ioh_udc_regs structure
++ * @retval int
++ * - current frame
++ */
++int ioh_udc_get_frame(struct ioh_udc_regs *dev)
++{
++ u32 frame;
++
++ IOH_DEBUG("ioh_udc_get_frame: enter");
++ frame = IOH_READ32((u32)&dev->devsts);
++ IOH_DEBUG("ioh_udc_get_frame: exit");
++ return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_OFS;
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_clear_selfpowered (struct ioh_udc_regs __iomem *dev)
++ * @remarks The main tasks performed by this method are:
++ * - Clear the 'SP' bit (bit 3) of the device config register
++ * @param dev [IN] Reference to ioh_udc_regs structure
++ * @retval none
++ */
++void ioh_udc_clear_selfpowered(struct ioh_udc_regs __iomem *dev)
++{
++ IOH_CLR_ADDR_BIT((u32)&dev->devcfg, 1 << UDC_DEVCFG_SP);
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_set_selfpowered (struct ioh_udc_regs __iomem *dev)
++ * @remarks The main tasks performed by this method are:
++ * - Set the 'SP' bit (bit 3) of the device config register
++ * @param dev [IN] Reference to ioh_udc_regs structure
++ * @retval none
++ */
++void ioh_udc_set_selfpowered(struct ioh_udc_regs __iomem *dev)
++{
++ IOH_SET_ADDR_BIT((u32)&dev->devcfg, 1 << UDC_DEVCFG_SP);
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_set_disconnect (struct ioh_udc_regs __iomem *dev)
++ * @remarks The main tasks performed by this method are:
++ * - Set the SD bit (bit 10) of device control register
++ * @param dev [IN] Reference to ioh_udc_regs structure
++ * @retval none
++ */
++void ioh_udc_set_disconnect(struct ioh_udc_regs __iomem *dev)
++{
++ IOH_SET_ADDR_BIT((u32)&dev->devctl, 1 << UDC_DEVCTL_SD);
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_get_speed (struct ioh_udc_regs __iomem *dev)
++ * @remarks The main tasks performed by this method are:
++ * - Return the speed from Device status register ENUMSPD (bit 13, 14)
++ * @param dev [IN] Reference to ioh_udc_regs structure
++ * @retval int
++ * - The speed(LOW=1, FULL=2, HIGH=3)
++ */
++int ioh_udc_get_speed(struct ioh_udc_regs __iomem *dev)
++{
++ u32 val;
++
++ val = IOH_READ32((u32)&dev->devsts);
++ return (val & UDC_DEVSTS_ENUM_SPEED_MASK) >> UDC_DEVSTS_ENUM_SPEED_OFS;
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_clear_disconnect (struct ioh_udc_regs __iomem *dev)
++ * @remarks The main tasks performed by this method are
++ * - Clear the SD bit (bit 10) and set the RES bit (bit 1)
++ * of the device control register
++ * - After 1msec, clear the RES bit (bit 1) of the device
++ * control register
++ * @param dev [IN] Reference to ioh_udc_regs structure
++ * @retval none
++ */
++void ioh_udc_clear_disconnect(struct ioh_udc_regs __iomem *dev)
++{
++ /* Clear the disconnect */
++ IOH_SET_ADDR_BIT((u32)&dev->devctl, 1 << UDC_DEVCTL_RES);
++ IOH_CLR_ADDR_BIT((u32)&dev->devctl, 1 << UDC_DEVCTL_SD);
++ mdelay(1);
++ /* Resume USB signalling */
++ IOH_CLR_ADDR_BIT((u32)&dev->devctl, 1 << UDC_DEVCTL_RES);
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_vbus_session (struct ioh_udc_regs __iomem *dev, int is_active)
++ * @remarks The main tasks performed by this method are:
++ * - If 'is_active' invoke ioh_udc_clear_disconnect to make
++ * this device available to host
++ * - Otherwise invoke ioh_udc_set_disconnect to make the
++ * device unavailable
++ * @param dev [IN] Reference to ioh_udc_regs structure
++ * @param is_active [IN] Parameter specifying the action
++ * - is_active = 0 indicating VBUS power is ending
++ * - is_active != 0 indicating VBUS power is starting
++ * @retval none
++ */
++void ioh_udc_vbus_session(struct ioh_udc_regs __iomem *dev, int is_active)
++{
++ if (is_active == 0)
++ ioh_udc_set_disconnect(dev);
++ else
++ ioh_udc_clear_disconnect(dev);
++
++}
++
++/* Stall or clear stall of endpoint */
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_ep_set_stall(struct ioh_udc_ep_regs __iomem *ep)
++ * @remarks The main tasks performed by this method are:
++ * - If this is a IN endpoint, flush the TX FIFO ( F bit)
++ * of the endpoint control register
++ * - Set the bit 0 (S bit) of the endpoint control register
++ * @param ep [IN] reference to structure of type
++ * ioh_udc_ep_regs
++ * @retval none
++ */
++void ioh_udc_ep_set_stall(struct ioh_udc_ep_regs __iomem *ep)
++{
++ if (EP_IS_IN(ep)) { /* flush fifo */
++ IOH_SET_ADDR_BIT((u32)&ep->epctl, 1 << UDC_EPCTL_F);
++ IOH_SET_ADDR_BIT((u32)&ep->epctl, 1 << UDC_EPCTL_S);
++ } else {
++ IOH_SET_ADDR_BIT((u32)&ep->epctl, 1 << UDC_EPCTL_S);
++ }
++}
++
++/* Halt or clear halt of endpoint */
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_ep_clear_stall(struct ioh_udc_ep_regs __iomem *ep)
++ * @remarks The main tasks performed by this method are:
++ * - Clear the bit 0 (S bit) of the endpoint control register
++ * - Set the bit 8 (CNAK bit) of the endpoint control register
++ * @param ep [IN] reference to structure of type ioh_udc_ep_regs
++ * @retval none
++ */
++void ioh_udc_ep_clear_stall(struct ioh_udc_ep_regs __iomem *ep)
++{
++ /* Clear the stall */
++ IOH_CLR_ADDR_BIT((u32)&ep->epctl, 1 << UDC_EPCTL_S);
++
++ /* clear NAK by writing CNAK */
++ IOH_SET_ADDR_BIT((u32)&ep->epctl, 1 << UDC_EPCTL_CNAK);
++}
++
++
++/* Set the transfer type of endpoint */
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_ep_set_trfr_type(struct ioh_udc_ep_regs __iomem *ep, u8 type)
++ * @remarks The main tasks performed by this method are:
++ * - Set the transfer type of endpoint
++ * @param ep [IN] reference to structure of type ioh_udc_ep_regs
++ * @param type [IN] type of endpoint
++ * @retval none
++ */
++void ioh_udc_ep_set_trfr_type(struct ioh_udc_ep_regs __iomem *ep, u8 type)
++{
++ IOH_WRITE32(((type << UDC_EPCTL_ET_OFS) & UDC_EPCTL_ET_MASK) ,
++ (u32)&ep->epctl);
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_ep_set_bufsz(struct ioh_udc_ep_regs __iomem *ep,
++ * u32 buf_size)
++ * @remarks The main task performed by this method is:
++ * - Set the maximum packet size for the endpoint
++ * @param ep [IN] reference to structure of
++ * type ioh_udc_ep_regs
++ * @param buf_size [IN] the buffer size
++ * @retval none
++ */
++void ioh_udc_ep_set_bufsz(struct ioh_udc_ep_regs __iomem *ep,
++ u32 buf_size, u32 ep_in)
++{
++ u32 data;
++ if (ep_in) {
++ data = IOH_READ32((u32)&ep->bufin_framenum);
++ data = (data & 0xffff0000) | (buf_size & 0xffff);
++ IOH_WRITE32(data, (u32)&ep->bufin_framenum);
++ } else {
++ data = IOH_READ32((u32)&ep->bufout_maxpkt);
++ data = (buf_size << 16) | (data & 0xffff);
++ IOH_WRITE32(data, (u32)&ep->bufout_maxpkt);
++ }
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_ep_set_maxpkt(struct ioh_udc_ep_regs __iomem *ep,
++ * u32 pkt_size)
++ * @remarks Set the Max packet size for the endpoint
++ * @param ep [IN] reference to structure of type ioh_udc_ep_regs
++ * @param pkt_size [IN] the packet size
++ * @retval none
++ */
++/* Set the Max packet size for the endpoint */
++void ioh_udc_ep_set_maxpkt(struct ioh_udc_ep_regs __iomem *ep, u32 pkt_size)
++{
++ u32 data;
++ data = IOH_READ32((u32)&ep->bufout_maxpkt);
++ data = (data & 0xffff0000) | (pkt_size & 0xffff);
++ IOH_WRITE32(data, (u32)&ep->bufout_maxpkt);
++}
++
++/* Set the Setup buffer pointer for the endpoint */
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_ep_set_subptr(struct ioh_udc_ep_regs __iomem *ep, u32 addr)
++ * @remarks Set the Setup buffer pointer for the endpoint
++ * @param ep [IN] reference to structure of type ioh_udc_ep_regs
++ * @param addr [IN] address of the register
++ * @retval none
++ */
++INLINE void ioh_udc_ep_set_subptr(struct ioh_udc_ep_regs __iomem *ep, u32 addr)
++{
++ IOH_WRITE32(addr, (u32)&ep->subptr);
++}
++
++/* Set the Data descriptor pointer for the endpoint */
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_ep_set_ddptr(struct ioh_udc_ep_regs __iomem *ep, u32 addr)
++ * @remarks Set the Data descriptor pointer for the endpoint
++ * @param ep [IN] reference to structure of type ioh_udc_ep_regs
++ * @param addr [IN] address of the register
++ * @retval none
++ */
++INLINE void ioh_udc_ep_set_ddptr(struct ioh_udc_ep_regs __iomem *ep, u32 addr)
++{
++ IOH_WRITE32(addr, (u32)&ep->desptr);
++}
++
++/* Set the poll demand bit for the endpoint */
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_ep_set_pd(struct ioh_udc_ep_regs __iomem *ep)
++ * @remarks Set the poll demand bit for the endpoint
++ * @param ep [IN] reference to structure of type ioh_udc_ep_regs
++ * @retval none
++ */
++INLINE void ioh_udc_ep_set_pd(struct ioh_udc_ep_regs __iomem *ep)
++{
++ IOH_SET_ADDR_BIT((u32)&ep->epctl, 1 << UDC_EPCTL_P);
++}
++
++/* Set the receive ready bit for the endpoint */
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_ep_set_rrdy(struct ioh_udc_ep_regs __iomem *ep)
++ * @remarks Set the receive ready bit for the endpoint
++ * @param ep [IN] reference to structure of type ioh_udc_ep_regs
++ * @retval none
++ */
++INLINE void ioh_udc_ep_set_rrdy(struct ioh_udc_ep_regs __iomem *ep)
++{
++ IOH_DEBUG("ioh_udc_ep_set_rrdy: ep%d%s", EP_NUM(ep),
++ (EP_IS_IN(ep) ? "in" : "out"));
++ IOH_SET_ADDR_BIT((u32)&ep->epctl, 1 << UDC_EPCTL_RRDY);
++}
++
++/* Clear the receive ready bit for the endpoint */
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_ep_clear_rrdy(struct ioh_udc_ep_regs __iomem *ep)
++ * @remarks Clear the receive ready bit for the endpoint
++ * @param ep [IN] reference to structure of type ioh_udc_ep_regs
++ * @retval none
++ */
++INLINE void ioh_udc_ep_clear_rrdy(struct ioh_udc_ep_regs __iomem *ep)
++{
++ IOH_CLR_ADDR_BIT((u32)&ep->epctl, 1 << UDC_EPCTL_RRDY);
++}
++
++/* Enabling RX/TX DMA */
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_set_dma(struct ioh_udc_regs __iomem *dev, int dir)
++ * @remarks Set the 'TDE' or RDE bit of device control register depending
++ * on the direction specified
++ * @param dev [IN] reference to structure of type ioh_udc_regs
++ * @param dir [IN] whether Tx or Rx
++ * - dir = DMA_DIR_RX Receive
++ * - dir = DMA_DIR_TX Transmit
++ * @retval none
++ */
++INLINE void ioh_udc_set_dma(struct ioh_udc_regs __iomem *dev, int dir)
++{
++ if (dir == DMA_DIR_RX)
++ IOH_SET_ADDR_BIT((u32)&dev->devctl, 1 << UDC_DEVCTL_RDE);
++ else if (dir == DMA_DIR_TX)
++ IOH_SET_ADDR_BIT((u32)&dev->devctl, (1 << UDC_DEVCTL_TDE));
++
++}
++
++/* Disable RX/TX DMA */
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_clear_dma(struct ioh_udc_regs __iomem *dev, int dir)
++ * @remarks Clear the 'TDE' or RDE bit of device control
++ * register depending on the direction specified
++ * @param dev [IN] reference to structure of type ioh_udc_regs
++ * @param dir [IN] whether Tx or Rx
++ * - dir = DMA_DIR_RX Receive
++ * - dir = DMA_DIR_TX Transmit
++ * @retval none
++ */
++void ioh_udc_clear_dma(struct ioh_udc_regs __iomem *dev, int dir)
++{
++ if (dir == DMA_DIR_RX)
++ IOH_CLR_ADDR_BIT((u32)&dev->devctl, 1 << UDC_DEVCTL_RDE); /*
++ clear RDE */
++ else if (dir == DMA_DIR_TX)
++ IOH_CLR_ADDR_BIT((u32)&dev->devctl, 1 << UDC_DEVCTL_TDE);
++
++}
++
++/* Set CSR done */
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_set_csr_done(struct ioh_udc_regs __iomem *dev)
++ * @remarks Set the device control register CSR done field (bit 13)
++ * @param dev [IN] reference to structure of type ioh_udc_regs
++ * @retval none
++ */
++void ioh_udc_set_csr_done(struct ioh_udc_regs __iomem *dev)
++{
++ /* set CSR Done */
++ IOH_SET_ADDR_BIT((u32)&dev->devctl, 1 << UDC_DEVCTL_CSR_DONE);
++}
++
++/* Set Burst length */
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_set_burst_length(struct ioh_udc_regs __iomem *dev, u8 len)
++ * @remarks The main tasks done by this method are:
++ * - Set the device control register burst length field
++ * - Enable the bust mode
++ * @param dev [IN] reference to structure of type ioh_udc_regs
++ * @param len [IN] burst length
++ * @retval none
++ */
++void ioh_udc_set_burst_length(struct ioh_udc_regs __iomem *dev, u8 len)
++{
++ IOH_CLR_ADDR_BIT((u32)&dev->devctl, (0xff << UDC_DEVCTL_BRLEN_OFS));
++ /* set Burst length and enable burst mode*/
++ IOH_SET_ADDR_BIT((u32)&dev->devctl, (len << UDC_DEVCTL_BRLEN_OFS) |
++ (1 << UDC_DEVCTL_BREN));
++}
++
++/* Set Threshold length */
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_set_threshold_length(struct ioh_udc_regs __iomem *dev, u8 len)
++ * @remarks - Set the device control register threshold
++ * length field
++ * - Enable the threshold mode
++ * @param dev [IN] reference to structure of type ioh_udc_regs
++ * @param len [IN] burst length
++ * @retval none
++ */
++void ioh_udc_set_threshold_length(struct ioh_udc_regs __iomem *dev, u8 len)
++{
++ IOH_CLR_ADDR_BIT((u32)&dev->devctl, (0xff << UDC_DEVCTL_THLEN_OFS));
++ /* set Burst Threshold length and enable threshold mode*/
++ IOH_SET_ADDR_BIT((u32)&dev->devctl, (len << UDC_DEVCTL_THLEN_OFS) |
++ (1 << UDC_DEVCTL_THE));
++}
++
++/* Disable device interrupts */
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_disable_interrupts(struct ioh_udc_regs __iomem *dev, u32 mask)
++ * @remarks Disables the specified interrupts
++ * @param dev [IN] reference to structure of type ioh_udc_regs
++ * @param mask [IN] mask to disable interrupts
++ * @retval none
++ */
++void ioh_udc_disable_interrupts(struct ioh_udc_regs __iomem *dev, u32 mask)
++{
++ /* set the mask */
++ IOH_SET_ADDR_BIT((u32)&dev->devirqmsk, mask);
++ IOH_DEBUG("Interrupt mask reg = %08x",
++ IOH_READ32((u32)&dev->devirqmsk));
++}
++
++/* Enable device interrupts */
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_enable_interrupts(struct ioh_udc_regs __iomem *dev,
++ * u32 mask)
++ * @remarks Enable the specified interrupts
++ * @param dev [IN] reference to structure of type ioh_udc_regs
++ * @param mask [IN] mask to enable interrupts
++ * @retval none
++ */
++void ioh_udc_enable_interrupts(struct ioh_udc_regs __iomem *dev, u32 mask)
++{
++ /* set the mask */
++ IOH_CLR_ADDR_BIT((u32)&dev->devirqmsk, mask);
++ IOH_DEBUG("Interrupt mask reg = %08x",
++ IOH_READ32((u32)&dev->devirqmsk));
++}
++
++/* Disable Ep interrupts */
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_disable_ep_interrupts(struct ioh_udc_regs __iomem *dev,
++ * u32 mask)
++ * @remarks Disable endpoint interrupts
++ * @param dev [IN] reference to structure of type ioh_udc_regs
++ * @param mask [IN] mask to disable interrupts
++ * @retval none
++ */
++void ioh_udc_disable_ep_interrupts(struct ioh_udc_regs __iomem *dev, u32 mask)
++{
++ /* set the mask */
++ IOH_SET_ADDR_BIT((u32)&dev->epirqmsk, mask);
++ IOH_DEBUG("Interrupt ep mask reg = %08x",
++ IOH_READ32((u32)&dev->epirqmsk));
++}
++
++/* Enable Ep interrupts */
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_enable_ep_interrupts(struct ioh_udc_regs __iomem *dev,
++ * u32 mask)
++ * @remarks Enable endpoint interrupts
++ * @param dev [IN] reference to structure of type ioh_udc_regs
++ * @param mask [IN] mask to enable interrupts
++ * @retval none
++ */
++void ioh_udc_enable_ep_interrupts(struct ioh_udc_regs __iomem *dev, u32 mask)
++{
++ /* set the mask */
++ IOH_CLR_ADDR_BIT((u32)&dev->epirqmsk, mask);
++ IOH_DEBUG("Interrupt ep mask reg = %08x",
++ IOH_READ32((u32)&dev->epirqmsk));
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_read_device_interrupts(struct ioh_udc_regs __iomem *dev)
++ * @remarks Read the device interrupts
++ * @param dev [IN] reference to structure of type ioh_udc_regs
++ * @retval u32
++ * - The device interrupts
++ */
++INLINE u32 ioh_udc_read_device_interrupts(struct ioh_udc_regs __iomem *dev)
++{
++ return IOH_READ32((u32)&dev->devirqsts);
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_write_device_interrupts
++ * (struct ioh_udc_regs __iomem *dev, u32 val)
++ * @remarks Write device interrupts
++ * @param dev [IN] reference to structure of type ioh_udc_regs
++ * @param val [IN] the value to be written to interrupt register
++ * @retval none
++ */
++INLINE void ioh_udc_write_device_interrupts(struct ioh_udc_regs __iomem *dev,
++ u32 val)
++{
++ IOH_WRITE32(val, (u32)&dev->devirqsts);
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_read_ep_interrupts(struct ioh_udc_regs __iomem *dev)
++ * @remarks Read the endpoint interrupts
++ * @param dev [IN] reference to structure of type ioh_udc_regs
++ * @retval u32
++ * - the endpoint interrupt
++ */
++INLINE u32 ioh_udc_read_ep_interrupts(struct ioh_udc_regs __iomem *dev)
++{
++ return IOH_READ32((u32)&dev->epirqsts);
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_write_ep_interrupts
++ * @remarks Clear endpoint interupts
++ * @param dev [IN] reference to structure of type ioh_udc_regs
++ * @param val [IN] the value to be written to interrupt register
++ * @retval none
++ */
++INLINE void ioh_udc_write_ep_interrupts(struct ioh_udc_regs __iomem *dev,
++ u32 val)
++{
++ IOH_WRITE32(val, (u32)&dev->epirqsts);
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_read_device_status
++ * @remarks Read the device status
++ * @param dev [IN] reference to structure of type ioh_udc_regs
++ * @retval u32
++ * - the device status
++ */
++INLINE u32 ioh_udc_read_device_status(struct ioh_udc_regs __iomem *dev)
++{
++ return IOH_READ32((u32)&dev->devsts);
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_read_ep_control(struct ioh_udc_ep_regs __iomem *ep)
++ * @remarks Read the endpoint control
++ * @param ep [IN] reference to structure of type ioh_udc_ep_regs
++ * @retval u32
++ * - the endpoint control register value
++ */
++INLINE u32 ioh_udc_read_ep_control(struct ioh_udc_ep_regs __iomem *ep)
++{
++ return IOH_READ32((u32)&ep->epctl);
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_clear_ep_control(struct ioh_udc_ep_regs __iomem *ep)
++ * @remarks Clear the endpoint control register
++ * @param ep [IN] reference to structure of type ioh_udc_ep_regs
++ * @retval u32
++ * - the endpoint control register value
++ */
++INLINE void ioh_udc_clear_ep_control(struct ioh_udc_ep_regs __iomem *ep)
++{
++ return IOH_WRITE32(0, (u32)&ep->epctl);
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_read_ep_status(struct ioh_udc_ep_regs __iomem *ep)
++ * @remarks Read the endpoint status
++ * @param ep [IN] reference to structure of type ioh_udc_ep_regs
++ * @retval u32
++ * - the endpoint status
++ */
++INLINE u32 ioh_udc_read_ep_status(struct ioh_udc_ep_regs __iomem *ep)
++{
++ return IOH_READ32((u32)&ep->epsts);
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_clear_ep_status(struct ioh_udc_ep_regs __iomem *ep,
++ * u32 stat)
++ * @remarks Clear the endpoint status
++ * @param ep [IN] reference to structure of type ioh_udc_ep_regs
++ * @param stat [IN] endpoint status
++ * @retval none
++ */
++INLINE void ioh_udc_clear_ep_status(struct ioh_udc_ep_regs __iomem *ep,
++ u32 stat)
++{
++ return IOH_WRITE32(stat, (u32)&ep->epsts);
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_ep_set_nak(struct ioh_udc_ep_regs __iomem *ep)
++ * @remarks Set the bit 7 (SNAK field) of the endpoint control register
++ * @param ep [IN] reference to structure of type ioh_udc_ep_regs
++ * @retval none
++ */
++void ioh_udc_ep_set_nak(struct ioh_udc_ep_regs __iomem *ep)
++{
++ IOH_SET_ADDR_BIT((u32)&ep->epctl, 1 << UDC_EPCTL_SNAK);
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_ep_clear_nak(struct ioh_udc_ep_regs __iomem *ep)
++ * @remarks Set the bit 8 (CNAK field) of the endpoint control register
++ * @param ep [IN] reference to structure of type ioh_udc_ep_regs
++ * @retval none
++ */
++void ioh_udc_ep_clear_nak(struct ioh_udc_ep_regs __iomem *ep)
++{
++ unsigned int loopcnt = 0;
++
++ if (IOH_READ32((u32)&ep->epctl) & (1 << UDC_EPCTL_NAK)) {
++ if (!(EP_IS_IN(ep))) {
++ while ((ioh_udc_read_ep_status(ep) &
++ (1 << UDC_EPSTS_MRXFIFO_EMP)) == 0) {
++ if (loopcnt++ > 100000) {
++ IOH_DEBUG("RxFIFO not Empty loop \
++ count = %d", loopcnt);
++ break;
++ }
++ udelay(100);
++ }
++ }
++ while (IOH_READ32((u32)&ep->epctl) & (1 << UDC_EPCTL_NAK)) {
++ IOH_SET_ADDR_BIT((u32)&ep->epctl, 1 << UDC_EPCTL_CNAK);
++ udelay(5);
++ if (loopcnt++ >= 25) {
++ IOH_DEBUG("Clear NAK not set for ep%d%s:\
++ counter=%d",
++ EP_NUM(ep), (EP_IS_IN(ep) ? \
++ "in" : "out"), loopcnt);
++ break;
++ }
++ }
++ }
++}
++
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_ep_fifo_flush(struct ioh_udc_ep_regs __iomem *ep, int dir)
++ * @remarks The main tasks performed by this method are:
++ *- If the endpoint is IN,
++ * - set the bit 1 (F) of endpoint control register
++ *- Otherwise
++ * - If the Rx FIFO is not empty,
++ * - Set bit 12 (MRX FLUSH) of EP control register
++ * - Wait till MRXFIFO EMPTY (bit 8) is set in EP status register
++ * - Clear bit 12 (MRX FLUSH) of EP control register
++ * @param ep [IN] reference to structure of type ioh_udc_ep_regs
++ * @param dir [IN] direction of endpoint
++ * - dir = 0 endpoint is OUT
++ * - dir != 0 endpoint is IN
++ * @retval none
++ */
++void ioh_udc_ep_fifo_flush(struct ioh_udc_ep_regs __iomem *ep, int dir)
++{
++ unsigned int loopcnt = 0;
++
++ IOH_DEBUG("ioh_udc_ep_fifo_flush: ep%d%s", EP_NUM(ep),
++ (EP_IS_IN(ep) ? "in" : "out"));
++ if (dir) { /* IN ep */
++ IOH_SET_ADDR_BIT((u32)&ep->epctl, 1 << UDC_EPCTL_F);
++ } else {
++ if ((ioh_udc_read_ep_status(ep) &
++ (1 << UDC_EPSTS_MRXFIFO_EMP)) == 0) {
++ IOH_SET_ADDR_BIT((u32)&ep->epctl, 1 <<
++ UDC_EPCTL_MRXFLUSH);
++ /* Wait for RxFIFO Empty */
++ while ((ioh_udc_read_ep_status(ep) &
++ (1 << UDC_EPSTS_MRXFIFO_EMP)) == 0) {
++ if (loopcnt++ > 1000000) {
++ IOH_DEBUG("RxFIFO not Empty loop\
++ count = %d", loopcnt);
++ break;
++ }
++ udelay(100);
++ }
++ IOH_CLR_ADDR_BIT((u32)&ep->epctl, 1 <<
++ UDC_EPCTL_MRXFLUSH);
++ }
++ }
++}
++
++/* Enables endpoint */
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_ep_enable(struct ioh_udc_ep_regs __iomem *regs,
++ * struct ioh_udc_cfg_data *cfg,
++ * const struct usb_endpoint_descriptor *desc)
++ * @remarks The following actions are performed:
++ *- Set transfer type of the endpoint using ioh_udc_ep_set_trfr_type
++ * API passing desc->bmAttributes
++ *- Set Buffer size for the endpoint by ioh_udc_ep_set_bufsz
++ *- Set Maximum packet size for the endpoint using ioh_udc_ep_set_maxpkt
++ * API, passing desc->wMaxPacketSize
++ *- Flush FIFO of the endpoint by ioh_udc_ep_fifo_flush
++ *- Set NAK of the endpoint by ioh_udc_ep_set_nak
++ *- Calculate the endpoint configuration value from the following parameters
++ * and update the same in corresponding register [based on endpoint
++ * direction]using ioh_udc_write_csr API:
++ * - endpoint (ep) number
++ * - ep direction
++ * - ep transfer type mask
++ * - cfg->cur_cfg
++ * - cfg->cur_intf
++ * - cfg->cur_intf
++ * - desc->wMaxPacketSize
++ * @param regs [IN] Reference to structure ioh_udc_ep_regs
++ * @param desc [IN] endpoint descriptor
++ * @retval none
++ * @see
++ * - ioh_udc_ep_set_trfr_type
++ * - ioh_udc_ep_set_bufsz
++ * - ioh_udc_ep_set_maxpkt
++ * - ioh_udc_ep_set_nak
++ * - ioh_udc_ep_fifo_flush
++ * - ioh_udc_write_csr
++ */
++void ioh_udc_ep_enable(struct ioh_udc_ep_regs __iomem *regs,
++ struct ioh_udc_cfg_data *cfg,
++ const struct usb_endpoint_descriptor *desc)
++{
++ u32 ep_num = EP_NUM(regs);
++ u32 ep_in = EP_IS_IN(regs);
++ u32 val = 0;
++ u32 buff_size = 0;
++
++ IOH_DEBUG("ioh_udc_ep_enable: ep%x%s bmAttributes = %d\
++ wMaxPacketSize = %d",
++ ep_num, (ep_in ? "in" : "out"), desc->bmAttributes,
++ desc->wMaxPacketSize);
++ /* set traffic type */
++ ioh_udc_ep_set_trfr_type(regs, desc->bmAttributes);
++ /* Set buff size */
++ if (ep_in)
++ buff_size = UDC_EPIN_BUFF_SIZE;
++ else
++ buff_size = UDC_EPOUT_BUFF_SIZE;
++
++ ioh_udc_ep_set_bufsz(regs, buff_size, ep_in);
++ /* Set max packet size */
++ ioh_udc_ep_set_maxpkt(regs, le16_to_cpu(desc->wMaxPacketSize));
++ /* Set NAK */
++ ioh_udc_ep_set_nak(regs);
++ /* Flush fifo */
++ ioh_udc_ep_fifo_flush(regs, ep_in);
++ /* Configure the endpoint */
++ val = ep_num << UDC_CSR_NE_NUM_OFS | ep_in << UDC_CSR_NE_DIR_OFS |
++ ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
++ UDC_CSR_NE_TYPE_OFS) |
++ (cfg->cur_cfg << UDC_CSR_NE_CFG_OFS) |
++ (cfg->cur_intf << UDC_CSR_NE_INTF_OFS) |
++ (cfg->cur_alt << UDC_CSR_NE_ALT_OFS) |
++ le16_to_cpu(desc->wMaxPacketSize) <<
++ UDC_CSR_NE_MAX_PKT_OFS;
++
++ if (ep_in)
++ ioh_udc_write_csr(val, (u32) (ioh_udc_base + UDC_CSR_ADDR +
++ (ep_num * 2) * 4));
++ else
++ ioh_udc_write_csr(val, (u32) (ioh_udc_base + UDC_CSR_ADDR +
++ (ep_num * 2 + 1) * 4));
++
++ IOH_DEBUG("ioh_udc_ep_enable: Endpoint register = 0x%08x", val);
++}
++
++/* Resets endpoint */
++/*!@ingroup UDC_HALLayerAPI
++ * @fn ioh_udc_ep_disable(struct ioh_udc_ep_regs __iomem *regs)
++ * @remarks The following actions are performed:
++ *- If the endpoint is IN,
++ * - Set the bit 1 (Flush field) and clear other bit of endpoint control
++ * register
++ * - Set the bit 7 (SNAK field) and clear other bit of the endpoint control
++ * register
++ * - Set the bit 6 (IN field) of the endpoint status register
++ *- Otherwise
++ * - Set the bit 7 (SNAK field) and clear other bit of the endpoint control
++ * register
++ *- Initialize the data descriptor pointer to 0 using
++ * ioh_udc_ep_set_ddptr API
++ * @param regs [IN] Reference to structure ioh_udc_ep_regs
++ * @retval none
++ */
++void ioh_udc_ep_disable(struct ioh_udc_ep_regs __iomem *regs)
++{
++ if (EP_IS_IN(regs)) {
++ /* flush the fifo */
++ IOH_WRITE32(1 << UDC_EPCTL_F , (u32)&regs->epctl);
++ /* set NAK */
++ IOH_WRITE32(1 << UDC_EPCTL_SNAK , (u32)&regs->epctl);
++
++ IOH_SET_ADDR_BIT((u32)&regs->epsts, 1 << UDC_EPSTS_IN);
++ } else {
++ /* set NAK */
++ IOH_WRITE32(1 << UDC_EPCTL_SNAK , (u32)&regs->epctl);
++ }
++ /* reset desc pointer */
++ IOH_WRITE32(0, (u32)&regs->desptr);
++}
+--- /dev/null
++++ b/drivers/usb/gadget/pch_udc_hal.h
+@@ -0,0 +1,1829 @@
++ /*!
++ * @file ioh_udc_hal.h
++ * @brief This file contains the declarations for all HAL layer APIs.
++ * It also
++ * lists the various macros used by HAL layer.
++ *
++ * The IOH UDC is a USB High speed DMA capable USB device controller.
++ * It provides 4 IN and 4 OUT endpoints (control, bulk isochronous or
++ * interrupt type).
++ *
++ * The IOH USB device controller driver provides required interface
++ * to the USB gadget framework for accessing the IOH USB device hardware.
++ *
++ * @version 0.96
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++ /*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 2/26/2010
++ * modified:
++ *
++ */
++
++#ifndef _IOH_UDC_HAL_H_
++#define _IOH_UDC_HAL_H_
++
++#include <linux/types.h>
++
++/*! @defgroup UDC */
++
++/*! @defgroup UDC_Global
++@ingroup UDC
++@brief This group describes the global entities within
++ the module.
++@remarks This group includes all the global data structures
++ used within the modules. These are mainly used to
++ store the device related information, so that it can
++ be used by other functions of the modules.
++<hr>
++*/
++
++/*! @defgroup UDC_PCILayer
++@ingroup UDC
++@brief This group describes the PCI layer interface
++ functionalities.
++@remarks This group contains the functions and data structures
++ that are used to interface the module with PCI Layer
++ subsystem of the Kernel.
++<hr>
++*/
++
++/*! @defgroup UDC_InterfaceLayer
++@ingroup UDC
++@brief This group describes the Driver interface functionalities.
++@remarks This group contains the data structures and functions used
++ to interface the module driver with the kernel subsystem.
++<hr>
++*/
++
++/*! @defgroup UDC_HALLayer
++@ingroup UDC
++@brief This group describes the hardware specific functionalities.
++@remarks This group contains the functions and data structures used
++ by the module to communicate with the hardware. These
++ functions are device specific and designed according to the
++ device specifications.
++<hr>
++*/
++
++/*! @defgroup UDC_Utilities
++@ingroup UDC
++@brief This group describes the utility functionalities.
++@remarks This group contains the functions and data structures used
++ to assist the other functionalities in their operations.
++<hr>
++*/
++
++/*! @defgroup UDC_PCILayerAPI
++@ingroup UDC_PCILayer
++@brief This group contains the API(functions) used as the PCI
++ interface between the Kernel subsystem and the module.
++<hr>
++*/
++
++/*! @defgroup UDC_PCILayerFacilitators
++@ingroup UDC_PCILayer
++@brief This group contains the data structures used by the PCI
++ Layer APIs for their functionalities.
++<hr>
++*/
++
++/*! @defgroup UDC_InterfaceLayerAPI
++@ingroup UDC_InterfaceLayer
++@brief This group contains the API(functions) used as the Driver
++
++<hr>
++*/
++
++/*! @defgroup UDC_InterfaceLayerFacilitators
++@ingroup UDC_InterfaceLayer
++@brief This group contains the data structures used by the Driver
++ interface APIs for their functionalities.
++<hr>
++*/
++
++/*! @defgroup UDC_HALLayerAPI
++@ingroup UDC_HALLayer
++@brief This group contains the APIs(functions) used to interact with
++ the hardware. These APIs act as an interface between the
++ hardware and the other driver functions.
++<hr>
++*/
++
++/*! @defgroup UDC_UtilitiesAPI
++@ingroup UDC_Utilities
++@brief This group contains the APIs(functions) used by other functions
++ in their operations.
++<hr>
++*/
++
++
++/* Device Config Register */
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCFG_ADDR
++@brief Address offset of Device Configuration Register
++*/
++#define UDC_DEVCFG_ADDR 0x400
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCFG_SET_DESC
++@brief Bit position of SET_DESC field in
++ Device configuration register
++*/
++#define UDC_DEVCFG_SET_DESC 18
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCFG_CSR_PRG
++@brief Bit position of CSR_PRG field in
++ Device configuration register
++*/
++
++#define UDC_DEVCFG_CSR_PRG 17
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCFG_HALT_STATUS
++@brief Bit position of HALT_STATUS field in
++ Device configuration register.
++*/
++#define UDC_DEVCFG_HALT_STATUS 16
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCFG_STATUS1
++@brief Bit position of STATUS1 field in
++ Device configuration register.
++*/
++#define UDC_DEVCFG_STATUS1 8
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCFG_STATUS
++@brief Bit position of STATUS field in
++ Device configuration register.
++*/
++#define UDC_DEVCFG_STATUS 7
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCFG_DIR
++@brief Bit position of DIR field in
++ Device configuration register.
++*/
++#define UDC_DEVCFG_DIR 6
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCFG_PI
++@brief Bit position of PI field in
++ Device configuration register.
++*/
++#define UDC_DEVCFG_PI 5
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCFG_SS
++@brief Bit position of SS field in
++ Device configuration register.
++*/
++#define UDC_DEVCFG_SS 4
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCFG_SP
++@brief Bit position of SP field in
++ Device configuration register.
++*/
++#define UDC_DEVCFG_SP 3
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCFG_RWKP
++@brief Bit position of RWKP field in
++ Device configuration register.
++*/
++#define UDC_DEVCFG_RWKP 2
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCFG_SPD_MASK
++@brief Mask for SPD bits in
++ Device configuration register.
++*/
++#define UDC_DEVCFG_SPD_MASK 0x3
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCFG_SPD_OFS
++*/
++#define UDC_DEVCFG_SPD_OFS 0
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCFG_SPD_HS
++*/
++#define UDC_DEVCFG_SPD_HS 0x0
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCFG_SPD_FS
++*/
++#define UDC_DEVCFG_SPD_FS 0x1
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCFG_SPD_LS
++*/
++#define UDC_DEVCFG_SPD_LS 0x2
++/*#define UDC_DEVCFG_SPD_FS 0x3*/
++
++/* Device Control Register */
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCTL_ADDR
++@brief Address offset for Device control register
++*/
++#define UDC_DEVCTL_ADDR 0x404
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCTL_THLEN_MASK
++@brief Mask for Threshold length field of
++ Device control register.
++*/
++#define UDC_DEVCTL_THLEN_MASK 0xff000000
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCTL_THLEN_OFS
++*/
++#define UDC_DEVCTL_THLEN_OFS 24
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCTL_BRLEN_MASK
++@brief Mask for Burst length field of
++ Device control register.
++*/
++#define UDC_DEVCTL_BRLEN_MASK 0x00ff0000
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCTL_BRLEN_OFS
++@brief Starting bit position of
++ Burst length field in Device
++ control register.
++*/
++#define UDC_DEVCTL_BRLEN_OFS 16
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCTL_CSR_DONE
++@brief The bit position of CSR_DONE
++ field in Device control register.
++*/
++#define UDC_DEVCTL_CSR_DONE 13
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCTL_DEVNAK
++@brief The bit position of DEVNAK
++ field in Device control register.
++*/
++#define UDC_DEVCTL_DEVNAK 12
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCTL_SD
++@brief The bit position of SD field in
++ Device control register.
++*/
++#define UDC_DEVCTL_SD 10
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCTL_MODE
++@brief The bit position of MODE
++ field in Device control register.
++*/
++#define UDC_DEVCTL_MODE 9
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCTL_BREN
++@brief The bit position of BREN
++ field in Device control register.
++*/
++#define UDC_DEVCTL_BREN 8
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCTL_THE
++@brief The bit position of THE
++ field in Device control register.
++*/
++#define UDC_DEVCTL_THE 7
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCTL_BF
++@brief The bit position of BF
++ field in Device control register.
++*/
++#define UDC_DEVCTL_BF 6
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCTL_BE
++@brief The bit position of BE
++ field in Device control register.
++*/
++#define UDC_DEVCTL_BE 5
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCTL_DU
++@brief The bit position of DU
++ field in Device control register.
++*/
++#define UDC_DEVCTL_DU 4
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCTL_TDE
++@brief The bit position of TDE
++ field in Device control register.
++*/
++#define UDC_DEVCTL_TDE 3
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCTL_RDE
++@brief The bit position of RDE
++ field in Device control register.
++*/
++#define UDC_DEVCTL_RDE 2
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVCTL_RES
++@brief The bit position of RES
++ field in Device control register.
++*/
++#define UDC_DEVCTL_RES 0
++
++/*! @ingroup UDC_HALLayer
++@def IOH_UDC_BRLEN
++@brief Specifies the Burst Length
++*/
++#define IOH_UDC_BRLEN 0xF /* Burst length */
++
++/*! @ingroup UDC_HALLayer
++@def IOH_UDC_THLEN
++@brief Specifies the Threshold Length
++*/
++#define IOH_UDC_THLEN 0x1F /* Threshold length */
++
++/*! @ingroup UDC_HALLayer
++@def DMA_DIR_RX
++@brief Specifies DMA for data receive
++*/
++#define DMA_DIR_RX 1
++
++/*! @ingroup UDC_HALLayer
++@def DMA_DIR_TX
++@brief Specifies DMA for data transmit
++*/
++#define DMA_DIR_TX 2
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DMA_MAXPACKET
++@brief Specifies maximum packet size for DMA
++*/
++#define UDC_DMA_MAXPACKET 65536
++
++/* Device Status Register */
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVSTS_ADDR
++@brief The address offset for Device Status register.
++*/
++#define UDC_DEVSTS_ADDR 0x408
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVSTS_TS_MASK
++@brief Mask for TS field of
++ Device status register
++*/
++#define UDC_DEVSTS_TS_MASK 0xfffc0000
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVSTS_TS_OFS
++@brief Starting bit position of TS field of
++ Device status register
++*/
++#define UDC_DEVSTS_TS_OFS 18
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVSTS_RWKPST
++@brief Bit position of RWKPST field of
++ Device status register
++*/
++#define UDC_DEVSTS_RWKPST 17
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVSTS_PHY_ERROR
++@brief Bit position of PHY_ERROR field of
++ Device status register
++*/
++#define UDC_DEVSTS_PHY_ERROR 16
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVSTS_RXFIFO_EMPTY
++@brief Bit position of RXFIFO_EMPTY field of
++ Device status register
++*/
++#define UDC_DEVSTS_RXFIFO_EMPTY 15
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVSTS_ENUM_SPEED_MASK
++@brief Mask for SPEED field of
++ Device status register
++*/
++#define UDC_DEVSTS_ENUM_SPEED_MASK 0x00006000
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVSTS_ENUM_SPEED_OFS
++@brief Starting bit position of SPEED field
++ of Device status register
++*/
++#define UDC_DEVSTS_ENUM_SPEED_OFS 13
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVSTS_ENUM_SPEED_FULL
++@brief Specifies value for maximum speed for
++ SPEED field of Device status register
++*/
++#define UDC_DEVSTS_ENUM_SPEED_FULL 1
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVSTS_ENUM_SPEED_HIGH
++@brief Specifies value for high speed for
++ SPEED field of Device status register
++*/
++#define UDC_DEVSTS_ENUM_SPEED_HIGH 0
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVSTS_ENUM_SPEED_LOW
++@brief Specifies value for low speed for
++ SPEED field of Device status register
++*/
++#define UDC_DEVSTS_ENUM_SPEED_LOW 2
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVSTS_ENUM_SPEED_FULLX
++@brief Specifies value for full speed for
++ SPEED field of Device status register
++*/
++#define UDC_DEVSTS_ENUM_SPEED_FULLX 3
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVSTS_SUSP
++@brief Bit position of SUSP field of
++ Device status register.
++*/
++#define UDC_DEVSTS_SUSP 12
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVSTS_ALT_MASK
++@brief Mask for ALT field of
++ Device status register.
++*/
++#define UDC_DEVSTS_ALT_MASK 0x00000f00
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVSTS_ALT_OFS
++@brief Starting bit position of
++ ALT field of Device status register.
++*/
++#define UDC_DEVSTS_ALT_OFS 8
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVSTS_INTF_MASK
++@brief Mask for INTF field of
++ Device status register.
++*/
++#define UDC_DEVSTS_INTF_MASK 0x000000f0
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVSTS_INTF_OFS
++@brief The starting bit position for INTF field
++ in Device status register.
++*/
++#define UDC_DEVSTS_INTF_OFS 4
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVSTS_CFG_MASK
++@brief Mask for CFG field in
++ Device status register.
++*/
++#define UDC_DEVSTS_CFG_MASK 0x0000000f
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVSTS_CFG_OFS
++@brief Starting bit position for CFG field
++ in Device status register.
++*/
++#define UDC_DEVSTS_CFG_OFS 0
++
++
++/* Device Interrupt Register */
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVINT_ADDR
++@brief The address offset for
++ Device interrupt register
++*/
++#define UDC_DEVINT_ADDR 0x40c
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVINT_RWKP
++@brief The bit position for
++ RWKP field in Device interrupt register
++*/
++#define UDC_DEVINT_RWKP 7
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVINT_ENUM
++@brief Bit position for ENUM field in
++ Device interrupt register
++*/
++#define UDC_DEVINT_ENUM 6
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVINT_SOF
++@brief Bit position for SOF field in
++ Device interrupt register
++*/
++#define UDC_DEVINT_SOF 5
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVINT_US
++@brief Bit position for US field in
++ Device interrupt register
++*/
++#define UDC_DEVINT_US 4
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVINT_UR
++@brief Bit position for UR field in
++ Device interrupt register
++*/
++#define UDC_DEVINT_UR 3
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVINT_ES
++@brief Bit position for ES field in
++ Device interrupt register
++*/
++#define UDC_DEVINT_ES 2
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVINT_SI
++@brief Bit position for SI field in
++ Device interrupt register
++*/
++#define UDC_DEVINT_SI 1
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVINT_SC
++@brief Bit position for SC field in
++ Device interrupt register
++*/
++#define UDC_DEVINT_SC 0
++
++/* Device Interrupt Mask Register */
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVINT_MSK_ADDR
++@brief The address offset for
++ Device interrupt mask register
++*/
++#define UDC_DEVINT_MSK_ADDR 0x410
++
++/*! @ingroup UDC_HALLayer
++@def UDC_DEVINT_MSK
++@brief Interrupt Mask
++*/
++#define UDC_DEVINT_MSK 0x7f
++
++/* Endpoint Interrupt Register */
++/*! @ingroup UDC_HALLayer
++@def UDC_EPINT_ADDR
++@brief The address offset for
++ Endpoint interrupt register
++*/
++#define UDC_EPINT_ADDR 0x414
++
++/*! @ingroup UDC_HALLayer
++@def UDC_EPINT_OUT_MASK
++@brief Mask for OUT field in
++ Endpoint interrupt register
++*/
++#define UDC_EPINT_OUT_MASK 0xffff0000
++
++/*! @ingroup UDC_HALLayer
++@def UDC_EPINT_OUT_OFS
++@brief Starting bit position for OUT field of
++ Endpoint interrupt register
++*/
++#define UDC_EPINT_OUT_OFS 16
++
++/*! @ingroup UDC_HALLayer
++@def UDC_EPINT_IN_MASK
++@brief Mask for IN field in
++ Endpoint interrupt register
++*/
++#define UDC_EPINT_IN_MASK 0x0000ffff
++/*! @ingroup UDC_HALLayer
++@def UDC_EPINT_IN_OFS
++@brief Starting bit position for IN field of
++ Endpoint interrupt register
++*/
++#define UDC_EPINT_IN_OFS 0
++
++/*! @ingroup UDC_HALLayer
++@def UDC_EPINT_IN_EP0
++@brief Bit position for IN_EP0 field in
++ Endpoint interrupt register
++*/
++#define UDC_EPINT_IN_EP0 0
++
++/*! @ingroup UDC_HALLayer
++@def UDC_EPINT_IN_EP1
++@brief Bit position for IN_EP1 field in
++ Endpoint interrupt register
++*/
++#define UDC_EPINT_IN_EP1 1
++
++/*! @ingroup UDC_HALLayer
++@def UDC_EPINT_IN_EP2
++@brief Bit position for IN_EP2 field in
++ Endpoint interrupt register
++*/
++#define UDC_EPINT_IN_EP2 2
++
++/*! @ingroup UDC_HALLayer
++@def UDC_EPINT_IN_EP3
++@brief Bit position for IN_EP3 field in
++ Endpoint interrupt register
++*/
++#define UDC_EPINT_IN_EP3 3
++
++/*! @ingroup UDC_HALLayer
++@def UDC_EPINT_OUT_EP0
++@brief Bit position for OUT_EP0 field in
++ Endpoint interrupt register
++*/
++#define UDC_EPINT_OUT_EP0 16
++
++/*! @ingroup UDC_HALLayer
++@def UDC_EPINT_OUT_EP1
++@brief Bit position for OUT_EP1 field in
++ Endpoint interrupt register
++*/
++#define UDC_EPINT_OUT_EP1 17
++
++/*! @ingroup UDC_HALLayer
++@def UDC_EPINT_OUT_EP2
++@brief Bit position for OUT_EP2 field in
++ Endpoint interrupt register
++*/
++#define UDC_EPINT_OUT_EP2 18
++
++/*! @ingroup UDC_HALLayer
++@def UDC_EPINT_OUT_EP3
++@brief Bit position for OUT_EP3 field in
++ Endpoint interrupt register
++*/
++#define UDC_EPINT_OUT_EP3 19
++
++/*! @ingroup UDC_HALLayer
++@def UDC_EPINT_EP0_ENABLE_MSK
++@brief Mask to enable EP0 interrupt in
++ Endpoint interrupt register
++*/
++#define UDC_EPINT_EP0_ENABLE_MSK 0x000e000e
++
++/* Endpoint Interrupt Mask Register -----------------------------------------*/
++/*! @ingroup UDC_HALLayer
++@def UDC_EPINT_MSK_ADDR
++@brief Address offset for Endpoint Interrupt Mask Register
++*/
++#define UDC_EPINT_MSK_ADDR 0x418
++
++/*! @ingroup UDC_HALLayer
++@def UDC_EPINT_OUT_MSK_MASK
++@brief Mask for OUT_MSK field of
++ Endpoint Interrupt Mask Register
++*/
++#define UDC_EPINT_OUT_MSK_MASK 0xffff0000
++
++/*! @ingroup UDC_HALLayer
++@def UDC_EPINT_OUT_MSK_OFS
++@brief Starting bit position for OUT_MSK field of
++ Endpoint Interrupt Mask Register
++*/
++#define UDC_EPINT_OUT_MSK_OFS 16
++
++/*! @ingroup UDC_HALLayer
++@def UDC_EPINT_IN_MSK_MASK
++@brief Mask for IN_MSK field of
++ Endpoint Interrupt Mask Register
++*/
++#define UDC_EPINT_IN_MSK_MASK 0x0000ffff
++
++/*! @ingroup UDC_HALLayer
++@def UDC_EPINT_IN_MSK_OFS
++@brief Starting bit position for IN_MSK field of
++ Endpoint Interrupt Mask Register
++*/
++#define UDC_EPINT_IN_MSK_OFS 0
++
++/*! @ingroup UDC_HALLayer
++@def UDC_EPINT_MSK_DISABLE_ALL
++@brief Mask to disable all interrupts in
++ Endpoint Interrupt Mask Register
++*/
++#define UDC_EPINT_MSK_DISABLE_ALL 0xffffffff
++
++/* mask non-EP0 endpoints */
++/*! @ingroup UDC_HALLayer
++@def UDC_EPDATAINT_MSK_DISABLE
++@brief mask non-EP0 endpoints
++*/
++#define UDC_EPDATAINT_MSK_DISABLE 0xfffefffe
++
++/* mask all dev interrupts */
++/*! @ingroup UDC_HALLayer
++@def UDC_DEV_MSK_DISABLE
++@brief mask all dev interrupts
++*/
++#define UDC_DEV_MSK_DISABLE 0x7f
++
++/* UDC CSR Busy status Register -----------------------------------------*/
++/*!@ingroup UDC_HALLayer
++@def IOH_UDC_CSR_BUSY_ADDR
++@brief Address offset for UDC CSR Busy status Register
++*/
++#define IOH_UDC_CSR_BUSY_ADDR 0x4f0
++
++/*!@ingroup UDC_HALLayer
++@def IOH_UDC_CSR_BUSY
++@brief Bit position of CSR field in
++ UDC CSR Busy status Register
++*/
++#define IOH_UDC_CSR_BUSY 1
++
++/* SOFT RESET Register ------------------------------------------------------*/
++/*!@ingroup UDC_HALLayer
++@def IOH_UDC_SRST_ADDR
++@brief Address offset for UDC Soft reset Register
++*/
++#define IOH_UDC_SRST_ADDR 0x4fc
++
++/*!@ingroup UDC_HALLayer
++@def IOH_UDC_PSRST
++@brief Bit position of PSRST field in
++ UDC Soft Reset Register
++*/
++#define IOH_UDC_PSRST 1
++
++/*!@ingroup UDC_HALLayer
++@def IOH_UDC_SRST
++@brief Bit position of SRST field in
++ UDC Soft Reset Register
++*/
++#define IOH_UDC_SRST 0
++
++/* Endpoint-specific CSR's --------------------------------------------------*/
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPREGS_ADDR
++@brief address offset for Endpoint-specific CSR
++*/
++#define UDC_EPREGS_ADDR 0x0
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPIN_REGS_ADDR
++@brief address offset for EPIN register
++*/
++#define UDC_EPIN_REGS_ADDR 0x0
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPOUT_REGS_ADDR
++@brief address offset for EPOUT register
++*/
++#define UDC_EPOUT_REGS_ADDR 0x200
++
++/*!@ingroup UDC_HALLayer
++@def EP_IS_IN(ep)
++*/
++#define EP_IS_IN(ep) (((u32)(ep)) < (ioh_udc_base + UDC_EPOUT_REGS_ADDR))
++
++/*!@ingroup UDC_HALLayer
++@def EP_NUM(ep)
++*/
++#define EP_NUM(ep) ((((u32)(ep) - (ioh_udc_base +\
++ UDC_EPREGS_ADDR)) / 0x20) & 0xf)
++
++/* Endpoint Control Registers */
++/*!@ingroup UDC_HALLayer
++@def UDC_EPCTL_ADDR
++@brief Address offset for Endpoint Control Register
++*/
++#define UDC_EPCTL_ADDR 0x0
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPCTL_RRDY
++@brief Bit position of RRDY field in
++ Endpoint Control Register
++*/
++#define UDC_EPCTL_RRDY 9
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPCTL_CNAK
++@brief Bit position of CNAK field in
++ Endpoint Control Register
++*/
++#define UDC_EPCTL_CNAK 8
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPCTL_SNAK
++@brief Bit position of SNAK field in
++ Endpoint Control Register
++*/
++#define UDC_EPCTL_SNAK 7
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPCTL_NAK
++@brief Bit position of NAK field in
++ Endpoint Control Register
++*/
++#define UDC_EPCTL_NAK 6
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPCTL_ET_MASK
++@brief Mask for ET field in
++ Endpoint Control Register
++*/
++#define UDC_EPCTL_ET_MASK 0x00000030
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPCTL_ET_OFS
++@brief Starting bit position for ET field in
++ Endpoint Control Register
++*/
++#define UDC_EPCTL_ET_OFS 4
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPCTL_ET_CONTROL
++@brief Value for ET field in
++ Endpoint Control Register
++*/
++#define UDC_EPCTL_ET_CONTROL 0
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPCTL_ET_ISO
++@brief Value for ET field in
++ Endpoint Control Register
++*/
++#define UDC_EPCTL_ET_ISO 1
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPCTL_ET_BULK
++@brief Value for ET field in
++ Endpoint Control Register
++*/
++#define UDC_EPCTL_ET_BULK 2
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPCTL_ET_INTERRUPT
++@brief Value for ET field in
++ Endpoint Control Register
++*/
++#define UDC_EPCTL_ET_INTERRUPT 3
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPCTL_MRXFLUSH
++@brief Bit position for MRXFLUSH in
++ Endpoint Control Register
++*/
++#define UDC_EPCTL_MRXFLUSH 12
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPCTL_P
++@brief Bit position for P in
++ Endpoint Control Register
++*/
++#define UDC_EPCTL_P 3
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPCTL_SN
++@brief Bit position for SN in
++ Endpoint Control Register
++*/
++#define UDC_EPCTL_SN 2
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPCTL_F
++@brief Bit position for F in
++ Endpoint Control Register
++*/
++#define UDC_EPCTL_F 1
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPCTL_S
++@brief Bit position for S in
++ Endpoint Control Register
++*/
++#define UDC_EPCTL_S 0
++
++/* Endpoint Status Registers */
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPSTS_ADDR
++@brief Address offset for Endpoint Status Register
++*/
++#define UDC_EPSTS_ADDR 0x4
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPSTS_XFERDONE
++@brief Bit position for XFERDONE in
++ Endpoint Status Register
++*/
++#define UDC_EPSTS_XFERDONE 27
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPSTS_RSS
++@brief Bit position for RSS in
++ Endpoint Status Register
++*/
++#define UDC_EPSTS_RSS 26
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPSTS_RCS
++@brief Bit position for RCS in
++ Endpoint Status Register
++*/
++#define UDC_EPSTS_RCS 25
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPSTS_TXEMPTY
++@brief Bit position for TXEMPTY in
++ Endpoint Status Register
++*/
++#define UDC_EPSTS_TXEMPTY 24
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPSTS_ISOINDONE
++@brief Bit position for ISOINDONE in
++ Endpoint Status Register
++*/
++#define UDC_EPSTS_ISOINDONE 23
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPSTS_RX_PKT_SIZE_MASK
++@brief Mask for RX_PKT_SIZE field in
++ Endpoint Status Register
++*/
++#define UDC_EPSTS_RX_PKT_SIZE_MASK 0x007ff800
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPSTS_RX_PKT_SIZE_OFS
++@brief Starting offset for RX_PKT_SIZE field in
++ Endpoint Status Register
++*/
++#define UDC_EPSTS_RX_PKT_SIZE_OFS 11
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPSTS_TDC
++@brief Bit position for TDC field in
++ Endpoint Status Register
++*/
++#define UDC_EPSTS_TDC 10
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPSTS_HE
++@brief Bit position for HE field in
++ Endpoint Status Register
++*/
++#define UDC_EPSTS_HE 9
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPSTS_MRXFIFO_EMP
++@brief Bit position for MRXFIFO EMPTY field in
++ Endpoint Status Register
++*/
++#define UDC_EPSTS_MRXFIFO_EMP 8
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPSTS_BNA
++@brief Bit position for BNA field in
++ Endpoint Status Register
++*/
++#define UDC_EPSTS_BNA 7
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPSTS_IN
++@brief Bit position for IN field in
++ Endpoint Status Register
++*/
++#define UDC_EPSTS_IN 6
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPSTS_OUT_MASK
++@brief Mask for OUT field in
++ Endpoint Status Register
++*/
++#define UDC_EPSTS_OUT_MASK 0x00000030
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPSTS_OUT_OFS
++@brief Starting bit position for OUT field in
++ Endpoint Status Register
++*/
++#define UDC_EPSTS_OUT_OFS 4
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPSTS_OUT_DATA
++@brief Value for OUT field in
++ Endpoint Status Register
++*/
++#define UDC_EPSTS_OUT_DATA 1
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPSTS_OUT_DATA_CLEAR
++@brief Clear OUT field in
++ Endpoint Status Register
++*/
++#define UDC_EPSTS_OUT_DATA_CLEAR 0x10
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPSTS_OUT_SETUP
++@brief Setup value for OUT field in
++ Endpoint Status Register
++*/
++#define UDC_EPSTS_OUT_SETUP 2
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPSTS_OUT_SETUP_CLEAR
++*/
++#define UDC_EPSTS_OUT_SETUP_CLEAR 0x20
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPSTS_OUT_CLEAR
++*/
++#define UDC_EPSTS_OUT_CLEAR 0x30
++
++/* Endpoint Buffer Size IN/ Receive Packet Frame Number OUT Registers */
++/*!@ingroup UDC_HALLayer
++@def UDC_EPIN_BUFF_SIZE_ADDR
++@brief Address offset for Endpoint Buffer Size IN register
++*/
++#define UDC_EPIN_BUFF_SIZE_ADDR 0x8
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPOUT_FRAME_NUMBER_ADDR
++@brief Address offset for Receive Packet Frame Number OUT Register
++*/
++#define UDC_EPOUT_FRAME_NUMBER_ADDR 0x8
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPIN_BUFF_SIZE_MASK
++@brief Mask for EPIN buffer size field in
++ Endpoint Buffer Size IN register
++*/
++#define UDC_EPIN_BUFF_SIZE_MASK 0x0000ffff
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPIN_BUFF_SIZE_OFS
++@brief Starting bit position for EPIN buffer size field in
++ Endpoint Buffer Size IN register
++*/
++#define UDC_EPIN_BUFF_SIZE_OFS 0
++/* EP0in txfifo = 256 bytes*/
++/*!@ingroup UDC_HALLayer
++@def UDC_EP0IN_BUFF_SIZE
++@brief EPIN0 Buffer Size
++*/
++#define UDC_EP0IN_BUFF_SIZE 64
++
++/* EP0in fullspeed txfifo = 128 bytes*/
++/*!@ingroup UDC_HALLayer
++@def UDC_FS_EPIN0_BUFF_SIZE
++@brief EPIN0 Buffer Size
++*/
++#define UDC_FS_EPIN0_BUFF_SIZE 32
++
++/* fifo size mult = fifo size / max packet */
++/*!@ingroup UDC_HALLayer
++@def UDC_EPIN_BUFF_SIZE_MULT
++*/
++#define UDC_EPIN_BUFF_SIZE_MULT 2
++
++/* EPin data fifo size = 2048 bytes DOUBLE BUFFERING */
++/*!@ingroup UDC_HALLayer
++@def UDC_EPIN_BUFF_SIZE
++@brief Buffer size
++*/
++#define UDC_EPIN_BUFF_SIZE 512
++/* EPin small INT data fifo size = 128 bytes */
++/*!@ingroup UDC_HALLayer
++@def UDC_EPIN_SMALLINT_BUFF_SIZE
++@brief Buffer size
++*/
++#define UDC_EPIN_SMALLINT_BUFF_SIZE 32
++
++/* EPin fullspeed data fifo size = 128 bytes DOUBLE BUFFERING */
++/*!@ingroup UDC_HALLayer
++@def UDC_FS_EPIN_BUFF_SIZE
++@brief Buffer size
++*/
++#define UDC_FS_EPIN_BUFF_SIZE 32
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPOUT_FRAME_NUMBER_MASK
++@brief Mask for frame number
++*/
++#define UDC_EPOUT_FRAME_NUMBER_MASK 0x0000ffff
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPOUT_FRAME_NUMBER_OFS
++@brief Starting bit position for frame number
++*/
++#define UDC_EPOUT_FRAME_NUMBER_OFS 0
++
++/* Endpoint Buffer Size OUT/Max Packet Size Registers -----------------------*/
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPOUT_BUFF_SIZE_ADDR
++@brief Address offset for Endpoint Buffer Size OUT Register
++*/
++#define UDC_EPOUT_BUFF_SIZE_ADDR 0x0c
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EP_MAX_PKT_SIZE_ADDR
++@brief Address offset for Max Packet Size Register
++*/
++#define UDC_EP_MAX_PKT_SIZE_ADDR 0x0c
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPOUT_BUFF_SIZE_MASK
++@brief Mask for EPOUT buffer size field
++*/
++#define UDC_EPOUT_BUFF_SIZE_MASK 0xffff0000
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EPOUT_BUFF_SIZE_OFS
++@brief Starting bit position for EPOUT buffer size field
++*/
++#define UDC_EPOUT_BUFF_SIZE_OFS 16
++
++/* EP0out rxfifo = 256 bytes*/
++/*!@ingroup UDC_HALLayer
++@def UDC_EP0OUT_BUFF_SIZE
++@brief EP0 OUT Buffer Size
++*/
++#define UDC_EP0OUT_BUFF_SIZE 64
++
++/* EPout data fifo size = 2048 bytes DOUBLE BUFFERING */
++/*!@ingroup UDC_HALLayer
++@def UDC_EPOUT_BUFF_SIZE
++@brief Buffer size
++*/
++#define UDC_EPOUT_BUFF_SIZE 512
++
++/* EPout fullspeed data fifo size = 128 bytes DOUBLE BUFFERING */
++/*!@ingroup UDC_HALLayer
++@def UDC_FS_EPOUT_BUFF_SIZE
++@brief Buffer size
++*/
++#define UDC_FS_EPOUT_BUFF_SIZE 32
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EP_MAX_PKT_SIZE_MASK
++@brief Mask for EP maximum packet size
++*/
++#define UDC_EP_MAX_PKT_SIZE_MASK 0x0000ffff
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EP_MAX_PKT_SIZE_OFS
++@brief Starting bit position for EP maximum packet size
++*/
++#define UDC_EP_MAX_PKT_SIZE_OFS 0
++/* EP0in max packet size = 64 bytes */
++/*!@ingroup UDC_HALLayer
++@def UDC_EP0IN_MAX_PKT_SIZE
++@brief EP0 IN maximum packet size
++*/
++#define UDC_EP0IN_MAX_PKT_SIZE 64
++/* EP0out max packet size = 64 bytes */
++/*!@ingroup UDC_HALLayer
++@def UDC_EP0OUT_MAX_PKT_SIZE
++@brief EP0 OUT maximum packet size
++*/
++#define UDC_EP0OUT_MAX_PKT_SIZE 64
++
++/* Bulk max packet size = 512 bytes */
++/*!@ingroup UDC_HALLayer
++@def UDC_BULK_MAX_PKT_SIZE
++@brief BULK maximum packet size
++*/
++#define UDC_BULK_MAX_PKT_SIZE 512
++
++/* EP0in fullspeed max packet size = 64 bytes */
++/*!@ingroup UDC_HALLayer
++@def UDC_FS_EP0IN_MAX_PKT_SIZE
++@brief Full speed EP0 IN maximum packet size
++*/
++#define UDC_FS_EP0IN_MAX_PKT_SIZE 64
++/* EP0out fullspeed max packet size = 64 bytes */
++/*!@ingroup UDC_HALLayer
++@def UDC_FS_EP0OUT_MAX_PKT_SIZE
++@brief Full speed EP0 OUT maximum packet size
++*/
++#define UDC_FS_EP0OUT_MAX_PKT_SIZE 64
++
++/* Offset to next EP registers */
++/*!@ingroup UDC_HALLayer
++@def UDC_EP_REG_OFS
++@brief Offset to next EP registers
++*/
++#define UDC_EP_REG_OFS 0x20
++
++/* UDC_Global CSR's */
++/*!@ingroup UDC_HALLayer
++@def UDC_CSR_ADDR
++@brief Offset of CSR register
++*/
++#define UDC_CSR_ADDR 0x500
++
++/* EP NE bits */
++/* EP number */
++/*!@ingroup UDC_HALLayer
++@def UDC_CSR_NE_NUM_MASK
++*/
++#define UDC_CSR_NE_NUM_MASK 0x0000000f
++
++/*!@ingroup UDC_HALLayer
++@def UDC_CSR_NE_NUM_OFS
++*/
++#define UDC_CSR_NE_NUM_OFS 0
++
++/* EP direction */
++/*!@ingroup UDC_HALLayer
++@def UDC_CSR_NE_DIR_MASK
++@brief EP direction mask
++*/
++#define UDC_CSR_NE_DIR_MASK 0x00000010
++
++/*!@ingroup UDC_HALLayer
++@def UDC_CSR_NE_DIR_OFS
++@brief Offset for EP direction bits
++*/
++#define UDC_CSR_NE_DIR_OFS 4
++
++/* EP type */
++/*!@ingroup UDC_HALLayer
++@def UDC_CSR_NE_TYPE_MASK
++@brief Mask for EP type
++*/
++#define UDC_CSR_NE_TYPE_MASK 0x00000060
++
++/*!@ingroup UDC_HALLayer
++@def UDC_CSR_NE_TYPE_OFS
++@brief Offset for EP type bits
++*/
++#define UDC_CSR_NE_TYPE_OFS 5
++
++/* EP config number */
++
++/*!@ingroup UDC_HALLayer
++@def UDC_CSR_NE_CFG_MASK
++@brief Mask for EP config number
++*/
++#define UDC_CSR_NE_CFG_MASK 0x00000780
++
++/*!@ingroup UDC_HALLayer
++@def UDC_CSR_NE_CFG_OFS
++@brief Offset for EP config number bits
++*/
++#define UDC_CSR_NE_CFG_OFS 7
++
++/* EP interface number */
++/*!@ingroup UDC_HALLayer
++@def UDC_CSR_NE_INTF_MASK
++@brief Mask for EP interface number bits
++*/
++#define UDC_CSR_NE_INTF_MASK 0x00007800
++
++/*!@ingroup UDC_HALLayer
++@def UDC_CSR_NE_INTF_OFS
++@brief Offset for EP interface number bits
++*/
++#define UDC_CSR_NE_INTF_OFS 11
++
++/* EP alt setting */
++/*!@ingroup UDC_HALLayer
++@def UDC_CSR_NE_ALT_MASK
++@brief Mask for EP alt setting
++*/
++#define UDC_CSR_NE_ALT_MASK 0x00078000
++
++/*!@ingroup UDC_HALLayer
++@def UDC_CSR_NE_ALT_OFS
++@brief Offset for EP alt bits
++*/
++#define UDC_CSR_NE_ALT_OFS 15
++
++/* max pkt */
++/*!@ingroup UDC_HALLayer
++@def UDC_CSR_NE_MAX_PKT_MASK
++@brief Mask for max packet bits
++*/
++#define UDC_CSR_NE_MAX_PKT_MASK 0x3ff80000
++
++/*!@ingroup UDC_HALLayer
++@def UDC_CSR_NE_MAX_PKT_OFS
++@brief Offset for max packet bits
++*/
++#define UDC_CSR_NE_MAX_PKT_OFS 19
++
++/*!@ingroup UDC_HALLayer
++@def IOH_UDC_EP_NUM
++@brief Total number of EPs
++*/
++#define IOH_UDC_EP_NUM 32 /* 16 IN and 16 OUT */
++/* EP number of EP's really used */
++
++/*!@ingroup UDC_HALLayer
++@def IOH_UDC_USED_EP_NUM
++@brief number of EPs used
++*/
++#define IOH_UDC_USED_EP_NUM 4
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EP0IN_IDX
++@brief Control IN ep index
++*/
++#define UDC_EP0IN_IDX 0
++
++/*!@ingroup UDC_HALLayer
++@def UDC_EP0OUT_IDX
++@brief Control OUT ep index
++*/
++#define UDC_EP0OUT_IDX 1
++
++/*!@ingroup UDC_HALLayer
++@def IOH_UDC_EP0
++@brief number of EP0
++*/
++#define IOH_UDC_EP0 0
++
++/*!@ingroup UDC_HALLayer
++@def IOH_UDC_EP1
++@brief number of EP1
++*/
++#define IOH_UDC_EP1 1
++
++/*!@ingroup UDC_HALLayer
++@def IOH_UDC_EP2
++@brief number of EP2
++*/
++#define IOH_UDC_EP2 2
++
++/*!@ingroup UDC_HALLayer
++@def IOH_UDC_EP3
++@brief number of EP3
++*/
++#define IOH_UDC_EP3 3
++
++/* Rx fifo address and size = 2k -------------------------------------------*/
++/*!@ingroup UDC_HALLayer
++@def UDC_RXFIFO_ADDR
++@brief Address offset of Rx FIFO
++*/
++#define UDC_RXFIFO_ADDR 0x800
++
++/*!@ingroup UDC_HALLayer
++@def UDC_RXFIFO_SIZE
++@brief Rx FIFO size
++*/
++#define UDC_RXFIFO_SIZE 0x800
++
++/* Tx fifo address and size = 4k -----------------------------------------*/
++
++/*!@ingroup UDC_HALLayer
++@def UDC_TXFIFO_ADDR
++@brief Address offset of Tx FIFO
++*/
++#define UDC_TXFIFO_ADDR 0x1000
++
++/*!@ingroup UDC_HALLayer
++@def UDC_TXFIFO_SIZE
++@brief Tx FIFO size
++*/
++#define UDC_TXFIFO_SIZE 0x1000
++
++/* general constants */
++
++/*!@ingroup UDC_HALLayer
++@def UDC_DWORD_BYTES
++@brief Bytes in DWORD
++*/
++#define UDC_DWORD_BYTES 4
++
++/*!@ingroup UDC_HALLayer
++@def UDC_BITS_PER_BYTE
++@brief Bits in a byte
++*/
++#define UDC_BITS_PER_BYTE 8
++
++/*!@ingroup UDC_HALLayer
++@def UDC_BITS_PER_BYTE_SHIFT
++*/
++#define UDC_BITS_PER_BYTE_SHIFT 3
++
++/*!@ingroup UDC_HALLayer
++@def UDC_BYTE_MASK
++@brief Mask to get lower byte
++*/
++#define UDC_BYTE_MASK 0xff
++
++/* Endpoint configuration regsisters */
++/*!@ingroup UDC_HALLayer
++@struct ioh_udc_csrs_
++@brief Structure to Endpoint configuration registers
++*/
++struct ioh_udc_csrs {
++ u32 ne[IOH_UDC_USED_EP_NUM * 2];
++};
++
++/* UDC_Global registers */
++/*!@ingroup UDC_Global
++@struct ioh_udc_regs_
++@brief Structure holding values of configuration registers
++*/
++struct ioh_udc_regs {
++ u32 devcfg; /**< Device configuration register */
++ u32 devctl; /**< Device control register */
++ u32 devsts; /**< Device status register */
++ u32 devirqsts; /**< Device irq status register */
++ u32 devirqmsk; /**< Device irq mask register */
++ u32 epirqsts; /**< Endpoint irq status register */
++ u32 epirqmsk; /**< Endpoint irq mask register */
++ u32 devlpm; /**< LPM control/status register */
++};
++
++/*!@ingroup UDC_Global
++@struct ioh_udc_ep_regs__
++@brief Structure holding values of ep configuration registers
++*/
++struct ioh_udc_ep_regs {
++ u32 epctl; /**< Endpoint control register */
++ u32 epsts; /**< Endpoint status register */
++ u32 bufin_framenum; /**< buffer size in / frame number out */
++ u32 bufout_maxpkt; /**< buffer size out / maxpkt in */
++ u32 subptr; /**< setup buffer pointer */
++ u32 desptr; /**< Data descriptor pointer */
++ u32 confirm; /**< Write/Read confirmation for slave mode only */
++};
++
++#define DMA_ADDR_INVALID (~(dma_addr_t)0)
++
++/*!@ingroup UDC_Global
++@struct ioh_udc_data_dma_desc_
++@brief Structure to hold DMA descriptor information for data
++*/
++struct ioh_udc_data_dma_desc {
++ u32 status; /**< status quadlet */
++ u32 reserved;
++ u32 dataptr; /**< buffer descriptor */
++ u32 next; /**< next descriptor */
++};
++
++/*!@ingroup UDC_Global
++@struct struct ioh_udc_stp_dma_desc_
++@brief Structure to hold DMA descriptor information for control data
++*/
++struct ioh_udc_stp_dma_desc {
++ u32 status;
++ u32 reserved;
++ u32 data12; /**< first setup word */
++ u32 data34; /**< second setup word */
++};
++
++/*!@ingroup UDC_Global
++@struct ioh_udc_cfg_data
++@brief Structure to hold current configuration and interface information
++*/
++struct ioh_udc_cfg_data {
++ u16 cur_cfg; /**< current configuration in use */
++ u16 cur_intf; /**< current interface in use */
++ u16 cur_alt; /**< current alt interface in use */
++};
++
++/* DMA status definitions */
++#define IOH_UDC_BUFF_STS 0xC0000000 /**< Buffer status mask */
++#define IOH_UDC_BS_HST_RDY 0x00000000 /**< 2'b00 : Host Ready */
++#define IOH_UDC_BS_DMA_BSY 0x40000000 /**< 2'b01 : DMA Busy */
++#define IOH_UDC_BS_DMA_DONE 0x80000000 /**< 2'b10 : DMA Done */
++#define IOH_UDC_BS_HST_BSY 0xC0000000 /**< 2'b11 : HOST busy */
++
++#define IOH_UDC_RXTX_STS 0x30000000 /**< Rx/Tx Status Mask */
++#define IOH_UDC_RTS_SUCC 0x00000000 /**< Success */
++#define IOH_UDC_RTS_DESERR 0x10000000 /**< Descriptor Error */
++#define IOH_UDC_RTS_BUFERR 0x30000000 /**< Buffer Error */
++
++#define IOH_UDC_DMA_LAST 0x08000000 /**< Last Descriptor Indication */
++#define IOH_UDC_RXTX_BYTES 0x0000ffff /**< Number of Rx/Tx Bytes Mask */
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_print_regs(u32 base)
++@brief Prints UDC device registers and endpoint irq registers
++*/
++extern void ioh_udc_print_regs(u32 base);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_init(struct ioh_udc_regs *dev)
++@brief Initializes the UDC hardware
++*/
++extern void ioh_udc_init(struct ioh_udc_regs *dev);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_exit(struct ioh_udc_regs *dev)
++@brief This API will do any cleanup required for the USB device hardware.
++*/
++extern void ioh_udc_exit(struct ioh_udc_regs *dev);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_write_csr(unsigned long val, unsigned long addr)
++@brief Write to CSR register
++*/
++extern void ioh_udc_write_csr(unsigned long val, unsigned long addr);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_read_csr(unsigned long addr)
++@brief Read the content of CSR
++*/
++extern u32 ioh_udc_read_csr(unsigned long addr);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_ep_enable(struct ioh_udc_ep_regs __iomem *regs,
++ struct ioh_udc_cfg_data *cfg, struct usb_endpoint_descriptor *desc);
++@brief Enables endpoint
++*/
++extern void ioh_udc_ep_enable(struct ioh_udc_ep_regs __iomem *regs,
++ struct ioh_udc_cfg_data *cfg,
++ const struct usb_endpoint_descriptor *desc);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_ep_disable(struct ioh_udc_ep_regs __iomem *regs)
++@brief Disables endpoint
++*/
++extern void ioh_udc_ep_disable(struct ioh_udc_ep_regs __iomem *regs);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_get_frame(struct ioh_udc_regs __iomem *dev)
++@brief This API will return the current frame number
++*/
++extern int ioh_udc_get_frame(struct ioh_udc_regs __iomem *dev);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_set_dma(struct ioh_udc_regs *dev, int dir)
++@brief Enables Tx/Rx DMA
++*/
++extern void ioh_udc_set_dma(struct ioh_udc_regs *dev, int dir);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_clear_dma(struct ioh_udc_regs *dev, int dir)
++@brief Disable Tx/Rx DMA
++*/
++extern void ioh_udc_clear_dma(struct ioh_udc_regs *dev, int dir);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_rmt_wakeup(struct ioh_udc_regs __iomem *dev)
++@brief Initiates a remote wakeup
++*/
++extern void ioh_udc_rmt_wakeup(struct ioh_udc_regs __iomem *dev);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_clear_selfpowered (struct ioh_udc_regs __iomem *dev)
++@brief This API will clear the self powered feature of the device
++*/
++extern void ioh_udc_clear_selfpowered(struct ioh_udc_regs __iomem *dev);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_set_selfpowered (struct ioh_udc_regs __iomem *dev)
++@brief This API will set the self powered feature of the device
++*/
++extern void ioh_udc_set_selfpowered(struct ioh_udc_regs __iomem *dev);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_set_disconnect (struct ioh_udc_regs __iomem *dev)
++@brief This API will cause the device to enter soft disconnect state
++*/
++extern void ioh_udc_set_disconnect(struct ioh_udc_regs __iomem *dev);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_clear_disconnect (struct ioh_udc_regs __iomem *dev)
++@brief This API will get the device out of soft disconnect state.
++*/
++extern void ioh_udc_clear_disconnect(struct ioh_udc_regs __iomem *dev);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_vbus_session (struct ioh_udc_regs __iomem *dev, int is_active)
++@brief This API will be called when VBUS power is made active for the device.
++*/
++extern void ioh_udc_vbus_session(struct ioh_udc_regs __iomem *dev,
++ int is_active);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_ep_clear_stall(struct ioh_udc_ep_regs __iomem *ep)
++@brief This API will clear the STALL handshake feature of the specified endpoint
++*/
++extern void ioh_udc_ep_clear_stall(struct ioh_udc_ep_regs __iomem *ep);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_ep_set_stall(struct ioh_udc_ep_regs __iomem *ep)
++@brief This API will set the STALL handshake feature of the specified endpoint.
++*/
++extern void ioh_udc_ep_set_stall(struct ioh_udc_ep_regs __iomem *ep);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_ep_clear_nak(struct ioh_udc_ep_regs __iomem *ep)
++@brief This API will stop the endpoint from issuing NAK packets.
++*/
++extern void ioh_udc_ep_clear_nak(struct ioh_udc_ep_regs __iomem *ep);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_ep_set_nak(struct ioh_udc_ep_regs __iomem *ep)
++@brief This API will cause the endpoint to issue NAK packets.
++*/
++extern void ioh_udc_ep_set_nak(struct ioh_udc_ep_regs __iomem *ep);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_ep_fifo_flush(struct ioh_udc_ep_regs __iomem *ep, int dir)
++@brief This API will flush the FIFO of the specified endpoint.
++*/
++extern void ioh_udc_ep_fifo_flush(struct ioh_udc_ep_regs __iomem *ep, int dir);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_disable_interrupts(struct ioh_udc_regs *dev, u32 mask)
++@brief This API will disable the specified device interrupts
++*/
++extern void ioh_udc_disable_interrupts(struct ioh_udc_regs *dev, u32 mask);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_enable_interrupts(struct ioh_udc_regs *dev, u32 mask)
++@brief This API will enable specified device interrupts
++*/
++extern void ioh_udc_enable_interrupts(struct ioh_udc_regs *dev, u32 mask);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_disable_ep_interrupts(struct ioh_udc_regs *dev, u32 mask)
++@brief This API will disable the interrupts from specific endpoint.
++*/
++extern void ioh_udc_disable_ep_interrupts(struct ioh_udc_regs *dev, u32 mask);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_enable_ep_interrupts(struct ioh_udc_regs *dev, u32 mask)
++@brief This API will enable the interrupts from specific endpoint.
++*/
++extern void ioh_udc_enable_ep_interrupts(struct ioh_udc_regs *dev, u32 mask);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_set_csr_done(struct ioh_udc_regs *dev)
++@brief This API will inform the device the completion of
++ USB device programming.
++*/
++extern void ioh_udc_set_csr_done(struct ioh_udc_regs *dev);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_set_burst_length(struct ioh_udc_regs *dev, u8 len)
++@brief This API will set the length of 32bit words on a
++ single burst of DMA .
++*/
++extern void ioh_udc_set_burst_length(struct ioh_udc_regs *dev, u8 len);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_set_threshold_length(struct ioh_udc_regs *dev, u8 len)
++@brief This API will set the length of 32bit words after
++ which DMA can start
++*/
++extern void ioh_udc_set_threshold_length(struct ioh_udc_regs *dev, u8 len);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_get_speed(struct ioh_udc_regs __iomem *dev)
++@brief This API will return the current speed
++*/
++extern int ioh_udc_get_speed(struct ioh_udc_regs __iomem *dev);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_ep_set_trfr_type(struct ioh_udc_ep_regs __iomem *ep, u8 type)
++@brief This API will set the endpoint type in the endpoint control register.
++*/
++extern void ioh_udc_ep_set_trfr_type(struct ioh_udc_ep_regs __iomem *ep,
++ u8 type);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_ep_set_maxpkt(struct ioh_udc_ep_regs __iomem *ep, u32 pkt_size)
++@brief This API will set the maximum packet size for the endpoint.
++*/
++extern void ioh_udc_ep_set_maxpkt(struct ioh_udc_ep_regs __iomem *ep,
++ u32 pkt_size);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_ep_set_bufsz(struct ioh_udc_ep_regs __iomem *ep,
++ u32 buf_size, u32 ep_in)
++@brief Sets buffer size for the endpoint
++*/
++extern void ioh_udc_ep_set_bufsz(struct ioh_udc_ep_regs __iomem *ep,
++ u32 buf_size, u32 ep_in);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_ep_set_ddptr(struct ioh_udc_ep_regs __iomem *ep,
++ u32 addr)
++@brief This API will set the data descriptor pointer for the endpoint.
++*/
++extern void ioh_udc_ep_set_ddptr(struct ioh_udc_ep_regs __iomem *ep, u32 addr);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_ep_set_subptr(struct ioh_udc_ep_regs __iomem *ep, u32 addr)
++@brief Set the Setup buffer pointer for the endpoint
++*/
++extern void ioh_udc_ep_set_subptr(struct ioh_udc_ep_regs __iomem *ep, u32 addr);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_read_device_interrupts(struct ioh_udc_regs __iomem *dev)
++@brief Returns the interrupt status
++*/
++extern u32 ioh_udc_read_device_interrupts(struct ioh_udc_regs __iomem *dev);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_write_device_interrupts(struct ioh_udc_regs __iomem *dev, u32 val);
++@brief Writes Interrupts
++*/
++extern void ioh_udc_write_device_interrupts(struct ioh_udc_regs __iomem *dev,
++ u32 val);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_read_ep_interrupts(struct ioh_udc_regs __iomem *ep)
++@brief Reads endpoint interrupts
++*/
++extern u32 ioh_udc_read_ep_interrupts(struct ioh_udc_regs __iomem *ep);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_write_ep_interrupts(struct ioh_udc_regs __iomem *ep, u32 val)
++@brief Writes endpoint interrupts
++*/
++extern void ioh_udc_write_ep_interrupts(struct ioh_udc_regs __iomem *ep,
++ u32 val);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_read_device_status(struct ioh_udc_regs __iomem *dev)
++@brief Reads device status register
++*/
++extern u32 ioh_udc_read_device_status(struct ioh_udc_regs __iomem *dev);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_read_ep_control(struct ioh_udc_ep_regs __iomem *dev)
++@brief Reads the endpoint status registers.
++*/
++extern u32 ioh_udc_read_ep_control(struct ioh_udc_ep_regs __iomem *dev);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_clear_ep_control(struct ioh_udc_ep_regs __iomem *dev)
++@brief Clear the endpoint status registers.
++*/
++extern void ioh_udc_clear_ep_control(struct ioh_udc_ep_regs __iomem *ep);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_read_ep_status(struct ioh_udc_ep_regs __iomem *dev)
++@brief Reads the endpoint status registers.
++*/
++extern u32 ioh_udc_read_ep_status(struct ioh_udc_ep_regs __iomem *dev);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_ep_set_pd(struct ioh_udc_ep_regs __iomem *ep)
++@brief Set the poll demand bit for the endpoint
++*/
++extern void ioh_udc_ep_set_pd(struct ioh_udc_ep_regs __iomem *ep);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_ep_set_rrdy(struct ioh_udc_ep_regs __iomem *ep)
++@brief Set the receive ready bit for the endpoint
++*/
++extern void ioh_udc_ep_set_rrdy(struct ioh_udc_ep_regs __iomem *ep);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_ep_clear_rrdy(struct ioh_udc_ep_regs __iomem *ep)
++@brief Clear the receive ready bit for the endpoint
++*/
++extern void ioh_udc_ep_clear_rrdy(struct ioh_udc_ep_regs __iomem *ep);
++
++/*!@ingroup UDC_HALLayerAPI
++@fn ioh_udc_clear_ep_status(struct ioh_udc_ep_regs __iomem *ep, u32 stat)
++@brief Clears endpoint status register
++*/
++extern void ioh_udc_clear_ep_status(struct ioh_udc_ep_regs __iomem *ep,
++ u32 stat);
++extern int speed_fs;
++#endif /* IOH_UDC_HAL_H_ */
+--- /dev/null
++++ b/drivers/usb/gadget/pch_udc_intr.c
+@@ -0,0 +1,396 @@
++ /*!
++ * @file ioh_udc_intr.c
++ * @brief
++ * The IOH UDC is a USB High speed DMA capable USB device controller.
++ * It provides 4 IN and 4 OUT endpoints (control, bulk isochronous or
++ * interrupt type).
++ *
++ * The IOH USB device controller driver provides required interface
++ * to the USB gadget framework for accessing the IOH USB device hardware.
++ *
++ * @version 0.96
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++ /*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 2/26/2010
++ * modified:
++ *
++ */
++
++#include <linux/types.h>
++#include <linux/irq.h>
++#include <linux/device.h>
++#include <linux/usb/ch9.h>
++#include <linux/usb/gadget.h>
++/*#include <asm/io.h>*/
++#include <linux/io.h>
++#include "pch_common.h"
++#include "pch_debug.h"
++
++#include "pch_udc_hal.h"
++#include "pch_udc.h"
++
++/* received setup data */
++static union ioh_udc_setup_data setup_data;
++
++/*! @ingroup UDC_UtilitiesAPI
++ * @fn void ioh_udc_svc_ur_interrupt(struct ioh_udc_dev *dev)
++ * @brief This function handles a USB reset interrupt
++ * @param dev [@ref IN] reference to driver structure
++ * @return none
++ * @remarks The following actions are performed:
++ *- Disable Rx/TX DMA using ioh_udc_clear_dma API
++ *- Mask all endpoint interrupts by invoking ioh_udc_disable_ep_interrupts API
++ *- Clear all endpoint interrupts by invoking ioh_udc_write_ep_interrupts API
++ *- Clear all endpoint status register invoking ioh_udc_clear_ep_status API
++ *- Clear all endpoint control register by invoking ioh_udc_clear_ep_control API
++ *- Clear all endpoint Data descriptor pointer register by invoking
++ * ioh_udc_ep_set_ddptr API
++ *- Clear all endpoint UDC Endpoint register by invoking ioh_udc_write_csr API
++ *- Clear status flags
++ *- Set the all endpoint's NAK by invoking ioh_udc_ep_set_nak API
++ *- Flush the all endpoint's Tx/RxFIFO by invoking ioh_udc_ep_fifo_flush API
++ *- Disable ep0 to empty request queue by invoking empty_req_queue API
++ *- Invoke the gadget driver's fs disconnect method
++ *- Empty EP0 IN request queue using empty_req_queue API
++ * @see
++ * - ioh_udc_clear_dma
++ * - ioh_udc_disable_ep_interrupts
++ * - ioh_udc_write_ep_interrupts
++ * - ioh_udc_clear_ep_status
++ * - ioh_udc_clear_ep_control
++ * - ioh_udc_ep_set_ddptr
++ * - ioh_udc_write_csr
++ * - ioh_udc_ep_set_nak
++ * - ioh_udc_ep_fifo_flush
++ * - empty_req_queue
++ */
++void ioh_udc_svc_ur_interrupt(struct ioh_udc_dev *dev)
++{
++ struct ioh_udc_ep *ep;
++ int i;
++
++ IOH_DEBUG("USB_RESET Enter");
++
++#ifdef IOH_PRINT_REG
++ ioh_udc_print_regs((u32)dev->virt_addr);
++#endif
++
++ /* Disable DMA */
++ ioh_udc_clear_dma(dev->regs, DMA_DIR_TX);
++ ioh_udc_clear_dma(dev->regs, DMA_DIR_RX);
++ /* Mask all endpoint interrupts */
++ ioh_udc_disable_ep_interrupts(dev->regs, UDC_EPINT_MSK_DISABLE_ALL);
++ /* clear all endpoint interrupts */
++ ioh_udc_write_ep_interrupts(dev->regs, UDC_EPINT_MSK_DISABLE_ALL);
++
++ for (i = 0; i < IOH_UDC_EP_NUM; i++) {
++ ep = &dev->ep[i];
++ ioh_udc_clear_ep_status(ep->regs, 0x1F0006F0);
++ ioh_udc_clear_ep_control(ep->regs);
++ ioh_udc_ep_set_ddptr(ep->regs, 0);
++ ioh_udc_write_csr(0x00, (u32) (&dev->csr->ne[i]));
++ }
++
++ dev->stall = 0;
++ dev->prot_stall = 0;
++ dev->waiting_zlp_ack = 0;
++ dev->set_cfg_not_acked = 0;
++
++ /* disable ep to empty req queue. Skip the control EP's */
++ for (i = 0; i < (IOH_UDC_USED_EP_NUM*2); i++) {
++ ep = &dev->ep[i];
++ /* Set NAK */
++ ioh_udc_ep_set_nak(ep->regs);
++ /* Flush fifo */
++ ioh_udc_ep_fifo_flush(ep->regs , ep->in);
++ /* Complete request queue */
++ empty_req_queue(ep);
++ }
++ if (dev->driver && dev->driver->disconnect)
++ dev->driver->disconnect(&dev->gadget);
++
++ IOH_DEBUG("USB_RESET Exit");
++}
++
++/*! @ingroup UDC_UtilitiesAPI
++ * @fn ioh_udc_svc_enum_interrupt(struct ioh_udc_dev *dev)
++ * @brief This function handles a USB speed enumeration done interrupt
++ * @param dev [@ref IN] reference to driver structure
++ * @return none
++ * @remarks The following actions are performed:
++ * - Read the device status using ioh_udc_read_device_status API
++ * - Set the speed element in device structure.
++ * - Activate control endpoint using ioh_udc_activate_control_ep API
++ * - Enable EP0 interrupts using ioh_udc_enable_ep_interrupts API
++ * - Enable Rx/TX DMA using ioh_udc_set_dma API
++ * - Set receive ready using ioh_udc_ep_set_rrdy API
++ * @see
++ * - ioh_udc_read_device_status
++ * - ioh_udc_activate_control_ep
++ * - ioh_udc_enable_ep_interrupts
++ * - ioh_udc_set_dma
++ * - ioh_udc_ep_set_rrdy
++ */
++void
++ioh_udc_svc_enum_interrupt(struct ioh_udc_dev *dev)
++{
++ u32 dev_stat, dev_speed;
++ u32 speed = USB_SPEED_FULL;
++
++ dev_stat = ioh_udc_read_device_status(dev->regs);
++ dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
++ UDC_DEVSTS_ENUM_SPEED_OFS;
++
++ IOH_DEBUG("ioh_udc_svc_enum_interrupt: dev_speed = 0x%08x", dev_speed);
++
++ if (dev_speed == UDC_DEVSTS_ENUM_SPEED_HIGH) {
++ IOH_DEBUG("HighSpeed");
++ speed = USB_SPEED_HIGH;
++ } else if (dev_speed == UDC_DEVSTS_ENUM_SPEED_FULL) {
++ IOH_DEBUG("FullSpeed");
++ speed = USB_SPEED_FULL;
++ } else if (dev_speed == UDC_DEVSTS_ENUM_SPEED_LOW) {
++ IOH_DEBUG("LowSpeed?");
++ speed = USB_SPEED_LOW;
++ } else {
++ IOH_DEBUG("FullSpeed?");
++ }
++ dev->gadget.speed = speed;
++
++ ioh_udc_activate_control_ep(dev);
++
++ /* enable ep0 interrupts */
++ ioh_udc_enable_ep_interrupts(dev->regs, 1 << UDC_EPINT_IN_EP0 |
++ 1 << UDC_EPINT_OUT_EP0);
++
++ /* enable DMA */
++ ioh_udc_set_dma(dev->regs, DMA_DIR_TX);
++ ioh_udc_set_dma(dev->regs, DMA_DIR_RX);
++ ioh_udc_ep_set_rrdy(dev->ep[UDC_EP0OUT_IDX].regs);
++
++#ifdef IOH_PRINT_REG
++ ioh_udc_print_regs((u32)dev->virt_addr);
++#endif
++
++ IOH_DEBUG("EP mask set to %x",
++ IOH_READ32((u32 *)&(dev->regs->epirqmsk)));
++ IOH_DEBUG("USB_SPEED_ENUM Exit");
++}
++
++/*! @ingroup UDC_UtilitiesAPI
++ * @fn ioh_udc_svc_intf_interrupt(struct ioh_udc_dev *dev)
++ * @brief This function handles a set interface interrupt
++ * @param dev [@ref IN] reference to driver structure
++ * @return none
++ * @remarks The following actions are performed:
++ * - Read the device status using ioh_udc_read_device_status API
++ * - Update current interface in use and current alternate interface
++ * in use status from the device status information
++ * - Set device "Set Config Not ACKed" status as TRUE
++ * - Construct the usb request for gadget driver and inform it
++ * - Program the Endpoint configuration registers using ioh_udc_read_csr
++ * and ioh_udc_write_csr APIs
++ * - Clear stall bit status using ioh_udc_ep_clear_stall
++ * - Call gadget with setup data received
++ * @see
++ * - ioh_udc_read_device_status
++ * - ioh_udc_read_csr
++ * - ioh_udc_write_csr
++ * - ioh_udc_ep_clear_stall
++ */
++void
++ioh_udc_svc_intf_interrupt(struct ioh_udc_dev *dev)
++{
++ u32 reg, dev_stat = 0;
++ int i, ret;
++
++ IOH_DEBUG("SI");
++ dev_stat = ioh_udc_read_device_status(dev->regs);
++ dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
++ UDC_DEVSTS_INTF_OFS;
++ dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
++ UDC_DEVSTS_ALT_OFS;
++ IOH_DEBUG("DVSTATUS=%08x, cfg=%d, intf=%d, alt=%d", dev_stat,
++ (dev_stat & UDC_CSR_NE_CFG_MASK) >> UDC_CSR_NE_CFG_OFS,
++ dev->cfg_data.cur_intf, dev->cfg_data.cur_alt);
++
++ dev->set_cfg_not_acked = 1;
++
++ /* Construct the usb request for gadget driver and inform it */
++ memset(&setup_data, 0 , sizeof setup_data);
++ setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
++ setup_data.request.bRequestType = USB_RECIP_INTERFACE;
++ setup_data.request.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
++ setup_data.request.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
++
++ /* program the Endpoint Cfg registers */
++ for (i = 0; i < IOH_UDC_USED_EP_NUM * 2; i++) {
++ if (i == 1) { /* Only one end point cfg register */
++ reg = ioh_udc_read_csr((u32) (&dev->csr->ne[i]));
++ reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
++ (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_OFS);
++ reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
++ (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_OFS);
++ ioh_udc_write_csr(reg, (u32) (&dev->csr->ne[i]));
++ }
++ /* clear stall bits */
++ ioh_udc_ep_clear_stall(dev->ep[i].regs);
++ dev->ep[i].halted = 0;
++ }
++ dev->stall = 0;
++ spin_unlock(&dev->lock);
++ ret = dev->driver->setup(&dev->gadget, &setup_data.request);
++ spin_lock(&dev->lock);
++
++ IOH_DEBUG("EXIT -- SI");
++}
++
++/*! @ingroup UDC_UtilitiesAPI
++ * @fn ioh_udc_svc_cfg_interrupt(struct ioh_udc_dev *dev)
++ * @brief This function handles a set configuration interrupt
++ * @param dev [@ref IN] reference to driver structure
++ * @return none
++ * @remarks The following actions are performed:
++ * - Read the device status using ioh_udc_read_device_status API
++ * - Set the set_cfg_not_acked element in device structure to 1.
++ * - Update "current configuration in use" status based on the
++ * device status
++ * - Construct a usb request for gadget driver
++ * - Program the Endpoint configuration registers using
++ * ioh_udc_read_csr and ioh_udc_write_csr APIs
++ * - Clear stall bit status using ioh_udc_ep_clear_stall
++ * - Call gadget with setup data received
++ * @see
++ * - ioh_udc_read_device_status
++ * - ioh_udc_read_csr
++ * - ioh_udc_write_csr
++ * - ioh_udc_ep_clear_stall
++ */
++void
++ioh_udc_svc_cfg_interrupt(struct ioh_udc_dev *dev)
++{
++ int i, ret;
++ u32 reg, dev_stat = 0;
++
++ IOH_DEBUG("SC");
++ dev_stat = ioh_udc_read_device_status(dev->regs);
++ IOH_DEBUG("DVSTATUS=%08x, cfg=%d, intf=%d, alt=%d", dev_stat,
++ (dev_stat & UDC_DEVSTS_CFG_MASK) >> UDC_DEVSTS_CFG_OFS,
++ (dev_stat & UDC_DEVSTS_INTF_MASK) >> UDC_DEVSTS_INTF_OFS,
++ (dev_stat & UDC_DEVSTS_ALT_MASK) >> UDC_DEVSTS_ALT_OFS);
++
++ dev->set_cfg_not_acked = 1;
++
++ dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
++ UDC_DEVSTS_CFG_OFS;
++ /* make usb request for gadget driver */
++ memset(&setup_data, 0 , sizeof setup_data);
++ setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
++ setup_data.request.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
++
++ /* program the NE registers */
++ for (i = 0; i < IOH_UDC_USED_EP_NUM * 2; i++) {
++ if (i == 1) {
++ reg = ioh_udc_read_csr((u32) (&dev->csr->ne[i]));
++ reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
++ (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_OFS);
++ ioh_udc_write_csr(reg, (u32) (&dev->csr->ne[i]));
++ }
++ /* clear stall bits */
++ ioh_udc_ep_clear_stall(dev->ep[i].regs);
++ dev->ep[i].halted = 0;
++ }
++ dev->stall = 0;
++
++ /* call gadget zero with setup data received */
++ spin_unlock(&dev->lock);
++ ret = dev->driver->setup(&dev->gadget, &setup_data.request);
++ spin_lock(&dev->lock);
++
++ IOH_DEBUG("SC Exit...ret %d", ret);
++}
++
++/*! @ingroup UDC_UtilitiesAPI
++ * @fn void ioh_udc_dev_isr(struct ioh_udc_dev *dev, u32 dev_intr)
++ * @brief This function services device interrupts by invoking appropriate
++ * routines.
++ * @remarks The following actions are performed:
++ *- If USB reset interrupt status is received, invoke ioh_udc_svc_ur_interrupt
++ * function
++ *- If Enumeration done interrupt is received, invoke ioh_udc_svc_enum_interrupt
++ * function
++ *- If Set Interface interrupt is received, invoke ioh_udc_svc_intf_interrupt
++ * function
++ *- If Set Config interrupt is received, invoke ioh_udc_svc_cfg_interrupt
++ * function
++ *- If USB suspend interrupt or ES interrupt is received, invoke the
++ * API ioh_udc_rmt_wakeup
++ *- For the following interrupts, log a message in the system log:
++ * - USB Suspend interrupt
++ * - SOF token detection interrupt
++ * - ES interrupt, (IDLE > 3ms on the USB)
++ * - RWKP interrupt (Remote Wakeup)
++ * @param dev Reference to the device structure
++ * @param dev_intr The Device interrupt status.
++ * @see
++ * - ioh_udc_svc_ur_interrupt
++ * - ioh_udc_svc_enum_interrupt
++ * - ioh_udc_svc_intf_interrupt
++ * - ioh_udc_svc_cfg_interrupt
++ * @return none
++ */
++void ioh_udc_dev_isr(struct ioh_udc_dev *dev, u32 dev_intr)
++{
++ /* USB Reset Interrupt */
++ if (dev_intr & (1 << UDC_DEVINT_UR))
++ ioh_udc_svc_ur_interrupt(dev);
++
++ /* Enumeration Done Interrupt */
++ if (dev_intr & (1 << UDC_DEVINT_ENUM))
++ ioh_udc_svc_enum_interrupt(dev);
++
++ /* Set Interface Interrupt */
++ if (dev_intr & (1 << UDC_DEVINT_SI))
++ ioh_udc_svc_intf_interrupt(dev);
++
++ /* Set Config Interrupt */
++ if (dev_intr & (1 << UDC_DEVINT_SC))
++ ioh_udc_svc_cfg_interrupt(dev);
++
++ /* USB Suspend interrupt */
++ if (dev_intr & (1 << UDC_DEVINT_US))
++ IOH_DEBUG("USB_SUSPEND");
++
++ /* Clear the SOF interrupt, if enabled */
++ if (dev_intr & (1 << UDC_DEVINT_SOF))
++ IOH_DEBUG("SOF");
++
++ /* ES interrupt, IDLE > 3ms on the USB */
++ if (dev_intr & (1 << UDC_DEVINT_ES))
++ IOH_DEBUG("ES");
++
++ /* RWKP interrupt */
++ if (dev_intr & (1 << UDC_DEVINT_RWKP))
++ IOH_DEBUG("RWKP");
++
++}
+--- /dev/null
++++ b/drivers/usb/gadget/pch_udc_pci.c
+@@ -0,0 +1,549 @@
++/*!
++ * @file ioh_udc_pci.c
++ * @brief
++ * The IOH UDC is a USB High speed DMA capable USB device controller.
++ * It provides 4 IN and 4 OUT endpoints (control, bulk isochronous or
++ * interrupt type).
++ *
++ * The IOH USB device controller driver provides required interface
++ * to the USB gadget framework for accessing the IOH USB device hardware.
++ *
++ * @version 0.96
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++ /*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 2/26/2010
++ * modified:
++ *
++ */
++
++/*
++ * ioh_udc_pci.c -- IOH UDC high/full speed USB device controller
++ *
++ */
++
++
++/*!@ingroup UDC_PCILayer
++ * @def UDC_MOD_DESCRIPTION
++ * @brief USB device controller driver description.
++ */
++#define UDC_MOD_DESCRIPTION "OKISEMI IOH UDC - USB Device Controller"
++
++/*!@ingroup UDC_PCILayer
++ * @def IOH_UDC_PCI_BAR
++ * @brief Number of PCI BAR.
++ */
++#define IOH_UDC_PCI_BAR 1
++
++/* udc specific */
++#include "pch_common.h"
++#include "pch_debug.h"
++#include "pch_udc_pci.h"
++#include "pch_udc.h"
++
++static int ioh_udc_probe(struct pci_dev *pdev, const struct pci_device_id *id);
++static void ioh_udc_remove(struct pci_dev *pdev);
++static void ioh_udc_shutdown(struct pci_dev *pdev);
++static int ioh_udc_suspend(struct pci_dev *pdev, pm_message_t state);
++static int ioh_udc_resume(struct pci_dev *pdev);
++
++/* description */
++static const char mod_desc[] = UDC_MOD_DESCRIPTION;
++static const char name[] = "ioh_udc";
++
++/* pointer to device object */
++/*!@ingroup UDC_Global
++ * @brief pointer to device object
++ */
++struct ioh_udc_dev *ioh_udc;
++
++/* Speed selection flag */
++/*!@ingroup UDC_Global
++ * @brief Specifies operation speed (High or FULL) - passed as module parameter
++ */
++int speed_fs;
++
++/* module parameters */
++module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
++MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
++
++/* Tears down device */
++/*!@ingroup UDC_PCILayerAPI
++ * @fn static void gadget_release(struct device *pdev)
++ * @brief Free the gadget driver private data
++ * @remarks The main tasks performed by this method are:
++ * - Retrieve the pointer to the private data using dev_get_drvdata API.
++ * - Free the memory allocated for the device structure using kfree API.
++ * @param pdev [@ref IN] reference to struct pci_dev
++ * @return none
++ */
++static void gadget_release(struct device *pdev)
++{
++ struct ioh_udc_dev *dev = dev_get_drvdata(pdev);
++ kfree(dev);
++}
++
++/*!@ingroup UDC_PCILayerAPI
++ * @fn static void ioh_udc_remove(struct pci_dev *pdev)
++ * @brief Implements the remove routine for IOH USB device controller driver
++ * @param pdev [@ref IN] reference to struct pci_dev
++ * @remarks The main tasks performed by this method are:
++ * - Deallocate any PCI memory pool created
++ * - Release IRQ
++ * - Unmap device memory
++ * - Disable the PCI device
++ * - Unregister the device from system
++ * @return none
++ */
++static void ioh_udc_remove(struct pci_dev *pdev)
++{
++ struct ioh_udc_dev *dev = pci_get_drvdata(pdev);
++
++ IOH_DEBUG("ioh_udc_remove enter");
++ /* gadget driver must not be registered */
++ if (dev->driver != NULL)
++ IOH_LOG(KERN_ERR, "udc_pci_remove: gadget driver\
++ still bound!!!");
++
++ /* dma pool cleanup */
++ if (dev->data_requests != NULL)
++ pci_pool_destroy(dev->data_requests);
++
++
++ if (dev->stp_requests != NULL) {
++ /* cleanup DMA desc's for ep0in */
++ if (dev->ep[UDC_EP0OUT_IDX].td_stp != NULL) {
++ pci_pool_free(dev->stp_requests,
++ dev->ep[UDC_EP0OUT_IDX].td_stp,
++ dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
++ }
++ if (dev->ep[UDC_EP0OUT_IDX].td_data != NULL) {
++ pci_pool_free(dev->stp_requests,
++ dev->ep[UDC_EP0OUT_IDX].td_data,
++ dev->ep[UDC_EP0OUT_IDX].td_data_phys);
++ }
++ pci_pool_destroy(dev->stp_requests);
++ }
++
++ ioh_udc_exit(dev->regs);
++
++ if (dev->irq_registered)
++ free_irq(pdev->irq, dev);
++
++ if (dev->virt_addr != NULL)
++ iounmap(dev->virt_addr);
++
++ if (dev->mem_region)
++ release_mem_region(dev->phys_addr, pci_resource_len(pdev,
++ IOH_UDC_PCI_BAR));
++
++ if (dev->active)
++ pci_disable_device(pdev);
++
++ if (dev->registered)
++ device_unregister(&dev->gadget.dev);
++ else
++ kfree(dev);
++
++ pci_set_drvdata(pdev, NULL);
++}
++
++/*!@ingroup UDC_UtilitiesAPI
++ * @fn static int init_dma_pools(struct ioh_udc_dev *dev)
++ * @brief create dma pools during initialization
++ * @param pdev [@ref IN] reference to struct pci_dev
++ * @remarks The following actions are performed:
++ *- Create a PCI memory pool of DMA descriptors for handling data requests
++ * using pci_pool_create API
++ *- If failed to create the pool, return -ENOMEM
++ *- Create a PCI memory pool of DMA descriptors for handling setup requests
++ * using pci_pool_create API
++ *- If failed to create the pool, return -ENOMEM
++ *- Allocate one entry from setup pool to be used for setup requests using
++ * pci_pool_alloc API
++ *- If failed to allocate, return -ENOMEM
++ *- Allocate one entry from setup pool to be used for control IN requests
++ * using pci_pool_alloc API
++ *- If failed to allocate, return -ENOMEM
++ * @return none
++ */
++static int init_dma_pools(struct ioh_udc_dev *dev)
++{
++ struct ioh_udc_stp_dma_desc *td_stp;
++ struct ioh_udc_data_dma_desc *td_data;
++
++ /* DMA setup */
++ dev->data_requests = pci_pool_create("data_requests", dev->pdev,
++ sizeof(struct ioh_udc_data_dma_desc), 0, 0);
++ if (dev->data_requests == NULL) {
++ IOH_LOG(KERN_ERR, "init_dma_pools: can't get request\
++ data pool");
++ return -ENOMEM;
++ }
++
++ /* dma desc for setup data */
++ dev->stp_requests = pci_pool_create("setup requests", dev->pdev,
++ sizeof(struct ioh_udc_stp_dma_desc), 0, 0);
++ if (dev->stp_requests == NULL) {
++ IOH_LOG(KERN_ERR, "init_dma_pools: can't get setup\
++ request pool");
++ return -ENOMEM;
++ }
++ /* setup */
++ td_stp = pci_pool_alloc(dev->stp_requests, GFP_KERNEL,
++ &dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
++ if (td_stp == NULL) {
++ IOH_LOG(KERN_ERR, "init_dma_pools: can't allocate setup\
++ dma descriptor");
++ return -ENOMEM;
++ }
++ dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
++
++ /* data: 0 packets !? */
++ td_data = pci_pool_alloc(dev->data_requests, GFP_KERNEL,
++ &dev->ep[UDC_EP0OUT_IDX].td_data_phys);
++ if (td_data == NULL) {
++ IOH_LOG(KERN_ERR, "init_dma_pools: can't allocate data dma\
++ descriptor");
++ return -ENOMEM;
++ }
++ dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
++ dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
++ dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
++ dev->ep[UDC_EP0IN_IDX].td_data = NULL;
++ dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
++ return 0;
++}
++
++
++
++/* Called by pci bus driver to init pci context */
++/*!@ingroup UDC_PCILayerAPI
++ * @fn static int ioh_udc_probe(struct pci_dev *pdev,
++ * const struct pci_device_id *id)
++ * @brief Implements the Probe routine for IOH USB device controller driver
++ * @param pdev [@ref IN] reference to struct pci_dev
++ * @param id [@ref IN] reference to struct pci_device_id table
++ * @return int [ 0 on Success and linux error number on failure ]
++ * @remarks The following actions are performed:
++ *- Allocate and initialize the device driver data structures
++ *- Enable the PCI device and set driver private data
++ *- Do PCI resource allocation for the device
++ *- Request memory region for the device
++ *- Map PCI device memory to kernel virtual space
++ *- Initialize the HAL layer by invoking ioh_udc_pcd_init API
++ *- Register the interrupt handler ioh_udc_isr
++ *- Enable Bus mastering for the device using pci_set_master API.
++ *- Enable memory write invalidate PCI transaction using pci_try_set_mwi API.
++ *- Set up device structure and ops structure and initialize the gadget driver
++ * data structure
++ *- If using DMA (specified during module loading), initialize DMA pools using
++ * init_dma_pools API
++ *- Register the device with the system using device_register API
++ *- Put the device in disconnected state till a driver is bound, using
++ * ioh_udc_set_disconnect API
++ *- Invoke ioh_udc_remove to perform clean-up on any error.
++ */
++static int ioh_udc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
++{
++ unsigned long resource;
++ unsigned long len;
++ int retval = 0;
++ struct ioh_udc_dev *dev;
++
++ IOH_DEBUG("ioh_udc_probe: enter");
++ /* one udc only */
++ if (ioh_udc != NULL) {
++ IOH_LOG(KERN_ERR, "ioh_udc_probe: already probed");
++ return -EBUSY;
++ }
++
++ /* init */
++ dev = kzalloc(sizeof(struct ioh_udc_dev), GFP_KERNEL);
++ if (dev == NULL) {
++ IOH_LOG(KERN_ERR, "ioh_udc_probe: no memory for device\
++ structure");
++ return -ENOMEM;
++ }
++ memset(dev, 0, sizeof(struct ioh_udc_dev));
++ /* pci setup */
++ if (pci_enable_device(pdev) < 0) {
++ kfree(dev);
++ IOH_LOG(KERN_ERR, "ioh_udc_probe: pci_enable_device failed");
++ return -ENODEV;
++ }
++ dev->active = 1;
++ pci_set_drvdata(pdev, dev);
++
++ /* PCI resource allocation */
++ resource = pci_resource_start(pdev, 1);
++ len = pci_resource_len(pdev, 1);
++ IOH_DEBUG("ioh_udc_probe: resource %lx, len %ld", resource, len);
++
++ if (request_mem_region(resource, len, name) == NULL) {
++ IOH_LOG(KERN_ERR, "ioh_udc_probe: pci device used already");
++ retval = -EBUSY;
++ goto finished;
++ }
++ dev->phys_addr = resource;
++ dev->mem_region = 1;
++
++ dev->virt_addr = ioremap_nocache(resource, len);
++ if (dev->virt_addr == NULL) {
++ IOH_LOG(KERN_ERR, "ioh_udc_probe: device memory cannot be\
++ mapped");
++ retval = -ENOMEM;
++ goto finished;
++ }
++ IOH_DEBUG("ioh_udc_probe: device memory mapped at %x",
++ (int)dev->virt_addr);
++
++ if (pdev->irq == 0) {
++ IOH_LOG(KERN_ERR, "ioh_udc_probe: irq not set");
++ retval = -ENODEV;
++ goto finished;
++ }
++
++ ioh_udc = dev;
++
++ /* initialize the hardware */
++ if (ioh_udc_pcd_init(dev) != 0)
++ goto finished;
++
++
++ if (request_irq(pdev->irq, ioh_udc_isr, IRQF_SHARED, name, dev) != 0) {
++ IOH_LOG(KERN_ERR, "ioh_udc_probe: request_irq(%d) fail",
++ pdev->irq);
++ retval = -ENODEV;
++ goto finished;
++ }
++ dev->irq = pdev->irq;
++ dev->irq_registered = 1;
++
++ pci_set_master(pdev);
++ pci_try_set_mwi(pdev);
++
++ /* device struct setup */
++ spin_lock_init(&dev->lock);
++ dev->pdev = pdev;
++ dev->gadget.ops = &ioh_udc_ops;
++
++ /* init dma pools */
++ retval = init_dma_pools(dev);
++ if (retval != 0)
++ goto finished;
++
++/* strcpy(dev->gadget.dev.bus_id, "gadget");*/
++ dev_set_name(&dev->gadget.dev, "gadget");
++ dev->gadget.dev.parent = &pdev->dev;
++ dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
++ dev->gadget.dev.release = gadget_release;
++ dev->gadget.name = name;
++ dev->gadget.is_dualspeed = 1;
++
++ retval = device_register(&dev->gadget.dev);
++ if (retval != 0)
++ goto finished;
++
++ dev->registered = 1;
++
++ /* Put the device in disconnected state till a driver is bound */
++ ioh_udc_set_disconnect(dev->regs);
++
++#ifdef IOH_PRINT_REG
++ /* print dev register info */
++ ioh_udc_print_regs((u32)dev->virt_addr);
++#endif
++ return 0;
++
++finished:
++ ioh_udc_remove(pdev);
++ return retval;
++}
++
++/*!@ingroup UDC_PCILayerAPI
++ * @fn static void ioh_udc_shutdown(struct pci_dev *pdev)
++ * @brief This routine makes sure that the device is quiescent.
++ * @param pdev [@ref IN] reference to struct pci_dev
++ * @return none
++ * @remarks The main tasks performed by this method are:
++ * - Disable interrupts from the device
++ * - Put the PCI device in soft disconnect mode
++ */
++static void ioh_udc_shutdown(struct pci_dev *pdev)
++{
++ struct ioh_udc_dev *dev = pci_get_drvdata(pdev);
++
++ IOH_DEBUG("ioh_udc_shutdown enter");
++
++ ioh_udc_disable_interrupts(dev->regs, UDC_DEVINT_MSK);
++ ioh_udc_disable_ep_interrupts(dev->regs, UDC_EPINT_MSK_DISABLE_ALL);
++
++ /* disable the pullup so the host will think we're gone */
++ ioh_udc_set_disconnect(dev->regs);
++}
++
++/*!@ingroup UDC_PCILayerAPI
++ * @fn static int ioh_udc_suspend(struct pci_dev *pdev, pm_message_t state)
++ * @brief Implements the Suspend functionality for IOH USB device controller
++ * driver
++ * @param pdev [@ref IN] reference to struct pci_dev
++ * @param state [@ref IN] specifies new PM state to which to transition to
++ * @return int [ 0 on Success and linux error number on failure ]
++ * @remarks The main tasks performed by this method are:
++ * - Disable interrupts from the device
++ * - Disable the PCI device
++ * - Save the PCI state
++ * - Transition to new power state
++ */
++static int ioh_udc_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ struct ioh_udc_dev *dev = pci_get_drvdata(pdev);
++
++ IOH_DEBUG("ioh_udc_suspend enter");
++
++ ioh_udc_disable_interrupts(dev->regs, UDC_DEVINT_MSK);
++ ioh_udc_disable_ep_interrupts(dev->regs, UDC_EPINT_MSK_DISABLE_ALL);
++
++ pci_disable_device(pdev);
++ pci_enable_wake(pdev, PCI_D3hot, 0);
++
++ if (pci_save_state(pdev) != 0) {
++ IOH_LOG(KERN_ERR, "ioh_udc_suspend: could not save PCI config\
++ state");
++ return -ENOMEM;
++ }
++
++ if (pci_set_power_state(pdev, pci_choose_state(pdev, state)) == -EIO)
++ IOH_DEBUG("ioh_udc_suspend: does not support PM cpabilities");
++
++
++ return 0;
++}
++
++/*!@ingroup UDC_PCILayerAPI
++ * @fn static int ioh_udc_resume(struct pci_dev *pdev)
++ * @brief Implements the Resume functionality for IOH USB device
++ * controller driver
++ * @param pdev [@ref IN] reference to struct pci_dev
++ * @return int [ 0 on Success and linux error number on failure ]
++ * @remarks The main tasks performed by this method are:
++ * - Set power state to PCI_D0 using pci_set_power_state
++ * - Restore the device state using pci_restore_state
++ * - Enable the device using pci_enable_device
++ * - Enable the bus mastering using pci_set_master
++ * - Disable the PM notifications using pci_enable_wake
++ */
++static int ioh_udc_resume(struct pci_dev *pdev)
++{
++ int ret;
++
++ ret = pci_set_power_state(pdev, PCI_D0);
++ if (ret != 0)
++ IOH_DEBUG("ioh_udc_resume: does not support PM cpabilities");
++
++
++ ret = pci_restore_state(pdev);
++ if (ret != 0) {
++ IOH_LOG(KERN_ERR, "ioh_udc_resume: pci_restore_state failed");
++ return ret;
++ }
++
++ ret = pci_enable_device(pdev);
++
++ if (ret != 0) {
++ IOH_LOG(KERN_ERR, "ioh_udc_resume: pci_enable_device failed");
++ return ret;
++ }
++ pci_enable_wake(pdev, PCI_D3hot, 0);
++
++ return 0;
++}
++
++/* PCI device parameters */
++/*!@ingroup UDC_InterfaceLayer
++ * @struct ioh_udc_pcidev_id
++ * @brief This is an instance of pci_device_id structure which holds
++ * information
++ * about the PCI USB device that are supported by this
++ * driver.
++ */
++static const struct pci_device_id ioh_udc_pcidev_id[] = {
++ {
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOH1_UDC),
++ .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
++ .class_mask = 0xffffffff,
++ },
++ {
++ PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ROHM_IOH2_UDC),
++ .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
++ .class_mask = 0xffffffff,
++ },
++ { 0 },
++};
++
++MODULE_DEVICE_TABLE(pci, ioh_udc_pcidev_id);
++
++/* PCI functions */
++/*!@ingroup UDC_InterfaceLayer
++ * @struct ioh_udc_driver
++ * @brief Linux pci_driver structure which provides the reference to
++ * PCI methods of this driver
++ */
++static struct pci_driver ioh_udc_driver = {
++ .name = (char *) name,
++ .id_table = ioh_udc_pcidev_id,
++ .probe = ioh_udc_probe,
++ .remove = ioh_udc_remove,
++ .suspend = ioh_udc_suspend,
++ .resume = ioh_udc_resume,
++ .shutdown = ioh_udc_shutdown,
++};
++
++/* Initialize the driver */
++/*!@ingroup UDC_InterfaceLayerAPI
++ * @fn static int __init ioh_udc_pci_init(void)
++ * @brief This function is the entry point for the driver
++ * @param none
++ * @return int [ 0 on success and <0 on failure ]
++ */
++static int __init ioh_udc_pci_init(void)
++{
++ return pci_register_driver(&ioh_udc_driver);
++}
++module_init(ioh_udc_pci_init);
++
++/* Cleans driver */
++/*!@ingroup UDC_InterfaceLayerAPI
++ * @fn static void __exit ioh_udc_pci_exit(void)
++ * @brief This function is the exit point for the driver
++ * @param none
++ * @return none
++ */
++static void __exit ioh_udc_pci_exit(void)
++{
++ pci_unregister_driver(&ioh_udc_driver);
++}
++module_exit(ioh_udc_pci_exit);
++
++MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/drivers/usb/gadget/pch_udc_pci.h
+@@ -0,0 +1,97 @@
++ /*!
++ * @file ioh_udc_pci.h
++ * @brief
++ * The IOH UDC is a USB High speed DMA capable USB device controller.
++ * It provides 4 IN and 4 OUT endpoints (control, bulk isochronous or interrupt
++ * type).
++ *
++ * The IOH USB device controller driver provides required interface
++ * to the USB gadget framework for accessing the IOH USB device hardware.
++ *
++ * @version 0.96
++ *
++ * @section
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ */
++
++ /*
++ * History:
++ * Copyright (C) 2009 OKI SEMICONDUCTOR Co., LTD.
++ * All rights reserved.
++ *
++ * created:
++ * OKI SEMICONDUCTOR 2/26/2010
++ * modified:
++ *
++ */
++
++#ifndef IOH_UDC_PCI_H
++#define IOH_UDC_PCI_H
++
++#include <linux/types.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/kernel.h>
++#include <linux/delay.h>
++#include <linux/ioport.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/smp_lock.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/timer.h>
++#include <linux/list.h>
++#include <linux/interrupt.h>
++#include <linux/ioctl.h>
++#include <linux/fs.h>
++#include <linux/dmapool.h>
++#include <linux/moduleparam.h>
++#include <linux/device.h>
++#include <linux/io.h>
++#include <linux/irq.h>
++
++#include <asm/byteorder.h>
++#include <asm/system.h>
++#include <asm/unaligned.h>
++
++/* gadget stack */
++#include <linux/usb/ch9.h>
++#include <linux/usb/gadget.h>
++#include "pch_udc_hal.h"
++
++/*!@ingroup UDC_InterfaceLayer
++ * @def PCI_VENDOR_ID_INTEL
++ * @brief PCI Vendor ID for Intel.
++ */
++#define PCI_VENDOR_ID_INTEL 0x8086
++/*!@ingroup UDC_InterfaceLayer
++ * @def PCI_VENDOR_ID_ROHM
++ * @brief PCI Vendor ID for ROHM.
++ */
++#define PCI_VENDOR_ID_ROHM 0x10db
++
++/*!@ingroup UDC_InterfaceLayer
++ * @def PCI_DEVICE_ID_INTEL_IOH1_UDC
++ * @brief Outlines the PCI Device ID for Intel IOH GE UDC device.
++ */
++#define PCI_DEVICE_ID_INTEL_IOH1_UDC 0x8808 /*Device Id for GE device*/
++
++/*!@ingroup UDC_InterfaceLayer
++ * @def PCI_DEVICE_ID_ROHM_IOH2_UDC
++ * @brief Outlines the PCI Device ID for ROHM IOH IVI UDC device.
++ */
++#define PCI_DEVICE_ID_ROHM_IOH2_UDC 0x801D /* Device ID for IVI*/
++
++extern u32 ioh_udc_base;
++#endif /* IOH_UDC_PCI_H */
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-rt2860-no-debug.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-rt2860-no-debug.patch
new file mode 100644
index 0000000..e1bc13c
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-rt2860-no-debug.patch
@@ -0,0 +1,36 @@
+diff --git a/drivers/staging/rt2860/rt_linux.c b/drivers/staging/rt2860/rt_linux.c
+index 9357fb2..7e0b6c0 100644
+--- a/drivers/staging/rt2860/rt_linux.c
++++ b/drivers/staging/rt2860/rt_linux.c
+@@ -28,7 +28,7 @@
+ #include <linux/sched.h>
+ #include "rt_config.h"
+
+-unsigned long RTDebugLevel = RT_DEBUG_ERROR;
++unsigned long RTDebugLevel = RT_DEBUG_OFF;
+
+ /* for wireless system event message */
+ char const *pWirelessSysEventText[IW_SYS_EVENT_TYPE_NUM] = {
+diff --git a/drivers/staging/rt2860/rt_linux.h b/drivers/staging/rt2860/rt_linux.h
+index f85508d..92ce551 100644
+--- a/drivers/staging/rt2860/rt_linux.h
++++ b/drivers/staging/rt2860/rt_linux.h
+@@ -425,16 +425,9 @@ do{ \
+
+ #define DBGPRINT(Level, Fmt) DBGPRINT_RAW(Level, Fmt)
+
+-#define DBGPRINT_ERR(Fmt) \
+-{ \
+- printk("ERROR! "); \
+- printk Fmt; \
+-}
++#define DBGPRINT_ERR(Fmt)
+
+-#define DBGPRINT_S(Status, Fmt) \
+-{ \
+- printk Fmt; \
+-}
++#define DBGPRINT_S(Status, Fmt)
+
+ #else
+ #define DBGPRINT(Level, Fmt)
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-stantum-and-mosart-multitouch-drivers.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-stantum-and-mosart-multitouch-drivers.patch
new file mode 100644
index 0000000..6f8aaca
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.34-stantum-and-mosart-multitouch-drivers.patch
@@ -0,0 +1,714 @@
+From f7e13f4d9a7a9025244b37a3ad188af7dae841d9 Mon Sep 17 00:00:00 2001
+From: Stephane Chatty <chatty@enac.fr>
+Date: Fri, 9 Apr 2010 15:33:54 -0700
+Subject: [PATCH 105/105] Stantum and Mosart multitouch drivers
+
+HID Driver and configs for Stantum and Mosart multitouch panels.
+
+Patch-mainline: 2.6.34
+
+Signed-off-by: Stephane Chatty <chatty@enac.fr>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Priya Vijayan <priya.vijayan@intel.com>
+---
+ drivers/hid/Kconfig | 12 ++
+ drivers/hid/Makefile | 3 +-
+ drivers/hid/hid-core.c | 7 +-
+ drivers/hid/hid-ids.h | 15 ++-
+ drivers/hid/hid-mosart.c | 274 +++++++++++++++++++++++++++++++++++++++++++
+ drivers/hid/hid-stantum.c | 285 +++++++++++++++++++++++++++++++++++++++++++++
+ 6 files changed, 590 insertions(+), 6 deletions(-)
+ create mode 100644 drivers/hid/hid-mosart.c
+ create mode 100644 drivers/hid/hid-stantum.c
+
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index 37fb241..55906bc 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -203,6 +203,12 @@ config HID_MONTEREY
+ ---help---
+ Support for Monterey Genius KB29E.
+
++config HID_MOSART
++ tristate "MosArt"
++ depends on USB_HID
++ ---help---
++ Support for MosArt dual-touch panels.
++
+ config HID_NTRIG
+ tristate "NTrig" if EMBEDDED
+ depends on USB_HID
+@@ -247,6 +253,12 @@ config HID_SONY
+ ---help---
+ Support for Sony PS3 controller.
+
++config HID_STANTUM
++ tristate "Stantum"
++ depends on USB_HID
++ ---help---
++ Support for Stantum multitouch panel.
++
+ config HID_SUNPLUS
+ tristate "Sunplus" if EMBEDDED
+ depends on USB_HID
+diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
+index b05f921..bbda0b0 100644
+--- a/drivers/hid/Makefile
++++ b/drivers/hid/Makefile
+@@ -34,12 +34,14 @@ obj-$(CONFIG_HID_KYE) += hid-kye.o
+ obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
+ obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o
+ obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o
++obj-$(CONFIG_HID_MOSART) += hid-mosart.o
+ obj-$(CONFIG_HID_NTRIG) += hid-ntrig.o
+ obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o
+ obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
+ obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
+ obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
+ obj-$(CONFIG_HID_SONY) += hid-sony.o
++obj-$(CONFIG_HID_STANTUM) += hid-stantum.o
+ obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o
+ obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o
+ obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o
+@@ -51,4 +53,3 @@ obj-$(CONFIG_HID_WACOM) += hid-wacom.o
+ obj-$(CONFIG_USB_HID) += usbhid/
+ obj-$(CONFIG_USB_MOUSE) += usbhid/
+ obj-$(CONFIG_USB_KBD) += usbhid/
+-
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index b126102..fbf6f3e 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1342,6 +1342,9 @@ static const struct hid_device_id hid_blacklist[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, USB_DEVICE_ID_MTP) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM2, USB_DEVICE_ID_MTP2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_STMICRO, USB_DEVICE_ID_STMICRO_MTP1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
+@@ -1544,8 +1546,9 @@ static const struct hid_device_id hid_ignore_list[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
+- { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM)},
+- { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM2)},
++ { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT)},
++ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
++ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
+ { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 6865ca2..92c8a78 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -96,9 +96,12 @@
+ #define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241
+ #define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242
+
+-#define USB_VENDOR_ID_ASUS 0x0b05
+-#define USB_DEVICE_ID_ASUS_LCM 0x1726
+-#define USB_DEVICE_ID_ASUS_LCM2 0x175b
++#define USB_VENDOR_ID_ASUS 0x0486
++#define USB_DEVICE_ID_ASUS_T91MT 0x0185
++
++#define USB_VENDOR_ID_ASUSTEK 0x0b05
++#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726
++#define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b
+
+ #define USB_VENDOR_ID_ATEN 0x0557
+ #define USB_DEVICE_ID_ATEN_UC100KM 0x2004
+@@ -399,6 +402,15 @@
+ #define USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST 0x0034
+ #define USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST 0x0046
+
++#define USB_VENDOR_ID_STANTUM 0x1f87
++#define USB_DEVICE_ID_MTP 0x0002
++
++#define USB_VENDOR_ID_STANTUM2 0x1f87
++#define USB_DEVICE_ID_MTP2 0x0001
++
++#define USB_VENDOR_ID_STMICRO 0x0483
++#define USB_DEVICE_ID_STMICRO_MTP1 0x3261
++
+ #define USB_VENDOR_ID_SUN 0x0430
+ #define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab
+
+diff --git a/drivers/hid/hid-mosart.c b/drivers/hid/hid-mosart.c
+new file mode 100644
+index 0000000..e91437c
+--- /dev/null
++++ b/drivers/hid/hid-mosart.c
+@@ -0,0 +1,274 @@
++/*
++ * HID driver for the multitouch panel on the ASUS EeePC T91MT
++ *
++ * Copyright (c) 2009-2010 Stephane Chatty <chatty@enac.fr>
++ * Copyright (c) 2010 Teemu Tuominen <teemu.tuominen@cybercom.com>
++ *
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or (at your option)
++ * any later version.
++ */
++
++#include <linux/device.h>
++#include <linux/hid.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/usb.h>
++#include "usbhid/usbhid.h"
++
++MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
++MODULE_DESCRIPTION("MosArt dual-touch panel");
++MODULE_LICENSE("GPL");
++
++#include "hid-ids.h"
++
++struct mosart_data {
++ __u16 x, y;
++ __u8 id;
++ bool valid; /* valid finger data, or just placeholder? */
++ bool first; /* is this the first finger in this frame? */
++ bool activity_now; /* at least one active finger in this frame? */
++ bool activity; /* at least one active finger previously? */
++};
++
++static int mosart_input_mapping(struct hid_device *hdev, struct hid_input *hi,
++ struct hid_field *field, struct hid_usage *usage,
++ unsigned long **bit, int *max)
++{
++ switch (usage->hid & HID_USAGE_PAGE) {
++
++ case HID_UP_GENDESK:
++ switch (usage->hid) {
++ case HID_GD_X:
++ hid_map_usage(hi, usage, bit, max,
++ EV_ABS, ABS_MT_POSITION_X);
++ /* touchscreen emulation */
++ input_set_abs_params(hi->input, ABS_X,
++ field->logical_minimum,
++ field->logical_maximum, 0, 0);
++ return 1;
++ case HID_GD_Y:
++ hid_map_usage(hi, usage, bit, max,
++ EV_ABS, ABS_MT_POSITION_Y);
++ /* touchscreen emulation */
++ input_set_abs_params(hi->input, ABS_Y,
++ field->logical_minimum,
++ field->logical_maximum, 0, 0);
++ return 1;
++ }
++ return 0;
++
++ case HID_UP_DIGITIZER:
++ switch (usage->hid) {
++ case HID_DG_CONFIDENCE:
++ case HID_DG_TIPSWITCH:
++ case HID_DG_INPUTMODE:
++ case HID_DG_DEVICEINDEX:
++ case HID_DG_CONTACTCOUNT:
++ case HID_DG_CONTACTMAX:
++ case HID_DG_TIPPRESSURE:
++ case HID_DG_WIDTH:
++ case HID_DG_HEIGHT:
++ return -1;
++ case HID_DG_INRANGE:
++ /* touchscreen emulation */
++ hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
++ return 1;
++
++ case HID_DG_CONTACTID:
++ hid_map_usage(hi, usage, bit, max,
++ EV_ABS, ABS_MT_TRACKING_ID);
++ return 1;
++
++ }
++ return 0;
++
++ case 0xff000000:
++ /* ignore HID features */
++ return -1;
++ }
++
++ return 0;
++}
++
++static int mosart_input_mapped(struct hid_device *hdev, struct hid_input *hi,
++ struct hid_field *field, struct hid_usage *usage,
++ unsigned long **bit, int *max)
++{
++ if (usage->type == EV_KEY || usage->type == EV_ABS)
++ clear_bit(usage->code, *bit);
++
++ return 0;
++}
++
++/*
++ * this function is called when a whole finger has been parsed,
++ * so that it can decide what to send to the input layer.
++ */
++static void mosart_filter_event(struct mosart_data *td, struct input_dev *input)
++{
++ td->first = !td->first; /* touchscreen emulation */
++
++ if (!td->valid) {
++ /*
++ * touchscreen emulation: if no finger in this frame is valid
++ * and there previously was finger activity, this is a release
++ */
++ if (!td->first && !td->activity_now && td->activity) {
++ input_event(input, EV_KEY, BTN_TOUCH, 0);
++ td->activity = false;
++ }
++ return;
++ }
++
++ input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
++ input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
++ input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);
++
++ input_mt_sync(input);
++ td->valid = false;
++
++ /* touchscreen emulation: if first active finger in this frame... */
++ if (!td->activity_now) {
++ /* if there was no previous activity, emit touch event */
++ if (!td->activity) {
++ input_event(input, EV_KEY, BTN_TOUCH, 1);
++ td->activity = true;
++ }
++ td->activity_now = true;
++ /* and in any case this is our preferred finger */
++ input_event(input, EV_ABS, ABS_X, td->x);
++ input_event(input, EV_ABS, ABS_Y, td->y);
++ }
++}
++
++
++static int mosart_event(struct hid_device *hid, struct hid_field *field,
++ struct hid_usage *usage, __s32 value)
++{
++ struct mosart_data *td = hid_get_drvdata(hid);
++
++ if (hid->claimed & HID_CLAIMED_INPUT) {
++ struct input_dev *input = field->hidinput->input;
++ switch (usage->hid) {
++ case HID_DG_INRANGE:
++ td->valid = !!value;
++ break;
++ case HID_GD_X:
++ td->x = value;
++ break;
++ case HID_GD_Y:
++ td->y = value;
++ mosart_filter_event(td, input);
++ break;
++ case HID_DG_CONTACTID:
++ td->id = value;
++ break;
++ case HID_DG_CONTACTCOUNT:
++ /* touch emulation: this is the last field in a frame */
++ td->first = false;
++ td->activity_now = false;
++ break;
++ case HID_DG_CONFIDENCE:
++ case HID_DG_TIPSWITCH:
++ /* avoid interference from generic hidinput handling */
++ break;
++
++ default:
++ /* fallback to the generic hidinput handling */
++ return 0;
++ }
++ }
++
++ /* we have handled the hidinput part, now remains hiddev */
++ if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
++ hid->hiddev_hid_event(hid, field, usage, value);
++
++ return 1;
++}
++
++static int mosart_probe(struct hid_device *hdev, const struct hid_device_id *id)
++{
++ int ret;
++ struct mosart_data *td;
++
++
++ td = kmalloc(sizeof(struct mosart_data), GFP_KERNEL);
++ if (!td) {
++ dev_err(&hdev->dev, "cannot allocate MosArt data\n");
++ return -ENOMEM;
++ }
++ td->valid = false;
++ td->activity = false;
++ td->activity_now = false;
++ td->first = false;
++ hid_set_drvdata(hdev, td);
++
++ /* currently, it's better to have one evdev device only */
++#if 0
++ hdev->quirks |= HID_QUIRK_MULTI_INPUT;
++#endif
++
++ ret = hid_parse(hdev);
++ if (ret == 0)
++ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
++
++ if (ret == 0) {
++ struct hid_report_enum *re = hdev->report_enum
++ + HID_FEATURE_REPORT;
++ struct hid_report *r = re->report_id_hash[7];
++
++ r->field[0]->value[0] = 0x02;
++ usbhid_submit_report(hdev, r, USB_DIR_OUT);
++ } else
++ kfree(td);
++
++ return ret;
++}
++
++static void mosart_remove(struct hid_device *hdev)
++{
++ hid_hw_stop(hdev);
++ kfree(hid_get_drvdata(hdev));
++ hid_set_drvdata(hdev, NULL);
++}
++
++static const struct hid_device_id mosart_devices[] = {
++ { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
++ { }
++};
++MODULE_DEVICE_TABLE(hid, mosart_devices);
++
++static const struct hid_usage_id mosart_grabbed_usages[] = {
++ { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
++ { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
++};
++
++static struct hid_driver mosart_driver = {
++ .name = "mosart",
++ .id_table = mosart_devices,
++ .probe = mosart_probe,
++ .remove = mosart_remove,
++ .input_mapping = mosart_input_mapping,
++ .input_mapped = mosart_input_mapped,
++ .usage_table = mosart_grabbed_usages,
++ .event = mosart_event,
++};
++
++static int __init mosart_init(void)
++{
++ return hid_register_driver(&mosart_driver);
++}
++
++static void __exit mosart_exit(void)
++{
++ hid_unregister_driver(&mosart_driver);
++}
++
++module_init(mosart_init);
++module_exit(mosart_exit);
++
+diff --git a/drivers/hid/hid-stantum.c b/drivers/hid/hid-stantum.c
+new file mode 100644
+index 0000000..bb4430f
+--- /dev/null
++++ b/drivers/hid/hid-stantum.c
+@@ -0,0 +1,286 @@
++/*
++ * HID driver for Stantum multitouch panels
++ *
++ * Copyright (c) 2009 Stephane Chatty <chatty@enac.fr>
++ *
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or (at your option)
++ * any later version.
++ */
++
++#include <linux/device.h>
++#include <linux/hid.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++
++MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
++MODULE_DESCRIPTION("Stantum HID multitouch panels");
++MODULE_LICENSE("GPL");
++
++#include "hid-ids.h"
++
++struct stantum_data {
++ __s32 x, y, z, w, h; /* x, y, pressure, width, height */
++ __u16 id; /* touch id */
++ bool valid; /* valid finger data, or just placeholder? */
++ bool first; /* first finger in the HID packet? */
++ bool activity; /* at least one active finger so far? */
++};
++
++static int stantum_input_mapping(struct hid_device *hdev, struct hid_input *hi,
++ struct hid_field *field, struct hid_usage *usage,
++ unsigned long **bit, int *max)
++{
++ switch (usage->hid & HID_USAGE_PAGE) {
++
++ case HID_UP_GENDESK:
++ switch (usage->hid) {
++ case HID_GD_X:
++ hid_map_usage(hi, usage, bit, max,
++ EV_ABS, ABS_MT_POSITION_X);
++ /* touchscreen emulation */
++ input_set_abs_params(hi->input, ABS_X,
++ field->logical_minimum,
++ field->logical_maximum, 0, 0);
++ return 1;
++ case HID_GD_Y:
++ hid_map_usage(hi, usage, bit, max,
++ EV_ABS, ABS_MT_POSITION_Y);
++ /* touchscreen emulation */
++ input_set_abs_params(hi->input, ABS_Y,
++ field->logical_minimum,
++ field->logical_maximum, 0, 0);
++ return 1;
++ }
++ return 0;
++
++ case HID_UP_DIGITIZER:
++ switch (usage->hid) {
++ case HID_DG_INRANGE:
++ case HID_DG_CONFIDENCE:
++ case HID_DG_INPUTMODE:
++ case HID_DG_DEVICEINDEX:
++ case HID_DG_CONTACTCOUNT:
++ case HID_DG_CONTACTMAX:
++ return -1;
++
++ case HID_DG_TIPSWITCH:
++ /* touchscreen emulation */
++ hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
++ return 1;
++
++ case HID_DG_WIDTH:
++ hid_map_usage(hi, usage, bit, max,
++ EV_ABS, ABS_MT_TOUCH_MAJOR);
++ return 1;
++ case HID_DG_HEIGHT:
++ hid_map_usage(hi, usage, bit, max,
++ EV_ABS, ABS_MT_TOUCH_MINOR);
++ input_set_abs_params(hi->input, ABS_MT_ORIENTATION,
++ 1, 1, 0, 0);
++ return 1;
++ case HID_DG_TIPPRESSURE:
++ hid_map_usage(hi, usage, bit, max,
++ EV_ABS, ABS_MT_PRESSURE);
++ return 1;
++
++ case HID_DG_CONTACTID:
++ hid_map_usage(hi, usage, bit, max,
++ EV_ABS, ABS_MT_TRACKING_ID);
++ return 1;
++
++ }
++ return 0;
++
++ case 0xff000000:
++ /* no input-oriented meaning */
++ return -1;
++ }
++
++ return 0;
++}
++
++static int stantum_input_mapped(struct hid_device *hdev, struct hid_input *hi,
++ struct hid_field *field, struct hid_usage *usage,
++ unsigned long **bit, int *max)
++{
++ if (usage->type == EV_KEY || usage->type == EV_ABS)
++ clear_bit(usage->code, *bit);
++
++ return 0;
++}
++
++/*
++ * this function is called when a whole finger has been parsed,
++ * so that it can decide what to send to the input layer.
++ */
++static void stantum_filter_event(struct stantum_data *sd,
++ struct input_dev *input)
++{
++ bool wide;
++
++ if (!sd->valid) {
++ /*
++ * touchscreen emulation: if the first finger is not valid and
++ * there previously was finger activity, this is a release
++ */
++ if (sd->first && sd->activity) {
++ input_event(input, EV_KEY, BTN_TOUCH, 0);
++ sd->activity = false;
++ }
++ return;
++ }
++
++ input_event(input, EV_ABS, ABS_MT_TRACKING_ID, sd->id);
++ input_event(input, EV_ABS, ABS_MT_POSITION_X, sd->x);
++ input_event(input, EV_ABS, ABS_MT_POSITION_Y, sd->y);
++
++ wide = (sd->w > sd->h);
++ input_event(input, EV_ABS, ABS_MT_ORIENTATION, wide);
++ input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, wide ? sd->w : sd->h);
++ input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, wide ? sd->h : sd->w);
++
++ input_event(input, EV_ABS, ABS_MT_PRESSURE, sd->z);
++
++ input_mt_sync(input);
++ sd->valid = false;
++
++ /* touchscreen emulation */
++ if (sd->first) {
++ if (!sd->activity) {
++ input_event(input, EV_KEY, BTN_TOUCH, 1);
++ sd->activity = true;
++ }
++ input_event(input, EV_ABS, ABS_X, sd->x);
++ input_event(input, EV_ABS, ABS_Y, sd->y);
++ }
++ sd->first = false;
++}
++
++
++static int stantum_event(struct hid_device *hid, struct hid_field *field,
++ struct hid_usage *usage, __s32 value)
++{
++ struct stantum_data *sd = hid_get_drvdata(hid);
++
++ if (hid->claimed & HID_CLAIMED_INPUT) {
++ struct input_dev *input = field->hidinput->input;
++
++ switch (usage->hid) {
++ case HID_DG_INRANGE:
++ /* this is the last field in a finger */
++ stantum_filter_event(sd, input);
++ break;
++ case HID_DG_WIDTH:
++ sd->w = value;
++ break;
++ case HID_DG_HEIGHT:
++ sd->h = value;
++ break;
++ case HID_GD_X:
++ sd->x = value;
++ break;
++ case HID_GD_Y:
++ sd->y = value;
++ break;
++ case HID_DG_TIPPRESSURE:
++ sd->z = value;
++ break;
++ case HID_DG_CONTACTID:
++ sd->id = value;
++ break;
++ case HID_DG_CONFIDENCE:
++ sd->valid = !!value;
++ break;
++ case 0xff000002:
++ /* this comes only before the first finger */
++ sd->first = true;
++ break;
++
++ default:
++ /* ignore the others */
++ return 1;
++ }
++ }
++
++ /* we have handled the hidinput part, now remains hiddev */
++ if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
++ hid->hiddev_hid_event(hid, field, usage, value);
++
++ return 1;
++}
++
++static int stantum_probe(struct hid_device *hdev,
++ const struct hid_device_id *id)
++{
++ int ret;
++ struct stantum_data *sd;
++
++ sd = kmalloc(sizeof(struct stantum_data), GFP_KERNEL);
++ if (!sd) {
++ dev_err(&hdev->dev, "cannot allocate Stantum data\n");
++ return -ENOMEM;
++ }
++ sd->valid = false;
++ sd->first = false;
++ sd->activity = false;
++ hid_set_drvdata(hdev, sd);
++
++ ret = hid_parse(hdev);
++ if (!ret)
++ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
++
++ if (ret)
++ kfree(sd);
++
++ return ret;
++}
++
++static void stantum_remove(struct hid_device *hdev)
++{
++ hid_hw_stop(hdev);
++ kfree(hid_get_drvdata(hdev));
++ hid_set_drvdata(hdev, NULL);
++}
++
++static const struct hid_device_id stantum_devices[] = {
++ { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, USB_DEVICE_ID_MTP) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM2, USB_DEVICE_ID_MTP2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_STMICRO, USB_DEVICE_ID_STMICRO_MTP1) },
++ { }
++};
++MODULE_DEVICE_TABLE(hid, stantum_devices);
++
++static const struct hid_usage_id stantum_grabbed_usages[] = {
++ { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
++ { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
++};
++
++static struct hid_driver stantum_driver = {
++ .name = "stantum",
++ .id_table = stantum_devices,
++ .probe = stantum_probe,
++ .remove = stantum_remove,
++ .input_mapping = stantum_input_mapping,
++ .input_mapped = stantum_input_mapped,
++ .usage_table = stantum_grabbed_usages,
++ .event = stantum_event,
++};
++
++static int __init stantum_init(void)
++{
++ return hid_register_driver(&stantum_driver);
++}
++
++static void __exit stantum_exit(void)
++{
++ hid_unregister_driver(&stantum_driver);
++}
++
++module_init(stantum_init);
++module_exit(stantum_exit);
++
+--
+1.6.2.2
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-DSS2-Add-ACX565AKM-Panel-Driver.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-DSS2-Add-ACX565AKM-Panel-Driver.patch
new file mode 100644
index 0000000..0b3b501
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-DSS2-Add-ACX565AKM-Panel-Driver.patch
@@ -0,0 +1,813 @@
+From 635f44cfde6c057a2ecbb8c9d9a67225e53b6545 Mon Sep 17 00:00:00 2001
+From: Roger Quadros <roger.quadros@nokia.com>
+Date: Wed, 10 Mar 2010 17:32:44 +0200
+Subject: [PATCH 3/10] OMAP: DSS2: Add ACX565AKM Panel Driver
+
+From: Roger Quadros <roger.quadros@nokia.com>
+
+Patch-mainline: 2.6.35?
+Git-repo: http://www.gitorious.org/linux-omap-dss2/linux/commit/4f2308f3be2fe631412ea85a80c91414c3bfe730
+
+This is the panel used on Nokia N900
+
+Signed-off-by: Roger Quadros <roger.quadros@nokia.com>
+---
+ drivers/video/omap2/displays/Kconfig | 6 +
+ drivers/video/omap2/displays/Makefile | 1 +
+ drivers/video/omap2/displays/panel-acx565akm.c | 760 ++++++++++++++++++++++++
+ 3 files changed, 767 insertions(+), 0 deletions(-)
+ create mode 100644 drivers/video/omap2/displays/panel-acx565akm.c
+
+diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig
+index b12a59c..1f5b7d1 100644
+--- a/drivers/video/omap2/displays/Kconfig
++++ b/drivers/video/omap2/displays/Kconfig
+@@ -19,4 +19,10 @@ config PANEL_TAAL
+ help
+ Taal DSI command mode panel from TPO.
+
++config PANEL_ACX565AKM
++ tristate "ACX565AKM Panel"
++ depends on OMAP2_DSS_SDI
++ select BACKLIGHT_CLASS_DEVICE
++ help
++ This is the LCD panel used on Nokia N900
+ endmenu
+diff --git a/drivers/video/omap2/displays/Makefile b/drivers/video/omap2/displays/Makefile
+index 9556464..0af16b7 100644
+--- a/drivers/video/omap2/displays/Makefile
++++ b/drivers/video/omap2/displays/Makefile
+@@ -2,3 +2,4 @@ obj-$(CONFIG_PANEL_GENERIC) += panel-generic.o
+ obj-$(CONFIG_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
+
+ obj-$(CONFIG_PANEL_TAAL) += panel-taal.o
++obj-$(CONFIG_PANEL_ACX565AKM) += panel-acx565akm.o
+diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
+new file mode 100644
+index 0000000..27e9847
+--- /dev/null
++++ b/drivers/video/omap2/displays/panel-acx565akm.c
+@@ -0,0 +1,760 @@
++/*
++ * Support for ACX565AKM LCD Panel used on Nokia N900
++ *
++ * Copyright (C) 2010 Nokia Corporation
++ *
++ * Original Driver Author: Imre Deak <imre.deak@nokia.com>
++ * Based on panel-generic.c by Tomi Valkeinen <tomi.valkeinen@nokia.com>
++ * Adapted to new DSS2 framework: Roger Quadros <roger.quadros@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/delay.h>
++#include <linux/spi/spi.h>
++#include <linux/jiffies.h>
++#include <linux/sched.h>
++#include <linux/backlight.h>
++#include <linux/fb.h>
++
++#include <plat/display.h>
++
++#define MIPID_CMD_READ_DISP_ID 0x04
++#define MIPID_CMD_READ_RED 0x06
++#define MIPID_CMD_READ_GREEN 0x07
++#define MIPID_CMD_READ_BLUE 0x08
++#define MIPID_CMD_READ_DISP_STATUS 0x09
++#define MIPID_CMD_RDDSDR 0x0F
++#define MIPID_CMD_SLEEP_IN 0x10
++#define MIPID_CMD_SLEEP_OUT 0x11
++#define MIPID_CMD_DISP_OFF 0x28
++#define MIPID_CMD_DISP_ON 0x29
++#define MIPID_CMD_WRITE_DISP_BRIGHTNESS 0x51
++#define MIPID_CMD_READ_DISP_BRIGHTNESS 0x52
++#define MIPID_CMD_WRITE_CTRL_DISP 0x53
++
++#define CTRL_DISP_BRIGHTNESS_CTRL_ON (1 << 5)
++#define CTRL_DISP_AMBIENT_LIGHT_CTRL_ON (1 << 4)
++#define CTRL_DISP_BACKLIGHT_ON (1 << 2)
++#define CTRL_DISP_AUTO_BRIGHTNESS_ON (1 << 1)
++
++#define MIPID_CMD_READ_CTRL_DISP 0x54
++#define MIPID_CMD_WRITE_CABC 0x55
++#define MIPID_CMD_READ_CABC 0x56
++
++#define MIPID_VER_LPH8923 3
++#define MIPID_VER_LS041Y3 4
++#define MIPID_VER_L4F00311 8
++#define MIPID_VER_ACX565AKM 9
++
++struct acx565akm_device {
++ char *name;
++ int enabled;
++ int model;
++ int revision;
++ u8 display_id[3];
++ unsigned has_bc:1;
++ unsigned has_cabc:1;
++ unsigned cabc_mode;
++ unsigned long hw_guard_end; /* next value of jiffies
++ when we can issue the
++ next sleep in/out command */
++ unsigned long hw_guard_wait; /* max guard time in jiffies */
++
++ struct spi_device *spi;
++ struct mutex mutex;
++
++ struct omap_dss_device *dssdev;
++ struct backlight_device *bl_dev;
++};
++
++static struct acx565akm_device acx_dev;
++static int acx565akm_bl_update_status(struct backlight_device *dev);
++
++/*--------------------MIPID interface-----------------------------*/
++
++static void acx565akm_transfer(struct acx565akm_device *md, int cmd,
++ const u8 *wbuf, int wlen, u8 *rbuf, int rlen)
++{
++ struct spi_message m;
++ struct spi_transfer *x, xfer[5];
++ int r;
++
++ BUG_ON(md->spi == NULL);
++
++ spi_message_init(&m);
++
++ memset(xfer, 0, sizeof(xfer));
++ x = &xfer[0];
++
++ cmd &= 0xff;
++ x->tx_buf = &cmd;
++ x->bits_per_word = 9;
++ x->len = 2;
++
++ if (rlen > 1 && wlen == 0) {
++ /*
++ * Between the command and the response data there is a
++ * dummy clock cycle. Add an extra bit after the command
++ * word to account for this.
++ */
++ x->bits_per_word = 10;
++ cmd <<= 1;
++ }
++ spi_message_add_tail(x, &m);
++
++ if (wlen) {
++ x++;
++ x->tx_buf = wbuf;
++ x->len = wlen;
++ x->bits_per_word = 9;
++ spi_message_add_tail(x, &m);
++ }
++
++ if (rlen) {
++ x++;
++ x->rx_buf = rbuf;
++ x->len = rlen;
++ spi_message_add_tail(x, &m);
++ }
++
++ r = spi_sync(md->spi, &m);
++ if (r < 0)
++ dev_dbg(&md->spi->dev, "spi_sync %d\n", r);
++}
++
++static inline void acx565akm_cmd(struct acx565akm_device *md, int cmd)
++{
++ acx565akm_transfer(md, cmd, NULL, 0, NULL, 0);
++}
++
++static inline void acx565akm_write(struct acx565akm_device *md,
++ int reg, const u8 *buf, int len)
++{
++ acx565akm_transfer(md, reg, buf, len, NULL, 0);
++}
++
++static inline void acx565akm_read(struct acx565akm_device *md,
++ int reg, u8 *buf, int len)
++{
++ acx565akm_transfer(md, reg, NULL, 0, buf, len);
++}
++
++static void hw_guard_start(struct acx565akm_device *md, int guard_msec)
++{
++ md->hw_guard_wait = msecs_to_jiffies(guard_msec);
++ md->hw_guard_end = jiffies + md->hw_guard_wait;
++}
++
++static void hw_guard_wait(struct acx565akm_device *md)
++{
++ unsigned long wait = md->hw_guard_end - jiffies;
++
++ if ((long)wait > 0 && wait <= md->hw_guard_wait) {
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ schedule_timeout(wait);
++ }
++}
++
++/*----------------------MIPID wrappers----------------------------*/
++
++static void set_sleep_mode(struct acx565akm_device *md, int on)
++{
++ int cmd;
++
++ if (on)
++ cmd = MIPID_CMD_SLEEP_IN;
++ else
++ cmd = MIPID_CMD_SLEEP_OUT;
++ /*
++ * We have to keep 120msec between sleep in/out commands.
++ * (8.2.15, 8.2.16).
++ */
++ hw_guard_wait(md);
++ acx565akm_cmd(md, cmd);
++ hw_guard_start(md, 120);
++}
++
++static void set_display_state(struct acx565akm_device *md, int enabled)
++{
++ int cmd = enabled ? MIPID_CMD_DISP_ON : MIPID_CMD_DISP_OFF;
++
++ acx565akm_cmd(md, cmd);
++}
++
++static int panel_enabled(struct acx565akm_device *md)
++{
++ u32 disp_status;
++ int enabled;
++
++ acx565akm_read(md, MIPID_CMD_READ_DISP_STATUS, (u8 *)&disp_status, 4);
++ disp_status = __be32_to_cpu(disp_status);
++ enabled = (disp_status & (1 << 17)) && (disp_status & (1 << 10));
++ dev_dbg(&md->spi->dev,
++ "LCD panel %senabled by bootloader (status 0x%04x)\n",
++ enabled ? "" : "not ", disp_status);
++ return enabled;
++}
++
++static int panel_detect(struct acx565akm_device *md)
++{
++ acx565akm_read(md, MIPID_CMD_READ_DISP_ID, md->display_id, 3);
++ dev_dbg(&md->spi->dev, "MIPI display ID: %02x%02x%02x\n",
++ md->display_id[0], md->display_id[1], md->display_id[2]);
++
++ switch (md->display_id[0]) {
++ case 0x10:
++ md->model = MIPID_VER_ACX565AKM;
++ md->name = "acx565akm";
++ md->has_bc = 1;
++ md->has_cabc = 1;
++ break;
++ case 0x29:
++ md->model = MIPID_VER_L4F00311;
++ md->name = "l4f00311";
++ break;
++ case 0x45:
++ md->model = MIPID_VER_LPH8923;
++ md->name = "lph8923";
++ break;
++ case 0x83:
++ md->model = MIPID_VER_LS041Y3;
++ md->name = "ls041y3";
++ break;
++ default:
++ md->name = "unknown";
++ dev_err(&md->spi->dev, "invalid display ID\n");
++ return -ENODEV;
++ }
++
++ md->revision = md->display_id[1];
++
++ dev_info(&md->spi->dev, "omapfb: %s rev %02x LCD detected\n",
++ md->name, md->revision);
++
++ return 0;
++}
++
++/*----------------------Backlight Control-------------------------*/
++
++static void enable_backlight_ctrl(struct acx565akm_device *md, int enable)
++{
++ u16 ctrl;
++
++ acx565akm_read(md, MIPID_CMD_READ_CTRL_DISP, (u8 *)&ctrl, 1);
++ if (enable) {
++ ctrl |= CTRL_DISP_BRIGHTNESS_CTRL_ON |
++ CTRL_DISP_BACKLIGHT_ON;
++ } else {
++ ctrl &= ~(CTRL_DISP_BRIGHTNESS_CTRL_ON |
++ CTRL_DISP_BACKLIGHT_ON);
++ }
++
++ ctrl |= 1 << 8;
++ acx565akm_write(md, MIPID_CMD_WRITE_CTRL_DISP, (u8 *)&ctrl, 2);
++}
++
++static void set_cabc_mode(struct acx565akm_device *md, unsigned mode)
++{
++ u16 cabc_ctrl;
++
++ md->cabc_mode = mode;
++ if (!md->enabled)
++ return;
++ cabc_ctrl = 0;
++ acx565akm_read(md, MIPID_CMD_READ_CABC, (u8 *)&cabc_ctrl, 1);
++ cabc_ctrl &= ~3;
++ cabc_ctrl |= (1 << 8) | (mode & 3);
++ acx565akm_write(md, MIPID_CMD_WRITE_CABC, (u8 *)&cabc_ctrl, 2);
++}
++
++static unsigned get_cabc_mode(struct acx565akm_device *md)
++{
++ return md->cabc_mode;
++}
++
++static unsigned get_hw_cabc_mode(struct acx565akm_device *md)
++{
++ u8 cabc_ctrl;
++
++ acx565akm_read(md, MIPID_CMD_READ_CABC, &cabc_ctrl, 1);
++ return cabc_ctrl & 3;
++}
++
++static void acx565akm_set_brightness(struct acx565akm_device *md, int level)
++{
++ int bv;
++
++ bv = level | (1 << 8);
++ acx565akm_write(md, MIPID_CMD_WRITE_DISP_BRIGHTNESS, (u8 *)&bv, 2);
++
++ if (level)
++ enable_backlight_ctrl(md, 1);
++ else
++ enable_backlight_ctrl(md, 0);
++}
++
++static int acx565akm_get_actual_brightness(struct acx565akm_device *md)
++{
++ u8 bv;
++
++ acx565akm_read(md, MIPID_CMD_READ_DISP_BRIGHTNESS, &bv, 1);
++
++ return bv;
++}
++
++
++static int acx565akm_bl_update_status(struct backlight_device *dev)
++{
++ struct acx565akm_device *md = dev_get_drvdata(&dev->dev);
++ int r;
++ int level;
++
++ dev_dbg(&md->spi->dev, "%s\n", __func__);
++
++ mutex_lock(&md->mutex);
++
++ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
++ dev->props.power == FB_BLANK_UNBLANK)
++ level = dev->props.brightness;
++ else
++ level = 0;
++
++ r = 0;
++ if (md->has_bc)
++ acx565akm_set_brightness(md, level);
++ else if (md->dssdev->set_backlight)
++ r = md->dssdev->set_backlight(md->dssdev, level);
++ else
++ r = -ENODEV;
++
++ mutex_unlock(&md->mutex);
++
++ return r;
++}
++
++static int acx565akm_bl_get_intensity(struct backlight_device *dev)
++{
++ struct acx565akm_device *md = dev_get_drvdata(&dev->dev);
++
++ dev_dbg(&dev->dev, "%s\n", __func__);
++
++ if (!md->has_bc && md->dssdev->set_backlight == NULL)
++ return -ENODEV;
++
++ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
++ dev->props.power == FB_BLANK_UNBLANK) {
++ if (md->has_bc)
++ return acx565akm_get_actual_brightness(md);
++ else
++ return dev->props.brightness;
++ }
++
++ return 0;
++}
++
++static struct backlight_ops acx565akm_bl_ops = {
++ .get_brightness = acx565akm_bl_get_intensity,
++ .update_status = acx565akm_bl_update_status,
++};
++
++/*--------------------Auto Brightness control via Sysfs---------------------*/
++
++static const char *cabc_modes[] = {
++ "off", /* always used when CABC is not supported */
++ "ui",
++ "still-image",
++ "moving-image",
++};
++
++static ssize_t show_cabc_mode(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct acx565akm_device *md = dev_get_drvdata(dev);
++ const char *mode_str;
++ int mode;
++ int len;
++
++ if (!md->has_cabc)
++ mode = 0;
++ else
++ mode = get_cabc_mode(md);
++ mode_str = "unknown";
++ if (mode >= 0 && mode < ARRAY_SIZE(cabc_modes))
++ mode_str = cabc_modes[mode];
++ len = snprintf(buf, PAGE_SIZE, "%s\n", mode_str);
++
++ return len < PAGE_SIZE - 1 ? len : PAGE_SIZE - 1;
++}
++
++static ssize_t store_cabc_mode(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct acx565akm_device *md = dev_get_drvdata(dev);
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) {
++ const char *mode_str = cabc_modes[i];
++ int cmp_len = strlen(mode_str);
++
++ if (count > 0 && buf[count - 1] == '\n')
++ count--;
++ if (count != cmp_len)
++ continue;
++
++ if (strncmp(buf, mode_str, cmp_len) == 0)
++ break;
++ }
++
++ if (i == ARRAY_SIZE(cabc_modes))
++ return -EINVAL;
++
++ if (!md->has_cabc && i != 0)
++ return -EINVAL;
++
++ mutex_lock(&md->mutex);
++ set_cabc_mode(md, i);
++ mutex_unlock(&md->mutex);
++
++ return count;
++}
++
++static ssize_t show_cabc_available_modes(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct acx565akm_device *md = dev_get_drvdata(dev);
++ int len;
++ int i;
++
++ if (!md->has_cabc)
++ return snprintf(buf, PAGE_SIZE, "%s\n", cabc_modes[0]);
++
++ for (i = 0, len = 0;
++ len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++)
++ len += snprintf(&buf[len], PAGE_SIZE - len, "%s%s%s",
++ i ? " " : "", cabc_modes[i],
++ i == ARRAY_SIZE(cabc_modes) - 1 ? "\n" : "");
++
++ return len < PAGE_SIZE ? len : PAGE_SIZE - 1;
++}
++
++static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR,
++ show_cabc_mode, store_cabc_mode);
++static DEVICE_ATTR(cabc_available_modes, S_IRUGO,
++ show_cabc_available_modes, NULL);
++
++static struct attribute *bldev_attrs[] = {
++ &dev_attr_cabc_mode.attr,
++ &dev_attr_cabc_available_modes.attr,
++ NULL,
++};
++
++static struct attribute_group bldev_attr_group = {
++ .attrs = bldev_attrs,
++};
++
++/*---------------------------ACX Panel----------------------------*/
++
++static struct omap_video_timings acx_panel_timings = {
++ .x_res = 800,
++ .y_res = 480,
++ .pixel_clock = 24000,
++ .hfp = 28,
++ .hsw = 4,
++ .hbp = 24,
++ .vfp = 3,
++ .vsw = 3,
++ .vbp = 4,
++};
++
++static int acx_panel_probe(struct omap_dss_device *dssdev)
++{
++ int r;
++ struct acx565akm_device *md = &acx_dev;
++ struct backlight_device *bldev;
++ int max_brightness, brightness;
++
++ dev_dbg(&dssdev->dev, "%s\n", __func__);
++ dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
++ OMAP_DSS_LCD_IHS;
++ /* FIXME AC bias ? */
++ dssdev->panel.timings = acx_panel_timings;
++ dssdev->panel.recommended_bpp = 16;
++
++ if (dssdev->platform_enable)
++ dssdev->platform_enable(dssdev);
++ /*
++ * After reset we have to wait 5 msec before the first
++ * command can be sent.
++ */
++ msleep(5);
++
++ md->enabled = panel_enabled(md);
++
++ r = panel_detect(md);
++ if (r) {
++ dev_err(&dssdev->dev, "%s panel detect error\n", __func__);
++ if (!md->enabled && dssdev->platform_disable)
++ dssdev->platform_disable(dssdev);
++ return r;
++ }
++
++ mutex_lock(&acx_dev.mutex);
++ acx_dev.dssdev = dssdev;
++ mutex_unlock(&acx_dev.mutex);
++
++ if (!md->enabled) {
++ if (dssdev->platform_disable)
++ dssdev->platform_disable(dssdev);
++ }
++
++ /*------- Backlight control --------*/
++
++ bldev = backlight_device_register("acx565akm", &md->spi->dev,
++ md, &acx565akm_bl_ops);
++ md->bl_dev = bldev;
++ if (md->has_cabc) {
++ r = sysfs_create_group(&bldev->dev.kobj, &bldev_attr_group);
++ if (r) {
++ dev_err(&bldev->dev,
++ "%s failed to create sysfs files\n", __func__);
++ backlight_device_unregister(bldev);
++ return r;
++ }
++ md->cabc_mode = get_hw_cabc_mode(md);
++ }
++
++ bldev->props.fb_blank = FB_BLANK_UNBLANK;
++ bldev->props.power = FB_BLANK_UNBLANK;
++
++ if (md->has_bc)
++ max_brightness = 255;
++ else
++ max_brightness = dssdev->max_backlight_level;
++
++ if (md->has_bc)
++ brightness = acx565akm_get_actual_brightness(md);
++ else if (dssdev->get_backlight)
++ brightness = dssdev->get_backlight(dssdev);
++ else
++ brightness = 0;
++
++ bldev->props.max_brightness = max_brightness;
++ bldev->props.brightness = brightness;
++
++ acx565akm_bl_update_status(bldev);
++ return 0;
++}
++
++static void acx_panel_remove(struct omap_dss_device *dssdev)
++{
++ struct acx565akm_device *md = &acx_dev;
++
++ dev_dbg(&dssdev->dev, "%s\n", __func__);
++ sysfs_remove_group(&md->bl_dev->dev.kobj, &bldev_attr_group);
++ backlight_device_unregister(md->bl_dev);
++ mutex_lock(&acx_dev.mutex);
++ acx_dev.dssdev = NULL;
++ mutex_unlock(&acx_dev.mutex);
++}
++
++static int acx_panel_power_on(struct omap_dss_device *dssdev)
++{
++ struct acx565akm_device *md = &acx_dev;
++ int r;
++
++ dev_dbg(&dssdev->dev, "%s\n", __func__);
++
++ mutex_lock(&md->mutex);
++
++ if (dssdev->platform_enable) {
++ r = dssdev->platform_enable(dssdev);
++ if (r)
++ return r;
++ }
++
++ if (md->enabled) {
++ dev_dbg(&md->spi->dev, "panel already enabled\n");
++ mutex_unlock(&md->mutex);
++ return 0;
++ }
++
++ /*
++ * We have to meet all the following delay requirements:
++ * 1. tRW: reset pulse width 10usec (7.12.1)
++ * 2. tRT: reset cancel time 5msec (7.12.1)
++ * 3. Providing PCLK,HS,VS signals for 2 frames = ~50msec worst
++ * case (7.6.2)
++ * 4. 120msec before the sleep out command (7.12.1)
++ */
++ msleep(120);
++
++ set_sleep_mode(md, 0);
++ md->enabled = 1;
++
++ /* 5msec between sleep out and the next command. (8.2.16) */
++ msleep(5);
++ set_display_state(md, 1);
++ set_cabc_mode(md, md->cabc_mode);
++
++ mutex_unlock(&md->mutex);
++
++ return acx565akm_bl_update_status(md->bl_dev);
++}
++
++static void acx_panel_power_off(struct omap_dss_device *dssdev)
++{
++ struct acx565akm_device *md = &acx_dev;
++
++ dev_dbg(&dssdev->dev, "%s\n", __func__);
++
++ mutex_lock(&md->mutex);
++
++ if (!md->enabled) {
++ mutex_unlock(&md->mutex);
++ return;
++ }
++ set_display_state(md, 0);
++ set_sleep_mode(md, 1);
++ md->enabled = 0;
++ /*
++ * We have to provide PCLK,HS,VS signals for 2 frames (worst case
++ * ~50msec) after sending the sleep in command and asserting the
++ * reset signal. We probably could assert the reset w/o the delay
++ * but we still delay to avoid possible artifacts. (7.6.1)
++ */
++ msleep(50);
++
++ if (dssdev->platform_disable)
++ dssdev->platform_disable(dssdev);
++
++ mutex_unlock(&md->mutex);
++}
++
++static int acx_panel_enable(struct omap_dss_device *dssdev)
++{
++ int r;
++
++ dev_dbg(&dssdev->dev, "%s\n", __func__);
++ r = acx_panel_power_on(dssdev);
++
++ if (r)
++ return r;
++
++ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
++ return 0;
++}
++
++static void acx_panel_disable(struct omap_dss_device *dssdev)
++{
++ dev_dbg(&dssdev->dev, "%s\n", __func__);
++ acx_panel_power_off(dssdev);
++ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
++}
++
++static int acx_panel_suspend(struct omap_dss_device *dssdev)
++{
++ dev_dbg(&dssdev->dev, "%s\n", __func__);
++ acx_panel_power_off(dssdev);
++ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
++ return 0;
++}
++
++static int acx_panel_resume(struct omap_dss_device *dssdev)
++{
++ int r;
++
++ dev_dbg(&dssdev->dev, "%s\n", __func__);
++ r = acx_panel_power_on(dssdev);
++ if (r)
++ return r;
++
++ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
++ return 0;
++}
++
++static struct omap_dss_driver acx_panel_driver = {
++ .probe = acx_panel_probe,
++ .remove = acx_panel_remove,
++
++ .enable = acx_panel_enable,
++ .disable = acx_panel_disable,
++ .suspend = acx_panel_suspend,
++ .resume = acx_panel_resume,
++
++ .driver = {
++ .name = "panel-acx565akm",
++ .owner = THIS_MODULE,
++ },
++};
++
++/*--------------------SPI probe-------------------------*/
++
++static int acx565akm_spi_probe(struct spi_device *spi)
++{
++ struct acx565akm_device *md = &acx_dev;
++
++ dev_dbg(&spi->dev, "%s\n", __func__);
++
++ spi->mode = SPI_MODE_3;
++ md->spi = spi;
++ mutex_init(&md->mutex);
++ dev_set_drvdata(&spi->dev, md);
++
++ omap_dss_register_driver(&acx_panel_driver);
++
++ return 0;
++}
++
++static int acx565akm_spi_remove(struct spi_device *spi)
++{
++ struct acx565akm_device *md = dev_get_drvdata(&spi->dev);
++
++ dev_dbg(&md->spi->dev, "%s\n", __func__);
++ omap_dss_unregister_driver(&acx_panel_driver);
++
++ return 0;
++}
++
++static struct spi_driver acx565akm_spi_driver = {
++ .driver = {
++ .name = "acx565akm",
++ .bus = &spi_bus_type,
++ .owner = THIS_MODULE,
++ },
++ .probe = acx565akm_spi_probe,
++ .remove = __devexit_p(acx565akm_spi_remove),
++};
++
++static int __init acx565akm_init(void)
++{
++ return spi_register_driver(&acx565akm_spi_driver);
++}
++
++static void __exit acx565akm_exit(void)
++{
++ spi_unregister_driver(&acx565akm_spi_driver);
++}
++
++module_init(acx565akm_init);
++module_exit(acx565akm_exit);
++
++MODULE_AUTHOR("Nokia Corporation");
++MODULE_DESCRIPTION("acx565akm LCD Driver");
++MODULE_LICENSE("GPL");
+--
+1.6.0.4
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-DSS2-Add-Kconfig-option-for-DPI-display-type.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-DSS2-Add-Kconfig-option-for-DPI-display-type.patch
new file mode 100644
index 0000000..c96f97f
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-DSS2-Add-Kconfig-option-for-DPI-display-type.patch
@@ -0,0 +1,107 @@
+From 8fe1805debbf54143073a2f85e8568ed7b9ca38b Mon Sep 17 00:00:00 2001
+From: Roger Quadros <roger.quadros@nokia.com>
+Date: Wed, 17 Mar 2010 12:35:19 +0000
+Subject: [PATCH 1/10] OMAP: DSS2: Add Kconfig option for DPI display type
+
+From: Roger Quadros <roger.quadros@nokia.com>
+
+Patch-mainline: 2.6.35?
+Git-repo: http://www.gitorious.org/linux-omap-dss2/linux/commit/36b33efe80eb07e3447107c2bdba3c674c10a41a
+
+This allows us to disable DPI on systems that do not have it
+
+Signed-off-by: Roger Quadros <roger.quadros@nokia.com>
+Signed-off-by: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+---
+ drivers/video/omap2/dss/Kconfig | 6 ++++++
+ drivers/video/omap2/dss/Makefile | 3 ++-
+ drivers/video/omap2/dss/core.c | 4 ++++
+ drivers/video/omap2/dss/display.c | 4 ++++
+ 4 files changed, 16 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
+index c63ce76..cbe8ea0 100644
+--- a/drivers/video/omap2/dss/Kconfig
++++ b/drivers/video/omap2/dss/Kconfig
+@@ -32,6 +32,12 @@ config OMAP2_DSS_COLLECT_IRQ_STATS
+ help
+ Collect DSS IRQ statistics, printable via debugfs
+
++config OMAP2_DSS_DPI
++ bool "DPI support"
++ default y
++ help
++ DPI Interface. This is the Parallel Display Interface.
++
+ config OMAP2_DSS_RFBI
+ bool "RFBI support"
+ default n
+diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/omap2/dss/Makefile
+index 980c72c..d71b5d9 100644
+--- a/drivers/video/omap2/dss/Makefile
++++ b/drivers/video/omap2/dss/Makefile
+@@ -1,5 +1,6 @@
+ obj-$(CONFIG_OMAP2_DSS) += omapdss.o
+-omapdss-y := core.o dss.o dispc.o dpi.o display.o manager.o overlay.o
++omapdss-y := core.o dss.o dispc.o display.o manager.o overlay.o
++omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
+ omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
+ omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
+ omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
+diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
+index 82918ee..0988781 100644
+--- a/drivers/video/omap2/dss/core.c
++++ b/drivers/video/omap2/dss/core.c
+@@ -473,11 +473,13 @@ static int omap_dss_probe(struct platform_device *pdev)
+ }
+ #endif
+
++#ifdef CONFIG_OMAP2_DSS_DPI
+ r = dpi_init();
+ if (r) {
+ DSSERR("Failed to initialize dpi\n");
+ goto fail0;
+ }
++#endif
+
+ r = dispc_init();
+ if (r) {
+@@ -548,7 +550,9 @@ static int omap_dss_remove(struct platform_device *pdev)
+ venc_exit();
+ #endif
+ dispc_exit();
++#ifdef CONFIG_OMAP2_DSS_DPI
+ dpi_exit();
++#endif
+ #ifdef CONFIG_OMAP2_DSS_RFBI
+ rfbi_exit();
+ #endif
+diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
+index 3b92b84..2150f12 100644
+--- a/drivers/video/omap2/dss/display.c
++++ b/drivers/video/omap2/dss/display.c
+@@ -405,7 +405,9 @@ void dss_init_device(struct platform_device *pdev,
+ int r;
+
+ switch (dssdev->type) {
++#ifdef CONFIG_OMAP2_DSS_DPI
+ case OMAP_DISPLAY_TYPE_DPI:
++#endif
+ #ifdef CONFIG_OMAP2_DSS_RFBI
+ case OMAP_DISPLAY_TYPE_DBI:
+ #endif
+@@ -430,9 +432,11 @@ void dss_init_device(struct platform_device *pdev,
+ dssdev->wait_vsync = default_wait_vsync;
+
+ switch (dssdev->type) {
++#ifdef CONFIG_OMAP2_DSS_DPI
+ case OMAP_DISPLAY_TYPE_DPI:
+ r = dpi_init_display(dssdev);
+ break;
++#endif
+ #ifdef CONFIG_OMAP2_DSS_RFBI
+ case OMAP_DISPLAY_TYPE_DBI:
+ r = rfbi_init_display(dssdev);
+--
+1.6.0.4
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-DSS2-Use-vdds_sdi-regulator-supply-in-SDI.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-DSS2-Use-vdds_sdi-regulator-supply-in-SDI.patch
new file mode 100644
index 0000000..db0bf72
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-DSS2-Use-vdds_sdi-regulator-supply-in-SDI.patch
@@ -0,0 +1,118 @@
+From 4111c672962a8df130b294961ab552fef6a498d9 Mon Sep 17 00:00:00 2001
+From: Roger Quadros <roger.quadros@nokia.com>
+Date: Wed, 17 Mar 2010 12:35:21 +0000
+Subject: [PATCH 2/10] OMAP: DSS2: Use vdds_sdi regulator supply in SDI
+
+From: Roger Quadros <roger.quadros@nokia.com>
+
+Patch-mainline: 2.6.35?
+Git-repo: http://www.gitorious.org/linux-omap-dss2/linux/commit/1d5c6663d92b37539617d833e6049e5dd21751c4
+
+This patch enables the use of vdds_sdi regulator in SDI subsystem.
+We can disable the vdds_sdi voltage when not in use to save
+power.
+
+Signed-off-by: Roger Quadros <roger.quadros@nokia.com>
+Signed-off-by: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+---
+ drivers/video/omap2/dss/core.c | 2 +-
+ drivers/video/omap2/dss/dss.h | 2 +-
+ drivers/video/omap2/dss/sdi.c | 17 ++++++++++++++++-
+ 3 files changed, 18 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
+index 0988781..97f929b 100644
+--- a/drivers/video/omap2/dss/core.c
++++ b/drivers/video/omap2/dss/core.c
+@@ -495,7 +495,7 @@ static int omap_dss_probe(struct platform_device *pdev)
+ #endif
+ if (cpu_is_omap34xx()) {
+ #ifdef CONFIG_OMAP2_DSS_SDI
+- r = sdi_init(skip_init);
++ r = sdi_init(pdev, skip_init);
+ if (r) {
+ DSSERR("Failed to initialize SDI\n");
+ goto fail0;
+diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
+index 2bcb124..8490bdf 100644
+--- a/drivers/video/omap2/dss/dss.h
++++ b/drivers/video/omap2/dss/dss.h
+@@ -231,7 +231,7 @@ int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
+ struct dispc_clock_info *dispc_cinfo);
+
+ /* SDI */
+-int sdi_init(bool skip_init);
++int sdi_init(struct platform_device *pdev, bool skip_init);
+ void sdi_exit(void);
+ int sdi_init_display(struct omap_dss_device *display);
+
+diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
+index c24f307..025c56c 100644
+--- a/drivers/video/omap2/dss/sdi.c
++++ b/drivers/video/omap2/dss/sdi.c
+@@ -23,6 +23,8 @@
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+ #include <linux/err.h>
++#include <linux/platform_device.h>
++#include <linux/regulator/consumer.h>
+
+ #include <plat/display.h>
+ #include "dss.h"
+@@ -30,6 +32,7 @@
+ static struct {
+ bool skip_init;
+ bool update_enabled;
++ struct regulator *vdds_sdi_reg;
+ } sdi;
+
+ static void sdi_basic_init(void)
+@@ -63,6 +66,10 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
+ goto err1;
+ }
+
++ r = regulator_enable(sdi.vdds_sdi_reg);
++ if (r)
++ goto err1;
++
+ /* In case of skip_init sdi_init has already enabled the clocks */
+ if (!sdi.skip_init)
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+@@ -136,6 +143,7 @@ err3:
+ dispc_enable_lcd_out(0);
+ err2:
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
++ regulator_disable(sdi.vdds_sdi_reg);
+ err1:
+ omap_dss_stop_device(dssdev);
+ err0:
+@@ -164,6 +172,8 @@ static void sdi_display_disable(struct omap_dss_device *dssdev)
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+
++ regulator_disable(sdi.vdds_sdi_reg);
++
+ omap_dss_stop_device(dssdev);
+ }
+
+@@ -258,11 +268,16 @@ int sdi_init_display(struct omap_dss_device *dssdev)
+ return 0;
+ }
+
+-int sdi_init(bool skip_init)
++int sdi_init(struct platform_device *pdev, bool skip_init)
+ {
+ /* we store this for first display enable, then clear it */
+ sdi.skip_init = skip_init;
+
++ sdi.vdds_sdi_reg = regulator_get(&pdev->dev, "vdds_sdi");
++ if (IS_ERR(sdi.vdds_sdi_reg)) {
++ DSSERR("can't get VDDS_SDI regulator\n");
++ return PTR_ERR(sdi.vdds_sdi_reg);
++ }
+ /*
+ * Enable clocks already here, otherwise there would be a toggle
+ * of them until sdi_display_enable is called.
+--
+1.6.0.4
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-RX51-Add-LCD-Panel-support.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-RX51-Add-LCD-Panel-support.patch
new file mode 100644
index 0000000..e418648
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-RX51-Add-LCD-Panel-support.patch
@@ -0,0 +1,207 @@
+From 2377c3d6cf49c057bd2237fbabf429a9b243c663 Mon Sep 17 00:00:00 2001
+From: Roger Quadros <roger.quadros@nokia.com>
+Date: Wed, 10 Mar 2010 15:30:05 +0200
+Subject: [PATCH 4/10] OMAP: RX51: Add LCD Panel support
+
+From: Roger Quadros <roger.quadros@nokia.com>
+
+Patch-mainline: 2.6.35?
+Git-repo: http://www.gitorious.org/linux-omap-dss2/linux/commit/c296146d9fe00624cbb34da56c6b2927ef9bbbad
+
+Adds basic support for LCD Panel on Nokia N900
+
+Signed-off-by: Roger Quadros <roger.quadros@nokia.com>
+---
+ arch/arm/mach-omap2/Makefile | 1 +
+ arch/arm/mach-omap2/board-rx51-peripherals.c | 13 +++
+ arch/arm/mach-omap2/board-rx51-video.c | 107 ++++++++++++++++++++++++++
+ arch/arm/mach-omap2/board-rx51.c | 2 +
+ 4 files changed, 123 insertions(+), 0 deletions(-)
+ create mode 100644 arch/arm/mach-omap2/board-rx51-video.c
+
+diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
+index b32678b..05e7c9c 100644
+--- a/arch/arm/mach-omap2/Makefile
++++ b/arch/arm/mach-omap2/Makefile
+@@ -89,6 +89,7 @@ obj-$(CONFIG_MACH_NOKIA_N8X0) += board-n8x0.o
+ obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51.o \
+ board-rx51-sdram.o \
+ board-rx51-peripherals.o \
++ board-rx51-video.o \
+ mmc-twl4030.o
+ obj-$(CONFIG_MACH_OMAP_ZOOM2) += board-zoom2.o \
+ board-zoom-peripherals.o \
+diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
+index acafdbc..7bae364 100644
+--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
++++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
+@@ -45,6 +45,7 @@
+ /* list all spi devices here */
+ enum {
+ RX51_SPI_WL1251,
++ RX51_SPI_MIPID, /* LCD panel */
+ };
+
+ static struct wl12xx_platform_data wl1251_pdata;
+@@ -54,6 +55,11 @@ static struct omap2_mcspi_device_config wl1251_mcspi_config = {
+ .single_channel = 1,
+ };
+
++static struct omap2_mcspi_device_config mipid_mcspi_config = {
++ .turbo_mode = 0,
++ .single_channel = 1,
++};
++
+ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
+ [RX51_SPI_WL1251] = {
+ .modalias = "wl1251",
+@@ -64,6 +70,13 @@ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
+ .controller_data = &wl1251_mcspi_config,
+ .platform_data = &wl1251_pdata,
+ },
++ [RX51_SPI_MIPID] = {
++ .modalias = "acx565akm",
++ .bus_num = 1,
++ .chip_select = 2,
++ .max_speed_hz = 6000000,
++ .controller_data = &mipid_mcspi_config,
++ },
+ };
+
+ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+diff --git a/arch/arm/mach-omap2/board-rx51-video.c b/arch/arm/mach-omap2/board-rx51-video.c
+new file mode 100644
+index 0000000..e4a9d4c
+--- /dev/null
++++ b/arch/arm/mach-omap2/board-rx51-video.c
+@@ -0,0 +1,107 @@
++/*
++ * linux/arch/arm/mach-omap2/board-rx51-video.c
++ *
++ * Copyright (C) 2010 Nokia
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
++#include <linux/gpio.h>
++#include <linux/spi/spi.h>
++#include <linux/mm.h>
++
++#include <asm/mach-types.h>
++#include <plat/mux.h>
++#include <plat/display.h>
++#include <plat/vram.h>
++#include <plat/mcspi.h>
++
++#include "mux.h"
++
++#define RX51_LCD_RESET_GPIO 90
++
++#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
++
++static int rx51_lcd_enable(struct omap_dss_device *dssdev)
++{
++ gpio_set_value(dssdev->reset_gpio, 1);
++ return 0;
++}
++
++static void rx51_lcd_disable(struct omap_dss_device *dssdev)
++{
++ gpio_set_value(dssdev->reset_gpio, 0);
++}
++
++static struct omap_dss_device rx51_lcd_device = {
++ .name = "lcd",
++ .driver_name = "panel-acx565akm",
++ .type = OMAP_DISPLAY_TYPE_SDI,
++ .phy.sdi.datapairs = 2,
++ .reset_gpio = RX51_LCD_RESET_GPIO,
++ .platform_enable = rx51_lcd_enable,
++ .platform_disable = rx51_lcd_disable,
++};
++
++static struct omap_dss_device *rx51_dss_devices[] = {
++ &rx51_lcd_device,
++};
++
++static struct omap_dss_board_info rx51_dss_board_info = {
++ .num_devices = ARRAY_SIZE(rx51_dss_devices),
++ .devices = rx51_dss_devices,
++ .default_device = &rx51_lcd_device,
++};
++
++struct platform_device rx51_display_device = {
++ .name = "omapdss",
++ .id = -1,
++ .dev = {
++ .platform_data = &rx51_dss_board_info,
++ },
++};
++
++static struct platform_device *rx51_video_devices[] __initdata = {
++ &rx51_display_device,
++};
++
++static int __init rx51_video_init(void)
++{
++ if (!machine_is_nokia_rx51())
++ return 0;
++
++ if (omap_mux_init_gpio(RX51_LCD_RESET_GPIO, OMAP_PIN_OUTPUT)) {
++ pr_err("%s cannot configure MUX for LCD RESET\n", __func__);
++ return 0;
++ }
++
++ if (gpio_request(RX51_LCD_RESET_GPIO, "LCD ACX565AKM reset")) {
++ pr_err("%s failed to get LCD Reset GPIO\n", __func__);
++ return 0;
++ }
++
++ gpio_direction_output(RX51_LCD_RESET_GPIO, 1);
++
++ platform_add_devices(rx51_video_devices,
++ ARRAY_SIZE(rx51_video_devices));
++ return 0;
++}
++
++subsys_initcall(rx51_video_init);
++
++void __init rx51_video_mem_init(void)
++{
++ /*
++ * GFX 864x480x32bpp
++ * VID1/2 1280x720x32bpp double buffered
++ */
++ omap_vram_set_sdram_vram(PAGE_ALIGN(864 * 480 * 4) +
++ 2 * PAGE_ALIGN(1280 * 720 * 4 * 2), 0);
++}
++
++#endif /* defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE) */
+diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
+index 67bb347..f1f81cf 100644
+--- a/arch/arm/mach-omap2/board-rx51.c
++++ b/arch/arm/mach-omap2/board-rx51.c
+@@ -32,6 +32,7 @@
+ #include "mux.h"
+
+ struct omap_sdrc_params *rx51_get_sdram_timings(void);
++extern void rx51_video_mem_init(void);
+
+ static struct omap_lcd_config rx51_lcd_config = {
+ .ctrl_name = "internal",
+@@ -93,6 +94,7 @@ static void __init rx51_init(void)
+ static void __init rx51_map_io(void)
+ {
+ omap2_set_globals_343x();
++ rx51_video_mem_init();
+ omap2_map_common_io();
+ }
+
+--
+1.6.0.4
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-RX51-Add-Touch-Controller-in-SPI-board-info.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-RX51-Add-Touch-Controller-in-SPI-board-info.patch
new file mode 100644
index 0000000..1599c02
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-RX51-Add-Touch-Controller-in-SPI-board-info.patch
@@ -0,0 +1,64 @@
+From 186ac697a023e7b95db72433fb8d4e8a9553826d Mon Sep 17 00:00:00 2001
+From: Roger Quadros <roger.quadros@nokia.com>
+Date: Mon, 22 Mar 2010 17:16:25 +0200
+Subject: [PATCH 6/10] OMAP: RX51: Add Touch Controller in SPI board info
+
+From: Roger Quadros <roger.quadros@nokia.com>
+
+Patch-mainline: 2.6.35?
+Git-repo: http://www.gitorious.org/linux-omap-dss2/linux/commit/a77ee8b332b62f3b10ffc15b27b889adf50cd013
+
+The Touch controller and LCD Panel share the same SPI bus 1.
+So, we need to define the touch controller in the SPI board info
+else, the SPI bus will be contended due to invalid state of
+Touch controller's Chip Select thus preventing the LCD panel
+from working.
+
+Signed-off-by: Roger Quadros <roger.quadros@nokia.com>
+---
+ arch/arm/mach-omap2/board-rx51-peripherals.c | 15 +++++++++++++++
+ 1 files changed, 15 insertions(+), 0 deletions(-)
+
+diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
+index 6a41a0a..bdb1c54 100644
+--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
++++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
+@@ -46,6 +46,7 @@
+ enum {
+ RX51_SPI_WL1251,
+ RX51_SPI_MIPID, /* LCD panel */
++ RX51_SPI_TSC2005, /* Touch Controller */
+ };
+
+ static struct wl12xx_platform_data wl1251_pdata;
+@@ -60,6 +61,11 @@ static struct omap2_mcspi_device_config mipid_mcspi_config = {
+ .single_channel = 1,
+ };
+
++static struct omap2_mcspi_device_config tsc2005_mcspi_config = {
++ .turbo_mode = 0,
++ .single_channel = 1,
++};
++
+ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
+ [RX51_SPI_WL1251] = {
+ .modalias = "wl1251",
+@@ -77,6 +83,15 @@ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
+ .max_speed_hz = 6000000,
+ .controller_data = &mipid_mcspi_config,
+ },
++ [RX51_SPI_TSC2005] = {
++ .modalias = "tsc2005",
++ .bus_num = 1,
++ .chip_select = 0,
++ /* .irq = OMAP_GPIO_IRQ(RX51_TSC2005_IRQ_GPIO),*/
++ .max_speed_hz = 6000000,
++ .controller_data = &tsc2005_mcspi_config,
++ /* .platform_data = &tsc2005_config,*/
++ },
+ };
+
+ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+--
+1.6.0.4
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-RX51-Add-vdds_sdi-supply-voltage-for-SDI.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-RX51-Add-vdds_sdi-supply-voltage-for-SDI.patch
new file mode 100644
index 0000000..f4d4325
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-OMAP-RX51-Add-vdds_sdi-supply-voltage-for-SDI.patch
@@ -0,0 +1,54 @@
+From 48c56cb077e21cf56f0673c9010dde7be4c1bd88 Mon Sep 17 00:00:00 2001
+From: Roger Quadros <roger.quadros@nokia.com>
+Date: Fri, 12 Mar 2010 16:14:22 +0200
+Subject: [PATCH 5/10] OMAP: RX51: Add "vdds_sdi" supply voltage for SDI
+
+From: Roger Quadros <roger.quadros@nokia.com>
+
+Patch-mainline: 2.6.35?
+Git-repo: http://www.gitorious.org/linux-omap-dss2/linux/commit/659550d7f54a2620ba2cc1a98273793ce97de230
+
+The SDI Display subsystem needs access to the vdds_sdi supply
+regulator. This is TWL4030's VAUX1 supply on RX-51.
+
+Signed-off-by: Roger Quadros <roger.quadros@nokia.com>
+---
+ arch/arm/mach-omap2/board-rx51-peripherals.c | 15 +++++++++++++++
+ 1 files changed, 15 insertions(+), 0 deletions(-)
+
+diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
+index 7bae364..6a41a0a 100644
+--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
++++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
+@@ -256,6 +256,19 @@ static struct regulator_consumer_supply rx51_vsim_supply = {
+ .supply = "vmmc_aux",
+ };
+
++#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
++extern struct platform_device rx51_display_device;
++#endif
++
++static struct regulator_consumer_supply rx51_vaux1_consumers[] = {
++#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
++ {
++ .supply = "vdds_sdi",
++ .dev = &rx51_display_device.dev,
++ },
++#endif
++};
++
+ static struct regulator_init_data rx51_vaux1 = {
+ .constraints = {
+ .name = "V28",
+@@ -266,6 +279,8 @@ static struct regulator_init_data rx51_vaux1 = {
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
++ .num_consumer_supplies = ARRAY_SIZE(rx51_vaux1_consumers),
++ .consumer_supplies = rx51_vaux1_consumers,
+ };
+
+ static struct regulator_init_data rx51_vaux2 = {
+--
+1.6.0.4
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-input-touchscreen-introduce-tsc2005-driver.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-input-touchscreen-introduce-tsc2005-driver.patch
new file mode 100644
index 0000000..41de3db
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-input-touchscreen-introduce-tsc2005-driver.patch
@@ -0,0 +1,804 @@
+From c0960b696fbccc83925134aec007801073bcac54 Mon Sep 17 00:00:00 2001
+From: Lauri Leukkunen <lauri.leukkunen@nokia.com>
+Date: Fri, 12 Mar 2010 16:54:33 +0000
+Subject: [PATCH 7/10] input: touchscreen: introduce tsc2005 driver
+
+Patch-mainline: 2.6.35?
+Discussions: http://www.mail-archive.com/linux-omap@vger.kernel.org/msg26748.html
+
+Introduce a driver for the Texas Instruments TSC2005 touchscreen
+controller (http://focus.ti.com/docs/prod/folders/print/tsc2005.html).
+
+The patch is based on a driver by Lauri Leukkunen, with modifications
+by David Brownell, Phil Carmody, Imre Deak, Hiroshi DOYU, Ari Kauppi,
+Tony Lindgren, Jarkko Nikula, Eero Nurkkala and Roman Tereshonkov.
+
+Signed-off-by: Lauri Leukkunen <lauri.leukkunen@nokia.com>
+[aaro.koskinen@nokia.com: patch description, rebasing & cleanup]
+Signed-off-by: Aaro Koskinen <aaro.koskinen@nokia.com>
+Cc: David Brownell <dbrownell@users.sourceforge.net>
+Cc: Phil Carmody <ext-phil.2.carmody@nokia.com>
+Cc: Imre Deak <imre.deak@nokia.com>
+Cc: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+Cc: Ari Kauppi <Ext-Ari.Kauppi@nokia.com>
+Cc: Tony Lindgren <tony@atomide.com>
+Cc: Jarkko Nikula <jhnikula@gmail.com>
+Cc: Eero Nurkkala <ext-eero.nurkkala@nokia.com>
+Cc: Roman Tereshonkov <roman.tereshonkov@nokia.com>
+---
+ drivers/input/touchscreen/Kconfig | 11 +
+ drivers/input/touchscreen/Makefile | 1 +
+ drivers/input/touchscreen/tsc2005.c | 678 +++++++++++++++++++++++++++++++++++
+ include/linux/spi/tsc2005.h | 41 +++
+ 4 files changed, 731 insertions(+), 0 deletions(-)
+ create mode 100644 drivers/input/touchscreen/tsc2005.c
+ create mode 100644 include/linux/spi/tsc2005.h
+
+diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
+index dfafc76..72c1797 100644
+--- a/drivers/input/touchscreen/Kconfig
++++ b/drivers/input/touchscreen/Kconfig
+@@ -548,6 +548,17 @@ config TOUCHSCREEN_TOUCHIT213
+ To compile this driver as a module, choose M here: the
+ module will be called touchit213.
+
++config TOUCHSCREEN_TSC2005
++ tristate "TSC2005 based touchscreens"
++ depends on SPI_MASTER
++ help
++ Say Y here if you have a TSC2005 based touchscreen.
++
++ If unsure, say N.
++
++ To compile this driver as a module, choose M here: the
++ module will be called tsc2005.
++
+ config TOUCHSCREEN_TSC2007
+ tristate "TSC2007 based touchscreens"
+ depends on I2C
+diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
+index d61a3b4..61fa8b5 100644
+--- a/drivers/input/touchscreen/Makefile
++++ b/drivers/input/touchscreen/Makefile
+@@ -33,6 +33,7 @@ obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
+ obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
+ obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o
+ obj-$(CONFIG_TOUCHSCREEN_TOUCHWIN) += touchwin.o
++obj-$(CONFIG_TOUCHSCREEN_TSC2005) += tsc2005.o
+ obj-$(CONFIG_TOUCHSCREEN_TSC2007) += tsc2007.o
+ obj-$(CONFIG_TOUCHSCREEN_UCB1400) += ucb1400_ts.o
+ obj-$(CONFIG_TOUCHSCREEN_WACOM_W8001) += wacom_w8001.o
+diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
+new file mode 100644
+index 0000000..27ee361
+--- /dev/null
++++ b/drivers/input/touchscreen/tsc2005.c
+@@ -0,0 +1,678 @@
++/*
++ * TSC2005 touchscreen driver
++ *
++ * Copyright (C) 2006-2010 Nokia Corporation
++ *
++ * Author: Lauri Leukkunen <lauri.leukkunen@nokia.com>
++ * based on TSC2301 driver by Klaus K. Pedersen <klaus.k.pedersen@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/input.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/tsc2005.h>
++
++/*
++ * The touchscreen interface operates as follows:
++ *
++ * 1) Pen is pressed against the touchscreen.
++ * 2) TSC2005 performs AD conversion.
++ * 3) After the conversion is done TSC2005 drives DAV line down.
++ * 4) GPIO IRQ is received and tsc2005_irq_thread() is scheduled.
++ * 5) tsc2005_irq_thread() queues up an spi transfer to fetch the x, y, z1, z2
++ * values.
++ * 6) tsc2005_irq_thread() reports coordinates to input layer and sets up
++ * tsc2005_penup_timer() to be called after TSC2005_PENUP_TIME_MS (40ms).
++ * 7) When the penup timer expires, there have not been touch or DAV interrupts
++ * during the last 40ms which means the pen has been lifted.
++ *
++ * ESD recovery via a hardware reset is done if the TSC2005 doesn't respond
++ * after a configurable period (in ms) of activity. If esd_timeout is 0, the
++ * watchdog is disabled.
++ */
++
++/* control byte 1 */
++#define TSC2005_CMD 0x80
++#define TSC2005_CMD_NORMAL 0x00
++#define TSC2005_CMD_STOP 0x01
++#define TSC2005_CMD_12BIT 0x04
++
++/* control byte 0 */
++#define TSC2005_REG_READ 0x0001
++#define TSC2005_REG_PND0 0x0002
++#define TSC2005_REG_X 0x0000
++#define TSC2005_REG_Y 0x0008
++#define TSC2005_REG_Z1 0x0010
++#define TSC2005_REG_Z2 0x0018
++#define TSC2005_REG_TEMP_HIGH 0x0050
++#define TSC2005_REG_CFR0 0x0060
++#define TSC2005_REG_CFR1 0x0068
++#define TSC2005_REG_CFR2 0x0070
++
++/* configuration register 0 */
++#define TSC2005_CFR0_PRECHARGE_276US 0x0040
++#define TSC2005_CFR0_STABTIME_1MS 0x0300
++#define TSC2005_CFR0_CLOCK_1MHZ 0x1000
++#define TSC2005_CFR0_RESOLUTION12 0x2000
++#define TSC2005_CFR0_PENMODE 0x8000
++#define TSC2005_CFR0_INITVALUE (TSC2005_CFR0_STABTIME_1MS | \
++ TSC2005_CFR0_CLOCK_1MHZ | \
++ TSC2005_CFR0_RESOLUTION12 | \
++ TSC2005_CFR0_PRECHARGE_276US | \
++ TSC2005_CFR0_PENMODE)
++
++/* bits common to both read and write of configuration register 0 */
++#define TSC2005_CFR0_RW_MASK 0x3fff
++
++/* configuration register 1 */
++#define TSC2005_CFR1_BATCHDELAY_4MS 0x0003
++#define TSC2005_CFR1_INITVALUE TSC2005_CFR1_BATCHDELAY_4MS
++
++/* configuration register 2 */
++#define TSC2005_CFR2_MAVE_Z 0x0004
++#define TSC2005_CFR2_MAVE_Y 0x0008
++#define TSC2005_CFR2_MAVE_X 0x0010
++#define TSC2005_CFR2_AVG_7 0x0800
++#define TSC2005_CFR2_MEDIUM_15 0x3000
++#define TSC2005_CFR2_INITVALUE (TSC2005_CFR2_MAVE_X | \
++ TSC2005_CFR2_MAVE_Y | \
++ TSC2005_CFR2_MAVE_Z | \
++ TSC2005_CFR2_MEDIUM_15 | \
++ TSC2005_CFR2_AVG_7)
++
++#define MAX_12BIT 0xfff
++#define TSC2005_SPI_MAX_SPEED_HZ 10000000
++#define TSC2005_PENUP_TIME_MS 40
++
++struct tsc2005_spi_rd {
++ struct spi_transfer spi_xfer;
++ u32 spi_tx;
++ u32 spi_rx;
++};
++
++struct tsc2005 {
++ struct spi_device *spi;
++
++ struct spi_message spi_read_msg;
++ struct tsc2005_spi_rd spi_x;
++ struct tsc2005_spi_rd spi_y;
++ struct tsc2005_spi_rd spi_z1;
++ struct tsc2005_spi_rd spi_z2;
++
++ struct input_dev *idev;
++ char phys[32];
++
++ struct mutex mutex;
++
++ struct timer_list penup_timer;
++ struct work_struct penup_work;
++
++ unsigned int esd_timeout;
++ struct timer_list esd_timer;
++ struct work_struct esd_work;
++
++ unsigned int x_plate_ohm;
++
++ bool disabled;
++ unsigned int disable_depth;
++
++ void (*set_reset)(bool enable);
++};
++
++static void tsc2005_cmd(struct tsc2005 *ts, u8 cmd)
++{
++ u8 tx;
++ struct spi_message msg;
++ struct spi_transfer xfer = { 0 };
++
++ tx = TSC2005_CMD | TSC2005_CMD_12BIT | cmd;
++
++ xfer.tx_buf = &tx;
++ xfer.rx_buf = NULL;
++ xfer.len = 1;
++ xfer.bits_per_word = 8;
++
++ spi_message_init(&msg);
++ spi_message_add_tail(&xfer, &msg);
++ spi_sync(ts->spi, &msg);
++}
++
++static void tsc2005_write(struct tsc2005 *ts, u8 reg, u16 value)
++{
++ u32 tx;
++ struct spi_message msg;
++ struct spi_transfer xfer = { 0 };
++
++ tx = (reg | TSC2005_REG_PND0) << 16;
++ tx |= value;
++
++ xfer.tx_buf = &tx;
++ xfer.rx_buf = NULL;
++ xfer.len = 4;
++ xfer.bits_per_word = 24;
++
++ spi_message_init(&msg);
++ spi_message_add_tail(&xfer, &msg);
++ spi_sync(ts->spi, &msg);
++}
++
++static void tsc2005_setup_read(struct tsc2005_spi_rd *rd, u8 reg, bool last)
++{
++ rd->spi_tx = (reg | TSC2005_REG_READ) << 16;
++ rd->spi_xfer.tx_buf = &rd->spi_tx;
++ rd->spi_xfer.rx_buf = &rd->spi_rx;
++ rd->spi_xfer.len = 4;
++ rd->spi_xfer.bits_per_word = 24;
++ rd->spi_xfer.cs_change = !last;
++}
++
++static void tsc2005_read(struct tsc2005 *ts, u8 reg, u16 *value)
++{
++ struct spi_message msg;
++ struct tsc2005_spi_rd spi_rd = { { 0 }, 0, 0 };
++
++ tsc2005_setup_read(&spi_rd, reg, 1);
++
++ spi_message_init(&msg);
++ spi_message_add_tail(&spi_rd.spi_xfer, &msg);
++ spi_sync(ts->spi, &msg);
++ *value = spi_rd.spi_rx;
++}
++
++static void tsc2005_update_pen_state(struct tsc2005 *ts,
++ int x, int y, int pressure)
++{
++ if (pressure) {
++ input_report_abs(ts->idev, ABS_X, x);
++ input_report_abs(ts->idev, ABS_Y, y);
++ }
++ input_report_abs(ts->idev, ABS_PRESSURE, pressure);
++ input_report_key(ts->idev, BTN_TOUCH, !!pressure);
++ input_sync(ts->idev);
++ dev_dbg(&ts->spi->dev, "point(%4d,%4d), pressure (%4d)\n", x, y,
++ pressure);
++}
++
++static irqreturn_t tsc2005_irq_handler(int irq, void *dev_id)
++{
++ struct tsc2005 *ts = dev_id;
++
++ /* update the penup timer only if it's pending */
++ mod_timer_pending(&ts->penup_timer,
++ jiffies + msecs_to_jiffies(TSC2005_PENUP_TIME_MS));
++
++ return IRQ_WAKE_THREAD;
++}
++
++static irqreturn_t tsc2005_irq_thread(int irq, void *_ts)
++{
++ struct tsc2005 *ts = _ts;
++ unsigned int pressure;
++ u32 x;
++ u32 y;
++ u32 z1;
++ u32 z2;
++
++ mutex_lock(&ts->mutex);
++
++ if (unlikely(ts->disable_depth))
++ goto out;
++
++ /* read the coordinates */
++ spi_sync(ts->spi, &ts->spi_read_msg);
++ x = ts->spi_x.spi_rx;
++ y = ts->spi_y.spi_rx;
++ z1 = ts->spi_z1.spi_rx;
++ z2 = ts->spi_z2.spi_rx;
++
++ /* validate position */
++ if (unlikely(x > MAX_12BIT || y > MAX_12BIT))
++ goto out;
++
++ /* skip coords if the pressure components are out of range */
++ if (unlikely(z1 == 0 || z2 > MAX_12BIT || z1 >= z2))
++ goto out;
++
++ /* compute touch pressure resistance using equation #1 */
++ pressure = x * (z2 - z1) / z1;
++ pressure = pressure * ts->x_plate_ohm / 4096;
++ if (unlikely(pressure > MAX_12BIT))
++ goto out;
++
++ tsc2005_update_pen_state(ts, x, y, pressure);
++
++ /* set the penup timer */
++ mod_timer(&ts->penup_timer,
++ jiffies + msecs_to_jiffies(TSC2005_PENUP_TIME_MS));
++
++ if (!ts->esd_timeout)
++ goto out;
++
++ /* update the watchdog timer */
++ mod_timer(&ts->esd_timer,
++ round_jiffies(jiffies + msecs_to_jiffies(ts->esd_timeout)));
++
++out:
++ mutex_unlock(&ts->mutex);
++ return IRQ_HANDLED;
++}
++
++static void tsc2005_penup_timer(unsigned long data)
++{
++ struct tsc2005 *ts = (struct tsc2005 *)data;
++
++ schedule_work(&ts->penup_work);
++}
++
++static void tsc2005_penup_work(struct work_struct *work)
++{
++ struct tsc2005 *ts = container_of(work, struct tsc2005, penup_work);
++
++ mutex_lock(&ts->mutex);
++ tsc2005_update_pen_state(ts, 0, 0, 0);
++ mutex_unlock(&ts->mutex);
++}
++
++static void tsc2005_start_scan(struct tsc2005 *ts)
++{
++ tsc2005_write(ts, TSC2005_REG_CFR0, TSC2005_CFR0_INITVALUE);
++ tsc2005_write(ts, TSC2005_REG_CFR1, TSC2005_CFR1_INITVALUE);
++ tsc2005_write(ts, TSC2005_REG_CFR2, TSC2005_CFR2_INITVALUE);
++ tsc2005_cmd(ts, TSC2005_CMD_NORMAL);
++}
++
++static void tsc2005_stop_scan(struct tsc2005 *ts)
++{
++ tsc2005_cmd(ts, TSC2005_CMD_STOP);
++}
++
++/* must be called with mutex held */
++static void tsc2005_disable(struct tsc2005 *ts)
++{
++ if (ts->disable_depth++ != 0)
++ return;
++ disable_irq(ts->spi->irq);
++ if (ts->esd_timeout)
++ del_timer_sync(&ts->esd_timer);
++ del_timer_sync(&ts->penup_timer);
++ tsc2005_stop_scan(ts);
++}
++
++/* must be called with mutex held */
++static void tsc2005_enable(struct tsc2005 *ts)
++{
++ if (--ts->disable_depth != 0)
++ return;
++ tsc2005_start_scan(ts);
++ enable_irq(ts->spi->irq);
++ if (!ts->esd_timeout)
++ return;
++ mod_timer(&ts->esd_timer,
++ round_jiffies(jiffies + msecs_to_jiffies(ts->esd_timeout)));
++}
++
++static ssize_t tsc2005_disable_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct tsc2005 *ts = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%u\n", ts->disabled);
++}
++
++static ssize_t tsc2005_disable_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct tsc2005 *ts = dev_get_drvdata(dev);
++ unsigned long res;
++ int i;
++
++ if (strict_strtoul(buf, 10, &res) < 0)
++ return -EINVAL;
++ i = res ? 1 : 0;
++
++ mutex_lock(&ts->mutex);
++ if (i == ts->disabled)
++ goto out;
++ ts->disabled = i;
++ if (i)
++ tsc2005_disable(ts);
++ else
++ tsc2005_enable(ts);
++out:
++ mutex_unlock(&ts->mutex);
++ return count;
++}
++static DEVICE_ATTR(disable, 0664, tsc2005_disable_show, tsc2005_disable_store);
++
++static ssize_t tsc2005_selftest_show(struct device *dev,
++ struct device_attribute *attr,
++ char *buf)
++{
++ struct tsc2005 *ts = dev_get_drvdata(dev);
++ u16 temp_high;
++ u16 temp_high_orig;
++ u16 temp_high_test;
++ unsigned int result;
++
++ if (!ts->set_reset) {
++ dev_warn(&ts->spi->dev,
++ "unable to selftest: no reset function\n");
++ result = 0;
++ goto out;
++ }
++
++ mutex_lock(&ts->mutex);
++
++ /*
++ * Test TSC2005 communications via temp high register.
++ */
++ tsc2005_disable(ts);
++ result = 1;
++ tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high_orig);
++ temp_high_test = (temp_high_orig - 1) & MAX_12BIT;
++ tsc2005_write(ts, TSC2005_REG_TEMP_HIGH, temp_high_test);
++ tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high);
++ if (temp_high != temp_high_test) {
++ dev_warn(dev, "selftest failed: %d != %d\n",
++ temp_high, temp_high_test);
++ result = 0;
++ }
++
++ /* hardware reset */
++ ts->set_reset(0);
++ msleep(1); /* only 10us required */
++ ts->set_reset(1);
++ tsc2005_enable(ts);
++
++ /* test that the reset really happened */
++ tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high);
++ if (temp_high != temp_high_orig) {
++ dev_warn(dev, "selftest failed after reset: %d != %d\n",
++ temp_high, temp_high_orig);
++ result = 0;
++ }
++
++ mutex_unlock(&ts->mutex);
++
++out:
++ return sprintf(buf, "%u\n", result);
++}
++static DEVICE_ATTR(selftest, S_IRUGO, tsc2005_selftest_show, NULL);
++
++static void tsc2005_esd_timer(unsigned long data)
++{
++ struct tsc2005 *ts = (struct tsc2005 *)data;
++
++ schedule_work(&ts->esd_work);
++}
++
++static void tsc2005_esd_work(struct work_struct *work)
++{
++ struct tsc2005 *ts = container_of(work, struct tsc2005, esd_work);
++ u16 r;
++
++ mutex_lock(&ts->mutex);
++
++ if (ts->disable_depth)
++ goto out;
++
++ /*
++ * If we cannot read our known value from configuration register 0 then
++ * reset the controller as if from power-up and start scanning again.
++ */
++ tsc2005_read(ts, TSC2005_REG_CFR0, &r);
++ if ((r ^ TSC2005_CFR0_INITVALUE) & TSC2005_CFR0_RW_MASK) {
++ dev_info(&ts->spi->dev, "TSC2005 not responding - resetting\n");
++ ts->set_reset(0);
++ msleep(1); /* only 10us required */
++ ts->set_reset(1);
++ tsc2005_start_scan(ts);
++ }
++
++ /* re-arm the watchdog */
++ mod_timer(&ts->esd_timer,
++ round_jiffies(jiffies + msecs_to_jiffies(ts->esd_timeout)));
++
++out:
++ mutex_unlock(&ts->mutex);
++}
++
++static void __devinit tsc2005_setup_spi_xfer(struct tsc2005 *ts)
++{
++ tsc2005_setup_read(&ts->spi_x, TSC2005_REG_X, 0);
++ tsc2005_setup_read(&ts->spi_y, TSC2005_REG_Y, 0);
++ tsc2005_setup_read(&ts->spi_z1, TSC2005_REG_Z1, 0);
++ tsc2005_setup_read(&ts->spi_z2, TSC2005_REG_Z2, 1);
++
++ spi_message_init(&ts->spi_read_msg);
++ spi_message_add_tail(&ts->spi_x.spi_xfer, &ts->spi_read_msg);
++ spi_message_add_tail(&ts->spi_y.spi_xfer, &ts->spi_read_msg);
++ spi_message_add_tail(&ts->spi_z1.spi_xfer, &ts->spi_read_msg);
++ spi_message_add_tail(&ts->spi_z2.spi_xfer, &ts->spi_read_msg);
++}
++
++static struct attribute *tsc2005_attrs[] = {
++ &dev_attr_disable.attr,
++ &dev_attr_selftest.attr,
++ NULL
++};
++
++static struct attribute_group tsc2005_attr_group = {
++ .attrs = tsc2005_attrs,
++};
++
++static int __devinit tsc2005_setup(struct tsc2005 *ts,
++ struct tsc2005_platform_data *pdata)
++{
++ int r;
++ int fudge_x;
++ int fudge_y;
++ int fudge_p;
++ int p_max;
++ int x_max;
++ int y_max;
++
++ mutex_init(&ts->mutex);
++
++ tsc2005_setup_spi_xfer(ts);
++
++ init_timer(&ts->penup_timer);
++ setup_timer(&ts->penup_timer, tsc2005_penup_timer, (unsigned long)ts);
++ INIT_WORK(&ts->penup_work, tsc2005_penup_work);
++
++ fudge_x = pdata->ts_x_fudge ? : 0;
++ fudge_y = pdata->ts_y_fudge ? : 0;
++ fudge_p = pdata->ts_pressure_fudge ? : 0;
++ x_max = pdata->ts_x_max ? : MAX_12BIT;
++ y_max = pdata->ts_y_max ? : MAX_12BIT;
++ p_max = pdata->ts_pressure_max ? : MAX_12BIT;
++ ts->x_plate_ohm = pdata->ts_x_plate_ohm ? : 0;
++ ts->esd_timeout = pdata->esd_timeout_ms;
++ ts->set_reset = pdata->set_reset;
++
++ ts->idev = input_allocate_device();
++ if (ts->idev == NULL)
++ return -ENOMEM;
++ ts->idev->name = "TSC2005 touchscreen";
++ snprintf(ts->phys, sizeof(ts->phys), "%s/input-ts",
++ dev_name(&ts->spi->dev));
++ ts->idev->phys = ts->phys;
++ ts->idev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
++ ts->idev->absbit[0] = BIT(ABS_X) | BIT(ABS_Y) | BIT(ABS_PRESSURE);
++ ts->idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
++
++ input_set_abs_params(ts->idev, ABS_X, 0, x_max, fudge_x, 0);
++ input_set_abs_params(ts->idev, ABS_Y, 0, y_max, fudge_y, 0);
++ input_set_abs_params(ts->idev, ABS_PRESSURE, 0, p_max, fudge_p, 0);
++
++ r = request_threaded_irq(ts->spi->irq, tsc2005_irq_handler,
++ tsc2005_irq_thread, IRQF_TRIGGER_RISING,
++ "tsc2005", ts);
++ if (r) {
++ dev_err(&ts->spi->dev, "request_threaded_irq(): %d\n", r);
++ goto err1;
++ }
++ set_irq_wake(ts->spi->irq, 1);
++
++ r = input_register_device(ts->idev);
++ if (r) {
++ dev_err(&ts->spi->dev, "input_register_device(): %d\n", r);
++ goto err2;
++ }
++
++ r = sysfs_create_group(&ts->spi->dev.kobj, &tsc2005_attr_group);
++ if (r)
++ dev_warn(&ts->spi->dev, "sysfs entry creation failed: %d\n", r);
++
++ tsc2005_start_scan(ts);
++
++ if (!ts->esd_timeout || !ts->set_reset)
++ goto done;
++
++ /* start the optional ESD watchdog */
++ setup_timer(&ts->esd_timer, tsc2005_esd_timer, (unsigned long)ts);
++ INIT_WORK(&ts->esd_work, tsc2005_esd_work);
++ mod_timer(&ts->esd_timer,
++ round_jiffies(jiffies + msecs_to_jiffies(ts->esd_timeout)));
++
++done:
++ return 0;
++
++err2:
++ free_irq(ts->spi->irq, ts);
++
++err1:
++ input_free_device(ts->idev);
++ return r;
++}
++
++static int __devinit tsc2005_probe(struct spi_device *spi)
++{
++ struct tsc2005_platform_data *pdata = spi->dev.platform_data;
++ struct tsc2005 *ts;
++ int r;
++
++ if (spi->irq < 0) {
++ dev_dbg(&spi->dev, "no irq\n");
++ return -ENODEV;
++ }
++
++ if (!pdata) {
++ dev_dbg(&spi->dev, "no platform data\n");
++ return -ENODEV;
++ }
++
++ ts = kzalloc(sizeof(*ts), GFP_KERNEL);
++ if (ts == NULL)
++ return -ENOMEM;
++
++ dev_set_drvdata(&spi->dev, ts);
++ ts->spi = spi;
++ spi->dev.power.power_state = PMSG_ON;
++ spi->mode = SPI_MODE_0;
++ spi->bits_per_word = 8;
++ if (!spi->max_speed_hz)
++ spi->max_speed_hz = TSC2005_SPI_MAX_SPEED_HZ;
++ spi_setup(spi);
++
++ r = tsc2005_setup(ts, pdata);
++ if (r)
++ kfree(ts);
++ return r;
++}
++
++static int __devexit tsc2005_remove(struct spi_device *spi)
++{
++ struct tsc2005 *ts = dev_get_drvdata(&spi->dev);
++
++ mutex_lock(&ts->mutex);
++ tsc2005_disable(ts);
++ mutex_unlock(&ts->mutex);
++
++ if (ts->esd_timeout)
++ del_timer_sync(&ts->esd_timer);
++ del_timer_sync(&ts->penup_timer);
++
++ flush_work(&ts->esd_work);
++ flush_work(&ts->penup_work);
++
++ sysfs_remove_group(&ts->spi->dev.kobj, &tsc2005_attr_group);
++ free_irq(ts->spi->irq, ts);
++ input_unregister_device(ts->idev);
++ kfree(ts);
++
++ return 0;
++}
++
++#ifdef CONFIG_PM
++static int tsc2005_suspend(struct spi_device *spi, pm_message_t mesg)
++{
++ struct tsc2005 *ts = dev_get_drvdata(&spi->dev);
++
++ mutex_lock(&ts->mutex);
++ tsc2005_disable(ts);
++ mutex_unlock(&ts->mutex);
++
++ return 0;
++}
++
++static int tsc2005_resume(struct spi_device *spi)
++{
++ struct tsc2005 *ts = dev_get_drvdata(&spi->dev);
++
++ mutex_lock(&ts->mutex);
++ tsc2005_enable(ts);
++ mutex_unlock(&ts->mutex);
++
++ return 0;
++}
++#endif
++
++static struct spi_driver tsc2005_driver = {
++ .driver = {
++ .name = "tsc2005",
++ .owner = THIS_MODULE,
++ },
++#ifdef CONFIG_PM
++ .suspend = tsc2005_suspend,
++ .resume = tsc2005_resume,
++#endif
++ .probe = tsc2005_probe,
++ .remove = __devexit_p(tsc2005_remove),
++};
++
++static int __init tsc2005_init(void)
++{
++ printk(KERN_INFO "TSC2005 driver initializing\n");
++ return spi_register_driver(&tsc2005_driver);
++}
++module_init(tsc2005_init);
++
++static void __exit tsc2005_exit(void)
++{
++ spi_unregister_driver(&tsc2005_driver);
++}
++module_exit(tsc2005_exit);
++
++MODULE_AUTHOR("Lauri Leukkunen <lauri.leukkunen@nokia.com>");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:tsc2005");
+diff --git a/include/linux/spi/tsc2005.h b/include/linux/spi/tsc2005.h
+new file mode 100644
+index 0000000..d9b0c84
+--- /dev/null
++++ b/include/linux/spi/tsc2005.h
+@@ -0,0 +1,41 @@
++/*
++ * This file is part of TSC2005 touchscreen driver
++ *
++ * Copyright (C) 2009-2010 Nokia Corporation
++ *
++ * Contact: Aaro Koskinen <aaro.koskinen@nokia.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++
++#ifndef _LINUX_SPI_TSC2005_H
++#define _LINUX_SPI_TSC2005_H
++
++#include <linux/types.h>
++
++struct tsc2005_platform_data {
++ int ts_pressure_max;
++ int ts_pressure_fudge;
++ int ts_x_max;
++ int ts_x_fudge;
++ int ts_y_max;
++ int ts_y_fudge;
++ int ts_x_plate_ohm;
++ unsigned int esd_timeout_ms;
++ void (*set_reset)(bool enable);
++};
++
++#endif
+--
+1.6.0.4
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-moorestown-camera-driver-10.0-1-3.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-moorestown-camera-driver-10.0-1-3.patch
new file mode 100644
index 0000000..6a9c431
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-moorestown-camera-driver-10.0-1-3.patch
@@ -0,0 +1,10167 @@
+From 3c67994c10958001bebd89356a96358904ac9779 Mon Sep 17 00:00:00 2001
+From: Zheng Ba <zheng.ba@intel.com>
+Date: Thu, 1 Apr 2010 16:16:57 +0800
+Subject: [PATCH 1/3] Moorestown Camera Imaging driver Beta 10.0
+
+Patch-mainline: 2.6.35?
+
+Changes from Beta 9.0:
+1. Fixed hsd sighting
+ 3469638 3469639 3469710 3469822 (high)
+ 3469697 (medium)
+
+Changes from Beta 8.0:
+1. Fixed hsd sighting
+ 3469056 3469058 (critical)
+ 3469705 3469696 3469709 3469510 (medium)
+
+Changes from Beta 7.0:
+1. Fixed hsd sighting 3469681,3469682,3469683 (high)
+
+Changes from Beta 6.0:
+1. Fixed hsd sighting 3469668 (high)
+2. Fixed ov5630 v4l2 view-finding dark issue
+3. Enabled support for popular v4l2 applications (cheese, skype, ffmpeg)
+
+Changes from Beta 5.1:
+1. Fixed CRITICAL sighting 3469558 -- ciapp fails to launch with segment fault
+2. Fixed HIGH sighting 3479513 -- ov5630 AWB unstable
+3. Improved KMOT sensor 720p fps from 30 to 40
+
+Changes from Beta 5.0:
+Fixed a critical issue of camera driver not loading -- hsd 3469557
+
+Main changes from Beta 4.0:
+Fixed 4 HSD sightings: 3469392,3469099,3469470,3469500
+
+Main changes from Beta 3.0:
+Fixed 7 HSD sightings: 3469264,3469112,3469395,3469103,3469105,3469471,3469484
+
+Main changes from Beta 2.0:
+Fixed 6 HSD sightings: 3469047,3469315,3469317,3469101,3468409,3469391
+
+Main changes from Beta 1.1:
+1. Added interrupt mode for jpeg capture and KMOT viewfinding
+2. Fixed HSD sighting 3469228 and 3469147
+
+Main changes from Alpha2:
+Enabled MIPI interface in ISP driver and KMOT sensor s5k4e1.
+Enabled FIFO in ISP driver, which doubled the fps in view-finding mode.
+Enabled Subdev Framework in CI kernel driver.
+Enabled AF Continuous Mode.
+Enabled AE scene evaluation.
+
+Enabled the camera drivers in kernel:
+Device Drivers --> Multimedia support --> Video For Linux
+Device Drivers --> Mulitmedia support --> Video capture adapters -->
+--> Moorestown Langwell Camera Imaging Subsystem support.
+
+Kernel configs:
+1. camera driver depends on GPIO library and I2C driver.
+CONFIG_GENERIC_GPIO=y
+CONFIG_I2C=y
+CONFIG_GPIOLIB=y
+2. camera driver depends on videobuf-core and videobuf-dma-contig.
+VIDEOBUF_GEN=y
+VIDEOBUF_DMA_CONTIG=y
+3. enable multimedia support and video capture.
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_VIDEO_DEV=y
+CONFIG_VIDEO_V4L2_COMMON=y
+CONFIG_VIDEO_MEDIA=y
+CONFIG_VIDEO_V4L2=y
+4. camera drivers including ISP, 5630, 5630-motor, s5k4e1, s5k4e1-motor, 2650,
+9665, flash.
+CONFIG_VIDEO_MRSTCI=y
+CONFIG_VIDEO_MRST_ISP=y
+CONFIG_VIDEO_MRST_OV5630=y
+CONFIG_VIDEO_MRST_OV5630_MOTOR=y
+CONFIG_VIDEO_MRST_S5K4E1=y
+CONFIG_VIDEO_MRST_S5K4E1_MOTOR=y
+CONFIG_VIDEO_MRST_FLASH=y
+CONFIG_VIDEO_MRST_OV2650=y
+CONFIG_VIDEO_MRST_OV9665=y
+
+Signed-off-by: Zheng Ba <zheng.ba@intel.com>
+---
+ drivers/media/video/Kconfig | 1
+ drivers/media/video/Makefile | 9
+ drivers/media/video/mrstci/include/ci_isp_common.h | 1422 +++
+ drivers/media/video/mrstci/include/ci_isp_fmts_common.h | 128
+ drivers/media/video/mrstci/include/ci_sensor_common.h | 1233 ++
+ drivers/media/video/mrstci/include/ci_va.h | 42
+ drivers/media/video/mrstci/include/v4l2_jpg_review.h | 48
+ drivers/media/video/mrstci/mrstisp/include/def.h | 122
+ drivers/media/video/mrstci/mrstisp/include/mrstisp.h | 279
+ drivers/media/video/mrstci/mrstisp/include/mrstisp_dp.h | 317
+ drivers/media/video/mrstci/mrstisp/include/mrstisp_hw.h | 245
+ drivers/media/video/mrstci/mrstisp/include/mrstisp_isp.h | 42
+ drivers/media/video/mrstci/mrstisp/include/mrstisp_jpe.h | 426
+ drivers/media/video/mrstci/mrstisp/include/mrstisp_reg.h | 4698 ++++++++++
+ drivers/media/video/mrstci/mrstisp/include/mrstisp_stdinc.h | 119
+ drivers/media/video/mrstci/mrstisp/include/project_settings_mrv.h | 622 +
+ drivers/media/video/mrstci/mrstisp/include/reg_access.h | 233
+ 17 files changed, 9986 insertions(+)
+ create mode 100644 drivers/media/video/mrstci/include/ci_isp_common.h
+ create mode 100644 drivers/media/video/mrstci/include/ci_isp_fmts_common.h
+ create mode 100644 drivers/media/video/mrstci/include/ci_sensor_common.h
+ create mode 100644 drivers/media/video/mrstci/include/ci_va.h
+ create mode 100644 drivers/media/video/mrstci/include/v4l2_jpg_review.h
+ create mode 100644 drivers/media/video/mrstci/mrstisp/include/def.h
+ create mode 100644 drivers/media/video/mrstci/mrstisp/include/mrstisp.h
+ create mode 100644 drivers/media/video/mrstci/mrstisp/include/mrstisp_dp.h
+ create mode 100644 drivers/media/video/mrstci/mrstisp/include/mrstisp_hw.h
+ create mode 100644 drivers/media/video/mrstci/mrstisp/include/mrstisp_isp.h
+ create mode 100644 drivers/media/video/mrstci/mrstisp/include/mrstisp_jpe.h
+ create mode 100644 drivers/media/video/mrstci/mrstisp/include/mrstisp_reg.h
+ create mode 100644 drivers/media/video/mrstci/mrstisp/include/mrstisp_stdinc.h
+ create mode 100644 drivers/media/video/mrstci/mrstisp/include/project_settings_mrv.h
+ create mode 100644 drivers/media/video/mrstci/mrstisp/include/reg_access.h
+
+--- a/drivers/media/video/Kconfig
++++ b/drivers/media/video/Kconfig
+@@ -1074,4 +1074,5 @@ config USB_S2255
+ This driver can be compiled as a module, called s2255drv.
+
+ endif # V4L_USB_DRIVERS
++source "drivers/media/video/mrstci/Kconfig"
+ endif # VIDEO_CAPTURE_DRIVERS
+--- a/drivers/media/video/Makefile
++++ b/drivers/media/video/Makefile
+@@ -169,6 +169,15 @@ obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2
+
+ obj-$(CONFIG_ARCH_DAVINCI) += davinci/
+
++obj-$(CONFIG_VIDEO_MRST_OV2650) += mrstci/mrstov2650/
++obj-$(CONFIG_VIDEO_MRST_OV5630) += mrstci/mrstov5630/
++obj-$(CONFIG_VIDEO_MRST_OV5630_MOTOR) += mrstci/mrstov5630_motor/
++obj-$(CONFIG_VIDEO_MRST_S5K4E1) += mrstci/mrsts5k4e1/
++obj-$(CONFIG_VIDEO_MRST_S5K4E1_MOTOR) += mrstci/mrsts5k4e1_motor/
++obj-$(CONFIG_VIDEO_MRST_OV9665) += mrstci/mrstov9665/
++obj-$(CONFIG_VIDEO_MRST_FLASH) += mrstci/mrstflash/
++obj-$(CONFIG_VIDEO_MRST_ISP) += mrstci/mrstisp/
++
+ EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
+ EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
+ EXTRA_CFLAGS += -Idrivers/media/common/tuners
+--- /dev/null
++++ b/drivers/media/video/mrstci/include/ci_isp_common.h
+@@ -0,0 +1,1422 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#ifndef _CI_ISP_COMMON_H
++#define _CI_ISP_COMMON_H
++
++#include "v4l2_jpg_review.h"
++
++/*
++ * MARVIN VI ID defines -> changed to MARVIN_FEATURE_CHIP_ID and moved to
++ * the other chip features in project_settings.h
++ * JPEG compression ratio defines
++ */
++
++#define CI_ISP_JPEG_HIGH_COMPRESSION 1
++#define CI_ISP_JPEG_LOW_COMPRESSION 2
++/* Low Compression / High Quality */
++#define CI_ISP_JPEG_01_PERCENT 3
++#define CI_ISP_JPEG_20_PERCENT 4
++#define CI_ISP_JPEG_30_PERCENT 5
++#define CI_ISP_JPEG_40_PERCENT 6
++/* Mid Compression / Mid Quality */
++#define CI_ISP_JPEG_50_PERCENT 7
++#define CI_ISP_JPEG_60_PERCENT 8
++#define CI_ISP_JPEG_70_PERCENT 9
++#define CI_ISP_JPEG_80_PERCENT 10
++#define CI_ISP_JPEG_90_PERCENT 11
++/* High Compression / Low Quality */
++#define CI_ISP_JPEG_99_PERCENT 12
++
++/* Size of lens shading data table in 16 Bit words */
++#define CI_ISP_DATA_TBL_SIZE 289
++/* Size of lens shading grad table in 16 Bit words */
++#define CI_ISP_GRAD_TBL_SIZE 8
++/* Number of lens shading sectors in x or y direction */
++#define CI_ISP_MAX_LSC_SECTORS 16
++
++/*
++ * Value representing 1.0 for fixed-point values
++ * used by marvin drivers
++ */
++#define CI_ISP_FIXEDPOINT_ONE (0x1000)
++/* JPEG encoding */
++
++enum ci_isp_jpe_enc_mode {
++ /* motion JPEG with header generation */
++ CI_ISP_JPE_LARGE_CONT_MODE = 0x04,
++ /* motion JPEG only first frame with header */
++ CI_ISP_JPE_SHORT_CONT_MODE = 0x02,
++ /* JPEG with single snapshot */
++ CI_ISP_JPE_SINGLE_SHOT = 0x01
++};
++
++/* for demosaic mode */
++enum ci_isp_demosaic_mode {
++ CI_ISP_DEMOSAIC_STANDARD,
++ CI_ISP_DEMOSAIC_ENHANCED
++};
++
++struct ci_isp_window{
++ unsigned short hoffs;
++ unsigned short voffs;
++ unsigned short hsize;
++ unsigned short vsize;
++};
++
++/* scale settings for both self and main resize unit */
++struct ci_isp_scale {
++ u32 scale_hy;
++ u32 scale_hcb;
++ u32 scale_hcr;
++ u32 scale_vy;
++ u32 scale_vc;
++ u16 phase_hy;
++ u16 phase_hc;
++ u16 phase_vy;
++ u16 phase_vc;
++};
++
++/* A Lookup table for the upscale parameter in the self and main scaler */
++struct ci_isp_rsz_lut{
++ u8 rsz_lut[64];
++};
++
++#if (MARVIN_FEATURE_SCALE_FACTORWIDTH == MARVIN_FEATURE_16BITS)
++/* flag to set in scalefactor values to enable upscaling */
++#define RSZ_UPSCALE_ENABLE 0x20000
++#else
++/* flag to set in scalefactor values to enable upscaling */
++#define RSZ_UPSCALE_ENABLE 0x8000
++/* #if (MARVIN_FEATURE_SCALE_FACTORWIDTH == MARVIN_FEATURE_16BITS) */
++#endif
++
++/*
++ * Flag to set in scalefactor values to bypass the scaler block.
++ * Since this define is also used in calculations of scale factors and
++ * coordinates, it needs to reflect the scale factor precision. In other
++ * words:
++ * RSZ_SCALER_BYPASS = max. scalefactor value> + 1
++ */
++#if (MARVIN_FEATURE_SCALE_FACTORWIDTH == MARVIN_FEATURE_12BITS)
++#define RSZ_SCALER_BYPASS 0x1000
++#elif (MARVIN_FEATURE_SCALE_FACTORWIDTH == MARVIN_FEATURE_14BITS)
++#define RSZ_SCALER_BYPASS 0x4000
++#elif (MARVIN_FEATURE_SCALE_FACTORWIDTH == MARVIN_FEATURE_16BITS)
++#define RSZ_SCALER_BYPASS 0x10000
++#endif
++
++#define RSZ_FLAGS_MASK (RSZ_UPSCALE_ENABLE | RSZ_SCALER_BYPASS)
++
++/* color settings */
++struct ci_isp_color_settings {
++ u8 contrast;
++ u8 brightness;
++ u8 saturation;
++ u8 hue;
++ u32 flags;
++};
++
++/* color processing chrominance clipping range */
++#define CI_ISP_CPROC_C_OUT_RANGE 0x08
++/* color processing luminance input range (offset processing) */
++#define CI_ISP_CPROC_Y_IN_RANGE 0x04
++/* color processing luminance output clipping range */
++#define CI_ISP_CPROC_Y_OUT_RANGE 0x02
++/* color processing enable */
++#define CI_ISP_CPROC_ENABLE 0x01
++
++/* black level config */
++struct ci_isp_blc_config {
++ int bls_auto;
++ int henable;
++ int venable;
++ u16 hstart;
++ u16 hstop;
++ u16 vstart;
++ u16 vstop;
++ u8 blc_samples;
++ u8 ref_a;
++ u8 ref_b;
++ u8 ref_c;
++ u8 ref_d;
++};
++
++/* black level compensation mean values */
++struct ci_isp_blc_mean {
++ u8 mean_a;
++ u8 mean_b;
++ u8 mean_c;
++ u8 mean_d;
++};
++
++/* BLS window */
++struct ci_isp_bls_window {
++
++ /* En-/disable the measurement window. */
++ int enable_window;
++ /* Horizontal start address. */
++ u16 start_h;
++ /* Horizontal stop address. */
++ u16 stop_h;
++ /* Vertical start address. */
++ u16 start_v;
++ /* Vertical stop address. */
++ u16 stop_v;
++};
++
++/* BLS mean measured values */
++struct ci_isp_bls_measured {
++ /* Mean measured value for Bayer pattern position A. */
++ u16 meas_a;
++ /* Mean measured value for Bayer pattern position B. */
++ u16 meas_b;
++ /* Mean measured value for Bayer pattern position C. */
++ u16 meas_c;
++ /* Mean measured value for Bayer pattern position D. */
++ u16 meas_d;
++};
++
++/*
++ * BLS fixed subtraction values. The values will be subtracted from the sensor
++ * values. Therefore a negative value means addition instead of subtraction
++ */
++struct ci_isp_bls_subtraction {
++ /* Fixed (signed ) subtraction value for Bayer pattern position A. */
++ s16 fixed_a;
++ /* Fixed (signed ) subtraction value for Bayer pattern position B. */
++ s16 fixed_b;
++ /* Fixed (signed ) subtraction value for Bayer pattern position C. */
++ s16 fixed_c;
++ /* Fixed (signed ) subtraction value for Bayer pattern position D. */
++ s16 fixed_d;
++};
++
++/* BLS configuration */
++struct ci_isp_bls_config {
++ /*
++ * Automatic mode activated means that the measured values are
++ * subtracted. Otherwise the fixed subtraction values will be
++ * subtracted.
++ */
++ int enable_automatic;
++ /* En-/disable horizontal accumulation for mean black value. */
++ int disable_h;
++ /*
++ * BLS module versions 4 or higher imply that it is enabled.
++ * En-/disable vertical accumulation for mean black value.
++ */
++ int disable_v;
++ /* Measurement window 1. */
++ struct ci_isp_bls_window isp_bls_window1;
++ /* Measurement window 2. */
++ struct ci_isp_bls_window isp_bls_window2;
++
++ /*
++ * BLS module version 3 and lower do not support a second
++ * measurement window. Therefore the second window has to
++ * be disabled for these versions.
++ */
++
++ /*
++ * Set amount of measured pixels for each Bayer position (A, B,
++ * C and D) to 2^bls_samples.
++ */
++ u8 bls_samples;
++ /* Fixed subtraction values. */
++ struct ci_isp_bls_subtraction bls_subtraction;
++};
++
++/* white balancing modes for the marvin hardware */
++enum ci_isp_awb_mode {
++ CI_ISP_AWB_COMPLETELY_OFF = 0,
++ CI_ISP_AWB_AUTO,
++ CI_ISP_AWB_MAN_MEAS,
++ CI_ISP_AWB_MAN_NOMEAS,
++ CI_ISP_AWB_MAN_PUSH_AUTO,
++ CI_ISP_AWB_ONLY_MEAS
++};
++
++/* white balancing modes for the marvin hardware */
++enum ci_isp_awb_sub_mode {
++ CI_ISP_AWB_SUB_OFF = 0,
++ CI_ISP_AWB_MAN_DAYLIGHT,
++ CI_ISP_AWB_MAN_CLOUDY,
++ CI_ISP_AWB_MAN_SHADE,
++ CI_ISP_AWB_MAN_FLUORCNT,
++ CI_ISP_AWB_MAN_FLUORCNTH,
++ CI_ISP_AWB_MAN_TUNGSTEN,
++ CI_ISP_AWB_MAN_TWILIGHT,
++ CI_ISP_AWB_MAN_SUNSET,
++ CI_ISP_AWB_MAN_FLASH,
++ CI_ISP_AWB_MAN_CIE_D65,
++ CI_ISP_AWB_MAN_CIE_D75,
++ CI_ISP_AWB_MAN_CIE_F2,
++ CI_ISP_AWB_MAN_CIE_F11,
++ CI_ISP_AWB_MAN_CIE_F12,
++ CI_ISP_AWB_MAN_CIE_A,
++ CI_ISP_AWB_AUTO_ON
++};
++
++/*
++ * white balancing gains
++ * xiaolin, typedef ci_sensor_component_gain tsMrvWbGains;
++ * white balancing measurement configuration
++ */
++struct ci_isp_wb_meas_config {
++ /* white balance measurement window (in pixels) */
++ struct ci_isp_window awb_window;
++ /*
++ * only pixels values max_y contribute to WB measurement
++ * (set to 0 to disable this feature)
++ */
++ u8 max_y;
++ /* only pixels values > min_y contribute to WB measurement */
++ u8 ref_cr_MaxR;
++ u8 minY_MaxG;
++ u8 ref_cb_MaxB;
++ /*
++ * Chrominance sum maximum value, only consider pixels with Cb+Cr
++ * smaller than threshold for WB measurements
++ */
++ u8 max_csum;
++
++ /*
++ * Chrominance minimum value, only consider pixels with Cb/Cr each
++ * greater than threshold value for WB measurements
++ */
++ u8 min_c;
++ /*
++ * number of frames+1 used for mean value calculation (frames=0
++ * means 1 Frame)
++ */
++ u8 frames;
++ u8 meas_mode;
++};
++
++/* white balancing measurement configuration limits */
++struct ci_isp_wb_meas_conf_limit {
++ /* maximum value for MinY */
++ u8 min_y_max;
++ /* minimum value for MinY */
++ u8 min_y_min;
++ /* maximum value for MinC */
++ u8 min_c_max;
++ /* minimum value for MinC */
++ u8 min_c_min;
++ /* maximum value for MaxCSum */
++ u8 max_csum_max;
++ /* minimum value for MaxCSum */
++ u8 max_csum_min;
++ /* maximum value for white pixel percentage */
++ u8 white_percent_max;
++ /* minimum value for white pixel percentage */
++ u8 white_percent_min;
++ /*
++ * maximum number of not measured frames until the gain values
++ * will be set to their initial values
++ */
++ u8 error_counter;
++};
++
++/* white balancing HW automatic configuration */
++struct ci_isp_wb_auto_hw_config {
++ /* reference C values */
++ u8 ref_cr;
++ u8 ref_cb;
++ /* lock / unlock settings */
++ u8 unlock_dly;
++ u8 unlock_rng;
++ u8 lock_dly;
++ u8 lock_rng;
++ /* maximum gain step size */
++ u8 step;
++ /* gain limits */
++ u8 max_gain;
++ u8 min_gain;
++};
++
++/* white balancing configuration */
++struct ci_isp_wb_config {
++ /* mode of operation */
++ enum ci_isp_awb_mode mrv_wb_mode;
++ enum ci_isp_awb_sub_mode mrv_wb_sub_mode;
++ /* measurement configuration */
++ struct ci_isp_wb_meas_config mrv_wb_meas_conf;
++ /* HW automatic configuration */
++ struct ci_isp_wb_auto_hw_config mrv_wb_auto_hw_conf;
++ /*
++ * gain values
++ * xiaolin, tsMrvWbGains mrv_wb_gains;
++ * measurement limits
++ */
++ struct ci_isp_wb_meas_conf_limit mrv_wb_meas_conf_limit;
++ /* Pca Damping for awb auto mode */
++ u8 awb_pca_damping;
++ /* PriorExp Damping for awb auto mode */
++ u8 awb_prior_exp_damping;
++ /* Pca Damping for AWB auto push mode */
++ u8 awb_pca_push_damping;
++ /* PriorExp Damping for AWB auto push mode */
++ u8 awb_prior_exp_push_damping;
++ /* Max Y in AWB auto mode */
++ u8 awb_auto_max_y;
++ /* Max Y in AWB auto push mode */
++ u8 awb_push_max_y;
++ /* Max Y in AWB measurement only mode */
++ u8 awb_measure_max_y;
++	/* Distance for underexposure detection */
++ u16 awb_underexp_det;
++	/* Distance for underexposure push detection */
++ u16 awb_push_underexp_det;
++
++};
++
++/* possible AEC modes */
++enum ci_isp_aec_mode {
++ /* AEC turned off */
++ CI_ISP_AEC_OFF,
++ /* AEC measurements based on (almost) the entire picture */
++ CI_ISP_AEC_INTEGRAL,
++ /*
++ * AEC measurements based on a single little square in the center of
++ * the picture
++ */
++ CI_ISP_AEC_SPOT,
++ /*
++ * AEC measurements based on 5 little squares spread over the picture
++ */
++ CI_ISP_AEC_MFIELD5,
++ /*
++ * AEC measurements based on 9 little squares spread over the picture
++ */
++ CI_ISP_AEC_MFIELD9
++};
++
++
++/*
++ * histogram weight 5x5 matrix coefficients
++* (possible values are 1=0x10,15/16=0x0F,14/16,...,1/16,0)
++*/
++struct ci_isp_hist_matrix {
++ u8 weight_00; u8 weight_10; u8 weight_20; u8 weight_30; u8 weight_40;
++ u8 weight_01; u8 weight_11; u8 weight_21; u8 weight_31; u8 weight_41;
++ u8 weight_02; u8 weight_12; u8 weight_22; u8 weight_32; u8 weight_42;
++ u8 weight_03; u8 weight_13; u8 weight_23; u8 weight_33; u8 weight_43;
++ u8 weight_04; u8 weight_14; u8 weight_24; u8 weight_34; u8 weight_44;
++};
++
++/* autoexposure config */
++struct ci_isp_aec_config {
++ /*
++ * Size of 1 window of MARVIN's 5x5 mean luminance
++ * measurement grid and offset of grid
++ */
++ struct ci_isp_window isp_aecmean_lumaWindow;
++ /* Size and offset of histogram window */
++ struct ci_isp_window isp_aechist_calcWindow;
++ /* Weight martix of histogram */
++ struct ci_isp_hist_matrix isp_aechist_calcWeight;
++ /* possible AEC modes */
++ enum ci_isp_aec_mode advanced_aec_mode;
++};
++
++/* autoexposure mean values */
++struct ci_isp_aec_mean {
++ u8 occ;
++ u8 mean;
++ u8 max;
++ u8 min;
++};
++
++
++
++/* histogram weight 5x5 matrix coefficients
++ * (possible values are 1=0x10,15/16=0x0F,14/16,...,1/16,0)
++ */
++struct tsMrvHistMatrix {
++ u8 weight_00; u8 weight_10; u8 weight_20; u8 weight_30; u8 weight_40;
++ u8 weight_01; u8 weight_11; u8 weight_21; u8 weight_31; u8 weight_41;
++ u8 weight_02; u8 weight_12; u8 weight_22; u8 weight_32; u8 weight_42;
++ u8 weight_03; u8 weight_13; u8 weight_23; u8 weight_33; u8 weight_43;
++ u8 weight_04; u8 weight_14; u8 weight_24; u8 weight_34; u8 weight_44;
++};
++
++/*
++ * vi_dpcl path selector, channel mode
++ * Configuration of the Y/C splitter
++ */
++enum ci_isp_ycs_chn_mode {
++ /*
++	 * 8bit data/Y only output (deprecated, please use CI_ISP_YCS_MVRaw for
++ * new implementations)
++ */
++ CI_ISP_YCS_Y,
++ /* separated 8bit Y, C routed to both main and self path */
++ CI_ISP_YCS_MV_SP,
++ /*
++ * separated 8bit Y, C routed to main path only (self path input
++ * switched off)
++ */
++ CI_ISP_YCS_MV,
++ /*
++ * separated 8bit Y, C routed to self path only (main path input
++ * switched off)
++ */
++ CI_ISP_YCS_SP,
++ /*
++ * raw camera data routed to main path (8 or 16 bits, depends on
++	 * marvin derivative)
++ */
++ CI_ISP_YCS_MVRaw,
++ /* both main and self path input switched off */
++ CI_ISP_YCS_OFF
++};
++
++/* vi_dpcl path selector, main path cross-switch */
++enum ci_isp_dp_switch {
++ /* raw data mode */
++ CI_ISP_DP_RAW,
++ /* JPEG encoding mode */
++ CI_ISP_DP_JPEG,
++ /* main video path only */
++ CI_ISP_DP_MV
++};
++
++/* DMA-read mode selector */
++enum ci_isp_dma_read_mode {
++ /* DMA-read feature deactivated */
++ CI_ISP_DMA_RD_OFF = 0,
++ /* data from the DMA-read block feeds the self path */
++ CI_ISP_DMA_RD_SELF_PATH = 1,
++ /* data from the DMA-read block feeds the Superimpose block */
++ CI_ISP_DMA_RD_SUPERIMPOSE = 2,
++ /* data from the DMA-read block feeds the Image effects path */
++ CI_ISP_DMA_RD_IE_PATH = 3,
++ /* data from the DMA-read block feeds the JPEG encoder directly */
++ CI_ISP_DMA_RD_JPG_ENC = 4
++};
++
++/* ISP path selector */
++enum ci_isp_path {
++ /* Isp path is unknown or invalid */
++ CI_ISP_PATH_UNKNOWN = 0,
++ /* Raw data bypass */
++ CI_ISP_PATH_RAW = 1,
++ /* YCbCr path */
++ CI_ISP_PATH_YCBCR = 2,
++ /* Bayer RGB path */
++ CI_ISP_PATH_BAYER = 3
++};
++
++/* possible autofocus measurement modes */
++enum ci_isp_afm_mode {
++ /* no autofocus measurement */
++ CI_ISP_AFM_OFF,
++ /* use AF hardware to measure sharpness */
++ CI_ISP_AFM_HW,
++ /* use "Tenengrad" algorithm implemented in software */
++ CI_ISP_AFM_SW_TENENGRAD,
++ /*
++ * use "Threshold Squared Gradient" algorithm implemented in software
++ */
++ CI_ISP_AFM_SW_TRESH_SQRT_GRAD,
++ /*
++ * use "Frequency selective weighted median" algorithm implemented in
++ * software
++ */
++ CI_ISP_AFM_SW_FSWMEDIAN,
++ /* use AF hardware and normalize with mean luminance */
++ CI_ISP_AFM_HW_norm,
++ /* use "Tenengrad" algorithm and normalize with mean luminance */
++ CI_ISP_AFM_SW_TENENGRAD_norm,
++ /*
++ * use "Frequency selective weighted median" algorithm and normalize
++ * with mean luminance
++ */
++ CI_ISP_AFM_SW_FSWMEDIAN_norm
++};
++
++/* possible autofocus search strategy modes */
++enum ci_isp_afss_mode {
++ /* no focus searching */
++ CI_ISP_AFSS_OFF,
++ /* scan the full focus range to find the point of best focus */
++ CI_ISP_AFSS_FULL_RANGE,
++ /* use hillclimbing search */
++ CI_ISP_AFSS_HILLCLIMBING,
++ /*
++ * similar to full range search, but with multiple subsequent scans
++ * with
++ */
++ CI_ISP_AFSS_ADAPTIVE_RANGE,
++ /*
++ * decreasing range and step size will be performed. search strategy
++ * suggested by OneLimited for their Helimorph actuator
++ */
++ CI_ISP_AFSS_HELIMORPH_OPT,
++ /*
++ * search strategy optimized for omnivision 2630 module equipped with
++ */
++ CI_ISP_AFSS_OV2630_LPD4_OPT
++ /*
++	 * autofocus lens driven through a LPD4 stepper motor produced by
++ * Nidec Copal USA Corp. of Torrance, CA.
++ */
++};
++
++/* possible bad pixel correction type */
++enum ci_isp_bp_corr_type {
++ /* correction of bad pixel from the table */
++ CI_ISP_BP_CORR_TABLE,
++ /* direct detection and correction */
++ CI_ISP_BP_CORR_DIRECT
++};
++
++/* possible bad pixel replace approach */
++enum ci_isp_bp_corr_rep {
++ /* nearest neighbour approach */
++ CI_ISP_BP_CORR_REP_NB,
++ /* simple bilinear interpolation approach */
++ CI_ISP_BP_CORR_REP_LIN
++};
++
++/* possible bad pixel correction mode */
++enum ci_isp_bp_corr_mode {
++ /* hot pixel correction */
++ CI_ISP_BP_CORR_HOT_EN,
++ /* dead pixel correction */
++ CI_ISP_BP_CORR_DEAD_EN,
++ /* hot and dead pixel correction */
++ CI_ISP_BP_CORR_HOT_DEAD_EN
++};
++
++/* Gamma out curve (independent from the sensor characteristic). */
++#define CI_ISP_GAMMA_OUT_CURVE_ARR_SIZE (17)
++
++struct ci_isp_gamma_out_curve {
++ u16 isp_gamma_y[CI_ISP_GAMMA_OUT_CURVE_ARR_SIZE];
++ u8 gamma_segmentation;
++};
++
++/* configuration of autofocus measurement block */
++struct ci_isp_af_config {
++ /* position and size of measurement window A */
++ struct ci_isp_window wnd_pos_a;
++ /* position and size of measurement window B */
++ struct ci_isp_window wnd_pos_b;
++ /* position and size of measurement window C */
++ struct ci_isp_window wnd_pos_c;
++ /* AF measurment threshold */
++ u32 threshold;
++ /* measurement variable shift (before sum operation) */
++ u32 var_shift;
++};
++
++/* measurement results of autofocus measurement block */
++struct ci_isp_af_meas {
++ /* sharpness value of window A */
++ u32 afm_sum_a;
++ /* sharpness value of window B */
++ u32 afm_sum_b;
++ /* sharpness value of window C */
++ u32 afm_sum_c;
++ /* luminance value of window A */
++ u32 afm_lum_a;
++ /* luminance value of window B */
++ u32 afm_lum_b;
++ /* luminance value of window C */
++ u32 afm_lum_c;
++};
++
++/* configuration for correction of bad pixel block */
++struct ci_isp_bp_corr_config {
++ /* bad pixel correction type */
++ enum ci_isp_bp_corr_type bp_corr_type;
++ /* replace approach */
++ enum ci_isp_bp_corr_rep bp_corr_rep;
++ /* bad pixel correction mode */
++ enum ci_isp_bp_corr_mode bp_corr_mode;
++ /* Absolute hot pixel threshold */
++ u16 bp_abs_hot_thres;
++ /* Absolute dead pixel threshold */
++ u16 bp_abs_dead_thres;
++ /* Hot Pixel deviation Threshold */
++ u16 bp_dev_hot_thres;
++ /* Dead Pixel deviation Threshold */
++ u16 bp_dev_dead_thres;
++};
++
++/* configuration for correction of lens shading */
++struct ci_isp_ls_corr_config {
++ /* correction values of R color part */
++ u16 ls_rdata_tbl[CI_ISP_DATA_TBL_SIZE];
++ /* correction values of G color part */
++ u16 ls_gdata_tbl[CI_ISP_DATA_TBL_SIZE];
++ /* correction values of B color part */
++ u16 ls_bdata_tbl[CI_ISP_DATA_TBL_SIZE];
++ /* multiplication factors of x direction */
++ u16 ls_xgrad_tbl[CI_ISP_GRAD_TBL_SIZE];
++ /* multiplication factors of y direction */
++ u16 ls_ygrad_tbl[CI_ISP_GRAD_TBL_SIZE];
++ /* sector sizes of x direction */
++ u16 ls_xsize_tbl[CI_ISP_GRAD_TBL_SIZE];
++ /* sector sizes of y direction */
++ u16 ls_ysize_tbl[CI_ISP_GRAD_TBL_SIZE];
++
++};
++
++/* configuration for detection of bad pixel block */
++struct ci_isp_bp_det_config {
++ /* abs_dead_thres Absolute dead pixel threshold */
++ u32 bp_dead_thres;
++};
++
++/* new table element */
++struct ci_isp_bp_new_table_elem {
++ /* Bad Pixel vertical address */
++ u16 bp_ver_addr;
++ /* Bad Pixel horizontal address */
++ u16 bp_hor_addr;
++	/* MSB value of fixed pixel (decide if dead or hot) */
++ u8 bp_msb_value;
++};
++
++/* new Bad Pixel table */
++struct ci_isp_bp_new_table {
++ /* Number of possible new detected bad pixel */
++ u32 bp_number;
++ /* Array of Table element */
++ struct ci_isp_bp_new_table_elem bp_new_table_elem[8];
++};
++
++/* image effect modes */
++enum ci_isp_ie_mode {
++ /* no image effect (bypass) */
++ CI_ISP_IE_MODE_OFF,
++ /* Set a fixed chrominance of 128 (neutral grey) */
++ CI_ISP_IE_MODE_GRAYSCALE,
++ /* Luminance and chrominance data is being inverted */
++ CI_ISP_IE_MODE_NEGATIVE,
++ /*
++ * Chrominance is changed to produce a historical like brownish image
++ * color
++ */
++ CI_ISP_IE_MODE_SEPIA,
++ /*
++ * Converting picture to grayscale while maintaining one color
++ * component.
++ */
++ CI_ISP_IE_MODE_COLOR_SEL,
++	/* Edge detection, will look like a relief made of metal */
++ CI_ISP_IE_MODE_EMBOSS,
++ /* Edge detection, will look like a pencil drawing */
++ CI_ISP_IE_MODE_SKETCH
++};
++
++/* image effect color selection */
++enum ci_isp_ie_color_sel {
++ /* in CI_ISP_IE_MODE_COLOR_SEL mode, maintain the red color */
++ CI_ISP_IE_MAINTAIN_RED = 0x04,
++ /* in CI_ISP_IE_MODE_COLOR_SEL mode, maintain the green color */
++ CI_ISP_IE_MAINTAIN_GREEN = 0x02,
++ /* in CI_ISP_IE_MODE_COLOR_SEL mode, maintain the blue color */
++ CI_ISP_IE_MAINTAIN_BLUE = 0x01
++};
++
++/*
++ * image effect 3x3 matrix coefficients (possible values are -8, -4, -2, -1,
++ * 0, 1, 2, 4, 8)
++ */
++struct ci_isp_ie_matrix {
++ s8 coeff_11;
++ s8 coeff_12;
++ s8 coeff_13;
++ s8 coeff_21;
++ s8 coeff_22;
++ s8 coeff_23;
++ s8 coeff_31;
++ s8 coeff_32;
++ s8 coeff_33;
++};
++
++/* image effect configuration struct */
++struct ci_isp_ie_config {
++ /* image effect mode */
++ enum ci_isp_ie_mode mode;
++ u8 color_sel;
++ /* threshold for color selection */
++ u8 color_thres;
++ /* Cb chroma component of 'tint' color for sepia effect */
++ u8 tint_cb;
++ /* Cr chroma component of 'tint' color for sepia effect */
++ u8 tint_cr;
++	/* coefficient matrix for emboss effect */
++ struct ci_isp_ie_matrix mat_emboss;
++	/* coefficient matrix for sketch effect */
++ struct ci_isp_ie_matrix mat_sketch;
++};
++
++/* super impose transparency modes */
++enum ci_isp_si_trans_mode {
++ /* SI transparency mode is unknown (module is switched off) */
++ CI_ISP_SI_TRANS_UNKNOWN = 0,
++ /* SI transparency mode enabled */
++ CI_ISP_SI_TRANS_ENABLE = 1,
++ /* SI transparency mode disabled */
++ CI_ISP_SI_TRANS_DISABLE = 2
++};
++
++/* super impose reference image */
++enum ci_isp_si_ref_image {
++ /* SI reference image is unknown (module is switched off) */
++ CI_ISP_SI_REF_IMG_UNKNOWN = 0,
++ /* SI reference image from sensor */
++ CI_ISP_SI_REF_IMG_SENSOR = 1,
++ /* SI reference image from memory */
++ CI_ISP_SI_REF_IMG_MEMORY = 2
++};
++
++/* super impose configuration struct */
++struct ci_isp_si_config {
++ /* transparency mode on/off */
++ enum ci_isp_si_trans_mode trans_mode;
++ /* reference image from sensor/memory */
++ enum ci_isp_si_ref_image ref_image;
++ /* x offset (coordinate system of the reference image) */
++ u16 offs_x;
++ /* y offset (coordinate system of the reference image) */
++ u16 offs_y;
++ /* Y component of transparent key color */
++ u8 trans_comp_y;
++ /* Cb component of transparent key color */
++ u8 trans_comp_cb;
++ /* Cr component of transparent key color */
++ u8 trans_comp_cr;
++};
++
++/* image stabilisation modes */
++enum ci_isp_is_mode {
++ /* IS mode is unknown (module is switched off) */
++ CI_ISP_IS_MODE_UNKNOWN = 0,
++ /* IS mode enabled */
++ CI_ISP_IS_MODE_ON = 1,
++ /* IS mode disabled */
++ CI_ISP_IS_MODE_OFF = 2
++};
++
++/* image stabilisation configuration struct */
++struct ci_isp_is_config {
++ /* position and size of image stabilisation window */
++ struct ci_isp_window mrv_is_window;
++ /* maximal margin distance for X */
++ u16 max_dx;
++ /* maximal margin distance for Y */
++ u16 max_dy;
++};
++
++/* image stabilisation control struct */
++struct ci_isp_is_ctrl {
++ /* image stabilisation mode on/off */
++ enum ci_isp_is_mode is_mode;
++ /* recenter every frame by ((cur_v_offsxV_OFFS)/(2^RECENTER)) */
++ u8 recenter;
++};
++
++/* for data path switching */
++enum ci_isp_data_path {
++ CI_ISP_PATH_RAW816,
++ CI_ISP_PATH_RAW8,
++ CI_ISP_PATH_JPE,
++ CI_ISP_PATH_OFF,
++ CI_ISP_PATH_ON
++};
++
++/* buffer for memory interface */
++struct ci_isp_bufferOld {
++ u8 *pucbuffer;
++ u32 size;
++ u32 offs;
++ /* not used for Cb and Cr buffers, IRQ offset for */
++ u32 irq_offs_llength;
++ /* stores the malloc pointer address */
++ u8 *pucmalloc_start;
++ /* main buffer and line length for self buffer */
++};
++
++/* buffer for DMA memory interface */
++struct ci_isp_dma_buffer {
++ /*
++ * start of the buffer memory. Note that panning in an larger picture
++ * memory is possible by altering the buffer start address (and
++ * choosing pic_width llength)
++ */
++ u8 *pucbuffer;
++ /* size of the entire picture in bytes */
++ u32 pic_size;
++ /*
++	 * width of the picture area of interest (not necessarily the entire
++ * picture)
++ */
++ u32 pic_width;
++ /* inter-line-increment. This is the amount of bytes between */
++ u32 llength;
++ /* pixels in the same column but on different lines. */
++
++};
++
++/* color format for self picture input/output and DMA input */
++enum ci_isp_mif_col_format {
++ /* YCbCr 4:2:2 format */
++ CI_ISP_MIF_COL_FORMAT_YCBCR_422 = 0,
++ /* YCbCr 4:4:4 format */
++ CI_ISP_MIF_COL_FORMAT_YCBCR_444 = 1,
++ /* YCbCr 4:2:0 format */
++ CI_ISP_MIF_COL_FORMAT_YCBCR_420 = 2,
++ /* YCbCr 4:0:0 format */
++ CI_ISP_MIF_COL_FORMAT_YCBCR_400 = 3,
++ /* RGB 565 format */
++ CI_ISP_MIF_COL_FORMAT_RGB_565 = 4,
++ /* RGB 666 format */
++ CI_ISP_MIF_COL_FORMAT_RGB_666 = 5,
++ /* RGB 888 format */
++ CI_ISP_MIF_COL_FORMAT_RGB_888 = 6
++};
++
++/* color range for self picture input of RGB m*/
++enum ci_isp_mif_col_range {
++ mrv_mif_col_range_std = 0,
++ mrv_mif_col_range_full = 1
++};
++
++/* color phase for self picture input of RGB */
++enum ci_isp_mif_col_phase {
++ mrv_mif_col_phase_cosited = 0,
++ mrv_mif_col_phase_non_cosited = 1
++};
++
++/*
++ * picture write/read format
++ * The following examples apply to YCbCr 4:2:2 images, as all modes
++ */
++ enum ci_isp_mif_pic_form {
++ /* planar : separated buffers for Y, Cb and Cr */
++ CI_ISP_MIF_PIC_FORM_PLANAR = 0,
++ /* semi-planar: one buffer for Y and a combined buffer for Cb and Cr */
++ CI_ISP_MIF_PIC_FORM_SEMI_PLANAR = 1,
++ /* interleaved: one buffer for all */
++ CI_ISP_MIF_PIC_FORM_INTERLEAVED = 2
++};
++
++/* self picture operating modes */
++enum ci_isp_mif_sp_mode {
++ /* no rotation, no horizontal or vertical flipping */
++ CI_ISP_MIF_SP_ORIGINAL = 0,
++ /* vertical flipping (no additional rotation) */
++ CI_ISP_MIF_SP_VERTICAL_FLIP = 1,
++ /* horizontal flipping (no additional rotation) */
++ CI_ISP_MIF_SP_HORIZONTAL_FLIP = 2,
++ /* rotation 90 degrees ccw (no additional flipping) */
++ CI_ISP_MIF_SP_ROTATION_090_DEG = 3,
++ /*
++ * rotation 180 degrees ccw (equal to horizontal plus vertical
++ * flipping)
++ */
++ CI_ISP_MIF_SP_ROTATION_180_DEG = 4,
++ /* rotation 270 degrees ccw (no additional flipping) */
++ CI_ISP_MIF_SP_ROTATION_270_DEG = 5,
++ /* rotation 90 degrees ccw plus vertical flipping */
++ CI_ISP_MIF_SP_ROT_090_V_FLIP = 6,
++ /* rotation 270 degrees ccw plus vertical flipping */
++ CI_ISP_MIF_SP_ROT_270_V_FLIP = 7
++};
++
++/* MI burst length settings */
++enum ci_isp_mif_burst_length {
++ /* burst length = 4 */
++ CI_ISP_MIF_BURST_LENGTH_4 = 0,
++ /* burst length = 8 */
++ CI_ISP_MIF_BURST_LENGTH_8 = 1,
++ /* burst length = 16 */
++ CI_ISP_MIF_BURST_LENGTH_16 = 2
++};
++
++
++/* MI apply initial values settings */
++enum ci_isp_mif_init_vals {
++ /* do not set initial values */
++ CI_ISP_MIF_NO_INIT_VALS = 0,
++ /* set initial values for offset registers */
++ CI_ISP_MIF_INIT_OFFS = 1,
++ /* set initial values for base address registers */
++ CI_ISP_MIF_INIT_BASE = 2,
++ /* set initial values for offset and base address registers */
++ CI_ISP_MIF_INIT_OFFSAndBase = 3
++};
++
++/* MI when to update configuration */
++enum ci_isp_conf_update_time {
++ CI_ISP_CFG_UPDATE_FRAME_SYNC = 0,
++ CI_ISP_CFG_UPDATE_IMMEDIATE = 1,
++ CI_ISP_CFG_UPDATE_LATER = 2
++};
++
++/* control register of the MI */
++struct ci_isp_mi_ctrl {
++ /* self picture path output format */
++ enum ci_isp_mif_col_format mrv_mif_sp_out_form;
++ /* self picture path input format */
++ enum ci_isp_mif_col_format mrv_mif_sp_in_form;
++ enum ci_isp_mif_col_range mrv_mif_sp_in_range;
++ enum ci_isp_mif_col_phase mrv_mif_sp_in_phase;
++ /* self picture path write format */
++ enum ci_isp_mif_pic_form mrv_mif_sp_pic_form;
++ /* main picture path write format */
++ enum ci_isp_mif_pic_form mrv_mif_mp_pic_form;
++ /* burst length for chrominance for write port */
++ enum ci_isp_mif_burst_length burst_length_chrom;
++ /* burst length for luminance for write port */
++ enum ci_isp_mif_burst_length burst_length_lum;
++ /* enable updating of the shadow registers */
++ enum ci_isp_mif_init_vals init_vals;
++ /*
++ * for main and self picture to their init values
++ */
++ /* enable change of byte order for write port */
++ int byte_swap_enable;
++ /* enable the last pixel signalization */
++ int last_pixel_enable;
++ /* self picture path operating mode */
++ enum ci_isp_mif_sp_mode mrv_mif_sp_mode;
++ /* enable path */
++ enum ci_isp_data_path main_path;
++ /* enable path */
++ enum ci_isp_data_path self_path;
++ /*
++ * offset counter interrupt generation for fill_mp_y (counted in
++ * bytes)
++ */
++ u32 irq_offs_init;
++
++};
++
++/* buffer for memory interface */
++struct ci_isp_buffer {
++ /* buffer start address */
++ u8 *pucbuffer;
++ /* buffer size (counted in bytes) */
++ u32 size;
++ /* buffer offset count (counted in bytes) */
++ u32 offs;
++};
++
++/* main or self picture path, or DMA configuration */
++struct ci_isp_mi_path_conf {
++ /* Y picture width (counted in pixels) */
++ u32 ypic_width;
++ /* Y picture height (counted in pixels) */
++ u32 ypic_height;
++ /*
++ * line length means the distance from one pixel to the vertically
++ * next
++ */
++ u32 llength;
++ /*
++ * pixel below including the not-used blanking area, etc.
++ * (counted in pixels)
++ */
++ /* Y buffer structure */
++ struct ci_isp_buffer ybuffer;
++ /* Cb buffer structure */
++ struct ci_isp_buffer cb_buffer;
++ /* Cr buffer structure */
++ struct ci_isp_buffer cr_buffer;
++};
++
++/* DMA configuration */
++struct ci_isp_mi_dma_conf {
++ /* start DMA immediately after configuration */
++ int start_dma;
++ /* suppress v_end so that no frame end can be */
++ int frame_end_disable;
++ /*
++ * detected by the following instances
++ * enable change of byte order for read port
++ */
++ int byte_swap_enable;
++ /*
++ * Enables continuous mode. If set the same frame is read back
++ * over and over. A start pulse on dma_start is need only for the
++ * first time. To stop continuous mode reset this bit (takes
++ * effect after the next frame end) or execute a soft reset.
++ */
++ int continuous_enable;
++ /* DMA input color format */
++ enum ci_isp_mif_col_format mrv_mif_col_format;
++ /* DMA read buffer format */
++ enum ci_isp_mif_pic_form mrv_mif_pic_form;
++ /* burst length for chrominance for read port */
++ enum ci_isp_mif_burst_length burst_length_chrom;
++ /* burst length for luminance for read port */
++ enum ci_isp_mif_burst_length burst_length_lum;
++ /*
++ * Set this to TRUE if the DMA-read data is routed through
++ * the path that is normally used for the live camera
++ * data (e.g. through the image effects module).
++ */
++ int via_cam_path;
++};
++
++/* Public CAC Defines and Typedefs */
++
++/*
++ * configuration of chromatic aberration correction block (given to the
++ * CAC driver)
++ */
++struct ci_isp_cac_config {
++ /* size of the input image in pixels */
++ u16 hsize;
++ u16 vsize;
++ /* offset between image center and optical */
++ s16 hcenter_offset;
++ /* center of the input image in pixels */
++ s16 vcenter_offset;
++ /* maximum red/blue pixel shift in horizontal */
++ u8 hclip_mode;
++ /* and vertical direction, range 0..2 */
++ u8 vclip_mode;
++ /* parameters for radial shift calculation, */
++ u16 ablue;
++ /* 9 bit twos complement with 4 fractional */
++ u16 ared;
++ /* digits, valid range -16..15.9375 */
++ u16 bblue;
++ u16 bred;
++ u16 cblue;
++ u16 cred;
++ /* 0 = square pixel sensor, all other = aspect */
++ float aspect_ratio;
++ /* ratio of non-square pixel sensor */
++
++};
++
++/*
++ * register values of chromatic aberration correction block (delivered by
++ * the CAC driver)
++ */
++struct ci_isp_cac_reg_values {
++ /* maximum red/blue pixel shift in horizontal */
++ u8 hclip_mode;
++ /* and vertical direction, range 0..2 */
++ u8 vclip_mode;
++ /* TRUE=enabled, FALSE=disabled */
++ int cac_enabled;
++ /*
++ * preload value of the horizontal CAC pixel
++ * counter, range 1..4095
++ */
++ u16 hcount_start;
++ /*
++ * preload value of the vertical CAC pixel
++ * counter, range 1..4095
++ */
++ u16 vcount_start;
++ /* parameters for radial shift calculation, */
++ u16 ablue;
++ /* 9 bit twos complement with 4 fractional */
++ u16 ared;
++ /* digits, valid range -16..15.9375 */
++ u16 bblue;
++ u16 bred;
++ u16 cblue;
++ u16 cred;
++ /* horizontal normalization shift, range 0..7 */
++ u8 xnorm_shift;
++ /* horizontal normalization factor, range 16..31 */
++ u8 xnorm_factor;
++ /* vertical normalization shift, range 0..7 */
++ u8 ynorm_shift;
++ /* vertical normalization factor, range 16..31 */
++ u8 ynorm_factor;
++};
++
++struct ci_snapshot_config {
++ /* snapshot flags */
++ u32 flags;
++ /* user zoom factor to use ( Zoomfactor = 1 + (<value>*1024) ) */
++ int user_zoom;
++ /* user width (in pixel) */
++ int user_w;
++ /* user height (in pixel) */
++ int user_h;
++ /* compression ratio for JPEG snapshots */
++ u8 compression_ratio;
++};
++
++struct ci_isp_view_finder_config {
++ /* how to display the viewfinder */
++ u32 flags;
++ /* zoom factor to use ( Zoomfactor = 1 + (<value>*1024) ) */
++ int zoom;
++ /* contrast setting for LCD */
++ int lcd_contrast;
++ /* following settings are only used in VFFLAG_MODE_USER mode */
++
++ /* start pixel of upper left corner on LCD */
++ int x;
++ /* start pixel of upper left corner on LCD */
++ int y;
++ /* width (in pixel) */
++ int w;
++ /* height (in pixel) */
++ int h;
++ /* keeps the aspect ratio by cropping the input to match the output
++ * aspect ratio. */
++ int keep_aspect;
++};
++
++/* ! Number of supported DIP-Switches */
++#define FF_DIPSWITCH_COUNT 10
++
++
++#define CI_ISP_HIST_DATA_BIN_ARR_SIZE 16
++
++struct ci_isp_hist_data_bin {
++ u8 hist_bin[CI_ISP_HIST_DATA_BIN_ARR_SIZE];
++};
++
++#define MRV_MEAN_LUMA_ARR_SIZE_COL 5
++#define MRV_MEAN_LUMA_ARR_SIZE_ROW 5
++#define MRV_MEAN_LUMA_ARR_SIZE \
++ (MRV_MEAN_LUMA_ARR_SIZE_COL*MRV_MEAN_LUMA_ARR_SIZE_ROW)
++
++/* Structure contains a 2-dim 5x5 array
++ * for mean luminance values from 5x5 MARVIN measurement grid.
++ */
++struct ci_isp_mean_luma {
++ u8 mean_luma_block[MRV_MEAN_LUMA_ARR_SIZE_COL][MRV_MEAN_LUMA_ARR_SIZE_ROW];
++};
++
++/* Structure contains bits autostop and exp_meas_mode of isp_exp_ctrl */
++struct ci_isp_exp_ctrl {
++ int auto_stop;
++ int exp_meas_mode;
++ int exp_start;
++} ;
++
++
++struct ci_isp_cfg_flags {
++ /*
++ * following flag tripels controls the behaviour of the associated
++ * marvin control loops.
++ * For feature XXX, the 3 flags are totally independent and
++ * have the following meaning:
++ * fXXX:
++ * If set, there is any kind of software interaction during runtime
++ * that may lead to a modification of the feature-dependent settings.
++ * For each frame, a feature specific loop control routine is called
++ * may perform other actions based on feature specific configuration.
++ * If not set, only base settings will be applied during setup, or the
++ * reset values are left unchanged. No control routine will be called
++ * inside the processing loop.
++ * fXXXprint:
++ * If set, some status information will be printed out inside
++ * the processing loop. Status printing is independent of the
++ * other flags regarding this feature.
++ * fXXX_dis:
++ * If set, the feature-dependent submodule of the marvin is
++ * disabled or is turned into bypass mode. Note that it is
++ * still possible to set one or more of the other flags too,
++ * but this wouldn't make much sense...
++ * lens shading correction
++ */
++
++ unsigned int lsc:1;
++ unsigned int lscprint:1;
++ unsigned int lsc_dis:1;
++
++ /* bad pixel correction */
++
++ unsigned int bpc:1;
++ unsigned int bpcprint:1;
++ unsigned int bpc_dis:1;
++
++ /* black level correction */
++
++ unsigned int bls:1;
++ /* only fixed values */
++ unsigned int bls_man:1;
++ /* fixed value read from smia interface */
++ unsigned int bls_smia:1;
++ unsigned int blsprint:1;
++ unsigned int bls_dis:1;
++
++ /* (automatic) white balancing
++ * (if automatic or manual can be configured elsewhere) */
++
++ unsigned int awb:1;
++ unsigned int awbprint:1;
++ unsigned int awbprint2:1;
++ unsigned int awb_dis:1;
++
++ /* automatic exposure (and gain) control */
++
++ unsigned int aec:1;
++ unsigned int aecprint:1;
++ unsigned int aec_dis:1;
++ unsigned int aec_sceval:1;
++
++ /* auto focus */
++
++ unsigned int af:1;
++ unsigned int afprint:1;
++ unsigned int af_dis:1;
++
++ /* enable flags for various other components of the marvin */
++
++ /* color processing (brightness, contrast, saturation, hue) */
++ unsigned int cp:1;
++ /* input gamma block */
++ unsigned int gamma:1;
++ /* color conversion matrix */
++ unsigned int cconv:1;
++ /* demosaicing */
++ unsigned int demosaic:1;
++ /* output gamma block */
++ unsigned int gamma2:1;
++ /* Isp de-noise and sharpenize filters */
++ unsigned int isp_filters:1;
++ /* Isp CAC */
++ unsigned int cac:1;
++
++ /* demo stuff */
++
++ /* demo: saturation loop enable */
++ unsigned int cp_sat_loop:1;
++ /* demo: contrast loop enable */
++ unsigned int cp_contr_loop:1;
++ /* demo: brightness loop enable */
++ unsigned int cp_bright_loop:1;
++ /* demo: scaler loop enable */
++ unsigned int scaler_loop:1;
++ /* demo: use standard color conversion matrix */
++ unsigned int cconv_basic:1;
++
++ /* demo: use joystick to cycle through the image effect modes */
++ unsigned int cycle_ie_mode:1;
++
++ /* others */
++
++ /* enable continous autofocus */
++ unsigned int continous_af:1;
++
++ unsigned int bad_pixel_generation:1;
++ /* enable YCbCr full range */
++ unsigned int ycbcr_full_range:1;
++ /* enable YCbCr color phase non cosited */
++ unsigned int ycbcr_non_cosited:1;
++
++};
++
++struct ci_isp_config {
++ struct ci_isp_cfg_flags flags;
++ struct ci_sensor_ls_corr_config lsc_cfg;
++ struct ci_isp_bp_corr_config bpc_cfg;
++ struct ci_isp_bp_det_config bpd_cfg;
++ struct ci_isp_wb_config wb_config;
++ struct ci_isp_cac_config cac_config;
++ struct ci_isp_aec_config aec_cfg;
++ struct ci_isp_window aec_v2_wnd;
++ struct ci_isp_bls_config bls_cfg;
++ struct ci_isp_af_config af_cfg;
++ struct ci_isp_color_settings color;
++ struct ci_isp_ie_config img_eff_cfg;
++ enum ci_isp_demosaic_mode demosaic_mode;
++ u8 demosaic_th;
++ u8 exposure;
++ enum ci_isp_aec_mode advanced_aec_mode;
++ /* what to include in reports; */
++ u32 report_details;
++ /* an or'ed combination of the FF_REPORT_xxx defines */
++ struct ci_isp_view_finder_config view_finder;
++ /* primary snapshot */
++ struct ci_snapshot_config snapshot_a;
++ /* secondary snapshot */
++ struct ci_snapshot_config snapshot_b;
++ /* auto focus measurement mode */
++ enum ci_isp_afm_mode afm_mode;
++ /* auto focus search strategy mode */
++ enum ci_isp_afss_mode afss_mode;
++ int wb_get_gains_from_sensor_driver;
++ u8 filter_level_noise_reduc;
++ u8 filter_level_sharp;
++ u8 jpeg_enc_ratio;
++};
++
++struct ci_isp_mem_info {
++ u32 isp_bar0_pa;
++ u32 isp_bar0_size;
++ u32 isp_bar1_pa;
++ u32 isp_bar1_size;
++};
++
++struct ci_pl_system_config {
++ /* to be removed */
++ struct ci_sensor_config *isi_config;
++ struct ci_sensor_caps *isi_caps;
++ struct ci_sensor_awb_profile *sensor_awb_profile;
++
++ struct ci_isp_config isp_cfg;
++ u32 focus_max;
++ unsigned int isp_hal_enable;
++ struct v4l2_jpg_review_buffer jpg_review;
++ int jpg_review_enable;
++};
++
++/* intel private ioctl code for ci isp hal interface */
++#define BASE BASE_VIDIOC_PRIVATE
++
++#define VIDIOC_SET_SYS_CFG _IOWR('V', BASE + 1, struct ci_pl_system_config)
++#define VIDIOC_SET_JPG_ENC_RATIO _IOWR('V', BASE + 2, int)
++#define VIDIOC_GET_ISP_MEM_INFO _IOWR('V', BASE + 4, struct ci_isp_mem_info)
++
++#include "ci_va.h"
++
++/* support camera flash on CDK */
++struct ci_isp_flash_cmd {
++ int preflash_on;
++ int flash_on;
++ int prelight_on;
++};
++
++struct ci_isp_flash_config {
++ int prelight_off_at_end_of_flash;
++ int vsync_edge_positive;
++ int output_polarity_low_active;
++ int use_external_trigger;
++ u8 capture_delay;
++};
++
++#endif
+--- /dev/null
++++ b/drivers/media/video/mrstci/include/ci_isp_fmts_common.h
+@@ -0,0 +1,128 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#ifndef _ISP_FMTS_COMMON_H
++#define _ISP_FMTS_COMMON_H
++
++#define intel_fourcc(d, c, b, a) \
++ (((__u32)(d)<<0)|((__u32)(c)<<8)|((__u32)(b)<<16)|((__u32)(a)<<24))
++
++/* more bayer pattern formats support by ISP */
++
++/* RAW 8-bit */
++#define INTEL_PIX_FMT_RAW08 intel_fourcc('R', 'W', '0', '8')
++/* RAW 10-bit */
++#define INTEL_PIX_FMT_RAW10 intel_fourcc('R', 'W', '1', '0')
++/* RAW 12-bit */
++#define INTEL_PIX_FMT_RAW12 intel_fourcc('R', 'W', '1', '2')
++
++
++/*
++ * various config and info structs concentrated into one struct
++ * for simplification
++ */
++#define FORMAT_FLAGS_DITHER 0x01
++#define FORMAT_FLAGS_PACKED 0x02
++#define FORMAT_FLAGS_PLANAR 0x04
++#define FORMAT_FLAGS_RAW 0x08
++#define FORMAT_FLAGS_CrCb 0x10
++
++struct intel_fmt {
++ char *name;
++ unsigned long fourcc; /* v4l2 format id */
++ int depth;
++ int flags;
++};
++
++static struct intel_fmt fmts[] = {
++ {
++ .name = "565 bpp RGB",
++ .fourcc = V4L2_PIX_FMT_RGB565,
++ .depth = 16,
++ .flags = FORMAT_FLAGS_PACKED,
++ },
++ {
++ .name = "888 bpp BGR",
++ .fourcc = V4L2_PIX_FMT_BGR32,
++ .depth = 32,
++ .flags = FORMAT_FLAGS_PLANAR,
++ },
++ {
++ .name = "4:2:2, packed, YUYV",
++ .fourcc = V4L2_PIX_FMT_YUYV,
++ .depth = 16,
++ .flags = FORMAT_FLAGS_PACKED,
++ },
++ {
++ .name = "4:2:2 planar, YUV422P",
++ .fourcc = V4L2_PIX_FMT_YUV422P,
++ .depth = 16,
++ .flags = FORMAT_FLAGS_PLANAR,
++ },
++ {
++ .name = "4:2:0 planar, YUV420",
++ .fourcc = V4L2_PIX_FMT_YUV420,
++ .depth = 12,
++ .flags = FORMAT_FLAGS_PLANAR,
++ },
++ {
++ .name = "4:2:0 planar, YVU420",
++ .fourcc = V4L2_PIX_FMT_YVU420,
++ .depth = 12,
++ .flags = FORMAT_FLAGS_PLANAR,
++ },
++ {
++ .name = "4:2:0 semi planar, NV12",
++ .fourcc = V4L2_PIX_FMT_NV12,
++ .depth = 12,
++ .flags = FORMAT_FLAGS_PLANAR,
++ },
++ {
++ .name = "Compressed format, JPEG",
++ .fourcc = V4L2_PIX_FMT_JPEG,
++ .depth = 12,
++ .flags = FORMAT_FLAGS_PLANAR,
++ },
++ {
++ .name = "Sequential RGB",
++ .fourcc = INTEL_PIX_FMT_RAW08,
++ .depth = 8,
++ .flags = FORMAT_FLAGS_RAW,
++ },
++ {
++ .name = "Sequential RGB",
++ .fourcc = INTEL_PIX_FMT_RAW10,
++ .depth = 16,
++ .flags = FORMAT_FLAGS_RAW,
++ },
++ {
++ .name = "Sequential RGB",
++ .fourcc = INTEL_PIX_FMT_RAW12,
++ .depth = 16,
++ .flags = FORMAT_FLAGS_RAW,
++ },
++};
++
++static int NUM_FORMATS = sizeof(fmts) / sizeof(struct intel_fmt);
++#endif /* _ISP_FMTS_COMMON_H */
++
+--- /dev/null
++++ b/drivers/media/video/mrstci/include/ci_sensor_common.h
+@@ -0,0 +1,1233 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#ifndef _SENSOR_COMMON_H
++#define _SENSOR_COMMON_H
++
++#include <media/v4l2-subdev.h>
++
++#define AEC_ALGO_V1 1
++#define AEC_ALGO_V2 2
++#define AEC_ALGO_V3 3
++#define AEC_ALGO_V4 4
++
++#ifndef AEC_ALGO
++#define AEC_ALGO AEC_ALGO_V3 /*AEC_ALGO_V2*/
++#endif
++/*
++ * interface version
++ * please increment the version if you add something new to the interface.
++ * This helps upper layer software to deal with different interface versions.
++ */
++#define SENSOR_INTERFACE_VERSION 4
++#define SENSOR_TYPE_SOC 0
++#define SENSOR_TYPE_RAW 1
++/* Just for current use case */
++#define SENSOR_TYPE_2M 0
++#define SENSOR_TYPE_5M 1
++
++/*
++ * capabilities / configuration
++ */
++
++/* ulBusWidth; */
++/*
++ * to expand to a (possibly higher) resolution in marvin, the LSBs will be set
++ * to zero
++ */
++#define SENSOR_BUSWIDTH_8BIT_ZZ 0x00000001
++/*
++ * to expand to a (possibly higher) resolution in marvin, the LSBs will be
++ * copied from the MSBs
++ */
++#define SENSOR_BUSWIDTH_8BIT_EX 0x00000002
++/*
++ * formerly known as SENSOR_BUSWIDTH_10BIT (at times no marvin derivative was
++ * able to process more than 10 bit)
++ */
++#define SENSOR_BUSWIDTH_10BIT_EX 0x00000004
++#define SENSOR_BUSWIDTH_10BIT_ZZ 0x00000008
++#define SENSOR_BUSWIDTH_12BIT 0x00000010
++
++#define SENSOR_BUSWIDTH_10BIT SENSOR_BUSWIDTH_10BIT_EX
++
++/*
++ * ulMode, operating mode of the image sensor in terms of output data format
++ * and
++ */
++
++/* timing data transmission */
++
++/* YUV-Data with separate h/v sync lines (ITU-R BT.601) */
++#define SENSOR_MODE_BT601 0x00000001
++/* YUV-Data with sync words inside the datastream (ITU-R BT.656) */
++#define SENSOR_MODE_BT656 0x00000002
++/* Bayer data with separate h/v sync lines */
++#define SENSOR_MODE_BAYER 0x00000004
++/*
++ * Any binary data without line/column-structure, (e.g. already JPEG encoded)
++ * h/v sync lines act as data valid signals
++ */
++#define SENSOR_MODE_DATA 0x00000008
++/* RAW picture data with separate h/v sync lines */
++#define SENSOR_MODE_PICT 0x00000010
++/* RGB565 data with separate h/v sync lines */
++#define SENSOR_MODE_RGB565 0x00000020
++/* SMIA conform data stream (see ulSmiaMode for details) */
++#define SENSOR_MODE_SMIA 0x00000040
++/* MIPI conform data stream (see ulMipiMode for details) */
++#define SENSOR_MODE_MIPI 0x00000080
++/*
++ * Bayer data with sync words inside the datastream (similar to ITU-R BT.656)
++ */
++#define SENSOR_MODE_BAY_BT656 0x00000100
++/*
++ * Raw picture data with sync words inside the datastream (similar to ITU-R
++ * BT.656)
++ */
++#define SENSOR_MODE_RAW_BT656 0x00000200
++
++/* ulSmiaMode */
++
++/* compression mode */
++#define SENSOR_SMIA_MODE_COMPRESSED 0x00000001
++/* 8bit to 10 bit decompression */
++#define SENSOR_SMIA_MODE_RAW_8_TO_10_DECOMP 0x00000002
++/* 12 bit RAW Bayer Data */
++#define SENSOR_SMIA_MODE_RAW_12 0x00000004
++/* 10 bit RAW Bayer Data */
++#define SENSOR_SMIA_MODE_RAW_10 0x00000008
++/* 8 bit RAW Bayer Data */
++#define SENSOR_SMIA_MODE_RAW_8 0x00000010
++/* 7 bit RAW Bayer Data */
++#define SENSOR_SMIA_MODE_RAW_7 0x00000020
++/* 6 bit RAW Bayer Data */
++#define SENSOR_SMIA_MODE_RAW_6 0x00000040
++/* RGB 888 Display ready Data */
++#define SENSOR_SMIA_MODE_RGB_888 0x00000080
++/* RGB 565 Display ready Data */
++#define SENSOR_SMIA_MODE_RGB_565 0x00000100
++/* RGB 444 Display ready Data */
++#define SENSOR_SMIA_MODE_RGB_444 0x00000200
++/* YUV420 Data */
++#define SENSOR_SMIA_MODE_YUV_420 0x00000400
++/* YUV422 Data */
++#define SENSOR_SMIA_MODE_YUV_422 0x00000800
++/* SMIA is disabled */
++#define SENSOR_SMIA_OFF 0x80000000
++
++/* ulMipiMode */
++
++/* YUV 420 8-bit */
++#define SENSOR_MIPI_MODE_YUV420_8 0x00000001
++/* YUV 420 10-bit */
++#define SENSOR_MIPI_MODE_YUV420_10 0x00000002
++/* Legacy YUV 420 8-bit */
++#define SENSOR_MIPI_MODE_LEGACY_YUV420_8 0x00000004
++/* YUV 420 8-bit (CSPS) */
++#define SENSOR_MIPI_MODE_YUV420_CSPS_8 0x00000008
++/* YUV 420 10-bit (CSPS) */
++#define SENSOR_MIPI_MODE_YUV420_CSPS_10 0x00000010
++/* YUV 422 8-bit */
++#define SENSOR_MIPI_MODE_YUV422_8 0x00000020
++/* YUV 422 10-bit */
++#define SENSOR_MIPI_MODE_YUV422_10 0x00000040
++/* RGB 444 */
++#define SENSOR_MIPI_MODE_RGB444 0x00000080
++/* RGB 555 */
++#define SENSOR_MIPI_MODE_RGB555 0x00000100
++/* RGB 565 */
++#define SENSOR_MIPI_MODE_RGB565 0x00000200
++/* RGB 666 */
++#define SENSOR_MIPI_MODE_RGB666 0x00000400
++/* RGB 888 */
++#define SENSOR_MIPI_MODE_RGB888 0x00000800
++/* RAW_6 */
++#define SENSOR_MIPI_MODE_RAW_6 0x00001000
++/* RAW_7 */
++#define SENSOR_MIPI_MODE_RAW_7 0x00002000
++/* RAW_8 */
++#define SENSOR_MIPI_MODE_RAW_8 0x00004000
++/* RAW_10 */
++#define SENSOR_MIPI_MODE_RAW_10 0x00008000
++/* RAW_12 */
++#define SENSOR_MIPI_MODE_RAW_12 0x00010000
++/* MIPI is disabled */
++#define SENSOR_MIPI_OFF 0x80000000
++
++/* ulFieldInv; */
++
++#define SENSOR_FIELDINV_NOSWAP 0x00000001
++#define SENSOR_FIELDINV_SWAP 0x00000002
++
++/* ulFieldSel; */
++
++#define SENSOR_FIELDSEL_BOTH 0x00000001
++#define SENSOR_FIELDSEL_EVEN 0x00000002
++#define SENSOR_FIELDSEL_ODD 0x00000004
++
++/* ulYCSeq; */
++
++#define SENSOR_YCSEQ_YCBYCR 0x00000001
++#define SENSOR_YCSEQ_YCRYCB 0x00000002
++#define SENSOR_YCSEQ_CBYCRY 0x00000004
++#define SENSOR_YCSEQ_CRYCBY 0x00000008
++
++/* ulConv422; */
++#if 0
++#define SENSOR_CONV422_COSITED 0x00000001
++#define SENSOR_CONV422_NOCOSITED 0x00000002
++#define SENSOR_CONV422_COLORINT 0x00000004
++#endif
++#define SENSOR_CONV422_COSITED 0x00000001
++#define SENSOR_CONV422_INTER 0x00000002
++#define SENSOR_CONV422_NOCOSITED 0x00000004
++
++/* ulBPat; */
++
++#define SENSOR_BPAT_RGRGGBGB 0x00000001
++#define SENSOR_BPAT_GRGRBGBG 0x00000002
++#define SENSOR_BPAT_GBGBRGRG 0x00000004
++#define SENSOR_BPAT_BGBGGRGR 0x00000008
++
++/* ulHPol; */
++
++/* sync signal pulses high between lines */
++#define SENSOR_HPOL_SYNCPOS 0x00000001
++/* sync signal pulses low between lines */
++#define SENSOR_HPOL_SYNCNEG 0x00000002
++/* reference signal is high as long as sensor puts out line data */
++#define SENSOR_HPOL_REFPOS 0x00000004
++/* reference signal is low as long as sensor puts out line data */
++#define SENSOR_HPOL_REFNEG 0x00000008
++
++/* ulVPol; */
++
++#define SENSOR_VPOL_POS 0x00000001
++#define SENSOR_VPOL_NEG 0x00000002
++
++/* ulEdge; */
++
++#define SENSOR_EDGE_RISING 0x00000001
++#define SENSOR_EDGE_FALLING 0x00000002
++
++/* ulBls; */
++
++/* turns on/off additional black lines at frame start */
++#define SENSOR_BLS_OFF 0x00000001
++#define SENSOR_BLS_TWO_LINES 0x00000002
++/* two lines top and two lines bottom */
++#define SENSOR_BLS_FOUR_LINES 0x00000004
++
++/* ulGamma; */
++
++/* turns on/off gamma correction in the sensor ISP */
++#define SENSOR_GAMMA_ON 0x00000001
++#define SENSOR_GAMMA_OFF 0x00000002
++
++/* ulCConv; */
++
++/* turns on/off color conversion matrix in the sensor ISP */
++#define SENSOR_CCONV_ON 0x00000001
++#define SENSOR_CCONV_OFF 0x00000002
++
++/* ulRes; */
++
++/* 88x72 */
++#define SENSOR_RES_QQCIF 0x00000001
++/* 160x120 */
++#define SENSOR_RES_QQVGA 0x00000002
++/* 176x144 */
++#define SENSOR_RES_QCIF 0x00000004
++/* 320x240 */
++#define SENSOR_RES_QVGA 0x00000008
++/* 352x288 */
++#define SENSOR_RES_CIF 0x00000010
++/* 640x480 */
++#define SENSOR_RES_VGA 0x00000020
++/* 800x600 */
++#define SENSOR_RES_SVGA 0x00000040
++/* 1024x768 */
++#define SENSOR_RES_XGA 0x00000080
++/* 1280x960 max. resolution of OV9640 (QuadVGA) */
++#define SENSOR_RES_XGA_PLUS 0x00000100
++/* 1280x1024 */
++#define SENSOR_RES_SXGA 0x00000200
++/* 1600x1200 */
++#define SENSOR_RES_UXGA 0x00000400
++/* 2048x1536 */
++#define SENSOR_RES_QXGA 0x00000800
++#define SENSOR_RES_QXGA_PLUS 0x00001000
++#define SENSOR_RES_RAWMAX 0x00002000
++/* 4080x1024 */
++#define SENSOR_RES_YUV_HMAX 0x00004000
++/* 1024x4080 */
++#define SENSOR_RES_YUV_VMAX 0x00008000
++#ifdef _DEBUG
++/* depends on further defines (TEST_SIZE_H and TEST_SIZE_V) */
++#define SENSOR_RES_TEST 0x00010000
++
++#define TEST_SIZE_H (2600)
++#define TEST_SIZE_V (2046)
++/* #define TEST_SIZE_V (1950) */
++
++/* #ifdef _DEBUG */
++#endif
++/* 720x480 */
++#define SENSOR_RES_L_AFM 0x00020000
++/* 128x96 */
++#define SENSOR_RES_M_AFM 0x00040000
++/* 64x32 */
++#define SENSOR_RES_S_AFM 0x00080000
++/* 352x240 */
++#define SENSOR_RES_BP1 0x00100000
++/* 2586x2048, quadruple SXGA, 5,3 Mpix */
++#define SENSOR_RES_QSXGA 0x00200000
++/* 2600x2048, max. resolution of M5, 5,32 Mpix */
++#define SENSOR_RES_QSXGA_PLUS 0x00400000
++/* 2600x1950 */
++#define SENSOR_RES_QSXGA_PLUS2 0x00800000
++/* 2686x2048, 5.30M */
++#define SENSOR_RES_QSXGA_PLUS3 0x01000000
++/* 3200x2048, 6.56M */
++#define SENSOR_RES_WQSXGA 0x02000000
++/* 3200x2400, 7.68M */
++#define SENSOR_RES_QUXGA 0x04000000
++/* 3840x2400, 9.22M */
++#define SENSOR_RES_WQUXGA 0x08000000
++/* 4096x3072, 12.59M */
++#define SENSOR_RES_HXGA 0x10000000
++
++/* 2592x1044 replace with SENSOR_RES_QXGA_PLUS */
++/*#define SENSOR_RES_QSXGA_PLUS4 0x10000000*/
++/* 1920x1080 */
++#define SENSOR_RES_1080P 0x20000000
++/* 1280x720 */
++#define SENSOR_RES_720P 0x40000000
++
++/* FIXME 1304x980*/
++#define SENSOR_RES_VGA_PLUS 0x80000000
++#define VGA_PLUS_SIZE_H (1304)
++#define VGA_PLUS_SIZE_V (980)
++
++#define QSXGA_PLUS4_SIZE_H (2592)
++#define QSXGA_PLUS4_SIZE_V (1944)
++#define RES_1080P_SIZE_H (1920)
++#define RES_1080P_SIZE_V (1080)
++#define RES_720P_SIZE_H (1280)
++#define RES_720P_SIZE_V (720)
++#define QQCIF_SIZE_H (88)
++#define QQCIF_SIZE_V (72)
++#define QQVGA_SIZE_H (160)
++#define QQVGA_SIZE_V (120)
++#define QCIF_SIZE_H (176)
++#define QCIF_SIZE_V (144)
++#define QVGA_SIZE_H (320)
++#define QVGA_SIZE_V (240)
++#define CIF_SIZE_H (352)
++#define CIF_SIZE_V (288)
++#define VGA_SIZE_H (640)
++#define VGA_SIZE_V (480)
++#define SVGA_SIZE_H (800)
++#define SVGA_SIZE_V (600)
++#define XGA_SIZE_H (1024)
++#define XGA_SIZE_V (768)
++#define XGA_PLUS_SIZE_H (1280)
++#define XGA_PLUS_SIZE_V (960)
++#define SXGA_SIZE_H (1280)
++#define SXGA_SIZE_V (1024)
++/* will be removed soon */
++#define QSVGA_SIZE_H (1600)
++/* will be removed soon */
++#define QSVGA_SIZE_V (1200)
++#define UXGA_SIZE_H (1600)
++#define UXGA_SIZE_V (1200)
++#define QXGA_SIZE_H (2048)
++#define QXGA_SIZE_V (1536)
++#define QXGA_PLUS_SIZE_H (2592)
++#define QXGA_PLUS_SIZE_V (1944)
++#define RAWMAX_SIZE_H (4096)
++#define RAWMAX_SIZE_V (2048)
++#define YUV_HMAX_SIZE_H (4080)
++#define YUV_HMAX_SIZE_V (1024)
++#define YUV_VMAX_SIZE_H (1024)
++#define YUV_VMAX_SIZE_V (4080)
++#define BP1_SIZE_H (352)
++#define BP1_SIZE_V (240)
++#define L_AFM_SIZE_H (720)
++#define L_AFM_SIZE_V (480)
++#define M_AFM_SIZE_H (128)
++#define M_AFM_SIZE_V (96)
++#define S_AFM_SIZE_H (64)
++#define S_AFM_SIZE_V (32)
++#define QSXGA_SIZE_H (2560)
++#define QSXGA_SIZE_V (2048)
++#define QSXGA_MINUS_SIZE_V (1920)
++#define QSXGA_PLUS_SIZE_H (2600)
++#define QSXGA_PLUS_SIZE_V (2048)
++#define QSXGA_PLUS2_SIZE_H (2600)
++#define QSXGA_PLUS2_SIZE_V (1950)
++#define QUXGA_SIZE_H (3200)
++#define QUXGA_SIZE_V (2400)
++#define SIZE_H_2500 (2500)
++#define QSXGA_PLUS3_SIZE_H (2686)
++#define QSXGA_PLUS3_SIZE_V (2048)
++#define QSXGA_PLUS4_SIZE_V (1944)
++#define WQSXGA_SIZE_H (3200)
++#define WQSXGA_SIZE_V (2048)
++#define WQUXGA_SIZE_H (3200)
++#define WQUXGA_SIZE_V (2400)
++#define HXGA_SIZE_H (4096)
++#define HXGA_SIZE_V (3072)
++
++/* ulBLC; */
++#define SENSOR_DWNSZ_SUBSMPL 0x00000001
++#define SENSOR_DWNSZ_SCAL_BAY 0x00000002
++#define SENSOR_DWNSZ_SCAL_COS 0x00000004
++
++/* Camera BlackLevelCorrection on */
++#define SENSOR_BLC_AUTO 0x00000001
++/* Camera BlackLevelCorrection off */
++#define SENSOR_BLC_OFF 0x00000002
++
++/* ulAGC; */
++
++/* Camera AutoGainControl on */
++#define SENSOR_AGC_AUTO 0x00000001
++/* Camera AutoGainControl off */
++#define SENSOR_AGC_OFF 0x00000002
++
++/* ulAWB; */
++
++/* Camera AutoWhiteBalance on */
++#define SENSOR_AWB_AUTO 0x00000001
++/* Camera AutoWhiteBalance off */
++#define SENSOR_AWB_OFF 0x00000002
++
++/* ulAEC; */
++
++/* Camera AutoExposureControl on */
++#define SENSOR_AEC_AUTO 0x00000001
++/* Camera AutoExposureControl off */
++#define SENSOR_AEC_OFF 0x00000002
++
++/* ulCieProfile; */
++#define ISI_AEC_MODE_STAND 0x00000001
++#define ISI_AEC_MODE_SLOW 0x00000002
++#define ISI_AEC_MODE_FAST 0x00000004
++#define ISI_AEC_MODE_NORMAL 0x00000008
++#define SENSOR_CIEPROF_A 0x00000001
++#define SENSOR_CIEPROF_B 0x00000002
++#define SENSOR_CIEPROF_C 0x00000004
++#define SENSOR_CIEPROF_D50 0x00000008
++#define SENSOR_CIEPROF_D55 0x00000010
++#define SENSOR_CIEPROF_D65 0x00000020
++#define SENSOR_CIEPROF_D75 0x00000040
++#define SENSOR_CIEPROF_E 0x00000080
++#define SENSOR_CIEPROF_FLUOR 0x00000100
++#define SENSOR_CIEPROF_FLUORH 0x00000200
++#define SENSOR_CIEPROF_TUNG 0x00000400
++#define SENSOR_CIEPROF_TWI 0x00000800
++#define SENSOR_CIEPROF_SUN 0x00001000
++#define SENSOR_CIEPROF_FLASH 0x00002000
++#define SENSOR_CIEPROF_SHADE 0x00004000
++#define SENSOR_CIEPROF_DAY 0x00008000
++#define SENSOR_CIEPROF_F1 0x00010000
++#define SENSOR_CIEPROF_F2 0x00020000
++#define SENSOR_CIEPROF_F3 0x00040000
++#define SENSOR_CIEPROF_F4 0x00080000
++#define SENSOR_CIEPROF_F5 0x00100000
++#define SENSOR_CIEPROF_F6 0x00200000
++#define SENSOR_CIEPROF_F7 0x00400000
++#define SENSOR_CIEPROF_F8 0x00800000
++#define SENSOR_CIEPROF_F9 0x01000000
++#define SENSOR_CIEPROF_F10 0x02000000
++#define SENSOR_CIEPROF_F11 0x04000000
++#define SENSOR_CIEPROF_F12 0x08000000
++#define SENSOR_CIEPROF_CLOUDY 0x10000000
++#define SENSOR_CIEPROF_SUNNY 0x20000000
++#define SENSOR_CIEPROF_OLDISS 0x80000000
++#define SENSOR_CIEPROF_DEFAULT 0x00000000
++
++/* ulFlickerFreq */
++
++/* no compensation for flickering environmental illumination */
++#define SENSOR_FLICKER_OFF 0x00000001
++/* compensation for 100Hz flicker frequency (at 50Hz mains frequency) */
++#define SENSOR_FLICKER_100 0x00000002
++/* compensation for 120Hz flicker frequency (at 60Hz mains frequency) */
++#define SENSOR_FLICKER_120 0x00000004
++
++/*
++ * sensor capabilities struct: a struct member may have 0, 1 or several bits
++ * set according to the capabilities of the sensor. All struct members must be
++ * unsigned int and no padding is allowed. Thus, access to the fields is also
++ * possible by means of a field of unsigned int values. Indicees for the
++ * field-like access are given below.
++ */
++struct ci_sensor_caps{
++ unsigned int bus_width;
++ unsigned int mode;
++ unsigned int field_inv;
++ unsigned int field_sel;
++ unsigned int ycseq;
++ unsigned int conv422;
++ unsigned int bpat;
++ unsigned int hpol;
++ unsigned int vpol;
++ unsigned int edge;
++ unsigned int bls;
++ unsigned int gamma;
++ unsigned int cconv;
++ unsigned int res;
++ unsigned int dwn_sz;
++ unsigned int blc;
++ unsigned int agc;
++ unsigned int awb;
++ unsigned int aec;
++ /* extention SENSOR version 2 */
++ unsigned int cie_profile;
++
++ /* extention SENSOR version 3 */
++ unsigned int flicker_freq;
++
++ /* extension SENSOR version 4 */
++ unsigned int smia_mode;
++ unsigned int mipi_mode;
++
++ /* Add name here to load shared library */
++ unsigned int type;
++ char name[32];
++
++ struct v4l2_subdev sd;
++};
++
++#define SENSOR_CAP_BUSWIDTH 0
++#define SENSOR_CAP_MODE 1
++#define SENSOR_CAP_FIELDINV 2
++#define SENSOR_CAP_FIELDSEL 3
++#define SENSOR_CAP_YCSEQ 4
++#define SENSOR_CAP_CONV422 5
++#define SENSOR_CAP_BPAT 6
++#define SENSOR_CAP_HPOL 7
++#define SENSOR_CAP_VPOL 8
++#define SENSOR_CAP_EDGE 9
++#define SENSOR_CAP_BLS 10
++#define SENSOR_CAP_GAMMA 11
++#define SENSOR_CAP_CCONF 12
++#define SENSOR_CAP_RES 13
++#define SENSOR_CAP_DWNSZ 14
++#define SENSOR_CAP_BLC 15
++#define SENSOR_CAP_AGC 16
++#define SENSOR_CAP_AWB 17
++#define SENSOR_CAP_AEC 18
++#define SENSOR_CAP_CIEPROFILE 19
++#define SENSOR_CAP_FLICKERFREQ 20
++#define SENSOR_CAP_SMIAMODE 21
++#define SENSOR_CAP_MIPIMODE 22
++#define SENSOR_CAP_AECMODE 23
++
++
++/* size of capabilities array (in number of unsigned int fields) */
++#define SENSOR_CAP_COUNT 24
++
++/*
++ * Sensor configuration struct: same layout as the capabilities struct, but to
++ * configure the sensor all struct members which are supported by the sensor
++ * must have only 1 bit set. Members which are not supported by the sensor
++ * must not have any bits set.
++ */
++#define ci_sensor_config ci_sensor_caps
++
++/* single parameter support */
++
++/* exposure time */
++#define SENSOR_PARM_EXPOSURE 0
++/* index in the AE control table */
++#define SENSOR_PARM_EXPTBL_INDEX 1
++
++/* gain */
++/* overall gain (all components) */
++#define SENSOR_PARM_GAIN 2
++/* component gain of the red pixels */
++#define SENSOR_PARM_CGAIN_R 3
++/* component gain of the green pixels */
++#define SENSOR_PARM_CGAIN_G 4
++/* component gain of the blue pixels */
++#define SENSOR_PARM_CGAIN_B 5
++/*
++ * component gain of the green pixels sharing a bayer line with the red ones
++ */
++#define SENSOR_PARM_CGAINB_GR 6
++/*
++ * component gain of the green pixels sharing a bayer line with the blue ones
++ */
++#define SENSOR_PARM_CGAINB_GB 7
++
++/* blacklevel */
++
++/* black-level adjustment (all components) */
++#define SENSOR_PARM_BLKL 8
++/* component black-level of the red pixels */
++#define SENSOR_PARM_CBLKL_R 9
++/* component black-level of the green pixels */
++#define SENSOR_PARM_CBLKL_G 10
++/* component black-level of the blue pixels */
++#define SENSOR_PARM_CBLKL_B 11
++/*
++ * component black-level of the green pixels sharing a bayer line with the red
++ * ones
++ */
++#define SENSOR_PARM_CBLKLB_GR 12
++/*
++ * component black-level of the green pixels sharing a bayer line with the
++ * blue ones
++ */
++#define SENSOR_PARM_CBLKLB_GB 13
++
++/* resolution & cropping */
++
++/* base resolution in pixel (X) */
++#define SENSOR_PARM_BASERES_X 14
++/* base resolution in pixel (Y) */
++#define SENSOR_PARM_BASERES_Y 15
++/* window top-left pixel (X) */
++#define SENSOR_PARM_WINDOW_X 16
++/* window top-left pixel (Y) */
++#define SENSOR_PARM_WINDOW_Y 17
++/* window width in pixel */
++#define SENSOR_PARM_WINDOW_W 18
++/* window height in pixel */
++#define SENSOR_PARM_WINDOW_H 19
++
++/* frame rate / clock */
++
++/*
++ * frame rate in frames per second, fixed point format, 16 bit fractional part
++ */
++#define SENSOR_PARM_FRAMERATE_FPS 20
++/* frame rate fine adjustment */
++#define SENSOR_PARM_FRAMERATE_PITCH 21
++/* clock divider setting */
++#define SENSOR_PARM_CLK_DIVIDER 22
++/* input clock in Hz. */
++#define SENSOR_PARM_CLK_INPUT 23
++/*
++ * output (pixel-) clock in Hz. Note that for e.g. YUV422-formats, 2 pixel
++ * clock cycles are needed per pixel
++ */
++#define SENSOR_PARM_CLK_PIXEL 24
++
++/* number of parameter IDs */
++
++#define SENSOR_PARM__COUNT 25
++
++/* bit description of the result of the IsiParmInfo routine */
++
++/* parameter can be retrieved from the sensor */
++#define SENSOR_PARMINFO_GET 0x00000001
++/* parameter can be set into the sensor */
++#define SENSOR_PARMINFO_SET 0x00000002
++/* parameter can change at any time during operation */
++#define SENSOR_PARMINFO_VOLATILE 0x00000004
++/* range information available for the parameter in question */
++#define SENSOR_PARMINFO_RANGE 0x00000008
++/* range of possible values is not continous. */
++#define SENSOR_PARMINFO_DISCRETE 0x00000010
++/* parameter may change after a configuration update. */
++#define SENSOR_PARMINFO_CONFIG 0x00000020
++/* range information may change after a configuration update. */
++#define SENSOR_PARMINFO_RCONFIG 0x00000040
++
++/* multi-camera support */
++#define SENSOR_UNKNOWN_SENSOR_ID (0)
++
++/* structure / type definitions */
++/*
++ * Input gamma correction curve for R, G or B of the sensor. Since this gamma
++ * curve is sensor specific, it will be deliveres by the sensors specific code.
++ * This curve will be programmed into Marvin registers.
++ */
++#define SENSOR_GAMMA_CURVE_ARR_SIZE (17)
++
++struct ci_sensor_gamma_curve{
++ unsigned short isp_gamma_y[SENSOR_GAMMA_CURVE_ARR_SIZE];
++
++ /* if three curves are given separately for RGB */
++ unsigned int gamma_dx0;
++
++ /* only the struct for R holds valid DX values */
++ unsigned int gamma_dx1;
++};
++
++/*
++ * SENSOR fixed point constant values They are represented as signed fixed point
++ * numbers with 12 bit integer and 20 bit fractional part, thus ranging from
++ * -2048.0000000 (0x80000000) to +2047.9999990 (0x7FFFFFFF). In the following
++ * some frequently used constant values are defined.
++ */
++/* - 0.794944 */
++#define SENSOR_FP_M0000_794944 (0xFFF347E9)
++/* - 0.500000 */
++#define SENSOR_FP_M0000_500000 (0xFFF80000)
++/* - 0.404473 */
++#define SENSOR_FP_M0000_404473 (0xFFF98748)
++/* - 0.062227 */
++#define SENSOR_FP_M0000_062227 (0xFFFF011F)
++/* - 0.024891 */
++#define SENSOR_FP_M0000_024891 (0xFFFF9A0C)
++
++/* 0.000000 */
++#define SENSOR_FP_P0000_000000 (0x00000000)
++
++/* + 0.500000 */
++#define SENSOR_FP_P0000_500000 (0x00080000)
++/* + 1.000000 */
++#define SENSOR_FP_P0001_000000 (0x00100000)
++/* + 1.163636 */
++#define SENSOR_FP_P0001_163636 (0x00129E40)
++/* + 1.600778 */
++#define SENSOR_FP_P0001_600778 (0x00199CC9)
++/* + 1.991249 */
++#define SENSOR_FP_P0001_991249 (0x001FDC27)
++/* + 16.000000 */
++#define SENSOR_FP_P0016_000000 (0x01000000)
++/* + 128.000000 */
++#define SENSOR_FP_P0128_000000 (0x08000000)
++/* + 255.000000 */
++#define SENSOR_FP_P0255_000000 (0x0FF00000)
++/* + 256.000000 */
++#define SENSOR_FP_P0256_000000 (0x10000000)
++
++/*
++ * Matrix coefficients used for CrossTalk and/or color conversion. The 9
++ * coefficients are laid out as follows (zero based index):
++ * 0 | 1 | 2
++ * 3 | 4 | 5
++ * 6 | 7 | 8
++ * They are represented as signed fixed point numbers with 12 bit integer and
++ * 20 bit fractional part, thus ranging from -2048.0000000 (0x80000000) to
++ * +2047.9999990 (0x7FFFFFFF).
++ */
++struct ci_sensor_3x3_matrix{
++ int coeff[9];
++};
++
++/*
++ * Matrix coefficients used for CrossTalk and/or color conversion. The 9
++ * coefficients are laid out as follows (zero based index):
++ * 0 | 1 | 2
++ * 3 | 4 | 5
++ * 6 | 7 | 8
++ * They are represented as float numbers
++ */
++struct ci_sensor_3x3_float_matrix{
++ float coeff[9];
++};
++
++struct ci_sensor_3x1_float_matrix{
++ float coeff[3];
++};
++
++struct ci_sensor_4x1_float_matrix{
++ float coeff[4];
++};
++
++struct ci_sensor_3x2_float_matrix{
++ float coeff[6];
++};
++
++struct ci_sensor_2x1_float_matrix{
++ float coeff[2];
++};
++
++struct ci_sensor_2x2_float_matrix{
++ float coeff[4];
++};
++
++struct ci_sensor_1x1_float_matrix{
++ float coeff[1];
++};
++
++struct ci_sensor_gauss_factor{
++ float gauss_factor;
++};
++
++struct isp_pca_values{
++ float pcac1;
++ float pcac2;
++};
++
++/*
++ * CrossTalk offset. In addition to the matrix multiplication an offset can be
++ * added to the pixel values for R, G and B separately. This offset is applied
++ * after the matrix multiplication. The values are arranged as unified, see
++ * above.
++ */
++struct ci_sensor_xtalk_offset{
++ int ct_offset_red;
++ int ct_offset_green;
++ int ct_offset_blue;
++};
++
++struct ci_sensor_xtalk_float_offset{
++ float ct_offset_red;
++ float ct_offset_green;
++ float ct_offset_blue;
++};
++
++/*
++ * white balancing gains There are two green gains: One for the green Bayer
++ * patterns in the red and one for the blue line. In the case the used MARVIN
++ * derivative is not able to apply separate green gains the mean value of both
++ * greens will be used for the green gain. The component gains are represented
++ * as signed fixed point numbers with 12 bit integer and 20 bit fractional
++ * part, thus ranging from -2048.0000000 (0x80000000) to +2047.9999990
++ * (0x7FFFFFFF). Example: +1.0 is represented by 0x00100000.
++ */
++struct ci_sensor_component_gain{
++ float red;
++ float green_r;
++ float green_b;
++ float blue;
++};
++
++/*
++ * white balance values, default is 0x80 for all components. The struct can be
++ * used to provide linear scaling factors to achive a suitable white balance
++ * for certain lightning conditions.
++ */
++struct ci_sensor_comp_gain{
++ float red;
++ float green;
++ float blue;
++};
++
++/*
++ * cross-talk matrix dependent minimum / maximum red and blue gains
++ */
++struct ci_sensor_component_gain_limits{
++ unsigned short red_lower_limit;
++ unsigned short red_upper_limit;
++ unsigned short blue_lower_limit;
++ unsigned short blue_upper_limit;
++ unsigned int next_cie_higher_temp;
++ unsigned int next_cie_lower_temp;
++};
++
++/*
++* sensor characteristic struct. Is filled in by sensor specific code after
++* main configuration. Features not supported by the sensor driver code
++* will be initialized with default values (1x linear gamma, standard
++* color conversion, cross talk and component gain settings).
++*/
++struct ci_sensor_awb_profile{
++
++ /*
++ * In the case that all 3 gamma curves are identically, just
++ * set all 3 pointers to the same address.
++ */
++
++ /* input gammaR */
++ const struct ci_sensor_gamma_curve *gamma_curve_r;
++
++ /* input gammaG */
++ const struct ci_sensor_gamma_curve *gamma_curve_g;
++
++ /* input gammaB */
++ const struct ci_sensor_gamma_curve *gamma_curve_b;
++
++ /* ColorConversion matrix coefficients */
++ const struct ci_sensor_3x3_float_matrix *color_conv_coeff;
++
++ /* CrossTalk matrix coefficients */
++ const struct ci_sensor_3x3_float_matrix *cross_talk_coeff;
++
++ /* CrossTalk offsets */
++ const struct ci_sensor_xtalk_float_offset *cross_talk_offset;
++ const struct ci_sensor_3x1_float_matrix *svd_mean_value;
++ const struct ci_sensor_3x2_float_matrix *pca_matrix;
++ const struct ci_sensor_2x1_float_matrix *gauss_mean_value;
++ const struct ci_sensor_2x2_float_matrix *covariance_matrix;
++ const struct ci_sensor_gauss_factor *gauss_factor;
++ const struct ci_sensor_2x1_float_matrix *threshold;
++ const struct ci_sensor_1x1_float_matrix *k_factor;
++ const struct ci_sensor_1x1_float_matrix *gexp_middle;
++ const struct ci_sensor_1x1_float_matrix *var_distr_in;
++ const struct ci_sensor_1x1_float_matrix *mean_distr_in;
++ const struct ci_sensor_1x1_float_matrix *var_distr_out;
++ const struct ci_sensor_1x1_float_matrix *mean_distr_out;
++ const struct ci_sensor_component_gain *component_gain;
++ const struct ci_sensor_loc_dist *loc_dist;
++
++};
++
++/*
++ * General purpose window. Normally it is used to describe a WOI (Window Of
++ * Interest) inside the background area (e.g. image data area). The offset
++ * values count from 0 of the background area. The defined point is the upper
++ * left corner of the WOI with the specified width and height.
++ */
++struct ci_sensor_window{
++ unsigned short hoffs;
++ unsigned short voffs;
++ unsigned short hsize;
++ unsigned short vsize;
++};
++
++/*
++ * Image data description. The frame size describes the complete image data
++ * area output of the sensor. This includes dummy, black, dark, visible and
++ * manufacturer specific pixels which could be combined in rows and / or in
++ * columns. The visible window describes the visible pixel area inside the
++ * image data area. In the case the image data area does only contain visible
++ * pixels, the offset values have to be 0 and the horizontal and vertical
++ * sizes are equal to the frame size.
++ */
++struct ci_sensor_image_data_info{
++ unsigned short frame_h_size;
++ unsigned short frame_v_size;
++ struct ci_sensor_window visible_window;
++};
++
++/* black level compensation mean values */
++struct ci_sensor_blc_mean{
++ unsigned char mean_a;
++ unsigned char mean_b;
++ unsigned char mean_c;
++ unsigned char mean_d;
++};
++
++/* autowhitebalance mean values */
++
++struct ci_sensor_awb_mean{
++#if 0
++ unsigned int white;
++ unsigned char mean_y;
++ unsigned char mean_cb;
++ unsigned char mean_cr;
++#else
++ unsigned int white;
++ unsigned char mean_Y__G;
++ unsigned char mean_cb__B;
++ unsigned char mean_cr__R;
++#endif
++};
++
++/* autowhitebalance mean values */
++
++struct ci_sensor_awb_float_mean{
++ unsigned int white;
++ float mean_y;
++ float mean_cb;
++ float mean_cr;
++};
++
++/* autoexposure mean values */
++
++struct ci_sensor_aec_mean{
++ unsigned char occ;
++ unsigned char mean;
++ unsigned char max;
++ unsigned char min;
++};
++
++/* bad pixel element attribute */
++
++enum ci_sensor_bp_corr_attr{
++
++ /* hot pixel */
++ SENSOR_BP_HOT,
++
++ /* dead pixel */
++ SENSOR_BP_DEAD
++};
++
++/* table element */
++
++struct ci_sensor_bp_table_elem{
++
++ /* Bad Pixel vertical address */
++ unsigned short bp_ver_addr;
++
++ /* Bad Pixel horizontal address */
++ unsigned short bp_hor_addr;
++
++ /* Bad pixel type (dead or hot) */
++ enum ci_sensor_bp_corr_attr bp_type;
++};
++
++/* Bad Pixel table */
++
++struct ci_sensor_bp_table{
++
++ /* Number of detected bad pixel */
++ unsigned int bp_number;
++
++ /* Pointer to BP Table */
++ struct ci_sensor_bp_table_elem *bp_table_elem;
++
++ /* Number of Table elements */
++ unsigned int bp_table_elem_num;
++};
++
++#define SENSOR_CTRL_TYPE_INTEGER 1
++#define SENSOR_CTRL_TYPE_BOOLEAN 2
++#define SENSOR_CTRL_TYPE_MENU 3
++#define SENSOR_CTRL_TYPE_BUTTON 4
++#define SENSOR_CTRL_TYPE_INTEGER64 5
++#define SENSOR_CTRL_TYPE_CTRL_CLASS 6
++
++#define SENSOR_CTRL_CLASS_USER 0x00980000
++#define SENSOR_CID_BASE (SENSOR_CTRL_CLASS_USER | 0x900)
++#define SENSOR_CID_USER_BASE SENSOR_CID_BASE
++/* IDs reserved for driver specific controls */
++#define SENSOR_CID_PRIVATE_BASE 0x08000000
++
++#define SENSOR_CID_USER_CLASS (SENSOR_CTRL_CLASS_USER | 1)
++#define SENSOR_CID_BRIGHTNESS (SENSOR_CID_BASE+0)
++#define SENSOR_CID_CONTRAST (SENSOR_CID_BASE+1)
++#define SENSOR_CID_SATURATION (SENSOR_CID_BASE+2)
++#define SENSOR_CID_HUE (SENSOR_CID_BASE+3)
++#define SENSOR_CID_AUDIO_VOLUME (SENSOR_CID_BASE+5)
++#define SENSOR_CID_AUDIO_BALANCE (SENSOR_CID_BASE+6)
++#define SENSOR_CID_AUDIO_BASS (SENSOR_CID_BASE+7)
++#define SENSOR_CID_AUDIO_TREBLE (SENSOR_CID_BASE+8)
++#define SENSOR_CID_AUDIO_MUTE (SENSOR_CID_BASE+9)
++#define SENSOR_CID_AUDIO_LOUDNESS (SENSOR_CID_BASE+10)
++#define SENSOR_CID_BLACK_LEVEL (SENSOR_CID_BASE+11)
++#define SENSOR_CID_AUTO_WHITE_BALANCE (SENSOR_CID_BASE+12)
++#define SENSOR_CID_DO_WHITE_BALANCE (SENSOR_CID_BASE+13)
++#define SENSOR_CID_RED_BALANCE (SENSOR_CID_BASE+14)
++#define SENSOR_CID_BLUE_BALANCE (SENSOR_CID_BASE+15)
++#define SENSOR_CID_GAMMA (SENSOR_CID_BASE+16)
++#define SENSOR_CID_WHITENESS (SENSOR_CID_GAMMA)
++#define SENSOR_CID_EXPOSURE (SENSOR_CID_BASE+17)
++#define SENSOR_CID_AUTOGAIN (SENSOR_CID_BASE+18)
++#define SENSOR_CID_GAIN (SENSOR_CID_BASE+19)
++#define SENSOR_CID_HFLIP (SENSOR_CID_BASE+20)
++#define SENSOR_CID_VFLIP (SENSOR_CID_BASE+21)
++#define SENSOR_CID_HCENTER (SENSOR_CID_BASE+22)
++#define SENSOR_CID_VCENTER (SENSOR_CID_BASE+23)
++#define SENSOR_CID_LASTP1 (SENSOR_CID_BASE+24)
++
++struct ci_sensor_parm{
++ unsigned int index;
++ int value;
++ int max;
++ int min;
++ int info;
++ int type;
++ char name[32];
++ int step;
++ int def_value;
++ int flags;
++};
++
++#define MRV_GRAD_TBL_SIZE 8
++#define MRV_DATA_TBL_SIZE 289
++struct ci_sensor_ls_corr_config{
++ /* correction values of R color part */
++ unsigned short ls_rdata_tbl[MRV_DATA_TBL_SIZE];
++ /* correction values of G color part */
++ unsigned short ls_gdata_tbl[MRV_DATA_TBL_SIZE];
++ /* correction values of B color part */
++ unsigned short ls_bdata_tbl[MRV_DATA_TBL_SIZE];
++ /* multiplication factors of x direction */
++ unsigned short ls_xgrad_tbl[MRV_GRAD_TBL_SIZE];
++ /* multiplication factors of y direction */
++ unsigned short ls_ygrad_tbl[MRV_GRAD_TBL_SIZE];
++ /* sector sizes of x direction */
++ unsigned short ls_xsize_tbl[MRV_GRAD_TBL_SIZE];
++ /* sector sizes of y direction */
++ unsigned short ls_ysize_tbl[MRV_GRAD_TBL_SIZE];
++};
++
++struct ci_sensor_reg{
++ unsigned int addr;
++ unsigned int value;
++};
++
++struct ci_sensor_loc_dist{
++ float pca1_low_temp;
++ float pca1_high_temp;
++ float locus_distance;
++ float a2;
++ float a1;
++ float a0;
++};
++
++static inline int ci_sensor_res2size(unsigned int res, unsigned short *h_size,
++ unsigned short *v_size)
++{
++ unsigned short hsize;
++ unsigned short vsize;
++ int err = 0;
++
++ switch (res) {
++ case SENSOR_RES_QQCIF:
++ hsize = QQCIF_SIZE_H;
++ vsize = QQCIF_SIZE_V;
++ break;
++ case SENSOR_RES_QQVGA:
++ hsize = QQVGA_SIZE_H;
++ vsize = QQVGA_SIZE_V;
++ break;
++ case SENSOR_RES_QCIF:
++ hsize = QCIF_SIZE_H;
++ vsize = QCIF_SIZE_V;
++ break;
++ case SENSOR_RES_QVGA:
++ hsize = QVGA_SIZE_H;
++ vsize = QVGA_SIZE_V;
++ break;
++ case SENSOR_RES_CIF:
++ hsize = CIF_SIZE_H;
++ vsize = CIF_SIZE_V;
++ break;
++ case SENSOR_RES_VGA:
++ hsize = VGA_SIZE_H;
++ vsize = VGA_SIZE_V;
++ break;
++ case SENSOR_RES_SVGA:
++ hsize = SVGA_SIZE_H;
++ vsize = SVGA_SIZE_V;
++ break;
++ case SENSOR_RES_XGA:
++ hsize = XGA_SIZE_H;
++ vsize = XGA_SIZE_V;
++ break;
++ case SENSOR_RES_XGA_PLUS:
++ hsize = XGA_PLUS_SIZE_H;
++ vsize = XGA_PLUS_SIZE_V;
++ break;
++ case SENSOR_RES_SXGA:
++ hsize = SXGA_SIZE_H;
++ vsize = SXGA_SIZE_V;
++ break;
++ case SENSOR_RES_UXGA:
++ hsize = UXGA_SIZE_H;
++ vsize = UXGA_SIZE_V;
++ break;
++ case SENSOR_RES_QXGA:
++ hsize = QXGA_SIZE_H;
++ vsize = QXGA_SIZE_V;
++ break;
++ case SENSOR_RES_QSXGA:
++ hsize = QSXGA_SIZE_H;
++ vsize = QSXGA_SIZE_V;
++ break;
++ case SENSOR_RES_QSXGA_PLUS:
++ hsize = QSXGA_PLUS_SIZE_H;
++ vsize = QSXGA_PLUS_SIZE_V;
++ break;
++ case SENSOR_RES_QSXGA_PLUS2:
++ hsize = QSXGA_PLUS2_SIZE_H;
++ vsize = QSXGA_PLUS2_SIZE_V;
++ break;
++ case SENSOR_RES_QSXGA_PLUS3:
++ hsize = QSXGA_PLUS3_SIZE_H;
++ vsize = QSXGA_PLUS3_SIZE_V;
++ break;
++ case SENSOR_RES_WQSXGA:
++ hsize = WQSXGA_SIZE_H;
++ vsize = WQSXGA_SIZE_V;
++ break;
++ case SENSOR_RES_QUXGA:
++ hsize = QUXGA_SIZE_H;
++ vsize = QUXGA_SIZE_V;
++ break;
++ case SENSOR_RES_WQUXGA:
++ hsize = WQUXGA_SIZE_H;
++ vsize = WQUXGA_SIZE_V;
++ break;
++ case SENSOR_RES_HXGA:
++ hsize = HXGA_SIZE_H;
++ vsize = HXGA_SIZE_V;
++ break;
++ case SENSOR_RES_RAWMAX:
++ hsize = RAWMAX_SIZE_H;
++ vsize = RAWMAX_SIZE_V;
++ break;
++ case SENSOR_RES_YUV_HMAX:
++ hsize = YUV_HMAX_SIZE_H;
++ vsize = YUV_HMAX_SIZE_V;
++ break;
++ case SENSOR_RES_YUV_VMAX:
++ hsize = YUV_VMAX_SIZE_H;
++ vsize = YUV_VMAX_SIZE_V;
++ break;
++ case SENSOR_RES_BP1:
++ hsize = BP1_SIZE_H;
++ vsize = BP1_SIZE_V;
++ break;
++ case SENSOR_RES_L_AFM:
++ hsize = L_AFM_SIZE_H;
++ vsize = L_AFM_SIZE_V;
++ break;
++ case SENSOR_RES_M_AFM:
++ hsize = M_AFM_SIZE_H;
++ vsize = M_AFM_SIZE_V;
++ break;
++ case SENSOR_RES_S_AFM:
++ hsize = S_AFM_SIZE_H;
++ vsize = S_AFM_SIZE_V;
++ break;
++
++ case SENSOR_RES_QXGA_PLUS:
++ hsize = QXGA_PLUS_SIZE_H;
++ vsize = QXGA_PLUS_SIZE_V;
++ break;
++
++ case SENSOR_RES_1080P:
++ hsize = RES_1080P_SIZE_H;
++ vsize = 1080;
++ break;
++
++ case SENSOR_RES_720P:
++ hsize = RES_720P_SIZE_H;
++ vsize = RES_720P_SIZE_V;
++ break;
++
++ case SENSOR_RES_VGA_PLUS:
++ hsize = VGA_PLUS_SIZE_H;
++ vsize = VGA_PLUS_SIZE_V;
++ break;
++
++ default:
++ hsize = 0;
++ vsize = 0;
++ err = -1;
++ printk(KERN_ERR "ci_sensor_res2size: Resolution 0x%08x"
++ "unknown\n", res);
++ break;
++ }
++
++ if (h_size != NULL)
++ *h_size = hsize;
++ if (v_size != NULL)
++ *v_size = vsize;
++
++ return err;
++}
++#endif
+--- /dev/null
++++ b/drivers/media/video/mrstci/include/ci_va.h
+@@ -0,0 +1,42 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++/* for buffer sharing between CI and VA */
++#ifndef _CI_VA_H
++#define _CI_VA_H
++
++struct ci_frame_info {
++ unsigned long frame_id; /* in */
++ unsigned int width; /* out */
++ unsigned int height; /* out */
++ unsigned int stride; /* out */
++ unsigned int fourcc; /* out */
++ unsigned int offset; /* out */
++};
++
++#define ISP_IOCTL_GET_FRAME_INFO _IOWR('V', 192 + 5, struct ci_frame_info)
++
++#endif
++
+--- /dev/null
++++ b/drivers/media/video/mrstci/include/v4l2_jpg_review.h
+@@ -0,0 +1,48 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#ifndef __V4L2_JPG_REVIEW_EXT_H
++#define __V4L2_JPG_REVIEW_EXT_H
++
++#include <linux/videodev2.h>
++
++/*
++ * Moorestown JPG image auto review structure and IOCTL.
++ */
++struct v4l2_jpg_review_buffer{
++ __u32 width; /* in: frame width */
++ __u32 height; /* in: frame height */
++ __u32 pix_fmt; /* in: frame fourcc */
++ __u32 jpg_frame; /* in: corresponding jpg frame id */
++ __u32 bytesperline; /* out: 0 if not used */
++ __u32 frame_size; /* out: frame size */
++ __u32 offset; /* out: mmap offset */
++};
++
++#define BASE_VIDIOC_PRIVATE_JPG_REVIEW (BASE_VIDIOC_PRIVATE + 10)
++
++#define VIDIOC_CREATE_JPG_REVIEW_BUF _IOWR('V', \
++ BASE_VIDIOC_PRIVATE_JPG_REVIEW + 1, \
++ struct v4l2_jpg_review_buffer)
++
++#endif
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstisp/include/def.h
+@@ -0,0 +1,122 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#ifndef _DEF_H
++#define _DEF_H
++
++#include <linux/stddef.h>
++
++#ifndef ON
++/* all bits to '1' but prevent "shift overflow" warning */
++#define ON -1
++#endif
++#ifndef OFF
++#define OFF 0
++#endif
++
++#ifndef ENABLE
++/* all bits to '1' but prevent "shift overflow" warning */
++#define ENABLE -1
++#endif
++#ifndef DISABLE
++#define DISABLE 0
++#endif
++
++/* this is crop flag, to enable crop, define it to be 1*/
++#define crop_flag 0
++
++/* this has to be 0, if clauses rely on it */
++#define CI_STATUS_SUCCESS 0
++/* general failure */
++#define CI_STATUS_FAILURE 1
++/* feature not supported */
++#define CI_STATUS_NOTSUPP 2
++/* there's already something going on... */
++#define CI_STATUS_BUSY 3
++/* operation canceled */
++#define CI_STATUS_CANCELED 4
++/* out of memory */
++#define CI_STATUS_OUTOFMEM 5
++/* parameter/value out of range */
++#define CI_STATUS_OUTOFRANGE 6
++/* feature/subsystem is in idle state */
++#define CI_STATUS_IDLE 7
++/* handle is wrong */
++#define CI_STATUS_WRONG_HANDLE 8
++/* the/one/all parameter(s) is a(are) NULL pointer(s) */
++#define CI_STATUS_NULL_POINTER 9
++/* profile not available */
++#define CI_STATUS_NOTAVAILABLE 10
++
++#ifndef UNUSED_PARAM
++#define UNUSED_PARAM(x) ((x) = (x))
++#endif
++
++/* to avoid Lint warnings, use it within const context */
++
++#ifndef UNUSED_PARAM1
++#define UNUSED_PARAM1(x)
++#endif
++
++/*
++ * documentation keywords for pointer arguments, to tell the direction of the
++ * passing
++ */
++
++#ifndef OUT
++/* pointer content is expected to be filled by called function */
++#define OUT
++#endif
++#ifndef IN
++/* pointer content contains parameters from the caller */
++#define IN
++#endif
++#ifndef INOUT
++/* content is expected to be read and changed */
++#define INOUT
++#endif
++
++/* some useful macros */
++
++#ifndef MIN
++#define MIN(x, y) ((x) < (y) ? (x) : (y))
++#endif
++
++#ifndef MAX
++#define MAX(x, y) ((x) > (y) ? (x) : (y))
++#endif
++
++#ifndef ABS
++#define ABS(val) ((val) < 0 ? -(val) : (val))
++#endif
++
++/*
++ * converts a term to a string (two macros are required, never use _VAL2STR()
++ * directly)
++ */
++#define _VAL2STR(x) #x
++#define VAL2STR(x) _VAL2STR(x)
++
++#endif
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstisp/include/mrstisp.h
+@@ -0,0 +1,279 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#ifndef _MRSTISP_H
++#define _MRSTISP_H
++
++#define INTEL_MAJ_VER 0
++#define INTEL_MIN_VER 5
++#define INTEL_PATCH_VER 0
++#define DRIVER_NAME "lnw isp"
++#define VID_HARDWARE_INTEL 100
++
++#define INTEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
++
++#define MRST_ISP_REG_MEMORY_MAP 0xFF0E0000
++
++/* self path maximum width/height, VGA */
++#define INTEL_MAX_WIDTH 640
++#define INTEL_MAX_HEIGHT 480
++
++#define INTEL_MIN_WIDTH 32
++#define INTEL_MIN_HEIGHT 16
++
++/* main path maximum widh/height, 5M */
++#define INTEL_MAX_WIDTH_MP 2600
++#define INTEL_MAX_HEIGHT_MP 2048
++
++/* image size returned by the driver */
++#define INTEL_IMAGE_WIDTH 640
++#define INTEL_IMAGE_HEIGHT 480
++
++/* Default capture queue buffers. */
++#define INTEL_CAPTURE_BUFFERS 3
++
++/* Default capture buffer size. */
++#define INTEL_CAPTURE_BUFSIZE PAGE_ALIGN(INTEL_MAX_WIDTH * INTEL_MAX_HEIGHT * 2)
++#define INTEL_IMAGE_BUFSIEZE (INTEL_IMAGE_WIDTH * INTEL_IMAGE_HEIGHT * 2)
++
++#define MAX_KMALLOC_MEM (4*1024*1024)
++
++#define MEM_SNAPSHOT_MAX_SIZE (1*1024*1024)
++
++#include <media/v4l2-device.h>
++
++enum frame_state {
++ S_UNUSED = 0, /* unused */
++ S_QUEUED, /* ready to capture */
++ S_GRABBING, /* in the process of being captured */
++ S_DONE, /* finished grabbing, but not been synced yet */
++ S_ERROR, /* something bad happened while capturing */
++};
++
++struct frame_info {
++ enum frame_state state;
++ u32 flags;
++};
++
++struct fifo {
++ int front;
++ int back;
++ int data[INTEL_CAPTURE_BUFFERS + 1];
++ struct frame_info info[INTEL_CAPTURE_BUFFERS + 1];
++};
++
++enum mrst_isp_state {
++ S_NOTREADY, /* Not yet initialized */
++ S_IDLE, /* Just hanging around */
++ S_FLAKED, /* Some sort of problem */
++ S_STREAMING /* Streaming data */
++};
++
++struct mrst_isp_buffer {
++ struct videobuf_buffer vb;
++ int fmt_useless;
++};
++
++struct mrst_isp_device {
++ struct v4l2_device v4l2_dev;
++ /* v4l2 device handler */
++ struct video_device *vdev;
++
++ /* locks this structure */
++ struct mutex mutex;
++
++ /* if the port is open or not */
++ int open;
++
++ /* pci information */
++ struct pci_dev *pci_dev;
++ unsigned long mb0;
++ unsigned long mb0_size;
++ unsigned char *regs;
++ unsigned long mb1;
++ unsigned long mb1_size;
++ unsigned char *mb1_va;
++ unsigned short vendorID;
++ unsigned short deviceID;
++ unsigned char revision;
++
++ /* subdev */
++ struct v4l2_subdev *sensor_soc;
++ int sensor_soc_index;
++ struct v4l2_subdev *sensor_raw;
++ int sensor_raw_index;
++ struct v4l2_subdev *sensor_curr;
++ struct v4l2_subdev *motor;
++ struct v4l2_subdev *flash;
++ struct i2c_adapter *adapter_sensor;
++ struct i2c_adapter *adapter_flash;
++
++ int streaming;
++ int buffer_required;
++
++ /* interrupt */
++ unsigned char int_enable;
++ unsigned long int_flag;
++ unsigned long interrupt_count;
++
++ /* frame management */
++
++ /* allocated memory for km_mmap */
++ char *fbuffer;
++
++ /* virtual address of cap buf */
++ char *capbuf;
++
++ /* physcial address of cap buf */
++ u32 capbuf_pa;
++
++ struct fifo frame_queue;
++
++ /* current capture frame number */
++ int cap_frame;
++ /* total frames */
++ int num_frames;
++
++ u32 field_count;
++ u32 pixelformat;
++ u16 depth;
++ u32 bufwidth;
++ u32 bufheight;
++ u32 frame_size;
++ u32 frame_size_used;
++
++
++ enum mrst_isp_state state;
++
++ /* active mappings*/
++ int vmas;
++
++ /* isp system configuration */
++ struct ci_pl_system_config sys_conf;
++
++ struct completion jpe_complete;
++ struct completion mi_complete;
++ int irq_stat;
++
++ spinlock_t lock;
++ spinlock_t qlock;
++ struct videobuf_buffer *active;
++ struct videobuf_buffer *next;
++ struct list_head capture;
++ u32 streambufs;
++ u32 stopbuf;
++ u32 stopflag;
++};
++
++struct mrst_isp_fh {
++ struct mrst_isp_device *dev;
++ struct videobuf_queue vb_q;
++ u32 qbuf_flag;
++};
++
++/* viewfinder mode mask */
++#define VFFLAG_MODE_MASK 0x0000000F
++/*
++ * play on complete LCD, but do not use upscaler
++ * or small camera resolutions, the picture will be
++ * played in the upper left corner)
++ */
++#define VFFLAG_MODE_FULLLCD_DSONLY 0x00000000
++/* display on complete LCD, use upscaler if necessary */
++#define VFFLAG_MODE_FULLLCD_USDS 0x00000001
++/* display full camera picture with black borders on top and bottom */
++#define VFFLAG_MODE_LETTERBOX 0x00000002
++/* use the values given by the user (x, y, w, h, keep_aspect) */
++#define VFFLAG_MODE_USER 0x00000003
++/* hardware RGB conversion */
++#define VFFLAG_HWRGB 0x00000010
++/* horizontal mirror */
++#define VFFLAG_MIRROR 0x00000020
++/* use the main path for viewfinding too. */
++#define VFFLAG_USE_MAINPATH 0x00000040
++/* vertical flipping (mirror) (MARVIN_FEATURE_MI_V3) */
++#define VFFLAG_V_FLIP 0x00000100
++/* rotation 90 degree counterclockwise (left) (MARVIN_FEATURE_MI_V3) */
++#define VFFLAG_ROT90_CCW 0x00000200
++
++/* abbreviations for local debug control ( level | module ) */
++#define DERR (DBG_ERR | DBG_MRV)
++#define DWARN (DBG_WARN | DBG_MRV)
++#define DINFO (DBG_INFO | DBG_MRV)
++
++struct ci_isp_rect {
++ /* zero based x coordinate of the upper left edge of the
++ * rectangle (in pixels)
++ */
++ int x;
++ /* zero based y coordinate of the upper left edge of the
++ * rectangle (in pixels)
++ */
++ int y;
++ /* width of the rectangle in pixels */
++ int w;
++ /* height of the rectangle in pixels */
++ int h;
++};
++
++/* the address/size of one region */
++struct ci_frame_region {
++ unsigned char *phy_addr;
++ unsigned int size;
++};
++
++struct ci_frame_addr {
++ /*
++ * how many regions of the frame, a region is
++ * pages with contiguous physical address
++ */
++ int num_of_regs;
++ struct ci_frame_region *regs;
++};
++
++/* type in mrst_camer*/
++#define MRST_CAMERA_NONE -1
++#define MRST_CAMERA_SOC 0
++#define MRST_CAMERA_RAW 1
++
++struct mrst_camera {
++ int type;
++ char *name;
++ u8 sensor_addr;
++ char *motor_name;
++ u8 motor_addr;
++};
++
++#define MRST_I2C_BUS_FLASH 0
++#define MRST_I2C_BUS_SENSOR 1
++
++long mrst_isp_vidioc_default(struct file *file, void *fh,
++ int cmd, void *arg);
++
++void mrst_timer_start(void);
++
++void mrst_timer_stop(void);
++
++unsigned long mrst_get_micro_sec(void);
++
++#endif
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstisp/include/mrstisp_dp.h
+@@ -0,0 +1,317 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++
++#ifndef _MRV_SLS_H
++#define _MRV_SLS_H
++
++/*
++ * simplified datapath and output formatter/resizer adjustment
++ * can be used to setup the main and self datapathes in a convenient way.
++ */
++
++/* data path descriptor */
++struct ci_isp_datapath_desc {
++ /* width of output picture (after scaling) in pixels */
++ u16 out_w;
++ /* height of output picture (after scaling) in pixels */
++ u16 out_h;
++ /* how to configure the datapath. An or'ed combination of the */
++ u32 flags;
++ /* MRV_DPD_xxx defines */
++};
++
++/*
++ * possible Frags for the Datapath descriptor general features
++ */
++
++/* disables the datapath */
++#define CI_ISP_DPD_DISABLE 0x00000000
++/* enables the datapath in general */
++#define CI_ISP_DPD_ENABLE 0x00000001
++/*
++ * the out_w and out_h members will be ignored. and the
++ * resize module of the datapath is switched off. Note that
++ * the resize module is also needed for croma subsampling
++ */
++#define CI_ISP_DPD_NORESIZE 0x00000002
++/*
++ * The input picture from ISP is being cropped to match the
++ * aspect ratio of the desired output. If this flag is not
++ * set, different scaling factors for X and Y axis
++ * may be used.
++ */
++#define CI_ISP_DPD_KEEPRATIO 0x00000004
++/* mirror the output picture (only applicable for self path) data path mode */
++#define CI_ISP_DPD_MIRROR 0x00000008
++/* mode mask (3 bits) */
++#define CI_ISP_DPD_MODE_MASK 0x00000070
++/* 16(12) bit raw data from ISP block (only applicable for main path) */
++#define CI_ISP_DPD_MODE_ISPRAW_16B 0x00000000
++/* separated Y, Cb and Cr data from ISP block */
++#define CI_ISP_DPD_MODE_ISPYC 0x00000010
++/* raw data from ISP block (only applicable for main path) */
++#define CI_ISP_DPD_MODE_ISPRAW 0x00000020
++/* Jpeg encoding with data from ISP block (only applicable for main path) */
++#define CI_ISP_DPD_MODE_ISPJPEG 0x00000030
++/*
++ * YCbCr data from system memory directly routed to the main/self
++ * path (DMA-read, only applicable for self path)
++ */
++#define CI_ISP_DPD_MODE_DMAYC_DIRECT 0x00000040
++/*
++ * YCbCr data from system memory routed through the main processing
++ * chain substituting ISP data (DMA-read)
++ */
++#define CI_ISP_DPD_MODE_DMAYC_ISP 0x00000050
++/*
++ * YCbCr data from system memory directly routed to the jpeg encoder
++ * (DMA-read, R2B-bufferless encoding, only applicable for main path)
++ */
++#define CI_ISP_DPD_MODE_DMAJPEG_DIRECT 0x00000060
++/*
++ * Jpeg encoding with YCbCr data from system memory routed through the
++ * main processing chain substituting ISP data (DMA-read, only applicable
++ * for main path) top blackline support
++ */
++#define CI_ISP_DPD_MODE_DMAJPEG_ISP 0x00000070
++
++/*
++ * If set, blacklines at the top of the sensor are
++ * shown in the output (if there are any). Note that this
++ * will move the window of interest out of the center
++ * to the upper border, so especially at configurations
++ * with digital zoom, the field of sight is not centered
++ * on the optical axis anymore. If the sensor does not deliver
++ * blacklines, setting this bit has no effect.
++ * additional chroma subsampling (CSS) amount and sample position
++ */
++#define CI_ISP_DPD_BLACKLINES_TOP 0x00000080
++/* horizontal subsampling */
++#define CI_ISP_DPD_CSS_H_MASK 0x00000700
++/* no horizontal subsampling */
++#define CI_ISP_DPD_CSS_H_OFF 0x00000000
++/* horizontal subsampling by 2 */
++#define CI_ISP_DPD_CSS_H2 0x00000100
++/* horizontal subsampling by 4 */
++#define CI_ISP_DPD_CSS_H4 0x00000200
++/* 2 times horizontal upsampling */
++#define CI_ISP_DPD_CSS_HUP2 0x00000500
++/* 4 times horizontal upsampling */
++#define CI_ISP_DPD_CSS_HUP4 0x00000600
++/* vertical subsampling */
++#define CI_ISP_DPD_CSS_V_MASK 0x00003800
++/* no vertical subsampling */
++#define CI_ISP_DPD_CSS_V_OFF 0x00000000
++/* vertical subsampling by 2 */
++#define CI_ISP_DPD_CSS_V2 0x00000800
++/* vertical subsampling by 4 */
++#define CI_ISP_DPD_CSS_V4 0x00001000
++/* 2 times vertical upsampling */
++#define CI_ISP_DPD_CSS_VUP2 0x00002800
++/* 4 times vertical upsampling */
++#define CI_ISP_DPD_CSS_VUP4 0x00003000
++/* apply horizontal chroma phase shift by half the sample distance */
++#define CI_ISP_DPD_CSS_HSHIFT 0x00004000
++/* apply vertical chroma phase shift by half the sample distance */
++#define CI_ISP_DPD_CSS_VSHIFT 0x00008000
++
++/*
++ * Hardware RGB conversion (currly, only supported for self path)
++ * output mode mask (3 bits, not all combination used yet)
++ */
++#define CI_ISP_DPD_HWRGB_MASK 0x00070000
++/* no rgb conversion */
++#define CI_ISP_DPD_HWRGB_OFF 0x00000000
++/* conversion to RGB565 */
++#define CI_ISP_DPD_HWRGB_565 0x00010000
++/* conversion to RGB666 */
++#define CI_ISP_DPD_HWRGB_666 0x00020000
++/* conversion to RGB888 */
++#define CI_ISP_DPD_HWRGB_888 0x00030000
++
++#define CI_ISP_DPD_YUV_420 0x00040000
++#define CI_ISP_DPD_YUV_422 0x00050000
++#define CI_ISP_DPD_YUV_NV12 0x00060000
++#define CI_ISP_DPD_YUV_YUYV 0x00070000
++/*
++ * DMA-read feature input format. (depends on chip derivative if
++ * supported for both pathes, self or not at all)
++ */
++
++/* input mode mask (2 bits) */
++#define CI_ISP_DPD_DMA_IN_MASK 0x00180000
++/* input is YCbCr 422 */
++#define CI_ISP_DPD_DMA_IN_422 0x00000000
++/* input is YCbCr 444 */
++#define CI_ISP_DPD_DMA_IN_444 0x00080000
++/* input is YCbCr 420 */
++#define CI_ISP_DPD_DMA_IN_420 0x00100000
++/* input is YCbCr 411 */
++#define CI_ISP_DPD_DMA_IN_411 0x00180000
++
++/*
++ * Upscaling interpolation mode (tells how newly created pixels
++ * will be interpolated from the existing ones)
++ * Upscaling interpolation mode mask (2 bits, not all combinations
++ * used yet)
++ */
++#define CI_ISP_DPD_UPSCALE_MASK 0x00600000
++/* smooth edges, linear interpolation */
++#define CI_ISP_DPD_UPSCALE_SMOOTH_LIN 0x00000000
++/*
++ * sharp edges, no interpolation, just duplicate pixels, creates
++ * the typical 'blocky' effect.
++ */
++#define CI_ISP_DPD_UPSCALE_SHARP 0x00200000
++
++/*
++ * additional luminance phase shift
++ * apply horizontal luminance phase shift by half the sample distance
++ */
++#define CI_ISP_DPD_LUMA_HSHIFT 0x00800000
++/* apply vertical luminance phase shift by half the sample distance */
++#define CI_ISP_DPD_LUMA_VSHIFT 0x01000000
++
++/*
++ * picture flipping and rotation
++ * Note that when combining the flags, the rotation is applied first.
++ * This enables to configure all 8 possible orientations
++ */
++
++/* horizontal flipping - same as mirroring */
++#define CI_ISP_DPD_H_FLIP CI_ISP_DPD_MIRROR
++/* vertical flipping */
++#define CI_ISP_DPD_V_FLIP 0x02000000
++/* rotation 90 degrees counter-clockwise */
++#define CI_ISP_DPD_90DEG_CCW 0x04000000
++
++/*
++ * switch to differentiate between full range of values for YCbCr (0-255)
++ * and restricted range (16-235 for Y) (16-240 for CbCr)'
++ * if set leads to unrestricted range (0-255) for YCbCr
++ * package length of a system interface transfer
++ */
++#define CI_ISP_DPD_YCBCREXT 0x10000000
++/* burst mask (2 bits) */
++#define CI_ISP_DPD_BURST_MASK 0x60000000
++/* AHB 4 beat burst */
++#define CI_ISP_DPD_BURST_4 0x00000000
++/* AHB 8 beat burst */
++#define CI_ISP_DPD_BURST_8 0x20000000
++/* AHB 16 beat burst */
++#define CI_ISP_DPD_BURST_16 0x40000000
++
++/* configures main and self datapathes and scaler for data coming from the
++ * ISP */
++
++
++int ci_datapath_isp(const struct ci_pl_system_config *sys_conf,
++ const struct ci_sensor_config *isi_config,
++ const struct ci_isp_datapath_desc *main,
++ const struct ci_isp_datapath_desc *self, int zoom);
++
++
++/*
++ * Coordinate transformations: The pixel data coming from the sensor passes
++ * through the ISP output formatter where they may be cropped and through
++ * the main path scaler where they may be stretched and/or squeezed. Thus,
++ * the coordinate systems of input and output are different, but somewhat
++ * related. Further, we can do digital zoom, which adds a third coordinate
++ * system: the virtual input (e.g. a cropped sensor frame zoomed in to the
++ * full sensor frame size. Following routines are intended to transform
++ * pixel resp. window positions from one coordinate systen to another.
++ * Folloin coordinate systems exist: Cam : original frame coming from the
++ * camera VCam : virtual camera; a system in which a cropped original
++ * camera frame is up-scaled to the camera frame size. If no digital zoom
++ * is to be done, Cam and VCam are identical. Main : output of main path
++ * Self : output of self path
++ */
++/* coordinate transformation from (real) camera coordinate system to main
++ * path output */
++int ci_transform_cam2_main(
++ const struct ci_isp_window *wnd_in,
++ struct ci_isp_window *wnd_out
++);
++/* coordinate transformation from (real) camera coordinate system to self
++ * path output */
++int ci_transform_cam2_self(
++ const struct ci_isp_window *wnd_in,
++ struct ci_isp_window *wnd_out
++);
++/* coordinate transformation from virtual camera to real camera coordinate
++ * system */
++void ci_transform_vcam2_cam(
++ const struct ci_sensor_config *isi_sensor_config,
++ const struct ci_isp_window *wnd_in,
++ struct ci_isp_window *wnd_out
++);
++
++/*
++ * Still image snapshot support
++ * The routine re-configures the main path for taking the snapshot. On
++ * successful return, the snapshot has been stored in the given memory
++ * location. Note that the settings of MARVIN will not be restored.
++ */
++
++/*
++ * take the desired snapshot. The type of snapshot (YUV, RAW or JPEG) is
++ * determined by the datapath selection bits in ci_isp_datapath_desc::flags.
++ * Note that the MARVIN configuration may be changed but will not be
++ * restored after the snapshot.
++ */
++int ci_do_snapshot(
++ const struct ci_sensor_config *isi_sensor_config,
++ const struct ci_isp_datapath_desc *main,
++ int zoom,
++ u8 jpeg_compression,
++ struct ci_isp_mi_path_conf *isp_mi_path_conf
++);
++
++
++/* Initialization of the Bad Pixel Detection and Correction */
++int ci_bp_init(
++ const struct ci_isp_bp_corr_config *bp_corr_config,
++ const struct ci_isp_bp_det_config *bp_det_config
++);
++/* Bad Pixel Correction */
++int ci_bp_correction(void);
++/* Disable Bad Pixel Correction and dectection */
++int ci_bp_end(const struct ci_isp_bp_corr_config *bp_corr_config);
++
++/* Capture a whole JPEG snapshot */
++u32 ci_jpe_capture(struct mrst_isp_device *intel,
++ enum ci_isp_conf_update_time update_time);
++int ci_jpe_encode(struct mrst_isp_device *intel,
++ enum ci_isp_conf_update_time update_time,
++ enum ci_isp_jpe_enc_mode mrv_jpe_encMode);
++/* Encode motion JPEG */
++int ci_isp_jpe_enc_motion(enum ci_isp_jpe_enc_mode jpe_enc_mode,
++ u16 frames_num, u32 *byte_count);
++
++void ci_isp_set_yc_mode(void);
++
++/* _MRV_SLS_H */
++#endif
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstisp/include/mrstisp_hw.h
+@@ -0,0 +1,245 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++
++#ifndef _MRV_H
++#define _MRV_H
++
++/* move structure definination to ci_isp_common.h */
++#include "ci_isp_common.h"
++
++/*
++ * FUNCTIONS
++ */
++
++/* sensor struct related functions */
++int ci_isp_bp_write_table(
++ const struct ci_sensor_bp_table *bp_table
++);
++
++int ci_isp_bp_read_table(struct ci_sensor_bp_table *bp_table);
++
++enum ci_isp_path ci_isp_select_path(
++ const struct ci_sensor_config *isi_cfg,
++ u8 *words_per_pixel
++);
++
++int ci_isp_set_input_aquisition(
++ const struct ci_sensor_config *isi_cfg
++);
++
++void ci_isp_set_gamma(
++ const struct ci_sensor_gamma_curve *r,
++ const struct ci_sensor_gamma_curve *g,
++ const struct ci_sensor_gamma_curve *b
++);
++
++int ci_isp_get_wb_meas(struct ci_sensor_awb_mean *awb_mean);
++
++int ci_isp_set_bp_correction(
++ const struct ci_isp_bp_corr_config *bp_corr_config
++);
++
++int ci_isp_set_bp_detection(
++ const struct ci_isp_bp_det_config *bp_det_config
++);
++
++
++int ci_isp_clear_bp_int(void);
++
++u32 ci_isp_get_frame_end_irq_mask_dma(void);
++
++u32 ci_isp_get_frame_end_irq_mask_isp(void);
++int ci_isp_wait_for_frame_end(struct mrst_isp_device *intel);
++
++void ci_isp_set_output_formatter(
++ const struct ci_isp_window *window,
++ enum ci_isp_conf_update_time update_time
++);
++
++int ci_isp_is_set_config(const struct ci_isp_is_config *is_config);
++
++int ci_isp_set_data_path(
++ enum ci_isp_ycs_chn_mode ycs_chn_mode,
++ enum ci_isp_dp_switch dp_switch
++);
++
++void ci_isp_res_set_main_resize(const struct ci_isp_scale *scale,
++ enum ci_isp_conf_update_time update_time,
++ const struct ci_isp_rsz_lut *rsz_lut
++);
++
++void ci_isp_res_get_main_resize(struct ci_isp_scale *scale);
++
++void ci_isp_res_set_self_resize(const struct ci_isp_scale *scale,
++ enum ci_isp_conf_update_time update_time,
++ const struct ci_isp_rsz_lut *rsz_lut
++);
++
++void ci_isp_res_get_self_resize(struct ci_isp_scale *scale);
++
++int ci_isp_mif_set_main_buffer(
++ const struct ci_isp_mi_path_conf *mrv_mi_path_conf,
++ enum ci_isp_conf_update_time update_time
++);
++
++int ci_isp_mif_set_self_buffer(
++ const struct ci_isp_mi_path_conf *mrv_mi_path_conf,
++ enum ci_isp_conf_update_time update_time
++);
++
++int ci_isp_mif_set_dma_buffer(
++ const struct ci_isp_mi_path_conf *mrv_mi_path_conf
++);
++
++void ci_isp_mif_disable_all_paths(int perform_wait_for_frame_end);
++
++int ci_isp_mif_get_main_buffer(
++ struct ci_isp_mi_path_conf *mrv_mi_path_conf
++);
++
++int ci_isp_mif_get_self_buffer(
++ struct ci_isp_mi_path_conf *mrv_mi_path_conf
++);
++
++int ci_isp_mif_set_path_and_orientation(
++ const struct ci_isp_mi_ctrl *mrv_mi_ctrl
++);
++
++int ci_isp_mif_get_path_and_orientation(
++ struct ci_isp_mi_ctrl *mrv_mi_ctrl
++);
++
++int ci_isp_mif_set_configuration(
++ const struct ci_isp_mi_ctrl *mrv_mi_ctrl,
++ const struct ci_isp_mi_path_conf *mrv_mi_mp_path_conf,
++ const struct ci_isp_mi_path_conf *mrv_mi_sp_path_conf,
++ const struct ci_isp_mi_dma_conf *mrv_mi_dma_conf
++);
++
++int ci_isp_mif_set_dma_config(
++ const struct ci_isp_mi_dma_conf *mrv_mi_dma_conf
++);
++
++int ci_isp_mif_get_pixel_per32_bit_of_line(
++ u8 *pixel_per32_bit,
++ enum ci_isp_mif_col_format mrv_mif_sp_format,
++ enum ci_isp_mif_pic_form mrv_mif_pic_form,
++ int luminance_buffer
++);
++
++void ci_isp_set_ext_ycmode(void);
++
++int ci_isp_set_mipi_smia(u32 mode);
++
++void ci_isp_sml_out_set_path(enum ci_isp_data_path main_path);
++
++void ci_isp_set_dma_read_mode(
++ enum ci_isp_dma_read_mode mode,
++ enum ci_isp_conf_update_time update_time
++);
++
++u32 ci_isp_mif_get_byte_cnt(void);
++
++void ci_isp_start(
++ u16 number_of_frames,
++ enum ci_isp_conf_update_time update_time
++);
++
++int ci_isp_jpe_init_ex(
++ u16 hsize,
++ u16 vsize,
++ u8 compression_ratio,
++ u8 jpe_scale
++);
++
++void ci_isp_reset_interrupt_status(void);
++
++void ci_isp_get_output_formatter(struct ci_isp_window *window);
++
++int ci_isp_set_auto_focus(const struct ci_isp_af_config *af_config);
++
++void ci_isp_get_auto_focus_meas(struct ci_isp_af_meas *af_meas);
++
++int ci_isp_chk_bp_int_stat(void);
++
++int ci_isp_bls_get_measured_values(
++ struct ci_isp_bls_measured *bls_measured
++);
++
++int ci_isp_get_wb_measConfig(
++ struct ci_isp_wb_meas_config *wb_meas_config
++);
++
++void ci_isp_col_set_color_processing(
++ const struct ci_isp_color_settings *col
++);
++
++int ci_isp_ie_set_config(const struct ci_isp_ie_config *ie_config);
++
++int ci_isp_set_ls_correction(struct ci_sensor_ls_corr_config *ls_corr_config);
++
++int ci_isp_ls_correction_on_off(int ls_corr_on_off);
++
++int ci_isp_activate_filter(int activate_filter);
++
++int ci_isp_set_filter_params(u8 noise_reduc_level, u8 sharp_level);
++
++int ci_isp_bls_set_config(const struct ci_isp_bls_config *bls_config);
++
++int ci_isp_set_wb_mode(enum ci_isp_awb_mode wb_mode);
++
++int ci_isp_set_wb_meas_config(
++ const struct ci_isp_wb_meas_config *wb_meas_config
++);
++
++int ci_isp_set_wb_auto_hw_config(
++ const struct ci_isp_wb_auto_hw_config *wb_auto_hw_config
++);
++
++void ci_isp_init(void);
++void ci_isp_off(void);
++
++void ci_isp_stop(enum ci_isp_conf_update_time update_time);
++
++void ci_isp_mif_reset_offsets(enum ci_isp_conf_update_time update_time);
++
++int ci_isp_get_wb_measConfig(
++ struct ci_isp_wb_meas_config *wb_meas_config
++);
++
++void ci_isp_set_gamma2(const struct ci_isp_gamma_out_curve *gamma);
++
++void ci_isp_set_demosaic(
++ enum ci_isp_demosaic_mode demosaic_mode,
++ u8 demosaic_th
++);
++
++void mrst_isp_disable_interrupt(struct mrst_isp_device *isp);
++
++void mrst_isp_enable_interrupt(struct mrst_isp_device *isp);
++
++/* #ifndef _MRV_H */
++#endif
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstisp/include/mrstisp_isp.h
+@@ -0,0 +1,42 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++#define MRV_MEAN_LUMA_ARR_SIZE_COL 5
++#define MRV_MEAN_LUMA_ARR_SIZE_ROW 5
++#define MRV_MEAN_LUMA_ARR_SIZE \
++ (MRV_MEAN_LUMA_ARR_SIZE_COL*MRV_MEAN_LUMA_ARR_SIZE_ROW)
++int ci_isp_meas_exposure_initialize_module(void);
++
++int ci_isp_meas_exposure_set_config(const struct ci_isp_window *wnd,
++ const struct ci_isp_exp_ctrl *isp_exp_ctrl);
++int ci_isp_meas_exposure_get_config(struct ci_isp_window *wnd,
++ struct ci_isp_exp_ctrl *isp_exp_ctrl);
++
++int ci_isp_meas_exposure_get_mean_luma_values(
++ struct ci_isp_mean_luma *mrv_mean_luma);
++int ci_isp_meas_exposure_get_mean_luma_by_num(
++ u8 BlockNum, u8 *luma);
++int ci_isp_meas_exposure_get_mean_luma_by_pos(
++ u8 XPos, u8 YPos, u8 *luma);
++int mrst_isp_set_color_conversion_ex(void);
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstisp/include/mrstisp_jpe.h
+@@ -0,0 +1,426 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include "mrstisp.h"
++
++/* DC luma table according to ISO/IEC 10918-1 annex K */
++static const u8 ci_isp_dc_luma_table_annex_k[] = {
++ 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
++ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++ 0x08, 0x09, 0x0a, 0x0b
++};
++
++/* DC chroma table according to ISO/IEC 10918-1 annex K */
++static const u8 ci_isp_dc_chroma_table_annex_k[] = {
++ 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
++ 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++ 0x08, 0x09, 0x0a, 0x0b
++};
++
++/* AC luma table according to ISO/IEC 10918-1 annex K */
++static const u8 ci_isp_ac_luma_table_annex_k[] = {
++ 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03,
++ 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7d,
++ 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
++ 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
++ 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
++ 0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
++ 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
++ 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
++ 0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
++ 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
++ 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
++ 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
++ 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
++ 0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
++ 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
++ 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
++ 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
++ 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
++ 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
++ 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
++ 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
++ 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
++ 0xf9, 0xfa
++};
++
++/* AC Chroma table according to ISO/IEC 10918-1 annex K */
++static const u8 ci_isp_ac_chroma_table_annex_k[] = {
++ 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04,
++ 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77,
++ 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
++ 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
++ 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
++ 0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0,
++ 0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34,
++ 0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26,
++ 0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38,
++ 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
++ 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
++ 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
++ 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
++ 0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
++ 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96,
++ 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5,
++ 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4,
++ 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3,
++ 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2,
++ 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
++ 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9,
++ 0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
++ 0xf9, 0xfa
++};
++
++/* luma quantization table 75% quality setting */
++static const u8 ci_isp_yq_table75_per_cent[] = {
++ 0x08, 0x06, 0x06, 0x07, 0x06, 0x05, 0x08, 0x07,
++ 0x07, 0x07, 0x09, 0x09, 0x08, 0x0a, 0x0c, 0x14,
++ 0x0d, 0x0c, 0x0b, 0x0b, 0x0c, 0x19, 0x12, 0x13,
++ 0x0f, 0x14, 0x1d, 0x1a, 0x1f, 0x1e, 0x1d, 0x1a,
++ 0x1c, 0x1c, 0x20, 0x24, 0x2e, 0x27, 0x20, 0x22,
++ 0x2c, 0x23, 0x1c, 0x1c, 0x28, 0x37, 0x29, 0x2c,
++ 0x30, 0x31, 0x34, 0x34, 0x34, 0x1f, 0x27, 0x39,
++ 0x3d, 0x38, 0x32, 0x3c, 0x2e, 0x33, 0x34, 0x32
++};
++
++/* chroma quantization table 75% quality setting */
++static const u8 ci_isp_uv_qtable75_per_cent[] = {
++ 0x09, 0x09, 0x09, 0x0c, 0x0b, 0x0c, 0x18, 0x0d,
++ 0x0d, 0x18, 0x32, 0x21, 0x1c, 0x21, 0x32, 0x32,
++ 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32,
++ 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32,
++ 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32,
++ 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32,
++ 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32,
++ 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32
++};
++
++/*
++ * luma quantization table very low compression(about factor 2)
++ */
++static const u8 ci_isp_yq_table_low_comp1[] = {
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02
++};
++
++/*
++ * chroma quantization table very low compression
++ * (about factor 2)
++ */
++static const u8 ci_isp_uv_qtable_low_comp1[] = {
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02
++};
++
++/*
++ * The jpg Quantization Tables were parsed by jpeg_parser from
++ * jpg images generated by Jasc PaintShopPro.
++ *
++ */
++
++/* 01% */
++
++/* luma quantization table */
++static const u8 ci_isp_yq_table01_per_cent[] = {
++ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
++ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
++ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
++ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
++ 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x01, 0x01,
++ 0x02, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x01, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02
++};
++
++/* chroma quantization table */
++static const u8 ci_isp_uv_qtable01_per_cent[] = {
++ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
++ 0x01, 0x01, 0x02, 0x01, 0x01, 0x01, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
++ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02
++};
++
++/* 20% */
++
++/* luma quantization table */
++static const u8 ci_isp_yq_table20_per_cent[] = {
++ 0x06, 0x04, 0x05, 0x06, 0x05, 0x04, 0x06, 0x06,
++ 0x05, 0x06, 0x07, 0x07, 0x06, 0x08, 0x0a, 0x10,
++ 0x0a, 0x0a, 0x09, 0x09, 0x0a, 0x14, 0x0e, 0x0f,
++ 0x0c, 0x10, 0x17, 0x14, 0x18, 0x18, 0x17, 0x14,
++ 0x16, 0x16, 0x1a, 0x1d, 0x25, 0x1f, 0x1a, 0x1b,
++ 0x23, 0x1c, 0x16, 0x16, 0x20, 0x2c, 0x20, 0x23,
++ 0x26, 0x27, 0x29, 0x2a, 0x29, 0x19, 0x1f, 0x2d,
++ 0x30, 0x2d, 0x28, 0x30, 0x25, 0x28, 0x29, 0x28
++};
++
++/* chroma quantization table */
++static const u8 ci_isp_uv_qtable20_per_cent[] = {
++ 0x07, 0x07, 0x07, 0x0a, 0x08, 0x0a, 0x13, 0x0a,
++ 0x0a, 0x13, 0x28, 0x1a, 0x16, 0x1a, 0x28, 0x28,
++ 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
++ 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
++ 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
++ 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
++ 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
++ 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28
++};
++
++/* 30% */
++
++/* luma quantization table */
++static const u8 ci_isp_yq_table30_per_cent[] = {
++ 0x0a, 0x07, 0x07, 0x08, 0x07, 0x06, 0x0a, 0x08,
++ 0x08, 0x08, 0x0b, 0x0a, 0x0a, 0x0b, 0x0e, 0x18,
++ 0x10, 0x0e, 0x0d, 0x0d, 0x0e, 0x1d, 0x15, 0x16,
++ 0x11, 0x18, 0x23, 0x1f, 0x25, 0x24, 0x22, 0x1f,
++ 0x22, 0x21, 0x26, 0x2b, 0x37, 0x2f, 0x26, 0x29,
++ 0x34, 0x29, 0x21, 0x22, 0x30, 0x41, 0x31, 0x34,
++ 0x39, 0x3b, 0x3e, 0x3e, 0x3e, 0x25, 0x2e, 0x44,
++ 0x49, 0x43, 0x3c, 0x48, 0x37, 0x3d, 0x3e, 0x3b
++};
++
++/* chroma quantization table */
++static const u8 ci_isp_uv_qtable30_per_cent[] = {
++ 0x0a, 0x0b, 0x0b, 0x0e, 0x0d, 0x0e, 0x1c, 0x10,
++ 0x10, 0x1c, 0x3b, 0x28, 0x22, 0x28, 0x3b, 0x3b,
++ 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b,
++ 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b,
++ 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b,
++ 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b,
++ 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b,
++ 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b
++};
++
++
++/* 40% */
++
++/* luma quantization table */
++static const u8 ci_isp_yq_table40_per_cent[] = {
++ 0x0d, 0x09, 0x0a, 0x0b, 0x0a, 0x08, 0x0d, 0x0b,
++ 0x0a, 0x0b, 0x0e, 0x0e, 0x0d, 0x0f, 0x13, 0x20,
++ 0x15, 0x13, 0x12, 0x12, 0x13, 0x27, 0x1c, 0x1e,
++ 0x17, 0x20, 0x2e, 0x29, 0x31, 0x30, 0x2e, 0x29,
++ 0x2d, 0x2c, 0x33, 0x3a, 0x4a, 0x3e, 0x33, 0x36,
++ 0x46, 0x37, 0x2c, 0x2d, 0x40, 0x57, 0x41, 0x46,
++ 0x4c, 0x4e, 0x52, 0x53, 0x52, 0x32, 0x3e, 0x5a,
++ 0x61, 0x5a, 0x50, 0x60, 0x4a, 0x51, 0x52, 0x4f
++};
++
++/* chroma quantization table */
++static const u8 ci_isp_uv_qtable40_per_cent[] = {
++ 0x0e, 0x0e, 0x0e, 0x13, 0x11, 0x13, 0x26, 0x15,
++ 0x15, 0x26, 0x4f, 0x35, 0x2d, 0x35, 0x4f, 0x4f,
++ 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f,
++ 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f,
++ 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f,
++ 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f,
++ 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f,
++ 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f, 0x4f
++};
++
++/* 50% */
++
++/* luma quantization table */
++static const u8 ci_isp_yq_table50_per_cent[] = {
++ 0x10, 0x0b, 0x0c, 0x0e, 0x0c, 0x0a, 0x10, 0x0e,
++ 0x0d, 0x0e, 0x12, 0x11, 0x10, 0x13, 0x18, 0x28,
++ 0x1a, 0x18, 0x16, 0x16, 0x18, 0x31, 0x23, 0x25,
++ 0x1d, 0x28, 0x3a, 0x33, 0x3d, 0x3c, 0x39, 0x33,
++ 0x38, 0x37, 0x40, 0x48, 0x5c, 0x4e, 0x40, 0x44,
++ 0x57, 0x45, 0x37, 0x38, 0x50, 0x6d, 0x51, 0x57,
++ 0x5f, 0x62, 0x67, 0x68, 0x67, 0x3e, 0x4d, 0x71,
++ 0x79, 0x70, 0x64, 0x78, 0x5c, 0x65, 0x67, 0x63
++};
++
++/* chroma quantization table */
++static const u8 ci_isp_uv_qtable50_per_cent[] = {
++ 0x11, 0x12, 0x12, 0x18, 0x15, 0x18, 0x2f, 0x1a,
++ 0x1a, 0x2f, 0x63, 0x42, 0x38, 0x42, 0x63, 0x63,
++ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
++ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
++ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
++ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
++ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
++ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63
++};
++
++/* 60% */
++
++/* luma quantization table */
++static const u8 ci_isp_yq_table60_per_cent[] = {
++ 0x14, 0x0e, 0x0f, 0x12, 0x0f, 0x0d, 0x14, 0x12,
++ 0x10, 0x12, 0x17, 0x15, 0x14, 0x18, 0x1e, 0x32,
++ 0x21, 0x1e, 0x1c, 0x1c, 0x1e, 0x3d, 0x2c, 0x2e,
++ 0x24, 0x32, 0x49, 0x40, 0x4c, 0x4b, 0x47, 0x40,
++ 0x46, 0x45, 0x50, 0x5a, 0x73, 0x62, 0x50, 0x55,
++ 0x6d, 0x56, 0x45, 0x46, 0x64, 0x88, 0x65, 0x6d,
++ 0x77, 0x7b, 0x81, 0x82, 0x81, 0x4e, 0x60, 0x8d,
++ 0x97, 0x8c, 0x7d, 0x96, 0x73, 0x7e, 0x81, 0x7c
++};
++
++/* chroma quantization table */
++static const u8 ci_isp_uv_qtable60_per_cent[] = {
++ 0x15, 0x17, 0x17, 0x1e, 0x1a, 0x1e, 0x3b, 0x21,
++ 0x21, 0x3b, 0x7c, 0x53, 0x46, 0x53, 0x7c, 0x7c,
++ 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c,
++ 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c,
++ 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c,
++ 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c,
++ 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c,
++ 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c
++};
++
++/* 70% */
++
++/* luma quantization table */
++static const u8 ci_isp_yq_table70_per_cent[] = {
++ 0x1b, 0x12, 0x14, 0x17, 0x14, 0x11, 0x1b, 0x17,
++ 0x16, 0x17, 0x1e, 0x1c, 0x1b, 0x20, 0x28, 0x42,
++ 0x2b, 0x28, 0x25, 0x25, 0x28, 0x51, 0x3a, 0x3d,
++ 0x30, 0x42, 0x60, 0x55, 0x65, 0x64, 0x5f, 0x55,
++ 0x5d, 0x5b, 0x6a, 0x78, 0x99, 0x81, 0x6a, 0x71,
++ 0x90, 0x73, 0x5b, 0x5d, 0x85, 0xb5, 0x86, 0x90,
++ 0x9e, 0xa3, 0xab, 0xad, 0xab, 0x67, 0x80, 0xbc,
++ 0xc9, 0xba, 0xa6, 0xc7, 0x99, 0xa8, 0xab, 0xa4
++};
++
++/* chroma quantization table */
++static const u8 ci_isp_uv_qtable70_per_cent[] = {
++ 0x1c, 0x1e, 0x1e, 0x28, 0x23, 0x28, 0x4e, 0x2b,
++ 0x2b, 0x4e, 0xa4, 0x6e, 0x5d, 0x6e, 0xa4, 0xa4,
++ 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4,
++ 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4,
++ 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4,
++ 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4,
++ 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4,
++ 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4
++};
++
++/* 80% */
++
++/* luma quantization table */
++static const u8 ci_isp_yq_table80_per_cent[] = {
++ 0x28, 0x1c, 0x1e, 0x23, 0x1e, 0x19, 0x28, 0x23,
++ 0x21, 0x23, 0x2d, 0x2b, 0x28, 0x30, 0x3c, 0x64,
++ 0x41, 0x3c, 0x37, 0x37, 0x3c, 0x7b, 0x58, 0x5d,
++ 0x49, 0x64, 0x91, 0x80, 0x99, 0x96, 0x8f, 0x80,
++ 0x8c, 0x8a, 0xa0, 0xb4, 0xe6, 0xc3, 0xa0, 0xaa,
++ 0xda, 0xad, 0x8a, 0x8c, 0xc8, 0xff, 0xcb, 0xda,
++ 0xee, 0xf5, 0xff, 0xff, 0xff, 0x9b, 0xc1, 0xff,
++ 0xff, 0xff, 0xfa, 0xff, 0xe6, 0xfd, 0xff, 0xf8
++};
++
++/* chroma quantization table */
++static const u8 ci_isp_uv_qtable80_per_cent[] = {
++ 0x2b, 0x2d, 0x2d, 0x3c, 0x35, 0x3c, 0x76, 0x41,
++ 0x41, 0x76, 0xf8, 0xa5, 0x8c, 0xa5, 0xf8, 0xf8,
++ 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8,
++ 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8,
++ 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8,
++ 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8,
++ 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8,
++ 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8
++};
++
++/* 90% */
++
++/* luma quantization table */
++static const u8 ci_isp_yq_table90_per_cent[] = {
++ 0x50, 0x37, 0x3c, 0x46, 0x3c, 0x32, 0x50, 0x46,
++ 0x41, 0x46, 0x5a, 0x55, 0x50, 0x5f, 0x78, 0xc8,
++ 0x82, 0x78, 0x6e, 0x6e, 0x78, 0xf5, 0xaf, 0xb9,
++ 0x91, 0xc8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
++};
++
++/* chroma quantization table */
++static const u8 ci_isp_uv_qtable90_per_cent[] = {
++ 0x55, 0x5a, 0x5a, 0x78, 0x69, 0x78, 0xeb, 0x82,
++ 0x82, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
++};
++
++/* 99% */
++
++/* luma quantization table */
++static const u8 ci_isp_yq_table99_per_cent[] = {
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
++};
++
++/* chroma quantization table */
++static const u8 ci_isp_uv_qtable99_per_cent[] = {
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
++};
++
++int ci_isp_wait_for_vsyncHelper(void);
++void ci_isp_jpe_set_tables(u8 compression_ratio);
++void ci_isp_jpe_select_tables(void);
++void ci_isp_jpe_set_config(u16 hsize, u16 vsize, int jpe_scale);
++int ci_isp_jpe_generate_header(struct mrst_isp_device *intel, u8 header_mode);
++void ci_isp_jpe_prep_enc(enum ci_isp_jpe_enc_mode jpe_enc_mode);
++int ci_isp_jpe_wait_for_header_gen_done(struct mrst_isp_device *intel);
++int ci_isp_jpe_wait_for_encode_done(struct mrst_isp_device *intel);
++
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstisp/include/mrstisp_reg.h
+@@ -0,0 +1,4698 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#ifndef _MRV_PRIV_H
++#define _MRV_PRIV_H
++
++
++#define MRV_ISP_GAMMA_R_Y_ARR_SIZE 17
++#define MRV_ISP_GAMMA_G_Y_ARR_SIZE 17
++#define MRV_ISP_GAMMA_B_Y_ARR_SIZE 17
++#define MRV_ISP_CT_COEFF_ARR_SIZE 9
++#define MRV_ISP_GAMMA_OUT_Y_ARR_SIZE 17
++#define MRV_ISP_BP_NEW_TABLE_ARR_SIZE 8
++#define MRV_ISP_HIST_BIN_ARR_SIZE 16
++
++struct isp_register {
++ u32 vi_ccl;
++ u32 vi_custom_reg1;
++ u32 vi_id;
++ u32 vi_custom_reg2;
++ u32 vi_iccl;
++ u32 vi_ircl;
++ u32 vi_dpcl;
++
++ u32 notused_mrvbase1;
++
++
++ u32 notused_mrvbase2[(0x200 - 0x20) / 4];
++
++ u32 img_eff_ctrl;
++ u32 img_eff_color_sel;
++ u32 img_eff_mat_1;
++ u32 img_eff_mat_2;
++ u32 img_eff_mat_3;
++ u32 img_eff_mat_4;
++ u32 img_eff_mat_5;
++ u32 img_eff_tint;
++ u32 img_eff_ctrl_shd;
++ u32 notused_imgeff[(0x300 - 0x224) / 4];
++
++
++ u32 super_imp_ctrl;
++ u32 super_imp_offset_x;
++ u32 super_imp_offset_y;
++ u32 super_imp_color_y;
++ u32 super_imp_color_cb;
++ u32 super_imp_color_cr;
++ u32 notused_simp[(0x400 - 0x318) / 4];
++
++ u32 isp_ctrl;
++ u32 isp_acq_prop;
++ u32 isp_acq_h_offs;
++ u32 isp_acq_v_offs;
++ u32 isp_acq_h_size;
++ u32 isp_acq_v_size;
++ u32 isp_acq_nr_frames;
++ u32 isp_gamma_dx_lo;
++ u32 isp_gamma_dx_hi;
++ u32 isp_gamma_r_y[MRV_ISP_GAMMA_R_Y_ARR_SIZE];
++ u32 isp_gamma_g_y[MRV_ISP_GAMMA_G_Y_ARR_SIZE];
++ u32 isp_gamma_b_y[MRV_ISP_GAMMA_B_Y_ARR_SIZE];
++
++
++ u32 notused_ispbls1[(0x510 - 0x4F0) / 4];
++
++ u32 isp_awb_prop;
++ u32 isp_awb_h_offs;
++ u32 isp_awb_v_offs;
++ u32 isp_awb_h_size;
++ u32 isp_awb_v_size;
++ u32 isp_awb_frames;
++ u32 isp_awb_ref;
++ u32 isp_awb_thresh;
++
++ u32 notused_ispawb2[(0x538-0x530)/4];
++
++ u32 isp_awb_gain_g;
++ u32 isp_awb_gain_rb;
++
++ u32 isp_awb_white_cnt;
++ u32 isp_awb_mean;
++
++ u32 notused_ispae[(0x570 - 0x548) / 4];
++ u32 isp_cc_coeff_0;
++ u32 isp_cc_coeff_1;
++ u32 isp_cc_coeff_2;
++ u32 isp_cc_coeff_3;
++ u32 isp_cc_coeff_4;
++ u32 isp_cc_coeff_5;
++ u32 isp_cc_coeff_6;
++ u32 isp_cc_coeff_7;
++ u32 isp_cc_coeff_8;
++
++ u32 isp_out_h_offs;
++ u32 isp_out_v_offs;
++ u32 isp_out_h_size;
++ u32 isp_out_v_size;
++
++
++ u32 isp_demosaic;
++ u32 isp_flags_shd;
++
++ u32 isp_out_h_offs_shd;
++ u32 isp_out_v_offs_shd;
++ u32 isp_out_h_size_shd;
++ u32 isp_out_v_size_shd;
++
++
++ u32 isp_imsc;
++ u32 isp_ris;
++ u32 isp_mis;
++ u32 isp_icr;
++ u32 isp_isr;
++
++ u32 isp_ct_coeff[MRV_ISP_CT_COEFF_ARR_SIZE];
++
++ u32 isp_gamma_out_mode;
++ u32 isp_gamma_out_y[MRV_ISP_GAMMA_OUT_Y_ARR_SIZE];
++
++
++ u32 isp_err;
++ u32 isp_err_clr;
++
++
++ u32 isp_frame_count;
++
++ u32 isp_ct_offset_r;
++ u32 isp_ct_offset_g;
++ u32 isp_ct_offset_b;
++ u32 notused_ispctoffs[(0x660 - 0x654) / 4];
++
++
++ u32 isp_flash_cmd;
++ u32 isp_flash_config;
++ u32 isp_flash_prediv;
++ u32 isp_flash_delay;
++ u32 isp_flash_time;
++ u32 isp_flash_maxp;
++ u32 notused_ispflash[(0x680 - 0x678) / 4];
++
++
++ u32 isp_sh_ctrl;
++ u32 isp_sh_prediv;
++ u32 isp_sh_delay;
++ u32 isp_sh_time;
++ u32 notused_ispsh[(0x800 - 0x690) / 4];
++
++ u32 c_proc_ctrl;
++ u32 c_proc_contrast;
++ u32 c_proc_brightness;
++ u32 c_proc_saturation;
++ u32 c_proc_hue;
++ u32 notused_cproc[(0xC00 - 0x814) / 4];
++
++ u32 mrsz_ctrl;
++ u32 mrsz_scale_hy;
++ u32 mrsz_scale_hcb;
++ u32 mrsz_scale_hcr;
++ u32 mrsz_scale_vy;
++ u32 mrsz_scale_vc;
++ u32 mrsz_phase_hy;
++ u32 mrsz_phase_hc;
++ u32 mrsz_phase_vy;
++ u32 mrsz_phase_vc;
++ u32 mrsz_scale_lut_addr;
++ u32 mrsz_scale_lut;
++ u32 mrsz_ctrl_shd;
++ u32 mrsz_scale_hy_shd;
++ u32 mrsz_scale_hcb_shd;
++ u32 mrsz_scale_hcr_shd;
++ u32 mrsz_scale_vy_shd;
++ u32 mrsz_scale_vc_shd;
++ u32 mrsz_phase_hy_shd;
++ u32 mrsz_phase_hc_shd;
++ u32 mrsz_phase_vy_shd;
++ u32 mrsz_phase_vc_shd;
++ u32 notused_mrsz[(0x1000 - 0x0C58) / 4];
++
++ u32 srsz_ctrl;
++ u32 srsz_scale_hy;
++ u32 srsz_scale_hcb;
++ u32 srsz_scale_hcr;
++ u32 srsz_scale_vy;
++ u32 srsz_scale_vc;
++ u32 srsz_phase_hy;
++ u32 srsz_phase_hc;
++ u32 srsz_phase_vy;
++ u32 srsz_phase_vc;
++ u32 srsz_scale_lut_addr;
++ u32 srsz_scale_lut;
++ u32 srsz_ctrl_shd;
++ u32 srsz_scale_hy_shd;
++ u32 srsz_scale_hcb_shd;
++ u32 srsz_scale_hcr_shd;
++ u32 srsz_scale_vy_shd;
++ u32 srsz_scale_vc_shd;
++ u32 srsz_phase_hy_shd;
++ u32 srsz_phase_hc_shd;
++ u32 srsz_phase_vy_shd;
++ u32 srsz_phase_vc_shd;
++ u32 notused_srsz[(0x1400 - 0x1058) / 4];
++
++ u32 mi_ctrl;
++ u32 mi_init;
++ u32 mi_mp_y_base_ad_init;
++ u32 mi_mp_y_size_init;
++ u32 mi_mp_y_offs_cnt_init;
++ u32 mi_mp_y_offs_cnt_start;
++ u32 mi_mp_y_irq_offs_init;
++ u32 mi_mp_cb_base_ad_init;
++ u32 mi_mp_cb_size_init;
++ u32 mi_mp_cb_offs_cnt_init;
++ u32 mi_mp_cb_offs_cnt_start;
++ u32 mi_mp_cr_base_ad_init;
++ u32 mi_mp_cr_size_init;
++ u32 mi_mp_cr_offs_cnt_init;
++ u32 mi_mp_cr_offs_cnt_start;
++ u32 mi_sp_y_base_ad_init;
++ u32 mi_sp_y_size_init;
++ u32 mi_sp_y_offs_cnt_init;
++ u32 mi_sp_y_offs_cnt_start;
++ u32 mi_sp_y_llength;
++ u32 mi_sp_cb_base_ad_init;
++ u32 mi_sp_cb_size_init;
++ u32 mi_sp_cb_offs_cnt_init;
++ u32 mi_sp_cb_offs_cnt_start;
++ u32 mi_sp_cr_base_ad_init;
++ u32 mi_sp_cr_size_init;
++ u32 mi_sp_cr_offs_cnt_init;
++ u32 mi_sp_cr_offs_cnt_start;
++ u32 mi_byte_cnt;
++ u32 mi_ctrl_shd;
++ u32 mi_mp_y_base_ad_shd;
++ u32 mi_mp_y_size_shd;
++ u32 mi_mp_y_offs_cnt_shd;
++ u32 mi_mp_y_irq_offs_shd;
++ u32 mi_mp_cb_base_ad_shd;
++ u32 mi_mp_cb_size_shd;
++ u32 mi_mp_cb_offs_cnt_shd;
++ u32 mi_mp_cr_base_ad_shd;
++ u32 mi_mp_cr_size_shd;
++ u32 mi_mp_cr_offs_cnt_shd;
++ u32 mi_sp_y_base_ad_shd;
++ u32 mi_sp_y_size_shd;
++ u32 mi_sp_y_offs_cnt_shd;
++
++ u32 notused_mi1;
++
++ u32 mi_sp_cb_base_ad_shd;
++ u32 mi_sp_cb_size_shd;
++ u32 mi_sp_cb_offs_cnt_shd;
++ u32 mi_sp_cr_base_ad_shd;
++ u32 mi_sp_cr_size_shd;
++ u32 mi_sp_cr_offs_cnt_shd;
++ u32 mi_dma_y_pic_start_ad;
++ u32 mi_dma_y_pic_width;
++ u32 mi_dma_y_llength;
++ u32 mi_dma_y_pic_size;
++ u32 mi_dma_cb_pic_start_ad;
++ u32 notused_mi2[(0x14E8 - 0x14DC) / 4];
++ u32 mi_dma_cr_pic_start_ad;
++ u32 notused_mi3[(0x14F8 - 0x14EC) / 4];
++ u32 mi_imsc;
++ u32 mi_ris;
++ u32 mi_mis;
++ u32 mi_icr;
++ u32 mi_isr;
++ u32 mi_status;
++ u32 mi_status_clr;
++ u32 mi_sp_y_pic_width;
++ u32 mi_sp_y_pic_height;
++ u32 mi_sp_y_pic_size;
++ u32 mi_dma_ctrl;
++ u32 mi_dma_start;
++ u32 mi_dma_status;
++ u32 notused_mi6[(0x1800 - 0x152C) / 4];
++ u32 jpe_gen_header;
++ u32 jpe_encode;
++
++ u32 jpe_init;
++
++ u32 jpe_y_scale_en;
++ u32 jpe_cbcr_scale_en;
++ u32 jpe_table_flush;
++ u32 jpe_enc_hsize;
++ u32 jpe_enc_vsize;
++ u32 jpe_pic_format;
++ u32 jpe_restart_interval;
++ u32 jpe_tq_y_select;
++ u32 jpe_tq_u_select;
++ u32 jpe_tq_v_select;
++ u32 jpe_dc_table_select;
++ u32 jpe_ac_table_select;
++ u32 jpe_table_data;
++ u32 jpe_table_id;
++ u32 jpe_tac0_len;
++ u32 jpe_tdc0_len;
++ u32 jpe_tac1_len;
++ u32 jpe_tdc1_len;
++ u32 notused_jpe2;
++ u32 jpe_encoder_busy;
++ u32 jpe_header_mode;
++ u32 jpe_encode_mode;
++ u32 jpe_debug;
++ u32 jpe_error_imr;
++ u32 jpe_error_ris;
++ u32 jpe_error_mis;
++ u32 jpe_error_icr;
++ u32 jpe_error_isr;
++ u32 jpe_status_imr;
++ u32 jpe_status_ris;
++ u32 jpe_status_mis;
++ u32 jpe_status_icr;
++ u32 jpe_status_isr;
++ u32 notused_jpe3[(0x1A00 - 0x1890) / 4];
++
++ u32 smia_ctrl;
++ u32 smia_status;
++ u32 smia_imsc;
++ u32 smia_ris;
++ u32 smia_mis;
++ u32 smia_icr;
++ u32 smia_isr;
++ u32 smia_data_format_sel;
++ u32 smia_sof_emb_data_lines;
++
++ u32 smia_emb_hstart;
++ u32 smia_emb_hsize;
++ u32 smia_emb_vstart;
++
++ u32 smia_num_lines;
++ u32 smia_emb_data_fifo;
++
++ u32 smia_fifo_fill_level;
++ u32 notused_smia2[(0x1A40 - 0x1A3C) / 4];
++
++ u32 notused_smia3[(0x1A60 - 0x1A40) / 4];
++ u32 notused_smia4[(0x1C00 - 0x1A60) / 4];
++
++
++ u32 mipi_ctrl;
++ u32 mipi_status;
++ u32 mipi_imsc;
++ u32 mipi_ris;
++ u32 mipi_mis;
++ u32 mipi_icr;
++ u32 mipi_isr;
++ u32 mipi_cur_data_id;
++ u32 mipi_img_data_sel;
++ u32 mipi_add_data_sel_1;
++ u32 mipi_add_data_sel_2;
++ u32 mipi_add_data_sel_3;
++ u32 mipi_add_data_sel_4;
++ u32 mipi_add_data_fifo;
++ u32 mipi_add_data_fill_level;
++ u32 notused_mipi[(0x2000 - 0x1C3C) / 4];
++
++
++ u32 isp_afm_ctrl;
++ u32 isp_afm_lt_a;
++ u32 isp_afm_rb_a;
++ u32 isp_afm_lt_b;
++ u32 isp_afm_rb_b;
++ u32 isp_afm_lt_c;
++ u32 isp_afm_rb_c;
++ u32 isp_afm_thres;
++ u32 isp_afm_var_shift;
++ u32 isp_afm_sum_a;
++ u32 isp_afm_sum_b;
++ u32 isp_afm_sum_c;
++ u32 isp_afm_lum_a;
++ u32 isp_afm_lum_b;
++ u32 isp_afm_lum_c;
++ u32 notused_ispafm[(0x2100 - 0x203C) / 4];
++
++
++ u32 isp_bp_ctrl;
++ u32 isp_bp_cfg1;
++ u32 isp_bp_cfg2;
++ u32 isp_bp_number;
++ u32 isp_bp_table_addr;
++ u32 isp_bp_table_data;
++ u32 isp_bp_new_number;
++ u32 isp_bp_new_table[MRV_ISP_BP_NEW_TABLE_ARR_SIZE];
++
++ u32 notused_ispbp[(0x2200 - 0x213C) / 4];
++
++
++ u32 isp_lsc_ctrl;
++ u32 isp_lsc_r_table_addr;
++ u32 isp_lsc_g_table_addr;
++ u32 isp_lsc_b_table_addr;
++ u32 isp_lsc_r_table_data;
++ u32 isp_lsc_g_table_data;
++ u32 isp_lsc_b_table_data;
++ u32 notused_isplsc1;
++ u32 isp_lsc_xgrad_01;
++ u32 isp_lsc_xgrad_23;
++ u32 isp_lsc_xgrad_45;
++ u32 isp_lsc_xgrad_67;
++ u32 isp_lsc_ygrad_01;
++ u32 isp_lsc_ygrad_23;
++ u32 isp_lsc_ygrad_45;
++ u32 isp_lsc_ygrad_67;
++ u32 isp_lsc_xsize_01;
++ u32 isp_lsc_xsize_23;
++ u32 isp_lsc_xsize_45;
++ u32 isp_lsc_xsize_67;
++ u32 isp_lsc_ysize_01;
++ u32 isp_lsc_ysize_23;
++ u32 isp_lsc_ysize_45;
++ u32 isp_lsc_ysize_67;
++ u32 notused_isplsc2[(0x2300 - 0x2260) / 4];
++
++
++ u32 isp_is_ctrl;
++ u32 isp_is_recenter;
++
++ u32 isp_is_h_offs;
++ u32 isp_is_v_offs;
++ u32 isp_is_h_size;
++ u32 isp_is_v_size;
++
++ u32 isp_is_max_dx;
++ u32 isp_is_max_dy;
++ u32 isp_is_displace;
++
++ u32 isp_is_h_offs_shd;
++ u32 isp_is_v_offs_shd;
++ u32 isp_is_h_size_shd;
++ u32 isp_is_v_size_shd;
++ u32 notused_ispis4[(0x2400 - 0x2334) / 4];
++
++ u32 isp_hist_prop;
++ u32 isp_hist_h_offs;
++ u32 isp_hist_v_offs;
++ u32 isp_hist_h_size;
++ u32 isp_hist_v_size;
++ u32 isp_hist_bin[MRV_ISP_HIST_BIN_ARR_SIZE];
++ u32 notused_isphist[(0x2500-0x2454)/4];
++
++ u32 isp_filt_mode;
++ u32 _notused_28[(0x2528 - 0x2504) / 4];
++ u32 isp_filt_thresh_bl0;
++ u32 isp_filt_thresh_bl1;
++ u32 isp_filt_thresh_sh0;
++ u32 isp_filt_thresh_sh1;
++ u32 isp_filt_lum_weight;
++ u32 isp_filt_fac_sh1;
++ u32 isp_filt_fac_sh0;
++ u32 isp_filt_fac_mid;
++ u32 isp_filt_fac_bl0;
++ u32 isp_filt_fac_bl1;
++ u32 notused_ispfilt[(0x2580 - 0x2550) / 4];
++
++ u32 notused_ispcac[(0x2600 - 0x2580) / 4];
++
++ u32 isp_exp_ctrl;
++ u32 isp_exp_h_offset;
++ u32 isp_exp_v_offset;
++ u32 isp_exp_h_size;
++ u32 isp_exp_v_size;
++ u32 isp_exp_mean_00;
++ u32 isp_exp_mean_10;
++ u32 isp_exp_mean_20;
++ u32 isp_exp_mean_30;
++ u32 isp_exp_mean_40;
++ u32 isp_exp_mean_01;
++ u32 isp_exp_mean_11;
++ u32 isp_exp_mean_21;
++ u32 isp_exp_mean_31;
++ u32 isp_exp_mean_41;
++ u32 isp_exp_mean_02;
++ u32 isp_exp_mean_12;
++ u32 isp_exp_mean_22;
++ u32 isp_exp_mean_32;
++ u32 isp_exp_mean_42;
++ u32 isp_exp_mean_03;
++ u32 isp_exp_mean_13;
++ u32 isp_exp_mean_23;
++ u32 isp_exp_mean_33;
++ u32 isp_exp_mean_43;
++ u32 isp_exp_mean_04;
++ u32 isp_exp_mean_14;
++ u32 isp_exp_mean_24;
++ u32 isp_exp_mean_34;
++ u32 isp_exp_mean_44;
++ u32 notused_ispexp[(0x2700 - 0x2678) / 4];
++
++ u32 isp_bls_ctrl;
++ u32 isp_bls_samples;
++ u32 isp_bls_h1_start;
++ u32 isp_bls_h1_stop;
++ u32 isp_bls_v1_start;
++ u32 isp_bls_v1_stop;
++ u32 isp_bls_h2_start;
++ u32 isp_bls_h2_stop;
++ u32 isp_bls_v2_start;
++ u32 isp_bls_v2_stop;
++ u32 isp_bls_a_fixed;
++ u32 isp_bls_b_fixed;
++ u32 isp_bls_c_fixed;
++ u32 isp_bls_d_fixed;
++ u32 isp_bls_a_measured;
++ u32 isp_bls_b_measured;
++ u32 isp_bls_c_measured;
++ u32 isp_bls_d_measured;
++ u32 notused_ispbls2[(0x2800 - 0x2748) / 4];
++
++
++};
++
++
++
++
++
++
++
++#define MRV_VI_CCLFDIS
++#define MRV_VI_CCLFDIS_MASK 0x00000004
++#define MRV_VI_CCLFDIS_SHIFT 2
++#define MRV_VI_CCLFDIS_ENABLE 0
++#define MRV_VI_CCLFDIS_DISABLE 1
++
++#define MRV_VI_CCLDISS
++#define MRV_VI_CCLDISS_MASK 0x00000002
++#define MRV_VI_CCLDISS_SHIFT 1
++
++#define MRV_REV_ID
++#define MRV_REV_ID_MASK 0xFFFFFFFF
++#define MRV_REV_ID_SHIFT 0
++
++#define MRV_VI_MIPI_CLK_ENABLE
++#define MRV_VI_MIPI_CLK_ENABLE_MASK 0x00000800
++#define MRV_VI_MIPI_CLK_ENABLE_SHIFT 11
++
++
++#define MRV_VI_SMIA_CLK_ENABLE
++#define MRV_VI_SMIA_CLK_ENABLE_MASK 0x00000400
++#define MRV_VI_SMIA_CLK_ENABLE_SHIFT 10
++#define MRV_VI_SIMP_CLK_ENABLE
++#define MRV_VI_SIMP_CLK_ENABLE_MASK 0x00000200
++#define MRV_VI_SIMP_CLK_ENABLE_SHIFT 9
++
++#define MRV_VI_IE_CLK_ENABLE
++#define MRV_VI_IE_CLK_ENABLE_MASK 0x00000100
++#define MRV_VI_IE_CLK_ENABLE_SHIFT 8
++
++#define MRV_VI_EMP_CLK_ENABLE_MASK 0
++#define MRV_VI_MI_CLK_ENABLE
++#define MRV_VI_MI_CLK_ENABLE_MASK 0x00000040
++#define MRV_VI_MI_CLK_ENABLE_SHIFT 6
++
++#define MRV_VI_JPEG_CLK_ENABLE
++#define MRV_VI_JPEG_CLK_ENABLE_MASK 0x00000020
++#define MRV_VI_JPEG_CLK_ENABLE_SHIFT 5
++#define MRV_VI_SRSZ_CLK_ENABLE
++#define MRV_VI_SRSZ_CLK_ENABLE_MASK 0x00000010
++#define MRV_VI_SRSZ_CLK_ENABLE_SHIFT 4
++
++#define MRV_VI_MRSZ_CLK_ENABLE
++#define MRV_VI_MRSZ_CLK_ENABLE_MASK 0x00000008
++#define MRV_VI_MRSZ_CLK_ENABLE_SHIFT 3
++#define MRV_VI_CP_CLK_ENABLE
++#define MRV_VI_CP_CLK_ENABLE_MASK 0x00000002
++#define MRV_VI_CP_CLK_ENABLE_SHIFT 1
++#define MRV_VI_ISP_CLK_ENABLE
++#define MRV_VI_ISP_CLK_ENABLE_MASK 0x00000001
++#define MRV_VI_ISP_CLK_ENABLE_SHIFT 0
++
++
++#define MRV_VI_ALL_CLK_ENABLE
++#define MRV_VI_ALL_CLK_ENABLE_MASK \
++(0 \
++| MRV_VI_MIPI_CLK_ENABLE_MASK \
++| MRV_VI_SMIA_CLK_ENABLE_MASK \
++| MRV_VI_SIMP_CLK_ENABLE_MASK \
++| MRV_VI_IE_CLK_ENABLE_MASK \
++| MRV_VI_EMP_CLK_ENABLE_MASK \
++| MRV_VI_MI_CLK_ENABLE_MASK \
++| MRV_VI_JPEG_CLK_ENABLE_MASK \
++| MRV_VI_SRSZ_CLK_ENABLE_MASK \
++| MRV_VI_MRSZ_CLK_ENABLE_MASK \
++| MRV_VI_CP_CLK_ENABLE_MASK \
++| MRV_VI_ISP_CLK_ENABLE_MASK \
++)
++#define MRV_VI_ALL_CLK_ENABLE_SHIFT 0
++
++#define MRV_VI_MIPI_SOFT_RST
++#define MRV_VI_MIPI_SOFT_RST_MASK 0x00000800
++#define MRV_VI_MIPI_SOFT_RST_SHIFT 11
++
++#define MRV_VI_SMIA_SOFT_RST
++#define MRV_VI_SMIA_SOFT_RST_MASK 0x00000400
++#define MRV_VI_SMIA_SOFT_RST_SHIFT 10
++#define MRV_VI_SIMP_SOFT_RST
++#define MRV_VI_SIMP_SOFT_RST_MASK 0x00000200
++#define MRV_VI_SIMP_SOFT_RST_SHIFT 9
++
++#define MRV_VI_IE_SOFT_RST
++#define MRV_VI_IE_SOFT_RST_MASK 0x00000100
++#define MRV_VI_IE_SOFT_RST_SHIFT 8
++#define MRV_VI_MARVIN_RST
++#define MRV_VI_MARVIN_RST_MASK 0x00000080
++#define MRV_VI_MARVIN_RST_SHIFT 7
++
++#define MRV_VI_EMP_SOFT_RST_MASK 0
++#define MRV_VI_MI_SOFT_RST
++#define MRV_VI_MI_SOFT_RST_MASK 0x00000040
++#define MRV_VI_MI_SOFT_RST_SHIFT 6
++
++#define MRV_VI_JPEG_SOFT_RST
++#define MRV_VI_JPEG_SOFT_RST_MASK 0x00000020
++#define MRV_VI_JPEG_SOFT_RST_SHIFT 5
++#define MRV_VI_SRSZ_SOFT_RST
++#define MRV_VI_SRSZ_SOFT_RST_MASK 0x00000010
++#define MRV_VI_SRSZ_SOFT_RST_SHIFT 4
++
++#define MRV_VI_MRSZ_SOFT_RST
++#define MRV_VI_MRSZ_SOFT_RST_MASK 0x00000008
++#define MRV_VI_MRSZ_SOFT_RST_SHIFT 3
++#define MRV_VI_YCS_SOFT_RST
++#define MRV_VI_YCS_SOFT_RST_MASK 0x00000004
++#define MRV_VI_YCS_SOFT_RST_SHIFT 2
++#define MRV_VI_CP_SOFT_RST
++#define MRV_VI_CP_SOFT_RST_MASK 0x00000002
++#define MRV_VI_CP_SOFT_RST_SHIFT 1
++#define MRV_VI_ISP_SOFT_RST
++#define MRV_VI_ISP_SOFT_RST_MASK 0x00000001
++#define MRV_VI_ISP_SOFT_RST_SHIFT 0
++
++#define MRV_VI_ALL_SOFT_RST
++#define MRV_VI_ALL_SOFT_RST_MASK \
++(0 \
++| MRV_VI_MIPI_SOFT_RST_MASK \
++| MRV_VI_SMIA_SOFT_RST_MASK \
++| MRV_VI_SIMP_SOFT_RST_MASK \
++| MRV_VI_IE_SOFT_RST_MASK \
++| MRV_VI_EMP_SOFT_RST_MASK \
++| MRV_VI_MI_SOFT_RST_MASK \
++| MRV_VI_JPEG_SOFT_RST_MASK \
++| MRV_VI_SRSZ_SOFT_RST_MASK \
++| MRV_VI_MRSZ_SOFT_RST_MASK \
++| MRV_VI_YCS_SOFT_RST_MASK \
++| MRV_VI_CP_SOFT_RST_MASK \
++| MRV_VI_ISP_SOFT_RST_MASK \
++)
++#define MRV_VI_ALL_SOFT_RST_SHIFT 0
++
++
++#define MRV_VI_DMA_SPMUX
++#define MRV_VI_DMA_SPMUX_MASK 0x00000800
++#define MRV_VI_DMA_SPMUX_SHIFT 11
++#define MRV_VI_DMA_SPMUX_CAM 0
++#define MRV_VI_DMA_SPMUX_DMA 1
++#define MRV_VI_DMA_IEMUX
++#define MRV_VI_DMA_IEMUX_MASK 0x00000400
++#define MRV_VI_DMA_IEMUX_SHIFT 10
++#define MRV_VI_DMA_IEMUX_CAM 0
++#define MRV_VI_DMA_IEMUX_DMA 1
++#define MRV_IF_SELECT
++#define MRV_IF_SELECT_MASK 0x00000300
++#define MRV_IF_SELECT_SHIFT 8
++#define MRV_IF_SELECT_PAR 0
++#define MRV_IF_SELECT_SMIA 1
++#define MRV_IF_SELECT_MIPI 2
++#define MRV_VI_DMA_SWITCH
++#define MRV_VI_DMA_SWITCH_MASK 0x00000070
++#define MRV_VI_DMA_SWITCH_SHIFT 4
++#define MRV_VI_DMA_SWITCH_SELF 0
++#define MRV_VI_DMA_SWITCH_SI 1
++#define MRV_VI_DMA_SWITCH_IE 2
++#define MRV_VI_DMA_SWITCH_JPG 3
++#define MRV_VI_CHAN_MODE
++#define MRV_VI_CHAN_MODE_MASK 0x0000000C
++#define MRV_VI_CHAN_MODE_SHIFT 2
++
++#define MRV_VI_CHAN_MODE_OFF 0x00
++#define MRV_VI_CHAN_MODE_Y 0xFF
++#define MRV_VI_CHAN_MODE_MP_RAW 0x01
++#define MRV_VI_CHAN_MODE_MP 0x01
++#define MRV_VI_CHAN_MODE_SP 0x02
++#define MRV_VI_CHAN_MODE_MP_SP 0x03
++
++#define MRV_VI_MP_MUX
++#define MRV_VI_MP_MUX_MASK 0x00000003
++#define MRV_VI_MP_MUX_SHIFT 0
++
++#define MRV_VI_MP_MUX_JPGDIRECT 0x00
++#define MRV_VI_MP_MUX_MP 0x01
++#define MRV_VI_MP_MUX_RAW 0x01
++#define MRV_VI_MP_MUX_JPEG 0x02
++
++
++
++
++#define MRV_IMGEFF_CFG_UPD
++#define MRV_IMGEFF_CFG_UPD_MASK 0x00000010
++#define MRV_IMGEFF_CFG_UPD_SHIFT 4
++#define MRV_IMGEFF_EFFECT_MODE
++#define MRV_IMGEFF_EFFECT_MODE_MASK 0x0000000E
++#define MRV_IMGEFF_EFFECT_MODE_SHIFT 1
++#define MRV_IMGEFF_EFFECT_MODE_GRAY 0
++#define MRV_IMGEFF_EFFECT_MODE_NEGATIVE 1
++#define MRV_IMGEFF_EFFECT_MODE_SEPIA 2
++#define MRV_IMGEFF_EFFECT_MODE_COLOR_SEL 3
++#define MRV_IMGEFF_EFFECT_MODE_EMBOSS 4
++#define MRV_IMGEFF_EFFECT_MODE_SKETCH 5
++#define MRV_IMGEFF_BYPASS_MODE
++#define MRV_IMGEFF_BYPASS_MODE_MASK 0x00000001
++#define MRV_IMGEFF_BYPASS_MODE_SHIFT 0
++#define MRV_IMGEFF_BYPASS_MODE_PROCESS 1
++#define MRV_IMGEFF_BYPASS_MODE_BYPASS 0
++
++#define MRV_IMGEFF_COLOR_THRESHOLD
++#define MRV_IMGEFF_COLOR_THRESHOLD_MASK 0x0000FF00
++#define MRV_IMGEFF_COLOR_THRESHOLD_SHIFT 8
++#define MRV_IMGEFF_COLOR_SELECTION
++#define MRV_IMGEFF_COLOR_SELECTION_MASK 0x00000007
++#define MRV_IMGEFF_COLOR_SELECTION_SHIFT 0
++#define MRV_IMGEFF_COLOR_SELECTION_RGB 0
++#define MRV_IMGEFF_COLOR_SELECTION_B 1
++#define MRV_IMGEFF_COLOR_SELECTION_G 2
++#define MRV_IMGEFF_COLOR_SELECTION_BG 3
++#define MRV_IMGEFF_COLOR_SELECTION_R 4
++#define MRV_IMGEFF_COLOR_SELECTION_BR 5
++#define MRV_IMGEFF_COLOR_SELECTION_GR 6
++#define MRV_IMGEFF_COLOR_SELECTION_BGR 7
++
++#define MRV_IMGEFF_EMB_COEF_21_EN
++#define MRV_IMGEFF_EMB_COEF_21_EN_MASK 0x00008000
++#define MRV_IMGEFF_EMB_COEF_21_EN_SHIFT 15
++#define MRV_IMGEFF_EMB_COEF_21
++#define MRV_IMGEFF_EMB_COEF_21_MASK 0x00007000
++#define MRV_IMGEFF_EMB_COEF_21_SHIFT 12
++
++#define MRV_IMGEFF_EMB_COEF_21_4
++#define MRV_IMGEFF_EMB_COEF_21_4_MASK 0x0000F000
++#define MRV_IMGEFF_EMB_COEF_21_4_SHIFT 12
++#define MRV_IMGEFF_EMB_COEF_13_EN
++#define MRV_IMGEFF_EMB_COEF_13_EN_MASK 0x00000800
++#define MRV_IMGEFF_EMB_COEF_13_EN_SHIFT 11
++#define MRV_IMGEFF_EMB_COEF_13
++#define MRV_IMGEFF_EMB_COEF_13_MASK 0x00000700
++#define MRV_IMGEFF_EMB_COEF_13_SHIFT 8
++
++#define MRV_IMGEFF_EMB_COEF_13_4
++#define MRV_IMGEFF_EMB_COEF_13_4_MASK 0x00000F00
++#define MRV_IMGEFF_EMB_COEF_13_4_SHIFT 8
++#define MRV_IMGEFF_EMB_COEF_12_EN
++#define MRV_IMGEFF_EMB_COEF_12_EN_MASK 0x00000080
++#define MRV_IMGEFF_EMB_COEF_12_EN_SHIFT 7
++#define MRV_IMGEFF_EMB_COEF_12
++#define MRV_IMGEFF_EMB_COEF_12_MASK 0x00000070
++#define MRV_IMGEFF_EMB_COEF_12_SHIFT 4
++
++#define MRV_IMGEFF_EMB_COEF_12_4
++#define MRV_IMGEFF_EMB_COEF_12_4_MASK 0x000000F0
++#define MRV_IMGEFF_EMB_COEF_12_4_SHIFT 4
++#define MRV_IMGEFF_EMB_COEF_11_EN
++#define MRV_IMGEFF_EMB_COEF_11_EN_MASK 0x00000008
++#define MRV_IMGEFF_EMB_COEF_11_EN_SHIFT 3
++#define MRV_IMGEFF_EMB_COEF_11
++#define MRV_IMGEFF_EMB_COEF_11_MASK 0x00000007
++#define MRV_IMGEFF_EMB_COEF_11_SHIFT 0
++
++#define MRV_IMGEFF_EMB_COEF_11_4
++#define MRV_IMGEFF_EMB_COEF_11_4_MASK 0x0000000F
++#define MRV_IMGEFF_EMB_COEF_11_4_SHIFT 0
++
++#define MRV_IMGEFF_EMB_COEF_32_EN
++#define MRV_IMGEFF_EMB_COEF_32_EN_MASK 0x00008000
++#define MRV_IMGEFF_EMB_COEF_32_EN_SHIFT 15
++#define MRV_IMGEFF_EMB_COEF_32
++#define MRV_IMGEFF_EMB_COEF_32_MASK 0x00007000
++#define MRV_IMGEFF_EMB_COEF_32_SHIFT 12
++
++#define MRV_IMGEFF_EMB_COEF_32_4
++#define MRV_IMGEFF_EMB_COEF_32_4_MASK 0x0000F000
++#define MRV_IMGEFF_EMB_COEF_32_4_SHIFT 12
++#define MRV_IMGEFF_EMB_COEF_31_EN
++#define MRV_IMGEFF_EMB_COEF_31_EN_MASK 0x00000800
++#define MRV_IMGEFF_EMB_COEF_31_EN_SHIFT 11
++#define MRV_IMGEFF_EMB_COEF_31
++#define MRV_IMGEFF_EMB_COEF_31_MASK 0x00000700
++#define MRV_IMGEFF_EMB_COEF_31_SHIFT 8
++
++#define MRV_IMGEFF_EMB_COEF_31_4
++#define MRV_IMGEFF_EMB_COEF_31_4_MASK 0x00000F00
++#define MRV_IMGEFF_EMB_COEF_31_4_SHIFT 8
++#define MRV_IMGEFF_EMB_COEF_23_EN
++#define MRV_IMGEFF_EMB_COEF_23_EN_MASK 0x00000080
++#define MRV_IMGEFF_EMB_COEF_23_EN_SHIFT 7
++#define MRV_IMGEFF_EMB_COEF_23
++#define MRV_IMGEFF_EMB_COEF_23_MASK 0x00000070
++#define MRV_IMGEFF_EMB_COEF_23_SHIFT 4
++
++#define MRV_IMGEFF_EMB_COEF_23_4
++#define MRV_IMGEFF_EMB_COEF_23_4_MASK 0x000000F0
++#define MRV_IMGEFF_EMB_COEF_23_4_SHIFT 4
++
++#define MRV_IMGEFF_EMB_COEF_22_EN
++#define MRV_IMGEFF_EMB_COEF_22_EN_MASK 0x00000008
++#define MRV_IMGEFF_EMB_COEF_22_EN_SHIFT 3
++#define MRV_IMGEFF_EMB_COEF_22
++#define MRV_IMGEFF_EMB_COEF_22_MASK 0x00000007
++#define MRV_IMGEFF_EMB_COEF_22_SHIFT 0
++
++#define MRV_IMGEFF_EMB_COEF_22_4
++#define MRV_IMGEFF_EMB_COEF_22_4_MASK 0x0000000F
++#define MRV_IMGEFF_EMB_COEF_22_4_SHIFT 0
++
++#define MRV_IMGEFF_SKET_COEF_13_EN
++#define MRV_IMGEFF_SKET_COEF_13_EN_MASK 0x00008000
++#define MRV_IMGEFF_SKET_COEF_13_EN_SHIFT 15
++#define MRV_IMGEFF_SKET_COEF_13
++#define MRV_IMGEFF_SKET_COEF_13_MASK 0x00007000
++#define MRV_IMGEFF_SKET_COEF_13_SHIFT 12
++
++#define MRV_IMGEFF_SKET_COEF_13_4
++#define MRV_IMGEFF_SKET_COEF_13_4_MASK 0x0000F000
++#define MRV_IMGEFF_SKET_COEF_13_4_SHIFT 12
++#define MRV_IMGEFF_SKET_COEF_12_EN
++#define MRV_IMGEFF_SKET_COEF_12_EN_MASK 0x00000800
++#define MRV_IMGEFF_SKET_COEF_12_EN_SHIFT 11
++#define MRV_IMGEFF_SKET_COEF_12
++#define MRV_IMGEFF_SKET_COEF_12_MASK 0x00000700
++#define MRV_IMGEFF_SKET_COEF_12_SHIFT 8
++
++#define MRV_IMGEFF_SKET_COEF_12_4
++#define MRV_IMGEFF_SKET_COEF_12_4_MASK 0x00000F00
++#define MRV_IMGEFF_SKET_COEF_12_4_SHIFT 8
++#define MRV_IMGEFF_SKET_COEF_11_EN
++#define MRV_IMGEFF_SKET_COEF_11_EN_MASK 0x00000080
++#define MRV_IMGEFF_SKET_COEF_11_EN_SHIFT 7
++#define MRV_IMGEFF_SKET_COEF_11
++#define MRV_IMGEFF_SKET_COEF_11_MASK 0x00000070
++#define MRV_IMGEFF_SKET_COEF_11_SHIFT 4
++
++#define MRV_IMGEFF_SKET_COEF_11_4
++#define MRV_IMGEFF_SKET_COEF_11_4_MASK 0x000000F0
++#define MRV_IMGEFF_SKET_COEF_11_4_SHIFT 4
++#define MRV_IMGEFF_EMB_COEF_33_EN
++#define MRV_IMGEFF_EMB_COEF_33_EN_MASK 0x00000008
++#define MRV_IMGEFF_EMB_COEF_33_EN_SHIFT 3
++#define MRV_IMGEFF_EMB_COEF_33
++#define MRV_IMGEFF_EMB_COEF_33_MASK 0x00000007
++#define MRV_IMGEFF_EMB_COEF_33_SHIFT 0
++
++#define MRV_IMGEFF_EMB_COEF_33_4
++#define MRV_IMGEFF_EMB_COEF_33_4_MASK 0x0000000F
++#define MRV_IMGEFF_EMB_COEF_33_4_SHIFT 0
++
++#define MRV_IMGEFF_SKET_COEF_31_EN
++#define MRV_IMGEFF_SKET_COEF_31_EN_MASK 0x00008000
++#define MRV_IMGEFF_SKET_COEF_31_EN_SHIFT 15
++#define MRV_IMGEFF_SKET_COEF_31
++#define MRV_IMGEFF_SKET_COEF_31_MASK 0x00007000
++#define MRV_IMGEFF_SKET_COEF_31_SHIFT 12
++
++#define MRV_IMGEFF_SKET_COEF_31_4
++#define MRV_IMGEFF_SKET_COEF_31_4_MASK 0x0000F000
++#define MRV_IMGEFF_SKET_COEF_31_4_SHIFT 12
++#define MRV_IMGEFF_SKET_COEF_23_EN
++#define MRV_IMGEFF_SKET_COEF_23_EN_MASK 0x00000800
++#define MRV_IMGEFF_SKET_COEF_23_EN_SHIFT 11
++#define MRV_IMGEFF_SKET_COEF_23
++#define MRV_IMGEFF_SKET_COEF_23_MASK 0x00000700
++#define MRV_IMGEFF_SKET_COEF_23_SHIFT 8
++
++#define MRV_IMGEFF_SKET_COEF_23_4
++#define MRV_IMGEFF_SKET_COEF_23_4_MASK 0x00000F00
++#define MRV_IMGEFF_SKET_COEF_23_4_SHIFT 8
++#define MRV_IMGEFF_SKET_COEF_22_EN
++#define MRV_IMGEFF_SKET_COEF_22_EN_MASK 0x00000080
++#define MRV_IMGEFF_SKET_COEF_22_EN_SHIFT 7
++#define MRV_IMGEFF_SKET_COEF_22
++#define MRV_IMGEFF_SKET_COEF_22_MASK 0x00000070
++#define MRV_IMGEFF_SKET_COEF_22_SHIFT 4
++
++#define MRV_IMGEFF_SKET_COEF_22_4
++#define MRV_IMGEFF_SKET_COEF_22_4_MASK 0x000000F0
++#define MRV_IMGEFF_SKET_COEF_22_4_SHIFT 4
++#define MRV_IMGEFF_SKET_COEF_21_EN
++#define MRV_IMGEFF_SKET_COEF_21_EN_MASK 0x00000008
++#define MRV_IMGEFF_SKET_COEF_21_EN_SHIFT 3
++#define MRV_IMGEFF_SKET_COEF_21
++#define MRV_IMGEFF_SKET_COEF_21_MASK 0x00000007
++#define MRV_IMGEFF_SKET_COEF_21_SHIFT 0
++
++#define MRV_IMGEFF_SKET_COEF_21_4
++#define MRV_IMGEFF_SKET_COEF_21_4_MASK 0x0000000F
++#define MRV_IMGEFF_SKET_COEF_21_4_SHIFT 0
++
++#define MRV_IMGEFF_SKET_COEF_33_EN
++#define MRV_IMGEFF_SKET_COEF_33_EN_MASK 0x00000080
++#define MRV_IMGEFF_SKET_COEF_33_EN_SHIFT 7
++#define MRV_IMGEFF_SKET_COEF_33
++#define MRV_IMGEFF_SKET_COEF_33_MASK 0x00000070
++#define MRV_IMGEFF_SKET_COEF_33_SHIFT 4
++
++#define MRV_IMGEFF_SKET_COEF_33_4
++#define MRV_IMGEFF_SKET_COEF_33_4_MASK 0x000000F0
++#define MRV_IMGEFF_SKET_COEF_33_4_SHIFT 4
++#define MRV_IMGEFF_SKET_COEF_32_EN
++#define MRV_IMGEFF_SKET_COEF_32_EN_MASK 0x00000008
++#define MRV_IMGEFF_SKET_COEF_32_EN_SHIFT 3
++#define MRV_IMGEFF_SKET_COEF_32
++#define MRV_IMGEFF_SKET_COEF_32_MASK 0x00000007
++#define MRV_IMGEFF_SKET_COEF_32_SHIFT 0
++
++#define MRV_IMGEFF_SKET_COEF_32_4
++#define MRV_IMGEFF_SKET_COEF_32_4_MASK 0x0000000F
++#define MRV_IMGEFF_SKET_COEF_32_4_SHIFT 0
++
++#define MRV_IMGEFF_INCR_CR
++#define MRV_IMGEFF_INCR_CR_MASK 0x0000FF00
++#define MRV_IMGEFF_INCR_CR_SHIFT 8
++#define MRV_IMGEFF_INCR_CB
++#define MRV_IMGEFF_INCR_CB_MASK 0x000000FF
++#define MRV_IMGEFF_INCR_CB_SHIFT 0
++
++#define MRV_IMGEFF_EFFECT_MODE_SHD
++#define MRV_IMGEFF_EFFECT_MODE_SHD_MASK 0x0000000E
++#define MRV_IMGEFF_EFFECT_MODE_SHD_SHIFT 1
++
++
++#define MRV_SI_TRANSPARENCY_MODE
++#define MRV_SI_TRANSPARENCY_MODE_MASK 0x00000004
++#define MRV_SI_TRANSPARENCY_MODE_SHIFT 2
++#define MRV_SI_TRANSPARENCY_MODE_DISABLED 1
++#define MRV_SI_TRANSPARENCY_MODE_ENABLED 0
++#define MRV_SI_REF_IMAGE
++#define MRV_SI_REF_IMAGE_MASK 0x00000002
++#define MRV_SI_REF_IMAGE_SHIFT 1
++#define MRV_SI_REF_IMAGE_MEM 1
++#define MRV_SI_REF_IMAGE_IE 0
++#define MRV_SI_BYPASS_MODE
++#define MRV_SI_BYPASS_MODE_MASK 0x00000001
++#define MRV_SI_BYPASS_MODE_SHIFT 0
++#define MRV_SI_BYPASS_MODE_BYPASS 0
++#define MRV_SI_BYPASS_MODE_PROCESS 1
++
++#define MRV_SI_OFFSET_X
++#define MRV_SI_OFFSET_X_MASK 0x00001FFE
++#define MRV_SI_OFFSET_X_SHIFT 0
++#define MRV_SI_OFFSET_X_MAX 0x00001FFE
++
++#define MRV_SI_OFFSET_Y
++#define MRV_SI_OFFSET_Y_MASK 0x00000FFF
++#define MRV_SI_OFFSET_Y_SHIFT 0
++#define MRV_SI_OFFSET_Y_MAX 0x00000FFF
++
++#define MRV_SI_Y_COMP
++#define MRV_SI_Y_COMP_MASK 0x000000FF
++#define MRV_SI_Y_COMP_SHIFT 0
++
++#define MRV_SI_CB_COMP
++#define MRV_SI_CB_COMP_MASK 0x000000FF
++#define MRV_SI_CB_COMP_SHIFT 0
++
++#define MRV_SI_CR_COMP
++#define MRV_SI_CR_COMP_MASK 0x000000FF
++#define MRV_SI_CR_COMP_SHIFT 0
++
++#define MRV_ISP_ISP_CSM_C_RANGE
++#define MRV_ISP_ISP_CSM_C_RANGE_MASK 0x00004000
++#define MRV_ISP_ISP_CSM_C_RANGE_SHIFT 14
++#define MRV_ISP_ISP_CSM_C_RANGE_BT601 0
++#define MRV_ISP_ISP_CSM_C_RANGE_FULL 1
++
++#define MRV_ISP_ISP_CSM_Y_RANGE
++#define MRV_ISP_ISP_CSM_Y_RANGE_MASK 0x00002000
++#define MRV_ISP_ISP_CSM_Y_RANGE_SHIFT 13
++#define MRV_ISP_ISP_CSM_Y_RANGE_BT601 0
++#define MRV_ISP_ISP_CSM_Y_RANGE_FULL 1
++#define MRV_ISP_ISP_FLASH_MODE
++#define MRV_ISP_ISP_FLASH_MODE_MASK 0x00001000
++#define MRV_ISP_ISP_FLASH_MODE_SHIFT 12
++#define MRV_ISP_ISP_FLASH_MODE_INDEP 0
++#define MRV_ISP_ISP_FLASH_MODE_SYNC 1
++#define MRV_ISP_ISP_GAMMA_OUT_ENABLE
++#define MRV_ISP_ISP_GAMMA_OUT_ENABLE_MASK 0x00000800
++#define MRV_ISP_ISP_GAMMA_OUT_ENABLE_SHIFT 11
++
++#define MRV_ISP_ISP_GEN_CFG_UPD
++#define MRV_ISP_ISP_GEN_CFG_UPD_MASK 0x00000400
++#define MRV_ISP_ISP_GEN_CFG_UPD_SHIFT 10
++
++#define MRV_ISP_ISP_CFG_UPD
++#define MRV_ISP_ISP_CFG_UPD_MASK 0x00000200
++#define MRV_ISP_ISP_CFG_UPD_SHIFT 9
++
++
++#define MRV_ISP_ISP_AWB_ENABLE
++#define MRV_ISP_ISP_AWB_ENABLE_MASK 0x00000080
++#define MRV_ISP_ISP_AWB_ENABLE_SHIFT 7
++#define MRV_ISP_ISP_GAMMA_IN_ENABLE
++#define MRV_ISP_ISP_GAMMA_IN_ENABLE_MASK 0x00000040
++#define MRV_ISP_ISP_GAMMA_IN_ENABLE_SHIFT 6
++
++#define MRV_ISP_ISP_INFORM_ENABLE
++#define MRV_ISP_ISP_INFORM_ENABLE_MASK 0x00000010
++#define MRV_ISP_ISP_INFORM_ENABLE_SHIFT 4
++#define MRV_ISP_ISP_MODE
++#define MRV_ISP_ISP_MODE_MASK 0x0000000E
++#define MRV_ISP_ISP_MODE_SHIFT 1
++#define MRV_ISP_ISP_MODE_RAW 0
++#define MRV_ISP_ISP_MODE_656 1
++#define MRV_ISP_ISP_MODE_601 2
++#define MRV_ISP_ISP_MODE_RGB 3
++#define MRV_ISP_ISP_MODE_DATA 4
++#define MRV_ISP_ISP_MODE_RGB656 5
++#define MRV_ISP_ISP_MODE_RAW656 6
++#define MRV_ISP_ISP_ENABLE
++#define MRV_ISP_ISP_ENABLE_MASK 0x00000001
++#define MRV_ISP_ISP_ENABLE_SHIFT 0
++
++#define MRV_ISP_INPUT_SELECTION
++#define MRV_ISP_INPUT_SELECTION_MASK 0x00007000
++#define MRV_ISP_INPUT_SELECTION_SHIFT 12
++#define MRV_ISP_INPUT_SELECTION_12EXT 0
++#define MRV_ISP_INPUT_SELECTION_10ZERO 1
++#define MRV_ISP_INPUT_SELECTION_10MSB 2
++#define MRV_ISP_INPUT_SELECTION_8ZERO 3
++#define MRV_ISP_INPUT_SELECTION_8MSB 4
++#define MRV_ISP_FIELD_SELECTION
++#define MRV_ISP_FIELD_SELECTION_MASK 0x00000600
++#define MRV_ISP_FIELD_SELECTION_SHIFT 9
++#define MRV_ISP_FIELD_SELECTION_BOTH 0
++#define MRV_ISP_FIELD_SELECTION_EVEN 1
++#define MRV_ISP_FIELD_SELECTION_ODD 2
++#define MRV_ISP_CCIR_SEQ
++#define MRV_ISP_CCIR_SEQ_MASK 0x00000180
++#define MRV_ISP_CCIR_SEQ_SHIFT 7
++#define MRV_ISP_CCIR_SEQ_YCBYCR 0
++#define MRV_ISP_CCIR_SEQ_YCRYCB 1
++#define MRV_ISP_CCIR_SEQ_CBYCRY 2
++#define MRV_ISP_CCIR_SEQ_CRYCBY 3
++#define MRV_ISP_CONV_422
++#define MRV_ISP_CONV_422_MASK 0x00000060
++#define MRV_ISP_CONV_422_SHIFT 5
++#define MRV_ISP_CONV_422_CO 0
++#define MRV_ISP_CONV_422_INTER 1
++#define MRV_ISP_CONV_422_NONCO 2
++#define MRV_ISP_BAYER_PAT
++#define MRV_ISP_BAYER_PAT_MASK 0x00000018
++#define MRV_ISP_BAYER_PAT_SHIFT 3
++#define MRV_ISP_BAYER_PAT_RG 0
++#define MRV_ISP_BAYER_PAT_GR 1
++#define MRV_ISP_BAYER_PAT_GB 2
++#define MRV_ISP_BAYER_PAT_BG 3
++#define MRV_ISP_VSYNC_POL
++#define MRV_ISP_VSYNC_POL_MASK 0x00000004
++#define MRV_ISP_VSYNC_POL_SHIFT 2
++#define MRV_ISP_HSYNC_POL
++#define MRV_ISP_HSYNC_POL_MASK 0x00000002
++#define MRV_ISP_HSYNC_POL_SHIFT 1
++#define MRV_ISP_SAMPLE_EDGE
++#define MRV_ISP_SAMPLE_EDGE_MASK 0x00000001
++#define MRV_ISP_SAMPLE_EDGE_SHIFT 0
++
++#define MRV_ISP_ACQ_H_OFFS
++#define MRV_ISP_ACQ_H_OFFS_MASK 0x00003FFF
++#define MRV_ISP_ACQ_H_OFFS_SHIFT 0
++
++#define MRV_ISP_ACQ_V_OFFS
++#define MRV_ISP_ACQ_V_OFFS_MASK 0x00000FFF
++#define MRV_ISP_ACQ_V_OFFS_SHIFT 0
++
++#define MRV_ISP_ACQ_H_SIZE
++#define MRV_ISP_ACQ_H_SIZE_MASK 0x00003FFF
++#define MRV_ISP_ACQ_H_SIZE_SHIFT 0
++
++#define MRV_ISP_ACQ_V_SIZE
++#define MRV_ISP_ACQ_V_SIZE_MASK 0x00000FFF
++#define MRV_ISP_ACQ_V_SIZE_SHIFT 0
++
++
++#define MRV_ISP_ACQ_NR_FRAMES
++#define MRV_ISP_ACQ_NR_FRAMES_MASK 0x000003FF
++#define MRV_ISP_ACQ_NR_FRAMES_SHIFT 0
++#define MRV_ISP_ACQ_NR_FRAMES_MAX \
++ (MRV_ISP_ACQ_NR_FRAMES_MASK >> MRV_ISP_ACQ_NR_FRAMES_SHIFT)
++
++#define MRV_ISP_GAMMA_DX_8
++#define MRV_ISP_GAMMA_DX_8_MASK 0x70000000
++#define MRV_ISP_GAMMA_DX_8_SHIFT 28
++
++#define MRV_ISP_GAMMA_DX_7
++#define MRV_ISP_GAMMA_DX_7_MASK 0x07000000
++#define MRV_ISP_GAMMA_DX_7_SHIFT 24
++
++#define MRV_ISP_GAMMA_DX_6
++#define MRV_ISP_GAMMA_DX_6_MASK 0x00700000
++#define MRV_ISP_GAMMA_DX_6_SHIFT 20
++
++#define MRV_ISP_GAMMA_DX_5
++#define MRV_ISP_GAMMA_DX_5_MASK 0x00070000
++#define MRV_ISP_GAMMA_DX_5_SHIFT 16
++
++#define MRV_ISP_GAMMA_DX_4
++#define MRV_ISP_GAMMA_DX_4_MASK 0x00007000
++#define MRV_ISP_GAMMA_DX_4_SHIFT 12
++
++#define MRV_ISP_GAMMA_DX_3
++#define MRV_ISP_GAMMA_DX_3_MASK 0x00000700
++#define MRV_ISP_GAMMA_DX_3_SHIFT 8
++
++#define MRV_ISP_GAMMA_DX_2
++#define MRV_ISP_GAMMA_DX_2_MASK 0x00000070
++#define MRV_ISP_GAMMA_DX_2_SHIFT 4
++
++#define MRV_ISP_GAMMA_DX_1
++#define MRV_ISP_GAMMA_DX_1_MASK 0x00000007
++#define MRV_ISP_GAMMA_DX_1_SHIFT 0
++
++#define MRV_ISP_GAMMA_DX_16
++#define MRV_ISP_GAMMA_DX_16_MASK 0x70000000
++#define MRV_ISP_GAMMA_DX_16_SHIFT 28
++
++#define MRV_ISP_GAMMA_DX_15
++#define MRV_ISP_GAMMA_DX_15_MASK 0x07000000
++#define MRV_ISP_GAMMA_DX_15_SHIFT 24
++
++#define MRV_ISP_GAMMA_DX_14
++#define MRV_ISP_GAMMA_DX_14_MASK 0x00700000
++#define MRV_ISP_GAMMA_DX_14_SHIFT 20
++
++#define MRV_ISP_GAMMA_DX_13
++#define MRV_ISP_GAMMA_DX_13_MASK 0x00070000
++#define MRV_ISP_GAMMA_DX_13_SHIFT 16
++
++#define MRV_ISP_GAMMA_DX_12
++#define MRV_ISP_GAMMA_DX_12_MASK 0x00007000
++#define MRV_ISP_GAMMA_DX_12_SHIFT 12
++
++#define MRV_ISP_GAMMA_DX_11
++#define MRV_ISP_GAMMA_DX_11_MASK 0x00000700
++#define MRV_ISP_GAMMA_DX_11_SHIFT 8
++
++#define MRV_ISP_GAMMA_DX_10
++#define MRV_ISP_GAMMA_DX_10_MASK 0x00000070
++#define MRV_ISP_GAMMA_DX_10_SHIFT 4
++
++#define MRV_ISP_GAMMA_DX_9
++#define MRV_ISP_GAMMA_DX_9_MASK 0x00000007
++#define MRV_ISP_GAMMA_DX_9_SHIFT 0
++
++#define MRV_ISP_GAMMA_Y
++
++#define MRV_ISP_GAMMA_Y_MASK 0x00000FFF
++
++#define MRV_ISP_GAMMA_Y_SHIFT 0
++#define MRV_ISP_GAMMA_Y_MAX (MRV_ISP_GAMMA_Y_MASK >> MRV_ISP_GAMMA_Y_SHIFT)
++
++#define MRV_ISP_GAMMA_R_Y
++#define MRV_ISP_GAMMA_R_Y_MASK MRV_ISP_GAMMA_Y_MASK
++#define MRV_ISP_GAMMA_R_Y_SHIFT MRV_ISP_GAMMA_Y_SHIFT
++
++#define MRV_ISP_GAMMA_G_Y
++#define MRV_ISP_GAMMA_G_Y_MASK MRV_ISP_GAMMA_Y_MASK
++#define MRV_ISP_GAMMA_G_Y_SHIFT MRV_ISP_GAMMA_Y_SHIFT
++
++#define MRV_ISP_GAMMA_B_Y
++#define MRV_ISP_GAMMA_B_Y_MASK MRV_ISP_GAMMA_Y_MASK
++#define MRV_ISP_GAMMA_B_Y_SHIFT MRV_ISP_GAMMA_Y_SHIFT
++
++ #define MRV_ISP_AWB_MEAS_MODE
++ #define MRV_ISP_AWB_MEAS_MODE_MASK 0x80000000
++ #define MRV_ISP_AWB_MEAS_MODE_SHIFT 31
++#define MRV_ISP_AWB_MAX_EN
++#define MRV_ISP_AWB_MAX_EN_MASK 0x00000004
++#define MRV_ISP_AWB_MAX_EN_SHIFT 2
++#define MRV_ISP_AWB_MODE
++#define MRV_ISP_AWB_MODE_MASK 0x00000003
++#define MRV_ISP_AWB_MODE_SHIFT 0
++#define MRV_ISP_AWB_MODE_MEAS 2
++#define MRV_ISP_AWB_MODE_NOMEAS 0
++
++#define MRV_ISP_AWB_H_OFFS
++#define MRV_ISP_AWB_H_OFFS_MASK 0x00000FFF
++#define MRV_ISP_AWB_H_OFFS_SHIFT 0
++
++#define MRV_ISP_AWB_V_OFFS
++#define MRV_ISP_AWB_V_OFFS_MASK 0x00000FFF
++#define MRV_ISP_AWB_V_OFFS_SHIFT 0
++
++#define MRV_ISP_AWB_H_SIZE
++#define MRV_ISP_AWB_H_SIZE_MASK 0x00001FFF
++#define MRV_ISP_AWB_H_SIZE_SHIFT 0
++
++#define MRV_ISP_AWB_V_SIZE
++#define MRV_ISP_AWB_V_SIZE_MASK 0x00000FFF
++#define MRV_ISP_AWB_V_SIZE_SHIFT 0
++
++
++#define MRV_ISP_AWB_FRAMES
++#define MRV_ISP_AWB_FRAMES_MASK 0x00000007
++#define MRV_ISP_AWB_FRAMES_SHIFT 0
++
++#define MRV_ISP_AWB_REF_CR__MAX_R
++#define MRV_ISP_AWB_REF_CR__MAX_R_MASK 0x0000FF00
++#define MRV_ISP_AWB_REF_CR__MAX_R_SHIFT 8
++#define MRV_ISP_AWB_REF_CB__MAX_B
++#define MRV_ISP_AWB_REF_CB__MAX_B_MASK 0x000000FF
++#define MRV_ISP_AWB_REF_CB__MAX_B_SHIFT 0
++
++#define MRV_ISP_AWB_MAX_Y
++#define MRV_ISP_AWB_MAX_Y_MASK 0xFF000000
++#define MRV_ISP_AWB_MAX_Y_SHIFT 24
++
++#define MRV_ISP_AWB_MIN_Y__MAX_G
++#define MRV_ISP_AWB_MIN_Y__MAX_G_MASK 0x00FF0000
++#define MRV_ISP_AWB_MIN_Y__MAX_G_SHIFT 16
++
++#define MRV_ISP_AWB_MAX_CSUM
++#define MRV_ISP_AWB_MAX_CSUM_MASK 0x0000FF00
++#define MRV_ISP_AWB_MAX_CSUM_SHIFT 8
++#define MRV_ISP_AWB_MIN_C
++#define MRV_ISP_AWB_MIN_C_MASK 0x000000FF
++#define MRV_ISP_AWB_MIN_C_SHIFT 0
++
++#define MRV_ISP_AWB_GAIN_GR
++#define MRV_ISP_AWB_GAIN_GR_MASK 0x03FF0000
++#define MRV_ISP_AWB_GAIN_GR_SHIFT 16
++#define MRV_ISP_AWB_GAIN_GR_MAX (MRV_ISP_AWB_GAIN_GR_MASK >> \
++ MRV_ISP_AWB_GAIN_GR_SHIFT)
++#define MRV_ISP_AWB_GAIN_GB
++#define MRV_ISP_AWB_GAIN_GB_MASK 0x000003FF
++#define MRV_ISP_AWB_GAIN_GB_SHIFT 0
++#define MRV_ISP_AWB_GAIN_GB_MAX (MRV_ISP_AWB_GAIN_GB_MASK >> \
++ MRV_ISP_AWB_GAIN_GB_SHIFT)
++
++#define MRV_ISP_AWB_GAIN_R
++#define MRV_ISP_AWB_GAIN_R_MASK 0x03FF0000
++#define MRV_ISP_AWB_GAIN_R_SHIFT 16
++#define MRV_ISP_AWB_GAIN_R_MAX (MRV_ISP_AWB_GAIN_R_MASK >> \
++ MRV_ISP_AWB_GAIN_R_SHIFT)
++#define MRV_ISP_AWB_GAIN_B
++#define MRV_ISP_AWB_GAIN_B_MASK 0x000003FF
++#define MRV_ISP_AWB_GAIN_B_SHIFT 0
++#define MRV_ISP_AWB_GAIN_B_MAX (MRV_ISP_AWB_GAIN_B_MASK >> \
++ MRV_ISP_AWB_GAIN_B_SHIFT)
++
++#define MRV_ISP_AWB_WHITE_CNT
++#define MRV_ISP_AWB_WHITE_CNT_MASK 0x03FFFFFF
++#define MRV_ISP_AWB_WHITE_CNT_SHIFT 0
++
++#define MRV_ISP_AWB_MEAN_Y__G
++#define MRV_ISP_AWB_MEAN_Y__G_MASK 0x00FF0000
++#define MRV_ISP_AWB_MEAN_Y__G_SHIFT 16
++#define MRV_ISP_AWB_MEAN_CB__B
++#define MRV_ISP_AWB_MEAN_CB__B_MASK 0x0000FF00
++#define MRV_ISP_AWB_MEAN_CB__B_SHIFT 8
++#define MRV_ISP_AWB_MEAN_CR__R
++#define MRV_ISP_AWB_MEAN_CR__R_MASK 0x000000FF
++#define MRV_ISP_AWB_MEAN_CR__R_SHIFT 0
++
++
++
++#define MRV_ISP_CC_COEFF_0
++#define MRV_ISP_CC_COEFF_0_MASK 0x000001FF
++#define MRV_ISP_CC_COEFF_0_SHIFT 0
++
++#define MRV_ISP_CC_COEFF_1
++#define MRV_ISP_CC_COEFF_1_MASK 0x000001FF
++#define MRV_ISP_CC_COEFF_1_SHIFT 0
++
++#define MRV_ISP_CC_COEFF_2
++#define MRV_ISP_CC_COEFF_2_MASK 0x000001FF
++#define MRV_ISP_CC_COEFF_2_SHIFT 0
++
++#define MRV_ISP_CC_COEFF_3
++#define MRV_ISP_CC_COEFF_3_MASK 0x000001FF
++#define MRV_ISP_CC_COEFF_3_SHIFT 0
++
++#define MRV_ISP_CC_COEFF_4
++#define MRV_ISP_CC_COEFF_4_MASK 0x000001FF
++#define MRV_ISP_CC_COEFF_4_SHIFT 0
++
++#define MRV_ISP_CC_COEFF_5
++#define MRV_ISP_CC_COEFF_5_MASK 0x000001FF
++#define MRV_ISP_CC_COEFF_5_SHIFT 0
++
++#define MRV_ISP_CC_COEFF_6
++#define MRV_ISP_CC_COEFF_6_MASK 0x000001FF
++#define MRV_ISP_CC_COEFF_6_SHIFT 0
++
++#define MRV_ISP_CC_COEFF_7
++#define MRV_ISP_CC_COEFF_7_MASK 0x000001FF
++#define MRV_ISP_CC_COEFF_7_SHIFT 0
++
++#define MRV_ISP_CC_COEFF_8
++#define MRV_ISP_CC_COEFF_8_MASK 0x000001FF
++#define MRV_ISP_CC_COEFF_8_SHIFT 0
++
++#define MRV_ISP_ISP_OUT_H_OFFS
++#define MRV_ISP_ISP_OUT_H_OFFS_MASK 0x00000FFF
++#define MRV_ISP_ISP_OUT_H_OFFS_SHIFT 0
++
++#define MRV_ISP_ISP_OUT_V_OFFS
++#define MRV_ISP_ISP_OUT_V_OFFS_MASK 0x00000FFF
++#define MRV_ISP_ISP_OUT_V_OFFS_SHIFT 0
++
++#define MRV_ISP_ISP_OUT_H_SIZE
++#define MRV_ISP_ISP_OUT_H_SIZE_MASK 0x00003FFF
++#define MRV_ISP_ISP_OUT_H_SIZE_SHIFT 0
++
++#define MRV_ISP_ISP_OUT_V_SIZE
++#define MRV_ISP_ISP_OUT_V_SIZE_MASK 0x00000FFF
++#define MRV_ISP_ISP_OUT_V_SIZE_SHIFT 0
++
++#define MRV_ISP_DEMOSAIC_BYPASS
++#define MRV_ISP_DEMOSAIC_BYPASS_MASK 0x00000400
++#define MRV_ISP_DEMOSAIC_BYPASS_SHIFT 10
++
++#define MRV_ISP_DEMOSAIC_MODE
++#define MRV_ISP_DEMOSAIC_MODE_MASK 0x00000300
++#define MRV_ISP_DEMOSAIC_MODE_SHIFT 8
++#define MRV_ISP_DEMOSAIC_MODE_STD 0
++#define MRV_ISP_DEMOSAIC_MODE_ENH 1
++#define MRV_ISP_DEMOSAIC_TH
++#define MRV_ISP_DEMOSAIC_TH_MASK 0x000000FF
++#define MRV_ISP_DEMOSAIC_TH_SHIFT 0
++
++#define MRV_ISP_S_HSYNC
++
++#define MRV_ISP_S_HSYNC_MASK 0x80000000
++#define MRV_ISP_S_HSYNC_SHIFT 31
++
++#define MRV_ISP_S_VSYNC
++
++#define MRV_ISP_S_VSYNC_MASK 0x40000000
++#define MRV_ISP_S_VSYNC_SHIFT 30
++
++#define MRV_ISP_S_DATA
++
++#define MRV_ISP_S_DATA_MASK 0x0FFF0000
++
++#define MRV_ISP_S_DATA_SHIFT 16
++#define MRV_ISP_INFORM_FIELD
++#define MRV_ISP_INFORM_FIELD_MASK 0x00000004
++#define MRV_ISP_INFORM_FIELD_SHIFT 2
++#define MRV_ISP_INFORM_FIELD_ODD 0
++#define MRV_ISP_INFORM_FIELD_EVEN 1
++#define MRV_ISP_INFORM_EN_SHD
++#define MRV_ISP_INFORM_EN_SHD_MASK 0x00000002
++#define MRV_ISP_INFORM_EN_SHD_SHIFT 1
++#define MRV_ISP_ISP_ON_SHD
++#define MRV_ISP_ISP_ON_SHD_MASK 0x00000001
++#define MRV_ISP_ISP_ON_SHD_SHIFT 0
++
++
++#define MRV_ISP_ISP_OUT_H_OFFS_SHD
++#define MRV_ISP_ISP_OUT_H_OFFS_SHD_MASK 0x00000FFF
++#define MRV_ISP_ISP_OUT_H_OFFS_SHD_SHIFT 0
++
++#define MRV_ISP_ISP_OUT_V_OFFS_SHD
++#define MRV_ISP_ISP_OUT_V_OFFS_SHD_MASK 0x00000FFF
++#define MRV_ISP_ISP_OUT_V_OFFS_SHD_SHIFT 0
++
++
++#define MRV_ISP_ISP_OUT_H_SIZE_SHD
++#define MRV_ISP_ISP_OUT_H_SIZE_SHD_MASK 0x00003FFF
++#define MRV_ISP_ISP_OUT_H_SIZE_SHD_SHIFT 0
++
++
++#define MRV_ISP_ISP_OUT_V_SIZE_SHD
++#define MRV_ISP_ISP_OUT_V_SIZE_SHD_MASK 0x00000FFF
++#define MRV_ISP_ISP_OUT_V_SIZE_SHD_SHIFT 0
++
++#define MRV_ISP_IMSC_EXP_END
++#define MRV_ISP_IMSC_EXP_END_MASK 0x00040000
++#define MRV_ISP_IMSC_EXP_END_SHIFT 18
++
++#define MRV_ISP_IMSC_FLASH_CAP
++#define MRV_ISP_IMSC_FLASH_CAP_MASK 0x00020000
++#define MRV_ISP_IMSC_FLASH_CAP_SHIFT 17
++
++#define MRV_ISP_IMSC_BP_DET
++#define MRV_ISP_IMSC_BP_DET_MASK 0x00010000
++#define MRV_ISP_IMSC_BP_DET_SHIFT 16
++#define MRV_ISP_IMSC_BP_NEW_TAB_FUL
++#define MRV_ISP_IMSC_BP_NEW_TAB_FUL_MASK 0x00008000
++#define MRV_ISP_IMSC_BP_NEW_TAB_FUL_SHIFT 15
++#define MRV_ISP_IMSC_AFM_FIN
++#define MRV_ISP_IMSC_AFM_FIN_MASK 0x00004000
++#define MRV_ISP_IMSC_AFM_FIN_SHIFT 14
++#define MRV_ISP_IMSC_AFM_LUM_OF
++#define MRV_ISP_IMSC_AFM_LUM_OF_MASK 0x00002000
++#define MRV_ISP_IMSC_AFM_LUM_OF_SHIFT 13
++#define MRV_ISP_IMSC_AFM_SUM_OF
++#define MRV_ISP_IMSC_AFM_SUM_OF_MASK 0x00001000
++#define MRV_ISP_IMSC_AFM_SUM_OF_SHIFT 12
++#define MRV_ISP_IMSC_SHUTTER_OFF
++#define MRV_ISP_IMSC_SHUTTER_OFF_MASK 0x00000800
++#define MRV_ISP_IMSC_SHUTTER_OFF_SHIFT 11
++#define MRV_ISP_IMSC_SHUTTER_ON
++#define MRV_ISP_IMSC_SHUTTER_ON_MASK 0x00000400
++#define MRV_ISP_IMSC_SHUTTER_ON_SHIFT 10
++#define MRV_ISP_IMSC_FLASH_OFF
++#define MRV_ISP_IMSC_FLASH_OFF_MASK 0x00000200
++#define MRV_ISP_IMSC_FLASH_OFF_SHIFT 9
++#define MRV_ISP_IMSC_FLASH_ON
++#define MRV_ISP_IMSC_FLASH_ON_MASK 0x00000100
++#define MRV_ISP_IMSC_FLASH_ON_SHIFT 8
++
++#define MRV_ISP_IMSC_H_START
++#define MRV_ISP_IMSC_H_START_MASK 0x00000080
++#define MRV_ISP_IMSC_H_START_SHIFT 7
++#define MRV_ISP_IMSC_V_START
++#define MRV_ISP_IMSC_V_START_MASK 0x00000040
++#define MRV_ISP_IMSC_V_START_SHIFT 6
++#define MRV_ISP_IMSC_FRAME_IN
++#define MRV_ISP_IMSC_FRAME_IN_MASK 0x00000020
++#define MRV_ISP_IMSC_FRAME_IN_SHIFT 5
++#define MRV_ISP_IMSC_AWB_DONE
++#define MRV_ISP_IMSC_AWB_DONE_MASK 0x00000010
++#define MRV_ISP_IMSC_AWB_DONE_SHIFT 4
++#define MRV_ISP_IMSC_PIC_SIZE_ERR
++#define MRV_ISP_IMSC_PIC_SIZE_ERR_MASK 0x00000008
++#define MRV_ISP_IMSC_PIC_SIZE_ERR_SHIFT 3
++#define MRV_ISP_IMSC_DATA_LOSS
++#define MRV_ISP_IMSC_DATA_LOSS_MASK 0x00000004
++#define MRV_ISP_IMSC_DATA_LOSS_SHIFT 2
++#define MRV_ISP_IMSC_FRAME
++#define MRV_ISP_IMSC_FRAME_MASK 0x00000002
++#define MRV_ISP_IMSC_FRAME_SHIFT 1
++#define MRV_ISP_IMSC_ISP_OFF
++#define MRV_ISP_IMSC_ISP_OFF_MASK 0x00000001
++#define MRV_ISP_IMSC_ISP_OFF_SHIFT 0
++
++#define MRV_ISP_IMSC_ALL
++#define MRV_ISP_IMSC_ALL_MASK \
++(0 \
++| MRV_ISP_IMSC_EXP_END_MASK \
++| MRV_ISP_IMSC_FLASH_CAP_MASK \
++| MRV_ISP_IMSC_BP_DET_MASK \
++| MRV_ISP_IMSC_BP_NEW_TAB_FUL_MASK \
++| MRV_ISP_IMSC_AFM_FIN_MASK \
++| MRV_ISP_IMSC_AFM_LUM_OF_MASK \
++| MRV_ISP_IMSC_AFM_SUM_OF_MASK \
++| MRV_ISP_IMSC_SHUTTER_OFF_MASK \
++| MRV_ISP_IMSC_SHUTTER_ON_MASK \
++| MRV_ISP_IMSC_FLASH_OFF_MASK \
++| MRV_ISP_IMSC_FLASH_ON_MASK \
++| MRV_ISP_IMSC_H_START_MASK \
++| MRV_ISP_IMSC_V_START_MASK \
++| MRV_ISP_IMSC_FRAME_IN_MASK \
++| MRV_ISP_IMSC_AWB_DONE_MASK \
++| MRV_ISP_IMSC_PIC_SIZE_ERR_MASK \
++| MRV_ISP_IMSC_DATA_LOSS_MASK \
++| MRV_ISP_IMSC_FRAME_MASK \
++| MRV_ISP_IMSC_ISP_OFF_MASK \
++)
++#define MRV_ISP_IMSC_ALL_SHIFT 0
++
++#define MRV_ISP_RIS_EXP_END
++#define MRV_ISP_RIS_EXP_END_MASK 0x00040000
++#define MRV_ISP_RIS_EXP_END_SHIFT 18
++
++#define MRV_ISP_RIS_FLASH_CAP
++#define MRV_ISP_RIS_FLASH_CAP_MASK 0x00020000
++#define MRV_ISP_RIS_FLASH_CAP_SHIFT 17
++
++#define MRV_ISP_RIS_BP_DET
++#define MRV_ISP_RIS_BP_DET_MASK 0x00010000
++#define MRV_ISP_RIS_BP_DET_SHIFT 16
++#define MRV_ISP_RIS_BP_NEW_TAB_FUL
++#define MRV_ISP_RIS_BP_NEW_TAB_FUL_MASK 0x00008000
++#define MRV_ISP_RIS_BP_NEW_TAB_FUL_SHIFT 15
++#define MRV_ISP_RIS_AFM_FIN
++#define MRV_ISP_RIS_AFM_FIN_MASK 0x00004000
++#define MRV_ISP_RIS_AFM_FIN_SHIFT 14
++#define MRV_ISP_RIS_AFM_LUM_OF
++#define MRV_ISP_RIS_AFM_LUM_OF_MASK 0x00002000
++#define MRV_ISP_RIS_AFM_LUM_OF_SHIFT 13
++#define MRV_ISP_RIS_AFM_SUM_OF
++#define MRV_ISP_RIS_AFM_SUM_OF_MASK 0x00001000
++#define MRV_ISP_RIS_AFM_SUM_OF_SHIFT 12
++#define MRV_ISP_RIS_SHUTTER_OFF
++#define MRV_ISP_RIS_SHUTTER_OFF_MASK 0x00000800
++#define MRV_ISP_RIS_SHUTTER_OFF_SHIFT 11
++#define MRV_ISP_RIS_SHUTTER_ON
++#define MRV_ISP_RIS_SHUTTER_ON_MASK 0x00000400
++#define MRV_ISP_RIS_SHUTTER_ON_SHIFT 10
++#define MRV_ISP_RIS_FLASH_OFF
++#define MRV_ISP_RIS_FLASH_OFF_MASK 0x00000200
++#define MRV_ISP_RIS_FLASH_OFF_SHIFT 9
++#define MRV_ISP_RIS_FLASH_ON
++#define MRV_ISP_RIS_FLASH_ON_MASK 0x00000100
++#define MRV_ISP_RIS_FLASH_ON_SHIFT 8
++
++#define MRV_ISP_RIS_H_START
++#define MRV_ISP_RIS_H_START_MASK 0x00000080
++#define MRV_ISP_RIS_H_START_SHIFT 7
++#define MRV_ISP_RIS_V_START
++#define MRV_ISP_RIS_V_START_MASK 0x00000040
++#define MRV_ISP_RIS_V_START_SHIFT 6
++#define MRV_ISP_RIS_FRAME_IN
++#define MRV_ISP_RIS_FRAME_IN_MASK 0x00000020
++#define MRV_ISP_RIS_FRAME_IN_SHIFT 5
++#define MRV_ISP_RIS_AWB_DONE
++#define MRV_ISP_RIS_AWB_DONE_MASK 0x00000010
++#define MRV_ISP_RIS_AWB_DONE_SHIFT 4
++#define MRV_ISP_RIS_PIC_SIZE_ERR
++#define MRV_ISP_RIS_PIC_SIZE_ERR_MASK 0x00000008
++#define MRV_ISP_RIS_PIC_SIZE_ERR_SHIFT 3
++#define MRV_ISP_RIS_DATA_LOSS
++#define MRV_ISP_RIS_DATA_LOSS_MASK 0x00000004
++#define MRV_ISP_RIS_DATA_LOSS_SHIFT 2
++#define MRV_ISP_RIS_FRAME
++#define MRV_ISP_RIS_FRAME_MASK 0x00000002
++#define MRV_ISP_RIS_FRAME_SHIFT 1
++#define MRV_ISP_RIS_ISP_OFF
++#define MRV_ISP_RIS_ISP_OFF_MASK 0x00000001
++#define MRV_ISP_RIS_ISP_OFF_SHIFT 0
++
++#define MRV_ISP_RIS_ALL
++#define MRV_ISP_RIS_ALL_MASK \
++(0 \
++| MRV_ISP_RIS_EXP_END_MASK \
++| MRV_ISP_RIS_FLASH_CAP_MASK \
++| MRV_ISP_RIS_BP_DET_MASK \
++| MRV_ISP_RIS_BP_NEW_TAB_FUL_MASK \
++| MRV_ISP_RIS_AFM_FIN_MASK \
++| MRV_ISP_RIS_AFM_LUM_OF_MASK \
++| MRV_ISP_RIS_AFM_SUM_OF_MASK \
++| MRV_ISP_RIS_SHUTTER_OFF_MASK \
++| MRV_ISP_RIS_SHUTTER_ON_MASK \
++| MRV_ISP_RIS_FLASH_OFF_MASK \
++| MRV_ISP_RIS_FLASH_ON_MASK \
++| MRV_ISP_RIS_H_START_MASK \
++| MRV_ISP_RIS_V_START_MASK \
++| MRV_ISP_RIS_FRAME_IN_MASK \
++| MRV_ISP_RIS_AWB_DONE_MASK \
++| MRV_ISP_RIS_PIC_SIZE_ERR_MASK \
++| MRV_ISP_RIS_DATA_LOSS_MASK \
++| MRV_ISP_RIS_FRAME_MASK \
++| MRV_ISP_RIS_ISP_OFF_MASK \
++)
++#define MRV_ISP_RIS_ALL_SHIFT 0
++
++#define MRV_ISP_MIS_EXP_END
++#define MRV_ISP_MIS_EXP_END_MASK 0x00040000
++#define MRV_ISP_MIS_EXP_END_SHIFT 18
++
++#define MRV_ISP_MIS_FLASH_CAP
++#define MRV_ISP_MIS_FLASH_CAP_MASK 0x00020000
++#define MRV_ISP_MIS_FLASH_CAP_SHIFT 17
++
++#define MRV_ISP_MIS_BP_DET
++#define MRV_ISP_MIS_BP_DET_MASK 0x00010000
++#define MRV_ISP_MIS_BP_DET_SHIFT 16
++#define MRV_ISP_MIS_BP_NEW_TAB_FUL
++#define MRV_ISP_MIS_BP_NEW_TAB_FUL_MASK 0x00008000
++#define MRV_ISP_MIS_BP_NEW_TAB_FUL_SHIFT 15
++#define MRV_ISP_MIS_AFM_FIN
++#define MRV_ISP_MIS_AFM_FIN_MASK 0x00004000
++#define MRV_ISP_MIS_AFM_FIN_SHIFT 14
++#define MRV_ISP_MIS_AFM_LUM_OF
++#define MRV_ISP_MIS_AFM_LUM_OF_MASK 0x00002000
++#define MRV_ISP_MIS_AFM_LUM_OF_SHIFT 13
++#define MRV_ISP_MIS_AFM_SUM_OF
++#define MRV_ISP_MIS_AFM_SUM_OF_MASK 0x00001000
++#define MRV_ISP_MIS_AFM_SUM_OF_SHIFT 12
++#define MRV_ISP_MIS_SHUTTER_OFF
++#define MRV_ISP_MIS_SHUTTER_OFF_MASK 0x00000800
++#define MRV_ISP_MIS_SHUTTER_OFF_SHIFT 11
++#define MRV_ISP_MIS_SHUTTER_ON
++#define MRV_ISP_MIS_SHUTTER_ON_MASK 0x00000400
++#define MRV_ISP_MIS_SHUTTER_ON_SHIFT 10
++#define MRV_ISP_MIS_FLASH_OFF
++#define MRV_ISP_MIS_FLASH_OFF_MASK 0x00000200
++#define MRV_ISP_MIS_FLASH_OFF_SHIFT 9
++#define MRV_ISP_MIS_FLASH_ON
++#define MRV_ISP_MIS_FLASH_ON_MASK 0x00000100
++#define MRV_ISP_MIS_FLASH_ON_SHIFT 8
++
++#define MRV_ISP_MIS_H_START
++#define MRV_ISP_MIS_H_START_MASK 0x00000080
++#define MRV_ISP_MIS_H_START_SHIFT 7
++#define MRV_ISP_MIS_V_START
++#define MRV_ISP_MIS_V_START_MASK 0x00000040
++#define MRV_ISP_MIS_V_START_SHIFT 6
++#define MRV_ISP_MIS_FRAME_IN
++#define MRV_ISP_MIS_FRAME_IN_MASK 0x00000020
++#define MRV_ISP_MIS_FRAME_IN_SHIFT 5
++#define MRV_ISP_MIS_AWB_DONE
++#define MRV_ISP_MIS_AWB_DONE_MASK 0x00000010
++#define MRV_ISP_MIS_AWB_DONE_SHIFT 4
++#define MRV_ISP_MIS_PIC_SIZE_ERR
++#define MRV_ISP_MIS_PIC_SIZE_ERR_MASK 0x00000008
++#define MRV_ISP_MIS_PIC_SIZE_ERR_SHIFT 3
++#define MRV_ISP_MIS_DATA_LOSS
++#define MRV_ISP_MIS_DATA_LOSS_MASK 0x00000004
++#define MRV_ISP_MIS_DATA_LOSS_SHIFT 2
++#define MRV_ISP_MIS_FRAME
++#define MRV_ISP_MIS_FRAME_MASK 0x00000002
++#define MRV_ISP_MIS_FRAME_SHIFT 1
++#define MRV_ISP_MIS_ISP_OFF
++#define MRV_ISP_MIS_ISP_OFF_MASK 0x00000001
++#define MRV_ISP_MIS_ISP_OFF_SHIFT 0
++
++#define MRV_ISP_MIS_ALL
++#define MRV_ISP_MIS_ALL_MASK \
++(0 \
++| MRV_ISP_MIS_EXP_END_MASK \
++| MRV_ISP_MIS_FLASH_CAP_MASK \
++| MRV_ISP_MIS_BP_DET_MASK \
++| MRV_ISP_MIS_BP_NEW_TAB_FUL_MASK \
++| MRV_ISP_MIS_AFM_FIN_MASK \
++| MRV_ISP_MIS_AFM_LUM_OF_MASK \
++| MRV_ISP_MIS_AFM_SUM_OF_MASK \
++| MRV_ISP_MIS_SHUTTER_OFF_MASK \
++| MRV_ISP_MIS_SHUTTER_ON_MASK \
++| MRV_ISP_MIS_FLASH_OFF_MASK \
++| MRV_ISP_MIS_FLASH_ON_MASK \
++| MRV_ISP_MIS_H_START_MASK \
++| MRV_ISP_MIS_V_START_MASK \
++| MRV_ISP_MIS_FRAME_IN_MASK \
++| MRV_ISP_MIS_AWB_DONE_MASK \
++| MRV_ISP_MIS_PIC_SIZE_ERR_MASK \
++| MRV_ISP_MIS_DATA_LOSS_MASK \
++| MRV_ISP_MIS_FRAME_MASK \
++| MRV_ISP_MIS_ISP_OFF_MASK \
++)
++#define MRV_ISP_MIS_ALL_SHIFT 0
++
++#define MRV_ISP_ICR_EXP_END
++#define MRV_ISP_ICR_EXP_END_MASK 0x00040000
++#define MRV_ISP_ICR_EXP_END_SHIFT 18
++#define MRV_ISP_ICR_FLASH_CAP
++#define MRV_ISP_ICR_FLASH_CAP_MASK 0x00020000
++#define MRV_ISP_ICR_FLASH_CAP_SHIFT 17
++
++#define MRV_ISP_ICR_BP_DET
++#define MRV_ISP_ICR_BP_DET_MASK 0x00010000
++#define MRV_ISP_ICR_BP_DET_SHIFT 16
++#define MRV_ISP_ICR_BP_NEW_TAB_FUL
++#define MRV_ISP_ICR_BP_NEW_TAB_FUL_MASK 0x00008000
++#define MRV_ISP_ICR_BP_NEW_TAB_FUL_SHIFT 15
++#define MRV_ISP_ICR_AFM_FIN
++#define MRV_ISP_ICR_AFM_FIN_MASK 0x00004000
++#define MRV_ISP_ICR_AFM_FIN_SHIFT 14
++#define MRV_ISP_ICR_AFM_LUM_OF
++#define MRV_ISP_ICR_AFM_LUM_OF_MASK 0x00002000
++#define MRV_ISP_ICR_AFM_LUM_OF_SHIFT 13
++#define MRV_ISP_ICR_AFM_SUM_OF
++#define MRV_ISP_ICR_AFM_SUM_OF_MASK 0x00001000
++#define MRV_ISP_ICR_AFM_SUM_OF_SHIFT 12
++#define MRV_ISP_ICR_SHUTTER_OFF
++#define MRV_ISP_ICR_SHUTTER_OFF_MASK 0x00000800
++#define MRV_ISP_ICR_SHUTTER_OFF_SHIFT 11
++#define MRV_ISP_ICR_SHUTTER_ON
++#define MRV_ISP_ICR_SHUTTER_ON_MASK 0x00000400
++#define MRV_ISP_ICR_SHUTTER_ON_SHIFT 10
++#define MRV_ISP_ICR_FLASH_OFF
++#define MRV_ISP_ICR_FLASH_OFF_MASK 0x00000200
++#define MRV_ISP_ICR_FLASH_OFF_SHIFT 9
++#define MRV_ISP_ICR_FLASH_ON
++#define MRV_ISP_ICR_FLASH_ON_MASK 0x00000100
++#define MRV_ISP_ICR_FLASH_ON_SHIFT 8
++
++#define MRV_ISP_ICR_H_START
++#define MRV_ISP_ICR_H_START_MASK 0x00000080
++#define MRV_ISP_ICR_H_START_SHIFT 7
++#define MRV_ISP_ICR_V_START
++#define MRV_ISP_ICR_V_START_MASK 0x00000040
++#define MRV_ISP_ICR_V_START_SHIFT 6
++#define MRV_ISP_ICR_FRAME_IN
++#define MRV_ISP_ICR_FRAME_IN_MASK 0x00000020
++#define MRV_ISP_ICR_FRAME_IN_SHIFT 5
++#define MRV_ISP_ICR_AWB_DONE
++#define MRV_ISP_ICR_AWB_DONE_MASK 0x00000010
++#define MRV_ISP_ICR_AWB_DONE_SHIFT 4
++#define MRV_ISP_ICR_PIC_SIZE_ERR
++#define MRV_ISP_ICR_PIC_SIZE_ERR_MASK 0x00000008
++#define MRV_ISP_ICR_PIC_SIZE_ERR_SHIFT 3
++#define MRV_ISP_ICR_DATA_LOSS
++#define MRV_ISP_ICR_DATA_LOSS_MASK 0x00000004
++#define MRV_ISP_ICR_DATA_LOSS_SHIFT 2
++#define MRV_ISP_ICR_FRAME
++#define MRV_ISP_ICR_FRAME_MASK 0x00000002
++#define MRV_ISP_ICR_FRAME_SHIFT 1
++#define MRV_ISP_ICR_ISP_OFF
++#define MRV_ISP_ICR_ISP_OFF_MASK 0x00000001
++#define MRV_ISP_ICR_ISP_OFF_SHIFT 0
++
++#define MRV_ISP_ICR_ALL
++#define MRV_ISP_ICR_ALL_MASK \
++(0 \
++| MRV_ISP_ICR_EXP_END_MASK \
++| MRV_ISP_ICR_FLASH_CAP_MASK \
++| MRV_ISP_ICR_BP_DET_MASK \
++| MRV_ISP_ICR_BP_NEW_TAB_FUL_MASK \
++| MRV_ISP_ICR_AFM_FIN_MASK \
++| MRV_ISP_ICR_AFM_LUM_OF_MASK \
++| MRV_ISP_ICR_AFM_SUM_OF_MASK \
++| MRV_ISP_ICR_SHUTTER_OFF_MASK \
++| MRV_ISP_ICR_SHUTTER_ON_MASK \
++| MRV_ISP_ICR_FLASH_OFF_MASK \
++| MRV_ISP_ICR_FLASH_ON_MASK \
++| MRV_ISP_ICR_H_START_MASK \
++| MRV_ISP_ICR_V_START_MASK \
++| MRV_ISP_ICR_FRAME_IN_MASK \
++| MRV_ISP_ICR_AWB_DONE_MASK \
++| MRV_ISP_ICR_PIC_SIZE_ERR_MASK \
++| MRV_ISP_ICR_DATA_LOSS_MASK \
++| MRV_ISP_ICR_FRAME_MASK \
++| MRV_ISP_ICR_ISP_OFF_MASK \
++)
++#define MRV_ISP_ICR_ALL_SHIFT 0
++
++#define MRV_ISP_ISR_EXP_END
++#define MRV_ISP_ISR_EXP_END_MASK 0x00040000
++#define MRV_ISP_ISR_EXP_END_SHIFT 18
++#define MRV_ISP_ISR_FLASH_CAP
++#define MRV_ISP_ISR_FLASH_CAP_MASK 0x00020000
++#define MRV_ISP_ISR_FLASH_CAP_SHIFT 17
++
++#define MRV_ISP_ISR_BP_DET
++#define MRV_ISP_ISR_BP_DET_MASK 0x00010000
++#define MRV_ISP_ISR_BP_DET_SHIFT 16
++#define MRV_ISP_ISR_BP_NEW_TAB_FUL
++#define MRV_ISP_ISR_BP_NEW_TAB_FUL_MASK 0x00008000
++#define MRV_ISP_ISR_BP_NEW_TAB_FUL_SHIFT 15
++#define MRV_ISP_ISR_AFM_FIN
++#define MRV_ISP_ISR_AFM_FIN_MASK 0x00004000
++#define MRV_ISP_ISR_AFM_FIN_SHIFT 14
++#define MRV_ISP_ISR_AFM_LUM_OF
++#define MRV_ISP_ISR_AFM_LUM_OF_MASK 0x00002000
++#define MRV_ISP_ISR_AFM_LUM_OF_SHIFT 13
++#define MRV_ISP_ISR_AFM_SUM_OF
++#define MRV_ISP_ISR_AFM_SUM_OF_MASK 0x00001000
++#define MRV_ISP_ISR_AFM_SUM_OF_SHIFT 12
++#define MRV_ISP_ISR_SHUTTER_OFF
++#define MRV_ISP_ISR_SHUTTER_OFF_MASK 0x00000800
++#define MRV_ISP_ISR_SHUTTER_OFF_SHIFT 11
++#define MRV_ISP_ISR_SHUTTER_ON
++#define MRV_ISP_ISR_SHUTTER_ON_MASK 0x00000400
++#define MRV_ISP_ISR_SHUTTER_ON_SHIFT 10
++#define MRV_ISP_ISR_FLASH_OFF
++#define MRV_ISP_ISR_FLASH_OFF_MASK 0x00000200
++#define MRV_ISP_ISR_FLASH_OFF_SHIFT 9
++#define MRV_ISP_ISR_FLASH_ON
++#define MRV_ISP_ISR_FLASH_ON_MASK 0x00000100
++#define MRV_ISP_ISR_FLASH_ON_SHIFT 8
++
++#define MRV_ISP_ISR_H_START
++#define MRV_ISP_ISR_H_START_MASK 0x00000080
++#define MRV_ISP_ISR_H_START_SHIFT 7
++#define MRV_ISP_ISR_V_START
++#define MRV_ISP_ISR_V_START_MASK 0x00000040
++#define MRV_ISP_ISR_V_START_SHIFT 6
++#define MRV_ISP_ISR_FRAME_IN
++#define MRV_ISP_ISR_FRAME_IN_MASK 0x00000020
++#define MRV_ISP_ISR_FRAME_IN_SHIFT 5
++#define MRV_ISP_ISR_AWB_DONE
++#define MRV_ISP_ISR_AWB_DONE_MASK 0x00000010
++#define MRV_ISP_ISR_AWB_DONE_SHIFT 4
++#define MRV_ISP_ISR_PIC_SIZE_ERR
++#define MRV_ISP_ISR_PIC_SIZE_ERR_MASK 0x00000008
++#define MRV_ISP_ISR_PIC_SIZE_ERR_SHIFT 3
++#define MRV_ISP_ISR_DATA_LOSS
++#define MRV_ISP_ISR_DATA_LOSS_MASK 0x00000004
++#define MRV_ISP_ISR_DATA_LOSS_SHIFT 2
++#define MRV_ISP_ISR_FRAME
++#define MRV_ISP_ISR_FRAME_MASK 0x00000002
++#define MRV_ISP_ISR_FRAME_SHIFT 1
++#define MRV_ISP_ISR_ISP_OFF
++#define MRV_ISP_ISR_ISP_OFF_MASK 0x00000001
++#define MRV_ISP_ISR_ISP_OFF_SHIFT 0
++
++#define MRV_ISP_ISR_ALL
++#define MRV_ISP_ISR_ALL_MASK \
++(0 \
++| MRV_ISP_ISR_EXP_END_MASK \
++| MRV_ISP_ISR_FLASH_CAP_MASK \
++| MRV_ISP_ISR_BP_DET_MASK \
++| MRV_ISP_ISR_BP_NEW_TAB_FUL_MASK \
++| MRV_ISP_ISR_AFM_FIN_MASK \
++| MRV_ISP_ISR_AFM_LUM_OF_MASK \
++| MRV_ISP_ISR_AFM_SUM_OF_MASK \
++| MRV_ISP_ISR_SHUTTER_OFF_MASK \
++| MRV_ISP_ISR_SHUTTER_ON_MASK \
++| MRV_ISP_ISR_FLASH_OFF_MASK \
++| MRV_ISP_ISR_FLASH_ON_MASK \
++| MRV_ISP_ISR_H_START_MASK \
++| MRV_ISP_ISR_V_START_MASK \
++| MRV_ISP_ISR_FRAME_IN_MASK \
++| MRV_ISP_ISR_AWB_DONE_MASK \
++| MRV_ISP_ISR_PIC_SIZE_ERR_MASK \
++| MRV_ISP_ISR_DATA_LOSS_MASK \
++| MRV_ISP_ISR_FRAME_MASK \
++| MRV_ISP_ISR_ISP_OFF_MASK \
++)
++#define MRV_ISP_ISR_ALL_SHIFT 0
++
++#define MRV_ISP_CT_COEFF
++#define MRV_ISP_CT_COEFF_MASK 0x000007FF
++#define MRV_ISP_CT_COEFF_SHIFT 0
++#define MRV_ISP_CT_COEFF_MAX (MRV_ISP_CT_COEFF_MASK >> MRV_ISP_CT_COEFF_SHIFT)
++
++#define MRV_ISP_EQU_SEGM
++#define MRV_ISP_EQU_SEGM_MASK 0x00000001
++#define MRV_ISP_EQU_SEGM_SHIFT 0
++#define MRV_ISP_EQU_SEGM_LOG 0
++#define MRV_ISP_EQU_SEGM_EQU 1
++
++#define MRV_ISP_ISP_GAMMA_OUT_Y
++#define MRV_ISP_ISP_GAMMA_OUT_Y_MASK 0x000003FF
++#define MRV_ISP_ISP_GAMMA_OUT_Y_SHIFT 0
++
++#define MRV_ISP_OUTFORM_SIZE_ERR
++#define MRV_ISP_OUTFORM_SIZE_ERR_MASK 0x00000004
++#define MRV_ISP_OUTFORM_SIZE_ERR_SHIFT 2
++#define MRV_ISP_IS_SIZE_ERR
++#define MRV_ISP_IS_SIZE_ERR_MASK 0x00000002
++#define MRV_ISP_IS_SIZE_ERR_SHIFT 1
++#define MRV_ISP_INFORM_SIZE_ERR
++#define MRV_ISP_INFORM_SIZE_ERR_MASK 0x00000001
++#define MRV_ISP_INFORM_SIZE_ERR_SHIFT 0
++
++#define MRV_ISP_ALL_ERR
++#define MRV_ISP_ALL_ERR_MASK \
++(0 \
++| MRV_ISP_OUTFORM_SIZE_ERR_MASK \
++| MRV_ISP_IS_SIZE_ERR_MASK \
++| MRV_ISP_INFORM_SIZE_ERR_MASK \
++)
++#define MRV_ISP_ALL_ERR_SHIFT 0
++
++#define MRV_ISP_OUTFORM_SIZE_ERR_CLR
++#define MRV_ISP_OUTFORM_SIZE_ERR_CLR_MASK 0x00000004
++#define MRV_ISP_OUTFORM_SIZE_ERR_CLR_SHIFT 2
++#define MRV_ISP_IS_SIZE_ERR_CLR
++#define MRV_ISP_IS_SIZE_ERR_CLR_MASK 0x00000002
++#define MRV_ISP_IS_SIZE_ERR_CLR_SHIFT 1
++#define MRV_ISP_INFORM_SIZE_ERR_CLR
++#define MRV_ISP_INFORM_SIZE_ERR_CLR_MASK 0x00000001
++#define MRV_ISP_INFORM_SIZE_ERR_CLR_SHIFT 0
++
++
++#define MRV_ISP_FRAME_COUNTER
++#define MRV_ISP_FRAME_COUNTER_MASK 0x000003FF
++#define MRV_ISP_FRAME_COUNTER_SHIFT 0
++
++#define MRV_ISP_CT_OFFSET_R
++#define MRV_ISP_CT_OFFSET_R_MASK 0x00000FFF
++#define MRV_ISP_CT_OFFSET_R_SHIFT 0
++
++#define MRV_ISP_CT_OFFSET_G
++#define MRV_ISP_CT_OFFSET_G_MASK 0x00000FFF
++#define MRV_ISP_CT_OFFSET_G_SHIFT 0
++
++#define MRV_ISP_CT_OFFSET_B
++#define MRV_ISP_CT_OFFSET_B_MASK 0x00000FFF
++#define MRV_ISP_CT_OFFSET_B_SHIFT 0
++
++
++#define MRV_FLASH_PREFLASH_ON
++#define MRV_FLASH_PREFLASH_ON_MASK 0x00000004
++#define MRV_FLASH_PREFLASH_ON_SHIFT 2
++#define MRV_FLASH_FLASH_ON
++#define MRV_FLASH_FLASH_ON_MASK 0x00000002
++#define MRV_FLASH_FLASH_ON_SHIFT 1
++#define MRV_FLASH_PRELIGHT_ON
++#define MRV_FLASH_PRELIGHT_ON_MASK 0x00000001
++#define MRV_FLASH_PRELIGHT_ON_SHIFT 0
++
++#define MRV_FLASH_FL_CAP_DEL
++#define MRV_FLASH_FL_CAP_DEL_MASK 0x000000F0
++#define MRV_FLASH_FL_CAP_DEL_SHIFT 4
++#define MRV_FLASH_FL_CAP_DEL_MAX \
++ (MRV_FLASH_FL_CAP_DEL_MASK >> MRV_FLASH_FL_CAP_DEL_SHIFT)
++#define MRV_FLASH_FL_TRIG_SRC
++#define MRV_FLASH_FL_TRIG_SRC_MASK 0x00000008
++#define MRV_FLASH_FL_TRIG_SRC_SHIFT 3
++#define MRV_FLASH_FL_TRIG_SRC_VDS 0
++#define MRV_FLASH_FL_TRIG_SRC_FL 1
++#define MRV_FLASH_FL_POL
++#define MRV_FLASH_FL_POL_MASK 0x00000004
++#define MRV_FLASH_FL_POL_SHIFT 2
++#define MRV_FLASH_FL_POL_HIGH 0
++#define MRV_FLASH_FL_POL_LOW 1
++#define MRV_FLASH_VS_IN_EDGE
++#define MRV_FLASH_VS_IN_EDGE_MASK 0x00000002
++#define MRV_FLASH_VS_IN_EDGE_SHIFT 1
++#define MRV_FLASH_VS_IN_EDGE_NEG 0
++#define MRV_FLASH_VS_IN_EDGE_POS 1
++#define MRV_FLASH_PRELIGHT_MODE
++#define MRV_FLASH_PRELIGHT_MODE_MASK 0x00000001
++#define MRV_FLASH_PRELIGHT_MODE_SHIFT 0
++#define MRV_FLASH_PRELIGHT_MODE_OASF 0
++#define MRV_FLASH_PRELIGHT_MODE_OAEF 1
++
++#define MRV_FLASH_FL_PRE_DIV
++#define MRV_FLASH_FL_PRE_DIV_MASK 0x000003FF
++#define MRV_FLASH_FL_PRE_DIV_SHIFT 0
++#define MRV_FLASH_FL_PRE_DIV_MAX \
++ (MRV_FLASH_FL_PRE_DIV_MASK >> MRV_FLASH_FL_PRE_DIV_SHIFT)
++
++#define MRV_FLASH_FL_DELAY
++#define MRV_FLASH_FL_DELAY_MASK 0x0003FFFF
++#define MRV_FLASH_FL_DELAY_SHIFT 0
++#define MRV_FLASH_FL_DELAY_MAX \
++ (MRV_FLASH_FL_DELAY_MASK >> MRV_FLASH_FL_DELAY_SHIFT)
++
++#define MRV_FLASH_FL_TIME
++#define MRV_FLASH_FL_TIME_MASK 0x0003FFFF
++#define MRV_FLASH_FL_TIME_SHIFT 0
++#define MRV_FLASH_FL_TIME_MAX \
++ (MRV_FLASH_FL_TIME_MASK >> MRV_FLASH_FL_TIME_SHIFT)
++
++#define MRV_FLASH_FL_MAXP
++#define MRV_FLASH_FL_MAXP_MASK 0x0000FFFF
++#define MRV_FLASH_FL_MAXP_SHIFT 0
++#define MRV_FLASH_FL_MAXP_MAX \
++ (MRV_FLASH_FL_MAXP_MASK >> MRV_FLASH_FL_MAXP_SHIFT)
++
++#define MRV_SHUT_SH_OPEN_POL
++#define MRV_SHUT_SH_OPEN_POL_MASK 0x00000010
++#define MRV_SHUT_SH_OPEN_POL_SHIFT 4
++#define MRV_SHUT_SH_OPEN_POL_HIGH 0
++#define MRV_SHUT_SH_OPEN_POL_LOW 1
++#define MRV_SHUT_SH_TRIG_EN
++#define MRV_SHUT_SH_TRIG_EN_MASK 0x00000008
++#define MRV_SHUT_SH_TRIG_EN_SHIFT 3
++#define MRV_SHUT_SH_TRIG_EN_NEG 0
++#define MRV_SHUT_SH_TRIG_EN_POS 1
++#define MRV_SHUT_SH_TRIG_SRC
++#define MRV_SHUT_SH_TRIG_SRC_MASK 0x00000004
++#define MRV_SHUT_SH_TRIG_SRC_SHIFT 2
++#define MRV_SHUT_SH_TRIG_SRC_VDS 0
++#define MRV_SHUT_SH_TRIG_SRC_SHUT 1
++#define MRV_SHUT_SH_REP_EN
++#define MRV_SHUT_SH_REP_EN_MASK 0x00000002
++#define MRV_SHUT_SH_REP_EN_SHIFT 1
++#define MRV_SHUT_SH_REP_EN_ONCE 0
++#define MRV_SHUT_SH_REP_EN_REP 1
++#define MRV_SHUT_SH_EN
++#define MRV_SHUT_SH_EN_MASK 0x00000001
++#define MRV_SHUT_SH_EN_SHIFT 0
++
++#define MRV_SHUT_SH_PRE_DIV
++#define MRV_SHUT_SH_PRE_DIV_MASK 0x000003FF
++#define MRV_SHUT_SH_PRE_DIV_SHIFT 0
++#define MRV_SHUT_SH_PRE_DIV_MAX \
++ (MRV_SHUT_SH_PRE_DIV_MASK >> MRV_SHUT_SH_PRE_DIV_SHIFT)
++
++#define MRV_SHUT_SH_DELAY
++#define MRV_SHUT_SH_DELAY_MASK 0x000FFFFF
++#define MRV_SHUT_SH_DELAY_SHIFT 0
++#define MRV_SHUT_SH_DELAY_MAX \
++ (MRV_SHUT_SH_DELAY_MASK >> MRV_SHUT_SH_DELAY_SHIFT)
++
++#define MRV_SHUT_SH_TIME
++#define MRV_SHUT_SH_TIME_MASK 0x000FFFFF
++#define MRV_SHUT_SH_TIME_SHIFT 0
++#define MRV_SHUT_SH_TIME_MAX (MRV_SHUT_SH_TIME_MASK >> MRV_SHUT_SH_TIME_SHIFT)
++
++#define MRV_CPROC_CPROC_C_OUT_RANGE
++#define MRV_CPROC_CPROC_C_OUT_RANGE_MASK 0x00000008
++#define MRV_CPROC_CPROC_C_OUT_RANGE_SHIFT 3
++#define MRV_CPROC_CPROC_C_OUT_RANGE_BT601 0
++#define MRV_CPROC_CPROC_C_OUT_RANGE_FULL 1
++#define MRV_CPROC_CPROC_Y_IN_RANGE
++#define MRV_CPROC_CPROC_Y_IN_RANGE_MASK 0x00000004
++#define MRV_CPROC_CPROC_Y_IN_RANGE_SHIFT 2
++#define MRV_CPROC_CPROC_Y_IN_RANGE_BT601 0
++#define MRV_CPROC_CPROC_Y_IN_RANGE_FULL 1
++#define MRV_CPROC_CPROC_Y_OUT_RANGE
++#define MRV_CPROC_CPROC_Y_OUT_RANGE_MASK 0x00000002
++#define MRV_CPROC_CPROC_Y_OUT_RANGE_SHIFT 1
++#define MRV_CPROC_CPROC_Y_OUT_RANGE_BT601 0
++#define MRV_CPROC_CPROC_Y_OUT_RANGE_FULL 1
++#define MRV_CPROC_CPROC_ENABLE
++#define MRV_CPROC_CPROC_ENABLE_MASK 0x00000001
++#define MRV_CPROC_CPROC_ENABLE_SHIFT 0
++
++#define MRV_CPROC_CPROC_CONTRAST
++#define MRV_CPROC_CPROC_CONTRAST_MASK 0x000000FF
++#define MRV_CPROC_CPROC_CONTRAST_SHIFT 0
++
++#define MRV_CPROC_CPROC_BRIGHTNESS
++#define MRV_CPROC_CPROC_BRIGHTNESS_MASK 0x000000FF
++#define MRV_CPROC_CPROC_BRIGHTNESS_SHIFT 0
++
++#define MRV_CPROC_CPROC_SATURATION
++#define MRV_CPROC_CPROC_SATURATION_MASK 0x000000FF
++#define MRV_CPROC_CPROC_SATURATION_SHIFT 0
++
++#define MRV_CPROC_CPROC_HUE
++#define MRV_CPROC_CPROC_HUE_MASK 0x000000FF
++#define MRV_CPROC_CPROC_HUE_SHIFT 0
++
++#define MRV_RSZ_SCALE
++
++#define MRV_RSZ_SCALE_MASK 0x00003FFF
++
++#define MRV_RSZ_SCALE_SHIFT 0
++#define MRV_RSZ_SCALE_MAX (MRV_RSZ_SCALE_MASK >> MRV_RSZ_SCALE_SHIFT)
++
++
++
++#define MRV_MRSZ_CFG_UPD
++#define MRV_MRSZ_CFG_UPD_MASK 0x00000100
++#define MRV_MRSZ_CFG_UPD_SHIFT 8
++#define MRV_MRSZ_SCALE_VC_UP
++#define MRV_MRSZ_SCALE_VC_UP_MASK 0x00000080
++#define MRV_MRSZ_SCALE_VC_UP_SHIFT 7
++#define MRV_MRSZ_SCALE_VC_UP_UPSCALE 1
++#define MRV_MRSZ_SCALE_VC_UP_DOWNSCALE 0
++#define MRV_MRSZ_SCALE_VY_UP
++#define MRV_MRSZ_SCALE_VY_UP_MASK 0x00000040
++#define MRV_MRSZ_SCALE_VY_UP_SHIFT 6
++#define MRV_MRSZ_SCALE_VY_UP_UPSCALE 1
++#define MRV_MRSZ_SCALE_VY_UP_DOWNSCALE 0
++#define MRV_MRSZ_SCALE_HC_UP
++#define MRV_MRSZ_SCALE_HC_UP_MASK 0x00000020
++#define MRV_MRSZ_SCALE_HC_UP_SHIFT 5
++#define MRV_MRSZ_SCALE_HC_UP_UPSCALE 1
++#define MRV_MRSZ_SCALE_HC_UP_DOWNSCALE 0
++#define MRV_MRSZ_SCALE_HY_UP
++#define MRV_MRSZ_SCALE_HY_UP_MASK 0x00000010
++#define MRV_MRSZ_SCALE_HY_UP_SHIFT 4
++#define MRV_MRSZ_SCALE_HY_UP_UPSCALE 1
++#define MRV_MRSZ_SCALE_HY_UP_DOWNSCALE 0
++#define MRV_MRSZ_SCALE_VC_ENABLE
++#define MRV_MRSZ_SCALE_VC_ENABLE_MASK 0x00000008
++#define MRV_MRSZ_SCALE_VC_ENABLE_SHIFT 3
++#define MRV_MRSZ_SCALE_VY_ENABLE
++#define MRV_MRSZ_SCALE_VY_ENABLE_MASK 0x00000004
++#define MRV_MRSZ_SCALE_VY_ENABLE_SHIFT 2
++#define MRV_MRSZ_SCALE_HC_ENABLE
++#define MRV_MRSZ_SCALE_HC_ENABLE_MASK 0x00000002
++#define MRV_MRSZ_SCALE_HC_ENABLE_SHIFT 1
++#define MRV_MRSZ_SCALE_HY_ENABLE
++#define MRV_MRSZ_SCALE_HY_ENABLE_MASK 0x00000001
++#define MRV_MRSZ_SCALE_HY_ENABLE_SHIFT 0
++
++#define MRV_MRSZ_SCALE_HY
++#define MRV_MRSZ_SCALE_HY_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_SCALE_HY_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MRSZ_SCALE_HCB
++#define MRV_MRSZ_SCALE_HCB_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_SCALE_HCB_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MRSZ_SCALE_HCR
++#define MRV_MRSZ_SCALE_HCR_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_SCALE_HCR_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MRSZ_SCALE_VY
++#define MRV_MRSZ_SCALE_VY_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_SCALE_VY_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MRSZ_SCALE_VC
++#define MRV_MRSZ_SCALE_VC_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_SCALE_VC_SHIFT MRV_RSZ_SCALE_SHIFT
++
++
++#define MRV_MRSZ_PHASE_HY
++#define MRV_MRSZ_PHASE_HY_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_PHASE_HY_SHIFT MRV_RSZ_SCALE_SHIFT
++
++
++#define MRV_MRSZ_PHASE_HC
++#define MRV_MRSZ_PHASE_HC_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_PHASE_HC_SHIFT MRV_RSZ_SCALE_SHIFT
++
++
++#define MRV_MRSZ_PHASE_VY
++#define MRV_MRSZ_PHASE_VY_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_PHASE_VY_SHIFT MRV_RSZ_SCALE_SHIFT
++
++
++#define MRV_MRSZ_PHASE_VC
++#define MRV_MRSZ_PHASE_VC_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_PHASE_VC_SHIFT MRV_RSZ_SCALE_SHIFT
++
++
++#define MRV_MRSZ_SCALE_LUT_ADDR
++#define MRV_MRSZ_SCALE_LUT_ADDR_MASK 0x0000003F
++#define MRV_MRSZ_SCALE_LUT_ADDR_SHIFT 0
++
++
++#define MRV_MRSZ_SCALE_LUT
++#define MRV_MRSZ_SCALE_LUT_MASK 0x0000003F
++#define MRV_MRSZ_SCALE_LUT_SHIFT 0
++
++
++#define MRV_MRSZ_SCALE_VC_UP_SHD
++#define MRV_MRSZ_SCALE_VC_UP_SHD_MASK 0x00000080
++#define MRV_MRSZ_SCALE_VC_UP_SHD_SHIFT 7
++#define MRV_MRSZ_SCALE_VC_UP_SHD_UPSCALE 1
++#define MRV_MRSZ_SCALE_VC_UP_SHD_DOWNSCALE 0
++#define MRV_MRSZ_SCALE_VY_UP_SHD
++#define MRV_MRSZ_SCALE_VY_UP_SHD_MASK 0x00000040
++#define MRV_MRSZ_SCALE_VY_UP_SHD_SHIFT 6
++#define MRV_MRSZ_SCALE_VY_UP_SHD_UPSCALE 1
++#define MRV_MRSZ_SCALE_VY_UP_SHD_DOWNSCALE 0
++#define MRV_MRSZ_SCALE_HC_UP_SHD
++#define MRV_MRSZ_SCALE_HC_UP_SHD_MASK 0x00000020
++#define MRV_MRSZ_SCALE_HC_UP_SHD_SHIFT 5
++#define MRV_MRSZ_SCALE_HC_UP_SHD_UPSCALE 1
++#define MRV_MRSZ_SCALE_HC_UP_SHD_DOWNSCALE 0
++#define MRV_MRSZ_SCALE_HY_UP_SHD
++#define MRV_MRSZ_SCALE_HY_UP_SHD_MASK 0x00000010
++#define MRV_MRSZ_SCALE_HY_UP_SHD_SHIFT 4
++#define MRV_MRSZ_SCALE_HY_UP_SHD_UPSCALE 1
++#define MRV_MRSZ_SCALE_HY_UP_SHD_DOWNSCALE 0
++#define MRV_MRSZ_SCALE_VC_ENABLE_SHD
++#define MRV_MRSZ_SCALE_VC_ENABLE_SHD_MASK 0x00000008
++#define MRV_MRSZ_SCALE_VC_ENABLE_SHD_SHIFT 3
++#define MRV_MRSZ_SCALE_VY_ENABLE_SHD
++#define MRV_MRSZ_SCALE_VY_ENABLE_SHD_MASK 0x00000004
++#define MRV_MRSZ_SCALE_VY_ENABLE_SHD_SHIFT 2
++#define MRV_MRSZ_SCALE_HC_ENABLE_SHD
++#define MRV_MRSZ_SCALE_HC_ENABLE_SHD_MASK 0x00000002
++#define MRV_MRSZ_SCALE_HC_ENABLE_SHD_SHIFT 1
++#define MRV_MRSZ_SCALE_HY_ENABLE_SHD
++#define MRV_MRSZ_SCALE_HY_ENABLE_SHD_MASK 0x00000001
++#define MRV_MRSZ_SCALE_HY_ENABLE_SHD_SHIFT 0
++
++#define MRV_MRSZ_SCALE_HY_SHD
++#define MRV_MRSZ_SCALE_HY_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_SCALE_HY_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MRSZ_SCALE_HCB_SHD
++#define MRV_MRSZ_SCALE_HCB_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_SCALE_HCB_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MRSZ_SCALE_HCR_SHD
++#define MRV_MRSZ_SCALE_HCR_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_SCALE_HCR_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++
++#define MRV_MRSZ_SCALE_VY_SHD
++#define MRV_MRSZ_SCALE_VY_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_SCALE_VY_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MRSZ_SCALE_VC_SHD
++#define MRV_MRSZ_SCALE_VC_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_SCALE_VC_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MRSZ_PHASE_HY_SHD
++#define MRV_MRSZ_PHASE_HY_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_PHASE_HY_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MRSZ_PHASE_HC_SHD
++#define MRV_MRSZ_PHASE_HC_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_PHASE_HC_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MRSZ_PHASE_VY_SHD
++#define MRV_MRSZ_PHASE_VY_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_PHASE_VY_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MRSZ_PHASE_VC_SHD
++#define MRV_MRSZ_PHASE_VC_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_MRSZ_PHASE_VC_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_CFG_UPD
++#define MRV_SRSZ_CFG_UPD_MASK 0x00000100
++#define MRV_SRSZ_CFG_UPD_SHIFT 8
++#define MRV_SRSZ_SCALE_VC_UP
++#define MRV_SRSZ_SCALE_VC_UP_MASK 0x00000080
++#define MRV_SRSZ_SCALE_VC_UP_SHIFT 7
++#define MRV_SRSZ_SCALE_VC_UP_UPSCALE 1
++#define MRV_SRSZ_SCALE_VC_UP_DOWNSCALE 0
++#define MRV_SRSZ_SCALE_VY_UP
++#define MRV_SRSZ_SCALE_VY_UP_MASK 0x00000040
++#define MRV_SRSZ_SCALE_VY_UP_SHIFT 6
++#define MRV_SRSZ_SCALE_VY_UP_UPSCALE 1
++#define MRV_SRSZ_SCALE_VY_UP_DOWNSCALE 0
++#define MRV_SRSZ_SCALE_HC_UP
++#define MRV_SRSZ_SCALE_HC_UP_MASK 0x00000020
++#define MRV_SRSZ_SCALE_HC_UP_SHIFT 5
++#define MRV_SRSZ_SCALE_HC_UP_UPSCALE 1
++#define MRV_SRSZ_SCALE_HC_UP_DOWNSCALE 0
++#define MRV_SRSZ_SCALE_HY_UP
++#define MRV_SRSZ_SCALE_HY_UP_MASK 0x00000010
++#define MRV_SRSZ_SCALE_HY_UP_SHIFT 4
++#define MRV_SRSZ_SCALE_HY_UP_UPSCALE 1
++#define MRV_SRSZ_SCALE_HY_UP_DOWNSCALE 0
++
++#define MRV_SRSZ_SCALE_VC_ENABLE
++#define MRV_SRSZ_SCALE_VC_ENABLE_MASK 0x00000008
++#define MRV_SRSZ_SCALE_VC_ENABLE_SHIFT 3
++#define MRV_SRSZ_SCALE_VY_ENABLE
++#define MRV_SRSZ_SCALE_VY_ENABLE_MASK 0x00000004
++#define MRV_SRSZ_SCALE_VY_ENABLE_SHIFT 2
++#define MRV_SRSZ_SCALE_HC_ENABLE
++#define MRV_SRSZ_SCALE_HC_ENABLE_MASK 0x00000002
++#define MRV_SRSZ_SCALE_HC_ENABLE_SHIFT 1
++#define MRV_SRSZ_SCALE_HY_ENABLE
++#define MRV_SRSZ_SCALE_HY_ENABLE_MASK 0x00000001
++#define MRV_SRSZ_SCALE_HY_ENABLE_SHIFT 0
++
++#define MRV_SRSZ_SCALE_HY
++#define MRV_SRSZ_SCALE_HY_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_SCALE_HY_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_SCALE_HCB
++#define MRV_SRSZ_SCALE_HCB_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_SCALE_HCB_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_SCALE_HCR
++#define MRV_SRSZ_SCALE_HCR_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_SCALE_HCR_SHIFT MRV_RSZ_SCALE_SHIFT
++
++
++#define MRV_SRSZ_SCALE_VY
++#define MRV_SRSZ_SCALE_VY_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_SCALE_VY_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_SCALE_VC
++#define MRV_SRSZ_SCALE_VC_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_SCALE_VC_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_PHASE_HY
++#define MRV_SRSZ_PHASE_HY_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_PHASE_HY_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_PHASE_HC
++#define MRV_SRSZ_PHASE_HC_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_PHASE_HC_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_PHASE_VY
++#define MRV_SRSZ_PHASE_VY_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_PHASE_VY_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_PHASE_VC
++#define MRV_SRSZ_PHASE_VC_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_PHASE_VC_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_SCALE_LUT_ADDR
++#define MRV_SRSZ_SCALE_LUT_ADDR_MASK 0x0000003F
++#define MRV_SRSZ_SCALE_LUT_ADDR_SHIFT 0
++
++
++#define MRV_SRSZ_SCALE_LUT
++#define MRV_SRSZ_SCALE_LUT_MASK 0x0000003F
++#define MRV_SRSZ_SCALE_LUT_SHIFT 0
++
++
++#define MRV_SRSZ_SCALE_VC_UP_SHD
++#define MRV_SRSZ_SCALE_VC_UP_SHD_MASK 0x00000080
++#define MRV_SRSZ_SCALE_VC_UP_SHD_SHIFT 7
++#define MRV_SRSZ_SCALE_VC_UP_SHD_UPSCALE 1
++#define MRV_SRSZ_SCALE_VC_UP_SHD_DOWNSCALE 0
++#define MRV_SRSZ_SCALE_VY_UP_SHD
++#define MRV_SRSZ_SCALE_VY_UP_SHD_MASK 0x00000040
++#define MRV_SRSZ_SCALE_VY_UP_SHD_SHIFT 6
++#define MRV_SRSZ_SCALE_VY_UP_SHD_UPSCALE 1
++#define MRV_SRSZ_SCALE_VY_UP_SHD_DOWNSCALE 0
++#define MRV_SRSZ_SCALE_HC_UP_SHD
++#define MRV_SRSZ_SCALE_HC_UP_SHD_MASK 0x00000020
++#define MRV_SRSZ_SCALE_HC_UP_SHD_SHIFT 5
++#define MRV_SRSZ_SCALE_HC_UP_SHD_UPSCALE 1
++#define MRV_SRSZ_SCALE_HC_UP_SHD_DOWNSCALE 0
++#define MRV_SRSZ_SCALE_HY_UP_SHD
++#define MRV_SRSZ_SCALE_HY_UP_SHD_MASK 0x00000010
++#define MRV_SRSZ_SCALE_HY_UP_SHD_SHIFT 4
++#define MRV_SRSZ_SCALE_HY_UP_SHD_UPSCALE 1
++#define MRV_SRSZ_SCALE_HY_UP_SHD_DOWNSCALE 0
++#define MRV_SRSZ_SCALE_VC_ENABLE_SHD
++#define MRV_SRSZ_SCALE_VC_ENABLE_SHD_MASK 0x00000008
++#define MRV_SRSZ_SCALE_VC_ENABLE_SHD_SHIFT 3
++#define MRV_SRSZ_SCALE_VY_ENABLE_SHD
++#define MRV_SRSZ_SCALE_VY_ENABLE_SHD_MASK 0x00000004
++#define MRV_SRSZ_SCALE_VY_ENABLE_SHD_SHIFT 2
++#define MRV_SRSZ_SCALE_HC_ENABLE_SHD
++#define MRV_SRSZ_SCALE_HC_ENABLE_SHD_MASK 0x00000002
++#define MRV_SRSZ_SCALE_HC_ENABLE_SHD_SHIFT 1
++#define MRV_SRSZ_SCALE_HY_ENABLE_SHD
++#define MRV_SRSZ_SCALE_HY_ENABLE_SHD_MASK 0x00000001
++#define MRV_SRSZ_SCALE_HY_ENABLE_SHD_SHIFT 0
++
++#define MRV_SRSZ_SCALE_HY_SHD
++#define MRV_SRSZ_SCALE_HY_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_SCALE_HY_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_SCALE_HCB_SHD
++#define MRV_SRSZ_SCALE_HCB_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_SCALE_HCB_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_SCALE_HCR_SHD
++#define MRV_SRSZ_SCALE_HCR_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_SCALE_HCR_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++
++#define MRV_SRSZ_SCALE_VY_SHD
++#define MRV_SRSZ_SCALE_VY_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_SCALE_VY_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_SCALE_VC_SHD
++#define MRV_SRSZ_SCALE_VC_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_SCALE_VC_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_PHASE_HY_SHD
++#define MRV_SRSZ_PHASE_HY_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_PHASE_HY_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_PHASE_HC_SHD
++#define MRV_SRSZ_PHASE_HC_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_PHASE_HC_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_PHASE_VY_SHD
++#define MRV_SRSZ_PHASE_VY_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_PHASE_VY_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_SRSZ_PHASE_VC_SHD
++#define MRV_SRSZ_PHASE_VC_SHD_MASK MRV_RSZ_SCALE_MASK
++#define MRV_SRSZ_PHASE_VC_SHD_SHIFT MRV_RSZ_SCALE_SHIFT
++
++#define MRV_MI_SP_OUTPUT_FORMAT
++#define MRV_MI_SP_OUTPUT_FORMAT_MASK 0x70000000
++#define MRV_MI_SP_OUTPUT_FORMAT_SHIFT 28
++#define MRV_MI_SP_OUTPUT_FORMAT_RGB888 6
++#define MRV_MI_SP_OUTPUT_FORMAT_RGB666 5
++#define MRV_MI_SP_OUTPUT_FORMAT_RGB565 4
++#define MRV_MI_SP_OUTPUT_FORMAT_YUV444 3
++#define MRV_MI_SP_OUTPUT_FORMAT_YUV422 2
++#define MRV_MI_SP_OUTPUT_FORMAT_YUV420 1
++#define MRV_MI_SP_OUTPUT_FORMAT_YUV400 0
++#define MRV_MI_SP_INPUT_FORMAT
++#define MRV_MI_SP_INPUT_FORMAT_MASK 0x0C000000
++#define MRV_MI_SP_INPUT_FORMAT_SHIFT 26
++#define MRV_MI_SP_INPUT_FORMAT_YUV444 3
++#define MRV_MI_SP_INPUT_FORMAT_YUV422 2
++#define MRV_MI_SP_INPUT_FORMAT_YUV420 1
++#define MRV_MI_SP_INPUT_FORMAT_YUV400 0
++#define MRV_MI_SP_WRITE_FORMAT
++#define MRV_MI_SP_WRITE_FORMAT_MASK 0x03000000
++#define MRV_MI_SP_WRITE_FORMAT_SHIFT 24
++#define MRV_MI_SP_WRITE_FORMAT_PLANAR 0
++#define MRV_MI_SP_WRITE_FORMAT_SEMIPLANAR 1
++#define MRV_MI_SP_WRITE_FORMAT_INTERLEAVED 2
++#define MRV_MI_MP_WRITE_FORMAT
++#define MRV_MI_MP_WRITE_FORMAT_MASK 0x00C00000
++#define MRV_MI_MP_WRITE_FORMAT_SHIFT 22
++#define MRV_MI_MP_WRITE_FORMAT_PLANAR 0
++#define MRV_MI_MP_WRITE_FORMAT_SEMIPLANAR 1
++#define MRV_MI_MP_WRITE_FORMAT_INTERLEAVED 2
++#define MRV_MI_MP_WRITE_FORMAT_RAW_8 0
++#define MRV_MI_MP_WRITE_FORMAT_RAW_12 2
++#define MRV_MI_INIT_OFFSET_EN
++#define MRV_MI_INIT_OFFSET_EN_MASK 0x00200000
++#define MRV_MI_INIT_OFFSET_EN_SHIFT 21
++
++#define MRV_MI_INIT_BASE_EN
++#define MRV_MI_INIT_BASE_EN_MASK 0x00100000
++#define MRV_MI_INIT_BASE_EN_SHIFT 20
++#define MRV_MI_BURST_LEN_CHROM
++#define MRV_MI_BURST_LEN_CHROM_MASK 0x000C0000
++#define MRV_MI_BURST_LEN_CHROM_SHIFT 18
++#define MRV_MI_BURST_LEN_CHROM_4 0
++#define MRV_MI_BURST_LEN_CHROM_8 1
++#define MRV_MI_BURST_LEN_CHROM_16 2
++
++#define MRV_MI_BURST_LEN_LUM
++#define MRV_MI_BURST_LEN_LUM_MASK 0x00030000
++#define MRV_MI_BURST_LEN_LUM_SHIFT 16
++#define MRV_MI_BURST_LEN_LUM_4 0
++#define MRV_MI_BURST_LEN_LUM_8 1
++#define MRV_MI_BURST_LEN_LUM_16 2
++
++#define MRV_MI_LAST_PIXEL_SIG_EN
++#define MRV_MI_LAST_PIXEL_SIG_EN_MASK 0x00008000
++#define MRV_MI_LAST_PIXEL_SIG_EN_SHIFT 15
++
++ #define MRV_MI_422NONCOSITED
++ #define MRV_MI_422NONCOSITED_MASK 0x00000400
++ #define MRV_MI_422NONCOSITED_SHIFT 10
++ #define MRV_MI_CBCR_FULL_RANGE
++ #define MRV_MI_CBCR_FULL_RANGE_MASK 0x00000200
++ #define MRV_MI_CBCR_FULL_RANGE_SHIFT 9
++ #define MRV_MI_Y_FULL_RANGE
++ #define MRV_MI_Y_FULL_RANGE_MASK 0x00000100
++ #define MRV_MI_Y_FULL_RANGE_SHIFT 8
++#define MRV_MI_BYTE_SWAP
++#define MRV_MI_BYTE_SWAP_MASK 0x00000080
++#define MRV_MI_BYTE_SWAP_SHIFT 7
++#define MRV_MI_ROT
++#define MRV_MI_ROT_MASK 0x00000040
++#define MRV_MI_ROT_SHIFT 6
++#define MRV_MI_V_FLIP
++#define MRV_MI_V_FLIP_MASK 0x00000020
++#define MRV_MI_V_FLIP_SHIFT 5
++
++#define MRV_MI_H_FLIP
++#define MRV_MI_H_FLIP_MASK 0x00000010
++#define MRV_MI_H_FLIP_SHIFT 4
++#define MRV_MI_RAW_ENABLE
++#define MRV_MI_RAW_ENABLE_MASK 0x00000008
++#define MRV_MI_RAW_ENABLE_SHIFT 3
++#define MRV_MI_JPEG_ENABLE
++#define MRV_MI_JPEG_ENABLE_MASK 0x00000004
++#define MRV_MI_JPEG_ENABLE_SHIFT 2
++#define MRV_MI_SP_ENABLE
++#define MRV_MI_SP_ENABLE_MASK 0x00000002
++#define MRV_MI_SP_ENABLE_SHIFT 1
++#define MRV_MI_MP_ENABLE
++#define MRV_MI_MP_ENABLE_MASK 0x00000001
++#define MRV_MI_MP_ENABLE_SHIFT 0
++
++
++#define MRV_MI_ROT_AND_FLIP
++#define MRV_MI_ROT_AND_FLIP_MASK \
++ (MRV_MI_H_FLIP_MASK | MRV_MI_V_FLIP_MASK | MRV_MI_ROT_MASK)
++#define MRV_MI_ROT_AND_FLIP_SHIFT \
++ (MRV_MI_H_FLIP_SHIFT)
++#define MRV_MI_ROT_AND_FLIP_H_FLIP \
++ (MRV_MI_H_FLIP_MASK >> MRV_MI_ROT_AND_FLIP_SHIFT)
++#define MRV_MI_ROT_AND_FLIP_V_FLIP \
++ (MRV_MI_V_FLIP_MASK >> MRV_MI_ROT_AND_FLIP_SHIFT)
++#define MRV_MI_ROT_AND_FLIP_ROTATE \
++ (MRV_MI_ROT_MASK >> MRV_MI_ROT_AND_FLIP_SHIFT)
++
++#define MRV_MI_MI_CFG_UPD
++#define MRV_MI_MI_CFG_UPD_MASK 0x00000010
++#define MRV_MI_MI_CFG_UPD_SHIFT 4
++#define MRV_MI_MI_SKIP
++#define MRV_MI_MI_SKIP_MASK 0x00000004
++#define MRV_MI_MI_SKIP_SHIFT 2
++
++#define MRV_MI_MP_Y_BASE_AD_INIT
++#define MRV_MI_MP_Y_BASE_AD_INIT_MASK 0xFFFFFFFC
++#define MRV_MI_MP_Y_BASE_AD_INIT_SHIFT 0
++#define MRV_MI_MP_Y_BASE_AD_INIT_VALID_MASK (MRV_MI_MP_Y_BASE_AD_INIT_MASK &\
++ ~0x00000003)
++#define MRV_MI_MP_Y_SIZE_INIT
++#define MRV_MI_MP_Y_SIZE_INIT_MASK 0x01FFFFFC
++#define MRV_MI_MP_Y_SIZE_INIT_SHIFT 0
++#define MRV_MI_MP_Y_SIZE_INIT_VALID_MASK (MRV_MI_MP_Y_SIZE_INIT_MASK &\
++ ~0x00000003)
++#define MRV_MI_MP_Y_OFFS_CNT_INIT
++#define MRV_MI_MP_Y_OFFS_CNT_INIT_MASK 0x01FFFFFC
++#define MRV_MI_MP_Y_OFFS_CNT_INIT_SHIFT 0
++#define MRV_MI_MP_Y_OFFS_CNT_INIT_VALID_MASK \
++ (MRV_MI_MP_Y_OFFS_CNT_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_MP_Y_OFFS_CNT_START
++#define MRV_MI_MP_Y_OFFS_CNT_START_MASK 0x01FFFFFC
++#define MRV_MI_MP_Y_OFFS_CNT_START_SHIFT 0
++#define MRV_MI_MP_Y_OFFS_CNT_START_VALID_MASK \
++ (MRV_MI_MP_Y_OFFS_CNT_START_MASK & ~0x00000003)
++
++#define MRV_MI_MP_Y_IRQ_OFFS_INIT
++#define MRV_MI_MP_Y_IRQ_OFFS_INIT_MASK 0x01FFFFFC
++#define MRV_MI_MP_Y_IRQ_OFFS_INIT_SHIFT 0
++#define MRV_MI_MP_Y_IRQ_OFFS_INIT_VALID_MASK \
++ (MRV_MI_MP_Y_IRQ_OFFS_INIT_MASK & ~0x00000003)
++#define MRV_MI_MP_CB_BASE_AD_INIT
++#define MRV_MI_MP_CB_BASE_AD_INIT_MASK 0xFFFFFFFC
++#define MRV_MI_MP_CB_BASE_AD_INIT_SHIFT 0
++#define MRV_MI_MP_CB_BASE_AD_INIT_VALID_MASK \
++ (MRV_MI_MP_CB_BASE_AD_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CB_SIZE_INIT
++#define MRV_MI_MP_CB_SIZE_INIT_MASK 0x00FFFFFC
++#define MRV_MI_MP_CB_SIZE_INIT_SHIFT 0
++#define MRV_MI_MP_CB_SIZE_INIT_VALID_MASK \
++ (MRV_MI_MP_CB_SIZE_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CB_OFFS_CNT_INIT
++#define MRV_MI_MP_CB_OFFS_CNT_INIT_MASK 0x00FFFFFC
++#define MRV_MI_MP_CB_OFFS_CNT_INIT_SHIFT 0
++#define MRV_MI_MP_CB_OFFS_CNT_INIT_VALID_MASK \
++ (MRV_MI_MP_CB_OFFS_CNT_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CB_OFFS_CNT_START
++#define MRV_MI_MP_CB_OFFS_CNT_START_MASK 0x00FFFFFC
++#define MRV_MI_MP_CB_OFFS_CNT_START_SHIFT 0
++#define MRV_MI_MP_CB_OFFS_CNT_START_VALID_MASK \
++ (MRV_MI_MP_CB_OFFS_CNT_START_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CR_BASE_AD_INIT
++#define MRV_MI_MP_CR_BASE_AD_INIT_MASK 0xFFFFFFFC
++#define MRV_MI_MP_CR_BASE_AD_INIT_SHIFT 0
++#define MRV_MI_MP_CR_BASE_AD_INIT_VALID_MASK \
++ (MRV_MI_MP_CR_BASE_AD_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CR_SIZE_INIT
++#define MRV_MI_MP_CR_SIZE_INIT_MASK 0x00FFFFFC
++#define MRV_MI_MP_CR_SIZE_INIT_SHIFT 0
++#define MRV_MI_MP_CR_SIZE_INIT_VALID_MASK \
++ (MRV_MI_MP_CR_SIZE_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CR_OFFS_CNT_INIT
++#define MRV_MI_MP_CR_OFFS_CNT_INIT_MASK 0x00FFFFFC
++#define MRV_MI_MP_CR_OFFS_CNT_INIT_SHIFT 0
++#define MRV_MI_MP_CR_OFFS_CNT_INIT_VALID_MASK \
++ (MRV_MI_MP_CR_OFFS_CNT_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CR_OFFS_CNT_START
++#define MRV_MI_MP_CR_OFFS_CNT_START_MASK 0x00FFFFFC
++#define MRV_MI_MP_CR_OFFS_CNT_START_SHIFT 0
++#define MRV_MI_MP_CR_OFFS_CNT_START_VALID_MASK \
++ (MRV_MI_MP_CR_OFFS_CNT_START_MASK & ~0x00000003)
++
++#define MRV_MI_SP_Y_BASE_AD_INIT
++#define MRV_MI_SP_Y_BASE_AD_INIT_MASK 0xFFFFFFFC
++#define MRV_MI_SP_Y_BASE_AD_INIT_SHIFT 0
++#define MRV_MI_SP_Y_BASE_AD_INIT_VALID_MASK \
++ (MRV_MI_SP_Y_BASE_AD_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_Y_SIZE_INIT
++#define MRV_MI_SP_Y_SIZE_INIT_MASK 0x01FFFFFC
++#define MRV_MI_SP_Y_SIZE_INIT_SHIFT 0
++#define MRV_MI_SP_Y_SIZE_INIT_VALID_MASK \
++ (MRV_MI_SP_Y_SIZE_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_Y_OFFS_CNT_INIT
++#define MRV_MI_SP_Y_OFFS_CNT_INIT_MASK 0x01FFFFFC
++#define MRV_MI_SP_Y_OFFS_CNT_INIT_SHIFT 0
++#define MRV_MI_SP_Y_OFFS_CNT_INIT_VALID_MASK \
++ (MRV_MI_SP_Y_OFFS_CNT_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_Y_OFFS_CNT_START
++#define MRV_MI_SP_Y_OFFS_CNT_START_MASK 0x01FFFFFC
++#define MRV_MI_SP_Y_OFFS_CNT_START_SHIFT 0
++#define MRV_MI_SP_Y_OFFS_CNT_START_VALID_MASK \
++ (MRV_MI_SP_Y_OFFS_CNT_START_MASK & ~0x00000003)
++
++#define MRV_MI_SP_Y_LLENGTH
++#define MRV_MI_SP_Y_LLENGTH_MASK 0x00001FFF
++#define MRV_MI_SP_Y_LLENGTH_SHIFT 0
++#define MRV_MI_SP_Y_LLENGTH_VALID_MASK \
++ (MRV_MI_SP_Y_LLENGTH_MASK & ~0x00000000)
++
++#define MRV_MI_SP_CB_BASE_AD_INIT
++#define MRV_MI_SP_CB_BASE_AD_INIT_MASK 0xFFFFFFFF
++#define MRV_MI_SP_CB_BASE_AD_INIT_SHIFT 0
++#define MRV_MI_SP_CB_BASE_AD_INIT_VALID_MASK \
++ (MRV_MI_SP_CB_BASE_AD_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CB_SIZE_INIT
++#define MRV_MI_SP_CB_SIZE_INIT_MASK 0x00FFFFFF
++#define MRV_MI_SP_CB_SIZE_INIT_SHIFT 0
++#define MRV_MI_SP_CB_SIZE_INIT_VALID_MASK \
++ (MRV_MI_SP_CB_SIZE_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CB_OFFS_CNT_INIT
++#define MRV_MI_SP_CB_OFFS_CNT_INIT_MASK 0x00FFFFFF
++#define MRV_MI_SP_CB_OFFS_CNT_INIT_SHIFT 0
++#define MRV_MI_SP_CB_OFFS_CNT_INIT_VALID_MASK \
++ (MRV_MI_SP_CB_OFFS_CNT_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CB_OFFS_CNT_START
++#define MRV_MI_SP_CB_OFFS_CNT_START_MASK 0x00FFFFFF
++#define MRV_MI_SP_CB_OFFS_CNT_START_SHIFT 0
++#define MRV_MI_SP_CB_OFFS_CNT_START_VALID_MASK \
++ (MRV_MI_SP_CB_OFFS_CNT_START_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CR_BASE_AD_INIT
++#define MRV_MI_SP_CR_BASE_AD_INIT_MASK 0xFFFFFFFF
++#define MRV_MI_SP_CR_BASE_AD_INIT_SHIFT 0
++#define MRV_MI_SP_CR_BASE_AD_INIT_VALID_MASK \
++ (MRV_MI_SP_CR_BASE_AD_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CR_SIZE_INIT
++#define MRV_MI_SP_CR_SIZE_INIT_MASK 0x00FFFFFF
++#define MRV_MI_SP_CR_SIZE_INIT_SHIFT 0
++#define MRV_MI_SP_CR_SIZE_INIT_VALID_MASK \
++ (MRV_MI_SP_CR_SIZE_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CR_OFFS_CNT_INIT
++#define MRV_MI_SP_CR_OFFS_CNT_INIT_MASK 0x00FFFFFF
++#define MRV_MI_SP_CR_OFFS_CNT_INIT_SHIFT 0
++#define MRV_MI_SP_CR_OFFS_CNT_INIT_VALID_MASK \
++ (MRV_MI_SP_CR_OFFS_CNT_INIT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CR_OFFS_CNT_START
++#define MRV_MI_SP_CR_OFFS_CNT_START_MASK 0x00FFFFFF
++#define MRV_MI_SP_CR_OFFS_CNT_START_SHIFT 0
++#define MRV_MI_SP_CR_OFFS_CNT_START_VALID_MASK \
++ (MRV_MI_SP_CR_OFFS_CNT_START_MASK & ~0x00000003)
++
++#define MRV_MI_BYTE_CNT
++#define MRV_MI_BYTE_CNT_MASK 0x01FFFFFF
++#define MRV_MI_BYTE_CNT_SHIFT 0
++
++#define MRV_MI_RAW_ENABLE_OUT
++#define MRV_MI_RAW_ENABLE_OUT_MASK 0x00080000
++#define MRV_MI_RAW_ENABLE_OUT_SHIFT 19
++#define MRV_MI_JPEG_ENABLE_OUT
++#define MRV_MI_JPEG_ENABLE_OUT_MASK 0x00040000
++#define MRV_MI_JPEG_ENABLE_OUT_SHIFT 18
++#define MRV_MI_SP_ENABLE_OUT
++#define MRV_MI_SP_ENABLE_OUT_MASK 0x00020000
++#define MRV_MI_SP_ENABLE_OUT_SHIFT 17
++#define MRV_MI_MP_ENABLE_OUT
++#define MRV_MI_MP_ENABLE_OUT_MASK 0x00010000
++#define MRV_MI_MP_ENABLE_OUT_SHIFT 16
++#define MRV_MI_RAW_ENABLE_IN
++#define MRV_MI_RAW_ENABLE_IN_MASK 0x00000020
++#define MRV_MI_RAW_ENABLE_IN_SHIFT 5
++#define MRV_MI_JPEG_ENABLE_IN
++#define MRV_MI_JPEG_ENABLE_IN_MASK 0x00000010
++#define MRV_MI_JPEG_ENABLE_IN_SHIFT 4
++#define MRV_MI_SP_ENABLE_IN
++#define MRV_MI_SP_ENABLE_IN_MASK 0x00000004
++#define MRV_MI_SP_ENABLE_IN_SHIFT 2
++#define MRV_MI_MP_ENABLE_IN
++#define MRV_MI_MP_ENABLE_IN_MASK 0x00000001
++#define MRV_MI_MP_ENABLE_IN_SHIFT 0
++
++#define MRV_MI_MP_Y_BASE_AD
++#define MRV_MI_MP_Y_BASE_AD_MASK 0xFFFFFFFC
++#define MRV_MI_MP_Y_BASE_AD_SHIFT 0
++#define MRV_MI_MP_Y_BASE_AD_VALID_MASK \
++ (MRV_MI_MP_Y_BASE_AD_MASK & ~0x00000003)
++
++#define MRV_MI_MP_Y_SIZE
++#define MRV_MI_MP_Y_SIZE_MASK 0x01FFFFFC
++#define MRV_MI_MP_Y_SIZE_SHIFT 0
++#define MRV_MI_MP_Y_SIZE_VALID_MASK (MRV_MI_MP_Y_SIZE_MASK & ~0x00000003)
++
++#define MRV_MI_MP_Y_OFFS_CNT
++#define MRV_MI_MP_Y_OFFS_CNT_MASK 0x01FFFFFC
++#define MRV_MI_MP_Y_OFFS_CNT_SHIFT 0
++#define MRV_MI_MP_Y_OFFS_CNT_VALID_MASK \
++ (MRV_MI_MP_Y_OFFS_CNT_MASK & ~0x00000003)
++
++#define MRV_MI_MP_Y_IRQ_OFFS
++#define MRV_MI_MP_Y_IRQ_OFFS_MASK 0x01FFFFFC
++#define MRV_MI_MP_Y_IRQ_OFFS_SHIFT 0
++#define MRV_MI_MP_Y_IRQ_OFFS_VALID_MASK \
++ (MRV_MI_MP_Y_IRQ_OFFS_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CB_BASE_AD
++#define MRV_MI_MP_CB_BASE_AD_MASK 0xFFFFFFFF
++#define MRV_MI_MP_CB_BASE_AD_SHIFT 0
++#define MRV_MI_MP_CB_BASE_AD_VALID_MASK \
++ (MRV_MI_MP_CB_BASE_AD_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CB_SIZE
++#define MRV_MI_MP_CB_SIZE_MASK 0x00FFFFFF
++#define MRV_MI_MP_CB_SIZE_SHIFT 0
++#define MRV_MI_MP_CB_SIZE_VALID_MASK (MRV_MI_MP_CB_SIZE_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CB_OFFS_CNT
++#define MRV_MI_MP_CB_OFFS_CNT_MASK 0x00FFFFFF
++#define MRV_MI_MP_CB_OFFS_CNT_SHIFT 0
++#define MRV_MI_MP_CB_OFFS_CNT_VALID_MASK \
++ (MRV_MI_MP_CB_OFFS_CNT_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CR_BASE_AD
++#define MRV_MI_MP_CR_BASE_AD_MASK 0xFFFFFFFF
++#define MRV_MI_MP_CR_BASE_AD_SHIFT 0
++#define MRV_MI_MP_CR_BASE_AD_VALID_MASK \
++ (MRV_MI_MP_CR_BASE_AD_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CR_SIZE
++#define MRV_MI_MP_CR_SIZE_MASK 0x00FFFFFF
++#define MRV_MI_MP_CR_SIZE_SHIFT 0
++#define MRV_MI_MP_CR_SIZE_VALID_MASK (MRV_MI_MP_CR_SIZE_MASK & ~0x00000003)
++
++#define MRV_MI_MP_CR_OFFS_CNT
++#define MRV_MI_MP_CR_OFFS_CNT_MASK 0x00FFFFFF
++#define MRV_MI_MP_CR_OFFS_CNT_SHIFT 0
++#define MRV_MI_MP_CR_OFFS_CNT_VALID_MASK \
++ (MRV_MI_MP_CR_OFFS_CNT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_Y_BASE_AD
++#define MRV_MI_SP_Y_BASE_AD_MASK 0xFFFFFFFF
++#define MRV_MI_SP_Y_BASE_AD_SHIFT 0
++#define MRV_MI_SP_Y_BASE_AD_VALID_MASK \
++ (MRV_MI_SP_Y_BASE_AD_MASK & ~0x00000003)
++
++#define MRV_MI_SP_Y_SIZE
++#define MRV_MI_SP_Y_SIZE_MASK 0x01FFFFFC
++#define MRV_MI_SP_Y_SIZE_SHIFT 0
++#define MRV_MI_SP_Y_SIZE_VALID_MASK (MRV_MI_SP_Y_SIZE_MASK & ~0x00000003)
++
++#define MRV_MI_SP_Y_OFFS_CNT
++#define MRV_MI_SP_Y_OFFS_CNT_MASK 0x01FFFFFC
++#define MRV_MI_SP_Y_OFFS_CNT_SHIFT 0
++#define MRV_MI_SP_Y_OFFS_CNT_VALID_MASK \
++ (MRV_MI_SP_Y_OFFS_CNT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CB_BASE_AD
++#define MRV_MI_SP_CB_BASE_AD_MASK 0xFFFFFFFF
++#define MRV_MI_SP_CB_BASE_AD_SHIFT 0
++#define MRV_MI_SP_CB_BASE_AD_VALID_MASK \
++ (MRV_MI_SP_CB_BASE_AD_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CB_SIZE
++#define MRV_MI_SP_CB_SIZE_MASK 0x00FFFFFF
++#define MRV_MI_SP_CB_SIZE_SHIFT 0
++#define MRV_MI_SP_CB_SIZE_VALID_MASK (MRV_MI_SP_CB_SIZE_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CB_OFFS_CNT
++#define MRV_MI_SP_CB_OFFS_CNT_MASK 0x00FFFFFF
++#define MRV_MI_SP_CB_OFFS_CNT_SHIFT 0
++#define MRV_MI_SP_CB_OFFS_CNT_VALID_MASK \
++ (MRV_MI_SP_CB_OFFS_CNT_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CR_BASE_AD
++#define MRV_MI_SP_CR_BASE_AD_MASK 0xFFFFFFFF
++#define MRV_MI_SP_CR_BASE_AD_SHIFT 0
++#define MRV_MI_SP_CR_BASE_AD_VALID_MASK \
++ (MRV_MI_SP_CR_BASE_AD_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CR_SIZE
++#define MRV_MI_SP_CR_SIZE_MASK 0x00FFFFFF
++#define MRV_MI_SP_CR_SIZE_SHIFT 0
++#define MRV_MI_SP_CR_SIZE_VALID_MASK (MRV_MI_SP_CR_SIZE_MASK & ~0x00000003)
++
++#define MRV_MI_SP_CR_OFFS_CNT
++#define MRV_MI_SP_CR_OFFS_CNT_MASK 0x00FFFFFF
++#define MRV_MI_SP_CR_OFFS_CNT_SHIFT 0
++#define MRV_MI_SP_CR_OFFS_CNT_VALID_MASK \
++ (MRV_MI_SP_CR_OFFS_CNT_MASK & ~0x00000003)
++
++
++#define MRV_MI_DMA_Y_PIC_START_AD
++#define MRV_MI_DMA_Y_PIC_START_AD_MASK 0xFFFFFFFF
++#define MRV_MI_DMA_Y_PIC_START_AD_SHIFT 0
++
++#define MRV_MI_DMA_Y_PIC_WIDTH
++#define MRV_MI_DMA_Y_PIC_WIDTH_MASK 0x00001FFF
++#define MRV_MI_DMA_Y_PIC_WIDTH_SHIFT 0
++
++#define MRV_MI_DMA_Y_LLENGTH
++#define MRV_MI_DMA_Y_LLENGTH_MASK 0x00001FFF
++#define MRV_MI_DMA_Y_LLENGTH_SHIFT 0
++
++#define MRV_MI_DMA_Y_PIC_SIZE
++#define MRV_MI_DMA_Y_PIC_SIZE_MASK 0x00FFFFFF
++#define MRV_MI_DMA_Y_PIC_SIZE_SHIFT 0
++
++#define MRV_MI_DMA_CB_PIC_START_AD
++#define MRV_MI_DMA_CB_PIC_START_AD_MASK 0xFFFFFFFF
++#define MRV_MI_DMA_CB_PIC_START_AD_SHIFT 0
++
++
++#define MRV_MI_DMA_CR_PIC_START_AD
++#define MRV_MI_DMA_CR_PIC_START_AD_MASK 0xFFFFFFFF
++#define MRV_MI_DMA_CR_PIC_START_AD_SHIFT 0
++
++
++#define MRV_MI_DMA_READY
++#define MRV_MI_DMA_READY_MASK 0x00000800
++#define MRV_MI_DMA_READY_SHIFT 11
++
++#define MRV_MI_AHB_ERROR
++
++#define MRV_MI_AHB_ERROR_MASK 0x00000400
++#define MRV_MI_AHB_ERROR_SHIFT 10
++#define MRV_MI_WRAP_SP_CR
++
++#define MRV_MI_WRAP_SP_CR_MASK 0x00000200
++#define MRV_MI_WRAP_SP_CR_SHIFT 9
++#define MRV_MI_WRAP_SP_CB
++
++#define MRV_MI_WRAP_SP_CB_MASK 0x00000100
++#define MRV_MI_WRAP_SP_CB_SHIFT 8
++#define MRV_MI_WRAP_SP_Y
++
++#define MRV_MI_WRAP_SP_Y_MASK 0x00000080
++#define MRV_MI_WRAP_SP_Y_SHIFT 7
++#define MRV_MI_WRAP_MP_CR
++
++#define MRV_MI_WRAP_MP_CR_MASK 0x00000040
++#define MRV_MI_WRAP_MP_CR_SHIFT 6
++#define MRV_MI_WRAP_MP_CB
++
++#define MRV_MI_WRAP_MP_CB_MASK 0x00000020
++#define MRV_MI_WRAP_MP_CB_SHIFT 5
++#define MRV_MI_WRAP_MP_Y
++
++#define MRV_MI_WRAP_MP_Y_MASK 0x00000010
++#define MRV_MI_WRAP_MP_Y_SHIFT 4
++#define MRV_MI_FILL_MP_Y
++
++#define MRV_MI_FILL_MP_Y_MASK 0x00000008
++#define MRV_MI_FILL_MP_Y_SHIFT 3
++#define MRV_MI_MBLK_LINE
++
++#define MRV_MI_MBLK_LINE_MASK 0x00000004
++#define MRV_MI_MBLK_LINE_SHIFT 2
++#define MRV_MI_SP_FRAME_END
++#define MRV_MI_SP_FRAME_END_MASK 0x00000002
++#define MRV_MI_SP_FRAME_END_SHIFT 1
++
++#define MRV_MI_MP_FRAME_END
++#define MRV_MI_MP_FRAME_END_MASK 0x00000001
++#define MRV_MI_MP_FRAME_END_SHIFT 0
++
++#ifndef MRV_MI_SP_FRAME_END
++#define MRV_MI_SP_FRAME_END_MASK 0
++#endif
++#ifndef MRV_MI_DMA_FRAME_END
++#define MRV_MI_DMA_FRAME_END_MASK 0
++#endif
++
++
++#define MRV_MI_ALLIRQS
++#define MRV_MI_ALLIRQS_MASK \
++(0 \
++| MRV_MI_DMA_READY_MASK \
++| MRV_MI_AHB_ERROR_MASK \
++| MRV_MI_WRAP_SP_CR_MASK \
++| MRV_MI_WRAP_SP_CB_MASK \
++| MRV_MI_WRAP_SP_Y_MASK \
++| MRV_MI_WRAP_MP_CR_MASK \
++| MRV_MI_WRAP_MP_CB_MASK \
++| MRV_MI_WRAP_MP_Y_MASK \
++| MRV_MI_FILL_MP_Y_MASK \
++| MRV_MI_MBLK_LINE_MASK \
++| MRV_MI_SP_FRAME_END_MASK \
++| MRV_MI_DMA_FRAME_END_MASK \
++| MRV_MI_MP_FRAME_END_MASK \
++)
++#define MRV_MI_ALLIRQS_SHIFT 0
++
++#define MRV_MI_AHB_READ_ERROR
++#define MRV_MI_AHB_READ_ERROR_MASK 0x00000200
++#define MRV_MI_AHB_READ_ERROR_SHIFT 9
++#define MRV_MI_AHB_WRITE_ERROR
++#define MRV_MI_AHB_WRITE_ERROR_MASK 0x00000100
++#define MRV_MI_AHB_WRITE_ERROR_SHIFT 8
++#define MRV_MI_SP_CR_FIFO_FULL
++#define MRV_MI_SP_CR_FIFO_FULL_MASK 0x00000040
++#define MRV_MI_SP_CR_FIFO_FULL_SHIFT 6
++#define MRV_MI_SP_CB_FIFO_FULL
++#define MRV_MI_SP_CB_FIFO_FULL_MASK 0x00000020
++#define MRV_MI_SP_CB_FIFO_FULL_SHIFT 5
++#define MRV_MI_SP_Y_FIFO_FULL
++#define MRV_MI_SP_Y_FIFO_FULL_MASK 0x00000010
++#define MRV_MI_SP_Y_FIFO_FULL_SHIFT 4
++#define MRV_MI_MP_CR_FIFO_FULL
++#define MRV_MI_MP_CR_FIFO_FULL_MASK 0x00000004
++#define MRV_MI_MP_CR_FIFO_FULL_SHIFT 2
++#define MRV_MI_MP_CB_FIFO_FULL
++#define MRV_MI_MP_CB_FIFO_FULL_MASK 0x00000002
++#define MRV_MI_MP_CB_FIFO_FULL_SHIFT 1
++#define MRV_MI_MP_Y_FIFO_FULL
++#define MRV_MI_MP_Y_FIFO_FULL_MASK 0x00000001
++#define MRV_MI_MP_Y_FIFO_FULL_SHIFT 0
++
++
++#define MRV_MI_ALL_STAT
++#define MRV_MI_ALL_STAT_MASK \
++(0 \
++| MRV_MI_AHB_READ_ERROR_MASK \
++| MRV_MI_AHB_WRITE_ERROR_MASK \
++| MRV_MI_SP_CR_FIFO_FULL_MASK \
++| MRV_MI_SP_CB_FIFO_FULL_MASK \
++| MRV_MI_SP_Y_FIFO_FULL_MASK \
++| MRV_MI_MP_CR_FIFO_FULL_MASK \
++| MRV_MI_MP_CB_FIFO_FULL_MASK \
++| MRV_MI_MP_Y_FIFO_FULL_MASK \
++)
++#define MRV_MI_ALL_STAT_SHIFT 0
++
++
++
++#define MRV_MI_SP_Y_PIC_WIDTH
++#define MRV_MI_SP_Y_PIC_WIDTH_MASK 0x00000FFF
++#define MRV_MI_SP_Y_PIC_WIDTH_SHIFT 0
++
++#define MRV_MI_SP_Y_PIC_HEIGHT
++#define MRV_MI_SP_Y_PIC_HEIGHT_MASK 0x00000FFF
++#define MRV_MI_SP_Y_PIC_HEIGHT_SHIFT 0
++
++#define MRV_MI_SP_Y_PIC_SIZE
++#define MRV_MI_SP_Y_PIC_SIZE_MASK 0x01FFFFFF
++#define MRV_MI_SP_Y_PIC_SIZE_SHIFT 0
++
++
++
++
++#define MRV_MI_DMA_FRAME_END_DISABLE
++#define MRV_MI_DMA_FRAME_END_DISABLE_MASK 0x00000400
++#define MRV_MI_DMA_FRAME_END_DISABLE_SHIFT 10
++#define MRV_MI_DMA_CONTINUOUS_EN
++#define MRV_MI_DMA_CONTINUOUS_EN_MASK 0x00000200
++#define MRV_MI_DMA_CONTINUOUS_EN_SHIFT 9
++#define MRV_MI_DMA_BYTE_SWAP
++#define MRV_MI_DMA_BYTE_SWAP_MASK 0x00000100
++#define MRV_MI_DMA_BYTE_SWAP_SHIFT 8
++#define MRV_MI_DMA_INOUT_FORMAT
++#define MRV_MI_DMA_INOUT_FORMAT_MASK 0x000000C0
++#define MRV_MI_DMA_INOUT_FORMAT_SHIFT 6
++#define MRV_MI_DMA_INOUT_FORMAT_YUV444 3
++#define MRV_MI_DMA_INOUT_FORMAT_YUV422 2
++#define MRV_MI_DMA_INOUT_FORMAT_YUV420 1
++#define MRV_MI_DMA_INOUT_FORMAT_YUV400 0
++#define MRV_MI_DMA_READ_FORMAT
++#define MRV_MI_DMA_READ_FORMAT_MASK 0x00000030
++#define MRV_MI_DMA_READ_FORMAT_SHIFT 4
++#define MRV_MI_DMA_READ_FORMAT_PLANAR 0
++#define MRV_MI_DMA_READ_FORMAT_SEMIPLANAR 1
++#define MRV_MI_DMA_READ_FORMAT_INTERLEAVED 2
++#define MRV_MI_DMA_BURST_LEN_CHROM
++#define MRV_MI_DMA_BURST_LEN_CHROM_MASK 0x0000000C
++#define MRV_MI_DMA_BURST_LEN_CHROM_SHIFT 2
++#define MRV_MI_DMA_BURST_LEN_CHROM_4 0
++#define MRV_MI_DMA_BURST_LEN_CHROM_8 1
++#define MRV_MI_DMA_BURST_LEN_CHROM_16 2
++#define MRV_MI_DMA_BURST_LEN_LUM
++#define MRV_MI_DMA_BURST_LEN_LUM_MASK 0x00000003
++#define MRV_MI_DMA_BURST_LEN_LUM_SHIFT 0
++#define MRV_MI_DMA_BURST_LEN_LUM_4 0
++#define MRV_MI_DMA_BURST_LEN_LUM_8 1
++#define MRV_MI_DMA_BURST_LEN_LUM_16 2
++
++
++
++#define MRV_MI_DMA_START
++#define MRV_MI_DMA_START_MASK 0x00000001
++#define MRV_MI_DMA_START_SHIFT 0
++
++
++#define MRV_MI_DMA_ACTIVE
++#define MRV_MI_DMA_ACTIVE_MASK 0x00000001
++#define MRV_MI_DMA_ACTIVE_SHIFT 0
++
++
++
++#define MRV_JPE_GEN_HEADER
++#define MRV_JPE_GEN_HEADER_MASK 0x00000001
++#define MRV_JPE_GEN_HEADER_SHIFT 0
++
++
++#define MRV_JPE_CONT_MODE
++#define MRV_JPE_CONT_MODE_MASK 0x00000030
++#define MRV_JPE_CONT_MODE_SHIFT 4
++#define MRV_JPE_CONT_MODE_STOP 0
++#define MRV_JPE_CONT_MODE_NEXT 1
++#define MRV_JPE_CONT_MODE_HEADER 3
++#define MRV_JPE_ENCODE
++#define MRV_JPE_ENCODE_MASK 0x00000001
++#define MRV_JPE_ENCODE_SHIFT 0
++
++
++#define MRV_JPE_JP_INIT
++#define MRV_JPE_JP_INIT_MASK 0x00000001
++#define MRV_JPE_JP_INIT_SHIFT 0
++
++
++#define MRV_JPE_Y_SCALE_EN
++#define MRV_JPE_Y_SCALE_EN_MASK 0x00000001
++#define MRV_JPE_Y_SCALE_EN_SHIFT 0
++
++
++#define MRV_JPE_CBCR_SCALE_EN
++#define MRV_JPE_CBCR_SCALE_EN_MASK 0x00000001
++#define MRV_JPE_CBCR_SCALE_EN_SHIFT 0
++
++#define MRV_JPE_TABLE_FLUSH
++#define MRV_JPE_TABLE_FLUSH_MASK 0x00000001
++#define MRV_JPE_TABLE_FLUSH_SHIFT 0
++
++
++#define MRV_JPE_ENC_HSIZE
++
++#define MRV_JPE_ENC_HSIZE_MASK 0x00001FFF
++
++#define MRV_JPE_ENC_HSIZE_SHIFT 0
++
++#define MRV_JPE_ENC_VSIZE
++
++#define MRV_JPE_ENC_VSIZE_MASK 0x00000FFF
++
++#define MRV_JPE_ENC_VSIZE_SHIFT 0
++
++
++#define MRV_JPE_ENC_PIC_FORMAT
++#define MRV_JPE_ENC_PIC_FORMAT_MASK 0x00000007
++#define MRV_JPE_ENC_PIC_FORMAT_SHIFT 0
++#define MRV_JPE_ENC_PIC_FORMAT_422 1
++#define MRV_JPE_ENC_PIC_FORMAT_400 4
++
++#define MRV_JPE_RESTART_INTERVAL
++#define MRV_JPE_RESTART_INTERVAL_MASK 0x0000FFFF
++#define MRV_JPE_RESTART_INTERVAL_SHIFT 0
++
++#define MRV_JPE_TQ0_SELECT
++#define MRV_JPE_TQ0_SELECT_MASK 0x00000003
++#define MRV_JPE_TQ0_SELECT_SHIFT 0
++#define MRV_JPE_TQ1_SELECT
++#define MRV_JPE_TQ1_SELECT_MASK 0x00000003
++#define MRV_JPE_TQ1_SELECT_SHIFT 0
++
++
++#define MRV_JPE_TQ2_SELECT
++#define MRV_JPE_TQ2_SELECT_MASK 0x00000003
++#define MRV_JPE_TQ2_SELECT_SHIFT 0
++
++#define MRV_JPE_TQ_SELECT_TAB3 3
++#define MRV_JPE_TQ_SELECT_TAB2 2
++#define MRV_JPE_TQ_SELECT_TAB1 1
++#define MRV_JPE_TQ_SELECT_TAB0 0
++
++
++#define MRV_JPE_DC_TABLE_SELECT_Y
++#define MRV_JPE_DC_TABLE_SELECT_Y_MASK 0x00000001
++#define MRV_JPE_DC_TABLE_SELECT_Y_SHIFT 0
++#define MRV_JPE_DC_TABLE_SELECT_U
++#define MRV_JPE_DC_TABLE_SELECT_U_MASK 0x00000002
++#define MRV_JPE_DC_TABLE_SELECT_U_SHIFT 1
++#define MRV_JPE_DC_TABLE_SELECT_V
++#define MRV_JPE_DC_TABLE_SELECT_V_MASK 0x00000004
++#define MRV_JPE_DC_TABLE_SELECT_V_SHIFT 2
++
++
++#define MRV_JPE_AC_TABLE_SELECT_Y
++#define MRV_JPE_AC_TABLE_SELECT_Y_MASK 0x00000001
++#define MRV_JPE_AC_TABLE_SELECT_Y_SHIFT 0
++#define MRV_JPE_AC_TABLE_SELECT_U
++#define MRV_JPE_AC_TABLE_SELECT_U_MASK 0x00000002
++#define MRV_JPE_AC_TABLE_SELECT_U_SHIFT 1
++#define MRV_JPE_AC_TABLE_SELECT_V
++#define MRV_JPE_AC_TABLE_SELECT_V_MASK 0x00000004
++#define MRV_JPE_AC_TABLE_SELECT_V_SHIFT 2
++
++
++#define MRV_JPE_TABLE_WDATA_H
++#define MRV_JPE_TABLE_WDATA_H_MASK 0x0000FF00
++#define MRV_JPE_TABLE_WDATA_H_SHIFT 8
++#define MRV_JPE_TABLE_WDATA_L
++#define MRV_JPE_TABLE_WDATA_L_MASK 0x000000FF
++#define MRV_JPE_TABLE_WDATA_L_SHIFT 0
++
++
++#define MRV_JPE_TABLE_ID
++#define MRV_JPE_TABLE_ID_MASK 0x0000000F
++#define MRV_JPE_TABLE_ID_SHIFT 0
++#define MRV_JPE_TABLE_ID_QUANT0 0
++#define MRV_JPE_TABLE_ID_QUANT1 1
++#define MRV_JPE_TABLE_ID_QUANT2 2
++#define MRV_JPE_TABLE_ID_QUANT3 3
++#define MRV_JPE_TABLE_ID_VLC_DC0 4
++#define MRV_JPE_TABLE_ID_VLC_AC0 5
++#define MRV_JPE_TABLE_ID_VLC_DC1 6
++#define MRV_JPE_TABLE_ID_VLC_AC1 7
++
++#define MRV_JPE_TAC0_LEN
++#define MRV_JPE_TAC0_LEN_MASK 0x000000FF
++#define MRV_JPE_TAC0_LEN_SHIFT 0
++
++#define MRV_JPE_TDC0_LEN
++#define MRV_JPE_TDC0_LEN_MASK 0x000000FF
++#define MRV_JPE_TDC0_LEN_SHIFT 0
++
++#define MRV_JPE_TAC1_LEN
++#define MRV_JPE_TAC1_LEN_MASK 0x000000FF
++#define MRV_JPE_TAC1_LEN_SHIFT 0
++
++#define MRV_JPE_TDC1_LEN
++#define MRV_JPE_TDC1_LEN_MASK 0x000000FF
++#define MRV_JPE_TDC1_LEN_SHIFT 0
++
++
++#define MRV_JPE_CODEC_BUSY
++#define MRV_JPE_CODEC_BUSY_MASK 0x00000001
++#define MRV_JPE_CODEC_BUSY_SHIFT 0
++
++
++#define MRV_JPE_HEADER_MODE
++#define MRV_JPE_HEADER_MODE_MASK 0x00000003
++#define MRV_JPE_HEADER_MODE_SHIFT 0
++#define MRV_JPE_HEADER_MODE_NO 0
++#define MRV_JPE_HEADER_MODE_JFIF 2
++
++#define MRV_JPE_ENCODE_MODE
++#define MRV_JPE_ENCODE_MODE_MASK 0x00000001
++#define MRV_JPE_ENCODE_MODE_SHIFT 0
++
++#define MRV_JPE_DEB_BAD_TABLE_ACCESS
++#define MRV_JPE_DEB_BAD_TABLE_ACCESS_MASK 0x00000100
++#define MRV_JPE_DEB_BAD_TABLE_ACCESS_SHIFT 8
++#define MRV_JPE_DEB_VLC_TABLE_BUSY
++#define MRV_JPE_DEB_VLC_TABLE_BUSY_MASK 0x00000020
++#define MRV_JPE_DEB_VLC_TABLE_BUSY_SHIFT 5
++#define MRV_JPE_DEB_R2B_MEMORY_FULL
++#define MRV_JPE_DEB_R2B_MEMORY_FULL_MASK 0x00000010
++#define MRV_JPE_DEB_R2B_MEMORY_FULL_SHIFT 4
++#define MRV_JPE_DEB_VLC_ENCODE_BUSY
++#define MRV_JPE_DEB_VLC_ENCODE_BUSY_MASK 0x00000008
++#define MRV_JPE_DEB_VLC_ENCODE_BUSY_SHIFT 3
++#define MRV_JPE_DEB_QIQ_TABLE_ACC
++#define MRV_JPE_DEB_QIQ_TABLE_ACC_MASK 0x00000004
++#define MRV_JPE_DEB_QIQ_TABLE_ACC_SHIFT 2
++
++#define MRV_JPE_VLC_TABLE_ERR
++#define MRV_JPE_VLC_TABLE_ERR_MASK 0x00000400
++#define MRV_JPE_VLC_TABLE_ERR_SHIFT 10
++#define MRV_JPE_R2B_IMG_SIZE_ERR
++#define MRV_JPE_R2B_IMG_SIZE_ERR_MASK 0x00000200
++#define MRV_JPE_R2B_IMG_SIZE_ERR_SHIFT 9
++#define MRV_JPE_DCT_ERR
++#define MRV_JPE_DCT_ERR_MASK 0x00000080
++#define MRV_JPE_DCT_ERR_SHIFT 7
++#define MRV_JPE_VLC_SYMBOL_ERR
++#define MRV_JPE_VLC_SYMBOL_ERR_MASK 0x00000010
++#define MRV_JPE_VLC_SYMBOL_ERR_SHIFT 4
++
++
++#define MRV_JPE_ALL_ERR
++#define MRV_JPE_ALL_ERR_MASK \
++(0 \
++| MRV_JPE_VLC_TABLE_ERR_MASK \
++| MRV_JPE_R2B_IMG_SIZE_ERR_MASK \
++| MRV_JPE_DCT_ERR_MASK \
++| MRV_JPE_VLC_SYMBOL_ERR_MASK \
++)
++#define MRV_JPE_ALL_ERR_SHIFT 0
++
++#define MRV_JPE_GEN_HEADER_DONE
++#define MRV_JPE_GEN_HEADER_DONE_MASK 0x00000020
++#define MRV_JPE_GEN_HEADER_DONE_SHIFT 5
++#define MRV_JPE_ENCODE_DONE
++#define MRV_JPE_ENCODE_DONE_MASK 0x00000010
++#define MRV_JPE_ENCODE_DONE_SHIFT 4
++
++/* FIXME | MRV_JPE_GEN_HEADER_DONE_MASK \ */
++
++#define MRV_JPE_ALL_STAT
++#define MRV_JPE_ALL_STAT_MASK \
++(0 \
++| MRV_JPE_ENCODE_DONE_MASK \
++)
++#define MRV_JPE_ALL_STAT_SHIFT 0
++
++
++#define MRV_SMIA_DMA_CHANNEL_SEL
++#define MRV_SMIA_DMA_CHANNEL_SEL_MASK 0x00000700
++#define MRV_SMIA_DMA_CHANNEL_SEL_SHIFT 8
++#define MRV_SMIA_SHUTDOWN_LANE
++#define MRV_SMIA_SHUTDOWN_LANE_MASK 0x00000008
++#define MRV_SMIA_SHUTDOWN_LANE_SHIFT 3
++
++#define MRV_SMIA_FLUSH_FIFO
++#define MRV_SMIA_FLUSH_FIFO_MASK 0x00000002
++#define MRV_SMIA_FLUSH_FIFO_SHIFT 1
++
++#define MRV_SMIA_OUTPUT_ENA
++#define MRV_SMIA_OUTPUT_ENA_MASK 0x00000001
++#define MRV_SMIA_OUTPUT_ENA_SHIFT 0
++
++#define MRV_SMIA_DMA_CHANNEL
++#define MRV_SMIA_DMA_CHANNEL_MASK 0x00000700
++#define MRV_SMIA_DMA_CHANNEL_SHIFT 8
++#define MRV_SMIA_EMB_DATA_AVAIL
++#define MRV_SMIA_EMB_DATA_AVAIL_MASK 0x00000001
++#define MRV_SMIA_EMB_DATA_AVAIL_SHIFT 0
++
++#define MRV_SMIA_IMSC_FIFO_FILL_LEVEL
++#define MRV_SMIA_IMSC_FIFO_FILL_LEVEL_MASK 0x00000020
++#define MRV_SMIA_IMSC_FIFO_FILL_LEVEL_SHIFT 5
++
++#define MRV_SMIA_IMSC_SYNC_FIFO_OVFLW
++#define MRV_SMIA_IMSC_SYNC_FIFO_OVFLW_MASK 0x00000010
++#define MRV_SMIA_IMSC_SYNC_FIFO_OVFLW_SHIFT 4
++#define MRV_SMIA_IMSC_ERR_CS
++#define MRV_SMIA_IMSC_ERR_CS_MASK 0x00000008
++#define MRV_SMIA_IMSC_ERR_CS_SHIFT 3
++#define MRV_SMIA_IMSC_ERR_PROTOCOL
++#define MRV_SMIA_IMSC_ERR_PROTOCOL_MASK 0x00000004
++#define MRV_SMIA_IMSC_ERR_PROTOCOL_SHIFT 2
++
++#define MRV_SMIA_IMSC_EMB_DATA_OVFLW
++#define MRV_SMIA_IMSC_EMB_DATA_OVFLW_MASK 0x00000002
++#define MRV_SMIA_IMSC_EMB_DATA_OVFLW_SHIFT 1
++#define MRV_SMIA_IMSC_FRAME_END
++#define MRV_SMIA_IMSC_FRAME_END_MASK 0x00000001
++#define MRV_SMIA_IMSC_FRAME_END_SHIFT 0
++
++#define MRV_SMIA_IMSC_ALL_IRQS
++#define MRV_SMIA_IMSC_ALL_IRQS_MASK \
++(0 \
++| MRV_SMIA_IMSC_FIFO_FILL_LEVEL_MASK \
++| MRV_SMIA_IMSC_SYNC_FIFO_OVFLW_MASK \
++| MRV_SMIA_IMSC_ERR_CS_MASK \
++| MRV_SMIA_IMSC_ERR_PROTOCOL_MASK \
++| MRV_SMIA_IMSC_EMB_DATA_OVFLW_MASK \
++| MRV_SMIA_IMSC_FRAME_END_MASK \
++)
++#define MRV_SMIA_IMSC_ALL_IRQS_SHIFT 0
++
++#define MRV_SMIA_RIS_FIFO_FILL_LEVEL
++#define MRV_SMIA_RIS_FIFO_FILL_LEVEL_MASK 0x00000020
++#define MRV_SMIA_RIS_FIFO_FILL_LEVEL_SHIFT 5
++#define MRV_SMIA_RIS_SYNC_FIFO_OVFLW
++#define MRV_SMIA_RIS_SYNC_FIFO_OVFLW_MASK 0x00000010
++#define MRV_SMIA_RIS_SYNC_FIFO_OVFLW_SHIFT 4
++#define MRV_SMIA_RIS_ERR_CS
++#define MRV_SMIA_RIS_ERR_CS_MASK 0x00000008
++#define MRV_SMIA_RIS_ERR_CS_SHIFT 3
++#define MRV_SMIA_RIS_ERR_PROTOCOL
++#define MRV_SMIA_RIS_ERR_PROTOCOL_MASK 0x00000004
++#define MRV_SMIA_RIS_ERR_PROTOCOL_SHIFT 2
++
++#define MRV_SMIA_RIS_EMB_DATA_OVFLW
++#define MRV_SMIA_RIS_EMB_DATA_OVFLW_MASK 0x00000002
++#define MRV_SMIA_RIS_EMB_DATA_OVFLW_SHIFT 1
++#define MRV_SMIA_RIS_FRAME_END
++#define MRV_SMIA_RIS_FRAME_END_MASK 0x00000001
++#define MRV_SMIA_RIS_FRAME_END_SHIFT 0
++
++#define MRV_SMIA_RIS_ALL_IRQS
++#define MRV_SMIA_RIS_ALL_IRQS_MASK \
++(0 \
++| MRV_SMIA_RIS_FIFO_FILL_LEVEL_MASK \
++| MRV_SMIA_RIS_SYNC_FIFO_OVFLW_MASK \
++| MRV_SMIA_RIS_ERR_CS_MASK \
++| MRV_SMIA_RIS_ERR_PROTOCOL_MASK \
++| MRV_SMIA_RIS_EMB_DATA_OVFLW_MASK \
++| MRV_SMIA_RIS_FRAME_END_MASK \
++)
++#define MRV_SMIA_RIS_ALL_IRQS_SHIFT 0
++
++#define MRV_SMIA_MIS_FIFO_FILL_LEVEL
++#define MRV_SMIA_MIS_FIFO_FILL_LEVEL_MASK 0x00000020
++#define MRV_SMIA_MIS_FIFO_FILL_LEVEL_SHIFT 5
++#define MRV_SMIA_MIS_SYNC_FIFO_OVFLW
++#define MRV_SMIA_MIS_SYNC_FIFO_OVFLW_MASK 0x00000010
++#define MRV_SMIA_MIS_SYNC_FIFO_OVFLW_SHIFT 4
++#define MRV_SMIA_MIS_ERR_CS
++#define MRV_SMIA_MIS_ERR_CS_MASK 0x00000008
++#define MRV_SMIA_MIS_ERR_CS_SHIFT 3
++#define MRV_SMIA_MIS_ERR_PROTOCOL
++#define MRV_SMIA_MIS_ERR_PROTOCOL_MASK 0x00000004
++#define MRV_SMIA_MIS_ERR_PROTOCOL_SHIFT 2
++
++#define MRV_SMIA_MIS_EMB_DATA_OVFLW
++#define MRV_SMIA_MIS_EMB_DATA_OVFLW_MASK 0x00000002
++#define MRV_SMIA_MIS_EMB_DATA_OVFLW_SHIFT 1
++#define MRV_SMIA_MIS_FRAME_END
++#define MRV_SMIA_MIS_FRAME_END_MASK 0x00000001
++#define MRV_SMIA_MIS_FRAME_END_SHIFT 0
++
++#define MRV_SMIA_MIS_ALL_IRQS
++#define MRV_SMIA_MIS_ALL_IRQS_MASK \
++(0 \
++| MRV_SMIA_MIS_FIFO_FILL_LEVEL_MASK \
++| MRV_SMIA_MIS_SYNC_FIFO_OVFLW_MASK \
++| MRV_SMIA_MIS_ERR_CS_MASK \
++| MRV_SMIA_MIS_ERR_PROTOCOL_MASK \
++| MRV_SMIA_MIS_EMB_DATA_OVFLW_MASK \
++| MRV_SMIA_MIS_FRAME_END_MASK \
++)
++#define MRV_SMIA_MIS_ALL_IRQS_SHIFT 0
++
++
++#define MRV_SMIA_ICR_FIFO_FILL_LEVEL
++#define MRV_SMIA_ICR_FIFO_FILL_LEVEL_MASK 0x00000020
++#define MRV_SMIA_ICR_FIFO_FILL_LEVEL_SHIFT 5
++#define MRV_SMIA_ICR_SYNC_FIFO_OVFLW
++#define MRV_SMIA_ICR_SYNC_FIFO_OVFLW_MASK 0x00000010
++#define MRV_SMIA_ICR_SYNC_FIFO_OVFLW_SHIFT 4
++#define MRV_SMIA_ICR_ERR_CS
++#define MRV_SMIA_ICR_ERR_CS_MASK 0x00000008
++#define MRV_SMIA_ICR_ERR_CS_SHIFT 3
++#define MRV_SMIA_ICR_ERR_PROTOCOL
++#define MRV_SMIA_ICR_ERR_PROTOCOL_MASK 0x00000004
++#define MRV_SMIA_ICR_ERR_PROTOCOL_SHIFT 2
++
++#define MRV_SMIA_ICR_EMB_DATA_OVFLW
++#define MRV_SMIA_ICR_EMB_DATA_OVFLW_MASK 0x00000002
++#define MRV_SMIA_ICR_EMB_DATA_OVFLW_SHIFT 1
++#define MRV_SMIA_ICR_FRAME_END
++#define MRV_SMIA_ICR_FRAME_END_MASK 0x00000001
++#define MRV_SMIA_ICR_FRAME_END_SHIFT 0
++
++#define MRV_SMIA_ICR_ALL_IRQS
++#define MRV_SMIA_ICR_ALL_IRQS_MASK \
++(0 \
++| MRV_SMIA_ICR_FIFO_FILL_LEVEL_MASK \
++| MRV_SMIA_ICR_SYNC_FIFO_OVFLW_MASK \
++| MRV_SMIA_ICR_ERR_CS_MASK \
++| MRV_SMIA_ICR_ERR_PROTOCOL_MASK \
++| MRV_SMIA_ICR_EMB_DATA_OVFLW_MASK \
++| MRV_SMIA_ICR_FRAME_END_MASK \
++)
++#define MRV_SMIA_ICR_ALL_IRQS_SHIFT 0
++
++
++#define MRV_SMIA_ISR_FIFO_FILL_LEVEL
++#define MRV_SMIA_ISR_FIFO_FILL_LEVEL_MASK 0x00000020
++#define MRV_SMIA_ISR_FIFO_FILL_LEVEL_SHIFT 5
++#define MRV_SMIA_ISR_SYNC_FIFO_OVFLW
++#define MRV_SMIA_ISR_SYNC_FIFO_OVFLW_MASK 0x00000010
++#define MRV_SMIA_ISR_SYNC_FIFO_OVFLW_SHIFT 4
++#define MRV_SMIA_ISR_ERR_CS
++#define MRV_SMIA_ISR_ERR_CS_MASK 0x00000008
++#define MRV_SMIA_ISR_ERR_CS_SHIFT 3
++#define MRV_SMIA_ISR_ERR_PROTOCOL
++#define MRV_SMIA_ISR_ERR_PROTOCOL_MASK 0x00000004
++#define MRV_SMIA_ISR_ERR_PROTOCOL_SHIFT 2
++
++#define MRV_SMIA_ISR_EMB_DATA_OVFLW
++#define MRV_SMIA_ISR_EMB_DATA_OVFLW_MASK 0x00000002
++#define MRV_SMIA_ISR_EMB_DATA_OVFLW_SHIFT 1
++#define MRV_SMIA_ISR_FRAME_END
++#define MRV_SMIA_ISR_FRAME_END_MASK 0x00000001
++#define MRV_SMIA_ISR_FRAME_END_SHIFT 0
++
++#define MRV_SMIA_ISR_ALL_IRQS
++#define MRV_SMIA_ISR_ALL_IRQS_MASK \
++(0 \
++| MRV_SMIA_ISR_FIFO_FILL_LEVEL_MASK \
++| MRV_SMIA_ISR_SYNC_FIFO_OVFLW_MASK \
++| MRV_SMIA_ISR_ERR_CS_MASK \
++| MRV_SMIA_ISR_ERR_PROTOCOL_MASK \
++| MRV_SMIA_ISR_EMB_DATA_OVFLW_MASK \
++| MRV_SMIA_ISR_FRAME_END_MASK \
++)
++#define MRV_SMIA_ISR_ALL_IRQS_SHIFT 0
++
++#define MRV_SMIA_DATA_FORMAT_SEL
++#define MRV_SMIA_DATA_FORMAT_SEL_MASK 0x0000000F
++#define MRV_SMIA_DATA_FORMAT_SEL_SHIFT 0
++#define MRV_SMIA_DATA_FORMAT_SEL_YUV422 0
++#define MRV_SMIA_DATA_FORMAT_SEL_YUV420 1
++#define MRV_SMIA_DATA_FORMAT_SEL_RGB444 4
++#define MRV_SMIA_DATA_FORMAT_SEL_RGB565 5
++#define MRV_SMIA_DATA_FORMAT_SEL_RGB888 6
++#define MRV_SMIA_DATA_FORMAT_SEL_RAW6 8
++#define MRV_SMIA_DATA_FORMAT_SEL_RAW7 9
++#define MRV_SMIA_DATA_FORMAT_SEL_RAW8 10
++#define MRV_SMIA_DATA_FORMAT_SEL_RAW10 11
++#define MRV_SMIA_DATA_FORMAT_SEL_RAW12 12
++#define MRV_SMIA_DATA_FORMAT_SEL_RAW8TO10 13
++#define MRV_SMIA_DATA_FORMAT_SEL_COMPRESSED 15
++
++
++#define MRV_SMIA_SOF_EMB_DATA_LINES
++#define MRV_SMIA_SOF_EMB_DATA_LINES_MASK 0x00000007
++#define MRV_SMIA_SOF_EMB_DATA_LINES_SHIFT 0
++#define MRV_SMIA_SOF_EMB_DATA_LINES_MIN 0
++#define MRV_SMIA_SOF_EMB_DATA_LINES_MAX \
++ (MRV_SMIA_SOF_EMB_DATA_LINES_MASK >> MRV_SMIA_SOF_EMB_DATA_LINES_SHIFT)
++#define MRV_SMIA_EMB_HSTART
++#define MRV_SMIA_EMB_HSTART_MASK 0x00003FFF
++#define MRV_SMIA_EMB_HSTART_SHIFT 0
++#define MRV_SMIA_EMB_HSTART_VALID_MASK (MRV_SMIA_EMB_HSTART_MASK & ~0x00000003)
++
++#define MRV_SMIA_EMB_HSIZE
++#define MRV_SMIA_EMB_HSIZE_MASK 0x00003FFF
++#define MRV_SMIA_EMB_HSIZE_SHIFT 0
++#define MRV_SMIA_EMB_HSIZE_VALID_MASK (MRV_SMIA_EMB_HSIZE_MASK & ~0x00000003)
++
++#define MRV_SMIA_EMB_VSTART
++#define MRV_SMIA_EMB_VSTART_MASK 0x00000FFF
++#define MRV_SMIA_EMB_VSTART_SHIFT 0
++
++#define MRV_SMIA_NUM_LINES
++#define MRV_SMIA_NUM_LINES_MASK 0x00000FFF
++
++#define MRV_SMIA_NUM_LINES_SHIFT 0
++#define MRV_SMIA_NUM_LINES_MIN 1
++#define MRV_SMIA_NUM_LINES_MAX \
++ (MRV_SMIA_NUM_LINES_MASK >> MRV_SMIA_NUM_LINES_SHIFT)
++
++#define MRV_SMIA_EMB_DATA_FIFO
++#define MRV_SMIA_EMB_DATA_FIFO_MASK 0xFFFFFFFF
++#define MRV_SMIA_EMB_DATA_FIFO_SHIFT 0
++
++#define MRV_SMIA_FIFO_FILL_LEVEL
++#define MRV_SMIA_FIFO_FILL_LEVEL_MASK 0x000003FF
++#define MRV_SMIA_FIFO_FILL_LEVEL_SHIFT 0
++#define MRV_SMIA_FIFO_FILL_LEVEL_VALID_MASK \
++ (MRV_SMIA_FIFO_FILL_LEVEL_MASK & ~0x00000003)
++
++#define MRV_MIPI_ERR_SOT_SYNC_HS_SKIP
++#define MRV_MIPI_ERR_SOT_SYNC_HS_SKIP_MASK 0x00020000
++#define MRV_MIPI_ERR_SOT_SYNC_HS_SKIP_SHIFT 17
++#define MRV_MIPI_ERR_SOT_HS_SKIP
++#define MRV_MIPI_ERR_SOT_HS_SKIP_MASK 0x00010000
++#define MRV_MIPI_ERR_SOT_HS_SKIP_SHIFT 16
++
++#define MRV_MIPI_NUM_LANES
++#define MRV_MIPI_NUM_LANES_MASK 0x00003000
++#define MRV_MIPI_NUM_LANES_SHIFT 12
++#define MRV_MIPI_SHUTDOWN_LANE
++#define MRV_MIPI_SHUTDOWN_LANE_MASK 0x00000F00
++#define MRV_MIPI_SHUTDOWN_LANE_SHIFT 8
++#define MRV_MIPI_FLUSH_FIFO
++#define MRV_MIPI_FLUSH_FIFO_MASK 0x00000002
++#define MRV_MIPI_FLUSH_FIFO_SHIFT 1
++#define MRV_MIPI_OUTPUT_ENA
++#define MRV_MIPI_OUTPUT_ENA_MASK 0x00000001
++#define MRV_MIPI_OUTPUT_ENA_SHIFT 0
++
++#define MRV_MIPI_STOPSTATE
++#define MRV_MIPI_STOPSTATE_MASK 0x00000F00
++#define MRV_MIPI_STOPSTATE_SHIFT 8
++#define MRV_MIPI_ADD_DATA_AVAIL
++#define MRV_MIPI_ADD_DATA_AVAIL_MASK 0x00000001
++#define MRV_MIPI_ADD_DATA_AVAIL_SHIFT 0
++
++#define MRV_MIPI_IMSC_ADD_DATA_FILL_LEVEL
++#define MRV_MIPI_IMSC_ADD_DATA_FILL_LEVEL_MASK 0x04000000
++#define MRV_MIPI_IMSC_ADD_DATA_FILL_LEVEL_SHIFT 26
++#define MRV_MIPI_IMSC_ADD_DATA_OVFLW
++#define MRV_MIPI_IMSC_ADD_DATA_OVFLW_MASK 0x02000000
++#define MRV_MIPI_IMSC_ADD_DATA_OVFLW_SHIFT 25
++#define MRV_MIPI_IMSC_FRAME_END
++#define MRV_MIPI_IMSC_FRAME_END_MASK 0x01000000
++#define MRV_MIPI_IMSC_FRAME_END_SHIFT 24
++#define MRV_MIPI_IMSC_ERR_CS
++#define MRV_MIPI_IMSC_ERR_CS_MASK 0x00800000
++#define MRV_MIPI_IMSC_ERR_CS_SHIFT 23
++#define MRV_MIPI_IMSC_ERR_ECC1
++#define MRV_MIPI_IMSC_ERR_ECC1_MASK 0x00400000
++#define MRV_MIPI_IMSC_ERR_ECC1_SHIFT 22
++#define MRV_MIPI_IMSC_ERR_ECC2
++#define MRV_MIPI_IMSC_ERR_ECC2_MASK 0x00200000
++#define MRV_MIPI_IMSC_ERR_ECC2_SHIFT 21
++#define MRV_MIPI_IMSC_ERR_PROTOCOL
++#define MRV_MIPI_IMSC_ERR_PROTOCOL_MASK 0x00100000
++#define MRV_MIPI_IMSC_ERR_PROTOCOL_SHIFT 20
++#define MRV_MIPI_IMSC_ERR_CONTROL
++#define MRV_MIPI_IMSC_ERR_CONTROL_MASK 0x000F0000
++#define MRV_MIPI_IMSC_ERR_CONTROL_SHIFT 16
++
++#define MRV_MIPI_IMSC_ERR_EOT_SYNC
++#define MRV_MIPI_IMSC_ERR_EOT_SYNC_MASK 0x0000F000
++#define MRV_MIPI_IMSC_ERR_EOT_SYNC_SHIFT 12
++#define MRV_MIPI_IMSC_ERR_SOT_SYNC
++#define MRV_MIPI_IMSC_ERR_SOT_SYNC_MASK 0x00000F00
++#define MRV_MIPI_IMSC_ERR_SOT_SYNC_SHIFT 8
++#define MRV_MIPI_IMSC_ERR_SOT
++#define MRV_MIPI_IMSC_ERR_SOT_MASK 0x000000F0
++#define MRV_MIPI_IMSC_ERR_SOT_SHIFT 4
++#define MRV_MIPI_IMSC_SYNC_FIFO_OVFLW
++#define MRV_MIPI_IMSC_SYNC_FIFO_OVFLW_MASK 0x0000000F
++#define MRV_MIPI_IMSC_SYNC_FIFO_OVFLW_SHIFT 0
++
++#define MRV_MIPI_IMSC_ALL_IRQS
++#define MRV_MIPI_IMSC_ALL_IRQS_MASK \
++(0 \
++| MRV_MIPI_IMSC_ADD_DATA_FILL_LEVEL_MASK \
++| MRV_MIPI_IMSC_ADD_DATA_OVFLW_MASK \
++| MRV_MIPI_IMSC_FRAME_END_MASK \
++| MRV_MIPI_IMSC_ERR_CS_MASK \
++| MRV_MIPI_IMSC_ERR_ECC1_MASK \
++| MRV_MIPI_IMSC_ERR_ECC2_MASK \
++| MRV_MIPI_IMSC_ERR_PROTOCOL_MASK \
++| MRV_MIPI_IMSC_ERR_CONTROL_MASK \
++| MRV_MIPI_IMSC_ERR_EOT_SYNC_MASK \
++| MRV_MIPI_IMSC_ERR_SOT_SYNC_MASK \
++| MRV_MIPI_IMSC_ERR_SOT_MASK \
++| MRV_MIPI_IMSC_SYNC_FIFO_OVFLW_MASK \
++)
++#define MRV_MIPI_IMSC_ALL_IRQS_SHIFT 0
++
++#define MRV_MIPI_RIS_ADD_DATA_FILL_LEVEL
++#define MRV_MIPI_RIS_ADD_DATA_FILL_LEVEL_MASK 0x04000000
++#define MRV_MIPI_RIS_ADD_DATA_FILL_LEVEL_SHIFT 26
++#define MRV_MIPI_RIS_ADD_DATA_OVFLW
++#define MRV_MIPI_RIS_ADD_DATA_OVFLW_MASK 0x02000000
++#define MRV_MIPI_RIS_ADD_DATA_OVFLW_SHIFT 25
++#define MRV_MIPI_RIS_FRAME_END
++#define MRV_MIPI_RIS_FRAME_END_MASK 0x01000000
++#define MRV_MIPI_RIS_FRAME_END_SHIFT 24
++#define MRV_MIPI_RIS_ERR_CS
++#define MRV_MIPI_RIS_ERR_CS_MASK 0x00800000
++#define MRV_MIPI_RIS_ERR_CS_SHIFT 23
++#define MRV_MIPI_RIS_ERR_ECC1
++#define MRV_MIPI_RIS_ERR_ECC1_MASK 0x00400000
++#define MRV_MIPI_RIS_ERR_ECC1_SHIFT 22
++#define MRV_MIPI_RIS_ERR_ECC2
++#define MRV_MIPI_RIS_ERR_ECC2_MASK 0x00200000
++#define MRV_MIPI_RIS_ERR_ECC2_SHIFT 21
++#define MRV_MIPI_RIS_ERR_PROTOCOL
++#define MRV_MIPI_RIS_ERR_PROTOCOL_MASK 0x00100000
++#define MRV_MIPI_RIS_ERR_PROTOCOL_SHIFT 20
++#define MRV_MIPI_RIS_ERR_CONTROL
++#define MRV_MIPI_RIS_ERR_CONTROL_MASK 0x000F0000
++#define MRV_MIPI_RIS_ERR_CONTROL_SHIFT 16
++#define MRV_MIPI_RIS_ERR_EOT_SYNC
++#define MRV_MIPI_RIS_ERR_EOT_SYNC_MASK 0x0000F000
++#define MRV_MIPI_RIS_ERR_EOT_SYNC_SHIFT 12
++#define MRV_MIPI_RIS_ERR_SOT_SYNC
++#define MRV_MIPI_RIS_ERR_SOT_SYNC_MASK 0x00000F00
++#define MRV_MIPI_RIS_ERR_SOT_SYNC_SHIFT 8
++#define MRV_MIPI_RIS_ERR_SOT
++#define MRV_MIPI_RIS_ERR_SOT_MASK 0x000000F0
++#define MRV_MIPI_RIS_ERR_SOT_SHIFT 4
++#define MRV_MIPI_RIS_SYNC_FIFO_OVFLW
++#define MRV_MIPI_RIS_SYNC_FIFO_OVFLW_MASK 0x0000000F
++#define MRV_MIPI_RIS_SYNC_FIFO_OVFLW_SHIFT 0
++
++#define MRV_MIPI_RIS_ALL_IRQS
++#define MRV_MIPI_RIS_ALL_IRQS_MASK \
++(0 \
++| MRV_MIPI_RIS_ADD_DATA_FILL_LEVEL_MASK \
++| MRV_MIPI_RIS_ADD_DATA_OVFLW_MASK \
++| MRV_MIPI_RIS_FRAME_END_MASK \
++| MRV_MIPI_RIS_ERR_CS_MASK \
++| MRV_MIPI_RIS_ERR_ECC1_MASK \
++| MRV_MIPI_RIS_ERR_ECC2_MASK \
++| MRV_MIPI_RIS_ERR_PROTOCOL_MASK \
++| MRV_MIPI_RIS_ERR_CONTROL_MASK \
++| MRV_MIPI_RIS_ERR_EOT_SYNC_MASK \
++| MRV_MIPI_RIS_ERR_SOT_SYNC_MASK \
++| MRV_MIPI_RIS_ERR_SOT_MASK \
++| MRV_MIPI_RIS_SYNC_FIFO_OVFLW_MASK \
++)
++#define MRV_MIPI_RIS_ALL_IRQS_SHIFT 0
++
++#define MRV_MIPI_MIS_ADD_DATA_FILL_LEVEL
++#define MRV_MIPI_MIS_ADD_DATA_FILL_LEVEL_MASK 0x04000000
++#define MRV_MIPI_MIS_ADD_DATA_FILL_LEVEL_SHIFT 26
++#define MRV_MIPI_MIS_ADD_DATA_OVFLW
++#define MRV_MIPI_MIS_ADD_DATA_OVFLW_MASK 0x02000000
++#define MRV_MIPI_MIS_ADD_DATA_OVFLW_SHIFT 25
++#define MRV_MIPI_MIS_FRAME_END
++#define MRV_MIPI_MIS_FRAME_END_MASK 0x01000000
++#define MRV_MIPI_MIS_FRAME_END_SHIFT 24
++#define MRV_MIPI_MIS_ERR_CS
++#define MRV_MIPI_MIS_ERR_CS_MASK 0x00800000
++#define MRV_MIPI_MIS_ERR_CS_SHIFT 23
++#define MRV_MIPI_MIS_ERR_ECC1
++#define MRV_MIPI_MIS_ERR_ECC1_MASK 0x00400000
++#define MRV_MIPI_MIS_ERR_ECC1_SHIFT 22
++#define MRV_MIPI_MIS_ERR_ECC2
++#define MRV_MIPI_MIS_ERR_ECC2_MASK 0x00200000
++#define MRV_MIPI_MIS_ERR_ECC2_SHIFT 21
++#define MRV_MIPI_MIS_ERR_PROTOCOL
++#define MRV_MIPI_MIS_ERR_PROTOCOL_MASK 0x00100000
++#define MRV_MIPI_MIS_ERR_PROTOCOL_SHIFT 20
++#define MRV_MIPI_MIS_ERR_CONTROL
++#define MRV_MIPI_MIS_ERR_CONTROL_MASK 0x000F0000
++#define MRV_MIPI_MIS_ERR_CONTROL_SHIFT 16
++#define MRV_MIPI_MIS_ERR_EOT_SYNC
++#define MRV_MIPI_MIS_ERR_EOT_SYNC_MASK 0x0000F000
++#define MRV_MIPI_MIS_ERR_EOT_SYNC_SHIFT 12
++#define MRV_MIPI_MIS_ERR_SOT_SYNC
++#define MRV_MIPI_MIS_ERR_SOT_SYNC_MASK 0x00000F00
++#define MRV_MIPI_MIS_ERR_SOT_SYNC_SHIFT 8
++#define MRV_MIPI_MIS_ERR_SOT
++#define MRV_MIPI_MIS_ERR_SOT_MASK 0x000000F0
++#define MRV_MIPI_MIS_ERR_SOT_SHIFT 4
++#define MRV_MIPI_MIS_SYNC_FIFO_OVFLW
++#define MRV_MIPI_MIS_SYNC_FIFO_OVFLW_MASK 0x0000000F
++#define MRV_MIPI_MIS_SYNC_FIFO_OVFLW_SHIFT 0
++
++#define MRV_MIPI_MIS_ALL_IRQS
++#define MRV_MIPI_MIS_ALL_IRQS_MASK \
++(0 \
++| MRV_MIPI_MIS_ADD_DATA_FILL_LEVEL_MASK \
++| MRV_MIPI_MIS_ADD_DATA_OVFLW_MASK \
++| MRV_MIPI_MIS_FRAME_END_MASK \
++| MRV_MIPI_MIS_ERR_CS_MASK \
++| MRV_MIPI_MIS_ERR_ECC1_MASK \
++| MRV_MIPI_MIS_ERR_ECC2_MASK \
++| MRV_MIPI_MIS_ERR_PROTOCOL_MASK \
++| MRV_MIPI_MIS_ERR_CONTROL_MASK \
++| MRV_MIPI_MIS_ERR_EOT_SYNC_MASK \
++| MRV_MIPI_MIS_ERR_SOT_SYNC_MASK \
++| MRV_MIPI_MIS_ERR_SOT_MASK \
++| MRV_MIPI_MIS_SYNC_FIFO_OVFLW_MASK \
++)
++#define MRV_MIPI_MIS_ALL_IRQS_SHIFT 0
++
++#define MRV_MIPI_ICR_ADD_DATA_FILL_LEVEL
++#define MRV_MIPI_ICR_ADD_DATA_FILL_LEVEL_MASK 0x04000000
++#define MRV_MIPI_ICR_ADD_DATA_FILL_LEVEL_SHIFT 26
++#define MRV_MIPI_ICR_ADD_DATA_OVFLW
++#define MRV_MIPI_ICR_ADD_DATA_OVFLW_MASK 0x02000000
++#define MRV_MIPI_ICR_ADD_DATA_OVFLW_SHIFT 25
++#define MRV_MIPI_ICR_FRAME_END
++#define MRV_MIPI_ICR_FRAME_END_MASK 0x01000000
++#define MRV_MIPI_ICR_FRAME_END_SHIFT 24
++#define MRV_MIPI_ICR_ERR_CS
++#define MRV_MIPI_ICR_ERR_CS_MASK 0x00800000
++#define MRV_MIPI_ICR_ERR_CS_SHIFT 23
++#define MRV_MIPI_ICR_ERR_ECC1
++#define MRV_MIPI_ICR_ERR_ECC1_MASK 0x00400000
++#define MRV_MIPI_ICR_ERR_ECC1_SHIFT 22
++#define MRV_MIPI_ICR_ERR_ECC2
++#define MRV_MIPI_ICR_ERR_ECC2_MASK 0x00200000
++#define MRV_MIPI_ICR_ERR_ECC2_SHIFT 21
++#define MRV_MIPI_ICR_ERR_PROTOCOL
++#define MRV_MIPI_ICR_ERR_PROTOCOL_MASK 0x00100000
++#define MRV_MIPI_ICR_ERR_PROTOCOL_SHIFT 20
++#define MRV_MIPI_ICR_ERR_CONTROL
++#define MRV_MIPI_ICR_ERR_CONTROL_MASK 0x000F0000
++#define MRV_MIPI_ICR_ERR_CONTROL_SHIFT 16
++#define MRV_MIPI_ICR_ERR_EOT_SYNC
++#define MRV_MIPI_ICR_ERR_EOT_SYNC_MASK 0x0000F000
++#define MRV_MIPI_ICR_ERR_EOT_SYNC_SHIFT 12
++#define MRV_MIPI_ICR_ERR_SOT_SYNC
++#define MRV_MIPI_ICR_ERR_SOT_SYNC_MASK 0x00000F00
++#define MRV_MIPI_ICR_ERR_SOT_SYNC_SHIFT 8
++#define MRV_MIPI_ICR_ERR_SOT
++#define MRV_MIPI_ICR_ERR_SOT_MASK 0x000000F0
++#define MRV_MIPI_ICR_ERR_SOT_SHIFT 4
++#define MRV_MIPI_ICR_SYNC_FIFO_OVFLW
++#define MRV_MIPI_ICR_SYNC_FIFO_OVFLW_MASK 0x0000000F
++#define MRV_MIPI_ICR_SYNC_FIFO_OVFLW_SHIFT 0
++
++#define MRV_MIPI_ICR_ALL_IRQS
++#define MRV_MIPI_ICR_ALL_IRQS_MASK \
++(0 \
++| MRV_MIPI_ICR_ADD_DATA_FILL_LEVEL_MASK \
++| MRV_MIPI_ICR_ADD_DATA_OVFLW_MASK \
++| MRV_MIPI_ICR_FRAME_END_MASK \
++| MRV_MIPI_ICR_ERR_CS_MASK \
++| MRV_MIPI_ICR_ERR_ECC1_MASK \
++| MRV_MIPI_ICR_ERR_ECC2_MASK \
++| MRV_MIPI_ICR_ERR_PROTOCOL_MASK \
++| MRV_MIPI_ICR_ERR_CONTROL_MASK \
++| MRV_MIPI_ICR_ERR_EOT_SYNC_MASK \
++| MRV_MIPI_ICR_ERR_SOT_SYNC_MASK \
++| MRV_MIPI_ICR_ERR_SOT_MASK \
++| MRV_MIPI_ICR_SYNC_FIFO_OVFLW_MASK \
++)
++#define MRV_MIPI_ICR_ALL_IRQS_SHIFT 0
++
++
++#define MRV_MIPI_ISR_ADD_DATA_FILL_LEVEL
++#define MRV_MIPI_ISR_ADD_DATA_FILL_LEVEL_MASK 0x04000000
++#define MRV_MIPI_ISR_ADD_DATA_FILL_LEVEL_SHIFT 26
++#define MRV_MIPI_ISR_ADD_DATA_OVFLW
++#define MRV_MIPI_ISR_ADD_DATA_OVFLW_MASK 0x02000000
++#define MRV_MIPI_ISR_ADD_DATA_OVFLW_SHIFT 25
++#define MRV_MIPI_ISR_FRAME_END
++#define MRV_MIPI_ISR_FRAME_END_MASK 0x01000000
++#define MRV_MIPI_ISR_FRAME_END_SHIFT 24
++#define MRV_MIPI_ISR_ERR_CS
++#define MRV_MIPI_ISR_ERR_CS_MASK 0x00800000
++#define MRV_MIPI_ISR_ERR_CS_SHIFT 23
++#define MRV_MIPI_ISR_ERR_ECC1
++#define MRV_MIPI_ISR_ERR_ECC1_MASK 0x00400000
++#define MRV_MIPI_ISR_ERR_ECC1_SHIFT 22
++#define MRV_MIPI_ISR_ERR_ECC2
++#define MRV_MIPI_ISR_ERR_ECC2_MASK 0x00200000
++#define MRV_MIPI_ISR_ERR_ECC2_SHIFT 21
++#define MRV_MIPI_ISR_ERR_PROTOCOL
++#define MRV_MIPI_ISR_ERR_PROTOCOL_MASK 0x00100000
++#define MRV_MIPI_ISR_ERR_PROTOCOL_SHIFT 20
++#define MRV_MIPI_ISR_ERR_CONTROL
++#define MRV_MIPI_ISR_ERR_CONTROL_MASK 0x000F0000
++#define MRV_MIPI_ISR_ERR_CONTROL_SHIFT 16
++#define MRV_MIPI_ISR_ERR_EOT_SYNC
++#define MRV_MIPI_ISR_ERR_EOT_SYNC_MASK 0x0000F000
++#define MRV_MIPI_ISR_ERR_EOT_SYNC_SHIFT 12
++#define MRV_MIPI_ISR_ERR_SOT_SYNC
++#define MRV_MIPI_ISR_ERR_SOT_SYNC_MASK 0x00000F00
++#define MRV_MIPI_ISR_ERR_SOT_SYNC_SHIFT 8
++#define MRV_MIPI_ISR_ERR_SOT
++#define MRV_MIPI_ISR_ERR_SOT_MASK 0x000000F0
++#define MRV_MIPI_ISR_ERR_SOT_SHIFT 4
++#define MRV_MIPI_ISR_SYNC_FIFO_OVFLW
++#define MRV_MIPI_ISR_SYNC_FIFO_OVFLW_MASK 0x0000000F
++#define MRV_MIPI_ISR_SYNC_FIFO_OVFLW_SHIFT 0
++
++#define MRV_MIPI_ISR_ALL_IRQS
++#define MRV_MIPI_ISR_ALL_IRQS_MASK \
++(0 \
++| MRV_MIPI_ISR_ADD_DATA_FILL_LEVEL_MASK \
++| MRV_MIPI_ISR_ADD_DATA_OVFLW_MASK \
++| MRV_MIPI_ISR_FRAME_END_MASK \
++| MRV_MIPI_ISR_ERR_CS_MASK \
++| MRV_MIPI_ISR_ERR_ECC1_MASK \
++| MRV_MIPI_ISR_ERR_ECC2_MASK \
++| MRV_MIPI_ISR_ERR_PROTOCOL_MASK \
++| MRV_MIPI_ISR_ERR_CONTROL_MASK \
++| MRV_MIPI_ISR_ERR_EOT_SYNC_MASK \
++| MRV_MIPI_ISR_ERR_SOT_SYNC_MASK \
++| MRV_MIPI_ISR_ERR_SOT_MASK \
++| MRV_MIPI_ISR_SYNC_FIFO_OVFLW_MASK \
++)
++#define MRV_MIPI_ISR_ALL_IRQS_SHIFT 0
++
++
++#define MRV_MIPI_VIRTUAL_CHANNEL
++#define MRV_MIPI_VIRTUAL_CHANNEL_MASK 0x000000C0
++#define MRV_MIPI_VIRTUAL_CHANNEL_SHIFT 6
++
++#define MRV_MIPI_VIRTUAL_CHANNEL_MAX \
++ (MRV_MIPI_VIRTUAL_CHANNEL_MASK >> MRV_MIPI_VIRTUAL_CHANNEL_SHIFT)
++#define MRV_MIPI_DATA_TYPE
++#define MRV_MIPI_DATA_TYPE_MASK 0x0000003F
++#define MRV_MIPI_DATA_TYPE_SHIFT 0
++
++#define MRV_MIPI_DATA_TYPE_MAX \
++ (MRV_MIPI_DATA_TYPE_MASK >> MRV_MIPI_DATA_TYPE_SHIFT)
++
++
++#define MRV_MIPI_VIRTUAL_CHANNEL_SEL
++#define MRV_MIPI_VIRTUAL_CHANNEL_SEL_MASK 0x000000C0
++#define MRV_MIPI_VIRTUAL_CHANNEL_SEL_SHIFT 6
++#define MRV_MIPI_DATA_TYPE_SEL
++#define MRV_MIPI_DATA_TYPE_SEL_MASK 0x0000003F
++#define MRV_MIPI_DATA_TYPE_SEL_SHIFT 0
++#define MRV_MIPI_DATA_TYPE_SEL_YUV420_8BIT 24
++#define MRV_MIPI_DATA_TYPE_SEL_YUV420_10BIT 25
++#define MRV_MIPI_DATA_TYPE_SEL_YUV420_8BIT_LEGACY 26
++#define MRV_MIPI_DATA_TYPE_SEL_YUV420_8BIT_CSPS 28
++#define MRV_MIPI_DATA_TYPE_SEL_YUV420_10BIT_CSPS 29
++#define MRV_MIPI_DATA_TYPE_SEL_YUV422_8BIT 30
++#define MRV_MIPI_DATA_TYPE_SEL_YUV422_10BIT 31
++#define MRV_MIPI_DATA_TYPE_SEL_RGB444 32
++#define MRV_MIPI_DATA_TYPE_SEL_RGB555 33
++#define MRV_MIPI_DATA_TYPE_SEL_RGB565 34
++#define MRV_MIPI_DATA_TYPE_SEL_RGB666 35
++#define MRV_MIPI_DATA_TYPE_SEL_RGB888 36
++#define MRV_MIPI_DATA_TYPE_SEL_RAW6 40
++#define MRV_MIPI_DATA_TYPE_SEL_RAW7 41
++#define MRV_MIPI_DATA_TYPE_SEL_RAW8 42
++#define MRV_MIPI_DATA_TYPE_SEL_RAW10 43
++#define MRV_MIPI_DATA_TYPE_SEL_RAW12 44
++#define MRV_MIPI_DATA_TYPE_SEL_USER1 48
++#define MRV_MIPI_DATA_TYPE_SEL_USER2 49
++#define MRV_MIPI_DATA_TYPE_SEL_USER3 50
++#define MRV_MIPI_DATA_TYPE_SEL_USER4 51
++
++
++#define MRV_MIPI_ADD_DATA_VC_1
++#define MRV_MIPI_ADD_DATA_VC_1_MASK 0x000000C0
++#define MRV_MIPI_ADD_DATA_VC_1_SHIFT 6
++#define MRV_MIPI_ADD_DATA_TYPE_1
++#define MRV_MIPI_ADD_DATA_TYPE_1_MASK 0x0000003F
++#define MRV_MIPI_ADD_DATA_TYPE_1_SHIFT 0
++
++
++#define MRV_MIPI_ADD_DATA_VC_2
++#define MRV_MIPI_ADD_DATA_VC_2_MASK 0x000000C0
++#define MRV_MIPI_ADD_DATA_VC_2_SHIFT 6
++#define MRV_MIPI_ADD_DATA_TYPE_2
++#define MRV_MIPI_ADD_DATA_TYPE_2_MASK 0x0000003F
++#define MRV_MIPI_ADD_DATA_TYPE_2_SHIFT 0
++
++
++#define MRV_MIPI_ADD_DATA_VC_3
++#define MRV_MIPI_ADD_DATA_VC_3_MASK 0x000000C0
++#define MRV_MIPI_ADD_DATA_VC_3_SHIFT 6
++#define MRV_MIPI_ADD_DATA_TYPE_3
++#define MRV_MIPI_ADD_DATA_TYPE_3_MASK 0x0000003F
++#define MRV_MIPI_ADD_DATA_TYPE_3_SHIFT 0
++
++
++#define MRV_MIPI_ADD_DATA_VC_4
++#define MRV_MIPI_ADD_DATA_VC_4_MASK 0x000000C0
++#define MRV_MIPI_ADD_DATA_VC_4_SHIFT 6
++#define MRV_MIPI_ADD_DATA_TYPE_4
++#define MRV_MIPI_ADD_DATA_TYPE_4_MASK 0x0000003F
++#define MRV_MIPI_ADD_DATA_TYPE_4_SHIFT 0
++
++#define MRV_MIPI_ADD_DATA_FIFO
++#define MRV_MIPI_ADD_DATA_FIFO_MASK 0xFFFFFFFF
++#define MRV_MIPI_ADD_DATA_FIFO_SHIFT 0
++
++#define MRV_MIPI_ADD_DATA_FILL_LEVEL
++#define MRV_MIPI_ADD_DATA_FILL_LEVEL_MASK 0x00001FFC
++#define MRV_MIPI_ADD_DATA_FILL_LEVEL_SHIFT 0
++#define MRV_MIPI_ADD_DATA_FILL_LEVEL_MAX 0x00001FFC
++
++#define MRV_AFM_AFM_EN
++#define MRV_AFM_AFM_EN_MASK 0x00000001
++#define MRV_AFM_AFM_EN_SHIFT 0
++
++#define MRV_AFM_A_H_L
++#define MRV_AFM_A_H_L_MASK 0x0FFF0000
++#define MRV_AFM_A_H_L_SHIFT 16
++#define MRV_AFM_A_H_L_MIN 5
++#define MRV_AFM_A_H_L_MAX (MRV_AFM_A_H_L_MASK >> MRV_AFM_A_H_L_SHIFT)
++#define MRV_AFM_A_V_T
++#define MRV_AFM_A_V_T_MASK 0x00000FFF
++#define MRV_AFM_A_V_T_SHIFT 0
++#define MRV_AFM_A_V_T_MIN 2
++#define MRV_AFM_A_V_T_MAX (MRV_AFM_A_V_T_MASK >> MRV_AFM_A_V_T_SHIFT)
++
++
++#define MRV_AFM_A_H_R
++#define MRV_AFM_A_H_R_MASK 0x0FFF0000
++#define MRV_AFM_A_H_R_SHIFT 16
++#define MRV_AFM_A_H_R_MIN 5
++#define MRV_AFM_A_H_R_MAX (MRV_AFM_A_H_R_MASK >> MRV_AFM_A_H_R_SHIFT)
++#define MRV_AFM_A_V_B
++#define MRV_AFM_A_V_B_MASK 0x00000FFF
++#define MRV_AFM_A_V_B_SHIFT 0
++#define MRV_AFM_A_V_B_MIN 2
++#define MRV_AFM_A_V_B_MAX (MRV_AFM_A_V_B_MASK >> MRV_AFM_A_V_B_SHIFT)
++
++
++#define MRV_AFM_B_H_L
++#define MRV_AFM_B_H_L_MASK 0x0FFF0000
++#define MRV_AFM_B_H_L_SHIFT 16
++#define MRV_AFM_B_H_L_MIN 5
++#define MRV_AFM_B_H_L_MAX (MRV_AFM_B_H_L_MASK >> MRV_AFM_B_H_L_SHIFT)
++#define MRV_AFM_B_V_T
++#define MRV_AFM_B_V_T_MASK 0x00000FFF
++#define MRV_AFM_B_V_T_SHIFT 0
++#define MRV_AFM_B_V_T_MIN 2
++#define MRV_AFM_B_V_T_MAX (MRV_AFM_B_V_T_MASK >> MRV_AFM_B_V_T_SHIFT)
++
++
++#define MRV_AFM_B_H_R
++#define MRV_AFM_B_H_R_MASK 0x0FFF0000
++#define MRV_AFM_B_H_R_SHIFT 16
++#define MRV_AFM_B_H_R_MIN 5
++#define MRV_AFM_B_H_R_MAX (MRV_AFM_B_H_R_MASK >> MRV_AFM_B_H_R_SHIFT)
++#define MRV_AFM_B_V_B
++#define MRV_AFM_B_V_B_MASK 0x00000FFF
++#define MRV_AFM_B_V_B_SHIFT 0
++#define MRV_AFM_B_V_B_MIN 2
++#define MRV_AFM_B_V_B_MAX (MRV_AFM_B_V_B_MASK >> MRV_AFM_B_V_B_SHIFT)
++
++
++#define MRV_AFM_C_H_L
++#define MRV_AFM_C_H_L_MASK 0x0FFF0000
++#define MRV_AFM_C_H_L_SHIFT 16
++#define MRV_AFM_C_H_L_MIN 5
++#define MRV_AFM_C_H_L_MAX (MRV_AFM_C_H_L_MASK >> MRV_AFM_C_H_L_SHIFT)
++#define MRV_AFM_C_V_T
++#define MRV_AFM_C_V_T_MASK 0x00000FFF
++#define MRV_AFM_C_V_T_SHIFT 0
++#define MRV_AFM_C_V_T_MIN 2
++#define MRV_AFM_C_V_T_MAX (MRV_AFM_C_V_T_MASK >> MRV_AFM_C_V_T_SHIFT)
++
++
++#define MRV_AFM_C_H_R
++#define MRV_AFM_C_H_R_MASK 0x0FFF0000
++#define MRV_AFM_C_H_R_SHIFT 16
++#define MRV_AFM_C_H_R_MIN 5
++#define MRV_AFM_C_H_R_MAX (MRV_AFM_C_H_R_MASK >> MRV_AFM_C_H_R_SHIFT)
++#define MRV_AFM_C_V_B
++#define MRV_AFM_C_V_B_MASK 0x00000FFF
++#define MRV_AFM_C_V_B_SHIFT 0
++#define MRV_AFM_C_V_B_MIN 2
++#define MRV_AFM_C_V_B_MAX (MRV_AFM_C_V_B_MASK >> MRV_AFM_C_V_B_SHIFT)
++
++#define MRV_AFM_AFM_THRES
++#define MRV_AFM_AFM_THRES_MASK 0x0000FFFF
++#define MRV_AFM_AFM_THRES_SHIFT 0
++
++#define MRV_AFM_LUM_VAR_SHIFT
++#define MRV_AFM_LUM_VAR_SHIFT_MASK 0x00070000
++#define MRV_AFM_LUM_VAR_SHIFT_SHIFT 16
++#define MRV_AFM_AFM_VAR_SHIFT
++#define MRV_AFM_AFM_VAR_SHIFT_MASK 0x00000007
++#define MRV_AFM_AFM_VAR_SHIFT_SHIFT 0
++
++#define MRV_AFM_AFM_SUM_A
++#define MRV_AFM_AFM_SUM_A_MASK 0xFFFFFFFF
++#define MRV_AFM_AFM_SUM_A_SHIFT 0
++
++#define MRV_AFM_AFM_SUM_B
++#define MRV_AFM_AFM_SUM_B_MASK 0xFFFFFFFF
++#define MRV_AFM_AFM_SUM_B_SHIFT 0
++
++#define MRV_AFM_AFM_SUM_C
++#define MRV_AFM_AFM_SUM_C_MASK 0xFFFFFFFF
++#define MRV_AFM_AFM_SUM_C_SHIFT 0
++
++#define MRV_AFM_AFM_LUM_A
++#define MRV_AFM_AFM_LUM_A_MASK 0x00FFFFFF
++#define MRV_AFM_AFM_LUM_A_SHIFT 0
++
++#define MRV_AFM_AFM_LUM_B
++#define MRV_AFM_AFM_LUM_B_MASK 0x00FFFFFF
++#define MRV_AFM_AFM_LUM_B_SHIFT 0
++
++#define MRV_AFM_AFM_LUM_C
++#define MRV_AFM_AFM_LUM_C_MASK 0x00FFFFFF
++#define MRV_AFM_AFM_LUM_C_SHIFT 0
++
++
++#define MRV_BP_COR_TYPE
++#define MRV_BP_COR_TYPE_MASK 0x00000010
++#define MRV_BP_COR_TYPE_SHIFT 4
++#define MRV_BP_COR_TYPE_TABLE 0
++#define MRV_BP_COR_TYPE_DIRECT 1
++#define MRV_BP_REP_APPR
++#define MRV_BP_REP_APPR_MASK 0x00000008
++#define MRV_BP_REP_APPR_SHIFT 3
++#define MRV_BP_REP_APPR_NEAREST 0
++#define MRV_BP_REP_APPR_INTERPOL 1
++#define MRV_BP_DEAD_COR_EN
++#define MRV_BP_DEAD_COR_EN_MASK 0x00000004
++#define MRV_BP_DEAD_COR_EN_SHIFT 2
++#define MRV_BP_HOT_COR_EN
++#define MRV_BP_HOT_COR_EN_MASK 0x00000002
++#define MRV_BP_HOT_COR_EN_SHIFT 1
++#define MRV_BP_BP_DET_EN
++#define MRV_BP_BP_DET_EN_MASK 0x00000001
++#define MRV_BP_BP_DET_EN_SHIFT 0
++
++
++
++#define MRV_BP_HOT_THRES
++#define MRV_BP_HOT_THRES_MASK 0x0FFF0000
++#define MRV_BP_HOT_THRES_SHIFT 16
++#define MRV_BP_DEAD_THRES
++#define MRV_BP_DEAD_THRES_MASK 0x00000FFF
++#define MRV_BP_DEAD_THRES_SHIFT 0
++
++
++
++#define MRV_BP_DEV_HOT_THRES
++#define MRV_BP_DEV_HOT_THRES_MASK 0x0FFF0000
++#define MRV_BP_DEV_HOT_THRES_SHIFT 16
++#define MRV_BP_DEV_DEAD_THRES
++#define MRV_BP_DEV_DEAD_THRES_MASK 0x00000FFF
++#define MRV_BP_DEV_DEAD_THRES_SHIFT 0
++
++
++#define MRV_BP_BP_NUMBER
++
++#define MRV_BP_BP_NUMBER_MASK 0x00000FFF
++#define MRV_BP_BP_NUMBER_SHIFT 0
++
++#define MRV_BP_BP_TABLE_ADDR
++#define MRV_BP_BP_TABLE_ADDR_MASK 0x000007FF
++
++#define MRV_BP_BP_TABLE_ADDR_SHIFT 0
++#define MRV_BP_BP_TABLE_ADDR_MAX MRV_BP_BP_TABLE_ADDR_MASK
++
++
++#define MRV_BP_PIX_TYPE
++#define MRV_BP_PIX_TYPE_MASK 0x80000000
++#define MRV_BP_PIX_TYPE_SHIFT 31
++#define MRV_BP_PIX_TYPE_DEAD 0u
++#define MRV_BP_PIX_TYPE_HOT 1u
++#define MRV_BP_V_ADDR
++
++#define MRV_BP_V_ADDR_MASK 0x0FFF0000
++
++#define MRV_BP_V_ADDR_SHIFT 16
++#define MRV_BP_H_ADDR
++#define MRV_BP_H_ADDR_MASK 0x00000FFF
++#define MRV_BP_H_ADDR_SHIFT 0
++
++
++#define MRV_BP_BP_NEW_NUMBER
++#define MRV_BP_BP_NEW_NUMBER_MASK 0x0000000F
++#define MRV_BP_BP_NEW_NUMBER_SHIFT 0
++
++
++#define MRV_BP_NEW_VALUE
++
++#define MRV_BP_NEW_VALUE_MASK 0xF8000000
++#define MRV_BP_NEW_VALUE_SHIFT 27
++#define MRV_BP_NEW_V_ADDR
++
++#define MRV_BP_NEW_V_ADDR_MASK 0x07FF0000
++#define MRV_BP_NEW_V_ADDR_SHIFT 16
++#define MRV_BP_NEW_H_ADDR
++#define MRV_BP_NEW_H_ADDR_MASK 0x00000FFF
++#define MRV_BP_NEW_H_ADDR_SHIFT 0
++
++
++
++#define MRV_LSC_LSC_EN
++#define MRV_LSC_LSC_EN_MASK 0x00000001
++#define MRV_LSC_LSC_EN_SHIFT 0
++
++#define MRV_LSC_R_RAM_ADDR
++#define MRV_LSC_R_RAM_ADDR_MASK 0x000000FF
++#define MRV_LSC_R_RAM_ADDR_SHIFT 0
++#define MRV_LSC_R_RAM_ADDR_MIN 0x00000000
++#define MRV_LSC_R_RAM_ADDR_MAX 0x00000098
++
++#define MRV_LSC_G_RAM_ADDR
++#define MRV_LSC_G_RAM_ADDR_MASK 0x000000FF
++#define MRV_LSC_G_RAM_ADDR_SHIFT 0
++#define MRV_LSC_G_RAM_ADDR_MIN 0x00000000
++#define MRV_LSC_G_RAM_ADDR_MAX 0x00000098
++
++#define MRV_LSC_B_RAM_ADDR
++#define MRV_LSC_B_RAM_ADDR_MASK 0x000000FF
++#define MRV_LSC_B_RAM_ADDR_SHIFT 0
++#define MRV_LSC_B_RAM_ADDR_MIN 0x00000000
++#define MRV_LSC_B_RAM_ADDR_MAX 0x00000098
++
++#define MRV_LSC_R_SAMPLE_1
++#define MRV_LSC_R_SAMPLE_1_MASK 0x00FFF000
++#define MRV_LSC_R_SAMPLE_1_SHIFT 12
++#define MRV_LSC_R_SAMPLE_0
++#define MRV_LSC_R_SAMPLE_0_MASK 0x00000FFF
++#define MRV_LSC_R_SAMPLE_0_SHIFT 0
++
++
++#define MRV_LSC_G_SAMPLE_1
++#define MRV_LSC_G_SAMPLE_1_MASK 0x00FFF000
++#define MRV_LSC_G_SAMPLE_1_SHIFT 12
++#define MRV_LSC_G_SAMPLE_0
++#define MRV_LSC_G_SAMPLE_0_MASK 0x00000FFF
++#define MRV_LSC_G_SAMPLE_0_SHIFT 0
++
++
++#define MRV_LSC_B_SAMPLE_1
++#define MRV_LSC_B_SAMPLE_1_MASK 0x00FFF000
++#define MRV_LSC_B_SAMPLE_1_SHIFT 12
++#define MRV_LSC_B_SAMPLE_0
++#define MRV_LSC_B_SAMPLE_0_MASK 0x00000FFF
++#define MRV_LSC_B_SAMPLE_0_SHIFT 0
++
++#define MRV_LSC_XGRAD_1
++#define MRV_LSC_XGRAD_1_MASK 0x0FFF0000
++#define MRV_LSC_XGRAD_1_SHIFT 16
++#define MRV_LSC_XGRAD_0
++#define MRV_LSC_XGRAD_0_MASK 0x00000FFF
++#define MRV_LSC_XGRAD_0_SHIFT 0
++
++#define MRV_LSC_XGRAD_3
++#define MRV_LSC_XGRAD_3_MASK 0x0FFF0000
++#define MRV_LSC_XGRAD_3_SHIFT 16
++#define MRV_LSC_XGRAD_2
++#define MRV_LSC_XGRAD_2_MASK 0x00000FFF
++#define MRV_LSC_XGRAD_2_SHIFT 0
++
++#define MRV_LSC_XGRAD_5
++#define MRV_LSC_XGRAD_5_MASK 0x0FFF0000
++#define MRV_LSC_XGRAD_5_SHIFT 16
++
++#define MRV_LSC_XGRAD_4
++#define MRV_LSC_XGRAD_4_MASK 0x00000FFF
++#define MRV_LSC_XGRAD_4_SHIFT 0
++
++
++#define MRV_LSC_XGRAD_7
++#define MRV_LSC_XGRAD_7_MASK 0x0FFF0000
++#define MRV_LSC_XGRAD_7_SHIFT 16
++
++#define MRV_LSC_XGRAD_6
++#define MRV_LSC_XGRAD_6_MASK 0x00000FFF
++#define MRV_LSC_XGRAD_6_SHIFT 0
++
++
++#define MRV_LSC_YGRAD_1
++#define MRV_LSC_YGRAD_1_MASK 0x0FFF0000
++#define MRV_LSC_YGRAD_1_SHIFT 16
++#define MRV_LSC_YGRAD_0
++#define MRV_LSC_YGRAD_0_MASK 0x00000FFF
++#define MRV_LSC_YGRAD_0_SHIFT 0
++
++
++#define MRV_LSC_YGRAD_3
++#define MRV_LSC_YGRAD_3_MASK 0x0FFF0000
++#define MRV_LSC_YGRAD_3_SHIFT 16
++
++#define MRV_LSC_YGRAD_2
++#define MRV_LSC_YGRAD_2_MASK 0x00000FFF
++#define MRV_LSC_YGRAD_2_SHIFT 0
++
++
++#define MRV_LSC_YGRAD_5
++#define MRV_LSC_YGRAD_5_MASK 0x0FFF0000
++#define MRV_LSC_YGRAD_5_SHIFT 16
++
++#define MRV_LSC_YGRAD_4
++#define MRV_LSC_YGRAD_4_MASK 0x00000FFF
++#define MRV_LSC_YGRAD_4_SHIFT 0
++
++
++#define MRV_LSC_YGRAD_7
++#define MRV_LSC_YGRAD_7_MASK 0x0FFF0000
++#define MRV_LSC_YGRAD_7_SHIFT 16
++
++#define MRV_LSC_YGRAD_6
++#define MRV_LSC_YGRAD_6_MASK 0x00000FFF
++#define MRV_LSC_YGRAD_6_SHIFT 0
++
++
++#define MRV_LSC_X_SECT_SIZE_1
++#define MRV_LSC_X_SECT_SIZE_1_MASK 0x03FF0000
++#define MRV_LSC_X_SECT_SIZE_1_SHIFT 16
++
++#define MRV_LSC_X_SECT_SIZE_0
++#define MRV_LSC_X_SECT_SIZE_0_MASK 0x000003FF
++#define MRV_LSC_X_SECT_SIZE_0_SHIFT 0
++
++
++#define MRV_LSC_X_SECT_SIZE_3
++#define MRV_LSC_X_SECT_SIZE_3_MASK 0x03FF0000
++#define MRV_LSC_X_SECT_SIZE_3_SHIFT 16
++
++#define MRV_LSC_X_SECT_SIZE_2
++#define MRV_LSC_X_SECT_SIZE_2_MASK 0x000003FF
++#define MRV_LSC_X_SECT_SIZE_2_SHIFT 0
++
++
++#define MRV_LSC_X_SECT_SIZE_5
++#define MRV_LSC_X_SECT_SIZE_5_MASK 0x03FF0000
++#define MRV_LSC_X_SECT_SIZE_5_SHIFT 16
++
++#define MRV_LSC_X_SECT_SIZE_4
++#define MRV_LSC_X_SECT_SIZE_4_MASK 0x000003FF
++#define MRV_LSC_X_SECT_SIZE_4_SHIFT 0
++
++
++#define MRV_LSC_X_SECT_SIZE_7
++#define MRV_LSC_X_SECT_SIZE_7_MASK 0x03FF0000
++#define MRV_LSC_X_SECT_SIZE_7_SHIFT 16
++
++#define MRV_LSC_X_SECT_SIZE_6
++#define MRV_LSC_X_SECT_SIZE_6_MASK 0x000003FF
++#define MRV_LSC_X_SECT_SIZE_6_SHIFT 0
++
++
++#define MRV_LSC_Y_SECT_SIZE_1
++#define MRV_LSC_Y_SECT_SIZE_1_MASK 0x03FF0000
++#define MRV_LSC_Y_SECT_SIZE_1_SHIFT 16
++#define MRV_LSC_Y_SECT_SIZE_0
++#define MRV_LSC_Y_SECT_SIZE_0_MASK 0x000003FF
++#define MRV_LSC_Y_SECT_SIZE_0_SHIFT 0
++
++
++#define MRV_LSC_Y_SECT_SIZE_3
++#define MRV_LSC_Y_SECT_SIZE_3_MASK 0x03FF0000
++#define MRV_LSC_Y_SECT_SIZE_3_SHIFT 16
++#define MRV_LSC_Y_SECT_SIZE_2
++#define MRV_LSC_Y_SECT_SIZE_2_MASK 0x000003FF
++#define MRV_LSC_Y_SECT_SIZE_2_SHIFT 0
++
++
++#define MRV_LSC_Y_SECT_SIZE_5
++#define MRV_LSC_Y_SECT_SIZE_5_MASK 0x03FF0000
++#define MRV_LSC_Y_SECT_SIZE_5_SHIFT 16
++#define MRV_LSC_Y_SECT_SIZE_4
++#define MRV_LSC_Y_SECT_SIZE_4_MASK 0x000003FF
++#define MRV_LSC_Y_SECT_SIZE_4_SHIFT 0
++
++
++#define MRV_LSC_Y_SECT_SIZE_7
++#define MRV_LSC_Y_SECT_SIZE_7_MASK 0x03FF0000
++#define MRV_LSC_Y_SECT_SIZE_7_SHIFT 16
++#define MRV_LSC_Y_SECT_SIZE_6
++#define MRV_LSC_Y_SECT_SIZE_6_MASK 0x000003FF
++#define MRV_LSC_Y_SECT_SIZE_6_SHIFT 0
++
++
++#define MRV_IS_IS_EN
++#define MRV_IS_IS_EN_MASK 0x00000001
++#define MRV_IS_IS_EN_SHIFT 0
++
++
++#define MRV_IS_IS_RECENTER
++#define MRV_IS_IS_RECENTER_MASK 0x00000007
++#define MRV_IS_IS_RECENTER_SHIFT 0
++#define MRV_IS_IS_RECENTER_MAX \
++ (MRV_IS_IS_RECENTER_MASK >> MRV_IS_IS_RECENTER_SHIFT)
++
++
++#define MRV_IS_IS_H_OFFS
++#define MRV_IS_IS_H_OFFS_MASK 0x00001FFF
++#define MRV_IS_IS_H_OFFS_SHIFT 0
++
++
++#define MRV_IS_IS_V_OFFS
++#define MRV_IS_IS_V_OFFS_MASK 0x00000FFF
++#define MRV_IS_IS_V_OFFS_SHIFT 0
++
++#define MRV_IS_IS_H_SIZE
++#define MRV_IS_IS_H_SIZE_MASK 0x00003FFF
++#define MRV_IS_IS_H_SIZE_SHIFT 0
++
++#define MRV_IS_IS_V_SIZE
++#define MRV_IS_IS_V_SIZE_MASK 0x00000FFF
++#define MRV_IS_IS_V_SIZE_SHIFT 0
++
++#define MRV_IS_IS_MAX_DX
++#define MRV_IS_IS_MAX_DX_MASK 0x00000FFF
++#define MRV_IS_IS_MAX_DX_SHIFT 0
++#define MRV_IS_IS_MAX_DX_MAX (MRV_IS_IS_MAX_DX_MASK >> MRV_IS_IS_MAX_DX_SHIFT)
++
++
++
++#define MRV_IS_IS_MAX_DY
++#define MRV_IS_IS_MAX_DY_MASK 0x00000FFF
++#define MRV_IS_IS_MAX_DY_SHIFT 0
++#define MRV_IS_IS_MAX_DY_MAX (MRV_IS_IS_MAX_DY_MASK >> MRV_IS_IS_MAX_DY_SHIFT)
++#define MRV_IS_DY
++#define MRV_IS_DY_MASK 0x0FFF0000
++#define MRV_IS_DY_SHIFT 16
++#define MRV_IS_DY_MAX 0x000007FF
++#define MRV_IS_DY_MIN (~MRV_IS_DY_MAX)
++#define MRV_IS_DX
++#define MRV_IS_DX_MASK 0x00000FFF
++#define MRV_IS_DX_SHIFT 0
++#define MRV_IS_DX_MAX 0x000007FF
++#define MRV_IS_DX_MIN (~MRV_IS_DX_MAX)
++
++
++#define MRV_IS_IS_H_OFFS_SHD
++#define MRV_IS_IS_H_OFFS_SHD_MASK 0x00001FFF
++#define MRV_IS_IS_H_OFFS_SHD_SHIFT 0
++
++
++#define MRV_IS_IS_V_OFFS_SHD
++#define MRV_IS_IS_V_OFFS_SHD_MASK 0x00000FFF
++#define MRV_IS_IS_V_OFFS_SHD_SHIFT 0
++
++
++#define MRV_IS_ISP_H_SIZE_SHD
++#define MRV_IS_ISP_H_SIZE_SHD_MASK 0x00001FFF
++#define MRV_IS_ISP_H_SIZE_SHD_SHIFT 0
++
++
++#define MRV_IS_ISP_V_SIZE_SHD
++#define MRV_IS_ISP_V_SIZE_SHD_MASK 0x00000FFF
++#define MRV_IS_ISP_V_SIZE_SHD_SHIFT 0
++
++
++#define MRV_HIST_HIST_PDIV
++#define MRV_HIST_HIST_PDIV_MASK 0x000007F8
++#define MRV_HIST_HIST_PDIV_SHIFT 3
++#define MRV_HIST_HIST_PDIV_MIN 0x00000003
++#define MRV_HIST_HIST_PDIV_MAX 0x000000FF
++#define MRV_HIST_HIST_MODE
++#define MRV_HIST_HIST_MODE_MASK 0x00000007
++#define MRV_HIST_HIST_MODE_SHIFT 0
++#define MRV_HIST_HIST_MODE_MAX 5
++#define MRV_HIST_HIST_MODE_LUM 5
++#define MRV_HIST_HIST_MODE_B 4
++#define MRV_HIST_HIST_MODE_G 3
++#define MRV_HIST_HIST_MODE_R 2
++#define MRV_HIST_HIST_MODE_RGB 1
++#define MRV_HIST_HIST_MODE_NONE 0
++
++#define MRV_HIST_HIST_H_OFFS
++#define MRV_HIST_HIST_H_OFFS_MASK 0x00000FFF
++#define MRV_HIST_HIST_H_OFFS_SHIFT 0
++#define MRV_HIST_HIST_H_OFFS_MAX \
++ (MRV_HIST_HIST_H_OFFS_MASK >> MRV_HIST_HIST_H_OFFS_SHIFT)
++
++#define MRV_HIST_HIST_V_OFFS
++#define MRV_HIST_HIST_V_OFFS_MASK 0x00000FFF
++#define MRV_HIST_HIST_V_OFFS_SHIFT 0
++#define MRV_HIST_HIST_V_OFFS_MAX \
++ (MRV_HIST_HIST_V_OFFS_MASK >> MRV_HIST_HIST_V_OFFS_SHIFT)
++
++#define MRV_HIST_HIST_H_SIZE
++#define MRV_HIST_HIST_H_SIZE_MASK 0x00000FFF
++#define MRV_HIST_HIST_H_SIZE_SHIFT 0
++#define MRV_HIST_HIST_H_SIZE_MAX \
++ (MRV_HIST_HIST_H_SIZE_MASK >> MRV_HIST_HIST_H_SIZE_SHIFT)
++
++#define MRV_HIST_HIST_V_SIZE
++#define MRV_HIST_HIST_V_SIZE_MASK 0x00000FFF
++#define MRV_HIST_HIST_V_SIZE_SHIFT 0
++#define MRV_HIST_HIST_V_SIZE_MAX \
++ (MRV_HIST_HIST_V_SIZE_MASK >> MRV_HIST_HIST_V_SIZE_SHIFT)
++
++
++#define MRV_HIST_HIST_BIN_N
++#define MRV_HIST_HIST_BIN_N_MASK 0x000000FF
++#define MRV_HIST_HIST_BIN_N_SHIFT 0
++#define MRV_HIST_HIST_BIN_N_MAX \
++ (MRV_HIST_HIST_BIN_N_MASK >> MRV_HIST_HIST_BIN_N_SHIFT)
++
++
++
++#define MRV_FILT_STAGE1_SELECT
++#define MRV_FILT_STAGE1_SELECT_MASK 0x00000F00
++#define MRV_FILT_STAGE1_SELECT_SHIFT 8
++#define MRV_FILT_STAGE1_SELECT_MAX_BLUR 0
++#define MRV_FILT_STAGE1_SELECT_DEFAULT 4
++#define MRV_FILT_STAGE1_SELECT_MIN_BLUR 7
++#define MRV_FILT_STAGE1_SELECT_BYPASS 8
++#define MRV_FILT_FILT_CHR_H_MODE
++#define MRV_FILT_FILT_CHR_H_MODE_MASK 0x000000C0
++#define MRV_FILT_FILT_CHR_H_MODE_SHIFT 6
++#define MRV_FILT_FILT_CHR_H_MODE_BYPASS 0
++#define MRV_FILT_FILT_CHR_H_MODE_STATIC 1
++#define MRV_FILT_FILT_CHR_H_MODE_DYN_1 2
++#define MRV_FILT_FILT_CHR_H_MODE_DYN_2 3
++#define MRV_FILT_FILT_CHR_V_MODE
++#define MRV_FILT_FILT_CHR_V_MODE_MASK 0x00000030
++#define MRV_FILT_FILT_CHR_V_MODE_SHIFT 4
++#define MRV_FILT_FILT_CHR_V_MODE_BYPASS 0
++#define MRV_FILT_FILT_CHR_V_MODE_STATIC8 1
++#define MRV_FILT_FILT_CHR_V_MODE_STATIC10 2
++#define MRV_FILT_FILT_CHR_V_MODE_STATIC12 3
++
++#define MRV_FILT_FILT_MODE
++#define MRV_FILT_FILT_MODE_MASK 0x00000002
++#define MRV_FILT_FILT_MODE_SHIFT 1
++#define MRV_FILT_FILT_MODE_STATIC 0
++#define MRV_FILT_FILT_MODE_DYNAMIC 1
++
++#define MRV_FILT_FILT_ENABLE
++#define MRV_FILT_FILT_ENABLE_MASK 0x00000001
++#define MRV_FILT_FILT_ENABLE_SHIFT 0
++
++
++#define MRV_FILT_FILT_THRESH_BL0
++#define MRV_FILT_FILT_THRESH_BL0_MASK 0x000003FF
++#define MRV_FILT_FILT_THRESH_BL0_SHIFT 0
++
++
++#define MRV_FILT_FILT_THRESH_BL1
++#define MRV_FILT_FILT_THRESH_BL1_MASK 0x000003FF
++#define MRV_FILT_FILT_THRESH_BL1_SHIFT 0
++
++
++#define MRV_FILT_FILT_THRESH_SH0
++#define MRV_FILT_FILT_THRESH_SH0_MASK 0x000003FF
++#define MRV_FILT_FILT_THRESH_SH0_SHIFT 0
++
++
++#define MRV_FILT_FILT_THRESH_SH1
++#define MRV_FILT_FILT_THRESH_SH1_MASK 0x000003FF
++#define MRV_FILT_FILT_THRESH_SH1_SHIFT 0
++
++
++#define MRV_FILT_LUM_WEIGHT_GAIN
++#define MRV_FILT_LUM_WEIGHT_GAIN_MASK 0x00070000
++#define MRV_FILT_LUM_WEIGHT_GAIN_SHIFT 16
++#define MRV_FILT_LUM_WEIGHT_KINK
++#define MRV_FILT_LUM_WEIGHT_KINK_MASK 0x0000FF00
++#define MRV_FILT_LUM_WEIGHT_KINK_SHIFT 8
++#define MRV_FILT_LUM_WEIGHT_MIN
++#define MRV_FILT_LUM_WEIGHT_MIN_MASK 0x000000FF
++#define MRV_FILT_LUM_WEIGHT_MIN_SHIFT 0
++
++
++#define MRV_FILT_FILT_FAC_SH1
++#define MRV_FILT_FILT_FAC_SH1_MASK 0x0000003F
++#define MRV_FILT_FILT_FAC_SH1_SHIFT 0
++
++
++#define MRV_FILT_FILT_FAC_SH0
++#define MRV_FILT_FILT_FAC_SH0_MASK 0x0000003F
++#define MRV_FILT_FILT_FAC_SH0_SHIFT 0
++
++
++#define MRV_FILT_FILT_FAC_MID
++#define MRV_FILT_FILT_FAC_MID_MASK 0x0000003F
++#define MRV_FILT_FILT_FAC_MID_SHIFT 0
++
++
++#define MRV_FILT_FILT_FAC_BL0
++#define MRV_FILT_FILT_FAC_BL0_MASK 0x0000003F
++#define MRV_FILT_FILT_FAC_BL0_SHIFT 0
++
++
++#define MRV_FILT_FILT_FAC_BL1
++#define MRV_FILT_FILT_FAC_BL1_MASK 0x0000003F
++#define MRV_FILT_FILT_FAC_BL1_SHIFT 0
++
++
++
++
++#define MRV_AE_EXP_MEAS_MODE
++#define MRV_AE_EXP_MEAS_MODE_MASK 0x80000000
++#define MRV_AE_EXP_MEAS_MODE_SHIFT 31
++
++#define MRV_AE_AUTOSTOP
++#define MRV_AE_AUTOSTOP_MASK 0x00000002
++#define MRV_AE_AUTOSTOP_SHIFT 1
++
++#define MRV_AE_EXP_START
++#define MRV_AE_EXP_START_MASK 0x00000001
++#define MRV_AE_EXP_START_SHIFT 0
++
++
++
++
++
++#define MRV_AE_ISP_EXP_H_OFFSET
++#define MRV_AE_ISP_EXP_H_OFFSET_MASK 0x00000FFF
++#define MRV_AE_ISP_EXP_H_OFFSET_SHIFT 0
++#define MRV_AE_ISP_EXP_H_OFFSET_MIN 0x00000000
++#define MRV_AE_ISP_EXP_H_OFFSET_MAX 0x00000F50
++
++
++
++#define MRV_AE_ISP_EXP_V_OFFSET
++#define MRV_AE_ISP_EXP_V_OFFSET_MASK 0x00000FFF
++#define MRV_AE_ISP_EXP_V_OFFSET_SHIFT 0
++#define MRV_AE_ISP_EXP_V_OFFSET_MIN 0x00000000
++#define MRV_AE_ISP_EXP_V_OFFSET_MAX 0x00000B74
++
++
++#define MRV_AE_ISP_EXP_H_SIZE
++#define MRV_AE_ISP_EXP_H_SIZE_MASK 0x000003FF
++#define MRV_AE_ISP_EXP_H_SIZE_SHIFT 0
++#define MRV_AE_ISP_EXP_H_SIZE_MIN 0x00000023
++#define MRV_AE_ISP_EXP_H_SIZE_MAX 0x00000333
++
++
++#define MRV_AE_ISP_EXP_V_SIZE
++#define MRV_AE_ISP_EXP_V_SIZE_MASK 0x000003FE
++#define MRV_AE_ISP_EXP_V_SIZE_SHIFT 0
++#define MRV_AE_ISP_EXP_V_SIZE_VALID_MASK \
++ (MRV_AE_ISP_EXP_V_SIZE_MASK & ~0x00000001)
++#define MRV_AE_ISP_EXP_V_SIZE_MIN 0x0000001C
++#define MRV_AE_ISP_EXP_V_SIZE_MAX 0x00000266
++
++#define MRV_AE_ISP_EXP_MEAN_ARR_SIZE1 5
++#define MRV_AE_ISP_EXP_MEAN_ARR_SIZE2 5
++#define MRV_AE_ISP_EXP_MEAN_ARR_OFS1 1
++#define MRV_AE_ISP_EXP_MEAN_ARR_OFS2 MRV_AE_ISP_EXP_MEAN_ARR_SIZE1
++#define MRV_AE_ISP_EXP_MEAN
++#define MRV_AE_ISP_EXP_MEAN_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_00
++#define MRV_AE_ISP_EXP_MEAN_00_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_00_SHIFT 0
++
++#define MRV_AE_ISP_EXP_MEAN_10
++#define MRV_AE_ISP_EXP_MEAN_10_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_10_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_20
++#define MRV_AE_ISP_EXP_MEAN_20_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_20_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_30
++#define MRV_AE_ISP_EXP_MEAN_30_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_30_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_40
++#define MRV_AE_ISP_EXP_MEAN_40_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_40_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_01
++#define MRV_AE_ISP_EXP_MEAN_01_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_01_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_11
++#define MRV_AE_ISP_EXP_MEAN_11_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_11_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_21
++#define MRV_AE_ISP_EXP_MEAN_21_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_21_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_31
++#define MRV_AE_ISP_EXP_MEAN_31_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_31_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_41
++#define MRV_AE_ISP_EXP_MEAN_41_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_41_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_02
++#define MRV_AE_ISP_EXP_MEAN_02_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_02_SHIFT 0
++
++
++
++#define MRV_AE_ISP_EXP_MEAN_12
++#define MRV_AE_ISP_EXP_MEAN_12_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_12_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_22
++#define MRV_AE_ISP_EXP_MEAN_22_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_22_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_32
++#define MRV_AE_ISP_EXP_MEAN_32_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_32_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_42
++#define MRV_AE_ISP_EXP_MEAN_42_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_42_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_03
++#define MRV_AE_ISP_EXP_MEAN_03_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_03_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_13
++#define MRV_AE_ISP_EXP_MEAN_13_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_13_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_23
++#define MRV_AE_ISP_EXP_MEAN_23_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_23_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_33
++#define MRV_AE_ISP_EXP_MEAN_33_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_33_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_43
++#define MRV_AE_ISP_EXP_MEAN_43_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_43_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_04
++#define MRV_AE_ISP_EXP_MEAN_04_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_04_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_14
++#define MRV_AE_ISP_EXP_MEAN_14_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_14_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_24
++#define MRV_AE_ISP_EXP_MEAN_24_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_24_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_34
++#define MRV_AE_ISP_EXP_MEAN_34_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_34_SHIFT 0
++
++
++#define MRV_AE_ISP_EXP_MEAN_44
++#define MRV_AE_ISP_EXP_MEAN_44_MASK 0x000000FF
++#define MRV_AE_ISP_EXP_MEAN_44_SHIFT 0
++
++
++
++#define MRV_BLS_WINDOW_ENABLE
++#define MRV_BLS_WINDOW_ENABLE_MASK 0x0000000C
++#define MRV_BLS_WINDOW_ENABLE_SHIFT 2
++#define MRV_BLS_WINDOW_ENABLE_NONE 0
++#define MRV_BLS_WINDOW_ENABLE_WND1 1
++#define MRV_BLS_WINDOW_ENABLE_WND2 2
++#define MRV_BLS_WINDOW_ENABLE_BOTH 3
++
++#define MRV_BLS_BLS_MODE
++#define MRV_BLS_BLS_MODE_MASK 0x00000002
++#define MRV_BLS_BLS_MODE_SHIFT 1
++#define MRV_BLS_BLS_MODE_MEAS 1
++#define MRV_BLS_BLS_MODE_FIX 0
++
++#define MRV_BLS_BLS_ENABLE
++#define MRV_BLS_BLS_ENABLE_MASK 0x00000001
++#define MRV_BLS_BLS_ENABLE_SHIFT 0
++
++
++#define MRV_BLS_BLS_SAMPLES
++#define MRV_BLS_BLS_SAMPLES_MASK 0x0000001F
++#define MRV_BLS_BLS_SAMPLES_SHIFT 0
++
++#define MRV_BLS_BLS_SAMPLES_MAX (0x00000014)
++
++
++#define MRV_BLS_BLS_H1_START
++#define MRV_BLS_BLS_H1_START_MASK 0x00000FFF
++#define MRV_BLS_BLS_H1_START_SHIFT 0
++#define MRV_BLS_BLS_H1_START_MAX \
++ (MRV_BLS_BLS_H1_START_MASK >> MRV_BLS_BLS_H1_START_SHIFT)
++
++
++#define MRV_BLS_BLS_H1_STOP
++#define MRV_BLS_BLS_H1_STOP_MASK 0x00001FFF
++#define MRV_BLS_BLS_H1_STOP_SHIFT 0
++#define MRV_BLS_BLS_H1_STOP_MAX \
++ (MRV_BLS_BLS_H1_STOP_MASK >> MRV_BLS_BLS_H1_STOP_SHIFT)
++
++
++#define MRV_BLS_BLS_V1_START
++#define MRV_BLS_BLS_V1_START_MASK 0x00001FFF
++#define MRV_BLS_BLS_V1_START_SHIFT 0
++#define MRV_BLS_BLS_V1_START_MAX \
++ (MRV_BLS_BLS_V1_START_MASK >> MRV_BLS_BLS_V1_START_SHIFT)
++
++#define MRV_BLS_BLS_V1_STOP
++#define MRV_BLS_BLS_V1_STOP_MASK 0x00001FFF
++#define MRV_BLS_BLS_V1_STOP_SHIFT 0
++#define MRV_BLS_BLS_V1_STOP_MAX \
++ (MRV_BLS_BLS_V1_STOP_MASK >> MRV_BLS_BLS_V1_STOP_SHIFT)
++
++#define MRV_BLS_BLS_H2_START
++#define MRV_BLS_BLS_H2_START_MASK 0x00001FFF
++#define MRV_BLS_BLS_H2_START_SHIFT 0
++#define MRV_BLS_BLS_H2_START_MAX \
++ (MRV_BLS_BLS_H2_START_MASK >> MRV_BLS_BLS_H2_START_SHIFT)
++
++
++#define MRV_BLS_BLS_H2_STOP
++#define MRV_BLS_BLS_H2_STOP_MASK 0x00001FFF
++#define MRV_BLS_BLS_H2_STOP_SHIFT 0
++#define MRV_BLS_BLS_H2_STOP_MAX \
++ (MRV_BLS_BLS_H2_STOP_MASK >> MRV_BLS_BLS_H2_STOP_SHIFT)
++
++
++#define MRV_BLS_BLS_V2_START
++#define MRV_BLS_BLS_V2_START_MASK 0x00001FFF
++#define MRV_BLS_BLS_V2_START_SHIFT 0
++#define MRV_BLS_BLS_V2_START_MAX \
++ (MRV_BLS_BLS_V2_START_MASK >> MRV_BLS_BLS_V2_START_SHIFT)
++
++
++#define MRV_BLS_BLS_V2_STOP
++#define MRV_BLS_BLS_V2_STOP_MASK 0x00001FFF
++#define MRV_BLS_BLS_V2_STOP_SHIFT 0
++#define MRV_BLS_BLS_V2_STOP_MAX \
++ (MRV_BLS_BLS_V2_STOP_MASK >> MRV_BLS_BLS_V2_STOP_SHIFT)
++
++#define MRV_ISP_BLS_FIX_SUB_MIN (0xFFFFF001)
++#define MRV_ISP_BLS_FIX_SUB_MAX (0x00000FFF)
++#define MRV_ISP_BLS_FIX_MASK (0x00001FFF)
++#define MRV_ISP_BLS_FIX_SHIFT_A (0)
++#define MRV_ISP_BLS_FIX_SHIFT_B (0)
++#define MRV_ISP_BLS_FIX_SHIFT_C (0)
++#define MRV_ISP_BLS_FIX_SHIFT_D (0)
++#define MRV_ISP_BLS_MEAN_MASK (0x00000FFF)
++#define MRV_ISP_BLS_MEAN_SHIFT_A (0)
++#define MRV_ISP_BLS_MEAN_SHIFT_B (0)
++#define MRV_ISP_BLS_MEAN_SHIFT_C (0)
++#define MRV_ISP_BLS_MEAN_SHIFT_D (0)
++
++#define MRV_BLS_BLS_A_FIXED
++#define MRV_BLS_BLS_A_FIXED_MASK (MRV_ISP_BLS_FIX_MASK <<\
++ MRV_ISP_BLS_FIX_SHIFT_A)
++#define MRV_BLS_BLS_A_FIXED_SHIFT MRV_ISP_BLS_FIX_SHIFT_A
++
++#define MRV_BLS_BLS_B_FIXED
++#define MRV_BLS_BLS_B_FIXED_MASK (MRV_ISP_BLS_FIX_MASK <<\
++ MRV_ISP_BLS_FIX_SHIFT_B)
++#define MRV_BLS_BLS_B_FIXED_SHIFT MRV_ISP_BLS_FIX_SHIFT_B
++
++#define MRV_BLS_BLS_C_FIXED
++#define MRV_BLS_BLS_C_FIXED_MASK (MRV_ISP_BLS_FIX_MASK <<\
++ MRV_ISP_BLS_FIX_SHIFT_C)
++#define MRV_BLS_BLS_C_FIXED_SHIFT MRV_ISP_BLS_FIX_SHIFT_C
++
++#define MRV_BLS_BLS_D_FIXED
++#define MRV_BLS_BLS_D_FIXED_MASK (MRV_ISP_BLS_FIX_MASK <<\
++ MRV_ISP_BLS_FIX_SHIFT_D)
++#define MRV_BLS_BLS_D_FIXED_SHIFT MRV_ISP_BLS_FIX_SHIFT_D
++
++
++#define MRV_BLS_BLS_A_MEASURED
++#define MRV_BLS_BLS_A_MEASURED_MASK (MRV_ISP_BLS_MEAN_MASK <<\
++ MRV_ISP_BLS_MEAN_SHIFT_A)
++#define MRV_BLS_BLS_A_MEASURED_SHIFT MRV_ISP_BLS_MEAN_SHIFT_A
++
++
++#define MRV_BLS_BLS_B_MEASURED
++#define MRV_BLS_BLS_B_MEASURED_MASK (MRV_ISP_BLS_MEAN_MASK <<\
++ MRV_ISP_BLS_MEAN_SHIFT_B)
++#define MRV_BLS_BLS_B_MEASURED_SHIFT MRV_ISP_BLS_MEAN_SHIFT_B
++
++
++#define MRV_BLS_BLS_C_MEASURED
++#define MRV_BLS_BLS_C_MEASURED_MASK (MRV_ISP_BLS_MEAN_MASK <<\
++ MRV_ISP_BLS_MEAN_SHIFT_C)
++#define MRV_BLS_BLS_C_MEASURED_SHIFT MRV_ISP_BLS_MEAN_SHIFT_C
++
++
++#define MRV_BLS_BLS_D_MEASURED
++#define MRV_BLS_BLS_D_MEASURED_MASK (MRV_ISP_BLS_MEAN_MASK <<\
++ MRV_ISP_BLS_MEAN_SHIFT_D)
++#define MRV_BLS_BLS_D_MEASURED_SHIFT MRV_ISP_BLS_MEAN_SHIFT_D
++
++#define CI_ISP_DELAY_AFTER_RESET 15
++
++#define IRQ_ISP_ERROR -1
++#define IRQ_JPE_ERROR 0
++#define IRQ_JPE_SUCCESS 1
++#define IRQ_MI_SUCCESS 2
++#define IRQ_MI_SP_SUCCESS 3
++#define IRQ 1
++
++#endif
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstisp/include/mrstisp_stdinc.h
+@@ -0,0 +1,119 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#ifndef _MRSTISP_STDINC_H
++#define _MRSTISP_STDINC_H
++
++#ifdef __KERNEL__
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/moduleparam.h>
++#include <linux/init.h>
++#include <linux/fs.h>
++
++#include <linux/proc_fs.h>
++#include <linux/ctype.h>
++#include <linux/pagemap.h>
++#include <linux/delay.h>
++#include <linux/io.h>
++
++#include <linux/uaccess.h>
++#include <linux/videodev2.h>
++#include <media/v4l2-common.h>
++#include <media/v4l2-ioctl.h>
++
++#include <linux/mutex.h>
++#include <linux/list.h>
++#include <linux/string.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/moduleparam.h>
++#include <linux/smp_lock.h>
++#include <asm/kmap_types.h>
++#include <linux/delay.h>
++#include <linux/pci.h>
++#include <linux/interrupt.h>
++#include <linux/timer.h>
++#include <asm/system.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++#include <linux/time.h>
++#include <linux/syscalls.h>
++
++#include <linux/i2c.h>
++#include <linux/gpio.h>
++#include <linux/dma-mapping.h>
++#include <media/videobuf-core.h>
++#include <media/videobuf-dma-contig.h>
++/* #include <media/v4l2-i2c-drv.h> */
++
++#ifdef CONFIG_KMOD
++#include <linux/kmod.h>
++#endif
++
++#include "project_settings_mrv.h"
++
++#include "ci_sensor_common.h"
++#include "ci_isp_common.h"
++#include "ci_va.h"
++#include "v4l2_jpg_review.h"
++
++#include "def.h"
++#include "mrstisp_reg.h"
++#include "mrstisp.h"
++#include "mrstisp_isp.h"
++#include "mrstisp_hw.h"
++#include "mrstisp_jpe.h"
++#include "mrstisp_dp.h"
++/* #include "mrstisp_mif.h" */
++
++extern unsigned char *mrst_isp_regs;
++#define MEM_CSC_REG_BASE (0x08500000)
++#define MEM_MRV_REG_BASE (mrst_isp_regs)
++#define ALIGN_TO_4(f) (((f) + 3) & ~3)
++
++/* for debug */
++extern int mrstisp_debug;
++#define dprintk(level, fmt, arg...) do { \
++ if (mrstisp_debug >= level) \
++ printk(KERN_DEBUG "mrstisp@%s: " fmt "\n", \
++ __func__, ## arg); } \
++ while (0)
++
++#define eprintk(fmt, arg...) \
++ printk(KERN_ERR "mrstisp@%s" fmt "\n", \
++ __func__, ## arg);
++
++#define DBG_entering dprintk(1, "entering");
++#define DBG_leaving dprintk(1, "leaving");
++#define DBG_line dprintk(1, " line: %d", __LINE__);
++
++#include "reg_access.h"
++
++#endif
++#endif
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstisp/include/project_settings_mrv.h
+@@ -0,0 +1,622 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++
++#ifndef _PROJECT_SETTTINGS_MRV_H
++#define _PROJECT_SETTTINGS_MRV_H
++
++/* !< information mask */
++#define DBG_INFO 0x00000001
++/* !< warning mask */
++#define DBG_WARN 0x00000002
++/* !< error mask */
++#define DBG_ERR 0x00000004
++/* !< assert mask */
++#define DBG_ASSERT 0x00000008
++/* !< mask to get all categories */
++#define DBG_CAT_ALL 0x000000FF
++
++/* !< currently used category mask */
++#define DBG_CAT_FILTER (DBG_CAT_ALL)
++
++/* !< application mask */
++#define DBG_APP 0x00002000
++/* !< MARVIN driver mask */
++#define DBG_MRV 0x00001000
++/* !< mipi driver mask */
++#define DBG_MIPI 0x00040000
++
++#define CAMERA_VB6850 11
++#define CAMERA_HW CAMERA_VB6850
++/*
++ * \name MARVIN_HW
++ * select which MARVIN hardware is used.
++ */
++
++/* MARVIN 3, basically MARVIN 1 with more resolution */
++#define MARVIN_3 2
++/* Codename: "ISP upgrade" */
++#define MARVIN_3_V2 3
++/* MARVIN_3_V2 upgrade */
++#define MARVIN_3_V22 11
++/* MARVIN_3_V2 upgrade + MI patch from 12/2006 */
++#define MARVIN_3_V22X 13
++/* MARVIN_3_V2 upgrade + MI patch from 12/2006 (package tag 15.01.07) */
++#define MARVIN_3_V22X2 15
++/* just a quick-made test version for AF, other features see below */
++#define MARVIN_3_V2B 5
++/* Codename: "M3plus" */
++#define MARVIN_3_V3 4
++/* Codename: "Autofocus/bad pixel" */
++#define MARVIN_3_V4 6
++/* MARVIN_3_V4 upgrade */
++#define MARVIN_3_V42 12
++/* MARVIN_3_V4 upgrade + MI patch from 12/2006 */
++#define MARVIN_3_V42X 14
++/* MARVIN_3_V4 upgrade + MI patch from 12/2006 + (package tag 15.01.07) */
++#define MARVIN_3_V42X2 16
++/* Codename: "EMP" */
++#define MARVIN_3_V5 7
++/* successor of MARVIN_3_V5 */
++#define MARVIN_5_V5 18
++
++/*
++ * FPGA Bitstream ID 12 (Marvin5 tag 27.02.06), used for USA roadshow in
++ * 03/2006
++ */
++#define MARVIN_5_BS12 10
++/* MARVIN 5 (product) */
++#define MARVIN_5_V1 9
++/* MARVIN 5 (product with new isp filter) */
++#define MARVIN_5_V2 20
++/* MARVIN 5 (product with Chromatic Aberration) */
++#define MARVIN_5_V3 21
++/* MARVIN 5 (with 16 beat burst) */
++#define MARVIN_5_V4 22
++/* MARVIN XF */
++#define MARVIN_5_XF 17
++/* MARVIN XF */
++#define MARVIN_5_XF_TMP 19
++
++/* MARVIN 12MP */
++#define MARVIN_12_V1 23
++
++/* MARVIN 5 (with 16 beat burst) */
++#define MARVIN_5_V4_R20 30
++
++
++/* Currently used MARVIN version */
++#define MARVIN_HW MARVIN_5_V4_R20
++
++/*
++ * MRV_SUPPORT_xxx
++ * Some compile-time-configurable features of the MARVIN driver.
++ * Set the certain defines to a non-zero value to enable the feature
++ */
++
++/*
++ * routines to convert state and configuration enums into human readable
++ * text. (useful in e.g. debug outputs)
++ */
++#define MRV_SUPPORT_STATE2STRING 1
++
++/*
++ * routines to read, write and dump the register set of the MARVIN module
++ */
++#define MRV_SUPPORT_REGDUMP 1
++
++/*
++ * second level support routines for e.g. exposure control, auto focus
++ * and white balance. Second level support routines are
++ * those using the plain routines of mrv.h to implement a kind of
++ * "helper" to program/access/use the MARVIN with a bit more
++ * abstraction.
++ */
++#define MRV_SUPPORT_SL 1
++
++/*
++ * \mainpage MARVIN SOFTWARE
++ * \b File: project_settings_mrv.h
++ *
++ * <!-- \section Global Descriptions and Information
++ * (settings, definitions, software changes) -->
++ *
++ * For global descriptions and information see under "Modules"
++ *
++ */
++
++/*
++ * \addtogroup MARVIN_DEFINES_00 MARVIN Features
++ * \b File: project_settings_mrv.h
++ *
++ * \par MARVIN Features
++ * Depends on the used MARVIN_HW. Direct usage of MARVIN_HW should be
++ * avoided wherever possible.
++ * This makes it VERY MUCH easier to adapt the driver to new MARVIN
++ * versions with a feature set suited to a certain customer.
++ *
++ * \par MARVIN_FEATURE_CHIP_ID (integer)
++ * ID value contained in the chip. This is to be able to identify
++ * the chip derivative during runtime of the software
++ *
++ * \par MARVIN_FEATURE_CAMBUSWIDTH: (integer)
++ * How many bits can be captured from the image sensor?
++ * MARVIN_FEATURE_8BITS = sensor bus width is 8 bits
++ * MARVIN_FEATURE_10BITS = sensor bus width is 10 bits
++ * MARVIN_FEATURE_12BITS = sensor bus width is 12 bits
++ *
++ * \par MARVIN_FEATURE_XTALK: (integer)
++ * separate crosstalk matrix. without this feature, the crosstalk
++ * coefficients have to be combined with the color conversion matrix
++ * MARVIN_FEATURE_XTALK_9BITS = coefficient range -2.0 ... +1.992 ( 9 Bit)
++ * MARVIN_FEATURE_XTALK_10BITS = coefficient range -4.0 ... +3.992 (10 Bit)
++ * MARVIN_FEATURE_XTALK_11BITS = coefficient range -8.0 ... +7.992 (11 Bit)
++ * MARVIN_FEATURE_EXIST_NOT = no separate xtalk matrix
++ *
++ * \par MARVIN_FEATURE_XTALK_OFFSET: (boolean)
++ * add a offset to the crosstalk matrix output
++ *
++ * \par MARVIN_FEATURE_GAMMAOUT: (boolean)
++ * gamma correction for luminance channel at the output of the
++ * ISP block.
++ *
++ * \par MARVIN_FEATURE_FRAMESIZE: (integer)
++ * Maximum frame size (at sensor input) MARVIN can handle.
++ * MARVIN_FEATURE_1M9 = 1.9 megapixels
++ * MARVIN_FEATURE_3M1 = 3.1 megapixels
++ * MARVIN_FEATURE_5M3 = 5.3 megapixels
++ *
++ * \par MARVIN_FEATURE_SSCALE: (boolean)
++ * Selfpath feature, and therefore the selfpath scaler feature also. If set to
++ * MARVIN_FEATURE_EXIST_NOT, the whole self data path is not present.
++ *
++ * \par MARVIN_FEATURE_SSCALE_UP: (boolean)
++ * Upscale capability of the self path scaler. This feature enables
++ * the scaler to do upscaling up to the factor of 5
++ *
++ * \par MARVIN_FEATURE_SSCALE_FACTORCALC: (integer)
++ * Specifies the strategy to calculate the scale factors for the self scaler.
++ * Note that this define is the successor of the MARVIN_FEATURE_SSCALE_LANES,
++ * which does not longer reflect the 'true' implementations of the scaler
++ * hardware and therefore has been removed.
++ * MARVIN_FEATURE_SCALEFACTOR_COMBINED_UV = 'traditional' behaviour: The
++ * scaler handles the U and V chroma components as if they were two joined
++ * pixels. Thus, when YUV422 subsampled input data is to be processed and
++ * no further subsampling is required, the scale factors of luma and chroma
++ * pathes must be identical.
++ * MARVIN_FEATURE_SCALEFACTOR_SEPARATED_UV = 'new style' behaviour: The
++ * scaler handles the U and V chroma components as if they belong to
++ * totally different picture planes. Thus, when YUV422 subsampled input
++ * data is to be processed and no further subsampling is required, the
++ * scale factors of the chroma path must be calculated like those of the
++ * luma path, but with only half of the image width.
++ *
++ * \par MARVIN_FEATURE_MSCALE_UP: (boolean)
++ * Upscale capability of the main path scaler. This feature enables
++ * the scaler to do upscaling up to the factor of 5
++ *
++ * \par MARVIN_FEATURE_MSCALE_FACTORCALC: (integer)
++ * Specifies the strategy to calculate the scale factors for the main scaler.
++ * MARVIN_FEATURE_SCALEFACTOR_COMBINED_UV = 'traditional' behaviour: The
++ * scaler handles the U and V chroma components as if they were two joined
++ * pixels. Thus, when YUV422 subsampled input data is to be processed and
++ * no further subsampling is required, the scale factors of luma and chroma
++ * pathes must be identical.
++ * MARVIN_FEATURE_SCALEFACTOR_SEPARATED_UV = 'new style' behaviour: The
++ * scaler handles the U and V chroma components as if they belong to
++ * totally different picture planes. Thus, when YUV422 subsampled input
++ * data is to be processed and no further subsampling is required, the
++ * scale factors of the chroma path must be calculated like those of the
++ * luma path, but with only half of the image width.
++ *
++ * \par MARVIN_FEATURE_SCALE_FACTORWIDTH: (integer)
++ * Width of the scalefactors for both main and self scaler
++ * MARVIN_FEATURE_12BITS = 12 bits precision
++ * MARVIN_FEATURE_14BITS = 14 bits precision
++ * MARVIN_FEATURE_16BITS = 16 bits precision
++ *
++ * \par MARVIN_FEATURE_AF_MEASURE: (boolean)
++ * Autofocus measurement block (attached to the demosaicing block)
++ *
++ * \par MARVIN_FEATURE_BAD_PIXEL: (boolean)
++ * Bad pixel detection/correction block
++ *
++ * \par MARVIN_FEATURE_BAD_PIXEL_WIDTH: (integer)
++ * Bad pixel detection/correction block register size
++ * MARVIN_FEATURE_10BITS = 10 bits precision
++ * MARVIN_FEATURE_12BITS = 12 bits precision
++ * MARVIN_FEATURE_EXIST_NOT = no bad pixel detection/correction block
++ *
++ * \par MARVIN_FEATURE_BAD_PIXEL_RAM: (integer)
++ * Bad pixel table ram address size
++ * MARVIN_FEATURE_7BITS = 128 entries
++ * MARVIN_FEATURE_11BITS = 2048 entries
++ * MARVIN_FEATURE_EXIST_NOT = no bad pixel ram block
++ *
++ * \par MARVIN_FEATURE_SUPERIMPOSE: (boolean)
++ * Superimpose block, used to combine camera picture with a static
++ * one coming from the system memory (chroma keying)
++ *
++ * \par MARVIN_FEATURE_CHROM_ABERRATION: (boolean)
++ * Chromatic aberration block corrects color fringing
++ *
++ * \par MARVIN_FEATURE_IMAGE_EFFECTS: (boolean)
++ * Image effects block, various modes like grayscale, sepia, emboss
++ * sketch, etc.
++ *
++ * \par MARVIN_FEATURE_LENS_SHADING: (boolean)
++ * Lens shading compensation block
++ *
++ * \par MARVIN_FEATURE_ISP_ERRORFLAGS: (boolean)
++ * Some registers containing more detailed error flags of the ISP.
++ * These may help during system integration.
++ *
++ * \par MARVIN_FEATURE_FRAMECOUNTER: (boolean)
++ * Frame counter register
++ *
++ * \par MARVIN_FEATURE_FLASH_LIGHT: (boolean)
++ * Support for frame-synchronized triggering of a LED or xenon based
++ * flashlight
++ *
++ * \par MARVIN_FEATURE_SHUTTER: (boolean)
++ * Support for driving an external mechanical shutter
++ *
++ * \par MARVIN_FEATURE_IMG_STABILIZATION: (integer)
++ * Support for digital image stabilization (=compensation against
++ * small movements)
++ * MARVIN_FEATURE_IMG_STABILIZATION_V1 = represents second output formatter
++ * at ISP output, no image
++ * stabilization functionality, located
++ * in the ISP bayer path only.
++ * MARVIN_FEATURE_IMG_STABILIZATION_V2 = represents image stabilization
++ * including output formatter, located
++ * in both bayer and YCbCr paths, but
++ * not in the raw data path.
++ * MARVIN_FEATURE_EXIST_NOT = there is no output image stabilization
++ *
++ * \par MARVIN_FEATURE_ISP_HISTOGRAM: (boolean)
++ * Histogram measurement block
++ *
++ * \par MARVIN_FEATURE_ISP_FILTER: (boolean)
++ * Additional blurring/sharpness filter
++ *
++ * \par MARVIN_FEATURE_SMIA: (integer)
++ * SMIA camera protocol version switch
++ * MARVIN_FEATURE_SMIA_MIPI_EMP = EMP version that contains just the
++ * SMIA and MIPI application protocol
++ * with two embedded data areas
++ * MARVIN_FEATURE_SMIA_EM = EMP version that contains just the SMIA
++ * application protocol
++ * MARVIN_FEATURE_SMIA_COMPLETE= SMIA module that contains the complete SMIA
++ * functionality.
++ * MARVIN_FEATURE_EXIST_NOT = there is no SMIA module
++ *
++ * \par MARVIN_FEATURE_AUTO_EXPOSURE: (integer)
++ * measurement unit for automatic exposure control
++ * MARVIN_FEATURE_AUTO_EXPOSURE_V1 = First implemented auto-exposure
++ * algorithm version
++ * MARVIN_FEATURE_AUTO_EXPOSURE_V2 = Second implemented auto-exposure
++ * algorithm version
++ *
++ * \par MARVIN_FEATURE_MI_STATUSFLAGS: (boolean)
++ * MI status flags needed for debugging purposes
++ *
++ * \par MARVIN_FEATURE_MIPI: (boolean)
++ * MIPI camera protocol block
++ *
++ * \par MARVIN_FEATURE_SMALL_OUTUNIT: (boolean)
++ * A small output unit instead of MI module
++ *
++ * \par MARVIN_FEATURE_CLOCK_DOMAINS: (integer)
++ * MARVIN_CLOCK_DOMAINS_1 = One clock domain for the complete MARVIN.
++ * MARVIN_CLOCK_DOMAINS_2 = Two clock domains (Camera data clock and AHB
++ * clock)
++ *
++ * \par MARVIN_FEATURE_WB: (integer)
++ * measurement and correction unit for white balance
++ * MARVIN_FEATURE_WB_V1 = basic white balance block
++ * MARVIN_FEATURE_WB_V2 = like version 1, but Y_max added
++ * MARVIN_FEATURE_WB_V3 = like version 2, but green_diff_gain added
++ * MARVIN_FEATURE_WB_V4 = version 4 of the white balance block. Extended gain
++ * range 0..4, resolution 10 bit, separated green
++ * gains for red and blue rows of bayer pattern.
++ *
++ * \par MARVIN_FEATURE_OUTPUT_FORMATTER: (integer)
++ * position of the output formatter
++ * MARVIN_FEATURE_OUTPUT_FORMATTER_V1 = exists at ISP output (old style)
++ * MARVIN_FEATURE_OUTPUT_FORMATTER_V2 = exists at ISP input
++ * MARVIN_FEATURE_EXIST_NOT = there is no output formatter, as
++ * the image stabilization contains
++ * already this functionality
++ *
++ * \par MARVIN_FEATURE_MI: (integer)
++ * MARVIN_FEATURE_MI_V1 = basic version
++ * MARVIN_FEATURE_MI_V2 = introducing self-path DMA read
++ * MARVIN_FEATURE_MI_V3 = self path DMA read, rotation, line stripe, 8
++ * beat burst
++ *
++ * \par MARVIN_FEATURE_DMA_READ: (integer)
++ * MARVIN_FEATURE_DMA_READ_V1 = version 1
++ * MARVIN_FEATURE_DMA_READ_V2 = version 2
++ * MARVIN_FEATURE_DMA_READ_V3 = version 3
++ * MARVIN_FEATURE_DMA_READ_V4 = version 4
++ * MARVIN_FEATURE_EXIST_NOT = there is no DMA read feature
++ *
++ * \par MARVIN_FEATURE_JPE_SIZE: (integer)
++ * MARVIN_FEATURE_JPE_SIZE_11BITS =11 Bits for JPE_HSIZE and JPE_VSIZE, only
++ * Marvin1.
++ * MARVIN_FEATURE_JPE_SIZE_12BITS =12 Bits for JPE_HSIZE and JPE_VSIZE, all
++ * MARVIN3.
++ * MARVIN_FEATURE_JPE_SIZE_13BITS = 13 Bits for JPE_HSIZE and JPE_VSIZE, all
++ * MARVIN5.
++ *
++ * \par MARVIN_FEATURE_BLACK_LEVEL: (integer)
++ * MARVIN_FEATURE_EXIST_NOT = there is no BLS module
++ * MARVIN_FEATURE_BLACK_LEVEL_V1 = version 1; basic version with 8 Bit
++ * registers
++ * MARVIN_FEATURE_BLACK_LEVEL_V2 = version 2; extended version with 10 Bit
++ * registers
++ * MARVIN_FEATURE_BLACK_LEVEL_V3 = version 3; extended version with 12 Bit
++ * registers
++ * MARVIN_FEATURE_BLACK_LEVEL_V4 = version 4; advanced version with 2
++ * independent measurement windows
++ * and signed values; 10 Bit
++ * registers
++ * MARVIN_FEATURE_BLACK_LEVEL_V5 = version 5; like version 4
++ * with 12 Bit registers
++ *
++ * \par MARVIN_FEATURE_DPMUX_YCSPLIT: (integer)
++ * MARVIN_FEATURE_YCS_V1 = traditional datapath setup; separate datapath for
++ * raw data, y/c splitter does not support self path
++ * only mode
++ * MARVIN_FEATURE_YCS_V2 = version 2, raw data routed through main path,
++ * y/c splitter supports self path only mode.
++ *
++ * \par MARVIN_FEATURE_DPMUX_MAINPATH: (integer)
++ * MARVIN_FEATURE_DPMUX_MAIN_V1 = Traditional mainpath muxer. No direct path
++ * from DMA-read to JPEG encoder, explicit RAW
++ * datapath to MI
++ * MARVIN_FEATURE_DPMUX_MAIN_V2 = new DPCL register settings,
++ * possibility to feed
++ * JPEG encoder directly via DMA-Read
++ *
++ * \par MARVIN_FEATURE_INPUT_AQUISITION: (integer)
++ * MARVIN_FEATURE_IAQU_V1 = Traditional version, supports following modes:
++ * raw data mode,
++ * raw picture according to ITU-R BT.601,
++ * RGB Bayer according to ITU-R BT.601,
++ * ITU-R BT601 (YCbCr data),
++ * ITU-R BT656 (YCbCr data)
++ * MARVIN_FEATURE_IAQU_V2 = Additional modes:
++ * RGB Bayer according to ITU-R BT.656, raw
++ * picture according to ITU-R BT.656
++ *
++ * \par MARVIN_FEATURE_JPE: (integer)
++ * MARVIN_FEATURE_JPE_V1 = Basic version
++ * MARVIN_FEATURE_JPE_V2 = Enable bit frame synchronization
++ * MARVIN_FEATURE_JPE_V3 = flags for Motion JPEG
++ *
++ * \par MARVIN_FEATURE_EXT_YCBCR_RANGE: (boolean)
++ * ???
++ *
++ * \par MARVIN_FEATURE_SP_DMA: (boolean)
++ * ???
++ * \par MARVIN_FEATURE_MI_BURST_16: (boolean)
++ * MARVIN_FEATURE_EXIST = AHB 16 beat burst
++ * MARVIN_FEATURE_EXIST_NOT = AHB burst to 8 or 4 is possible
++ * \par MARVIN_FEATURE_MI_LAST_PIXEL: (boolean)
++ * last pixel signalization
++ */
++
++/* \name Values for all boolean features */
++#define MARVIN_FEATURE_EXIST_NOT (0)
++#define MARVIN_FEATURE_EXIST (1)
++
++/*
++ * \name Values for MARVIN_FEATURE_FRAMESIZE and
++ * MARVIN_FEATURE_MI_FRAMESIZE
++ */
++#define MARVIN_FEATURE_1M9 1
++#define MARVIN_FEATURE_3M1 2
++#define MARVIN_FEATURE_5M3 3
++#define MARVIN_FEATURE_12M6 4
++
++/* \name Values for MARVIN_FEATURE_CAMBUSWIDTH and
++ * MARVIN_FEATURE_SCALE_FACTORWIDTH
++ */
++#define MARVIN_FEATURE_7BITS 7
++#define MARVIN_FEATURE_8BITS 8
++#define MARVIN_FEATURE_9BITS 9
++#define MARVIN_FEATURE_10BITS 10
++#define MARVIN_FEATURE_11BITS 11
++#define MARVIN_FEATURE_12BITS 12
++#define MARVIN_FEATURE_14BITS 14
++#define MARVIN_FEATURE_16BITS 16
++
++/* \name Values for MARVIN_FEATURE_SMIA */
++#define MARVIN_FEATURE_SMIA_COMPLETE 1
++#define MARVIN_FEATURE_SMIA_EMP 2
++#define MARVIN_FEATURE_SMIA_MIPI_EMP 3
++
++/* \name Values for MARVIN_FEATURE_AUTO_EXPOSURE */
++#define MARVIN_FEATURE_AUTO_EXPOSURE_V1 1
++#define MARVIN_FEATURE_AUTO_EXPOSURE_V2 2
++#define MARVIN_FEATURE_AUTO_EXPOSURE_V3 3
++
++/* \name Values for MARVIN_FEATURE_CLOCK_DOMAINS */
++#define MARVIN_CLOCK_DOMAINS_1 1
++#define MARVIN_CLOCK_DOMAINS_2 2
++
++/* \name Values for MARVIN_FEATURE_WB: (integer) */
++#define MARVIN_FEATURE_WB_V4 4
++#define MARVIN_FEATURE_WB_V5 5
++
++/* \name Values for MARVIN_FEATURE_XTALK: (integer) */
++/* coefficient range -2.0 ... +1.992 ( 9 Bit) */
++#define MARVIN_FEATURE_XTALK_9BITS 2
++/* coefficient range -4.0 ... +3.992 (10 Bit) */
++#define MARVIN_FEATURE_XTALK_10BITS 3
++/* coefficient range -8.0 ... +7.992 (11 Bit) */
++#define MARVIN_FEATURE_XTALK_11BITS 4
++
++#define MARVIN_FEATURE_GAMMAIN_10BITS 1
++#define MARVIN_FEATURE_GAMMAIN_12BITS 2
++/* \name Values for MARVIN_FEATURE_OUTPUT_FORMATTER: (integer) */
++#define MARVIN_FEATURE_OUTPUT_FORMATTER_V1 (2)
++#define MARVIN_FEATURE_OUTPUT_FORMATTER_V2 (3)
++
++/* \name Values for MARVIN_FEATURE_IMG_STABILIZATION: (integer) */
++#define MARVIN_FEATURE_IMG_STABILIZATION_V1 (2)
++#define MARVIN_FEATURE_IMG_STABILIZATION_V2 (3)
++
++/*
++ * \name Values for MARVIN_FEATURE_SSCALE_FACTORCALC and
++ * MARVIN_FEATURE_MSCALE_FACTORCALC: (integer)
++ */
++#define MARVIN_FEATURE_SCALEFACTOR_COMBINED_UV (2)
++#define MARVIN_FEATURE_SCALEFACTOR_SEPARATED_UV (3)
++
++/* \name Values for MARVIN_FEATURE_MI: (integer) */
++#define MARVIN_FEATURE_MI_V1 (2)
++#define MARVIN_FEATURE_MI_V2 (3)
++#define MARVIN_FEATURE_MI_V3 (4)
++#define MARVIN_FEATURE_MI_V4 (5)
++
++/* \name Values for MARVIN_FEATURE_DMA_READ: (integer) */
++#define MARVIN_FEATURE_DMA_READ_V1 (2)
++#define MARVIN_FEATURE_DMA_READ_V2 (3)
++#define MARVIN_FEATURE_DMA_READ_V3 (4)
++#define MARVIN_FEATURE_DMA_READ_V4 (5)
++
++/* \name Values for MARVIN_FEATURE_JPE_SIZE: (integer) */
++#define MARVIN_FEATURE_JPE_SIZE_11BITS 1
++#define MARVIN_FEATURE_JPE_SIZE_12BITS 2
++#define MARVIN_FEATURE_JPE_SIZE_13BITS 3
++
++/* \name Values for MARVIN_FEATURE_BLACK_LEVEL: (integer) */
++#define MARVIN_FEATURE_BLACK_LEVEL_V1 (2)
++#define MARVIN_FEATURE_BLACK_LEVEL_V2 (3)
++#define MARVIN_FEATURE_BLACK_LEVEL_V3 (4)
++#define MARVIN_FEATURE_BLACK_LEVEL_V4 (5)
++#define MARVIN_FEATURE_BLACK_LEVEL_V5 (6)
++
++/* \name Values for MARVIN_FEATURE_DPMUX_YCSPLIT: (integer) */
++#define MARVIN_FEATURE_YCS_V1 1
++#define MARVIN_FEATURE_YCS_V2 2
++
++/* \name Values for MARVIN_FEATURE_DPMUX_MAINPATH: (integer) */
++#define MARVIN_FEATURE_DPMUX_MAIN_V1 1
++#define MARVIN_FEATURE_DPMUX_MAIN_V2 2
++
++/* \name Values for MARVIN_FEATURE_INPUT_AQUISITION: (integer) */
++#define MARVIN_FEATURE_IAQU_V1 1
++#define MARVIN_FEATURE_IAQU_V2 2
++
++/* \name Values for MARVIN_FEATURE_JPE: (integer) */
++#define MARVIN_FEATURE_JPE_V1 (2)
++#define MARVIN_FEATURE_JPE_V2 (3)
++#define MARVIN_FEATURE_JPE_V3 (4)
++
++/* \name Values for MARVIN_FEATURE_JPE_CFG: (integer) */
++#define MARVIN_FEATURE_JPE_CFG_V1 (2)
++#define MARVIN_FEATURE_JPE_CFG_V2 (3)
++
++
++/* \name Values for MARVIN_FEATURE_ISP_FILTER: (integer) */
++#define MARVIN_FEATURE_ISP_FILTER_V1 1
++#define MARVIN_FEATURE_ISP_FILTER_V2 2
++
++/* \name Values for MARVIN_FEATURE_LENS_SHADING: (integer) */
++#define MARVIN_FEATURE_LSC_V1 1
++#define MARVIN_FEATURE_LSC_V2 2
++
++/* \name Values for MARVIN_FEATURE_HISTOGRAM: (integer) */
++#define MARVIN_FEATURE_HIST_V1 1
++#define MARVIN_FEATURE_HIST_V2 2
++#define MARVIN_FEATURE_HIST_V3 3
++
++#define MARVIN_FEATURE_IE_V1 (2)
++#define MARVIN_FEATURE_IE_V2 (3)
++#define MARVIN_FEATURE_IE_V3 (4)
++
++#if (MARVIN_HW == MARVIN_5_V4_R20)
++#define MARVIN_FEATURE_CHIP_ID 0x20453010
++#define MARVIN_FEATURE_CAMBUSWIDTH MARVIN_FEATURE_12BITS
++#define MARVIN_FEATURE_XTALK MARVIN_FEATURE_XTALK_11BITS
++ #define MARVIN_FEATURE_GAMMAIN MARVIN_FEATURE_GAMMAIN_12BITS
++#define MARVIN_FEATURE_GAMMAOUT MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_FRAMESIZE MARVIN_FEATURE_5M3
++#define MARVIN_FEATURE_SP_DMA MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_SSCALE MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_SSCALE_UP MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_SSCALE_FACTORCALC \
++ MARVIN_FEATURE_SCALEFACTOR_SEPARATED_UV
++#define MARVIN_FEATURE_MSCALE_UP MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_MSCALE_FACTORCALC \
++ MARVIN_FEATURE_SCALEFACTOR_SEPARATED_UV
++#define MARVIN_FEATURE_SCALE_FACTORWIDTH MARVIN_FEATURE_14BITS
++#define MARVIN_FEATURE_AF_MEASURE MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_BAD_PIXEL MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_BAD_PIXEL_WIDTH MARVIN_FEATURE_12BITS
++#define MARVIN_FEATURE_BAD_PIXEL_RAM MARVIN_FEATURE_11BITS
++#define MARVIN_FEATURE_SUPERIMPOSE MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_IMAGE_EFFECTS MARVIN_FEATURE_IE_V1
++#define MARVIN_FEATURE_LENS_SHADING MARVIN_FEATURE_LSC_V2
++#define MARVIN_FEATURE_ISP_ERRORFLAGS MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_FRAMECOUNTER MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_FLASH_LIGHT MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_EXT_YCBCR_RANGE MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_SHUTTER MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_IMG_STABILIZATION MARVIN_FEATURE_IMG_STABILIZATION_V2
++#define MARVIN_FEATURE_ISP_HISTOGRAM MARVIN_FEATURE_HIST_V2
++#define MARVIN_FEATURE_ISP_CSM MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_ISP_FILTER MARVIN_FEATURE_ISP_FILTER_V2
++#define MARVIN_FEATURE_SMIA MARVIN_FEATURE_SMIA_COMPLETE
++#define MARVIN_FEATURE_AUTO_EXPOSURE MARVIN_FEATURE_AUTO_EXPOSURE_V3
++#define MARVIN_FEATURE_MI_STATUSFLAGS MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_MIPI MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_SMALL_OUTUNIT MARVIN_FEATURE_EXIST_NOT
++#define MARVIN_FEATURE_CLOCK_DOMAINS MARVIN_CLOCK_DOMAINS_1
++#define MARVIN_FEATURE_WB MARVIN_FEATURE_WB_V5
++#define MARVIN_FEATURE_OUTPUT_FORMATTER MARVIN_FEATURE_OUTPUT_FORMATTER_V2
++#define MARVIN_FEATURE_MI MARVIN_FEATURE_MI_V4
++#define MARVIN_FEATURE_DMA_READ MARVIN_FEATURE_DMA_READ_V3
++#define MARVIN_FEATURE_JPE_SIZE MARVIN_FEATURE_JPE_SIZE_13BITS
++#define MARVIN_FEATURE_BLACK_LEVEL MARVIN_FEATURE_BLACK_LEVEL_V5
++#define MARVIN_FEATURE_DPMUX_YCSPLIT MARVIN_FEATURE_YCS_V2
++#define MARVIN_FEATURE_DPMUX_MAINPATH MARVIN_FEATURE_DPMUX_MAIN_V2
++#define MARVIN_FEATURE_INPUT_AQUISITION MARVIN_FEATURE_IAQU_V2
++#define MARVIN_FEATURE_JPE MARVIN_FEATURE_JPE_V3
++#define MARVIN_FEATURE_JPE_CFG MARVIN_FEATURE_JPE_CFG_V1
++#define MARVIN_FEATURE_XTALK_OFFSET MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_CHROM_ABERRATION MARVIN_FEATURE_EXIST_NOT
++#define MARVIN_FEATURE_MI_BURST_16 MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_MI_LAST_PIXEL MARVIN_FEATURE_EXIST
++#define MARVIN_FEATURE_MI_FRAMESIZE MARVIN_FEATURE_12M6
++#define MARVIN_FEATURE_BAYER_RGB MARVIN_FEATURE_EXIST
++
++#endif /* MARVIN_HW */
++
++#endif
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstisp/include/reg_access.h
+@@ -0,0 +1,233 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#ifndef _REG_ACCESS_H
++#define _REG_ACCESS_H
++
++/*
++ * Notes:
++ *
++ * registers:
++ * - use these macros to allow a central way e.g. to print out debug
++ * information on register access
++ *
++ * slices:
++ * - "parameter" \a reg could be a hardware register or a (32bit) variable,
++ * but not a pointer!
++ * - each slice (specified as "parameter" \a name) requires two \#defines:
++ * \b \<name\>_MASK : defines the mask to use on register side
++ * \b \<name\>_SHIFT : defines the shift value to use (left on write, right
++ * on read)
++ *
++ * arrays:
++ * - "parameter" \a areg could be an (32bit) array or a pointer to the
++ * first array element
++ * - each one-dimensional array (specified as "parameter" \a name) requires
++ * one \#define
++ * - <tt> \<name\>_ARR_SIZE </tt>: number of elements
++ * - each two-dimensional array (specified as "parameter" <name>) requires
++ * four \#defines:
++ * - <tt> \<name\>_ARR_SIZE1 </tt>: number of elements in first dimension
++ * - <tt> \<name\>_ARR_SIZE2 </tt>: number of elements in second dimension
++ * - <tt> \<name\>_ARR_OFS1 </tt>: offset between two consecutive elements
++ * in first dimension
++ * - <tt> \<name\>_ARR_OFS2 </tt>: offset between two consecutive elements
++ * in second dimension
++ */
++
++/*
++ * reads and returns the complete value of register \a reg
++ * \note Use these macro to allow a central way e.g. to print out debug
++ * information on register access.
++ */
++
++/* helper function to let REG_READ return the value */
++
++#define DBG_DD(x) \
++ do { \
++ if (mrstisp_debug >= 4) { \
++ printk(KERN_INFO "mrstisp@%s ", __func__); \
++ printk x; \
++ } \
++ } while (0)
++
++static inline u32 _reg_read(u32 reg, const char *text)
++{
++ u32 variable = reg;
++ DBG_DD((text, variable));
++ return variable;
++}
++
++#define REG_READ(reg) \
++_reg_read((reg), "REG_READ(" VAL2STR(reg) "): 0x%08X\n")
++
++static inline u32 _reg_read_ex(u32 reg, const char *text)
++{
++ u32 variable = reg;
++ DBG_DD((text, variable));
++ return variable;
++}
++
++#define REG_READ_EX(reg) \
++_reg_read_ex((reg), "REG_READ_EX(" VAL2STR(reg) "): 0x%08X\n")
++/*
++ * writes the complete value \a value into register \a reg
++ * \note Use these macro to allow a central way e.g. to print out debug
++ * information on register access.
++ */
++#define REG_WRITE(reg, value) \
++{ \
++ dprintk(4, \
++ "REG_WRITE(" VAL2STR(reg) ", " VAL2STR(value) "): 0x%08X", (value)); \
++ (reg) = (value); \
++}
++
++#define REG_WRITE_EX(reg, value) \
++{ \
++ (reg) = (value); \
++}
++
++
++/*
++ * returns the value of slice \a name from register or variable \a reg
++ * \note "parameter" \a reg could be a hardware register or a (32bit)
++ * variable, but not a pointer! \n
++ * each slice (specified as "parameter" \a name) requires two \#defines: \n
++ * - <tt>\<name\>_MASK </tt>: defines the mask to use on register side
++ * - <tt>\<name\>_SHIFT </tt>: defines the shift value to use (left on write,
++ * right on read)
++ */
++
++static inline u32 _reg_get_slice(const char *text, u32 val)
++{
++ u32 variable = val;
++ DBG_DD((text, variable));
++ return val;
++}
++
++#define REG_GET_SLICE_EX(reg, name) \
++ (((reg) & (name##_MASK)) >> (name##_SHIFT))
++
++#define REG_GET_SLICE(reg, name) \
++ _reg_get_slice("REG_GET_SLICE(" VAL2STR(reg) ", " VAL2STR(name) \
++ "): 0x%08X\n" , \
++ (((reg) & (name##_MASK)) >> (name##_SHIFT)))
++
++/*
++ * writes the value \a value into slice \a name of register or variable \a reg
++ * \note "parameter" \a reg could be a hardware register or a (32bit) variable,
++ * but not a pointer! \n
++ * each slice (specified as "parameter" \a name) requires two \#defines: \n
++ * - <tt>\<name\>_MASK </tt>: defines the mask to use on register side
++ * - <tt>\<name\>_SHIFT </tt>: defines the shift value to use (left on write,
++ * right on read)
++ */
++#define REG_SET_SLICE(reg, name, value) \
++{ \
++ dprintk(4, \
++ "REG_SET_SLICE(" VAL2STR(reg) ", " VAL2STR(name) \
++ ", " VAL2STR(value) "): 0x%08X", \
++ (value)); \
++ ((reg) = (((reg) & ~(name##_MASK)) | \
++ (((value) << (name##_SHIFT)) & (name##_MASK)))); \
++}
++
++#define REG_SET_SLICE_EX(reg, name, value) \
++{ \
++ ((reg) = (((reg) & ~(name##_MASK)) | \
++ (((value) << (name##_SHIFT)) & (name##_MASK)))); \
++}
++
++/*
++ * returns the value of element \a idx from register array or array variable \a
++ * areg
++ * \note "parameter" \a areg could be an (32bit) array or a pointer to the first
++ * array element \n
++ * each one-dimensional array (specified as "parameter" \a name) requires one
++ * \#define: \n
++ * - <tt>\<name\>_ARR_SIZE </tt>: number of elements
++ */
++#define REG_GET_ARRAY_ELEM1(areg, name, idx) \
++((idx < name##_ARR_SIZE) \
++? areg[idx] \
++: 0)
++
++
++/*
++ * writes the value \a value into element \a idx of register array or array
++ * variable \a areg
++ * \note "parameter" \a areg could be an (32bit) array or a pointer to the
++ * first array element \n
++ * each one-dimensional array (specified as "parameter" \a name) requires
++ * one \#define: \n
++ * - <tt>\<name\>_ARR_SIZE </tt>: number of elements
++ */
++#define REG_SET_ARRAY_ELEM1(areg, name, idx, value) \
++((idx < name##_ARR_SIZE) \
++? areg[idx] = value \
++: 0)
++
++
++/*
++ * returns the value of element \a idx1, \a idx2 from two-dimensional register
++ * array or array variable \a areg
++ * \note "parameter" \a areg could be an (32bit) array or a pointer to the
++ * first array element \n
++ * each two-dimensional array (specified as "parameter" \a name) requires
++ * four \#defines:
++ * - <tt>\<name\>_ARR_SIZE1 </tt>: number of elements in first dimension
++ * - <tt>\<name\>_ARR_SIZE2 </tt>: number of elements in second dimension
++ * - <tt>\<name\>_ARR_OFS1 </tt>: offset between two consecutive
++ * elements in first dimension
++ * - <tt>\<name\>_ARR_OFS2 </tt>: offset between two consecutive
++ * elements in second dimension
++ */
++#define REG_GET_ARRAY_ELEM2(areg, name, idx1, idx2) \
++(((idx1 < name##_ARR_SIZE1) && (idx2 < name##_ARR_SIZE2)) \
++? areg[(idx1 * name##_ARR_OFS1) + (idx2 * name##_ARR_OFS2)] \
++: 0)
++
++
++/*
++ * writes the value \a value into element \a idx1, \a idx2 of two-dimensional
++ * register array or array variable \a areg
++ * \note "parameter" \a areg could be an (32bit) array or a pointer to the
++ * first array element \n
++ * each two-dimensional array (specified as "parameter" \a name) requires
++ * four \#defines:
++ * - <tt>\<name\>_ARR_SIZE1 </tt>: number of elements in first dimension
++ * - <tt>\<name\>_ARR_SIZE2 </tt>: number of elements in second dimension
++ * - <tt>\<name\>_ARR_OFS1 </tt>: offset between two consecutive
++ * elements in first dimension
++ * - <tt>\<name\>_ARR_OFS2 </tt>: offset between two consecutive
++ * elements in second dimension
++ */
++#define REG_SET_ARRAY_ELEM2(areg, name, idx1, idx2, value) \
++(((idx1 < name##_ARR_SIZE1) && (idx2 < name##_ARR_SIZE2)) \
++? areg[(idx1 * name##_ARR_OFS1) + (idx2 * name##_ARR_OFS2)] = value \
++: 0)
++
++/* _REG_ACCESS_H */
++#endif
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-moorestown-camera-driver-10.0-2-3.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-moorestown-camera-driver-10.0-2-3.patch
new file mode 100644
index 0000000..f3e8159
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-moorestown-camera-driver-10.0-2-3.patch
@@ -0,0 +1,9779 @@
+From b5dc72f2a0bc509a241e03eea51e739204315f93 Mon Sep 17 00:00:00 2001
+From: Zheng Ba <zheng.ba@intel.com>
+Date: Thu, 1 Apr 2010 16:24:20 +0800
+Subject: [PATCH 2/3] Moorestown Camera Imaging driver Beta 10.0
+
+Patch-mainline: 2.6.35?
+
+Changes from Beta 9.0:
+1. Fixed hsd sighting
+ 3469638 3469639 3469710 3469822 (high)
+ 3469697 (medium)
+
+Changes from Beta 8.0:
+1. Fixed hsd sighting
+ 3469056 3469058 (critical)
+ 3469705 3469696 3469709 3469510 (medium)
+
+Changes from Beta 7.0:
+1. Fixed hsd sighting 3469681,3469682,3469683 (high)
+
+Changes from Beta 6.0:
+1. Fixed hsd sighting 3469668 (high)
+2. Fixed ov5630 v4l2 view-finding dark issue
+3. Enabled support for popular v4l2 applications (cheese, skype, ffmpeg)
+
+Changes from Beta 5.1:
+1. Fixed CRITICAL sighting 3469558 -- ciapp fails to launch with segment fault
+2. Fixed HIGH sighting 3479513 -- ov5630 AWB unstable
+3. Improved KMOT sensor 720p fps from 30 to 40
+
+Changes from Beta 5.0:
+Fixed a critical issue of camera driver not loading -- hsd 3469557
+
+Main changes from Beta 4.0:
+Fixed 4 HSD sightings: 3469392,3469099,3469470,3469500
+
+Main changes from Beta 3.0:
+Fixed 7 HSD sightings: 3469264,3469112,3469395,3469103,3469105,3469471,3469484
+
+Main changes from Beta 2.0:
+Fixed 6 HSD sightings: 3469047,3469315,3469317,3469101,3468409,3469391
+
+Main changes from Beta 1.1:
+1. Added interrupt mode for jpeg capture and KMOT viewfinding
+2. Fixed HSD sighting 3469228 and 3469147
+
+Main changes from Alpha2:
+Enabled MIPI interface in ISP driver and KMOT sensor s5k4e1.
+Enabled FIFO in ISP driver, which doubled the fps in view-finding mode.
+Enabled Subdev Framework in CI kernel driver.
+Enabled AF Continuous Mode.
+Enabled AE scene evaluation.
+
+Enabled the camera drivers in kernel:
+Device Drivers --> Multimedia support --> Video For Linux
+Device Drivers --> Mulitmedia support --> Video capture adapters -->
+--> Moorestown Langwell Camera Imaging Subsystem support.
+
+Kernel configs:
+1. camera driver depends on GPIO library and I2C driver.
+CONFIG_GENERIC_GPIO=y
+CONFIG_I2C=y
+CONFIG_GPIOLIB=y
+2. camera driver depends on videobuf-core and videobuf-dma-contig.
+VIDEOBUF_GEN=y
+VIDEOBUF_DMA_CONTIG=y
+3. enable multimedia support and video capture.
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_VIDEO_DEV=y
+CONFIG_VIDEO_V4L2_COMMON=y
+CONFIG_VIDEO_MEDIA=y
+CONFIG_VIDEO_V4L2=y
+4. camera drivers incluing ISP, 5630, 5630-motor, s5k4e1, s5k4e1-motor, 2650,
+9665, flash.
+CONFIG_VIDEO_MRSTCI=y
+CONFIG_VIDEO_MRST_ISP=y
+CONFIG_VIDEO_MRST_OV5630=y
+CONFIG_VIDEO_MRST_OV5630_MOTOR=y
+CONFIG_VIDEO_MRST_S5K4E1=y
+CONFIG_VIDEO_MRST_S5K4E1_MOTOR=y
+CONFIG_VIDEO_MRST_FLASH=y
+CONFIG_VIDEO_MRST_OV2650=y
+CONFIG_VIDEO_MRST_OV9665=y
+
+Signed-off-by: Zheng Ba <zheng.ba@intel.com>
+---
+ drivers/media/video/mrstci/Kconfig | 26 +
+ drivers/media/video/mrstci/Makefile | 8 +
+ drivers/media/video/mrstci/mrstisp/Kconfig | 10 +
+ drivers/media/video/mrstci/mrstisp/Makefile | 7 +
+ .../video/mrstci/mrstisp/__mrstisp_private_ioctl.c | 324 +++
+ drivers/media/video/mrstci/mrstisp/mrstisp_dp.c | 1301 +++++++++
+ drivers/media/video/mrstci/mrstisp/mrstisp_hw.c | 1622 +++++++++++
+ drivers/media/video/mrstci/mrstisp/mrstisp_isp.c | 1993 +++++++++++++
+ drivers/media/video/mrstci/mrstisp/mrstisp_jpe.c | 569 ++++
+ drivers/media/video/mrstci/mrstisp/mrstisp_main.c | 2977 ++++++++++++++++++++
+ drivers/media/video/mrstci/mrstisp/mrstisp_mif.c | 763 +++++
+ 11 files changed, 9600 insertions(+), 0 deletions(-)
+ create mode 100644 drivers/media/video/mrstci/Kconfig
+ create mode 100644 drivers/media/video/mrstci/Makefile
+ create mode 100644 drivers/media/video/mrstci/mrstisp/Kconfig
+ create mode 100644 drivers/media/video/mrstci/mrstisp/Makefile
+ create mode 100644 drivers/media/video/mrstci/mrstisp/__mrstisp_private_ioctl.c
+ create mode 100644 drivers/media/video/mrstci/mrstisp/mrstisp_dp.c
+ create mode 100644 drivers/media/video/mrstci/mrstisp/mrstisp_hw.c
+ create mode 100644 drivers/media/video/mrstci/mrstisp/mrstisp_isp.c
+ create mode 100644 drivers/media/video/mrstci/mrstisp/mrstisp_jpe.c
+ create mode 100644 drivers/media/video/mrstci/mrstisp/mrstisp_main.c
+ create mode 100644 drivers/media/video/mrstci/mrstisp/mrstisp_mif.c
+
+diff --git a/drivers/media/video/mrstci/Kconfig b/drivers/media/video/mrstci/Kconfig
+new file mode 100644
+index 0000000..9ac7065
+--- /dev/null
++++ b/drivers/media/video/mrstci/Kconfig
+@@ -0,0 +1,26 @@
++menuconfig VIDEO_MRSTCI
++ bool "Moorestown Langwell Camera Imaging Subsystem support"
++ depends on VIDEO_V4L2
++ default y
++
++ ---help---
++ Say Y here to enable selecting the Intel Moorestown Langwell Camera Imaging Subsystem for webcams.
++
++if VIDEO_MRSTCI && VIDEO_V4L2
++
++source "drivers/media/video/mrstci/mrstisp/Kconfig"
++
++source "drivers/media/video/mrstci/mrstov5630/Kconfig"
++source "drivers/media/video/mrstci/mrstov5630_motor/Kconfig"
++
++source "drivers/media/video/mrstci/mrsts5k4e1/Kconfig"
++source "drivers/media/video/mrstci/mrsts5k4e1_motor/Kconfig"
++
++source "drivers/media/video/mrstci/mrstflash/Kconfig"
++
++source "drivers/media/video/mrstci/mrstov2650/Kconfig"
++
++source "drivers/media/video/mrstci/mrstov9665/Kconfig"
++
++endif # VIDEO_MRSTCI
++
+diff --git a/drivers/media/video/mrstci/Makefile b/drivers/media/video/mrstci/Makefile
+new file mode 100644
+index 0000000..9d3449e
+--- /dev/null
++++ b/drivers/media/video/mrstci/Makefile
+@@ -0,0 +1,8 @@
++obj-$(CONFIG_VIDEO_MRST_OV2650) += mrstov2650/
++obj-$(CONFIG_VIDEO_MRST_OV9665) += mrstov9665/
++obj-$(CONFIG_VIDEO_MRST_OV5630) += mrstov5630/
++obj-$(CONFIG_VIDEO_MRST_OV5630_MOTOR) += mrstov5630_motor/
++obj-$(CONFIG_VIDEO_MRST_S5K4E1) += mrsts5k4e1/
++obj-$(CONFIG_VIDEO_MRST_S5K4E1_MOTOR) += mrsts5k4e1_motor/
++obj-$(CONFIG_VIDEO_MRST_FLASH) += mrstflash/
++obj-$(CONFIG_VIDEO_MRST_ISP) += mrstisp/
+diff --git a/drivers/media/video/mrstci/mrstisp/Kconfig b/drivers/media/video/mrstci/mrstisp/Kconfig
+new file mode 100644
+index 0000000..8e58a87
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstisp/Kconfig
+@@ -0,0 +1,10 @@
++config VIDEO_MRST_ISP
++ tristate "Moorstown Marvin - ISP Driver"
++ depends on VIDEO_V4L2
++ select VIDEOBUF_DMA_CONTIG
++ default y
++ ---help---
++ Say Y here if you want support for cameras based on the Intel Moorestown platform.
++
++ To compile this driver as a module, choose M here: the
++ module will be called mrstisp.ko.
+diff --git a/drivers/media/video/mrstci/mrstisp/Makefile b/drivers/media/video/mrstci/mrstisp/Makefile
+new file mode 100644
+index 0000000..30f4e62
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstisp/Makefile
+@@ -0,0 +1,7 @@
++mrstisp-objs := mrstisp_main.o mrstisp_hw.o mrstisp_isp.o \
++ mrstisp_dp.o mrstisp_mif.o mrstisp_jpe.o \
++ __mrstisp_private_ioctl.o
++
++obj-$(CONFIG_VIDEO_MRST_ISP) += mrstisp.o
++
++EXTRA_CFLAGS += -I$(src)/../include -I$(src)/include
+diff --git a/drivers/media/video/mrstci/mrstisp/__mrstisp_private_ioctl.c b/drivers/media/video/mrstci/mrstisp/__mrstisp_private_ioctl.c
+new file mode 100644
+index 0000000..85cc482
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstisp/__mrstisp_private_ioctl.c
+@@ -0,0 +1,324 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include "mrstisp_stdinc.h"
++
++/*
++static u32 copy_sensor_config_from_user(struct ci_sensor_config *des,
++ struct ci_sensor_config *src)
++{
++ u32 ret = 0;
++ ret = copy_from_user((void *)des, (const void *)src,
++ sizeof(struct ci_sensor_config));
++ if (ret)
++ return -EFAULT;
++ return ret;
++}
++
++static u32 copy_sensor_caps_from_user(struct ci_sensor_caps *des,
++ struct ci_sensor_caps *src)
++{
++ u32 ret = 0;
++ ret = copy_from_user((void *)des, (const void *)src,
++ sizeof(struct ci_sensor_caps));
++ if (ret)
++ return -EFAULT;
++ return ret;
++}
++
++static u32 copy_isp_config_from_user(struct ci_isp_config *des,
++ struct ci_isp_config *src)
++{
++ int ret = 0;
++ ret = copy_from_user((void *)des, (const void *)src,
++ sizeof(struct ci_isp_config));
++ if (ret) {
++ eprintk("returning %d", ret);
++ return ret;
++ }
++ return 0;
++}
++*/
++
++static void print_bls_cfg(struct ci_isp_config *isp_cfg)
++{
++ struct ci_isp_bls_config *bls_cfg = &isp_cfg->bls_cfg;
++
++ dprintk(4, "print_bls_cfg:");
++ dprintk(4, "enable_automatic:%d", (bls_cfg->enable_automatic ? 1 : 0));
++ dprintk(4, "disable_h:%d", (bls_cfg->disable_h ? 1 : 0));
++ dprintk(4, "disable_v:%d", (bls_cfg->disable_v ? 1 : 0));
++ dprintk(4, "enable_window1:%d",
++ (bls_cfg->isp_bls_window1.enable_window ? 1 : 0));
++ dprintk(4, "start_h:%d", (int)bls_cfg->isp_bls_window1.start_h);
++ dprintk(4, "stop_h:%d", (int)bls_cfg->isp_bls_window1.stop_h);
++ dprintk(4, "start_v:%d", (int)bls_cfg->isp_bls_window1.start_v);
++ dprintk(4, "stop_v:%d", (int)bls_cfg->isp_bls_window1.stop_v);
++ dprintk(4, "enable_window2: %d",
++ (bls_cfg->isp_bls_window2.enable_window ? 1 : 0));
++ dprintk(4, "start_h%d", (int)bls_cfg->isp_bls_window2.start_h);
++ dprintk(4, "stop_h%d", (int)bls_cfg->isp_bls_window2.stop_h);
++ dprintk(4, "start_v%d", (int)bls_cfg->isp_bls_window2.start_v);
++ dprintk(4, "stop_v%d", (int)bls_cfg->isp_bls_window2.stop_v);
++ dprintk(4, "bls_samples%d", (int)bls_cfg->bls_samples);
++ dprintk(4, "fixed_a0x%02x", (int)bls_cfg->bls_subtraction.fixed_a);
++ dprintk(4, "fixed_b0x%02x", (int)bls_cfg->bls_subtraction.fixed_b);
++ dprintk(4, "fixed_c0x%02x", (int)bls_cfg->bls_subtraction.fixed_c);
++ dprintk(4, "fixed_d0x%02x", (int)bls_cfg->bls_subtraction.fixed_d);
++ dprintk(4, "\n");
++}
++
++static int mrst_isp_set_cfg(struct file *file, void *priv,
++ struct ci_pl_system_config *arg)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++
++ if (arg == NULL) {
++ eprintk("NULL pointer of arg");
++ return 0;
++ }
++ mutex_lock(&isp->mutex);
++
++ /*
++ if (arg->isi_config != NULL) {
++ dprintk(2, "sync isi cfg");
++ copy_sensor_config_from_user(isp->sys_conf.isi_config,
++ arg->isi_config);
++ } else {
++ eprintk("NULL arg->isi_config");
++ ret = CI_STATUS_NULL_POINTER;
++ goto exit_unlock;
++ }
++
++ if (arg->isi_caps != NULL) {
++ dprintk(2, "sync isi caps");
++ copy_sensor_caps_from_user(isp->sys_conf.isi_caps,
++ arg->isi_caps);
++ } else {
++ eprintk("NULL arg->isi_caps");
++ ret = CI_STATUS_NULL_POINTER;
++ goto exit_unlock;
++ }
++ */
++
++ memcpy(&isp->sys_conf.isp_cfg, &arg->isp_cfg,
++ sizeof(struct ci_isp_config));
++
++ print_bls_cfg(&isp->sys_conf.isp_cfg);
++
++ dprintk(2, "gammagamma2 = %d", arg->isp_cfg.flags.gamma2);
++ dprintk(2, "gammagamma2 now = %d", isp->sys_conf.isp_cfg.flags.gamma2);
++ mutex_unlock(&isp->mutex);
++
++ isp->sys_conf.isp_hal_enable = 1;
++
++ DBG_leaving;
++ return 0;
++}
++
++/* for buffer sharing between CI and VA */
++static int mrst_isp_get_frame_info(struct file *file, void *priv,
++ struct ci_frame_info *arg)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++
++ mutex_lock(&isp->mutex);
++
++ arg->width = isp->bufwidth;
++ arg->height = isp->bufheight;
++ arg->fourcc = isp->pixelformat;
++ arg->stride = isp->bufwidth; /* should be 64 bit alignment*/
++ arg->offset = arg->frame_id * PAGE_ALIGN(isp->frame_size);
++#if 0
++ if (isp->bufwidth == 640 && isp->bufheight == 480)
++ arg->offset = arg->frame_id * 0x71000;
++ else if (isp->bufwidth == 1280 && isp->bufheight == 720)
++ arg->offset = arg->frame_id * 0x152000;
++#endif
++
++
++ dprintk(2, "w=%d, h=%d, 4cc =%x, stride=%d, offset=%d,fsize=%d",
++ arg->width, arg->height, arg->fourcc, arg->stride,
++ arg->offset, isp->frame_size);
++
++ mutex_unlock(&isp->mutex);
++
++ DBG_leaving;
++ return 0;
++}
++
++static int mrst_isp_set_jpg_enc_ratio(struct file *file, void *priv, int *arg)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++
++ dprintk(2, "set jpg compression ratio is %d", *arg);
++
++ mutex_lock(&isp->mutex);
++ isp->sys_conf.isp_cfg.jpeg_enc_ratio = *arg;
++ mutex_unlock(&isp->mutex);
++
++ DBG_leaving;
++ return 0;
++}
++
++int mrst_isp_get_isp_mem_info(struct file *file, void *priv,
++ struct ci_isp_mem_info *arg)
++{
++ u32 ret = 0;
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++
++ mutex_lock(&isp->mutex);
++ arg->isp_bar0_pa = isp->mb0;
++ arg->isp_bar0_size = isp->mb0_size;
++ arg->isp_bar1_pa = isp->mb1;
++ arg->isp_bar1_size = isp->mb1_size;
++ mutex_unlock(&isp->mutex);
++
++ DBG_leaving;
++ return ret;
++}
++
++int mrst_isp_create_jpg_review_frame(struct file *file, void *priv,
++ struct v4l2_jpg_review_buffer *arg)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ u32 width = arg->width;
++ u32 height = arg->height;
++ u32 pix_fmt = arg->pix_fmt;
++ u32 jpg_frame = arg->jpg_frame;
++
++ static struct v4l2_jpg_review_buffer *jpg_review;
++
++ jpg_review = &isp->sys_conf.jpg_review;
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++
++ if (width > 640 || height > 480 || width < 32 || height < 16) {
++ eprintk("unsupported resolution: %d * %d", width, height);
++ return -EINVAL;
++ }
++
++ if (jpg_frame >= isp->num_frames) {
++ eprintk("error jpeg frame id");
++ return -1;
++ }
++
++ jpg_review->width = width;
++ jpg_review->height = height;
++ jpg_review->pix_fmt = pix_fmt;
++ jpg_review->jpg_frame = jpg_frame;
++
++ switch (arg->pix_fmt) {
++ case V4L2_PIX_FMT_YUV422P:
++ jpg_review->bytesperline = width * 2;
++ jpg_review->frame_size = width * height * 2;
++ break;
++ case V4L2_PIX_FMT_YUV420:
++ case V4L2_PIX_FMT_YVU420:
++ case V4L2_PIX_FMT_NV12:
++ jpg_review->bytesperline = width * 3/2;
++ jpg_review->frame_size = width * height * 3/2;
++ break;
++ default:
++ eprintk("unsupported pix_fmt: %d", arg->pix_fmt);
++ return -EINVAL;
++ }
++
++ jpg_review->offset = isp->mb1_size - 640*480*2;
++
++ isp->sys_conf.jpg_review_enable = 1; /* enable jpg review flag */
++
++ /* set user space data */
++ arg->bytesperline = jpg_review->bytesperline;
++ arg->frame_size = jpg_review->frame_size;
++ arg->offset = jpg_review->offset;
++
++ dprintk(1, "create jpg review frame successfully: "
++ "bytesperline = %d, frame_size = %d,"
++ " offset = %d\n", arg->bytesperline,
++ arg->frame_size, arg->offset);
++
++ DBG_leaving;
++ return 0;
++}
++
++/* isp private ioctl for libci */
++long mrst_isp_vidioc_default(struct file *file, void *fh,
++ int cmd, void *arg)
++{
++ void *priv = file->private_data;
++
++ DBG_entering;
++
++ switch (cmd) {
++ case VIDIOC_GET_ISP_MEM_INFO:
++ return mrst_isp_get_isp_mem_info(file, priv,
++ (struct ci_isp_mem_info *)arg);
++
++ case VIDIOC_SET_SYS_CFG:
++ return mrst_isp_set_cfg(file, priv,
++ (struct ci_pl_system_config *)arg);
++
++ case VIDIOC_SET_JPG_ENC_RATIO:
++ return mrst_isp_set_jpg_enc_ratio(file, priv, (int *)arg);
++
++ case ISP_IOCTL_GET_FRAME_INFO:
++ return mrst_isp_get_frame_info(file, priv,
++ (struct ci_frame_info *)arg);
++
++ case VIDIOC_CREATE_JPG_REVIEW_BUF:
++ return mrst_isp_create_jpg_review_frame(file, priv,
++ (struct v4l2_jpg_review_buffer *)arg);
++ default:
++ v4l_print_ioctl("lnw_isp", cmd);
++ dprintk(2, "VIDIOC_SET_SYS_CFG = %x", VIDIOC_SET_SYS_CFG);
++ return -EINVAL;
++ }
++
++ DBG_leaving;
++ return 0;
++}
+diff --git a/drivers/media/video/mrstci/mrstisp/mrstisp_dp.c b/drivers/media/video/mrstci/mrstisp/mrstisp_dp.c
+new file mode 100644
+index 0000000..dd892fb
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstisp/mrstisp_dp.c
+@@ -0,0 +1,1301 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include "mrstisp_stdinc.h"
++
++/* mask for all chroma subsampling settings */
++#define CI_ISP_DPD_CSS_MASK (CI_ISP_DPD_CSS_H_MASK | CI_ISP_DPD_CSS_V_MASK)
++
++#define SCALER_COFFS_COSITED 0x400
++#define FIXEDPOINT_ONE 0x1000
++
++/* limitations of main and self scaler */
++#define MAIN_SCALER_WIDTH_MAX 2600
++
++#define SELF_SCALER_WIDTH_MAX 640
++#define SCALER_MIN 16
++
++#define SELF_UPSCALE_FACTOR_MAX 5
++
++#define MAIN_UPSCALE_FACTOR_MAX 5
++
++/*
++ * upscale lookup table for smooth edges
++ * (linear interpolation between pixels)
++ */
++
++/* smooth edges */
++static const struct ci_isp_rsz_lut isp_rsz_lut_smooth_lin = {
++ {
++ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
++ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
++ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
++ 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
++ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
++ 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F
++ }
++};
++
++/*
++ * upscale lookup table for sharp edges
++ * (no interpolation, just duplicate pixels)
++ */
++
++/* sharp edges */
++static const struct ci_isp_rsz_lut isp_rsz_lut_sharp = {
++ {
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
++ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
++ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
++ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F
++ }
++};
++
++/* structure combining virtual ISP windows settings */
++struct ci_isp_virtual_isp_wnds {
++ struct ci_isp_window wnd_blacklines;
++ struct ci_isp_window wnd_zoom_crop;
++};
++
++/* static storage to remember last applied virtual ISP window settings */
++static struct ci_isp_virtual_isp_wnds last_isp_wnds;
++
++/*
++ * Calculates the value to program into the struct ci_isp_scale or
++ * tsMrvSScale structures to scale from in pixels to out pixels.
++ *
++ * The formulas are taken from the MARVIN / MARVIN3PLUS user
++ * manuals (fixed-point calculation using 32 bit during
++ * processing, will overflow at an output size of 1048575 pixels).
++ */
++static u32 ci_get_scale_reg(u16 in, u16 out)
++{
++ if (in > out) {
++ /* downscaling */
++ return (u32) (((((u32) out - 1) * RSZ_SCALER_BYPASS) /
++ (u32) (in - 1)) + 1);
++ } else if (in < out) {
++ /* upscaling */
++ return (u32) (((((u32) in - 1) * RSZ_SCALER_BYPASS) /
++ (u32) (out - 1)) | (u32) RSZ_UPSCALE_ENABLE);
++ }
++
++ /* no scaling */
++ return RSZ_SCALER_BYPASS;
++}
++
++/*
++ * Calculates the values of the ci_isp_scale structure for the
++ * given input size and data path descriptor.
++ */
++static u32 ci_calc_scale_factors(const struct ci_isp_datapath_desc *source,
++ const struct ci_isp_datapath_desc *path,
++ struct ci_isp_scale *scale, int implementation)
++{
++ u32 scaler_output_format;
++ u32 cssflags;
++ u32 scaler_input_format;
++
++ u16 chroma_in_w;
++ u16 chroma_in_h;
++ u16 chroma_out_wcr;
++ u16 chroma_out_wcb;
++ u16 chroma_out_h;
++
++ memset(scale, 0, sizeof(struct ci_isp_scale));
++ dprintk(1, "srcw %d, srch %d;", source->out_w, source->out_h);
++ dprintk(1, "dstw %d, dsth %d", path->out_w, path->out_h);
++
++ /* calculate Y scale factors */
++ scale->scale_hy = ci_get_scale_reg(source->out_w, path->out_w);
++ scale->scale_vy = ci_get_scale_reg(source->out_h, path->out_h);
++
++ /* figure out the color input format of the scaler */
++ switch (path->flags & CI_ISP_DPD_MODE_MASK) {
++ case CI_ISP_DPD_MODE_DMAYC_DIRECT:
++ case CI_ISP_DPD_MODE_DMAYC_ISP:
++ case CI_ISP_DPD_MODE_DMAJPEG_DIRECT:
++ case CI_ISP_DPD_MODE_DMAJPEG_ISP:
++ /* DMA-read originated data */
++ scaler_input_format = path->flags & CI_ISP_DPD_DMA_IN_MASK;
++ break;
++ default:
++ /* ISP originated data */
++ scaler_input_format = CI_ISP_DPD_DMA_IN_422;
++ break;
++ }
++
++ dprintk(1, "scaler_input_format is 0x%x", scaler_input_format);
++
++ switch (scaler_input_format) {
++ case CI_ISP_DPD_DMA_IN_422:
++ chroma_in_w = source->out_w / 2;
++ chroma_in_h = source->out_h;
++ chroma_out_wcr = path->out_w / 2;
++ chroma_out_wcb = (path->out_w + 1) / 2;
++ chroma_out_h = path->out_h;
++ break;
++ case CI_ISP_DPD_DMA_IN_420:
++ chroma_in_w = source->out_w / 2;
++ chroma_in_h = source->out_h / 2;
++ chroma_out_wcr = path->out_w / 2;
++ chroma_out_wcb = (path->out_w + 1) / 2;
++ chroma_out_h = path->out_h / 2;
++ break;
++ case CI_ISP_DPD_DMA_IN_411:
++ chroma_in_w = source->out_w / 4;
++ chroma_in_h = source->out_h;
++ chroma_out_wcr = path->out_w / 4;
++ chroma_out_wcb = (path->out_w + 2) / 4;
++ chroma_out_h = path->out_h;
++ break;
++ case CI_ISP_DPD_DMA_IN_444:
++ default:
++ chroma_in_w = source->out_w;
++ chroma_in_h = source->out_h;
++ chroma_out_wcb = chroma_out_wcr = path->out_w;
++ chroma_out_h = path->out_h;
++ break;
++ }
++
++ /* calculate chrominance scale factors */
++ switch (path->flags & CI_ISP_DPD_CSS_H_MASK) {
++ case CI_ISP_DPD_CSS_H2:
++ chroma_out_wcb /= 2;
++ chroma_out_wcr /= 2;
++ break;
++ case CI_ISP_DPD_CSS_H4:
++ chroma_out_wcb /= 4;
++ chroma_out_wcr /= 4;
++ break;
++ case CI_ISP_DPD_CSS_HUP2:
++ chroma_out_wcb *= 2;
++ chroma_out_wcr *= 2;
++ break;
++ case CI_ISP_DPD_CSS_HUP4:
++ chroma_out_wcb *= 4;
++ chroma_out_wcr *= 4;
++ break;
++ default:
++ /*leave chroma_out_w untouched*/
++ break;
++ }
++
++ scale->scale_hcr = ci_get_scale_reg(chroma_in_w, chroma_out_wcr);
++ scale->scale_hcb = ci_get_scale_reg(chroma_in_w, chroma_out_wcb);
++ scale->scale_hcb = scale->scale_hcr;
++
++ switch (path->flags & CI_ISP_DPD_CSS_V_MASK) {
++ case CI_ISP_DPD_CSS_V2:
++ chroma_out_h /= 2;
++ break;
++ case CI_ISP_DPD_CSS_V4:
++ chroma_out_h /= 4;
++ break;
++ case CI_ISP_DPD_CSS_VUP2:
++ chroma_out_h *= 2;
++ break;
++ case CI_ISP_DPD_CSS_VUP4:
++ chroma_out_h *= 4;
++ break;
++ default:
++ /* leave chroma_out_h untouched */
++ break;
++ }
++
++ scale->scale_vc = ci_get_scale_reg(chroma_in_h, chroma_out_h);
++
++ /* additional chrominance phase shifts */
++ if (path->flags & CI_ISP_DPD_CSS_HSHIFT)
++ scale->phase_hc = SCALER_COFFS_COSITED;
++ if (path->flags & CI_ISP_DPD_CSS_VSHIFT)
++ scale->phase_vc = SCALER_COFFS_COSITED;
++
++ /* additional luminance phase shifts */
++ if (path->flags & CI_ISP_DPD_LUMA_HSHIFT)
++ scale->phase_hy = SCALER_COFFS_COSITED;
++ if (path->flags & CI_ISP_DPD_LUMA_VSHIFT)
++ scale->phase_vy = SCALER_COFFS_COSITED;
++
++ /* try to figure out the outcoming YCbCr format */
++ cssflags = path->flags & CI_ISP_DPD_CSS_MASK;
++ if (cssflags == (CI_ISP_DPD_CSS_H_OFF | CI_ISP_DPD_CSS_V_OFF)) {
++ /* trivial case: the output format is not changed */
++ scaler_output_format = scaler_input_format;
++ } else {
++ /* output format gets changed by the scaler setting */
++ /* assume invalid format by default */
++ scaler_output_format = (u32) (-1);
++ switch (scaler_input_format) {
++ case CI_ISP_DPD_DMA_IN_444:
++ if (cssflags == (CI_ISP_DPD_CSS_H2
++ | CI_ISP_DPD_CSS_V_OFF)) {
++ /* conversion 444 -> 422 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_422;
++ } else if (cssflags == (CI_ISP_DPD_CSS_H4
++ | CI_ISP_DPD_CSS_V_OFF)) {
++ /* conversion 444 -> 411 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_411;
++ } else if (cssflags == (CI_ISP_DPD_CSS_H2
++ | CI_ISP_DPD_CSS_V2)) {
++ /* conversion 444 -> 420 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_420;
++ }
++ break;
++
++ case CI_ISP_DPD_DMA_IN_422:
++ if (cssflags == (CI_ISP_DPD_CSS_HUP2
++ | CI_ISP_DPD_CSS_V_OFF)) {
++ /* conversion 422 -> 444 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_444;
++ } else if (cssflags == (CI_ISP_DPD_CSS_H2
++ | CI_ISP_DPD_CSS_V_OFF)) {
++ /* conversion 422 -> 411 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_411;
++ } else if (cssflags == (CI_ISP_DPD_CSS_H_OFF
++ | CI_ISP_DPD_CSS_V2)) {
++ /* conversion 422 -> 420 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_420;
++ }
++ break;
++
++ case CI_ISP_DPD_DMA_IN_420:
++ if (cssflags == (CI_ISP_DPD_CSS_HUP2
++ | CI_ISP_DPD_CSS_VUP2)) {
++ /* conversion 420 -> 444 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_444;
++ } else if (cssflags == (CI_ISP_DPD_CSS_H2
++ | CI_ISP_DPD_CSS_VUP2)) {
++ /* conversion 420 -> 411 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_411;
++ } else if (cssflags == (CI_ISP_DPD_CSS_H_OFF
++ | CI_ISP_DPD_CSS_VUP2)) {
++ /* conversion 420 -> 422 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_422;
++ }
++ break;
++
++ case CI_ISP_DPD_DMA_IN_411:
++ if (cssflags == (CI_ISP_DPD_CSS_HUP4
++ | CI_ISP_DPD_CSS_V_OFF)) {
++ /* conversion 411 -> 444 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_444;
++ } else if (cssflags == (CI_ISP_DPD_CSS_HUP2
++ | CI_ISP_DPD_CSS_V_OFF)) {
++ /* conversion 411 -> 422 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_422;
++ } else if (cssflags == (CI_ISP_DPD_CSS_HUP2
++ | CI_ISP_DPD_CSS_V2)) {
++ /* conversion 411 -> 420 */
++ scaler_output_format = CI_ISP_DPD_DMA_IN_420;
++ }
++ break;
++
++ default:
++ /* DMA input format not supported */
++ break;
++ }
++ }
++
++ return scaler_output_format;
++}
++
++/*
++ * Returns the address of up-scaling lookup table to use for
++ * the given data path flags.
++ */
++static const struct ci_isp_rsz_lut *ci_get_rsz_lut(u32 flags)
++{
++ const struct ci_isp_rsz_lut *ret_val;
++ switch (flags & CI_ISP_DPD_UPSCALE_MASK) {
++ case CI_ISP_DPD_UPSCALE_SHARP:
++ ret_val = &isp_rsz_lut_sharp;
++ break;
++ default:
++ ret_val = &isp_rsz_lut_smooth_lin;
++ break;
++ }
++ return ret_val;
++}
++
++/*
++ * Fills in scale factors and MI configuration for the main path.
++ * Note that only self path related settings will be written into
++ * the MI configuration struct, so this routine can be used for
++ * both ISP and DMA originated data path setups.
++ *
++ * Following fields are being filled in:
++ * scale_main: [all fields]
++ * mrv_mi_ctrl: mrv_mif_mp_pic_form main_path
++ */
++static int ci_calc_main_path_settings(const struct ci_isp_datapath_desc *source,
++ const struct ci_isp_datapath_desc *main,
++ struct ci_isp_scale *scale_main,
++ struct ci_isp_mi_ctrl *mrv_mi_ctrl)
++{
++ u32 main_flag;
++
++ WARN_ON(!(source != NULL));
++ WARN_ON(!(scale_main != NULL));
++ WARN_ON(!(mrv_mi_ctrl != NULL));
++
++ /* assume datapath deactivation if no selfpath pointer is given) */
++ if (main)
++ main_flag = main->flags;
++ else
++ main_flag = 0;
++
++ /* initialize the given parameters */
++ memset(scale_main, 0, sizeof(struct ci_isp_scale));
++ scale_main->scale_hy = RSZ_SCALER_BYPASS;
++ scale_main->scale_hcb = RSZ_SCALER_BYPASS;
++ scale_main->scale_hcr = RSZ_SCALER_BYPASS;
++ scale_main->scale_vy = RSZ_SCALER_BYPASS;
++ scale_main->scale_vc = RSZ_SCALER_BYPASS;
++
++ if (main_flag & CI_ISP_DPD_ENABLE) {
++ switch (main_flag & CI_ISP_DPD_MODE_MASK) {
++ case CI_ISP_DPD_MODE_ISPYC:
++ case CI_ISP_DPD_MODE_DMAYC_ISP:
++ mrv_mi_ctrl->main_path = CI_ISP_PATH_ON;
++ break;
++ case CI_ISP_DPD_MODE_ISPJPEG:
++ case CI_ISP_DPD_MODE_DMAJPEG_DIRECT:
++ case CI_ISP_DPD_MODE_DMAJPEG_ISP:
++ mrv_mi_ctrl->main_path = CI_ISP_PATH_JPE;
++ break;
++ case CI_ISP_DPD_MODE_ISPRAW:
++ mrv_mi_ctrl->main_path = CI_ISP_PATH_RAW8;
++ break;
++ case CI_ISP_DPD_MODE_ISPRAW_16B:
++ mrv_mi_ctrl->main_path = CI_ISP_PATH_RAW816;
++ break;
++ default:
++ eprintk("unsupported mode for main path");
++ return CI_STATUS_NOTSUPP;
++ }
++ if (main_flag & (CI_ISP_DPD_H_FLIP | CI_ISP_DPD_V_FLIP |
++ CI_ISP_DPD_90DEG_CCW)) {
++ eprintk("not supported for main path");
++ return CI_STATUS_NOTSUPP;
++ }
++ if (main_flag & CI_ISP_DPD_NORESIZE) {
++ if (main_flag & CI_ISP_DPD_CSS_MASK) {
++ eprintk("main path needs rezizer");
++ return CI_STATUS_NOTSUPP;
++ }
++ if (main_flag &
++ (CI_ISP_DPD_LUMA_HSHIFT | CI_ISP_DPD_LUMA_VSHIFT)) {
++ eprintk("main path needs rezizer");
++ return CI_STATUS_NOTSUPP;
++ }
++ } else {
++ if ((mrv_mi_ctrl->main_path == CI_ISP_PATH_RAW8)
++ || (mrv_mi_ctrl->main_path == CI_ISP_PATH_RAW8)) {
++ eprintk("scaler not in RAW mode");
++ return CI_STATUS_NOTSUPP;
++ }
++ /* changed to avoid LINT warnings (Warning 613) */
++ if (main != NULL) {
++ if ((((u32) (source->out_w) *
++ MAIN_UPSCALE_FACTOR_MAX) < main->out_w)
++ ||
++ (((u32) (source->out_h) *
++ MAIN_UPSCALE_FACTOR_MAX) <
++ main->out_h)) {
++ eprintk("main upscaling exceeded");
++ return CI_STATUS_NOTSUPP;
++ }
++ if ((main->out_w >
++ MAIN_SCALER_WIDTH_MAX)
++ || (main->out_w < SCALER_MIN)
++ || (main->out_h < SCALER_MIN)) {
++ eprintk("main scaler ange exceeded");
++ return CI_STATUS_NOTSUPP;
++ }
++ } else {
++ WARN_ON(main == NULL);
++ }
++
++ if (source->out_w & 0x01) {
++ eprintk("input width must be even!");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ /* calculate scale factors. */
++ (void)ci_calc_scale_factors(source, main, scale_main,
++ MARVIN_FEATURE_MSCALE_FACTORCALC);
++ }
++ } else {
++ mrv_mi_ctrl->main_path = CI_ISP_PATH_OFF;
++ }
++
++ /* hardcoded MI settings */
++ dprintk(1, "main_flag is 0x%x", main_flag);
++ if (main_flag & CI_ISP_DPD_HWRGB_MASK) {
++ switch (main_flag & CI_ISP_DPD_HWRGB_MASK) {
++ case CI_ISP_DPD_YUV_420:
++ case CI_ISP_DPD_YUV_422:
++ mrv_mi_ctrl->mrv_mif_mp_pic_form =
++ CI_ISP_MIF_PIC_FORM_PLANAR;
++ break;
++ case CI_ISP_DPD_YUV_NV12:
++ mrv_mi_ctrl->mrv_mif_mp_pic_form =
++ CI_ISP_MIF_PIC_FORM_SEMI_PLANAR;
++ break;
++ case CI_ISP_DPD_YUV_YUYV:
++ mrv_mi_ctrl->mrv_mif_mp_pic_form =
++ CI_ISP_MIF_PIC_FORM_INTERLEAVED;
++ break;
++ default:
++ mrv_mi_ctrl->mrv_mif_mp_pic_form =
++ CI_ISP_MIF_PIC_FORM_PLANAR;
++ }
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Fills in scale factors and MI configuration for the self
++ * path. Note that only self path related settings will be written into
++ * the MI config struct, so this routine can be used for both ISP and DMA
++ * originated datapath setups.
++ *
++ * Following fields are being filled in:
++ * scale_flag :
++ * [all fields]
++ * mrv_mi_ctrl :
++ * mrv_mif_sp_out_form
++ * mrv_mif_sp_in_form
++ * mrv_mif_sp_pic_form
++ * mrv_mif_sp_mode
++ * self_path
++ */
++static int ci_calc_self_path_settings(const struct ci_isp_datapath_desc *source,
++ const struct ci_isp_datapath_desc *self,
++ struct ci_isp_scale *scale_flag,
++ struct ci_isp_mi_ctrl *mrv_mi_ctrl)
++{
++ u32 scaler_out_col_format;
++ u32 self_flag;
++
++ WARN_ON(!(source != NULL));
++ WARN_ON(!(scale_flag != NULL));
++ WARN_ON(!(mrv_mi_ctrl != NULL));
++
++ /* assume datapath deactivation if no selfpath pointer is given) */
++ if (self)
++ self_flag = self->flags;
++ else
++ self_flag = 0;
++
++ /* initialize the given parameters */
++ memset(scale_flag, 0, sizeof(struct ci_isp_scale));
++ scale_flag->scale_hy = RSZ_SCALER_BYPASS;
++ scale_flag->scale_hcb = RSZ_SCALER_BYPASS;
++ scale_flag->scale_hcr = RSZ_SCALER_BYPASS;
++ scale_flag->scale_vy = RSZ_SCALER_BYPASS;
++ scale_flag->scale_vc = RSZ_SCALER_BYPASS;
++
++ if (self_flag & CI_ISP_DPD_ENABLE) {
++
++ switch (self_flag & CI_ISP_DPD_MODE_MASK) {
++ case CI_ISP_DPD_MODE_ISPYC:
++ mrv_mi_ctrl->self_path = CI_ISP_PATH_ON;
++ scaler_out_col_format = CI_ISP_DPD_DMA_IN_422;
++ break;
++ case CI_ISP_DPD_MODE_DMAYC_ISP:
++ case CI_ISP_DPD_MODE_DMAYC_DIRECT:
++ mrv_mi_ctrl->self_path = CI_ISP_PATH_ON;
++ scaler_out_col_format =
++ self_flag & CI_ISP_DPD_DMA_IN_MASK;
++ break;
++ default:
++ eprintk("unsupported mode for self path");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ if (self_flag & CI_ISP_DPD_NORESIZE) {
++ if (self_flag & CI_ISP_DPD_CSS_MASK) {
++ eprintk("in self path needs rezizer");
++ return CI_STATUS_NOTSUPP;
++ }
++ if (self_flag &
++ (CI_ISP_DPD_LUMA_HSHIFT | CI_ISP_DPD_LUMA_VSHIFT)) {
++ eprintk("n self path needs rezizer");
++ return CI_STATUS_NOTSUPP;
++ }
++ /* changed to avoid LINT warnings (Warning 613) */
++ if (self != NULL) {
++ if ((source->out_w != self->out_w) ||
++ (source->out_h != self->out_h)) {
++ eprintk("sizes needs resizer");
++ return CI_STATUS_NOTSUPP;
++ }
++ } else {
++ WARN_ON(self == NULL);
++ }
++ } else {
++ /* changed to avoid LINT warnings (Warning 613) */
++ if (self != NULL) {
++ /* upscaling only to factor
++ * SELF_UPSCALE_FACTOR_MAX possible
++ */
++ if ((((u32) (source->out_w) *
++ SELF_UPSCALE_FACTOR_MAX) <
++ self->out_w)
++ ||
++ (((u32) (source->out_h) *
++ SELF_UPSCALE_FACTOR_MAX) <
++ self->out_h)) {
++ eprintk("apability exceeded");
++ return CI_STATUS_NOTSUPP;
++ }
++ if ((self->out_w >
++ SELF_SCALER_WIDTH_MAX)
++ || (self->out_w < SCALER_MIN)
++ || (self->out_h < SCALER_MIN)) {
++ eprintk("out range exceeded");
++ return CI_STATUS_NOTSUPP;
++ }
++ } else {
++ WARN_ON(self == NULL);
++ }
++ /* Remember that the input picture width should be
++ * even if the scaler is used */
++
++ /* (otherwise the scaler may show unexpected
++ * behaviour in some rare cases) */
++ if (source->out_w & 0x01) {
++ eprintk("width must be even!");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ /* calculate scale factors. */
++ scaler_out_col_format =
++ ci_calc_scale_factors(source, self, scale_flag,
++ MARVIN_FEATURE_SSCALE_FACTORCALC);
++ }
++
++ dprintk(2, "step1");
++ /* figure out the input format setting */
++ switch (scaler_out_col_format) {
++ case CI_ISP_DPD_DMA_IN_444:
++ mrv_mi_ctrl->mrv_mif_sp_in_form =
++ CI_ISP_MIF_COL_FORMAT_YCBCR_444;
++ break;
++ case CI_ISP_DPD_DMA_IN_422:
++ mrv_mi_ctrl->mrv_mif_sp_in_form =
++ CI_ISP_MIF_COL_FORMAT_YCBCR_422;
++ break;
++ case CI_ISP_DPD_DMA_IN_420:
++ mrv_mi_ctrl->mrv_mif_sp_in_form =
++ CI_ISP_MIF_COL_FORMAT_YCBCR_420;
++ break;
++ /* no break, does not seem to be supported by HW */
++ case CI_ISP_DPD_DMA_IN_411:
++ default:
++ eprintk("input color format not supported");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ /* figure out the output format setting */
++ dprintk(2, "step2, self_flag is 0x%x", self_flag);
++
++ switch (self_flag & CI_ISP_DPD_HWRGB_MASK) {
++ case CI_ISP_DPD_HWRGB_565:
++ mrv_mi_ctrl->mrv_mif_sp_out_form =
++ CI_ISP_MIF_COL_FORMAT_RGB_565;
++ mrv_mi_ctrl->mrv_mif_sp_pic_form =
++ CI_ISP_MIF_PIC_FORM_PLANAR;
++ break;
++ case CI_ISP_DPD_HWRGB_666:
++ mrv_mi_ctrl->mrv_mif_sp_out_form =
++ CI_ISP_MIF_COL_FORMAT_RGB_666;
++ mrv_mi_ctrl->mrv_mif_sp_pic_form =
++ CI_ISP_MIF_PIC_FORM_PLANAR;
++ break;
++ case CI_ISP_DPD_HWRGB_888:
++ mrv_mi_ctrl->mrv_mif_sp_out_form =
++ CI_ISP_MIF_COL_FORMAT_RGB_888;
++ mrv_mi_ctrl->mrv_mif_sp_pic_form =
++ CI_ISP_MIF_PIC_FORM_PLANAR;
++ break;
++ case CI_ISP_DPD_YUV_420:
++ mrv_mi_ctrl->mrv_mif_sp_pic_form =
++ CI_ISP_MIF_PIC_FORM_PLANAR;
++ mrv_mi_ctrl->mrv_mif_sp_out_form =
++ CI_ISP_MIF_COL_FORMAT_YCBCR_420;
++ break;
++ case CI_ISP_DPD_YUV_422:
++ mrv_mi_ctrl->mrv_mif_sp_pic_form =
++ CI_ISP_MIF_PIC_FORM_PLANAR;
++ mrv_mi_ctrl->mrv_mif_sp_out_form =
++ CI_ISP_MIF_COL_FORMAT_YCBCR_422;
++ break;
++ case CI_ISP_DPD_YUV_NV12:
++ mrv_mi_ctrl->mrv_mif_sp_pic_form =
++ CI_ISP_MIF_PIC_FORM_SEMI_PLANAR;
++ mrv_mi_ctrl->mrv_mif_sp_out_form =
++ CI_ISP_MIF_COL_FORMAT_YCBCR_420;
++ break;
++ case CI_ISP_DPD_YUV_YUYV:
++ mrv_mi_ctrl->mrv_mif_sp_pic_form =
++ CI_ISP_MIF_PIC_FORM_INTERLEAVED;
++ mrv_mi_ctrl->mrv_mif_sp_out_form =
++ CI_ISP_MIF_COL_FORMAT_YCBCR_422;
++ break;
++
++ case CI_ISP_DPD_HWRGB_OFF:
++ mrv_mi_ctrl->mrv_mif_sp_out_form =
++ mrv_mi_ctrl->mrv_mif_sp_in_form;
++ mrv_mi_ctrl->mrv_mif_sp_pic_form =
++ CI_ISP_MIF_PIC_FORM_PLANAR;
++ break;
++ default:
++ eprintk("output color format not supported");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ /* picture flipping / rotation */
++ dprintk(2, "step3");
++
++ switch (self_flag &
++ (CI_ISP_DPD_90DEG_CCW | CI_ISP_DPD_V_FLIP |
++ CI_ISP_DPD_H_FLIP)) {
++ case (CI_ISP_DPD_H_FLIP):
++ mrv_mi_ctrl->mrv_mif_sp_mode =
++ CI_ISP_MIF_SP_HORIZONTAL_FLIP;
++ break;
++ case (CI_ISP_DPD_V_FLIP):
++ mrv_mi_ctrl->mrv_mif_sp_mode =
++ CI_ISP_MIF_SP_VERTICAL_FLIP;
++ break;
++ case (CI_ISP_DPD_V_FLIP | CI_ISP_DPD_H_FLIP):
++ mrv_mi_ctrl->mrv_mif_sp_mode =
++ CI_ISP_MIF_SP_ROTATION_180_DEG;
++ break;
++ case (CI_ISP_DPD_90DEG_CCW):
++ mrv_mi_ctrl->mrv_mif_sp_mode =
++ CI_ISP_MIF_SP_ROTATION_090_DEG;
++ break;
++ case (CI_ISP_DPD_90DEG_CCW | CI_ISP_DPD_H_FLIP):
++ mrv_mi_ctrl->mrv_mif_sp_mode =
++ CI_ISP_MIF_SP_ROT_270_V_FLIP;
++ break;
++ case (CI_ISP_DPD_90DEG_CCW | CI_ISP_DPD_V_FLIP):
++ mrv_mi_ctrl->mrv_mif_sp_mode =
++ CI_ISP_MIF_SP_ROT_090_V_FLIP;
++ break;
++ case (CI_ISP_DPD_90DEG_CCW | CI_ISP_DPD_V_FLIP |
++ CI_ISP_DPD_H_FLIP):
++ mrv_mi_ctrl->mrv_mif_sp_mode =
++ CI_ISP_MIF_SP_ROTATION_270_DEG;
++ break;
++ default:
++ mrv_mi_ctrl->mrv_mif_sp_mode = CI_ISP_MIF_SP_ORIGINAL;
++ break;
++ }
++
++ } else {
++ mrv_mi_ctrl->self_path = CI_ISP_PATH_OFF;
++ }
++
++ dprintk(2, "step4");
++ /*mrv_mi_ctrl->mrv_mif_sp_pic_form = CI_ISP_MIF_PIC_FORM_PLANAR;*/
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Translates the given memory interface configuration struct
++ * into appropriate values to program the data path multiplexers.
++ */
++static int ci_calc_dp_mux_settings(const struct ci_isp_mi_ctrl *mi_ctrl,
++ enum ci_isp_ycs_chn_mode *peYcsChnMode,
++ enum ci_isp_dp_switch *peDpSwitch)
++{
++ switch (mi_ctrl->main_path) {
++ case CI_ISP_PATH_RAW8:
++ case CI_ISP_PATH_RAW816:
++ *peDpSwitch = CI_ISP_DP_RAW;
++ *peYcsChnMode = CI_ISP_YCS_MVRaw;
++ if (mi_ctrl->self_path != CI_ISP_PATH_OFF) {
++ eprintk("ombined with RAW mode of main path");
++ return CI_STATUS_NOTSUPP;
++ }
++ break;
++
++ case CI_ISP_PATH_JPE:
++ *peDpSwitch = CI_ISP_DP_JPEG;
++ if (mi_ctrl->self_path != CI_ISP_PATH_OFF)
++ *peYcsChnMode = CI_ISP_YCS_MV_SP;
++ else
++ *peYcsChnMode = CI_ISP_YCS_MV;
++ break;
++
++ case CI_ISP_PATH_ON:
++ *peDpSwitch = CI_ISP_DP_MV;
++ if (mi_ctrl->self_path != CI_ISP_PATH_OFF)
++ *peYcsChnMode = CI_ISP_YCS_MV_SP;
++ else
++ *peYcsChnMode = CI_ISP_YCS_MV;
++ break;
++
++ case CI_ISP_PATH_OFF:
++ *peDpSwitch = CI_ISP_DP_MV;
++ if (mi_ctrl->self_path != CI_ISP_PATH_OFF)
++ *peYcsChnMode = CI_ISP_YCS_SP;
++ else
++ *peYcsChnMode = CI_ISP_YCS_OFF;
++ break;
++
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++/* the windows to cut away black pixels and to zoom/crop the */
++#define ISPWND_COMBINE_WNDS 0x00000001
++/* image must be combined before they are applyed to the marvin registers */
++/* call of the ci_isp_set_output_formatter() routine necessary */
++#define ISPWND_APPLY_OUTFORM 0x00000002
++/* call of the ci_isp_is_set_config() routine necessary */
++#define ISPWND_APPLY_ISCONF 0x00000004
++/* no cropping supported at all */
++#define ISPWND_NO_CROPPING 0x00000008
++
++/*
++ * Returns information about how to combine black pixel and
++ * zoom/crop windows for programming the ISP output formatter and the image
++ * stabilization unit for the given marvin derivative and ISP path.
++ */
++static u32 ci_get_isp_wnd_style(enum ci_isp_path isp_path)
++{
++ u32 res = 0;
++
++ /* output formatter exists at ISP input */
++ /* image stabilization in both bayer and YCbCr paths */
++ if ((isp_path == CI_ISP_PATH_BAYER) ||
++ (isp_path == CI_ISP_PATH_YCBCR))
++ /*we need to program the output formatter with the blackline
++ * window and */
++ res = ISPWND_APPLY_OUTFORM | ISPWND_APPLY_ISCONF;
++ else
++ res = ISPWND_COMBINE_WNDS | ISPWND_APPLY_OUTFORM;
++
++ return res;
++}
++
++/*
++ * the given windows for cutting away blacklines coming from
++ * the image sensor and further cropping of the image for other
++ * purposes like e.g. digital zoom to the output formatter and/or
++ * image stabilisation modules of Marvins ISP.
++ */
++static int ci_set_isp_windows(const struct ci_sensor_config *isi_sensor_config,
++ const struct ci_isp_window *wnd_blackline,
++ const struct ci_isp_window *wnd_zoom_crop)
++{
++ struct ci_isp_window wnd_out_form;
++ struct ci_isp_is_config is_conf;
++ enum ci_isp_path isp_path;
++ u32 wnd_style;
++
++ memset(&wnd_out_form, 0, sizeof(wnd_out_form));
++ memset(&is_conf, 0, sizeof(is_conf));
++
++ /*
++ * figure out the path through the ISP to process the data from the
++ * image sensor
++ */
++ isp_path = ci_isp_select_path(isi_sensor_config, NULL);
++ if (isp_path == CI_ISP_PATH_UNKNOWN) {
++ eprintk("detect marvin ISP path to use");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ /*
++ * get the recommended way to configure output formatter and/or
++ * image stabilization
++ */
++ wnd_style = ci_get_isp_wnd_style(isp_path);
++ if (wnd_style & ISPWND_NO_CROPPING) {
++ /*
++ * cropping not possible -> make sure that it is *not*
++ * supposed to be used
++ */
++ u16 isiX;
++ u16 isiY;
++ /* changed to avoid LINT warnings (Warning 534) */
++ (void)ci_sensor_res2size(isi_sensor_config->res, &isiX, &isiY);
++ if ((wnd_zoom_crop->hsize != isiX)
++ || (wnd_zoom_crop->vsize != isiY)
++ || (wnd_zoom_crop->hoffs != 0)
++ || (wnd_zoom_crop->voffs != 0)) {
++ eprintk("in selected ISP data path");
++ return CI_STATUS_NOTSUPP;
++ }
++ if ((wnd_blackline->hsize != isiX) ||
++ (wnd_blackline->vsize != isiY) ||
++ (wnd_blackline->hoffs != 0) ||
++ (wnd_blackline->voffs != 0)) {
++ eprintk("supported in selected ISP data path");
++ return CI_STATUS_NOTSUPP;
++ }
++ }
++
++ /*
++ * The image stabilization is allowed to move the window in both
++ * directions by the same amount of pixels we have calculated for
++ * the offsets. The initial image stabilization window is equal to
++ * the zoom/crop window
++ */
++ is_conf.max_dx = wnd_zoom_crop->hoffs;
++ is_conf.max_dy = wnd_zoom_crop->voffs;
++ is_conf.mrv_is_window = *wnd_zoom_crop;
++
++ /* combine both blackline and zoom/crop window */
++ if (wnd_style & ISPWND_COMBINE_WNDS) {
++ /* combine both blackline and zoom/crop window */
++ wnd_out_form = *wnd_zoom_crop;
++ wnd_out_form.voffs += wnd_blackline->voffs;
++ wnd_out_form.hoffs += wnd_blackline->hoffs;
++ is_conf.mrv_is_window = wnd_out_form;
++ if (wnd_style & ISPWND_APPLY_OUTFORM) {
++ /*
++ * if the output formatter is to be used, offsets
++ * are cut away there, so
++ * we don't need additional ones in the imags
++ * stabilization unit
++ */
++ is_conf.mrv_is_window.hoffs = 0;
++ is_conf.mrv_is_window.voffs = 0;
++ }
++ } else {
++ /*
++ * do not combine windows --> blacklines done with output
++ * formatter, zoom/cropping done with image stabilization
++ */
++ wnd_out_form = *wnd_blackline;
++ is_conf.mrv_is_window = *wnd_zoom_crop;
++ }
++
++ /* finally, apply the settings to marvin */
++ if (wnd_style & ISPWND_APPLY_OUTFORM) {
++ ci_isp_set_output_formatter(&wnd_out_form,
++ CI_ISP_CFG_UPDATE_IMMEDIATE);
++ }
++ if (wnd_style & ISPWND_APPLY_ISCONF) {
++ int res = ci_isp_is_set_config(&is_conf);
++ if (res != CI_STATUS_SUCCESS) {
++ eprintk("set image stabilization config");
++ return res;
++ }
++ }
++
++ /* success - remember our virtual settings */
++ last_isp_wnds.wnd_blacklines = *wnd_blackline;
++ last_isp_wnds.wnd_zoom_crop = *wnd_zoom_crop;
++
++ return CI_STATUS_SUCCESS;
++}
++
++/* sets extended YCbCr mode */
++static int ci_ext_ycb_cr_mode(const struct ci_isp_datapath_desc *path)
++{
++ u32 main_flag;
++
++ WARN_ON(!(path != NULL));
++
++ /* assume datapath deactivation if no selfpath pointer is given) */
++ if (path)
++ main_flag = path->flags;
++ else
++ main_flag = 0;
++
++ /* if flag CI_ISP_DPD_YCBCREXT is set set extended YCbCr mode */
++ if (main_flag & CI_ISP_DPD_ENABLE) {
++ if (main_flag & CI_ISP_DPD_YCBCREXT)
++ ci_isp_set_ext_ycmode();
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Configures main and self data pathes and scaler for data coming from the ISP.
++ *
++ * Following MARVIN subsystems are programmed:
++ * - ISP output formatter
++ * - Image stabilization module
++ * - YC-Splitter
++ * - Self path DMA-read multiplexer
++ * - Main path multiplexer
++ * - Main & Self path resizer
++ * - Small output unit
++ * - Memory Interface (MI) input source, en/disable and data format
++ *
++ * Following MARVIN subsystems are *NOT* programmed:
++ * - All ISP functionality but the output formatter & image stabilization module
++ * - color Processing block
++ * - JPEG encode subsystem (quantisation tables etc.)
++ * - Memory Interface (MI) output buffer addresses and sizes
++ */
++int ci_datapath_isp(const struct ci_pl_system_config *sys_conf,
++ const struct ci_sensor_config *isi_config,
++ const struct ci_isp_datapath_desc *main,
++ const struct ci_isp_datapath_desc *self, int zoom)
++{
++ int res;
++ /*
++ * copy of flags for main and self path to simplify access (no
++ * pointer de-reference)
++ */
++ u32 main_flag;
++ u32 self_flag;
++ /* resolution from sensor configuration */
++ u16 isiX;
++ u16 isiY;
++ /* things to apply to MARVIN */
++ struct ci_isp_scale scale_main;
++ struct ci_isp_scale scale_flag;
++ enum ci_isp_ycs_chn_mode chn_mode = 0;
++ enum ci_isp_dp_switch dp_switch = 0;
++ struct ci_isp_mi_ctrl mrv_mi_ctrl;
++ struct ci_isp_datapath_desc source;
++ /* ISP windowing because of cutting away blacklines from the sensor */
++ struct ci_isp_window wnd_blackline;
++ /* ISP windowing because of aspect ratio change and/or zoom */
++ struct ci_isp_window wnd_zoom_crop;
++
++ const struct ci_isp_datapath_desc *target = NULL;
++
++ /* assume dapapath deactivation for not provided descriptors */
++ main_flag = 0;
++ self_flag = 0;
++ if (main)
++ main_flag = main->flags; /* 0x012 */
++
++ if (self)
++ self_flag = self->flags; /* 0x10015 */
++
++ /* initialize variables on the stack */
++ res = CI_STATUS_SUCCESS;
++ /* changed to avoid LINT warnings (Warning 534) */
++ (void)ci_sensor_res2size(isi_config->res, &isiX, &isiY);
++ memset(&mrv_mi_ctrl, 0, sizeof(struct ci_isp_mi_ctrl));
++ memset(&wnd_blackline, 0, sizeof(wnd_blackline));
++ memset(&wnd_zoom_crop, 0, sizeof(wnd_zoom_crop));
++
++ /*
++ * ISP Windowing - fill in wnd_out_form, apply_out_form, is_conf and
++ * apply_is_conf
++ */
++
++ /*
++ * by default, size of both blackline and zoom/crop window
++ * is what the camera delivers.
++ */
++
++ /* (no cropping, no offset) */
++ wnd_blackline.hsize = isiX;
++ wnd_blackline.vsize = isiY;
++ wnd_zoom_crop = wnd_blackline;
++
++ /*
++ * check if we have to crop because of aspect ratio
++ * preservement of an
++ */
++
++ /* output channel */
++ if ((main_flag & CI_ISP_DPD_ENABLE) &&
++ (main_flag & CI_ISP_DPD_KEEPRATIO)) {
++ target = main;
++ }
++ if ((self_flag & CI_ISP_DPD_ENABLE) &&
++ (self_flag & CI_ISP_DPD_KEEPRATIO)) {
++ if (target) {
++ eprintk("only allowed for one path");
++ return CI_STATUS_NOTSUPP;
++ }
++ target = self;
++ }
++
++ /* if so, calculate the cropping */
++ if (target) {
++ u32 aspect_cam = (0x1000 * ((u32) isiX)) / isiY;
++ u32 aspect_target = (0x1000 * ((u32) (target->out_w))) /
++ target->out_h;
++ if (aspect_cam < aspect_target) {
++ /*
++ * camera aspect is more 'portrait-like' as
++ * target aspect. We have to crop the
++ * camera picture by cutting off a bit of
++ * the top & bottom changed to avoid LINT
++ * warnings (Info 734)
++ */
++ wnd_zoom_crop.vsize = (u16) (((u32) isiX *
++ (u32) (target->out_h)) / target->out_w);
++ } else {
++ /* camera aspect is more 'landscape-like'
++ * as target aspect. We have to crop the
++ * camera picture by cutting off a bit of
++ * the left and right changed to avoid LINT
++ * warnings (Info 734) */
++ wnd_zoom_crop.hsize = (u16) (((u32) isiY *
++ (u32) (target->out_w)) / target->out_h);
++ }
++ }
++
++ /*
++ * now, we may also want to do digital zoom. If so, we need
++ * to shrink the ISP window by the desired zoom factor.
++ */
++ if (zoom > 0) {
++ /* changed to avoid LINT warnings (Warning 573) */
++ wnd_zoom_crop.vsize = (u16) (((u32) (wnd_zoom_crop.vsize) *
++ 1024) / (1024 + (u32) zoom));
++ /* changed to avoid LINT warnings (Warning 573) */
++ wnd_zoom_crop.hsize = (u16) (((u32) (wnd_zoom_crop.hsize) *
++ 1024) / (1024 + (u32) zoom));
++ }
++ /*
++ * Remember that the output formatter h_size should be
++ * even if the scaler is used
++ * (otherwise the scaler may show unexpected behaviour in
++ * some rare cases)
++ */
++ wnd_zoom_crop.hsize &= ~0x01;
++ /*
++ * At last, we care about the offset of the ISP window. We
++ * want it centered on the image data delivered by the
++ * sensor (not counting possible black lines)
++ */
++ wnd_zoom_crop.hoffs = (isiX - wnd_zoom_crop.hsize) / 2;
++ wnd_zoom_crop.voffs = (isiY - wnd_zoom_crop.vsize) / 2;
++ /*
++ * If the image sensor delivers blacklines, we cut them
++ * away with moving wnd_blackline window by the given
++ * amount of lines
++ */
++ switch (isi_config->bls) {
++ /* no black lines */
++ case SENSOR_BLS_OFF:
++ break;
++ /* two black lines at frame start */
++ case SENSOR_BLS_TWO_LINES:
++ wnd_blackline.voffs += 2;
++ break;
++ /* two black lines at frame start and two at the end */
++ case SENSOR_BLS_FOUR_LINES:
++ wnd_blackline.voffs += 2;
++ break;
++ default:
++ eprintk("config");
++ return CI_STATUS_NOTSUPP;
++ }
++ /*
++ * if we are instructed to show the blacklines and the
++ * sensor generates them,
++ * we have to move the ISP windows to the upper border of
++ * the whole sensor, and deny the image stabilization to
++ * move around the window in vertical direction.
++ */
++ if (isi_config->bls != SENSOR_BLS_OFF) {
++ if (((main_flag & CI_ISP_DPD_ENABLE)
++ && (main_flag & CI_ISP_DPD_BLACKLINES_TOP))
++ || ((self_flag & CI_ISP_DPD_ENABLE)
++ && (self_flag & CI_ISP_DPD_BLACKLINES_TOP))) {
++ if ((main_flag & CI_ISP_DPD_ENABLE)
++ && (self_flag & CI_ISP_DPD_ENABLE)
++ && ((main_flag & CI_ISP_DPD_BLACKLINES_TOP)
++ != (self_flag & CI_ISP_DPD_BLACKLINES_TOP))) {
++ eprintk("and self path");
++ return CI_STATUS_NOTSUPP;
++ }
++ wnd_blackline.voffs = 0;
++ wnd_zoom_crop.voffs = 0;
++ }
++ }
++
++ source.out_w = wnd_zoom_crop.hsize;
++ source.out_h = wnd_zoom_crop.vsize;
++ source.flags = CI_ISP_DPD_DMA_IN_422;
++
++ /*to use crop set crop_flag first*/
++ if (crop_flag) {
++ wnd_zoom_crop.hsize = main->out_w;
++ wnd_zoom_crop.vsize = main->out_h;
++ }
++
++ dprintk(1, "source.out_w %d, source.out_h %d",
++ source.out_w, source.out_h);
++ if (main)
++ dprintk(1, "main.out_w %d, main.out_h %d",
++ main->out_w, main->out_h);
++ if (self)
++ dprintk(1, "self.out_w %d, self.out_h %d",
++ self->out_w, self->out_h);
++
++ /*
++ * At this point, wnd_zoom_crop and wnd_blackline contain
++ * the window sizes that reflect the users request. We have
++ * to configure the ISP output formatter and the image
++ * stabilization formatter in order to achieve this, but
++ * how they interact is highly dependant of the curr
++ * marvin derivative and which datapath of the ISP is
++ * activated. Therefore, translating wnd_zoom_crop and
++ * wnd_blackline into marvin register settings is a bit
++ * complicated and will be done by the
++ * ci_set_isp_windows() routine.
++ */
++
++ /* ISP Window */
++ /* MAIN path - fill in main_path, scale_main and main_rsz_lut */
++ /* basic selfpath settings */
++ res = ci_calc_main_path_settings(&source, main, &scale_main,
++ &mrv_mi_ctrl);
++ if (res != CI_STATUS_SUCCESS)
++ return res;
++
++ /* additional settings specific for main path fed from ISP */
++ if (main_flag & CI_ISP_DPD_ENABLE) {
++ switch (main_flag & CI_ISP_DPD_MODE_MASK) {
++ case CI_ISP_DPD_MODE_ISPYC:
++ case CI_ISP_DPD_MODE_ISPRAW:
++ case CI_ISP_DPD_MODE_ISPRAW_16B:
++ case CI_ISP_DPD_MODE_ISPJPEG:
++ /* allowed cases, just proceed */
++ break;
++ default:
++ eprintk("data coming from the ISP");
++ return CI_STATUS_NOTSUPP;
++ }
++ }
++
++ /* SELF path - fill in self_path & scale_flag */
++ /* basic selfpath settings */
++ res = ci_calc_self_path_settings(&source, self, &scale_flag,
++ &mrv_mi_ctrl);
++ if (res != CI_STATUS_SUCCESS)
++ return res;
++
++ if (sys_conf->isp_cfg.flags.ycbcr_non_cosited)
++ mrv_mi_ctrl.mrv_mif_sp_in_phase = mrv_mif_col_phase_non_cosited;
++ else
++ mrv_mi_ctrl.mrv_mif_sp_in_phase = mrv_mif_col_phase_cosited;
++ if (sys_conf->isp_cfg.flags.ycbcr_full_range)
++ mrv_mi_ctrl.mrv_mif_sp_in_range = mrv_mif_col_range_full;
++ else
++ mrv_mi_ctrl.mrv_mif_sp_in_range = mrv_mif_col_range_std;
++ if (self_flag & CI_ISP_DPD_ENABLE) {
++ switch (self_flag & CI_ISP_DPD_MODE_MASK) {
++ case CI_ISP_DPD_MODE_ISPYC:
++ /* only allowed case, just proceed */
++ break;
++ default:
++ eprintk("data coming from the ISP");
++ return CI_STATUS_NOTSUPP;
++ }
++ }
++
++ /* Datapath multiplexers */
++ res = ci_calc_dp_mux_settings(&mrv_mi_ctrl, &chn_mode, &dp_switch);
++ if (res != CI_STATUS_SUCCESS)
++ return res;
++
++ /* hardcoded global settings of the memory interface */
++ mrv_mi_ctrl.byte_swap_enable = false;
++
++ mrv_mi_ctrl.init_vals = CI_ISP_MIF_INIT_OFFSAndBase;
++
++ /*
++ * If we reach this point, we have collected all values to program
++ * the MARVIN for the requested datapath setup. Now all we've left
++ * to do is apply these to MARVINs register set. For this, we
++ * mostly use the low level MARVIN driver routines.
++ */
++ /*to use crop set crop_flag first*/
++ if (crop_flag) {
++ wnd_blackline.hsize = main->out_w;
++ wnd_blackline.vsize = main->out_h;
++ }
++
++ res = ci_set_isp_windows(isi_config, &wnd_blackline,
++ &wnd_zoom_crop);
++ if (res != CI_STATUS_SUCCESS) {
++ eprintk("failed to set ISP window configuration");
++ return res;
++ }
++ res = ci_isp_set_data_path(chn_mode, dp_switch);
++ if (res != CI_STATUS_SUCCESS)
++ return res;
++
++ res = ci_isp_set_mipi_smia(isi_config->mode);
++ if (res != CI_STATUS_SUCCESS)
++ return res;
++
++ if (mrv_mi_ctrl.self_path != CI_ISP_PATH_OFF)
++ ci_isp_res_set_self_resize(&scale_flag,
++ CI_ISP_CFG_UPDATE_IMMEDIATE,
++ ci_get_rsz_lut(self_flag));
++
++ if (mrv_mi_ctrl.main_path != CI_ISP_PATH_OFF)
++ ci_isp_res_set_main_resize(&scale_main,
++ CI_ISP_CFG_UPDATE_IMMEDIATE,
++ ci_get_rsz_lut(main_flag));
++
++ ci_isp_set_dma_read_mode(CI_ISP_DMA_RD_OFF,
++ CI_ISP_CFG_UPDATE_IMMEDIATE);
++
++ res = ci_isp_mif_set_path_and_orientation(&mrv_mi_ctrl);
++ if (res != CI_STATUS_SUCCESS) {
++ eprintk("failed to set MI path and orientation");
++ return res;
++ }
++
++ /* here the extended YCbCr mode is configured */
++ if (sys_conf->isp_cfg.flags.ycbcr_full_range)
++ res = ci_ext_ycb_cr_mode(main);
++ else
++ (void)ci_isp_set_yc_mode();
++
++ if (res != CI_STATUS_SUCCESS) {
++ eprintk("failed to set ISP YCbCr extended mode");
++ return res;
++ }
++
++ return CI_STATUS_SUCCESS;
++}
+diff --git a/drivers/media/video/mrstci/mrstisp/mrstisp_hw.c b/drivers/media/video/mrstci/mrstisp/mrstisp_hw.c
+new file mode 100644
+index 0000000..56891c1
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstisp/mrstisp_hw.c
+@@ -0,0 +1,1622 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include "mrstisp_stdinc.h"
++
++static unsigned long jiffies_start;
++
++void mrst_timer_start(void)
++{
++ jiffies_start = jiffies;
++}
++
++void mrst_timer_stop(void)
++{
++ jiffies_start = 0;
++}
++
++unsigned long mrst_get_micro_sec(void)
++{
++ unsigned long time_diff = 0;
++
++ time_diff = jiffies - jiffies_start;
++
++ return jiffies_to_msecs(time_diff);
++}
++
++/*
++ * Returns the ISP hardware ID.
++ */
++static u32 ci_isp_get_ci_isp_id(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 result = 0;
++
++ result = REG_GET_SLICE(mrv_reg->vi_id, MRV_REV_ID);
++
++ return result;
++}
++
++/*
++ * Gets the hardware ID and compares it with the expected one.
++ */
++static int ci_isp_verify_chip_id(void)
++{
++ u32 mrv_id = ci_isp_get_ci_isp_id();
++ dprintk(1, "HW-Id: 0x%08X", mrv_id);
++
++ if (mrv_id != MARVIN_FEATURE_CHIP_ID) {
++ eprintk("HW-Id does not match! read:0x%08X, expected:0x%08X",
++ mrv_id, MARVIN_FEATURE_CHIP_ID);
++ return CI_STATUS_FAILURE;
++ }
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Triggers an entire reset of MARVIN (equaling an asynchronous
++ * hardware reset).
++ * Checks the hardware ID. A debug warning is issued if the
++ * module ID does not match the expected ID.
++ * Enables all clocks of all sub-modules.
++ * MARVIN is in idle state afterwards.
++ */
++void ci_isp_init(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ /* verify ID, but no consequences if it doesn't match */
++ (void)ci_isp_verify_chip_id();
++
++ /* enable main clock */
++ REG_SET_SLICE(mrv_reg->vi_ccl, MRV_VI_CCLFDIS, MRV_VI_CCLFDIS_ENABLE);
++
++ /*
++ * enable all clocks to make sure that all submodules will be able to
++ * perform the reset correctly
++ */
++ REG_SET_SLICE(mrv_reg->vi_iccl, MRV_VI_ALL_CLK_ENABLE, ENABLE);
++
++ /*
++ * Reset of the entire MARVIN triggered by software. The minimum time
++ * permitted by mdelay ensures enough delay.
++ */
++
++ /* The reset bit will be cleared by the reset itself. */
++
++ /*
++ * The default value of the clock registers is all clocks on. So we
++ * don't have to enable the clocks again afterwards.
++ */
++
++ REG_SET_SLICE(mrv_reg->vi_ircl, MRV_VI_MARVIN_RST, ON);
++ /*mdelay(CI_ISP_DELAY_AFTER_RESET);*/
++ msleep(CI_ISP_DELAY_AFTER_RESET);
++}
++
++void ci_isp_off(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ /* enable main clock */
++ REG_SET_SLICE(mrv_reg->vi_ccl, MRV_VI_CCLFDIS,
++ MRV_VI_CCLFDIS_DISABLE);
++
++ /*
++ * enable all clocks to make sure that all submodules will be able to
++ * perform the reset correctly
++ */
++ REG_SET_SLICE(mrv_reg->vi_iccl, MRV_VI_ALL_CLK_ENABLE, DISABLE);
++}
++
++/*
++ * Returns the mask for the frame end interrupts, which are
++ * used for Isp.
++ */
++u32 ci_isp_get_frame_end_irq_mask_isp(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ switch (REG_GET_SLICE(mrv_reg->vi_dpcl, MRV_VI_DMA_SWITCH)) {
++ /*
++ * 2: path to image effects block (i.e. replacement for data coming
++ * from the ISP)
++ */
++ case MRV_VI_DMA_SWITCH_IE:
++ /* datapath is used by DMA */
++ return 0;
++ /*
++ * 0: direct path to self path mux
++ */
++ case MRV_VI_DMA_SWITCH_SELF:
++ /*
++ * 1: path to superimpose block
++ */
++ case MRV_VI_DMA_SWITCH_SI:
++ /*
++ * 3: direct path to JPEG encoder (R2B-buffer-less encodein mode)
++ */
++ case MRV_VI_DMA_SWITCH_JPG:
++ default:
++ /* main and/or self path depends on the YC-splitter setting */
++ {
++ switch (REG_GET_SLICE
++ (mrv_reg->vi_dpcl, MRV_VI_CHAN_MODE)) {
++ case MRV_VI_CHAN_MODE_MP:
++ return MRV_MI_MP_FRAME_END_MASK;
++ case MRV_VI_CHAN_MODE_SP:
++ return MRV_MI_SP_FRAME_END_MASK;
++ case MRV_VI_CHAN_MODE_MP_SP:
++ return MRV_MI_MP_FRAME_END_MASK |
++ MRV_MI_SP_FRAME_END_MASK;
++ default:
++ return 0;
++ }
++ }
++ }
++
++}
++
++/*
++ * Programs the number of frames to capture. Clears frame end
++ * interrupt to allow waiting in ci_isp_wait_for_frame_end().
++ * Enables the ISP input acquisition and output formatter.
++ * If immediate=false, the hardware assures that enabling is
++ * done frame synchronously.
++ */
++void ci_isp_start(u16 number_of_frames,
++ enum ci_isp_conf_update_time update_time)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 isp_ctrl = REG_READ(mrv_reg->isp_ctrl);
++ u32 eof_irq_mask = ci_isp_get_frame_end_irq_mask_isp();
++
++ /* max. 10 bits allowed */
++ WARN_ON(!(number_of_frames <= MRV_ISP_ACQ_NR_FRAMES_MAX));
++
++ REG_SET_SLICE(mrv_reg->isp_acq_nr_frames, MRV_ISP_ACQ_NR_FRAMES,
++ number_of_frames);
++
++ /* clear frame end interrupt */
++ REG_WRITE(mrv_reg->mi_icr, eof_irq_mask);
++
++ /* Enable ISP input Acquisition and output formatter. */
++
++ /*
++ * Input Acquisition is always enabled synchronous to the image sensor
++ * (no configuration update required). As soon as the input
++ * acquisition is started bit in_enable_shd in the register
++ * isp_flags_shd is set by hardware. In the following a frame end
++ * recognized by the input acquisition unit leads to
++ * ris_in_frame_end=1 in isp_ris. However a recognized frame end and
++ * no signaled errors are no guarantee for a valid configuration.
++ */
++
++ /*
++ * The output formatter is enabled frame synchronously according to
++ * the internal sync signals. Bit MRV_GEN_CFG_UPD has to be set. Bit
++ * isp_on_shd in isp_flags_shd is set when the output formatter is
++ * started. A recognized frame end is signaled with ris_out_frame_end
++ * in isp_ris.
++ */
++
++ /*
++ * The configuration of the input acquisition and the output
++ * formatter has to be correct to generate proper internal sync
++ * signals and thus a proper frame-synchronous update signal.
++ */
++
++ /* If the output formatter does not start check the following:
++ * sync polarities
++ * sample edge
++ * mode in register isp_ctrl
++ * sampling window of input acquisition <= picture size of image
++ * sensor
++ * output formatter window <= sampling window of input
++ * acquisition
++ */
++
++ /*
++ * If problems with the window sizes are suspected preferably add some
++ * offsets and reduce the window sizes, so that the above relations
++ * are true by all means.
++ */
++
++ switch (update_time) {
++ case CI_ISP_CFG_UPDATE_FRAME_SYNC:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_GEN_CFG_UPD, ENABLE);
++ break;
++ case CI_ISP_CFG_UPDATE_IMMEDIATE:
++ /*
++ * MRV_ISP_ISP_CFG_UPD is used instead of
++ * MRV_ISP_ISP_GEN_CFG_UPD. This updates the configuration
++ * right away and MARVIN is ready to aquire the next incoming
++ * frame.
++ */
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_CFG_UPD, ENABLE);
++ break;
++ case CI_ISP_CFG_UPDATE_LATER:
++ /* no update from within this function
++ * but enable ISP and Input */
++ break;
++ default:
++ break;
++ }
++
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_INFORM_ENABLE, ENABLE);
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_ENABLE, ENABLE);
++ REG_WRITE(mrv_reg->isp_ctrl, isp_ctrl);
++
++ dprintk(3, "ISP_CTRL = 0x%08X", mrv_reg->isp_ctrl);
++}
++
++/*
++ * Clear frame end interrupt to allow waiting in
++ * ci_isp_wait_for_frame_end(). Disable output formatter (frame
++ * synchronously).
++ */
++void ci_isp_stop(enum ci_isp_conf_update_time update_time)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 isp_ctrl = REG_READ(mrv_reg->isp_ctrl);
++ u32 eof_irq_mask = ci_isp_get_frame_end_irq_mask_isp();
++
++ /* clear frame end interrupt */
++ REG_WRITE(mrv_reg->mi_icr, eof_irq_mask);
++ /* disable output formatter */
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_ENABLE, DISABLE);
++
++ switch (update_time) {
++ case CI_ISP_CFG_UPDATE_FRAME_SYNC:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_GEN_CFG_UPD, ENABLE);
++ break;
++ case CI_ISP_CFG_UPDATE_IMMEDIATE:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_CFG_UPD, ENABLE);
++ break;
++ case CI_ISP_CFG_UPDATE_LATER:
++ break;
++ default:
++ break;
++ }
++
++ REG_WRITE(mrv_reg->isp_ctrl, isp_ctrl);
++}
++
++/*
++ * Changes the data path settings.
++ */
++int ci_isp_set_data_path(enum ci_isp_ycs_chn_mode ycs_chn_mode,
++ enum ci_isp_dp_switch dp_switch)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 vi_dpcl = REG_READ(mrv_reg->vi_dpcl);
++ u32 vi_chan_mode;
++ u32 vi_mp_mux;
++
++ /* get desired setting for ycs_chan_mode (or vi_chan_mode) bits */
++ switch (ycs_chn_mode) {
++ case CI_ISP_YCS_OFF:
++ vi_chan_mode = MRV_VI_CHAN_MODE_OFF;
++ break;
++ case CI_ISP_YCS_Y:
++ vi_chan_mode = MRV_VI_CHAN_MODE_Y;
++ break;
++ case CI_ISP_YCS_MVRaw:
++ vi_chan_mode = MRV_VI_CHAN_MODE_MP_RAW;
++ break;
++ case CI_ISP_YCS_MV:
++ vi_chan_mode = MRV_VI_CHAN_MODE_MP;
++ break;
++ case CI_ISP_YCS_SP:
++ vi_chan_mode = MRV_VI_CHAN_MODE_SP;
++ break;
++ case CI_ISP_YCS_MV_SP:
++ vi_chan_mode = MRV_VI_CHAN_MODE_MP_SP;
++ break;
++ default:
++ eprintk("unknown value for ycs_chn_mode");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ if (vi_chan_mode & ~(MRV_VI_CHAN_MODE_MASK >> MRV_VI_CHAN_MODE_SHIFT)) {
++ eprintk("enum ci_isp_ycs_chn_mode not supported");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ /* get desired setting for vi_dp_switch (or vi_dp_mux) bits */
++ switch (dp_switch) {
++ case CI_ISP_DP_RAW:
++ vi_mp_mux = MRV_VI_MP_MUX_RAW;
++ break;
++ case CI_ISP_DP_JPEG:
++ vi_mp_mux = MRV_VI_MP_MUX_JPEG;
++ break;
++ case CI_ISP_DP_MV:
++ vi_mp_mux = MRV_VI_MP_MUX_MP;
++ break;
++ default:
++ eprintk("unknown value for dp_switch");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ if (vi_mp_mux & ~MRV_VI_MP_MUX_MASK) {
++ eprintk("dp_switch value not supported");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ /* program settings into MARVIN vi_dpcl register */
++ REG_SET_SLICE(vi_dpcl, MRV_VI_CHAN_MODE, vi_chan_mode);
++ REG_SET_SLICE(vi_dpcl, MRV_VI_MP_MUX, vi_mp_mux);
++ REG_WRITE(mrv_reg->vi_dpcl, vi_dpcl);
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Changes the data path settings to SMIA or MIPI.
++ */
++int ci_isp_set_mipi_smia(u32 mode)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 if_select;
++
++ /* get desired setting for if_select bits */
++ switch (mode) {
++ case SENSOR_MODE_SMIA:
++ if_select = MRV_IF_SELECT_SMIA;
++ break;
++ case SENSOR_MODE_MIPI:
++ if_select = MRV_IF_SELECT_MIPI;
++ break;
++ case SENSOR_MODE_BAYER:
++ case SENSOR_MODE_BT601:
++ case SENSOR_MODE_BT656:
++ case SENSOR_MODE_PICT:
++ case SENSOR_MODE_DATA:
++ case SENSOR_MODE_BAY_BT656:
++ case SENSOR_MODE_RAW_BT656:
++ if_select = MRV_IF_SELECT_PAR;
++ break;
++ default:
++ eprintk("unknown value for mode");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ /* program settings into MARVIN vi_dpcl register */
++ REG_SET_SLICE(mrv_reg->vi_dpcl, MRV_IF_SELECT, if_select);
++
++ if (if_select == MRV_IF_SELECT_MIPI) {
++ REG_WRITE(mrv_reg->mipi_ctrl, 0x1001); /*XXX FLUSH_FIFO? */
++ /* REG_WRITE(mrv_reg->mipi_ctrl, 0x0001); FLUSH_FIFO? */
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Waits until the specified bits becomes signaled in the mi_ris
++ * register.
++ */
++static int ci_isp_wait_for_mi(struct mrst_isp_device *intel, u32 bit_mask)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++#if 0
++ int ret = 0;
++ INIT_COMPLETION(intel->mi_complete);
++ ret = wait_for_completion_interruptible_timeout(&intel->mi_complete,
++ 10*HZ);
++ if (ret == 0) {
++ eprintk("time out in wait for mi");
++ /*
++ * Try to recover. Softreset of submodules (but not
++ * entire marvin) resets processing and status
++ * information, but not configuration register
++ * content. Bits are sticky. So we have to clear them.
++ * Reset affects the MARVIN 1..2 clock cycles after
++ * the bits are set to high. So we don't have to wait
++ * in software before clearing them.
++ */
++
++ /*
++ * Note that only modules with clock enabled will be
++ * affected.
++ */
++ REG_SET_SLICE(mrv_reg->vi_ircl, MRV_VI_ALL_SOFT_RST, ON);
++ REG_SET_SLICE(mrv_reg->vi_ircl, MRV_VI_ALL_SOFT_RST, OFF);
++ mdelay(CI_ISP_DELAY_AFTER_RESET);
++ /*
++ * isp config update, neccessary to update v/h_size
++ * into shadow registers
++ */
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_CFG_UPD, ON);
++ return CI_STATUS_FAILURE;
++ }
++ return CI_STATUS_SUCCESS;
++#endif
++ u32 irq;
++ static int err_frame_cnt;
++ mrst_timer_start();
++ /*
++ * Wait for the curr BitMask. If the BitMask is zero, then it's no
++ * waiting.
++ */
++ while ((mrv_reg->mi_ris & bit_mask) != bit_mask) {
++
++ irq = REG_READ(mrv_reg->isp_ris);
++ if (irq & (MRV_ISP_RIS_DATA_LOSS_MASK
++ | MRV_ISP_RIS_PIC_SIZE_ERR_MASK)){
++ err_frame_cnt++;
++ dprintk(1, "irq = 0x%x, err rumber = %d", irq,
++ err_frame_cnt);
++ }
++ if (mrst_get_micro_sec() > 1000) {
++ /*
++ * Note: Don't use REG_READ because content of
++ * registers would be already printed here.
++ */
++ dprintk(1, "time out");
++ mrst_timer_stop();
++ /*
++ * Try to recover. Softreset of submodules (but not
++ * entire marvin) resets processing and status
++ * information, but not configuration register
++ * content. Bits are sticky. So we have to clear them.
++ * Reset affects the MARVIN 1..2 clock cycles after
++ * the bits are set to high. So we don't have to wait
++ * in software before clearing them.
++ */
++
++ /*
++ * Note that only modules with clock enabled will be
++ * affected.
++ */
++ REG_SET_SLICE(mrv_reg->vi_ircl,
++ MRV_VI_ALL_SOFT_RST, ON);
++ REG_SET_SLICE(mrv_reg->vi_ircl,
++ MRV_VI_ALL_SOFT_RST, OFF);
++ /*mdelay(CI_ISP_DELAY_AFTER_RESET);*/
++ msleep(CI_ISP_DELAY_AFTER_RESET);
++ /*
++ * isp config update, neccessary to update v/h_size
++ * into shadow registers
++ */
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_CFG_UPD,
++ ON);
++ return CI_STATUS_FAILURE;
++ }
++ }
++
++ mrst_timer_stop();
++ if (REG_GET_SLICE(mrv_reg->isp_ris, MRV_ISP_RIS_DATA_LOSS))
++ dprintk(1, "no failure, but MRV_ISPINT_DATA_LOSS");
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Waits until a frame is written to memory (frame end
++ * interrupt occurs).
++ * Waits for the frame end interrupt of the memory
++ * interface.
++ */
++int ci_isp_wait_for_frame_end(struct mrst_isp_device *intel)
++{
++ return ci_isp_wait_for_mi(intel, ci_isp_get_frame_end_irq_mask_isp());
++}
++
++/*
++ * Writes '0xFFFFFFFF' into all *_icr registers to clear all
++ * interrupts.
++ */
++void ci_isp_reset_interrupt_status(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ /* ISP interrupt clear register */
++ REG_SET_SLICE(mrv_reg->isp_icr, MRV_ISP_ICR_ALL, ON);
++ REG_SET_SLICE(mrv_reg->isp_err_clr, MRV_ISP_ALL_ERR, ON);
++ REG_SET_SLICE(mrv_reg->mi_icr, MRV_MI_ALLIRQS, ON);
++ /* JPEG error interrupt clear register */
++ REG_SET_SLICE(mrv_reg->jpe_error_icr, MRV_JPE_ALL_ERR, ON);
++ /* JPEG status interrupt clear register */
++ REG_SET_SLICE(mrv_reg->jpe_status_icr, MRV_JPE_ALL_STAT, ON);
++
++ REG_WRITE(mrv_reg->mipi_icr, 0xffffffff); /*XXX replace by a macro */
++}
++
++void mrst_isp_disable_interrupt(struct mrst_isp_device *isp)
++{
++ struct isp_register *mrv_reg = (struct isp_register *)MEM_MRV_REG_BASE;
++ REG_SET_SLICE(mrv_reg->isp_imsc, MRV_ISP_IMSC_ALL, OFF);
++ REG_SET_SLICE(mrv_reg->mi_imsc, MRV_MI_ALLIRQS, OFF);
++ REG_SET_SLICE(mrv_reg->jpe_error_imr, MRV_JPE_ALL_ERR, OFF);
++ REG_SET_SLICE(mrv_reg->jpe_status_imr, MRV_JPE_ALL_STAT, OFF);
++ REG_WRITE(mrv_reg->mipi_imsc, 0x00000000);
++}
++
++void mrst_isp_enable_interrupt(struct mrst_isp_device *isp)
++{
++ struct isp_register *mrv_reg = (struct isp_register *)MEM_MRV_REG_BASE;
++
++ REG_SET_SLICE(mrv_reg->isp_imsc, MRV_ISP_IMSC_DATA_LOSS, ON);
++ REG_SET_SLICE(mrv_reg->isp_imsc, MRV_ISP_IMSC_PIC_SIZE_ERR, ON);
++
++ REG_WRITE(mrv_reg->mi_imsc, MRV_MI_MP_FRAME_END_MASK);
++
++ REG_SET_SLICE(mrv_reg->mi_imsc, MRV_MI_MBLK_LINE, ON);
++
++ REG_SET_SLICE(mrv_reg->jpe_error_imr, MRV_JPE_ALL_ERR, ON);
++ REG_SET_SLICE(mrv_reg->jpe_status_imr, MRV_JPE_ALL_STAT, ON);
++
++ REG_WRITE(mrv_reg->mipi_imsc, 0x00f00000);
++
++ ci_isp_reset_interrupt_status();
++}
++
++/*
++ * Selects DMA read mode (i.e. sink of the data read from system
++ * memory by the DMA-read block).
++ * update_time is only used on Marvin3plus,
++ * on all other Marvin derivates immediate update is made
++ */
++void ci_isp_set_dma_read_mode(enum ci_isp_dma_read_mode mode,
++ enum ci_isp_conf_update_time update_time)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ /* added to avoid LINT warnings (Info 530) */
++ u32 vi_dma_switch = 0;
++ /* added to avoid LINT warnings (Info 530) */
++ u32 vi_dma_spmux = 0;
++ /* added to avoid LINT warnings (Info 530) */
++ u32 vi_dma_iemux = 0;
++ /* added to avoid LINT warnings (Info 530) */
++ int dma_jpeg_select = false;
++
++ u32 vi_dpcl = REG_READ(mrv_reg->vi_dpcl);
++
++ /*
++ * DMA-read feature connected through a dedicated DMA-read
++ * multiplexer.
++ */
++
++ /* Programming is done via vi_dpcl register only */
++#define DMA_READ_MODE_PROGRAMMING_VI_SPMCL 0
++#define DMA_READ_MODE_PROGRAMMING_VI_DPCL 1
++ WARN_ON(!((mode == CI_ISP_DMA_RD_OFF) ||
++ (mode == CI_ISP_DMA_RD_SELF_PATH) ||
++ (mode == CI_ISP_DMA_RD_IE_PATH) ||
++ (mode == CI_ISP_DMA_RD_SUPERIMPOSE)));
++
++ switch (mode) {
++ case CI_ISP_DMA_RD_OFF:
++ vi_dma_switch = MRV_VI_DMA_SWITCH_SELF;
++ vi_dma_spmux = MRV_VI_DMA_SPMUX_CAM;
++ vi_dma_iemux = MRV_VI_DMA_IEMUX_CAM;
++ dma_jpeg_select = false;
++ break;
++ case CI_ISP_DMA_RD_SELF_PATH:
++ vi_dma_switch = MRV_VI_DMA_SWITCH_SELF;
++ vi_dma_spmux = MRV_VI_DMA_SPMUX_DMA;
++ vi_dma_iemux = MRV_VI_DMA_IEMUX_CAM;
++ dma_jpeg_select = false;
++ break;
++ case CI_ISP_DMA_RD_IE_PATH:
++ vi_dma_switch = MRV_VI_DMA_SWITCH_IE;
++ vi_dma_spmux = MRV_VI_DMA_SPMUX_CAM;
++ vi_dma_iemux = MRV_VI_DMA_IEMUX_DMA;
++ dma_jpeg_select = false;
++ break;
++ case CI_ISP_DMA_RD_JPG_ENC:
++ vi_dma_switch = MRV_VI_DMA_SWITCH_JPG;
++ vi_dma_spmux = MRV_VI_DMA_SPMUX_CAM;
++ vi_dma_iemux = MRV_VI_DMA_IEMUX_CAM;
++ dma_jpeg_select = true;
++ break;
++ case CI_ISP_DMA_RD_SUPERIMPOSE:
++ vi_dma_switch = MRV_VI_DMA_SWITCH_SI;
++ vi_dma_spmux = MRV_VI_DMA_SPMUX_CAM;
++ vi_dma_iemux = MRV_VI_DMA_IEMUX_CAM;
++ dma_jpeg_select = false;
++ break;
++ default:
++ /* unknown DMA-read mode */
++ WARN_ON(1);
++ }
++
++ REG_SET_SLICE(vi_dpcl, MRV_VI_DMA_SWITCH, vi_dma_switch);
++ REG_SET_SLICE(vi_dpcl, MRV_VI_DMA_SPMUX, vi_dma_spmux);
++ REG_SET_SLICE(vi_dpcl, MRV_VI_DMA_IEMUX, vi_dma_iemux);
++#if ((MRV_VI_MP_MUX_JPGDIRECT & \
++~(MRV_VI_MP_MUX_MASK >> MRV_VI_MP_MUX_SHIFT)) == 0)
++ if (dma_jpeg_select) {
++ REG_SET_SLICE(vi_dpcl, MRV_VI_MP_MUX,
++ MRV_VI_MP_MUX_JPGDIRECT);
++ }
++#else
++ /* direct DMA to JPEG not supported */
++ UNUSED_PARAM(dma_jpeg_select);
++#endif
++ REG_WRITE(mrv_reg->vi_dpcl, vi_dpcl);
++}
++
++/*
++ * Set extended mode with unrestricted values for YCbCr
++ * Y (0-255) CbCr (0-255)
++ */
++void ci_isp_set_ext_ycmode(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 isp_ctrl = REG_READ(mrv_reg->isp_ctrl);
++
++ /* modify isp_ctrl register */
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_CSM_C_RANGE,
++ MRV_ISP_ISP_CSM_C_RANGE_FULL);
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_CSM_Y_RANGE,
++ MRV_ISP_ISP_CSM_Y_RANGE_FULL);
++ REG_WRITE(mrv_reg->isp_ctrl, isp_ctrl);
++
++ /* program RGB to YUV color conversion with extended range */
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_0, MRV_ISP_CC_COEFF_0, 0x0026);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_1, MRV_ISP_CC_COEFF_1, 0x004B);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_2, MRV_ISP_CC_COEFF_2, 0x000F);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_3, MRV_ISP_CC_COEFF_3, 0x01EA);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_4, MRV_ISP_CC_COEFF_4, 0x01D6);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_5, MRV_ISP_CC_COEFF_5, 0x0040);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_6, MRV_ISP_CC_COEFF_6, 0x0040);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_7, MRV_ISP_CC_COEFF_7, 0x01CA);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_8, MRV_ISP_CC_COEFF_8, 0x01F6);
++}
++
++void ci_isp_set_yc_mode(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *)MEM_MRV_REG_BASE;
++ u32 isp_ctrl = REG_READ(mrv_reg->isp_ctrl);
++
++ /* modify isp_ctrl register */
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_CSM_C_RANGE,
++ MRV_ISP_ISP_CSM_Y_RANGE_BT601);
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_CSM_Y_RANGE,
++ MRV_ISP_ISP_CSM_Y_RANGE_BT601);
++ REG_WRITE(mrv_reg->isp_ctrl, isp_ctrl);
++
++ /* program RGB to YUV color conversion with extended range */
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_0, MRV_ISP_CC_COEFF_0, 0x0021);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_1, MRV_ISP_CC_COEFF_1, 0x0040);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_2, MRV_ISP_CC_COEFF_2, 0x000D);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_3, MRV_ISP_CC_COEFF_3, 0x01ED);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_4, MRV_ISP_CC_COEFF_4, 0x01DB);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_5, MRV_ISP_CC_COEFF_5, 0x0038);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_6, MRV_ISP_CC_COEFF_6, 0x0038);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_7, MRV_ISP_CC_COEFF_7, 0x01D1);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_8, MRV_ISP_CC_COEFF_8, 0x01F7);
++}
++
++/*
++ * writes the color values for contrast, brightness,
++ * saturation and hue into the appropriate Marvin
++ * registers
++ */
++void ci_isp_col_set_color_processing(
++ const struct ci_isp_color_settings *col)
++{
++ struct isp_register *mrv_reg =
++ (struct isp_register *) MEM_MRV_REG_BASE;
++
++ if (col == NULL) {
++ /* disable color processing (bypass) */
++ mrv_reg->c_proc_ctrl = 0;
++ } else {
++ mrv_reg->c_proc_contrast = col->contrast;
++ mrv_reg->c_proc_brightness = col->brightness;
++ mrv_reg->c_proc_saturation = col->saturation;
++ mrv_reg->c_proc_hue = col->hue;
++
++ /* modify color processing registers */
++
++ if (col->flags & CI_ISP_CPROC_C_OUT_RANGE) {
++ mrv_reg->c_proc_ctrl =
++ mrv_reg->c_proc_ctrl | CI_ISP_CPROC_C_OUT_RANGE;
++ }
++
++ if (col->flags & CI_ISP_CPROC_Y_IN_RANGE) {
++ mrv_reg->c_proc_ctrl =
++ mrv_reg->c_proc_ctrl | CI_ISP_CPROC_Y_IN_RANGE;
++ }
++
++ if (col->flags & CI_ISP_CPROC_Y_OUT_RANGE) {
++ mrv_reg->c_proc_ctrl =
++ mrv_reg->c_proc_ctrl | CI_ISP_CPROC_Y_OUT_RANGE;
++ }
++
++ if (col->flags & CI_ISP_CPROC_ENABLE) {
++ mrv_reg->c_proc_ctrl =
++ mrv_reg->c_proc_ctrl | CI_ISP_CPROC_ENABLE;
++ }
++ }
++}
++
++/*
++ * Translates a chrominance component value from usual
++ * representation (range 16..240, 128=neutral grey)
++ * to the one used by the ie_tint register
++ * The value is returned as 32 bit unsigned to support shift
++ * operation without explicit cast.
++ * The translation formular implemented here is taken from
++ * the image effects functional specification document,
++ * Doc-ID 30-001-481.130, revision 1.1 from november, 21st. 2005
++ */
++static u32 ci_isp_ie_tint_cx2_reg_val(u8 cx)
++{
++ s32 temp;
++ u32 reg_val;
++
++ /*
++ * apply scaling as specified in the image effects functional
++ * specification
++ */
++ temp = 128 - (s32) cx;
++ temp = ((temp * 64) / 110);
++
++ /* convert from two's complement to sign/value */
++ if (temp < 0) {
++ reg_val = 0x80;
++ temp *= (-1);
++ } else
++ reg_val = 0;
++
++ /* saturate at 7 bits */
++ if (temp > 0x7F)
++ temp = 0x7F;
++
++ /* combine sign and value to build the regiter value */
++ reg_val |= (u32) temp;
++
++ return reg_val;
++}
++
++/*
++ * Translates usual (decimal) matrix coefficient into the
++ * 4 bit register representation (used in the ie_mat_X registers).
++ * for unsupported decimal numbers, a supported replacement is
++ * selected automatically.
++ * The value is returned as 32 bit unsigned to support shift
++ * operation without explicit cast.
++ * The translation formular implemented here is taken from
++ * the image effects functional specification document,
++ * Doc-ID 30-001-481.130, revision 1.1 from november, 21st. 2005
++ */
++static u32 ci_isp_ie_mx_dec2_reg_val(s8 dec)
++{
++ if (dec <= (-6)) {
++ /* equivlent to -8 */
++ return 0x0f;
++ } else if (dec <= (-3)) {
++ /* equivlent to -4 */
++ return 0x0e;
++ } else if (dec == (-2)) {
++ /* equivlent to -2 */
++ return 0x0d;
++ } else if (dec == (-1)) {
++ /* equivlent to -1 */
++ return 0x0c;
++ } else if (dec == 0) {
++ /* equivlent to 0 (entry not used) */
++ return 0x00;
++ } else if (dec == 1) {
++ /* equivlent to 1 */
++ return 0x08;
++ } else if (dec == 2) {
++ /* equivlent to 2 */
++ return 0x09;
++ } else if (dec < 6) {
++ /* equivlent to 4 */
++ return 0x0a;
++ } else {
++ /* equivlent to 8 */
++ return 0x0b;
++ }
++}
++
++/*
++ * translates the values of the given configuration
++ * structure into register settings for the image effects
++ * submodule and loads the registers.
++ */
++int ci_isp_ie_set_config(const struct ci_isp_ie_config *ie_config)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ if (!ie_config) {
++ /* just disable the module, i.e. put it in bypass mode */
++ REG_SET_SLICE(mrv_reg->img_eff_ctrl, MRV_IMGEFF_BYPASS_MODE,
++ MRV_IMGEFF_BYPASS_MODE_BYPASS);
++ } else {
++ /* apply the given settings */
++ u32 ul_ie_ctrl = REG_READ(mrv_reg->img_eff_ctrl);
++ u32 ul_ie_csel = REG_READ(mrv_reg->img_eff_color_sel);
++ u32 ul_ie_tint = REG_READ(mrv_reg->img_eff_tint);
++ u32 ul_ie_mat1 = REG_READ(mrv_reg->img_eff_mat_1);
++ u32 ul_ie_mat2 = REG_READ(mrv_reg->img_eff_mat_2);
++ u32 ul_ie_mat3 = REG_READ(mrv_reg->img_eff_mat_3);
++ u32 ul_ie_mat4 = REG_READ(mrv_reg->img_eff_mat_4);
++ u32 ul_ie_mat5 = REG_READ(mrv_reg->img_eff_mat_5);
++
++ /* overall operation mode */
++ switch (ie_config->mode) {
++ case CI_ISP_IE_MODE_OFF:
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_BYPASS_MODE,
++ MRV_IMGEFF_BYPASS_MODE_BYPASS);
++ break;
++ case CI_ISP_IE_MODE_GRAYSCALE:
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_EFFECT_MODE,
++ MRV_IMGEFF_EFFECT_MODE_GRAY);
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_BYPASS_MODE,
++ MRV_IMGEFF_BYPASS_MODE_PROCESS);
++ break;
++ case CI_ISP_IE_MODE_NEGATIVE:
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_EFFECT_MODE,
++ MRV_IMGEFF_EFFECT_MODE_NEGATIVE);
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_BYPASS_MODE,
++ MRV_IMGEFF_BYPASS_MODE_PROCESS);
++ break;
++ case CI_ISP_IE_MODE_SEPIA:
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_EFFECT_MODE,
++ MRV_IMGEFF_EFFECT_MODE_SEPIA);
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_BYPASS_MODE,
++ MRV_IMGEFF_BYPASS_MODE_PROCESS);
++ break;
++ case CI_ISP_IE_MODE_COLOR_SEL:
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_EFFECT_MODE,
++ MRV_IMGEFF_EFFECT_MODE_COLOR_SEL);
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_BYPASS_MODE,
++ MRV_IMGEFF_BYPASS_MODE_PROCESS);
++ break;
++ case CI_ISP_IE_MODE_EMBOSS:
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_EFFECT_MODE,
++ MRV_IMGEFF_EFFECT_MODE_EMBOSS);
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_BYPASS_MODE,
++ MRV_IMGEFF_BYPASS_MODE_PROCESS);
++ break;
++ case CI_ISP_IE_MODE_SKETCH:
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_EFFECT_MODE,
++ MRV_IMGEFF_EFFECT_MODE_SKETCH);
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_BYPASS_MODE,
++ MRV_IMGEFF_BYPASS_MODE_PROCESS);
++ break;
++ default:
++ return CI_STATUS_OUTOFRANGE;
++ }
++
++ /* use next frame sync update */
++ REG_SET_SLICE(ul_ie_ctrl, MRV_IMGEFF_CFG_UPD, ON);
++
++ /* color selection settings */
++ REG_SET_SLICE(ul_ie_csel, MRV_IMGEFF_COLOR_THRESHOLD,
++ (u32) (ie_config->color_thres));
++ REG_SET_SLICE(ul_ie_csel, MRV_IMGEFF_COLOR_SELECTION,
++ (u32) (ie_config->color_sel));
++
++ /* tint color settings */
++ REG_SET_SLICE(ul_ie_tint, MRV_IMGEFF_INCR_CB,
++ ci_isp_ie_tint_cx2_reg_val(ie_config->tint_cb));
++ REG_SET_SLICE(ul_ie_tint, MRV_IMGEFF_INCR_CR,
++ ci_isp_ie_tint_cx2_reg_val(ie_config->tint_cr));
++
++ /* matrix coefficients */
++ REG_SET_SLICE(ul_ie_mat1, MRV_IMGEFF_EMB_COEF_11_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_emboss.
++ coeff_11));
++ REG_SET_SLICE(ul_ie_mat1, MRV_IMGEFF_EMB_COEF_12_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_emboss.
++ coeff_12));
++ REG_SET_SLICE(ul_ie_mat1, MRV_IMGEFF_EMB_COEF_13_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_emboss.
++ coeff_13));
++ REG_SET_SLICE(ul_ie_mat1, MRV_IMGEFF_EMB_COEF_21_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_emboss.
++ coeff_21));
++ REG_SET_SLICE(ul_ie_mat2, MRV_IMGEFF_EMB_COEF_22_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_emboss.
++ coeff_22));
++ REG_SET_SLICE(ul_ie_mat2, MRV_IMGEFF_EMB_COEF_23_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_emboss.
++ coeff_23));
++ REG_SET_SLICE(ul_ie_mat2, MRV_IMGEFF_EMB_COEF_31_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_emboss.
++ coeff_31));
++ REG_SET_SLICE(ul_ie_mat2, MRV_IMGEFF_EMB_COEF_32_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_emboss.
++ coeff_32));
++ REG_SET_SLICE(ul_ie_mat3, MRV_IMGEFF_EMB_COEF_33_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_emboss.
++ coeff_33));
++ REG_SET_SLICE(ul_ie_mat3, MRV_IMGEFF_SKET_COEF_11_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_sketch.
++ coeff_11));
++ REG_SET_SLICE(ul_ie_mat3, MRV_IMGEFF_SKET_COEF_12_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_sketch.
++ coeff_12));
++ REG_SET_SLICE(ul_ie_mat3, MRV_IMGEFF_SKET_COEF_13_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_sketch.
++ coeff_13));
++ REG_SET_SLICE(ul_ie_mat4, MRV_IMGEFF_SKET_COEF_21_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_sketch.
++ coeff_21));
++ REG_SET_SLICE(ul_ie_mat4, MRV_IMGEFF_SKET_COEF_22_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_sketch.
++ coeff_22));
++ REG_SET_SLICE(ul_ie_mat4, MRV_IMGEFF_SKET_COEF_23_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_sketch.
++ coeff_23));
++ REG_SET_SLICE(ul_ie_mat4, MRV_IMGEFF_SKET_COEF_31_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_sketch.
++ coeff_31));
++ REG_SET_SLICE(ul_ie_mat5, MRV_IMGEFF_SKET_COEF_32_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_sketch.
++ coeff_32));
++ REG_SET_SLICE(ul_ie_mat5, MRV_IMGEFF_SKET_COEF_33_4,
++ ci_isp_ie_mx_dec2_reg_val(ie_config->mat_sketch.
++ coeff_33));
++
++ /* write changed values back to registers */
++ REG_WRITE(mrv_reg->img_eff_ctrl, ul_ie_ctrl);
++ REG_WRITE(mrv_reg->img_eff_color_sel, ul_ie_csel);
++ REG_WRITE(mrv_reg->img_eff_tint, ul_ie_tint);
++ REG_WRITE(mrv_reg->img_eff_mat_1, ul_ie_mat1);
++ REG_WRITE(mrv_reg->img_eff_mat_2, ul_ie_mat2);
++ REG_WRITE(mrv_reg->img_eff_mat_3, ul_ie_mat3);
++ REG_WRITE(mrv_reg->img_eff_mat_4, ul_ie_mat4);
++ REG_WRITE(mrv_reg->img_eff_mat_5, ul_ie_mat5);
++
++ /* frame synchronous update of shadow registers */
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_GEN_CFG_UPD, ON);
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Applies the new image stabilisation settings to the module.
++ */
++int ci_isp_is_set_config(const struct ci_isp_is_config *is_config)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ if (!is_config) {
++ eprintk("is_config NULL");
++ return CI_STATUS_NULL_POINTER;
++ }
++
++ /* set maximal margin distance for X */
++ if (is_config->max_dx > MRV_IS_IS_MAX_DX_MAX) {
++ REG_SET_SLICE(mrv_reg->isp_is_max_dx, MRV_IS_IS_MAX_DX,
++ (u32) (MRV_IS_IS_MAX_DX_MAX));
++ } else {
++ REG_SET_SLICE(mrv_reg->isp_is_max_dx, MRV_IS_IS_MAX_DX,
++ (u32) (is_config->max_dx));
++ }
++
++ /* set maximal margin distance for Y */
++ if (is_config->max_dy > MRV_IS_IS_MAX_DY_MAX) {
++ REG_SET_SLICE(mrv_reg->isp_is_max_dy, MRV_IS_IS_MAX_DY,
++ (u32) (MRV_IS_IS_MAX_DY_MAX));
++ } else {
++ REG_SET_SLICE(mrv_reg->isp_is_max_dy, MRV_IS_IS_MAX_DY,
++ (u32) (is_config->max_dy));
++ }
++
++ /* set H offset */
++ REG_SET_SLICE(mrv_reg->isp_is_h_offs, MRV_IS_IS_H_OFFS,
++ (u32) (is_config->mrv_is_window.hoffs));
++ /* set V offset */
++ REG_SET_SLICE(mrv_reg->isp_is_v_offs, MRV_IS_IS_V_OFFS,
++ (u32) (is_config->mrv_is_window.voffs));
++ /* set H size */
++ REG_SET_SLICE(mrv_reg->isp_is_h_size, MRV_IS_IS_H_SIZE,
++ (u32) (is_config->mrv_is_window.hsize));
++ /* set V size */
++ REG_SET_SLICE(mrv_reg->isp_is_v_size, MRV_IS_IS_V_SIZE,
++ (u32) (is_config->mrv_is_window.vsize));
++
++ return CI_STATUS_SUCCESS;
++}
++
++static int ci_isp_bls_set_fixed_values(const struct ci_isp_bls_subtraction
++ *bls_subtraction)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ if (!bls_subtraction)
++ return CI_STATUS_NULL_POINTER;
++
++ if ((bls_subtraction->fixed_a > MRV_ISP_BLS_FIX_SUB_MAX) ||
++ (bls_subtraction->fixed_b > MRV_ISP_BLS_FIX_SUB_MAX) ||
++ (bls_subtraction->fixed_c > MRV_ISP_BLS_FIX_SUB_MAX) ||
++ (bls_subtraction->fixed_d > MRV_ISP_BLS_FIX_SUB_MAX) ||
++ (bls_subtraction->fixed_a < (s16) MRV_ISP_BLS_FIX_SUB_MIN) ||
++ (bls_subtraction->fixed_b < (s16) MRV_ISP_BLS_FIX_SUB_MIN) ||
++ (bls_subtraction->fixed_c < (s16) MRV_ISP_BLS_FIX_SUB_MIN) ||
++ (bls_subtraction->fixed_d < (s16) MRV_ISP_BLS_FIX_SUB_MIN)) {
++ return CI_STATUS_OUTOFRANGE;
++ } else {
++ /* we are in this path */
++ REG_SET_SLICE(mrv_reg->isp_bls_a_fixed, MRV_BLS_BLS_A_FIXED,
++ bls_subtraction->fixed_a);
++ REG_SET_SLICE(mrv_reg->isp_bls_b_fixed, MRV_BLS_BLS_B_FIXED, \
++ bls_subtraction->fixed_b);
++ REG_SET_SLICE(mrv_reg->isp_bls_c_fixed, MRV_BLS_BLS_C_FIXED,
++ bls_subtraction->fixed_c);
++ REG_SET_SLICE(mrv_reg->isp_bls_d_fixed, MRV_BLS_BLS_D_FIXED,
++ bls_subtraction->fixed_d);
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Sets the desired configuration values to the BLS registers,
++ * if possible. In the case the parameter (bls_config == NULL)
++ * the BLS module will be deactivated.
++ */
++int ci_isp_bls_set_config(const struct ci_isp_bls_config *bls_config)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 isp_bls_ctrl = 0;
++
++ int error = CI_STATUS_FAILURE;
++
++ if (!bls_config) {
++ /* disable the BLS module */
++ REG_SET_SLICE(mrv_reg->isp_bls_ctrl,
++ MRV_BLS_BLS_ENABLE, DISABLE);
++ return CI_STATUS_SUCCESS;
++ }
++
++ /* measurement window 2, enable_window =0 */
++ if (bls_config->isp_bls_window2.enable_window) {
++ if ((bls_config->isp_bls_window2.start_h >
++ MRV_BLS_BLS_H2_START_MAX)
++ || (bls_config->isp_bls_window2.stop_h >
++ MRV_BLS_BLS_H2_STOP_MAX)
++ || (bls_config->isp_bls_window2.start_v >
++ MRV_BLS_BLS_V2_START_MAX)
++ || (bls_config->isp_bls_window2.stop_v >
++ MRV_BLS_BLS_V2_STOP_MAX)) {
++ return CI_STATUS_OUTOFRANGE;
++ } else {
++ REG_SET_SLICE(mrv_reg->isp_bls_h2_start,
++ MRV_BLS_BLS_H2_START,
++ bls_config->isp_bls_window2.start_h);
++ REG_SET_SLICE(mrv_reg->isp_bls_h2_stop,
++ MRV_BLS_BLS_H2_STOP,
++ bls_config->isp_bls_window2.stop_h);
++ REG_SET_SLICE(mrv_reg->isp_bls_v2_start,
++ MRV_BLS_BLS_V2_START,
++ bls_config->isp_bls_window2.start_v);
++ REG_SET_SLICE(mrv_reg->isp_bls_v2_stop,
++ MRV_BLS_BLS_V2_STOP,
++ bls_config->isp_bls_window2.stop_v);
++ }
++ }
++
++ /* measurement window 1, enable_window=0 */
++ if (bls_config->isp_bls_window1.enable_window) {
++ if ((bls_config->isp_bls_window1.start_h >
++ MRV_BLS_BLS_H1_START_MAX)
++ || (bls_config->isp_bls_window1.stop_h >
++ MRV_BLS_BLS_H1_STOP_MAX)
++ || (bls_config->isp_bls_window1.start_v >
++ MRV_BLS_BLS_V1_START_MAX)
++ || (bls_config->isp_bls_window1.stop_v >
++ MRV_BLS_BLS_V1_STOP_MAX)) {
++ return CI_STATUS_OUTOFRANGE;
++ } else {
++ REG_SET_SLICE(mrv_reg->isp_bls_h1_start,
++ MRV_BLS_BLS_H1_START,
++ bls_config->isp_bls_window1.start_h);
++ REG_SET_SLICE(mrv_reg->isp_bls_h1_stop,
++ MRV_BLS_BLS_H1_STOP,
++ bls_config->isp_bls_window1.stop_h);
++ REG_SET_SLICE(mrv_reg->isp_bls_v1_start,
++ MRV_BLS_BLS_V1_START,
++ bls_config->isp_bls_window1.start_v);
++ REG_SET_SLICE(mrv_reg->isp_bls_v1_stop,
++ MRV_BLS_BLS_V1_STOP,
++ bls_config->isp_bls_window1.stop_v);
++ }
++ }
++
++ if (bls_config->bls_samples > MRV_BLS_BLS_SAMPLES_MAX) {
++ return CI_STATUS_OUTOFRANGE;
++ } else {
++ REG_SET_SLICE(mrv_reg->isp_bls_samples, MRV_BLS_BLS_SAMPLES,
++ bls_config->bls_samples);
++ }
++
++ /* fixed subtraction values, enable_automatic=0 */
++ if (!bls_config->enable_automatic) {
++ error = ci_isp_bls_set_fixed_values(
++ &(bls_config->bls_subtraction));
++ if (error != CI_STATUS_SUCCESS)
++ return error;
++ }
++
++ if ((bls_config->disable_h) || (bls_config->disable_v))
++ return CI_STATUS_OUTOFRANGE;
++
++ isp_bls_ctrl = REG_READ(mrv_reg->isp_bls_ctrl);
++
++ /* enable measurement window(s) */
++ REG_SET_SLICE(isp_bls_ctrl, MRV_BLS_WINDOW_ENABLE,
++ ((bls_config->isp_bls_window1.enable_window)
++ ? MRV_BLS_WINDOW_ENABLE_WND1 : 0) |
++ ((bls_config->isp_bls_window2.enable_window)
++ ? MRV_BLS_WINDOW_ENABLE_WND2 : 0));
++
++ /* set Mode */
++ REG_SET_SLICE(isp_bls_ctrl, MRV_BLS_BLS_MODE,
++ (bls_config->enable_automatic) ? MRV_BLS_BLS_MODE_MEAS :
++ MRV_BLS_BLS_MODE_FIX);
++
++ /* enable module */
++ REG_SET_SLICE(isp_bls_ctrl, MRV_BLS_BLS_ENABLE, ENABLE);
++
++ /* write into register */
++ REG_WRITE(mrv_reg->isp_bls_ctrl, isp_bls_ctrl);
++
++ return CI_STATUS_SUCCESS;
++}
++
++#define RSZ_FLAGS_MASK (RSZ_UPSCALE_ENABLE | RSZ_SCALER_BYPASS)
++
++/*
++ * writes the scaler values to the appropriate Marvin registers.
++ */
++void ci_isp_res_set_main_resize(const struct ci_isp_scale *scale,
++ enum ci_isp_conf_update_time update_time,
++ const struct ci_isp_rsz_lut *rsz_lut)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 mrsz_ctrl = REG_READ(mrv_reg->mrsz_ctrl);
++ u32 i;
++ int upscaling = false;
++
++ /* flags must be "outside" scaler value */
++ WARN_ON(!((RSZ_FLAGS_MASK & MRV_RSZ_SCALE_MASK) == 0));
++ WARN_ON(!((scale->scale_hy & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX));
++ WARN_ON(!((scale->scale_hcb & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX));
++ WARN_ON(!((scale->scale_hcr & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX));
++ WARN_ON(!((scale->scale_vy & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX));
++ WARN_ON(!((scale->scale_vc & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX));
++
++ /* horizontal luminance scale factor */
++ dprintk(1, "scale_hy = %d( %x )", scale->scale_hy, scale->scale_hy);
++
++ if (scale->scale_hy & RSZ_SCALER_BYPASS) {
++ /* disable (bypass) scaler */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_HY_ENABLE, DISABLE);
++ } else {
++ /* enable scaler */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_HY_ENABLE, ENABLE);
++ /* program scale factor and phase */
++ REG_SET_SLICE(mrv_reg->mrsz_scale_hy, MRV_MRSZ_SCALE_HY,
++ (u32) scale->scale_hy);
++ REG_SET_SLICE(mrv_reg->mrsz_phase_hy, MRV_MRSZ_PHASE_HY,
++ (u32) scale->phase_hy);
++
++ if (scale->scale_hy & RSZ_UPSCALE_ENABLE) {
++ /* enable upscaling mode */
++ dprintk(1, "enable up scale");
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_HY_UP,
++ MRV_MRSZ_SCALE_HY_UP_UPSCALE);
++ /* scaler and upscaling enabled */
++ upscaling = true;
++ } else
++ /* disable upscaling mode */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_HY_UP,
++ MRV_MRSZ_SCALE_HY_UP_DOWNSCALE);
++ }
++
++ /* horizontal chrominance scale factors */
++ WARN_ON(!((scale->scale_hcb & RSZ_FLAGS_MASK) == (scale->scale_hcr &
++ RSZ_FLAGS_MASK)));
++ dprintk(1, "scale_hcb = %d( %x )", scale->scale_hcb, scale->scale_hcb);
++
++ if (scale->scale_hcb & RSZ_SCALER_BYPASS) {
++ /* disable (bypass) scaler */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_HC_ENABLE, DISABLE);
++ } else {
++ /* enable scaler */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_HC_ENABLE, ENABLE);
++ /* program scale factor and phase */
++ REG_SET_SLICE(mrv_reg->mrsz_scale_hcb, MRV_MRSZ_SCALE_HCB,
++ (u32) scale->scale_hcb);
++ REG_SET_SLICE(mrv_reg->mrsz_scale_hcr, MRV_MRSZ_SCALE_HCB,
++ (u32) scale->scale_hcr);
++ REG_SET_SLICE(mrv_reg->mrsz_phase_hc, MRV_MRSZ_PHASE_HC,
++ (u32) scale->phase_hc);
++
++ if (scale->scale_hcb & RSZ_UPSCALE_ENABLE) {
++ /* enable upscaling mode */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_HC_UP,
++ MRV_MRSZ_SCALE_HC_UP_UPSCALE);
++ /* scaler and upscaling enabled */
++ upscaling = true;
++ } else {
++ /* disable upscaling mode */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_HC_UP,
++ MRV_MRSZ_SCALE_HC_UP_DOWNSCALE);
++ }
++ }
++
++ /* vertical luminance scale factor */
++ dprintk(1, "scale_vy = %d ( %x )", scale->scale_vy, scale->scale_vy);
++
++ if (scale->scale_vy & RSZ_SCALER_BYPASS) {
++ /* disable (bypass) scaler */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_VY_ENABLE,
++ DISABLE);
++ } else {
++ /* enable scaler */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_VY_ENABLE, ENABLE);
++ /* program scale factor and phase */
++ REG_SET_SLICE(mrv_reg->mrsz_scale_vy, MRV_MRSZ_SCALE_VY,
++ (u32) scale->scale_vy);
++ REG_SET_SLICE(mrv_reg->mrsz_phase_vy, MRV_MRSZ_PHASE_VY,
++ (u32) scale->phase_vy);
++
++ if (scale->scale_vy & RSZ_UPSCALE_ENABLE) {
++ /* enable upscaling mode */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_VY_UP,
++ MRV_MRSZ_SCALE_VY_UP_UPSCALE);
++ /* scaler and upscaling enabled */
++ upscaling = true;
++ } else {
++ /* disable upscaling mode */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_VY_UP,
++ MRV_MRSZ_SCALE_VY_UP_DOWNSCALE);
++ }
++ }
++
++ /* vertical chrominance scale factor */
++ dprintk(1, "scale_vc = %d( %x )", scale->scale_vc, scale->scale_vc);
++
++ if (scale->scale_vc & RSZ_SCALER_BYPASS) {
++ /* disable (bypass) scaler */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_VC_ENABLE,
++ DISABLE);
++ } else {
++ /* enable scaler */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_VC_ENABLE, ENABLE);
++ /* program scale factor and phase */
++ REG_SET_SLICE(mrv_reg->mrsz_scale_vc, MRV_MRSZ_SCALE_VC,
++ (u32) scale->scale_vc);
++ REG_SET_SLICE(mrv_reg->mrsz_phase_vc, MRV_MRSZ_PHASE_VC,
++ (u32) scale->phase_vc);
++
++ if (scale->scale_vc & RSZ_UPSCALE_ENABLE) {
++ /* enable upscaling mode */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_VC_UP,
++ MRV_MRSZ_SCALE_VC_UP_UPSCALE);
++ /* scaler and upscaling enabled */
++ upscaling = true;
++ } else {
++ /* disable upscaling mode */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_SCALE_VC_UP,
++ MRV_MRSZ_SCALE_VC_UP_DOWNSCALE);
++ }
++ }
++
++ /* apply upscaling lookup table */
++ if (rsz_lut) {
++ for (i = 0; i <= MRV_MRSZ_SCALE_LUT_ADDR_MASK; i++) {
++ REG_SET_SLICE(mrv_reg->mrsz_scale_lut_addr,
++ MRV_MRSZ_SCALE_LUT_ADDR, i);
++ REG_SET_SLICE(mrv_reg->mrsz_scale_lut,
++ MRV_MRSZ_SCALE_LUT,
++ rsz_lut->rsz_lut[i]);
++ }
++ } else if (upscaling) {
++ eprintk("Upscaling requires lookup table!");
++ WARN_ON(1);
++ }
++
++ /* handle immediate update flag and write mrsz_ctrl */
++ switch (update_time) {
++ case CI_ISP_CFG_UPDATE_FRAME_SYNC:
++ /* frame synchronous update of shadow registers */
++ REG_WRITE(mrv_reg->mrsz_ctrl, mrsz_ctrl);
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_GEN_CFG_UPD, ON);
++ break;
++ case CI_ISP_CFG_UPDATE_IMMEDIATE:
++ /* immediate update of shadow registers */
++ REG_SET_SLICE(mrsz_ctrl, MRV_MRSZ_CFG_UPD, ON);
++ REG_WRITE(mrv_reg->mrsz_ctrl, mrsz_ctrl);
++ break;
++ case CI_ISP_CFG_UPDATE_LATER:
++ default:
++ /* no update from within this function */
++ REG_WRITE(mrv_reg->mrsz_ctrl, mrsz_ctrl);
++ break;
++ }
++}
++
++/*
++ * writes the scaler values to the appropriate Marvin registers.
++ */
++void ci_isp_res_set_self_resize(const struct ci_isp_scale *scale,
++ enum ci_isp_conf_update_time update_time,
++ const struct ci_isp_rsz_lut *rsz_lut)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 srsz_ctrl = REG_READ(mrv_reg->srsz_ctrl);
++ u32 i;
++ int upscaling = false;
++
++ /* flags must be "outside" scaler value */
++ WARN_ON(!((RSZ_FLAGS_MASK & MRV_RSZ_SCALE_MASK) == 0));
++ WARN_ON(!((scale->scale_hy & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX));
++ WARN_ON(!((scale->scale_hcb & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX));
++ WARN_ON(!((scale->scale_hcr & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX));
++ WARN_ON(!((scale->scale_vy & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX));
++ WARN_ON(!((scale->scale_vc & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX));
++
++ /* horizontal luminance scale factor */
++ dprintk(1, "scale_hy = %d,%x", scale->scale_hy, scale->scale_hy);
++
++ if (scale->scale_hy & RSZ_SCALER_BYPASS) {
++ /* disable (bypass) scaler */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HY_ENABLE,
++ DISABLE);
++ } else {
++ /* enable scaler */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HY_ENABLE, ENABLE);
++ /* program scale factor and phase */
++ REG_SET_SLICE(mrv_reg->srsz_scale_hy, MRV_SRSZ_SCALE_HY,
++ (u32) scale->scale_hy);
++ REG_SET_SLICE(mrv_reg->srsz_phase_hy, MRV_SRSZ_PHASE_HY,
++ (u32) scale->phase_hy);
++
++ if (scale->scale_hy & RSZ_UPSCALE_ENABLE) {
++ /* enable upscaling mode */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HY_UP,
++ MRV_SRSZ_SCALE_HY_UP_UPSCALE);
++ /* scaler and upscaling enabled */
++ upscaling = true;
++ } else {
++ /* disable upscaling mode */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HY_UP,
++ MRV_SRSZ_SCALE_HY_UP_DOWNSCALE);
++ }
++ }
++
++ /* horizontal chrominance scale factors */
++ WARN_ON(!((scale->scale_hcb & RSZ_FLAGS_MASK) == (scale->scale_hcr &
++ RSZ_FLAGS_MASK)));
++
++ dprintk(1, "scale_hcb = %d,%x", scale->scale_hcb, scale->scale_hcb);
++
++ if (scale->scale_hcb & RSZ_SCALER_BYPASS) {
++ /* disable (bypass) scaler */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HC_ENABLE,
++ DISABLE);
++ } else {
++ /* enable scaler */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HC_ENABLE, ENABLE);
++ /* program scale factor and phase */
++ REG_SET_SLICE(mrv_reg->srsz_scale_hcb, MRV_SRSZ_SCALE_HCB,
++ (u32) scale->scale_hcb);
++ REG_SET_SLICE(mrv_reg->srsz_scale_hcr, MRV_SRSZ_SCALE_HCB,
++ (u32) scale->scale_hcr);
++
++ REG_SET_SLICE(mrv_reg->srsz_phase_hc, MRV_SRSZ_PHASE_HC,
++ (u32) scale->phase_hc);
++
++ if (scale->scale_hcb & RSZ_UPSCALE_ENABLE) {
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HC_UP,
++ MRV_SRSZ_SCALE_HC_UP_UPSCALE);
++ /* scaler and upscaling enabled */
++ upscaling = true;
++ } else {
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HC_UP,
++ MRV_SRSZ_SCALE_HC_UP_DOWNSCALE);
++ }
++ }
++
++ /* vertical luminance scale factor */
++ dprintk(1, "scale_vy = %d,%x", scale->scale_vy, scale->scale_vy);
++
++ if (scale->scale_vy & RSZ_SCALER_BYPASS) {
++ /* disable (bypass) scaler */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VY_ENABLE,
++ DISABLE);
++ } else {
++ /* enable scaler */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VY_ENABLE, ENABLE);
++ /* program scale factor and phase */
++ REG_SET_SLICE(mrv_reg->srsz_scale_vy, MRV_SRSZ_SCALE_VY,
++ (u32) scale->scale_vy);
++ REG_SET_SLICE(mrv_reg->srsz_phase_vy, MRV_SRSZ_PHASE_VY,
++ (u32) scale->phase_vy);
++
++ if (scale->scale_vy & RSZ_UPSCALE_ENABLE) {
++ /* enable upscaling mode */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VY_UP,
++ MRV_SRSZ_SCALE_VY_UP_UPSCALE);
++ /* scaler and upscaling enabled */
++ upscaling = true;
++ } else {
++ /* disable upscaling mode */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VY_UP,
++ MRV_SRSZ_SCALE_VY_UP_DOWNSCALE);
++ }
++ }
++
++ /* vertical chrominance scale factor */
++ dprintk(1, "scale_vc = %d,%x", scale->scale_vc, scale->scale_vc);
++
++ if (scale->scale_vc & RSZ_SCALER_BYPASS) {
++ /* disable (bypass) scaler */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VC_ENABLE,
++ DISABLE);
++ } else {
++ /* enable scaler */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VC_ENABLE, ENABLE);
++ /* program scale factor and phase */
++ REG_SET_SLICE(mrv_reg->srsz_scale_vc, MRV_SRSZ_SCALE_VC,
++ (u32) scale->scale_vc);
++ REG_SET_SLICE(mrv_reg->srsz_phase_vc, MRV_SRSZ_PHASE_VC,
++ (u32) scale->phase_vc);
++
++ if (scale->scale_vc & RSZ_UPSCALE_ENABLE) {
++ /* enable upscaling mode */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VC_UP,
++ MRV_SRSZ_SCALE_VC_UP_UPSCALE);
++ /* scaler and upscaling enabled */
++ upscaling = true;
++ } else {
++ /* disable upscaling mode */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VC_UP,
++ MRV_SRSZ_SCALE_VC_UP_DOWNSCALE);
++ }
++ }
++
++ /* apply upscaling lookup table */
++ if (rsz_lut) {
++ for (i = 0; i <= MRV_SRSZ_SCALE_LUT_ADDR_MASK; i++) {
++ REG_SET_SLICE(mrv_reg->srsz_scale_lut_addr,
++ MRV_SRSZ_SCALE_LUT_ADDR, i);
++ REG_SET_SLICE(mrv_reg->srsz_scale_lut,
++ MRV_SRSZ_SCALE_LUT,
++ rsz_lut->rsz_lut[i]);
++ }
++ } else if (upscaling) {
++ eprintk("Upscaling requires lookup table!");
++ WARN_ON(1);
++ }
++
++ /* handle immediate update flag and write mrsz_ctrl */
++ switch (update_time) {
++ case CI_ISP_CFG_UPDATE_FRAME_SYNC:
++ /* frame synchronous update of shadow registers */
++ REG_WRITE(mrv_reg->srsz_ctrl, srsz_ctrl);
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_GEN_CFG_UPD,
++ ON);
++ break;
++ case CI_ISP_CFG_UPDATE_IMMEDIATE:
++ /* immediate update of shadow registers */
++ REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_CFG_UPD, ON);
++ REG_WRITE(mrv_reg->srsz_ctrl, srsz_ctrl);
++ break;
++ case CI_ISP_CFG_UPDATE_LATER:
++ default:
++ /* no update from within this function */
++ REG_WRITE(mrv_reg->srsz_ctrl, srsz_ctrl);
++ break;
++ }
++}
++
++#if MRV_SUPPORT_SL
++
++/* bad pixel table */
++static struct ci_sensor_bp_table bp_table = { 0 };
++
++/*
++ * Initialization of the Bad Pixel Detection and Correction.
++ */
++int ci_bp_init(const struct ci_isp_bp_corr_config *bp_corr_config,
++ const struct ci_isp_bp_det_config *bp_det_config)
++{
++ int error = CI_STATUS_SUCCESS;
++
++ /* number of table elements */
++ /* number of table elements */
++#define MRVSLS_BPINIT_MAX_TABLE 2048
++
++ /* check the parameters */
++ if (!bp_corr_config || !bp_det_config)
++ return CI_STATUS_NULL_POINTER;
++
++ if (bp_corr_config->bp_corr_type == CI_ISP_BP_CORR_TABLE) {
++ /* set badpixel correction */
++ error |= ci_isp_set_bp_correction(bp_corr_config);
++ /* set badpixel detection */
++ error |= ci_isp_set_bp_detection(bp_det_config);
++ /* zero element inside */
++ bp_table.bp_number = 0;
++ if (!bp_table.bp_table_elem) {
++ /* allocate mem space for the table */
++ bp_table.bp_table_elem =
++ (struct ci_sensor_bp_table_elem *)
++ kmalloc((sizeof(struct ci_sensor_bp_table_elem)*
++ MRVSLS_BPINIT_MAX_TABLE), GFP_KERNEL);
++ if (!bp_table.bp_table_elem)
++ error |= CI_STATUS_FAILURE;
++ }
++ /* max count of elements */
++ bp_table.bp_table_elem_num = MRVSLS_BPINIT_MAX_TABLE;
++ /* Clear Interrupt Status */
++ error |= ci_isp_clear_bp_int();
++ } else {
++ if (bp_corr_config->bp_corr_type == CI_ISP_BP_CORR_DIRECT) {
++ /* set badpixel correction */
++ error |= ci_isp_set_bp_correction(bp_corr_config);
++ /* set badpixel detection */
++ error |= ci_isp_set_bp_detection(NULL);
++ } else {
++ return CI_STATUS_NOTSUPP;
++ }
++ }
++ return error;
++}
++
++/*
++ * Disable the Bad Pixel Detection and Correction.
++ */
++int ci_bp_end(const struct ci_isp_bp_corr_config *bp_corr_config)
++{
++ int uiResult = CI_STATUS_SUCCESS;
++
++ /* check the parameter */
++ if (!bp_corr_config)
++ return CI_STATUS_NULL_POINTER;
++
++ /* disable badpixel correction */
++ uiResult |= ci_isp_set_bp_correction(NULL);
++
++ /* disable badpixel detection */
++ uiResult |= ci_isp_set_bp_detection(NULL);
++
++ if (bp_corr_config->bp_corr_type == CI_ISP_BP_CORR_TABLE) {
++ /* Clear Interrupt Status */
++ uiResult |= ci_isp_clear_bp_int();
++
++ /* deallocate BP Table */
++ kfree(bp_table.bp_table_elem);
++ bp_table.bp_table_elem = NULL;
++ }
++ return uiResult;
++}
++#endif
+diff --git a/drivers/media/video/mrstci/mrstisp/mrstisp_isp.c b/drivers/media/video/mrstci/mrstisp/mrstisp_isp.c
+new file mode 100644
+index 0000000..7c96bc4
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstisp/mrstisp_isp.c
+@@ -0,0 +1,1993 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include "mrstisp_stdinc.h"
++
++int mrst_isp_set_color_conversion_ex(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_0, MRV_ISP_CC_COEFF_0, 0x00001021);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_1, MRV_ISP_CC_COEFF_1, 0x00001040);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_2, MRV_ISP_CC_COEFF_2, 0x0000100D);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_3, MRV_ISP_CC_COEFF_3, 0x00000FED);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_4, MRV_ISP_CC_COEFF_4, 0x00000FDB);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_5, MRV_ISP_CC_COEFF_5, 0x00001038);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_6, MRV_ISP_CC_COEFF_6, 0x00001038);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_7, MRV_ISP_CC_COEFF_7, 0x00000FD1);
++ REG_SET_SLICE(mrv_reg->isp_cc_coeff_8, MRV_ISP_CC_COEFF_8, 0x00000FF7);
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Selects the ISP path that will become active while processing
++ * data coming from an image sensor configured by the given ISI
++ * configuration struct.
++ */
++enum ci_isp_path ci_isp_select_path(const struct ci_sensor_config *isi_cfg,
++ u8 *words_per_pixel)
++{
++ u8 words;
++ enum ci_isp_path ret_val;
++
++ switch (isi_cfg->mode) {
++ case SENSOR_MODE_DATA:
++ ret_val = CI_ISP_PATH_RAW;
++ words = 1;
++ break;
++ case SENSOR_MODE_PICT:
++ ret_val = CI_ISP_PATH_RAW;
++ words = 1;
++ break;
++ case SENSOR_MODE_RGB565:
++ ret_val = CI_ISP_PATH_RAW;
++ words = 2;
++ break;
++ case SENSOR_MODE_BT601:
++ ret_val = CI_ISP_PATH_YCBCR;
++ words = 2;
++ break;
++ case SENSOR_MODE_BT656:
++ ret_val = CI_ISP_PATH_YCBCR;
++ words = 2;
++ break;
++ case SENSOR_MODE_BAYER:
++ ret_val = CI_ISP_PATH_BAYER;
++ words = 1;
++ break;
++
++ case SENSOR_MODE_SMIA:
++ switch (isi_cfg->smia_mode) {
++ case SENSOR_SMIA_MODE_RAW_12:
++ case SENSOR_SMIA_MODE_RAW_10:
++ case SENSOR_SMIA_MODE_RAW_8:
++ case SENSOR_SMIA_MODE_RAW_8_TO_10_DECOMP:
++ ret_val = CI_ISP_PATH_BAYER;
++ words = 1;
++ break;
++ case SENSOR_SMIA_MODE_YUV_422:
++ ret_val = CI_ISP_PATH_YCBCR;
++ words = 2;
++ break;
++ case SENSOR_SMIA_MODE_YUV_420:
++ case SENSOR_SMIA_MODE_RGB_444:
++ case SENSOR_SMIA_MODE_RGB_565:
++ case SENSOR_SMIA_MODE_RGB_888:
++ case SENSOR_SMIA_MODE_COMPRESSED:
++ case SENSOR_SMIA_MODE_RAW_7:
++ case SENSOR_SMIA_MODE_RAW_6:
++ default:
++ ret_val = CI_ISP_PATH_RAW;
++ words = 1;
++ break;
++ }
++ break;
++
++ case SENSOR_MODE_MIPI:
++ switch (isi_cfg->mipi_mode) {
++ case SENSOR_MIPI_MODE_RAW_12:
++ case SENSOR_MIPI_MODE_RAW_10:
++ case SENSOR_MIPI_MODE_RAW_8:
++ ret_val = CI_ISP_PATH_BAYER;
++ words = 1;
++ break;
++ case SENSOR_MIPI_MODE_YUV422_8:
++ case SENSOR_MIPI_MODE_YUV422_10:
++ ret_val = CI_ISP_PATH_YCBCR;
++ words = 2;
++ break;
++ case SENSOR_MIPI_MODE_YUV420_8:
++ case SENSOR_MIPI_MODE_YUV420_10:
++ case SENSOR_MIPI_MODE_LEGACY_YUV420_8:
++ case SENSOR_MIPI_MODE_YUV420_CSPS_8:
++ case SENSOR_MIPI_MODE_YUV420_CSPS_10:
++ case SENSOR_MIPI_MODE_RGB444:
++ case SENSOR_MIPI_MODE_RGB555:
++ case SENSOR_MIPI_MODE_RGB565:
++ case SENSOR_MIPI_MODE_RGB666:
++ case SENSOR_MIPI_MODE_RGB888:
++ case SENSOR_MIPI_MODE_RAW_7:
++ case SENSOR_MIPI_MODE_RAW_6:
++ default:
++ ret_val = CI_ISP_PATH_RAW;
++ words = 1;
++ break;
++ }
++ break;
++ case SENSOR_MODE_BAY_BT656:
++ ret_val = CI_ISP_PATH_BAYER;
++ words = 1;
++ break;
++ case SENSOR_MODE_RAW_BT656:
++ ret_val = CI_ISP_PATH_RAW;
++ words = 1;
++ break;
++ default:
++ ret_val = CI_ISP_PATH_UNKNOWN;
++ words = 1;
++ }
++
++ if (words_per_pixel)
++ *words_per_pixel = words ;
++ return ret_val;
++}
++
++/*
++ * configures the input acquisition according to the
++ * given config structure
++ */
++int ci_isp_set_input_aquisition(const struct ci_sensor_config *isi_cfg)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 isp_ctrl = REG_READ(mrv_reg->isp_ctrl);
++ u32 isp_acq_prop = REG_READ(mrv_reg->isp_acq_prop);
++ /* factor between pixel count and amount of bytes to sample */
++ u8 sample_factor;
++ /* number of additional black lines at frame start */
++ u8 black_lines;
++
++ if (ci_isp_select_path(isi_cfg, &sample_factor)
++ == CI_ISP_PATH_UNKNOWN) {
++ eprintk("failed to select path");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ switch (isi_cfg->mode) {
++ case SENSOR_MODE_DATA:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_MODE,
++ MRV_ISP_ISP_MODE_DATA);
++ break;
++ case SENSOR_MODE_PICT:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_MODE,
++ MRV_ISP_ISP_MODE_RAW);
++ break;
++ case SENSOR_MODE_RGB565:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_MODE,
++ MRV_ISP_ISP_MODE_RAW);
++ break;
++ case SENSOR_MODE_BT601:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_MODE,
++ MRV_ISP_ISP_MODE_601);
++ break;
++ case SENSOR_MODE_BT656:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_MODE,
++ MRV_ISP_ISP_MODE_656);
++ break;
++ case SENSOR_MODE_BAYER:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_MODE,
++ MRV_ISP_ISP_MODE_RGB);
++ break;
++ case SENSOR_MODE_BAY_BT656:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_MODE,
++ MRV_ISP_ISP_MODE_RGB656);
++ break;
++ case SENSOR_MODE_RAW_BT656:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_MODE,
++ MRV_ISP_ISP_MODE_RAW656);
++ break;
++
++ case SENSOR_MODE_SMIA:
++ switch (isi_cfg->smia_mode) {
++ case SENSOR_SMIA_MODE_RAW_12:
++ case SENSOR_SMIA_MODE_RAW_10:
++ case SENSOR_SMIA_MODE_RAW_8:
++ case SENSOR_SMIA_MODE_RAW_8_TO_10_DECOMP:
++ case SENSOR_SMIA_MODE_RAW_7:
++ case SENSOR_SMIA_MODE_RAW_6:
++ case SENSOR_SMIA_MODE_YUV_422:
++ case SENSOR_SMIA_MODE_YUV_420:
++ case SENSOR_SMIA_MODE_RGB_888:
++ case SENSOR_SMIA_MODE_RGB_565:
++ case SENSOR_SMIA_MODE_RGB_444:
++ case SENSOR_SMIA_MODE_COMPRESSED:
++ return CI_STATUS_SUCCESS;
++ break;
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++ break;
++
++ case SENSOR_MODE_MIPI:
++ REG_SET_SLICE(isp_ctrl, MRV_ISP_ISP_MODE,
++ MRV_ISP_ISP_MODE_RGB);
++ REG_WRITE(mrv_reg->mipi_img_data_sel, 0x02b);
++ break;
++
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++
++ switch (isi_cfg->bus_width) {
++ case SENSOR_BUSWIDTH_12BIT:
++ /* 000- 12Bit external Interface */
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_INPUT_SELECTION,
++ MRV_ISP_INPUT_SELECTION_12EXT);
++ break;
++ case SENSOR_BUSWIDTH_10BIT_ZZ:
++ /* 001- 10Bit Interface, append 2 zeroes as LSBs */
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_INPUT_SELECTION,
++ MRV_ISP_INPUT_SELECTION_10ZERO);
++ break;
++ case SENSOR_BUSWIDTH_10BIT_EX:
++ /* 010- 10Bit Interface, append 2 MSBs as LSBs */
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_INPUT_SELECTION,
++ MRV_ISP_INPUT_SELECTION_10MSB);
++ break;
++ case SENSOR_BUSWIDTH_8BIT_ZZ:
++ /* 011- 8Bit Interface, append 4 zeroes as LSBs */
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_INPUT_SELECTION,
++ MRV_ISP_INPUT_SELECTION_8ZERO);
++ break;
++ case SENSOR_BUSWIDTH_8BIT_EX:
++ /* 100- 8Bit Interface, append 4 MSBs as LSBs */
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_INPUT_SELECTION,
++ MRV_ISP_INPUT_SELECTION_8MSB);
++ break;
++ /* 101...111 reserved */
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++
++ switch (isi_cfg->field_sel) {
++ case SENSOR_FIELDSEL_ODD:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_FIELD_SELECTION,
++ MRV_ISP_FIELD_SELECTION_ODD);
++ break;
++ case SENSOR_FIELDSEL_EVEN:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_FIELD_SELECTION,
++ MRV_ISP_FIELD_SELECTION_EVEN);
++ break;
++ case SENSOR_FIELDSEL_BOTH:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_FIELD_SELECTION,
++ MRV_ISP_FIELD_SELECTION_BOTH);
++ break;
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++
++ switch (isi_cfg->ycseq) {
++ case SENSOR_YCSEQ_CRYCBY:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_CCIR_SEQ,
++ MRV_ISP_CCIR_SEQ_CRYCBY);
++ break;
++ case SENSOR_YCSEQ_CBYCRY:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_CCIR_SEQ,
++ MRV_ISP_CCIR_SEQ_CBYCRY);
++ break;
++ case SENSOR_YCSEQ_YCRYCB:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_CCIR_SEQ,
++ MRV_ISP_CCIR_SEQ_YCRYCB);
++ break;
++ case SENSOR_YCSEQ_YCBYCR:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_CCIR_SEQ,
++ MRV_ISP_CCIR_SEQ_YCBYCR);
++ break;
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++
++ switch (isi_cfg->conv422) {
++ case SENSOR_CONV422_INTER:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_CONV_422,
++ MRV_ISP_CONV_422_INTER);
++ break;
++
++ case SENSOR_CONV422_NOCOSITED:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_CONV_422,
++ MRV_ISP_CONV_422_NONCO);
++ break;
++ case SENSOR_CONV422_COSITED:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_CONV_422,
++ MRV_ISP_CONV_422_CO);
++ break;
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++
++ switch (isi_cfg->bpat) {
++ case SENSOR_BPAT_BGBGGRGR:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_BAYER_PAT,
++ MRV_ISP_BAYER_PAT_BG);
++ break;
++ case SENSOR_BPAT_GBGBRGRG:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_BAYER_PAT,
++ MRV_ISP_BAYER_PAT_GB);
++ break;
++ case SENSOR_BPAT_GRGRBGBG:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_BAYER_PAT,
++ MRV_ISP_BAYER_PAT_GR);
++ break;
++ case SENSOR_BPAT_RGRGGBGB:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_BAYER_PAT,
++ MRV_ISP_BAYER_PAT_RG);
++ break;
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++
++ switch (isi_cfg->vpol) {
++ case SENSOR_VPOL_POS:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_VSYNC_POL, 1);
++ break;
++ case SENSOR_VPOL_NEG:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_VSYNC_POL, 0);
++ break;
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++
++ switch (isi_cfg->hpol) {
++ /* The trigger edge differs for vsync_pol and hsync_pol. */
++ /* vsync_pol = 1 triggers on positive edge whereas */
++ /* hsync_pol = 1 triggers on negative edge and vice versa */
++ case SENSOR_HPOL_SYNCPOS:
++ /* trigger on negative edge */
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_HSYNC_POL, 1);
++ break;
++ case SENSOR_HPOL_SYNCNEG:
++ /* trigger on positive edge */
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_HSYNC_POL, 0);
++ break;
++ case SENSOR_HPOL_REFPOS:
++ /* trigger on positive edge */
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_HSYNC_POL, 0);
++ break;
++ case SENSOR_HPOL_REFNEG:
++ /* trigger on negative edge */
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_HSYNC_POL, 1);
++ break;
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++
++ switch (isi_cfg->edge) {
++ case SENSOR_EDGE_RISING:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_SAMPLE_EDGE, 1);
++ break;
++ case SENSOR_EDGE_FALLING:
++ REG_SET_SLICE(isp_acq_prop, MRV_ISP_SAMPLE_EDGE, 0);
++ break;
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++ dprintk(2, "isp_acq_prop = 0x%x", isp_acq_prop);
++
++ /* now write values to registers */
++ REG_WRITE(mrv_reg->isp_ctrl, isp_ctrl);
++ REG_WRITE(mrv_reg->isp_acq_prop, isp_acq_prop);
++
++ /* number of additional black lines at frame start */
++ switch (isi_cfg->bls) {
++ case SENSOR_BLS_OFF:
++ black_lines = 0;
++ break;
++ case SENSOR_BLS_TWO_LINES:
++ black_lines = 2;
++ break;
++ case SENSOR_BLS_FOUR_LINES:
++ black_lines = 4;
++ break;
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++
++ REG_SET_SLICE(mrv_reg->isp_acq_h_offs, MRV_ISP_ACQ_H_OFFS,
++ 0 * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_offs, MRV_ISP_ACQ_V_OFFS, 0);
++
++ dprintk(2, "res = %x", isi_cfg->res);
++ switch (isi_cfg->res) {
++ /* 88x72 */
++ case SENSOR_RES_QQCIF:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QQCIF_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QQCIF_SIZE_V + black_lines);
++ break;
++ /* 160x120 */
++ case SENSOR_RES_QQVGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QQVGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QQVGA_SIZE_V + black_lines);
++ break;
++ /* 176x144 */
++ case SENSOR_RES_QCIF:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QCIF_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QCIF_SIZE_V + black_lines);
++ break;
++ /* 320x240 */
++ case SENSOR_RES_QVGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QVGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QVGA_SIZE_V + black_lines);
++ break;
++ /* 352x288 */
++ case SENSOR_RES_CIF:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ CIF_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ CIF_SIZE_V + black_lines);
++ break;
++ /* 640x480 */
++ case SENSOR_RES_VGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ VGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ VGA_SIZE_V + black_lines);
++ break;
++ /* 800x600 */
++ case SENSOR_RES_SVGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ SVGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ SVGA_SIZE_V + black_lines);
++ break;
++ /* 1024x768 */
++ case SENSOR_RES_XGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ XGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ XGA_SIZE_V + black_lines);
++ break;
++ case SENSOR_RES_720P:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ RES_720P_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ RES_720P_SIZE_V + black_lines);
++ break;
++ /* 1280x960 */
++ case SENSOR_RES_XGA_PLUS:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ XGA_PLUS_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ XGA_PLUS_SIZE_V + black_lines);
++ break;
++ /* 1280x1024 */
++ case SENSOR_RES_SXGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ SXGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ SXGA_SIZE_V + black_lines);
++ break;
++ /* 1600x1200 */
++ case SENSOR_RES_UXGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QSVGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QSVGA_SIZE_V + black_lines);
++ break;
++ /* 1920x1280 */
++ case SENSOR_RES_1080P:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ 1920 * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ 1080 + black_lines);
++ break;
++ /* 2048x1536 */
++ case SENSOR_RES_QXGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QXGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QXGA_SIZE_V + black_lines);
++ break;
++ /* 2586x2048 */
++ case SENSOR_RES_QSXGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QSXGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QSXGA_SIZE_V + black_lines);
++ break;
++ /* 2600x2048 */
++ case SENSOR_RES_QSXGA_PLUS:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QSXGA_PLUS_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QSXGA_PLUS_SIZE_V + black_lines);
++ break;
++ /* 2600x1950 */
++ case SENSOR_RES_QSXGA_PLUS2:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QSXGA_PLUS2_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QSXGA_PLUS2_SIZE_V + black_lines);
++ break;
++ /* 2686x2048, 5.30M */
++ case SENSOR_RES_QSXGA_PLUS3:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QSXGA_PLUS3_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QSXGA_PLUS3_SIZE_V + black_lines);
++ break;
++ /* 2592*1944 5M */
++ case SENSOR_RES_QXGA_PLUS:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QXGA_PLUS_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QXGA_PLUS_SIZE_V + black_lines);
++ break;
++ /* 3200x2048, 6.56M */
++ case SENSOR_RES_WQSXGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ WQSXGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ WQSXGA_SIZE_V + black_lines);
++ break;
++ /* 3200x2400, 7.68M */
++ case SENSOR_RES_QUXGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ QUXGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ QUXGA_SIZE_V + black_lines);
++ break;
++ /* 3840x2400, 9.22M */
++ case SENSOR_RES_WQUXGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ WQUXGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ WQUXGA_SIZE_V + black_lines);
++ break;
++ /* 4096x3072, 12.59M */
++ case SENSOR_RES_HXGA:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ HXGA_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ HXGA_SIZE_V + black_lines);
++ break;
++ /* 4080x1024 */
++ case SENSOR_RES_YUV_HMAX:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ YUV_HMAX_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ YUV_HMAX_SIZE_V);
++ break;
++ /* 1024x4080 */
++ case SENSOR_RES_YUV_VMAX:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ YUV_VMAX_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ YUV_VMAX_SIZE_V);
++ break;
++ /* 4096x2048 */
++ case SENSOR_RES_RAWMAX:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ RAWMAX_SIZE_H);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ RAWMAX_SIZE_V);
++ break;
++ /* 352x240 */
++ case SENSOR_RES_BP1:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ BP1_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ BP1_SIZE_V);
++ break;
++ /* 720x480 */
++ case SENSOR_RES_L_AFM:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ L_AFM_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ L_AFM_SIZE_V);
++ break;
++ /* 128x96 */
++ case SENSOR_RES_M_AFM:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ M_AFM_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ M_AFM_SIZE_V);
++ break;
++ /* 64x32 */
++ case SENSOR_RES_S_AFM:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ S_AFM_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ S_AFM_SIZE_V);
++ break;
++ /* 1304x980 */
++ case SENSOR_RES_VGA_PLUS:
++ REG_SET_SLICE(mrv_reg->isp_acq_h_size, MRV_ISP_ACQ_H_SIZE,
++ VGA_PLUS_SIZE_H * sample_factor);
++ REG_SET_SLICE(mrv_reg->isp_acq_v_size, MRV_ISP_ACQ_V_SIZE,
++ VGA_PLUS_SIZE_V);
++ break;
++
++ default:
++ return CI_STATUS_NOTSUPP;
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * sets output window
++ */
++void ci_isp_set_output_formatter(const struct ci_isp_window *window,
++ enum ci_isp_conf_update_time update_time)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ if (window) {
++ /* set output window */
++ REG_SET_SLICE(mrv_reg->isp_out_h_offs, MRV_IS_IS_H_OFFS,
++ window->hoffs);
++ REG_SET_SLICE(mrv_reg->isp_out_v_offs, MRV_IS_IS_V_OFFS,
++ window->voffs);
++ REG_SET_SLICE(mrv_reg->isp_out_h_size, MRV_IS_IS_H_SIZE,
++ window->hsize);
++ REG_SET_SLICE(mrv_reg->isp_out_v_size, MRV_IS_IS_V_SIZE,
++ window->vsize);
++
++ REG_SET_SLICE(mrv_reg->isp_is_h_offs, MRV_IS_IS_H_OFFS, 0);
++ REG_SET_SLICE(mrv_reg->isp_is_v_offs, MRV_IS_IS_V_OFFS, 0);
++ REG_SET_SLICE(mrv_reg->isp_is_h_size, MRV_IS_IS_H_SIZE,
++ window->hsize);
++ REG_SET_SLICE(mrv_reg->isp_is_v_size, MRV_IS_IS_V_SIZE,
++ window->vsize);
++
++ switch (update_time) {
++ case CI_ISP_CFG_UPDATE_FRAME_SYNC:
++ /* frame synchronous update of shadow registers */
++ REG_SET_SLICE(mrv_reg->isp_ctrl,
++ MRV_ISP_ISP_GEN_CFG_UPD, ON);
++ break;
++ case CI_ISP_CFG_UPDATE_IMMEDIATE:
++ /* immediate update of shadow registers */
++ REG_SET_SLICE(mrv_reg->isp_ctrl,
++ MRV_ISP_ISP_CFG_UPD, ON);
++ break;
++ case CI_ISP_CFG_UPDATE_LATER:
++ /* no update from within this function */
++ break;
++ default:
++ break;
++ }
++ }
++}
++
++/*
++ * programs the given Bayer pattern demosaic parameters
++ */
++void ci_isp_set_demosaic(enum ci_isp_demosaic_mode demosaic_mode,
++ u8 demosaic_th)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 isp_demosaic = REG_READ(mrv_reg->isp_demosaic);
++
++ /* set demosaic mode */
++ switch (demosaic_mode) {
++ case CI_ISP_DEMOSAIC_STANDARD:
++ REG_SET_SLICE(isp_demosaic, MRV_ISP_DEMOSAIC_MODE,
++ MRV_ISP_DEMOSAIC_MODE_STD);
++ break;
++ case CI_ISP_DEMOSAIC_ENHANCED:
++ REG_SET_SLICE(isp_demosaic, MRV_ISP_DEMOSAIC_MODE,
++ MRV_ISP_DEMOSAIC_MODE_ENH);
++ break;
++ default:
++ WARN_ON(!(false));
++ }
++
++ /* set demosaic threshold */
++ REG_SET_SLICE(isp_demosaic, MRV_ISP_DEMOSAIC_TH, demosaic_th);
++ REG_WRITE(mrv_reg->isp_demosaic, isp_demosaic);
++}
++
++/*
++ * Sets the dedicated AWB block mode.
++ */
++int ci_isp_set_wb_mode(enum ci_isp_awb_mode wb_mode)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ switch (wb_mode) {
++ case CI_ISP_AWB_COMPLETELY_OFF:
++ /* manual WB, no measurements*/
++ REG_SET_SLICE(mrv_reg->isp_awb_prop, MRV_ISP_AWB_MODE,
++ MRV_ISP_AWB_MODE_NOMEAS);
++ /* switch AWB block off */
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_AWB_ENABLE,
++ DISABLE);
++ break;
++ case CI_ISP_AWB_MAN_MEAS:
++ case CI_ISP_AWB_AUTO:
++ case CI_ISP_AWB_MAN_PUSH_AUTO:
++ case CI_ISP_AWB_ONLY_MEAS:
++ /* manual white balance, measure YCbCr means */
++ REG_SET_SLICE(mrv_reg->isp_awb_prop, MRV_ISP_AWB_MODE,
++ MRV_ISP_AWB_MODE_MEAS);
++ /* switch AWB block on */
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_AWB_ENABLE,
++ ENABLE);
++ break;
++ case CI_ISP_AWB_MAN_NOMEAS:
++ /* manual white balance, no measurements */
++ REG_SET_SLICE(mrv_reg->isp_awb_prop, MRV_ISP_AWB_MODE,
++ MRV_ISP_AWB_MODE_NOMEAS);
++ /* switch AWB block on */
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_AWB_ENABLE,
++ ENABLE);
++ break;
++ default:
++ /* to be sure that a regular value is set: */
++ /* manual white balance, no measurements */
++ REG_SET_SLICE(mrv_reg->isp_awb_prop, MRV_ISP_AWB_MODE,
++ MRV_ISP_AWB_MODE_NOMEAS);
++ /* switch AWB block off */
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_AWB_ENABLE,
++ DISABLE);
++ return CI_STATUS_FAILURE;
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++int ci_isp_get_wb_mode(enum ci_isp_awb_mode *wb_mode)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ if (!wb_mode)
++ return CI_STATUS_NULL_POINTER;
++
++ if (REG_GET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_AWB_ENABLE) ==
++ DISABLE) {
++ *wb_mode = CI_ISP_AWB_COMPLETELY_OFF;
++ } else {
++
++ switch (REG_GET_SLICE(mrv_reg->isp_awb_prop,
++ MRV_ISP_AWB_MODE)) {
++ case MRV_ISP_AWB_MODE_MEAS:
++ *wb_mode = CI_ISP_AWB_MAN_MEAS;
++ break;
++ case MRV_ISP_AWB_MODE_NOMEAS:
++ *wb_mode = CI_ISP_AWB_MAN_NOMEAS;
++ break;
++ default:
++ *wb_mode = CI_ISP_AWB_COMPLETELY_OFF;
++ return CI_STATUS_FAILURE;
++ }
++ }
++ return CI_STATUS_SUCCESS;
++}
++int ci_isp_set_wb_meas_config(const struct ci_isp_wb_meas_config
++ *wb_meas_config)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 isp_awb_thresh = REG_READ(mrv_reg->isp_awb_thresh);
++
++ if (!wb_meas_config)
++ return CI_STATUS_NULL_POINTER;
++
++ /* measurement window */
++ REG_SET_SLICE(mrv_reg->isp_awb_h_size, MRV_ISP_AWB_H_SIZE,
++ (u32) wb_meas_config->awb_window.hsize);
++ REG_SET_SLICE(mrv_reg->isp_awb_v_size, MRV_ISP_AWB_V_SIZE,
++ (u32) wb_meas_config->awb_window.vsize);
++ REG_SET_SLICE(mrv_reg->isp_awb_h_offs, MRV_ISP_AWB_H_OFFS,
++ (u32) wb_meas_config->awb_window.hoffs);
++ REG_SET_SLICE(mrv_reg->isp_awb_v_offs, MRV_ISP_AWB_V_OFFS,
++ (u32) wb_meas_config->awb_window.voffs);
++
++ /* adjust awb properties (Y_MAX compare) */
++ if (wb_meas_config->max_y == 0) {
++ REG_SET_SLICE(mrv_reg->isp_awb_prop, MRV_ISP_AWB_MAX_EN,
++ DISABLE);
++ } else {
++ REG_SET_SLICE(mrv_reg->isp_awb_prop, MRV_ISP_AWB_MAX_EN,
++ ENABLE);
++ }
++
++ /* measurement thresholds */
++ REG_SET_SLICE(isp_awb_thresh, MRV_ISP_AWB_MAX_Y,
++ (u32) wb_meas_config->max_y);
++ REG_SET_SLICE(isp_awb_thresh, MRV_ISP_AWB_MIN_Y__MAX_G,
++ (u32) wb_meas_config->minY_MaxG);
++ REG_SET_SLICE(isp_awb_thresh, MRV_ISP_AWB_MAX_CSUM,
++ (u32) wb_meas_config->max_csum);
++ REG_SET_SLICE(isp_awb_thresh, MRV_ISP_AWB_MIN_C,
++ (u32) wb_meas_config->min_c);
++ REG_WRITE(mrv_reg->isp_awb_thresh, isp_awb_thresh);
++ REG_SET_SLICE(mrv_reg->isp_awb_ref, MRV_ISP_AWB_REF_CR__MAX_R,
++ (u32)(wb_meas_config->ref_cr_MaxR));
++ REG_SET_SLICE(mrv_reg->isp_awb_ref, MRV_ISP_AWB_REF_CB__MAX_B,
++ (u32)(wb_meas_config->ref_cb_MaxB));
++
++ /* amount of measurement frames */
++ REG_SET_SLICE(mrv_reg->isp_awb_frames, MRV_ISP_AWB_FRAMES,
++ (u32) wb_meas_config->frames);
++
++ /* set measurement mode */
++ REG_SET_SLICE(mrv_reg->isp_awb_prop, MRV_ISP_AWB_MEAS_MODE,
++ (u32)(wb_meas_config->meas_mode));
++
++ return CI_STATUS_SUCCESS;
++}
++
++int ci_isp_get_wb_meas_config(struct ci_isp_wb_meas_config *wb_meas_config)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ if (!wb_meas_config)
++ return CI_STATUS_NULL_POINTER;
++
++ /* measurement window */
++ wb_meas_config->awb_window.hsize =
++ (u16) REG_GET_SLICE(mrv_reg->isp_awb_h_size, MRV_ISP_AWB_H_SIZE);
++ wb_meas_config->awb_window.vsize =
++ (u16) REG_GET_SLICE(mrv_reg->isp_awb_v_size, MRV_ISP_AWB_V_SIZE);
++ wb_meas_config->awb_window.hoffs =
++ (u16) REG_GET_SLICE(mrv_reg->isp_awb_h_offs, MRV_ISP_AWB_H_OFFS);
++ wb_meas_config->awb_window.voffs =
++ (u16) REG_GET_SLICE(mrv_reg->isp_awb_v_offs, MRV_ISP_AWB_V_OFFS);
++
++ /* measurement thresholds */
++ wb_meas_config->min_c =
++ (u8) REG_GET_SLICE(mrv_reg->isp_awb_thresh, MRV_ISP_AWB_MIN_C);
++ wb_meas_config->max_csum =
++ (u8) REG_GET_SLICE(mrv_reg->isp_awb_thresh, MRV_ISP_AWB_MAX_CSUM);
++ wb_meas_config->minY_MaxG =
++ (u8) REG_GET_SLICE(mrv_reg->isp_awb_thresh,
++ MRV_ISP_AWB_MIN_Y__MAX_G);
++ wb_meas_config->max_y =
++ (u8) REG_GET_SLICE(mrv_reg->isp_awb_thresh, MRV_ISP_AWB_MAX_Y);
++ wb_meas_config->ref_cb_MaxB =
++ (u8)REG_GET_SLICE(mrv_reg->isp_awb_ref, MRV_ISP_AWB_REF_CB__MAX_B);
++ wb_meas_config->ref_cr_MaxR =
++ (u8)REG_GET_SLICE(mrv_reg->isp_awb_ref, MRV_ISP_AWB_REF_CR__MAX_R);
++
++ /* amount of measurement frames */
++ wb_meas_config->frames =
++ (u8) REG_GET_SLICE(mrv_reg->isp_awb_frames, MRV_ISP_AWB_FRAMES);
++
++ /* overwrite max_y if the feature is disabled */
++ if (REG_GET_SLICE(mrv_reg->isp_awb_prop, MRV_ISP_AWB_MAX_EN) ==
++ DISABLE) {
++ wb_meas_config->max_y = 0;
++ }
++
++ /* get measurement mode */
++ wb_meas_config->meas_mode = REG_GET_SLICE(mrv_reg->isp_awb_prop,
++ MRV_ISP_AWB_MEAS_MODE);
++ return CI_STATUS_SUCCESS;
++}
++
++int ci_isp_get_wb_meas(struct ci_sensor_awb_mean *awb_mean)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ if (awb_mean == NULL)
++ return CI_STATUS_NULL_POINTER;
++
++ awb_mean->white = REG_GET_SLICE(mrv_reg->isp_awb_white_cnt,
++ MRV_ISP_AWB_WHITE_CNT);
++ awb_mean->mean_Y__G = (u8) REG_GET_SLICE(mrv_reg->isp_awb_mean,
++ MRV_ISP_AWB_MEAN_Y__G);
++ awb_mean->mean_cb__B = (u8) REG_GET_SLICE(mrv_reg->isp_awb_mean,
++ MRV_ISP_AWB_MEAN_CB__B);
++ awb_mean->mean_cr__R = (u8) REG_GET_SLICE(mrv_reg->isp_awb_mean,
++ MRV_ISP_AWB_MEAN_CR__R);
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * calculates left-top and right-bottom register values
++ * for a given AF measurement window
++ */
++static int ci_isp_afm_wnd2_regs(const struct ci_isp_window *wnd, u32 *lt,
++ u32 *rb)
++{
++ WARN_ON(!((wnd != NULL) && (lt != NULL) && (rb != NULL)));
++
++ if (wnd->hsize && wnd->vsize) {
++ u32 left = wnd->hoffs;
++ u32 top = wnd->voffs;
++ u32 right = left + wnd->hsize - 1;
++ u32 bottom = top + wnd->vsize - 1;
++
++ if ((left < MRV_AFM_A_H_L_MIN)
++ || (left > MRV_AFM_A_H_L_MAX)
++ || (top < MRV_AFM_A_V_T_MIN)
++ || (top > MRV_AFM_A_V_T_MAX)
++ || (right < MRV_AFM_A_H_R_MIN)
++ || (right > MRV_AFM_A_H_R_MAX)
++ || (bottom < MRV_AFM_A_V_B_MIN)
++ || (bottom > MRV_AFM_A_V_B_MAX)) {
++ return CI_STATUS_OUTOFRANGE;
++ }
++
++ /* combine the values and return */
++ REG_SET_SLICE(*lt, MRV_AFM_A_H_L, left);
++ REG_SET_SLICE(*lt, MRV_AFM_A_V_T, top);
++ REG_SET_SLICE(*rb, MRV_AFM_A_H_R, right);
++ REG_SET_SLICE(*rb, MRV_AFM_A_V_B, bottom);
++ } else {
++ *lt = 0;
++ *rb = 0;
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++int ci_isp_set_auto_focus(const struct ci_isp_af_config *af_config)
++{
++
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 result = CI_STATUS_SUCCESS;
++
++ /* disable measurement module */
++ REG_SET_SLICE(mrv_reg->isp_afm_ctrl, MRV_AFM_AFM_EN, DISABLE);
++
++ if (af_config) {
++ u32 lt;
++ u32 rb;
++ result = ci_isp_afm_wnd2_regs(&(af_config->wnd_pos_a),
++ &lt, &rb);
++ /* set measurement window boundaries */
++ if (result != CI_STATUS_SUCCESS)
++ return result;
++
++ REG_WRITE(mrv_reg->isp_afm_lt_a, lt);
++ REG_WRITE(mrv_reg->isp_afm_rb_a, rb);
++
++ result = ci_isp_afm_wnd2_regs(&(af_config->wnd_pos_b),
++ &lt, &rb);
++
++ if (result != CI_STATUS_SUCCESS)
++ return result;
++
++ REG_WRITE(mrv_reg->isp_afm_lt_b, lt);
++ REG_WRITE(mrv_reg->isp_afm_rb_b, rb);
++
++ result = ci_isp_afm_wnd2_regs(&(af_config->wnd_pos_c),
++ &lt, &rb);
++
++ if (result != CI_STATUS_SUCCESS)
++ return result;
++
++ REG_WRITE(mrv_reg->isp_afm_lt_c, lt);
++ REG_WRITE(mrv_reg->isp_afm_rb_c, rb);
++
++ /* set other af measurement parameters */
++ REG_SET_SLICE(mrv_reg->isp_afm_thres, MRV_AFM_AFM_THRES,
++ af_config->threshold);
++ REG_SET_SLICE(mrv_reg->isp_afm_var_shift, MRV_AFM_LUM_VAR_SHIFT,
++ (af_config->var_shift >> 16));
++ REG_SET_SLICE(mrv_reg->isp_afm_var_shift, MRV_AFM_AFM_VAR_SHIFT,
++ (af_config->var_shift >> 0));
++
++ /* enable measurement module */
++ REG_SET_SLICE(mrv_reg->isp_afm_ctrl, MRV_AFM_AFM_EN, ENABLE);
++ }
++
++ return result;
++}
++
++
++void ci_isp_get_auto_focus_meas(struct ci_isp_af_meas *af_meas)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ WARN_ON(!(af_meas != NULL));
++
++ af_meas->afm_sum_a =
++ REG_GET_SLICE(mrv_reg->isp_afm_sum_a, MRV_AFM_AFM_SUM_A);
++ af_meas->afm_sum_b =
++ REG_GET_SLICE(mrv_reg->isp_afm_sum_b, MRV_AFM_AFM_SUM_B);
++ af_meas->afm_sum_c =
++ REG_GET_SLICE(mrv_reg->isp_afm_sum_c, MRV_AFM_AFM_SUM_C);
++ af_meas->afm_lum_a =
++ REG_GET_SLICE(mrv_reg->isp_afm_lum_a, MRV_AFM_AFM_LUM_A);
++ af_meas->afm_lum_b =
++ REG_GET_SLICE(mrv_reg->isp_afm_lum_b, MRV_AFM_AFM_LUM_B);
++ af_meas->afm_lum_c =
++ REG_GET_SLICE(mrv_reg->isp_afm_lum_c, MRV_AFM_AFM_LUM_C);
++}
++
++int ci_isp_set_ls_correction(struct ci_sensor_ls_corr_config *ls_corr_config)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 i, n;
++ u32 data = 0;
++ int enabled = false;
++
++ if (!ls_corr_config) {
++ /* disable lens shading module */
++ REG_SET_SLICE(mrv_reg->isp_lsc_ctrl, MRV_LSC_LSC_EN, DISABLE);
++ } else {
++ /* test if lens shading correction is enabled */
++ if (REG_GET_SLICE(mrv_reg->isp_lsc_ctrl, MRV_LSC_LSC_EN)) {
++ /* switch off lens shading correction */
++ REG_SET_SLICE(mrv_reg->isp_lsc_ctrl,
++ MRV_LSC_LSC_EN, DISABLE);
++ /* wait 1ms to make sure that
++ * the LSC have time enough to switch off */
++ /* NOTE: msleep(1000) sleeps ~1 s, not the 1 ms stated above -- verify intent */
++ /*mdelay(1000);*/
++ msleep(1000);
++ enabled = true;
++ }
++
++ /* clear address counters */
++ REG_WRITE(mrv_reg->isp_lsc_r_table_addr, 0);
++ REG_WRITE(mrv_reg->isp_lsc_g_table_addr, 0);
++ REG_WRITE(mrv_reg->isp_lsc_b_table_addr, 0);
++
++ /* program data tables (table size is 9 * 17 = 153;
++ * see also MRV_LSC_?_RAM_ADDR_MAX) */
++ WARN_ON(!(((CI_ISP_MAX_LSC_SECTORS + 1) *
++ ((CI_ISP_MAX_LSC_SECTORS + 2) / 2)) ==
++ (MRV_LSC_R_RAM_ADDR_MAX + 1)));
++
++ /* 17 steps */
++ for (n = 0;
++ n < ((CI_ISP_MAX_LSC_SECTORS + 1) *
++ (CI_ISP_MAX_LSC_SECTORS + 1));
++ n += CI_ISP_MAX_LSC_SECTORS + 1) {
++ dprintk(2, "set ls correct step n = %d", n);
++ /* 17 sectors with 2 values in one DWORD = 9
++ * DWORDs (8 steps + 1 outside loop) */
++ for (i = 0; i < (CI_ISP_MAX_LSC_SECTORS); i += 2) {
++ REG_SET_SLICE(data, MRV_LSC_R_SAMPLE_0,
++ ls_corr_config->ls_rdata_tbl[n + i]);
++ REG_SET_SLICE(data, MRV_LSC_R_SAMPLE_1,
++ ls_corr_config->ls_rdata_tbl
++ [n + i + 1]);
++ REG_WRITE(mrv_reg->isp_lsc_r_table_data, data);
++ REG_SET_SLICE(data, MRV_LSC_G_SAMPLE_0,
++ ls_corr_config->ls_gdata_tbl
++ [n + i]);
++ REG_SET_SLICE(data, MRV_LSC_G_SAMPLE_1,
++ ls_corr_config->ls_gdata_tbl
++ [n + i + 1]);
++ REG_WRITE(mrv_reg->isp_lsc_g_table_data, data);
++ REG_SET_SLICE(data, MRV_LSC_B_SAMPLE_0,
++ ls_corr_config->ls_bdata_tbl[n + i]);
++ REG_SET_SLICE(data, MRV_LSC_B_SAMPLE_1,
++ ls_corr_config->ls_bdata_tbl
++ [n + i + 1]);
++ REG_WRITE(mrv_reg->isp_lsc_b_table_data, data);
++ }
++ REG_SET_SLICE(data, MRV_LSC_R_SAMPLE_0,
++ ls_corr_config->ls_rdata_tbl
++ [n + CI_ISP_MAX_LSC_SECTORS]);
++ REG_SET_SLICE(data, MRV_LSC_R_SAMPLE_1, 0);
++ REG_WRITE(mrv_reg->isp_lsc_r_table_data, data);
++ REG_SET_SLICE(data, MRV_LSC_G_SAMPLE_0,
++ ls_corr_config->ls_gdata_tbl
++ [n + CI_ISP_MAX_LSC_SECTORS]);
++ REG_SET_SLICE(data, MRV_LSC_G_SAMPLE_1, 0);
++ REG_WRITE(mrv_reg->isp_lsc_g_table_data, data);
++ REG_SET_SLICE(data, MRV_LSC_B_SAMPLE_0,
++ ls_corr_config->ls_bdata_tbl
++ [n + CI_ISP_MAX_LSC_SECTORS]);
++ REG_SET_SLICE(data, MRV_LSC_B_SAMPLE_1, 0);
++ REG_WRITE(mrv_reg->isp_lsc_b_table_data, data);
++ }
++
++ /* program x size tables */
++ REG_SET_SLICE(mrv_reg->isp_lsc_xsize_01, MRV_LSC_X_SECT_SIZE_0,
++ ls_corr_config->ls_xsize_tbl[0]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xsize_01, MRV_LSC_X_SECT_SIZE_1,
++ ls_corr_config->ls_xsize_tbl[1]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xsize_23, MRV_LSC_X_SECT_SIZE_2,
++ ls_corr_config->ls_xsize_tbl[2]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xsize_23, MRV_LSC_X_SECT_SIZE_3,
++ ls_corr_config->ls_xsize_tbl[3]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xsize_45, MRV_LSC_X_SECT_SIZE_4,
++ ls_corr_config->ls_xsize_tbl[4]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xsize_45, MRV_LSC_X_SECT_SIZE_5,
++ ls_corr_config->ls_xsize_tbl[5]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xsize_67, MRV_LSC_X_SECT_SIZE_6,
++ ls_corr_config->ls_xsize_tbl[6]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xsize_67, MRV_LSC_X_SECT_SIZE_7,
++ ls_corr_config->ls_xsize_tbl[7]);
++
++ /* program y size tables */
++ REG_SET_SLICE(mrv_reg->isp_lsc_ysize_01, MRV_LSC_Y_SECT_SIZE_0,
++ ls_corr_config->ls_ysize_tbl[0]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ysize_01, MRV_LSC_Y_SECT_SIZE_1,
++ ls_corr_config->ls_ysize_tbl[1]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ysize_23, MRV_LSC_Y_SECT_SIZE_2,
++ ls_corr_config->ls_ysize_tbl[2]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ysize_23, MRV_LSC_Y_SECT_SIZE_3,
++ ls_corr_config->ls_ysize_tbl[3]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ysize_45, MRV_LSC_Y_SECT_SIZE_4,
++ ls_corr_config->ls_ysize_tbl[4]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ysize_45, MRV_LSC_Y_SECT_SIZE_5,
++ ls_corr_config->ls_ysize_tbl[5]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ysize_67, MRV_LSC_Y_SECT_SIZE_6,
++ ls_corr_config->ls_ysize_tbl[6]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ysize_67, MRV_LSC_Y_SECT_SIZE_7,
++ ls_corr_config->ls_ysize_tbl[7]);
++
++ /* program x grad tables */
++ REG_SET_SLICE(mrv_reg->isp_lsc_xgrad_01, MRV_LSC_XGRAD_0,
++ ls_corr_config->ls_xgrad_tbl[0]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xgrad_01, MRV_LSC_XGRAD_1,
++ ls_corr_config->ls_xgrad_tbl[1]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xgrad_23, MRV_LSC_XGRAD_2,
++ ls_corr_config->ls_xgrad_tbl[2]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xgrad_23, MRV_LSC_XGRAD_3,
++ ls_corr_config->ls_xgrad_tbl[3]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xgrad_45, MRV_LSC_XGRAD_4,
++ ls_corr_config->ls_xgrad_tbl[4]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xgrad_45, MRV_LSC_XGRAD_5,
++ ls_corr_config->ls_xgrad_tbl[5]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xgrad_67, MRV_LSC_XGRAD_6,
++ ls_corr_config->ls_xgrad_tbl[6]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_xgrad_67, MRV_LSC_XGRAD_7,
++ ls_corr_config->ls_xgrad_tbl[7]);
++
++ /* program y grad tables */
++ REG_SET_SLICE(mrv_reg->isp_lsc_ygrad_01, MRV_LSC_YGRAD_0,
++ ls_corr_config->ls_ygrad_tbl[0]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ygrad_01, MRV_LSC_YGRAD_1,
++ ls_corr_config->ls_ygrad_tbl[1]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ygrad_23, MRV_LSC_YGRAD_2,
++ ls_corr_config->ls_ygrad_tbl[2]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ygrad_23, MRV_LSC_YGRAD_3,
++ ls_corr_config->ls_ygrad_tbl[3]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ygrad_45, MRV_LSC_YGRAD_4,
++ ls_corr_config->ls_ygrad_tbl[4]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ygrad_45, MRV_LSC_YGRAD_5,
++ ls_corr_config->ls_ygrad_tbl[5]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ygrad_67, MRV_LSC_YGRAD_6,
++ ls_corr_config->ls_ygrad_tbl[6]);
++ REG_SET_SLICE(mrv_reg->isp_lsc_ygrad_67, MRV_LSC_YGRAD_7,
++ ls_corr_config->ls_ygrad_tbl[7]);
++
++ if (enabled) {
++ /* switch on lens shading correction */
++ REG_SET_SLICE(mrv_reg->isp_lsc_ctrl,
++ MRV_LSC_LSC_EN, ENABLE);
++ }
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++int ci_isp_ls_correction_on_off(int ls_corr_on_off)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ if (ls_corr_on_off) {
++ /* switch on lens shading correction */
++ REG_SET_SLICE(mrv_reg->isp_lsc_ctrl, MRV_LSC_LSC_EN, ENABLE);
++ } else {
++ /* switch off lens shading correction */
++ REG_SET_SLICE(mrv_reg->isp_lsc_ctrl, MRV_LSC_LSC_EN, DISABLE);
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Sets the Bad Pixel Correction configuration
++ */
++int ci_isp_set_bp_correction(const struct ci_isp_bp_corr_config
++ *bp_corr_config)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 isp_bp_ctrl = REG_READ(mrv_reg->isp_bp_ctrl);
++
++ if (!bp_corr_config) {
++ /* disable correction module */
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_HOT_COR_EN, DISABLE);
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_DEAD_COR_EN, DISABLE);
++ } else {
++ /* set bad pixel configuration */
++ if (bp_corr_config->bp_corr_type == CI_ISP_BP_CORR_DIRECT) {
++ /* direct detection */
++ u32 isp_bp_cfg1 = REG_READ(mrv_reg->isp_bp_cfg1);
++ u32 isp_bp_cfg2 = REG_READ(mrv_reg->isp_bp_cfg2);
++
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_COR_TYPE,
++ MRV_BP_COR_TYPE_DIRECT);
++
++ WARN_ON(!(!REG_GET_SLICE(mrv_reg->isp_bp_ctrl,
++ MRV_BP_BP_DET_EN)));
++
++ /* threshold register only used for direct mode */
++ REG_SET_SLICE(isp_bp_cfg1, MRV_BP_HOT_THRES,
++ bp_corr_config->bp_abs_hot_thres);
++ REG_SET_SLICE(isp_bp_cfg1, MRV_BP_DEAD_THRES,
++ bp_corr_config->bp_abs_dead_thres);
++ REG_WRITE(mrv_reg->isp_bp_cfg1, isp_bp_cfg1);
++ REG_SET_SLICE(isp_bp_cfg2, MRV_BP_DEV_HOT_THRES,
++ bp_corr_config->bp_dev_hot_thres);
++ REG_SET_SLICE(isp_bp_cfg2, MRV_BP_DEV_DEAD_THRES,
++ bp_corr_config->bp_dev_dead_thres);
++ REG_WRITE(mrv_reg->isp_bp_cfg2, isp_bp_cfg2);
++ } else {
++ /* use bad pixel table */
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_COR_TYPE,
++ MRV_BP_COR_TYPE_TABLE);
++ }
++
++ if (bp_corr_config->bp_corr_rep == CI_ISP_BP_CORR_REP_LIN) {
++ /* use linear approach */
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_REP_APPR,
++ MRV_BP_REP_APPR_INTERPOL);
++ } else {
++ /* use best neighbour */
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_REP_APPR,
++ MRV_BP_REP_APPR_NEAREST);
++ }
++
++ switch (bp_corr_config->bp_corr_mode) {
++ case CI_ISP_BP_CORR_HOT_EN:
++ /* enable Hot */
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_HOT_COR_EN, ENABLE);
++ /* disable Dead */
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_DEAD_COR_EN, DISABLE);
++ break;
++ case CI_ISP_BP_CORR_DEAD_EN:
++ /* disable Hot */
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_HOT_COR_EN, DISABLE);
++ /* enable Dead */
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_DEAD_COR_EN, ENABLE);
++ break;
++ case CI_ISP_BP_CORR_HOT_DEAD_EN:
++ default:
++ /* enable Hot */
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_HOT_COR_EN, ENABLE);
++ /* enable Dead */
++ REG_SET_SLICE(isp_bp_ctrl, MRV_BP_DEAD_COR_EN, ENABLE);
++ break;
++ }
++ }
++
++ REG_WRITE(mrv_reg->isp_bp_ctrl, isp_bp_ctrl);
++
++ return CI_STATUS_SUCCESS;
++
++}
++
++/*
++ * Sets the Bad Pixel configuration for detection
++ */
++int ci_isp_set_bp_detection(const struct ci_isp_bp_det_config *bp_det_config)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ if (!bp_det_config) {
++ /* disable measurement module */
++ REG_SET_SLICE(mrv_reg->isp_bp_ctrl, MRV_BP_BP_DET_EN, DISABLE);
++ } else {
++ WARN_ON(!(REG_GET_SLICE(mrv_reg->isp_bp_ctrl, MRV_BP_COR_TYPE)
++ == MRV_BP_COR_TYPE_TABLE));
++
++ /* set dead threshold for bad pixel detection */
++ REG_SET_SLICE(mrv_reg->isp_bp_cfg1, MRV_BP_DEAD_THRES,
++ bp_det_config->bp_dead_thres);
++
++ /* enable measurement module */
++ REG_SET_SLICE(mrv_reg->isp_bp_ctrl, MRV_BP_BP_DET_EN, ENABLE);
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++int ci_isp_clear_bp_int(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ /* clear bp_det irq (only if it is signalled to prevent loss of irqs) */
++ if (REG_GET_SLICE(mrv_reg->isp_ris, MRV_ISP_RIS_BP_DET))
++ REG_SET_SLICE(mrv_reg->isp_icr, MRV_ISP_ICR_BP_DET, 1);
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Initializes Isp filter registers with default reset values.
++ */
++static int ci_isp_initialize_filter_registers(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ mrv_reg->isp_filt_mode = 0x00000000;
++ mrv_reg->isp_filt_fac_sh1 = 0x00000010;
++ mrv_reg->isp_filt_fac_sh0 = 0x0000000C;
++ mrv_reg->isp_filt_fac_mid = 0x0000000A;
++ mrv_reg->isp_filt_fac_bl0 = 0x00000006;
++ mrv_reg->isp_filt_fac_bl1 = 0x00000002;
++ mrv_reg->isp_filt_thresh_bl0 = 0x0000000D;
++ mrv_reg->isp_filt_thresh_bl1 = 0x00000005;
++ mrv_reg->isp_filt_thresh_sh0 = 0x0000001A;
++ mrv_reg->isp_filt_thresh_sh1 = 0x0000002C;
++ mrv_reg->isp_filt_lum_weight = 0x00032040;
++
++ return CI_STATUS_SUCCESS;
++}
++
++int ci_isp_activate_filter(int activate_filter)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ int retval = CI_STATUS_SUCCESS;
++
++ /* Initialize ISP filter control registers first */
++ retval = ci_isp_initialize_filter_registers();
++ if (retval != CI_STATUS_SUCCESS)
++ return retval;
++
++ /* Activate or deactivate filter algorithm */
++ REG_SET_SLICE(mrv_reg->isp_filt_mode, MRV_FILT_FILT_ENABLE,
++ (activate_filter) ? ENABLE : DISABLE);
++
++ return retval;
++}
++
++/*
++ * Write coefficient and threshold values into Isp filter
++ * registers for noise, sharpness and blurring filtering.
++ */
++int ci_isp_set_filter_params(u8 noise_reduc_level, u8 sharp_level)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 isp_filt_mode = 0;
++
++ if (!REG_GET_SLICE(mrv_reg->isp_filt_mode, MRV_FILT_FILT_ENABLE))
++ return CI_STATUS_CANCELED;
++
++ REG_WRITE(mrv_reg->isp_filt_mode, isp_filt_mode);
++
++ if (((noise_reduc_level <= 10) || (noise_reduc_level == 99))
++ && (sharp_level <= 10)) {
++ switch (noise_reduc_level) {
++ /* Test Mode */
++ case 99:
++ /* 10 bit max value */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 0x000003FF);
++ /* 10 bit max value */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 0x000003FF);
++ /* 10 bit max value */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 0x000003FF);
++ /* 10 bit max value */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 0x000003FF);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT, 0
++ /* MRV_FILT_STAGE1_SELECT_MAX_BLUR */);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_BYPASS);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_BYPASS);
++ break;
++
++ case 0:
++ /* NoiseReductionLevel = 0 */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 0x000000);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 0x000000);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 0x000000);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 0x000000);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT, 6);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_STATIC8);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_BYPASS);
++ break;
++
++ case 1:
++ /* NoiseReductionLevel = 1; */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 33);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 18);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 8);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 2);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT, 6);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_STATIC12);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_DYN_2);
++ break;
++
++ case 2:
++ /* NoiseReductionLevel = 2; */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 44);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 26);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 13);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 5);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT, 4
++ /* MRV_FILT_STAGE1_SELECT_DEFAULT */);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_STATIC12);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_DYN_2);
++ break;
++
++ case 3:
++ /* NoiseReductionLevel = 3; */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 51);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 36);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 23);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 10);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT, 4
++ /* MRV_FILT_STAGE1_SELECT_DEFAULT */);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_STATIC12);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_DYN_2);
++ break;
++
++ case 4:
++ /* NoiseReductionLevel = 4; */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 67);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 41);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 26);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 15);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT, 3);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_STATIC12);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_DYN_2);
++ break;
++
++ case 5:
++ /* NoiseReductionLevel = 5; */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 100);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 75);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 50);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 20);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT, 3);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_STATIC12);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_DYN_2);
++ break;
++
++ case 6:
++ /* NoiseReductionLevel = 6; */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 120);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 90);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 60);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 26);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT, 2);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_STATIC12);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_DYN_2);
++ break;
++
++ case 7:
++ /* NoiseReductionLevel = 7; */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 150);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 120);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 80);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 51);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT, 2);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_STATIC12);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_DYN_2);
++ break;
++
++ case 8:
++ /* NoiseReductionLevel = 8; */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 200);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 170);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 140);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 100);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT, 2);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_STATIC12);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_DYN_2);
++ break;
++
++ case 9:
++ /* NoiseReductionLevel = 9; */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 300);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 250);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 180);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 150);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT,
++ (sharp_level > 3) ? 2 : 1);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_STATIC12);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_DYN_2);
++ break;
++
++ case 10:
++ /* NoiseReductionLevel = 10; extreme noise */
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1, 1023);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 1023);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl0,
++ MRV_FILT_FILT_THRESH_BL0, 1023);
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_bl1,
++ MRV_FILT_FILT_THRESH_BL1, 1023);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_STAGE1_SELECT,
++ (sharp_level > 5) ? 2 :
++ ((sharp_level > 3) ? 1 : 0));
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_V_MODE,
++ MRV_FILT_FILT_CHR_V_MODE_STATIC12);
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_CHR_H_MODE,
++ MRV_FILT_FILT_CHR_H_MODE_DYN_2);
++ break;
++
++ default:
++ return CI_STATUS_OUTOFRANGE;
++ }
++
++ switch (sharp_level) {
++ /* SharpLevel = 0; no sharp enhancement */
++ case 0:
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh1,
++ MRV_FILT_FILT_FAC_SH1, 0x00000004);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh0,
++ MRV_FILT_FILT_FAC_SH0, 0x00000004);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_mid,
++ MRV_FILT_FILT_FAC_MID, 0x00000004);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0, 0x00000002);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1, 0x00000000);
++ break;
++
++ /* SharpLevel = 1; */
++ case 1:
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh1,
++ MRV_FILT_FILT_FAC_SH1, 0x00000008);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh0,
++ MRV_FILT_FILT_FAC_SH0, 0x00000007);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_mid,
++ MRV_FILT_FILT_FAC_MID, 0x00000006);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0, 0x00000002);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1, 0x00000000);
++ break;
++
++ /* SharpLevel = 2; */
++ case 2:
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh1,
++ MRV_FILT_FILT_FAC_SH1, 0x0000000C);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh0,
++ MRV_FILT_FILT_FAC_SH0, 0x0000000A);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_mid,
++ MRV_FILT_FILT_FAC_MID, 0x00000008);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0, 0x00000004);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1, 0x00000000);
++ break;
++
++ /* SharpLevel = 3; */
++ case 3:
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh1,
++ MRV_FILT_FILT_FAC_SH1, 0x00000010);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh0,
++ MRV_FILT_FILT_FAC_SH0, 0x0000000C);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_mid,
++ MRV_FILT_FILT_FAC_MID, 0x0000000A);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0, 0x00000006);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1, 0x00000002);
++ break;
++
++ /* SharpLevel = 4; */
++ case 4:
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh1,
++ MRV_FILT_FILT_FAC_SH1, 0x00000016);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh0,
++ MRV_FILT_FILT_FAC_SH0, 0x00000010);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_mid,
++ MRV_FILT_FILT_FAC_MID, 0x0000000C);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0, 0x00000008);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1, 0x00000004);
++ break;
++
++ /* SharpLevel = 5; */
++ case 5:
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh1,
++ MRV_FILT_FILT_FAC_SH1, 0x0000001B);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh0,
++ MRV_FILT_FILT_FAC_SH0, 0x00000014);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_mid,
++ MRV_FILT_FILT_FAC_MID, 0x00000010);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0, 0x0000000A);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1, 0x00000004);
++ break;
++
++ /* SharpLevel = 6; */
++ case 6:
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh1,
++ MRV_FILT_FILT_FAC_SH1, 0x00000020);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh0,
++ MRV_FILT_FILT_FAC_SH0, 0x0000001A);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_mid,
++ MRV_FILT_FILT_FAC_MID, 0x00000013);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0, 0x0000000C);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1, 0x00000006);
++ break;
++
++ /* SharpLevel = 7; */
++ case 7:
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh1,
++ MRV_FILT_FILT_FAC_SH1, 0x00000026);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh0,
++ MRV_FILT_FILT_FAC_SH0, 0x0000001E);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_mid,
++ MRV_FILT_FILT_FAC_MID, 0x00000017);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0, 0x00000010);
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1, 0x00000008);
++ break;
++
++ /* SharpLevel = 8; */
++ case 8:
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 0x00000013);
++ if (REG_GET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1) > 0x0000008A) {
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1,
++ 0x0000008A);
++ }
++ /* 43 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh1,
++ MRV_FILT_FILT_FAC_SH1, 0x0000002C);
++ /* 36 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh0,
++ MRV_FILT_FILT_FAC_SH0, 0x00000024);
++ /* 29 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_mid,
++ MRV_FILT_FILT_FAC_MID, 0x0000001D);
++ /* 21 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0, 0x00000015);
++ /* 14 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1, 0x0000000D);
++ break;
++
++ /* SharpLevel = 9; */
++ case 9:
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ MRV_FILT_FILT_THRESH_SH0, 0x00000013);
++ if (REG_GET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1) > 0x0000008A) {
++ REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ MRV_FILT_FILT_THRESH_SH1,
++ 0x0000008A);
++ }
++ /* 48 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh1,
++ MRV_FILT_FILT_FAC_SH1, 0x00000030);
++ /* 42 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh0,
++ MRV_FILT_FILT_FAC_SH0, 0x0000002A);
++ /* 34 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_mid,
++ MRV_FILT_FILT_FAC_MID, 0x00000022);
++ /* 26 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0, 0x0000001A);
++ /* 20 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1, 0x00000014);
++ break;
++
++ /* SharpLevel = 10; */
++ case 10:
++ /* REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh0,
++ * MRV_FILT_FILT_THRESH_SH0, 0x00000013); */
++ /* if (REG_GET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ * MRV_FILT_FILT_THRESH_SH1) > 0x0000008A) */
++ /* { */
++ /* REG_SET_SLICE(mrv_reg->isp_filt_thresh_sh1,
++ * MRV_FILT_FILT_THRESH_SH1, 0x0000008A); */
++ /* } */
++
++ /* 63 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh1,
++ MRV_FILT_FILT_FAC_SH1, 0x0000003F);
++ /* 48 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_sh0,
++ MRV_FILT_FILT_FAC_SH0, 0x00000030);
++ /* 40 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_mid,
++ MRV_FILT_FILT_FAC_MID, 0x00000028);
++ /* 36 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0, 0x00000024);
++ /* 32 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1, 0x00000020);
++ break;
++
++ default:
++ return CI_STATUS_OUTOFRANGE;
++ }
++
++ if (noise_reduc_level > 7) {
++ if (sharp_level > 7) {
++ u32 filt_fac_bl0 = REG_GET_SLICE
++ (mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0);
++ u32 filt_fac_bl1 =
++ REG_GET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1);
++ /* * 0.50 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0,
++ (filt_fac_bl0) >> 1);
++ /* * 0.25 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1,
++ (filt_fac_bl1) >> 2);
++ } else if (sharp_level > 4) {
++ u32 filt_fac_bl0 =
++ REG_GET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0);
++ u32 filt_fac_bl1 =
++ REG_GET_SLICE(mrv_reg->
++ isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1);
++ /* * 0.75 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl0,
++ MRV_FILT_FILT_FAC_BL0,
++ (filt_fac_bl0 * 3) >> 2);
++ /* * 0.50 */
++ REG_SET_SLICE(mrv_reg->isp_filt_fac_bl1,
++ MRV_FILT_FILT_FAC_BL1,
++ (filt_fac_bl1) >> 1);
++ }
++ }
++
++ /* Set ISP filter mode register values */
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_MODE,
++ MRV_FILT_FILT_MODE_DYNAMIC);
++
++ /* enable filter */
++ REG_SET_SLICE(isp_filt_mode, MRV_FILT_FILT_ENABLE, ENABLE);
++ REG_WRITE(mrv_reg->isp_filt_mode, isp_filt_mode);
++
++ return CI_STATUS_SUCCESS;
++ } else {
++ /* At least one function parameter is out of range */
++ return CI_STATUS_OUTOFRANGE;
++ }
++}
++
++int ci_isp_meas_exposure_initialize_module(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ REG_SET_SLICE(mrv_reg->isp_exp_h_size, MRV_AE_ISP_EXP_H_SIZE, 0);
++ REG_SET_SLICE(mrv_reg->isp_exp_v_size, MRV_AE_ISP_EXP_V_SIZE, 0);
++ REG_SET_SLICE(mrv_reg->isp_exp_h_offset, MRV_AE_ISP_EXP_H_OFFSET, 0);
++ REG_SET_SLICE(mrv_reg->isp_exp_v_offset, MRV_AE_ISP_EXP_V_OFFSET, 0);
++
++ return CI_STATUS_SUCCESS;
++
++}
++
++/*
++ * Configures the exposure measurement module.
++ */
++int ci_isp_meas_exposure_set_config(const struct ci_isp_window *wnd,
++ const struct ci_isp_exp_ctrl *isp_exp_ctrl)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ if (!wnd) {
++ /* stop loop if running */
++ REG_SET_SLICE(mrv_reg->isp_exp_ctrl, MRV_AE_AUTOSTOP, ON);
++ /* required? */
++ REG_SET_SLICE(mrv_reg->isp_exp_ctrl, MRV_AE_EXP_START, OFF);
++ return CI_STATUS_SUCCESS;
++ }
++
++ /* range check */
++ if ((wnd->hoffs > MRV_AE_ISP_EXP_H_OFFSET_MAX)
++ || (wnd->hsize > MRV_AE_ISP_EXP_H_SIZE_MAX)
++ || (wnd->voffs > MRV_AE_ISP_EXP_V_OFFSET_MAX)
++ || (wnd->vsize > MRV_AE_ISP_EXP_V_SIZE_MAX)
++ || (wnd->vsize & ~MRV_AE_ISP_EXP_V_SIZE_VALID_MASK))
++ return CI_STATUS_OUTOFRANGE;
++
++ /* configure measurement windows */
++ REG_SET_SLICE(mrv_reg->isp_exp_h_size, MRV_AE_ISP_EXP_H_SIZE,
++ wnd->hsize);
++ REG_SET_SLICE(mrv_reg->isp_exp_v_size, MRV_AE_ISP_EXP_V_SIZE,
++ wnd->vsize);
++ REG_SET_SLICE(mrv_reg->isp_exp_h_offset, MRV_AE_ISP_EXP_H_OFFSET,
++ wnd->hoffs);
++ REG_SET_SLICE(mrv_reg->isp_exp_v_offset, MRV_AE_ISP_EXP_V_OFFSET,
++ wnd->voffs);
++
++ /* set exposure measurement mode */
++ REG_SET_SLICE(mrv_reg->isp_exp_ctrl, MRV_AE_EXP_MEAS_MODE,
++ (isp_exp_ctrl->exp_meas_mode) ? ON : OFF);
++
++ /* set or clear AE autostop bit */
++ REG_SET_SLICE(mrv_reg->isp_exp_ctrl, MRV_AE_AUTOSTOP,
++ (isp_exp_ctrl->auto_stop) ? ON : OFF);
++ REG_SET_SLICE(mrv_reg->isp_exp_ctrl, MRV_AE_EXP_START,
++ (isp_exp_ctrl->exp_start) ? ON : OFF);
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Programs the given gamma curve for the input gamma
++ * block. Enables or disables gamma processing for the
++ * input gamma block.
++ */
++void ci_isp_set_gamma(const struct ci_sensor_gamma_curve *r,
++ const struct ci_sensor_gamma_curve *g,
++ const struct ci_sensor_gamma_curve *b)
++{
++ struct isp_register *mrv_reg = (struct isp_register *)MEM_MRV_REG_BASE;
++ /* values stored as 16bit - use MSBs if cambuswidth is smaller */
++ const u8 shift_val = 16 - MARVIN_FEATURE_CAMBUSWIDTH;
++ /* used to round up values */
++ const u16 round_ofs = 0 << (shift_val - 1);
++ s32 i;
++
++ if (r) {
++
++ /*
++ * Note: gamma curve increments are already register conform,
++ * so REG_WRITE is used instead of REG_SET_SLICE
++ */
++
++ /*
++ * better would be split into 16 separate values to be
++ * register independent
++ */
++
++ /* gamma curve dx1..dx16 increments (each nibble of */
++ REG_WRITE(mrv_reg->isp_gamma_dx_lo, r->gamma_dx0);
++ /* the 32bit-values hold 3 valid bits, see register) */
++ REG_WRITE(mrv_reg->isp_gamma_dx_hi, r->gamma_dx1);
++
++ for (i = 0; i < MRV_ISP_GAMMA_R_Y_ARR_SIZE; i++) {
++ REG_SET_SLICE(mrv_reg->isp_gamma_r_y[i],
++ MRV_ISP_GAMMA_R_Y,
++ (r->isp_gamma_y[i] + round_ofs) >> shift_val);
++ REG_SET_SLICE(mrv_reg->isp_gamma_g_y[i],
++ MRV_ISP_GAMMA_G_Y,
++ (g->isp_gamma_y[i] + round_ofs) >> shift_val);
++ REG_SET_SLICE(mrv_reg->isp_gamma_b_y[i],
++ MRV_ISP_GAMMA_B_Y,
++ (b->isp_gamma_y[i] + round_ofs) >> shift_val);
++ }
++
++ REG_SET_SLICE(mrv_reg->isp_ctrl,
++ MRV_ISP_ISP_GAMMA_IN_ENABLE, ENABLE);
++ } else {
++ REG_SET_SLICE(mrv_reg->isp_ctrl,
++ MRV_ISP_ISP_GAMMA_IN_ENABLE, DISABLE);
++ }
++}
++
++/*
++ * Programs the given gamma curve for the output gamma
++ * block. Enables or disables gamma processing for the
++ * output gamma block.
++ */
++void ci_isp_set_gamma2(const struct ci_isp_gamma_out_curve *gamma)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ s32 i;
++
++ if (gamma) {
++ WARN_ON(!(MRV_ISP_GAMMA_OUT_Y_ARR_SIZE ==
++ CI_ISP_GAMMA_OUT_CURVE_ARR_SIZE));
++
++ for (i = 0; i < MRV_ISP_GAMMA_OUT_Y_ARR_SIZE; i++) {
++ REG_SET_SLICE(mrv_reg->isp_gamma_out_y[i],
++ MRV_ISP_ISP_GAMMA_OUT_Y,
++ gamma->isp_gamma_y[i]);
++ }
++
++ /* gamma curve linear or log */
++ REG_SET_SLICE(mrv_reg->isp_gamma_out_mode, MRV_ISP_EQU_SEGM,
++ gamma->gamma_segmentation);
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_GAMMA_OUT_ENABLE,
++ ENABLE);
++ } else {
++ REG_SET_SLICE(mrv_reg->isp_ctrl,
++ MRV_ISP_ISP_GAMMA_OUT_ENABLE, DISABLE);
++ }
++
++}
+diff --git a/drivers/media/video/mrstci/mrstisp/mrstisp_jpe.c b/drivers/media/video/mrstci/mrstisp/mrstisp_jpe.c
+new file mode 100644
+index 0000000..c042e06
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstisp/mrstisp_jpe.c
+@@ -0,0 +1,569 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include "mrstisp_stdinc.h"
++
++int ci_isp_jpe_init_ex(u16 hsize, u16 vsize, u8 compression_ratio, u8 jpe_scale)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ /*
++ * Reset JPEG-Encoder. In contrast to other software resets
++ * this triggers the modules asynchronous reset resulting
++ * in loss of all data.
++ */
++
++ REG_SET_SLICE(mrv_reg->vi_ircl, MRV_VI_JPEG_SOFT_RST, ON);
++ REG_SET_SLICE(mrv_reg->vi_ircl, MRV_VI_JPEG_SOFT_RST, OFF);
++
++ /* set configuration for the Jpeg capturing */
++ ci_isp_jpe_set_config(hsize, vsize, jpe_scale);
++
++ /*
++ * Sleep a while before setting up tables because of the 400
++ * clock cycles required to initialize the table RAM after a
++ * reset was issued. On FPGA we are running with only 30MHz,
++ * so at least 13us are required.
++ */
++
++
++ /*
++ * Note: this func is called when holding spin lock,
++ * so can not change to msleep.
++ */
++ mdelay(15);
++
++ /* program tables */
++ ci_isp_jpe_set_tables(compression_ratio);
++
++ /* choose tables */
++ ci_isp_jpe_select_tables();
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * initialization of JPEG encoder
++ */
++int ci_isp_jpe_init(u32 resolution, u8 compression_ratio, int jpe_scale)
++{
++ u16 hsize = 0;
++ u16 vsize = 0;
++
++ switch (resolution) {
++ case SENSOR_RES_BP1:
++ /* 352; */
++ hsize = BP1_SIZE_H;
++ /* 240; */
++ vsize = BP1_SIZE_V;
++ break;
++ case SENSOR_RES_S_AFM:
++ /* 64; */
++ hsize = S_AFM_SIZE_H;
++ /* 32; */
++ vsize = S_AFM_SIZE_V;
++ break;
++ case SENSOR_RES_M_AFM:
++ /* 128; */
++ hsize = M_AFM_SIZE_H;
++ /* 96; */
++ vsize = M_AFM_SIZE_V;
++ break;
++ case SENSOR_RES_L_AFM:
++ /* 720; */
++ hsize = L_AFM_SIZE_H;
++ /* 480; */
++ vsize = L_AFM_SIZE_V;
++ break;
++ case SENSOR_RES_QQCIF:
++ /* 88; */
++ hsize = QQCIF_SIZE_H;
++ /* 72; */
++ vsize = QQCIF_SIZE_V;
++ break;
++ case SENSOR_RES_QQVGA:
++ /* 160; */
++ hsize = QQVGA_SIZE_H;
++ /* 120; */
++ vsize = QQVGA_SIZE_V;
++ break;
++ case SENSOR_RES_QCIF:
++ /* 176; */
++ hsize = QCIF_SIZE_H;
++ /* 144; */
++ vsize = QCIF_SIZE_V;
++ break;
++ case SENSOR_RES_QVGA:
++ /* 320; */
++ hsize = QVGA_SIZE_H;
++ /* 240; */
++ vsize = QVGA_SIZE_V;
++ break;
++ case SENSOR_RES_CIF:
++ /* 352; */
++ hsize = CIF_SIZE_H;
++ /* 288; */
++ vsize = CIF_SIZE_V;
++ break;
++ case SENSOR_RES_VGA:
++ /* 640; */
++ hsize = VGA_SIZE_H;
++ /* 480; */
++ vsize = VGA_SIZE_V;
++ break;
++ case SENSOR_RES_SVGA:
++ /* 800; */
++ hsize = SVGA_SIZE_H;
++ /* 600; */
++ vsize = SVGA_SIZE_V;
++ break;
++ case SENSOR_RES_XGA:
++ /* 1024; */
++ hsize = XGA_SIZE_H;
++ /* 768; */
++ vsize = XGA_SIZE_V;
++ break;
++ case SENSOR_RES_XGA_PLUS:
++ /* 1280; */
++ hsize = XGA_PLUS_SIZE_H;
++ /* 960; */
++ vsize = XGA_PLUS_SIZE_V;
++ break;
++ case SENSOR_RES_SXGA:
++ /* 1280; */
++ hsize = SXGA_SIZE_H;
++ /* 1024; */
++ vsize = SXGA_SIZE_V;
++ break;
++ case SENSOR_RES_UXGA:
++ /* 1600; */
++ hsize = UXGA_SIZE_H;
++ /* 1200; */
++ vsize = UXGA_SIZE_V;
++ break;
++ case SENSOR_RES_QXGA:
++ /* 2048; */
++ hsize = QXGA_SIZE_H;
++ /* 1536; */
++ vsize = QXGA_SIZE_V;
++ break;
++ case SENSOR_RES_QSXGA:
++ /* 2586; */
++ hsize = QSXGA_SIZE_H;
++ /* 2048; */
++ vsize = QSXGA_SIZE_V;
++ break;
++ case SENSOR_RES_QSXGA_PLUS:
++ /* 2600; */
++ hsize = QSXGA_PLUS_SIZE_H;
++ /* 2048; */
++ vsize = QSXGA_PLUS_SIZE_V;
++ break;
++ case SENSOR_RES_QSXGA_PLUS2:
++ /* 2600; */
++ hsize = QSXGA_PLUS2_SIZE_H;
++ /* 1950; */
++ vsize = QSXGA_PLUS2_SIZE_V;
++ break;
++ case SENSOR_RES_QSXGA_PLUS3:
++ /* 2686; */
++ hsize = QSXGA_PLUS3_SIZE_H;
++ /* 2048; */
++ vsize = QSXGA_PLUS3_SIZE_V;
++ break;
++ case SENSOR_RES_WQSXGA:
++ /* 3200 */
++ hsize = WQSXGA_SIZE_H;
++ /* 2048 */
++ vsize = WQSXGA_SIZE_V;
++ break;
++ case SENSOR_RES_QUXGA:
++ /* 3200 */
++ hsize = QUXGA_SIZE_H;
++ /* 2400 */
++ vsize = QUXGA_SIZE_V;
++ break;
++ case SENSOR_RES_WQUXGA:
++ /* 3840 */
++ hsize = WQUXGA_SIZE_H;
++ /* 2400 */
++ vsize = WQUXGA_SIZE_V;
++ break;
++ case SENSOR_RES_HXGA:
++ /* 4096 */
++ hsize = HXGA_SIZE_H;
++ /* 3072 */
++ vsize = HXGA_SIZE_V;
++ break;
++ default:
++ eprintk("resolution not supported");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ return ci_isp_jpe_init_ex(hsize, vsize, compression_ratio, jpe_scale);
++}
++
++void ci_isp_jpe_set_tables(u8 compression_ratio)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ /* required because auto-increment register */
++ u32 jpe_table_data = 0;
++
++ u8 idx, size;
++ const u8 *yqtable = NULL;
++ const u8 *uvqtable = NULL;
++
++ switch (compression_ratio) {
++ case CI_ISP_JPEG_LOW_COMPRESSION:
++ yqtable = ci_isp_yq_table_low_comp1;
++ uvqtable = ci_isp_uv_qtable_low_comp1;
++ break;
++ case CI_ISP_JPEG_01_PERCENT:
++ yqtable = ci_isp_yq_table01_per_cent;
++ uvqtable = ci_isp_uv_qtable01_per_cent;
++ break;
++ case CI_ISP_JPEG_20_PERCENT:
++ yqtable = ci_isp_yq_table20_per_cent;
++ uvqtable = ci_isp_uv_qtable20_per_cent;
++ break;
++ case CI_ISP_JPEG_30_PERCENT:
++ yqtable = ci_isp_yq_table30_per_cent;
++ uvqtable = ci_isp_uv_qtable30_per_cent;
++ break;
++ case CI_ISP_JPEG_40_PERCENT:
++ yqtable = ci_isp_yq_table40_per_cent;
++ uvqtable = ci_isp_uv_qtable40_per_cent;
++ break;
++ case CI_ISP_JPEG_50_PERCENT:
++ yqtable = ci_isp_yq_table50_per_cent;
++ uvqtable = ci_isp_uv_qtable50_per_cent;
++ break;
++ case CI_ISP_JPEG_60_PERCENT:
++ yqtable = ci_isp_yq_table60_per_cent;
++ uvqtable = ci_isp_uv_qtable60_per_cent;
++ break;
++ case CI_ISP_JPEG_70_PERCENT:
++ yqtable = ci_isp_yq_table70_per_cent;
++ uvqtable = ci_isp_uv_qtable70_per_cent;
++ break;
++ case CI_ISP_JPEG_80_PERCENT:
++ yqtable = ci_isp_yq_table80_per_cent;
++ uvqtable = ci_isp_uv_qtable80_per_cent;
++ break;
++ case CI_ISP_JPEG_90_PERCENT:
++ yqtable = ci_isp_yq_table90_per_cent;
++ uvqtable = ci_isp_uv_qtable90_per_cent;
++ break;
++ case CI_ISP_JPEG_99_PERCENT:
++ yqtable = ci_isp_yq_table99_per_cent;
++ uvqtable = ci_isp_uv_qtable99_per_cent;
++ break;
++ case CI_ISP_JPEG_HIGH_COMPRESSION:
++ default:
++ /*
++ * in the case an unknown value is set,
++ * use CI_JPEG_HIGH_COMPRESSION
++ */
++ yqtable = ci_isp_yq_table75_per_cent;
++ uvqtable = ci_isp_uv_qtable75_per_cent;
++ break;
++ }
++
++ /* Y q-table 0 programming */
++
++ /* all possible assigned tables have same size */
++ size = sizeof(ci_isp_yq_table75_per_cent)/
++ sizeof(ci_isp_yq_table75_per_cent[0]);
++ REG_SET_SLICE(mrv_reg->jpe_table_id, MRV_JPE_TABLE_ID,
++ MRV_JPE_TABLE_ID_QUANT0);
++ for (idx = 0; idx < (size - 1); idx += 2) {
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_H,
++ yqtable[idx]);
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_L,
++ yqtable[idx + 1]);
++ /* auto-increment register! */
++ REG_WRITE(mrv_reg->jpe_table_data, jpe_table_data);
++ }
++
++ /* U/V q-table 0 programming */
++
++ /* all possible assigned tables have same size */
++ size = sizeof(ci_isp_uv_qtable75_per_cent) /
++ sizeof(ci_isp_uv_qtable75_per_cent[0]);
++ REG_SET_SLICE(mrv_reg->jpe_table_id, MRV_JPE_TABLE_ID,
++ MRV_JPE_TABLE_ID_QUANT1);
++ for (idx = 0; idx < (size - 1); idx += 2) {
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_H,
++ uvqtable[idx]);
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_L,
++ uvqtable[idx + 1]);
++ /* auto-increment register! */
++ REG_WRITE(mrv_reg->jpe_table_data, jpe_table_data);
++ }
++
++ /* Y AC-table 0 programming */
++
++ size = sizeof(ci_isp_ac_luma_table_annex_k) /
++ sizeof(ci_isp_ac_luma_table_annex_k[0]);
++ REG_SET_SLICE(mrv_reg->jpe_table_id, MRV_JPE_TABLE_ID,
++ MRV_JPE_TABLE_ID_VLC_AC0);
++ REG_SET_SLICE(mrv_reg->jpe_tac0_len, MRV_JPE_TAC0_LEN, size);
++ for (idx = 0; idx < (size - 1); idx += 2) {
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_H,
++ ci_isp_ac_luma_table_annex_k[idx]);
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_L,
++ ci_isp_ac_luma_table_annex_k[idx + 1]);
++ /* auto-increment register! */
++ REG_WRITE(mrv_reg->jpe_table_data, jpe_table_data);
++ }
++
++ /* U/V AC-table 1 programming */
++
++ size = sizeof(ci_isp_ac_chroma_table_annex_k) /
++ sizeof(ci_isp_ac_chroma_table_annex_k[0]);
++ REG_SET_SLICE(mrv_reg->jpe_table_id, MRV_JPE_TABLE_ID,
++ MRV_JPE_TABLE_ID_VLC_AC1);
++ REG_SET_SLICE(mrv_reg->jpe_tac1_len, MRV_JPE_TAC1_LEN, size);
++ for (idx = 0; idx < (size - 1); idx += 2) {
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_H,
++ ci_isp_ac_chroma_table_annex_k[idx]);
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_L,
++ ci_isp_ac_chroma_table_annex_k[idx + 1]);
++ /* auto-increment register! */
++ REG_WRITE(mrv_reg->jpe_table_data, jpe_table_data);
++ }
++
++ /* Y DC-table 0 programming */
++
++ size = sizeof(ci_isp_dc_luma_table_annex_k) /
++ sizeof(ci_isp_dc_luma_table_annex_k[0]);
++ REG_SET_SLICE(mrv_reg->jpe_table_id, MRV_JPE_TABLE_ID,
++ MRV_JPE_TABLE_ID_VLC_DC0);
++ REG_SET_SLICE(mrv_reg->jpe_tdc0_len, MRV_JPE_TDC0_LEN, size);
++ for (idx = 0; idx < (size - 1); idx += 2) {
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_H,
++ ci_isp_dc_luma_table_annex_k[idx]);
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_L,
++ ci_isp_dc_luma_table_annex_k[idx + 1]);
++ /* auto-increment register! */
++ REG_WRITE(mrv_reg->jpe_table_data, jpe_table_data);
++ }
++
++ /* U/V DC-table 1 programming */
++
++ size = sizeof(ci_isp_dc_chroma_table_annex_k) /
++ sizeof(ci_isp_dc_chroma_table_annex_k[0]);
++ REG_SET_SLICE(mrv_reg->jpe_table_id, MRV_JPE_TABLE_ID,
++ MRV_JPE_TABLE_ID_VLC_DC1);
++ REG_SET_SLICE(mrv_reg->jpe_tdc1_len, MRV_JPE_TDC1_LEN, size);
++ for (idx = 0; idx < (size - 1); idx += 2) {
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_H,
++ ci_isp_dc_chroma_table_annex_k[idx]);
++ REG_SET_SLICE(jpe_table_data, MRV_JPE_TABLE_WDATA_L,
++ ci_isp_dc_chroma_table_annex_k[idx + 1]);
++ /* auto-increment register! */
++ REG_WRITE(mrv_reg->jpe_table_data, jpe_table_data);
++ }
++}
++
++/*
++ * selects tables to be used by encoder
++ */
++void ci_isp_jpe_select_tables(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ /* selects quantization table for Y */
++ REG_SET_SLICE(mrv_reg->jpe_tq_y_select, MRV_JPE_TQ0_SELECT,
++ MRV_JPE_TQ_SELECT_TAB0);
++ /* selects quantization table for U */
++ REG_SET_SLICE(mrv_reg->jpe_tq_u_select, MRV_JPE_TQ1_SELECT,
++ MRV_JPE_TQ_SELECT_TAB1);
++ /* selects quantization table for V */
++ REG_SET_SLICE(mrv_reg->jpe_tq_v_select, MRV_JPE_TQ2_SELECT,
++ MRV_JPE_TQ_SELECT_TAB1);
++ /* selects Huffman DC table */
++ REG_SET_SLICE(mrv_reg->jpe_dc_table_select,
++ MRV_JPE_DC_TABLE_SELECT_Y, 0);
++ REG_SET_SLICE(mrv_reg->jpe_dc_table_select,
++ MRV_JPE_DC_TABLE_SELECT_U, 1);
++ REG_SET_SLICE(mrv_reg->jpe_dc_table_select,
++ MRV_JPE_DC_TABLE_SELECT_V, 1);
++ /* selects Huffman AC table */
++ REG_SET_SLICE(mrv_reg->jpe_ac_table_select,
++ MRV_JPE_AC_TABLE_SELECT_Y, 0);
++ REG_SET_SLICE(mrv_reg->jpe_ac_table_select,
++ MRV_JPE_AC_TABLE_SELECT_U, 1);
++ REG_SET_SLICE(mrv_reg->jpe_ac_table_select,
++ MRV_JPE_AC_TABLE_SELECT_V, 1);
++}
++
++/*
++ * configure JPEG encoder
++ */
++void ci_isp_jpe_set_config(u16 hsize, u16 vsize, int jpe_scale)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ /* JPEG image size */
++
++ REG_SET_SLICE(mrv_reg->jpe_enc_hsize, MRV_JPE_ENC_HSIZE, hsize);
++ REG_SET_SLICE(mrv_reg->jpe_enc_vsize, MRV_JPE_ENC_VSIZE, vsize);
++
++ if (jpe_scale) {
++ /* upscaling of BT601 color space to full range 0..255 */
++ REG_SET_SLICE(mrv_reg->jpe_y_scale_en, MRV_JPE_Y_SCALE_EN,
++ ENABLE);
++ REG_SET_SLICE(mrv_reg->jpe_cbcr_scale_en,
++ MRV_JPE_CBCR_SCALE_EN, ENABLE);
++ } else {
++ /* bypass scaler */
++ REG_SET_SLICE(mrv_reg->jpe_y_scale_en,
++ MRV_JPE_Y_SCALE_EN, DISABLE);
++ REG_SET_SLICE(mrv_reg->jpe_cbcr_scale_en,
++ MRV_JPE_CBCR_SCALE_EN, DISABLE);
++ }
++
++ /* picture format YUV 422 */
++ REG_SET_SLICE(mrv_reg->jpe_pic_format, MRV_JPE_ENC_PIC_FORMAT,
++ MRV_JPE_ENC_PIC_FORMAT_422);
++ REG_SET_SLICE(mrv_reg->jpe_table_flush, MRV_JPE_TABLE_FLUSH, 0);
++}
++
++int ci_isp_jpe_generate_header(struct mrst_isp_device *intel, u8 header_mode)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ WARN_ON(!((header_mode == MRV_JPE_HEADER_MODE_JFIF)
++ || (header_mode == MRV_JPE_HEADER_MODE_NO)));
++
++ /* clear jpeg gen_header_done interrupt */
++ /* since we poll them later to detect command completion */
++
++ REG_SET_SLICE(mrv_reg->jpe_status_icr, MRV_JPE_GEN_HEADER_DONE, 1);
++ REG_SET_SLICE(mrv_reg->jpe_header_mode, MRV_JPE_HEADER_MODE,
++ header_mode);
++
++ /* start header generation */
++ REG_SET_SLICE(mrv_reg->jpe_gen_header, MRV_JPE_GEN_HEADER, ON);
++
++ return ci_isp_jpe_wait_for_header_gen_done(intel);
++}
++
++void ci_isp_jpe_prep_enc(enum ci_isp_jpe_enc_mode jpe_enc_mode)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 jpe_encode = REG_READ(mrv_reg->jpe_encode);
++
++ /* clear jpeg encode_done interrupt */
++ /* since we poll them later to detect command completion */
++
++ REG_SET_SLICE(mrv_reg->jpe_status_icr, MRV_JPE_ENCODE_DONE, 1);
++ REG_SET_SLICE(jpe_encode, MRV_JPE_ENCODE, ON);
++
++ switch (jpe_enc_mode) {
++ case CI_ISP_JPE_LARGE_CONT_MODE:
++ /* motion JPEG with header generation */
++ REG_SET_SLICE(jpe_encode, MRV_JPE_CONT_MODE,
++ MRV_JPE_CONT_MODE_HEADER);
++ break;
++ case CI_ISP_JPE_SHORT_CONT_MODE:
++ /* motion JPEG only first frame with header */
++ REG_SET_SLICE(jpe_encode, MRV_JPE_CONT_MODE,
++ MRV_JPE_CONT_MODE_NEXT);
++ break;
++ default:
++ /* single shot JPEG */
++ REG_SET_SLICE(jpe_encode, MRV_JPE_CONT_MODE,
++ MRV_JPE_CONT_MODE_STOP);
++ break;
++ }
++
++ REG_WRITE(mrv_reg->jpe_encode, jpe_encode);
++ REG_SET_SLICE(mrv_reg->jpe_init, MRV_JPE_JP_INIT, 1);
++}
++
++/*
++ * wait until JPG Header is generated (MRV_JPGINT_GEN_HEADER_DONE
++ * interrupt occurs)
++ * waiting for JPG Header to be generated
++ */
++int ci_isp_jpe_wait_for_header_gen_done(struct mrst_isp_device *intel)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ mrst_timer_start();
++
++ while (!REG_GET_SLICE(mrv_reg->jpe_status_ris,
++ MRV_JPE_GEN_HEADER_DONE)) {
++ if (mrst_get_micro_sec() > 2000000) {
++ mrst_timer_stop();
++ eprintk("timeout");
++ return CI_STATUS_FAILURE;
++ }
++ }
++
++ mrst_timer_stop();
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * wait until JPG Encoder is done (MRV_JPGINT_ENCODE_DONE
++ * interrupt occurs) waiting for the JPG Encoder to be done
++ */
++int ci_isp_jpe_wait_for_encode_done(struct mrst_isp_device *intel)
++{
++#if 0
++ int ret = 0;
++ INIT_COMPLETION(intel->jpe_complete);
++ ret = wait_for_completion_interruptible_timeout(&intel->jpe_complete,
++ 100 * HZ);
++ if ((ret == 0) | (intel->irq_stat == IRQ_JPE_ERROR)) {
++ eprintk("timeout");
++ return CI_STATUS_FAILURE;
++ }
++
++ return CI_STATUS_SUCCESS;
++#endif
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ mrst_timer_start();
++
++ while (!REG_GET_SLICE(mrv_reg->jpe_status_ris,
++ MRV_JPE_ENCODE_DONE)) {
++ if (mrst_get_micro_sec() > 200000) {
++ mrst_timer_stop();
++ eprintk("timeout");
++ return CI_STATUS_FAILURE;
++ }
++ }
++
++ mrst_timer_stop();
++
++ /* clear jpeg encode_done interrupt */
++ REG_SET_SLICE(mrv_reg->jpe_status_icr, MRV_JPE_ENCODE_DONE, 1);
++
++ return CI_STATUS_SUCCESS;
++}
+diff --git a/drivers/media/video/mrstci/mrstisp/mrstisp_main.c b/drivers/media/video/mrstci/mrstisp/mrstisp_main.c
+new file mode 100644
+index 0000000..e37b3d1
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstisp/mrstisp_main.c
+@@ -0,0 +1,2977 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include "mrstisp_stdinc.h"
++#include "ci_isp_fmts_common.h"
++
++#define GPIO_SCLK_25 44
++#define GPIO_STDBY1_PIN 48
++#define GPIO_STDBY2_PIN 49
++#define GPIO_RESET_PIN 50
++
++int mrstisp_debug;
++module_param(mrstisp_debug, int, 0644);
++
++/*XXX*/
++static int frame_cnt;
++static long mipi_error_num;
++static u32 mipi_error_flag;
++static long isp_error_num;
++static u32 isp_error_flag;
++static unsigned long jiffies_start;
++static int mipi_flag;
++
++void intel_timer_start(void)
++{
++ jiffies_start = jiffies;
++}
++void intel_timer_stop(void)
++{
++ jiffies_start = 0;
++}
++unsigned long intel_get_micro_sec(void)
++{
++ unsigned long time_diff = 0;
++
++ time_diff = jiffies - jiffies_start;
++
++ return jiffies_to_msecs(time_diff);
++}
++
++
++static inline struct mrst_isp_device *to_isp(struct v4l2_device *dev)
++{
++ return container_of(dev, struct mrst_isp_device, v4l2_dev);
++}
++
++static struct mrst_camera mrst_camera_table[] = {
++ {
++ .type = MRST_CAMERA_SOC,
++ .name = "ov2650",
++ .sensor_addr = 0x30,
++ },
++ {
++ .type = MRST_CAMERA_SOC,
++ .name = "ov9665",
++ .sensor_addr = 0x30,
++ },
++ {
++ .type = MRST_CAMERA_RAW,
++ .name = "ov5630",
++ .sensor_addr = 0x36,
++ .motor_name = "ov5630_motor",
++ .motor_addr = (0x18 >> 1),
++ },
++ {
++ .type = MRST_CAMERA_RAW,
++ .name = "s5k4e1",
++ .sensor_addr = 0x36,
++ .motor_name = "s5k4e1_motor",
++ .motor_addr = (0x18 >> 1),
++ },
++};
++
++#define N_CAMERA (ARRAY_SIZE(mrst_camera_table))
++
++struct videobuf_dma_contig_memory {
++ u32 magic;
++ void *vaddr;
++ dma_addr_t dma_handle;
++ unsigned long size;
++ int is_userptr;
++};
++#define MAGIC_DC_MEM 0x0733ac61
++#define MAGIC_CHECK(is, should) \
++ if (unlikely((is) != (should))) { \
++ pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
++ BUG(); \
++ }
++/* flag to determine whether to do the handler of mblk_line irq */
++int mrst_isp_to_do_mblk_line;
++unsigned char *mrst_isp_regs;
++
++static inline struct ci_sensor_config *to_sensor_config(struct v4l2_subdev *sd)
++{
++ return container_of(sd, struct ci_sensor_config, sd);
++}
++
++/* g45-th20-b5 gamma out curve with enhanced black level */
++static struct ci_isp_gamma_out_curve g45_th20_b5 = {
++ {
++ 0x0000, 0x0014, 0x003C, 0x0064,
++ 0x00A0, 0x0118, 0x0171, 0x01A7,
++ 0x01D8, 0x0230, 0x027A, 0x02BB,
++ 0x0323, 0x0371, 0x03AD, 0x03DB,
++ 0x03FF}
++ ,
++ 0
++};
++
++static void print_snr_cfg(struct ci_sensor_config *cfg)
++{
++ dprintk(2, "bus width = %x", cfg->bus_width);
++ dprintk(2, "mode = %x", cfg->mode);
++ dprintk(2, "field_inv = %x", cfg->field_inv);
++ dprintk(2, "field_sel = %x", cfg->field_sel);
++ dprintk(2, "ycseq = %x", cfg->ycseq);
++ dprintk(2, "conv422 = %x", cfg->conv422);
++ dprintk(2, "bpat = %x", cfg->bpat);
++ dprintk(2, "hpol = %x", cfg->hpol);
++ dprintk(2, "vpol = %x", cfg->vpol);
++ dprintk(2, "edge = %x", cfg->edge);
++ dprintk(2, "bls = %x", cfg->bls);
++ dprintk(2, "gamma = %x", cfg->gamma);
++ dprintk(2, "cconv = %x", cfg->cconv);
++ dprintk(2, "res = %x", cfg->res);
++ dprintk(2, "blc = %x", cfg->blc);
++ dprintk(2, "agc = %x", cfg->agc);
++ dprintk(2, "awb = %x", cfg->awb);
++ dprintk(2, "aec = %x", cfg->aec);
++ dprintk(2, "cie_profile = %x", cfg->cie_profile);
++ dprintk(2, "flicker_freq = %x", cfg->flicker_freq);
++ dprintk(2, "smia_mode = %x", cfg->smia_mode);
++ dprintk(2, "mipi_mode = %x", cfg->mipi_mode);
++ dprintk(2, "type = %x", cfg->type);
++ dprintk(2, "name = %s", cfg->name);
++}
++
++static int mrst_isp_defcfg_all_load(struct ci_isp_config *isp_config)
++{
++
++ DBG_entering;
++
++ /* demosaic mode */
++ isp_config->demosaic_mode = CI_ISP_DEMOSAIC_ENHANCED;
++
++ /* bpc */
++ isp_config->bpc_cfg.bp_corr_type = CI_ISP_BP_CORR_DIRECT;
++ isp_config->bpc_cfg.bp_corr_rep = CI_ISP_BP_CORR_REP_NB;
++ isp_config->bpc_cfg.bp_corr_mode = CI_ISP_BP_CORR_HOT_DEAD_EN;
++ isp_config->bpc_cfg.bp_abs_hot_thres = 496;
++ isp_config->bpc_cfg.bp_abs_dead_thres = 20;
++ isp_config->bpc_cfg.bp_dev_hot_thres = 328;
++ isp_config->bpc_cfg.bp_dev_dead_thres = 328;
++ isp_config->bpd_cfg.bp_dead_thres = 1;
++
++ /* WB */
++ isp_config->wb_config.mrv_wb_mode = CI_ISP_AWB_AUTO;
++ isp_config->wb_config.mrv_wb_sub_mode = CI_ISP_AWB_AUTO_ON;
++ isp_config->wb_config.awb_pca_damping = 16;
++ isp_config->wb_config.awb_prior_exp_damping = 12;
++ isp_config->wb_config.awb_pca_push_damping = 16;
++ isp_config->wb_config.awb_prior_exp_push_damping = 12;
++ isp_config->wb_config.awb_auto_max_y = 254;
++ isp_config->wb_config.awb_push_max_y = 250;
++ isp_config->wb_config.awb_measure_max_y = 200;
++ isp_config->wb_config.awb_underexp_det = 10;
++ isp_config->wb_config.awb_push_underexp_det = 170;
++
++ /* CAC */
++ isp_config->cac_config.hsize = 2048;
++ isp_config->cac_config.vsize = 1536;
++ isp_config->cac_config.hcenter_offset = 0;
++ isp_config->cac_config.vcenter_offset = 0;
++ isp_config->cac_config.hclip_mode = 1;
++ isp_config->cac_config.vclip_mode = 2;
++ isp_config->cac_config.ablue = 24;
++ isp_config->cac_config.ared = 489;
++ isp_config->cac_config.bblue = 450;
++ isp_config->cac_config.bred = 53;
++ isp_config->cac_config.cblue = 40;
++ isp_config->cac_config.cred = 479;
++ isp_config->cac_config.aspect_ratio = 0.000000;
++
++ /* BLS */
++ isp_config->bls_cfg.enable_automatic = 0;
++ isp_config->bls_cfg.disable_h = 0;
++ isp_config->bls_cfg.disable_v = 0;
++ isp_config->bls_cfg.isp_bls_window1.enable_window = 0;
++ isp_config->bls_cfg.isp_bls_window1.start_h = 0;
++ isp_config->bls_cfg.isp_bls_window1.stop_h = 0;
++ isp_config->bls_cfg.isp_bls_window1.start_v = 0;
++ isp_config->bls_cfg.isp_bls_window1.stop_v = 0;
++ isp_config->bls_cfg.isp_bls_window2.enable_window = 0;
++ isp_config->bls_cfg.isp_bls_window2.start_h = 0;
++ isp_config->bls_cfg.isp_bls_window2.stop_h = 0;
++ isp_config->bls_cfg.isp_bls_window2.start_v = 0;
++ isp_config->bls_cfg.isp_bls_window2.stop_v = 0;
++ isp_config->bls_cfg.bls_samples = 5;
++ isp_config->bls_cfg.bls_subtraction.fixed_a = 0x100;
++ isp_config->bls_cfg.bls_subtraction.fixed_b = 0x100;
++ isp_config->bls_cfg.bls_subtraction.fixed_c = 0x100;
++ isp_config->bls_cfg.bls_subtraction.fixed_d = 0x100;
++
++ /* AF */
++ isp_config->af_cfg.wnd_pos_a.hoffs = 874;
++ isp_config->af_cfg.wnd_pos_a.voffs = 618;
++ isp_config->af_cfg.wnd_pos_a.hsize = 300;
++ isp_config->af_cfg.wnd_pos_a.vsize = 300;
++ isp_config->af_cfg.wnd_pos_b.hoffs = 0;
++ isp_config->af_cfg.wnd_pos_b.voffs = 0;
++ isp_config->af_cfg.wnd_pos_b.hsize = 0;
++ isp_config->af_cfg.wnd_pos_b.vsize = 0;
++ isp_config->af_cfg.wnd_pos_c.hoffs = 0;
++ isp_config->af_cfg.wnd_pos_c.voffs = 0;
++ isp_config->af_cfg.wnd_pos_c.hsize = 0;
++ isp_config->af_cfg.wnd_pos_c.vsize = 0;
++ isp_config->af_cfg.threshold = 0x00000000;
++
++ /* color */
++ isp_config->color.contrast = 128;
++ isp_config->color.brightness = 0;
++ isp_config->color.saturation = 128;
++ isp_config->color.hue = 0;
++
++ /* Img Effect */
++ isp_config->img_eff_cfg.mode = CI_ISP_IE_MODE_OFF;
++ isp_config->img_eff_cfg.color_sel = 4;
++ isp_config->img_eff_cfg.color_thres = 128;
++ isp_config->img_eff_cfg.tint_cb = 108;
++ isp_config->img_eff_cfg.tint_cr = 141;
++ isp_config->img_eff_cfg.mat_emboss.coeff_11 = 2;
++ isp_config->img_eff_cfg.mat_emboss.coeff_12 = 1;
++ isp_config->img_eff_cfg.mat_emboss.coeff_13 = 0;
++ isp_config->img_eff_cfg.mat_emboss.coeff_21 = 1;
++ isp_config->img_eff_cfg.mat_emboss.coeff_22 = 0;
++ isp_config->img_eff_cfg.mat_emboss.coeff_23 = -1;
++ isp_config->img_eff_cfg.mat_emboss.coeff_31 = 0;
++ isp_config->img_eff_cfg.mat_emboss.coeff_32 = -1;
++ isp_config->img_eff_cfg.mat_emboss.coeff_33 = -2;
++ isp_config->img_eff_cfg.mat_sketch.coeff_11 = -1;
++ isp_config->img_eff_cfg.mat_sketch.coeff_12 = -1;
++ isp_config->img_eff_cfg.mat_sketch.coeff_13 = -1;
++ isp_config->img_eff_cfg.mat_sketch.coeff_21 = -1;
++ isp_config->img_eff_cfg.mat_sketch.coeff_22 = 8;
++ isp_config->img_eff_cfg.mat_sketch.coeff_23 = -1;
++ isp_config->img_eff_cfg.mat_sketch.coeff_31 = -1;
++ isp_config->img_eff_cfg.mat_sketch.coeff_32 = -1;
++ isp_config->img_eff_cfg.mat_sketch.coeff_33 = -1;
++
++ /* Framefun */
++ isp_config->flags.bls = 0;
++ isp_config->flags.lsc = 0;
++ isp_config->flags.bpc = 0;
++ isp_config->flags.awb = 0;
++ isp_config->flags.aec = 0;
++ isp_config->flags.af = 0;
++ isp_config->flags.cp = 0;
++ isp_config->flags.gamma = 0;
++ isp_config->flags.cconv = 0;
++ isp_config->flags.demosaic = 0;
++ isp_config->flags.gamma2 = 0;
++ isp_config->flags.isp_filters = 0;
++ isp_config->flags.cac = 0;
++ isp_config->flags.cconv_basic = 0;
++ isp_config->demosaic_th = 4;
++
++ isp_config->view_finder.flags = VFFLAG_HWRGB;
++
++ isp_config->afm_mode = 1;
++ isp_config->filter_level_noise_reduc = 4;
++ isp_config->filter_level_sharp = 4;
++
++ isp_config->jpeg_enc_ratio = 1;
++
++ DBG_leaving;
++ return 0;
++}
++
++static void mrst_isp_update_marvinvfaddr(struct mrst_isp_device *isp,
++ u32 buffer_base,
++ enum ci_isp_conf_update_time update_time)
++{
++ struct ci_isp_mi_path_conf isp_mi_path_conf;
++ struct ci_isp_mi_path_conf isp_sf_mi_path_conf;
++ static struct v4l2_jpg_review_buffer *jpg_review;
++ u32 bufsize = 0;
++ u32 w;
++ u32 h;
++
++ jpg_review = &isp->sys_conf.jpg_review;
++ memset(&isp_mi_path_conf, 0, sizeof(struct ci_isp_mi_path_conf));
++ memset(&isp_sf_mi_path_conf, 0, sizeof(struct ci_isp_mi_path_conf));
++
++ w = isp_mi_path_conf.llength = isp->bufwidth;
++ h = isp_mi_path_conf.ypic_height = isp->bufheight;
++ isp_mi_path_conf.ypic_width = isp->bufwidth;
++
++ /*XXX Zheng: disable jpg review for MIPI sensor */
++ /*if ((isp->sys_conf.isi_config)->mipi_mode == SENSOR_MIPI_MODE_RAW_10)
++ isp->sys_conf.jpg_review_enable = 0;
++ */
++
++ if (isp->sys_conf.jpg_review_enable) {
++
++ /* for self path, JPEG review */
++ isp_sf_mi_path_conf.ypic_width = jpg_review->width;
++ isp_sf_mi_path_conf.llength = jpg_review->width;
++ isp_sf_mi_path_conf.ypic_height = jpg_review->height;
++
++ bufsize = jpg_review->width * jpg_review->height;
++
++ /* buffer size in bytes */
++ if (jpg_review->pix_fmt == V4L2_PIX_FMT_YUV420
++ || jpg_review->pix_fmt == V4L2_PIX_FMT_YVU420) {
++
++ dprintk(3, "VF yuv420 fmt");
++ isp_sf_mi_path_conf.ybuffer.size = bufsize;
++ isp_sf_mi_path_conf.cb_buffer.size = bufsize/4;
++ isp_sf_mi_path_conf.cr_buffer.size = bufsize/4;
++
++ } else if (jpg_review->pix_fmt == V4L2_PIX_FMT_YUV422P) {
++
++ dprintk(3, "VF yuv422 fmt");
++ isp_sf_mi_path_conf.ybuffer.size = bufsize;
++ isp_sf_mi_path_conf.cb_buffer.size = bufsize/2;
++ isp_sf_mi_path_conf.cr_buffer.size = bufsize/2;
++
++ } else if (jpg_review->pix_fmt == V4L2_PIX_FMT_NV12) {
++
++ dprintk(3, "VF nv12 fmt");
++ isp_sf_mi_path_conf.ybuffer.size = bufsize;
++ isp_sf_mi_path_conf.cb_buffer.size = bufsize/2;
++ isp_sf_mi_path_conf.cr_buffer.size = 0;
++
++ } else {
++ printk(KERN_ERR "mrstisp: no support jpg review fmt\n");
++ }
++
++ /* buffer address */
++ if (isp_sf_mi_path_conf.ybuffer.size != 0) {
++ isp_sf_mi_path_conf.ybuffer.pucbuffer =
++ (u8 *)(unsigned long)
++ isp->mb1 + isp->mb1_size - 640*480*2;
++ }
++
++ if (isp_sf_mi_path_conf.cb_buffer.size != 0) {
++ isp_sf_mi_path_conf.cb_buffer.pucbuffer =
++ isp_sf_mi_path_conf.ybuffer.pucbuffer +
++ isp_sf_mi_path_conf.ybuffer.size;
++ }
++
++ if (isp_sf_mi_path_conf.cr_buffer.size != 0) {
++ isp_sf_mi_path_conf.cr_buffer.pucbuffer =
++ isp_sf_mi_path_conf.cb_buffer.pucbuffer +
++ isp_sf_mi_path_conf.cb_buffer.size;
++ }
++
++ if (jpg_review->pix_fmt == V4L2_PIX_FMT_YVU420) {
++ isp_sf_mi_path_conf.cr_buffer.pucbuffer =
++ isp_sf_mi_path_conf.ybuffer.pucbuffer +
++ isp_sf_mi_path_conf.ybuffer.size;
++ isp_sf_mi_path_conf.cb_buffer.pucbuffer =
++ isp_sf_mi_path_conf.cr_buffer.pucbuffer +
++ isp_sf_mi_path_conf.cr_buffer.size;
++ }
++
++ }
++
++ if (isp->pixelformat == V4L2_PIX_FMT_YUV420 ||
++ isp->pixelformat == V4L2_PIX_FMT_YVU420 ||
++ isp->pixelformat == V4L2_PIX_FMT_YUV422P ||
++ isp->pixelformat == V4L2_PIX_FMT_NV12) {
++ bufsize = w*h;
++ } else
++ bufsize = isp->frame_size;
++
++ /* buffer size in bytes */
++ if (isp->pixelformat == V4L2_PIX_FMT_YUV420
++ || isp->pixelformat == V4L2_PIX_FMT_YVU420) {
++
++ dprintk(3, "yuv420 fmt");
++ isp_mi_path_conf.ybuffer.size = bufsize;
++ isp_mi_path_conf.cb_buffer.size = bufsize/4;
++ isp_mi_path_conf.cr_buffer.size = bufsize/4;
++ } else if (isp->pixelformat == V4L2_PIX_FMT_YUV422P) {
++
++ dprintk(3, "yuv422 fmt");
++ isp_mi_path_conf.ybuffer.size = bufsize;
++ isp_mi_path_conf.cb_buffer.size = bufsize/2;
++ isp_mi_path_conf.cr_buffer.size = bufsize/2;
++ } else if (isp->pixelformat == V4L2_PIX_FMT_NV12) {
++
++ dprintk(3, "nv12 fmt");
++ isp_mi_path_conf.ybuffer.size = bufsize;
++ isp_mi_path_conf.cb_buffer.size = bufsize/2;
++ isp_mi_path_conf.cr_buffer.size = 0;
++ } else {
++
++ dprintk(3, "jpeg and rgb fmt");
++ isp_mi_path_conf.ybuffer.size = bufsize;
++ isp_mi_path_conf.cb_buffer.size = 0;
++ isp_mi_path_conf.cr_buffer.size = 0;
++ }
++
++ /* buffer address */
++ if (isp_mi_path_conf.ybuffer.size != 0) {
++ isp_mi_path_conf.ybuffer.pucbuffer =
++ (u8 *)(unsigned long) buffer_base;
++ }
++
++ if (isp_mi_path_conf.cb_buffer.size != 0) {
++ isp_mi_path_conf.cb_buffer.pucbuffer =
++ isp_mi_path_conf.ybuffer.pucbuffer +
++ isp_mi_path_conf.ybuffer.size;
++ }
++
++ if (isp_mi_path_conf.cr_buffer.size != 0) {
++ isp_mi_path_conf.cr_buffer.pucbuffer =
++ isp_mi_path_conf.cb_buffer.pucbuffer +
++ isp_mi_path_conf.cb_buffer.size;
++ }
++
++ if (isp->pixelformat == V4L2_PIX_FMT_YVU420) {
++ isp_mi_path_conf.cr_buffer.pucbuffer =
++ isp_mi_path_conf.ybuffer.pucbuffer +
++ isp_mi_path_conf.ybuffer.size;
++ isp_mi_path_conf.cb_buffer.pucbuffer =
++ isp_mi_path_conf.cr_buffer.pucbuffer +
++ isp_mi_path_conf.cr_buffer.size;
++ }
++
++ if (isp->sys_conf.isp_cfg.view_finder.flags & VFFLAG_USE_MAINPATH) {
++ ci_isp_mif_set_main_buffer(&isp_mi_path_conf, update_time);
++ if (isp->pixelformat == V4L2_PIX_FMT_JPEG)
++ if (isp->sys_conf.jpg_review_enable)
++ ci_isp_mif_set_self_buffer(
++ &isp_sf_mi_path_conf, update_time);
++ } else {
++ ci_isp_mif_set_self_buffer(&isp_mi_path_conf, update_time);
++ }
++}
++
++static int mrst_isp_setup_viewfinder_path(struct mrst_isp_device *isp,
++ struct ci_sensor_config *isi_config,
++ int zoom)
++{
++ int error = CI_STATUS_SUCCESS;
++ struct ci_isp_datapath_desc dp_main;
++ struct ci_isp_datapath_desc dp_self;
++ struct ci_isp_rect self_rect;
++ u16 isi_hsize;
++ u16 isi_vsize;
++ int jpe_scale;
++ struct ci_pl_system_config *sys_conf = &isp->sys_conf;
++ struct ci_isp_config *config = &sys_conf->isp_cfg;
++ struct v4l2_jpg_review_buffer *jpg_review = &sys_conf->jpg_review;
++ u32 dp_mode;
++
++ DBG_entering;
++
++ if (sys_conf->isp_cfg.flags.ycbcr_full_range)
++ jpe_scale = false;
++ else
++ jpe_scale = true;
++
++ memset(&dp_main, 0, sizeof(struct ci_isp_datapath_desc));
++ memset(&dp_self, 0, sizeof(struct ci_isp_datapath_desc));
++
++ self_rect.x = 0;
++ self_rect.y = 0;
++ self_rect.w = isp->bufwidth; /* 640 */
++ self_rect.h = isp->bufheight; /* 480 */
++
++ if (isp->pixelformat == V4L2_PIX_FMT_JPEG) {
++
++ dprintk(1, "jpeg fmt");
++
++ dp_main.flags = CI_ISP_DPD_ENABLE | CI_ISP_DPD_MODE_ISPJPEG;
++ config->view_finder.flags |= VFFLAG_USE_MAINPATH;
++
++ dp_main.out_w = (u16) isp->bufwidth;
++ dp_main.out_h = (u16) isp->bufheight;
++
++ if (isp->sys_conf.jpg_review_enable) {
++
++ dprintk(1, "jpg_review enabled in VF");
++
++ self_rect.w = jpg_review->width;
++ self_rect.h = jpg_review->height;
++
++ dp_self.flags = (CI_ISP_DPD_ENABLE
++ | CI_ISP_DPD_MODE_ISPYC);
++ if (jpg_review->pix_fmt == V4L2_PIX_FMT_YUV420 ||
++ jpg_review->pix_fmt == V4L2_PIX_FMT_YVU420)
++ dp_self.flags |= CI_ISP_DPD_YUV_420
++ | CI_ISP_DPD_CSS_V2;
++ else if (jpg_review->pix_fmt == V4L2_PIX_FMT_YUV422P)
++ dp_self.flags |= CI_ISP_DPD_YUV_422;
++ else if (jpg_review->pix_fmt == V4L2_PIX_FMT_NV12)
++ dp_self.flags |= CI_ISP_DPD_YUV_NV12
++ | CI_ISP_DPD_CSS_V2;
++ else if (jpg_review->pix_fmt == V4L2_PIX_FMT_YUYV)
++ dp_self.flags |= CI_ISP_DPD_YUV_YUYV;
++
++ dprintk(1, "dp_self.flags is 0x%x", dp_self.flags);
++ }
++
++ } else if (isp->pixelformat == INTEL_PIX_FMT_RAW08) {
++
++ dp_main.flags = CI_ISP_DPD_ENABLE | CI_ISP_DPD_MODE_ISPRAW;
++ config->view_finder.flags |= VFFLAG_USE_MAINPATH;
++
++ /*just take the output of the sensor without any resizing*/
++ dp_main.flags |= CI_ISP_DPD_NORESIZE;
++ (void)ci_sensor_res2size(isi_config->res,
++ &(dp_main.out_w), &(dp_main.out_h));
++
++ dprintk(1, "RAW08 dp_main.flags is 0x%x", dp_main.flags);
++
++ } else if (isp->pixelformat == INTEL_PIX_FMT_RAW10
++ || isp->pixelformat == INTEL_PIX_FMT_RAW12) {
++
++ dp_main.flags = (CI_ISP_DPD_ENABLE
++ | CI_ISP_DPD_MODE_ISPRAW_16B);
++ config->view_finder.flags |= VFFLAG_USE_MAINPATH;
++
++ /*just take the output of the sensor without any resizing*/
++ dp_main.flags |= CI_ISP_DPD_NORESIZE;
++ (void)ci_sensor_res2size(isi_config->res,
++ &(dp_main.out_w), &(dp_main.out_h));
++
++ dprintk(1, "RAW10 dp_main.flags is 0x%x", dp_main.flags);
++
++ } /*else if (isp->bufwidth >= 640 && isp->bufheight >= 480) {*/
++ else if (isp->bufwidth >= 32 && isp->bufheight >= 16) {
++
++ dp_main.flags = (CI_ISP_DPD_ENABLE | CI_ISP_DPD_MODE_ISPYC);
++ dp_main.out_w = (u16) isp->bufwidth;
++ dp_main.out_h = (u16) isp->bufheight;
++ config->view_finder.flags |= VFFLAG_USE_MAINPATH;
++
++ if (isp->pixelformat == V4L2_PIX_FMT_YUV420 ||
++ isp->pixelformat == V4L2_PIX_FMT_YVU420)
++ dp_main.flags |= CI_ISP_DPD_YUV_420 | CI_ISP_DPD_CSS_V2;
++ else if (isp->pixelformat == V4L2_PIX_FMT_YUV422P)
++ dp_main.flags |= CI_ISP_DPD_YUV_422;
++ else if (isp->pixelformat == V4L2_PIX_FMT_NV12) {
++ /* to use crop set crop_flag first */
++ dp_main.flags |= CI_ISP_DPD_YUV_NV12;
++ if (!crop_flag)
++ dp_main.flags |= CI_ISP_DPD_CSS_V2;
++ } else if (isp->pixelformat == V4L2_PIX_FMT_YUYV)
++ dp_main.flags |= CI_ISP_DPD_YUV_YUYV;
++
++ dprintk(1, "YUV dp_main.flags is 0x%x", dp_main.flags);
++
++ } /* else if (isp->bufwidth <= 640 && isp->bufheight <= 480) {
++
++ dp_self.flags = (CI_ISP_DPD_ENABLE | CI_ISP_DPD_MODE_ISPYC);
++
++ if (isp->pixelformat == V4L2_PIX_FMT_YUV420 ||
++ isp->pixelformat == V4L2_PIX_FMT_YVU420)
++ dp_self.flags |= CI_ISP_DPD_YUV_420 | CI_ISP_DPD_CSS_V2;
++ else if (isp->pixelformat == V4L2_PIX_FMT_YUV422P)
++ dp_self.flags |= CI_ISP_DPD_YUV_422;
++ else if (isp->pixelformat == V4L2_PIX_FMT_NV12)
++ dp_self.flags |= CI_ISP_DPD_YUV_NV12
++ | CI_ISP_DPD_CSS_V2;
++ else if (isp->pixelformat == V4L2_PIX_FMT_YUYV)
++ dp_self.flags |= CI_ISP_DPD_YUV_YUYV;
++ else if (isp->pixelformat == V4L2_PIX_FMT_RGB565)
++ dp_self.flags |= CI_ISP_DPD_HWRGB_565;
++ else if (isp->pixelformat == V4L2_PIX_FMT_BGR32)
++ dp_self.flags |= CI_ISP_DPD_HWRGB_888;
++
++ dprintk(1, "YUV dp_self.flags is 0x%x", dp_self.flags);
++ }
++ */
++
++ dprintk(1, "sensor_res = %x", isi_config->res);
++
++ (void)ci_sensor_res2size(isi_config->res, &isi_hsize, &isi_vsize);
++ dprintk(1, "self path: w:%d, h:%d; sensor: w:%d, h:%d",
++ self_rect.w, self_rect.h, isi_hsize, isi_vsize);
++ dprintk(1, "main path: out_w:%d, out_h:%d ",
++ dp_main.out_w, dp_main.out_h);
++
++ /* no stretching/squeezing */
++ if (dp_self.flags && CI_ISP_DPD_ENABLE)
++ dp_self.flags |= CI_ISP_DPD_KEEPRATIO;
++ else
++ dp_main.flags |= CI_ISP_DPD_KEEPRATIO;
++
++ /* prepare datapath, 640x480, can changed to the bufsize */
++ dp_self.out_w = (u16) self_rect.w;
++ dp_self.out_h = (u16) self_rect.h;
++
++ if (sys_conf->isp_cfg.view_finder.flags & VFFLAG_HWRGB) {
++ /* YCbCr to RGB conversion in hardware */
++ if (isp->pixelformat == V4L2_PIX_FMT_RGB565)
++ dp_self.flags |= CI_ISP_DPD_HWRGB_565;
++ if (isp->pixelformat == V4L2_PIX_FMT_BGR32)
++ dp_self.flags |= CI_ISP_DPD_HWRGB_888;
++ }
++
++ if (sys_conf->isp_cfg.view_finder.flags & VFFLAG_MIRROR)
++ dp_self.flags |= CI_ISP_DPD_H_FLIP;
++
++
++ if (sys_conf->isp_cfg.view_finder.flags & VFFLAG_V_FLIP)
++ dp_self.flags |= CI_ISP_DPD_V_FLIP;
++
++
++ if (sys_conf->isp_cfg.view_finder.flags & VFFLAG_ROT90_CCW)
++ dp_self.flags |= CI_ISP_DPD_90DEG_CCW;
++
++ /* setup self & main path with zoom */
++ if (zoom < 0)
++ zoom = sys_conf->isp_cfg.view_finder.zoom;
++
++ if (sys_conf->isp_cfg.view_finder.flags & VFFLAG_USE_MAINPATH) {
++ /* For RAW snapshots, we have to bypass the ISP too */
++ dp_mode = dp_main.flags & CI_ISP_DPD_MODE_MASK;
++ if ((dp_mode == CI_ISP_DPD_MODE_ISPRAW) ||
++ (dp_mode == CI_ISP_DPD_MODE_ISPRAW_16B)) {
++ struct ci_sensor_config isi_conf;
++ /* isi_conf = *sys_conf->isi_config; */
++ isi_conf = *isi_config;
++ isi_conf.mode = SENSOR_MODE_PICT;
++ error = ci_isp_set_input_aquisition(&isi_conf);
++ if (error != CI_STATUS_SUCCESS)
++ eprintk("33");
++ }
++ }
++ /* to use crop mode, set crop_flag */
++ if (crop_flag)
++ dp_main.flags |= CI_ISP_DPD_NORESIZE;
++
++ error = ci_datapath_isp(sys_conf, isi_config, &dp_main, &dp_self, zoom);
++ if (error != CI_STATUS_SUCCESS) {
++ printk(KERN_ERR "mrstisp: failed to setup marvins datapath\n");
++ return error;
++ }
++
++ DBG_leaving;
++ return error;
++}
++
++static int mrst_isp_init_mrv_image_effects(struct ci_pl_system_config *sys_conf,
++ int enable)
++{
++ int res;
++
++ DBG_entering;
++
++ if (enable && sys_conf->isp_cfg.img_eff_cfg.mode
++ != CI_ISP_IE_MODE_OFF) {
++ res = ci_isp_ie_set_config(&(sys_conf->isp_cfg.img_eff_cfg));
++ if (res != CI_STATUS_SUCCESS)
++ printk(KERN_ERR "mrstisp: error setting ie config\n");
++ } else {
++ (void)ci_isp_ie_set_config(NULL);
++ res = CI_STATUS_SUCCESS;
++ }
++
++ DBG_leaving;
++ return res;
++}
++
++static int mrst_isp_init_mrvisp_lensshade(struct ci_pl_system_config *sys_conf,
++ int enable)
++{
++ if (enable) {
++ ci_isp_set_ls_correction(&sys_conf->isp_cfg.lsc_cfg);
++ ci_isp_ls_correction_on_off(1);
++ } else {
++ ci_isp_ls_correction_on_off(0);
++ }
++ return CI_STATUS_SUCCESS;
++}
++
++static int mrst_isp_init_mrvisp_badpixel(const struct ci_pl_system_config
++ *sys_conf, int enable)
++{
++ if ((enable) && (sys_conf->isp_cfg.flags.bpc)) {
++ (void)ci_bp_init(&sys_conf->isp_cfg.bpc_cfg,
++ &sys_conf->isp_cfg.bpd_cfg);
++ } else {
++ (void)ci_bp_end(&sys_conf->isp_cfg.bpc_cfg);
++ (void)ci_isp_set_bp_correction(NULL);
++ (void)ci_isp_set_bp_detection(NULL);
++ }
++ return CI_STATUS_SUCCESS;
++}
++
++static int mrst_isp_init_mrv_ispfilter(const struct ci_pl_system_config
++ *sys_conf, int enable)
++{
++ int res;
++
++ DBG_entering;
++
++ if ((enable) && (sys_conf->isp_cfg.flags.isp_filters)) {
++ ci_isp_activate_filter(true);
++ res = ci_isp_set_filter_params(sys_conf->isp_cfg.
++ filter_level_noise_reduc,
++ sys_conf->isp_cfg.
++ filter_level_sharp);
++ if (res != CI_STATUS_SUCCESS)
++ printk(KERN_ERR "mrstisp: error set filter param\n");
++ } else {
++ ci_isp_activate_filter(false);
++ res = CI_STATUS_SUCCESS;
++ }
++
++ DBG_leaving;
++ return res;
++}
++
++static int mrst_isp_init_mrvisp_cac(const struct ci_pl_system_config *sys_conf,
++ int enable)
++{
++ return 0;
++}
++
++static int mrst_isp_initbls(const struct ci_pl_system_config *sys_conf)
++{
++ struct ci_isp_bls_config *bls_config =
++ (struct ci_isp_bls_config *)&sys_conf->isp_cfg.bls_cfg;
++ return ci_isp_bls_set_config(bls_config);
++}
++
++static int mrst_isp_dp_init(struct ci_pl_system_config *sys_conf,
++ struct ci_sensor_config *isi_config)
++{
++ int error;
++ u8 words_per_pixel;
++
++ DBG_entering;
++
++ /* base initialisation of Marvin */
++ ci_isp_init();
++
++ /* setup input acquisition according to image sensor settings */
++ print_snr_cfg(isi_config);
++ error = ci_isp_set_input_aquisition(isi_config);
++ if (error) {
++ printk(KERN_ERR "mrstisp: error setting input acquisition\n");
++ return error;
++ }
++
++ /* setup functional blocks for Bayer pattern processing */
++ if (ci_isp_select_path(isi_config, &words_per_pixel)
++ == CI_ISP_PATH_BAYER) {
++
++ /* black level */
++ if (sys_conf->isp_cfg.flags.bls) {
++ error = mrst_isp_initbls(sys_conf);
++ if (error != CI_STATUS_SUCCESS) {
++ printk(KERN_ERR "mrstisp: error set bls\n");
++ return error;
++ }
++ } else {
++ ci_isp_bls_set_config(NULL);
++ }
++
++ /* gamma */
++ if (sys_conf->isp_cfg.flags.gamma2) {
++ dprintk(1, "setting gamma 2 ");
++ ci_isp_set_gamma2(&g45_th20_b5);
++ } else {
++ dprintk(1, "no setting gamma 2 ");
++ ci_isp_set_gamma2(NULL);
++ }
++
++ /* demosaic */
++ ci_isp_set_demosaic(sys_conf->isp_cfg.demosaic_mode,
++ sys_conf->isp_cfg.demosaic_th);
++
++ /* color convertion */
++ if (sys_conf->isp_cfg.flags.cconv) {
++ if (!sys_conf->isp_cfg.flags.cconv_basic) {
++ mrst_isp_set_color_conversion_ex();
++ /* set color converstion skipped by xiaolin,
++ * to be done in libci */
++ if (error != CI_STATUS_SUCCESS) {
++ printk(KERN_ERR "mrstisp: error set"
++ " color conversion\n");
++ return error;
++ }
++ }
++ }
++
++ /* af setting */
++ if (sys_conf->isp_cfg.flags.af)
++ ci_isp_set_auto_focus(&sys_conf->isp_cfg.af_cfg);
++ else
++ ci_isp_set_auto_focus(NULL);
++
++ /* filter */
++ mrst_isp_init_mrv_ispfilter(sys_conf, true);
++
++ /* cac */
++ mrst_isp_init_mrvisp_cac(sys_conf, true);
++ }
++
++ /*
++ * disable color processing for now (will be set under user control
++ * in the main loop)
++ */
++ ci_isp_col_set_color_processing(NULL);
++
++ /* configure image effects */
++ mrst_isp_init_mrv_image_effects(sys_conf, true);
++
++ /* configure lens shading correction */
++ if (strcmp(isi_config->name, "s5k4e1") == 0
++ && (isi_config->res == SENSOR_RES_720P
++ || isi_config->res == SENSOR_RES_QXGA_PLUS)) {
++ dprintk(1, "enabling lsc for kmot 720p and qsxga\n");
++ mrst_isp_init_mrvisp_lensshade(sys_conf, true);
++ } else
++ mrst_isp_init_mrvisp_lensshade(sys_conf,
++ sys_conf->isp_cfg.flags.lsc);
++
++ /* configure bad pixel detection/correction */
++ mrst_isp_init_mrvisp_badpixel(sys_conf, true);
++
++ DBG_leaving;
++ return CI_STATUS_SUCCESS;
++}
++
++int ci_jpe_encode(struct mrst_isp_device *intel,
++ enum ci_isp_conf_update_time update_time,
++ enum ci_isp_jpe_enc_mode mrv_jpe_encMode)
++{
++ u32 mipi_data_id = 1;
++ struct isp_register *mrv_reg =
++ (struct isp_register *) MEM_MRV_REG_BASE;
++
++ ci_isp_jpe_prep_enc(mrv_jpe_encMode);
++
++ if (to_sensor_config(intel->sensor_curr)->mipi_mode) {
++ ci_isp_start(1, update_time);
++ v4l2_subdev_call(intel->sensor_curr, video, s_stream, 1);
++ if (mipi_flag)
++ while (mipi_data_id)
++ mipi_data_id =
++ REG_READ_EX(mrv_reg->mipi_cur_data_id);
++ mipi_flag = 0;
++
++ } else
++ ci_isp_start(1, update_time);
++
++ return ci_isp_jpe_wait_for_encode_done(intel);
++}
++
++/* capture one frame */
++u32 ci_jpe_capture(struct mrst_isp_device *isp,
++ enum ci_isp_conf_update_time update_time)
++{
++ int retval = CI_STATUS_SUCCESS;
++
++ /* generate header */
++ retval = ci_isp_jpe_generate_header(isp, MRV_JPE_HEADER_MODE_JFIF);
++ if (retval != CI_STATUS_SUCCESS)
++ return 0;
++
++ /* now encode JPEG */
++ retval = ci_jpe_encode(isp, update_time, CI_ISP_JPE_SINGLE_SHOT);
++ if (retval != CI_STATUS_SUCCESS)
++ return 0;
++
++ /* return ci_isp_mif_get_byte_cnt(); */
++ return 0;
++}
++
++static int mrst_ci_capture(struct mrst_isp_device *isp)
++{
++ u32 bufbase;
++ u32 mipi_data_id = 1;
++ struct videobuf_buffer *vb;
++ struct isp_register *mrv_reg =
++ (struct isp_register *) MEM_MRV_REG_BASE;
++
++ bufbase = videobuf_to_dma_contig(isp->active);
++ mrst_isp_update_marvinvfaddr(isp, bufbase, CI_ISP_CFG_UPDATE_IMMEDIATE);
++ ci_isp_mif_reset_offsets(CI_ISP_CFG_UPDATE_IMMEDIATE);
++
++ ci_isp_reset_interrupt_status();
++ mrst_isp_enable_interrupt(isp);
++
++ if (isp->pixelformat == V4L2_PIX_FMT_JPEG) {
++ mrst_isp_disable_interrupt(isp);
++ ci_isp_jpe_init_ex(isp->bufwidth, isp->bufheight,
++ isp->sys_conf.isp_cfg.jpeg_enc_ratio,
++ true);
++ ci_jpe_capture(isp, CI_ISP_CFG_UPDATE_FRAME_SYNC);
++
++ vb = isp->active;
++ vb->size = ci_isp_mif_get_byte_cnt();
++ vb->state = VIDEOBUF_DONE;
++ do_gettimeofday(&vb->ts);
++ vb->field_count++;
++ wake_up(&vb->done);
++ isp->active = NULL;
++
++ dprintk(2, "countcount = %lx", vb->size);
++ } else if (isp->pixelformat == INTEL_PIX_FMT_RAW08
++ || isp->pixelformat == INTEL_PIX_FMT_RAW10
++ || isp->pixelformat == INTEL_PIX_FMT_RAW12) {
++ mrst_isp_disable_interrupt(isp);
++ ci_isp_start(1, CI_ISP_CFG_UPDATE_FRAME_SYNC);
++ ci_isp_wait_for_frame_end(isp);
++
++ /* update captured frame status */
++ vb = isp->active;
++ /* vb->size = ci_isp_mif_get_byte_cnt(); */
++ vb->state = VIDEOBUF_DONE;
++ do_gettimeofday(&vb->ts);
++ vb->field_count++;
++ wake_up(&vb->done);
++ isp->active = NULL;
++ /* ci_isp_reg_dump_all(); */
++ dprintk(3, "captured index = %d", vb->i);
++ } else if (to_sensor_config(isp->sensor_curr)->mipi_mode) {
++ ci_isp_start(0, CI_ISP_CFG_UPDATE_IMMEDIATE);
++
++ if (mipi_flag) {
++ v4l2_subdev_call(isp->sensor_curr, video, s_stream, 1);
++
++ while (mipi_data_id) {
++ mipi_data_id =
++ REG_READ_EX(mrv_reg->mipi_cur_data_id);
++ dprintk(5, "mipi_cur_data_id = %x",
++ mipi_data_id);
++ }
++ mipi_flag = 0;
++ }
++ } else
++ ci_isp_start(0, CI_ISP_CFG_UPDATE_FRAME_SYNC);
++
++ return 0;
++}
++
++static int buffer_setup(struct videobuf_queue *vq, unsigned int *count,
++ unsigned int *size)
++{
++ struct mrst_isp_fh *fh = vq->priv_data;
++ struct mrst_isp_device *isp = fh->dev;
++
++ u32 w = isp->bufwidth;
++ u32 h = isp->bufheight;
++ u32 depth = isp->depth;
++ u32 fourcc = isp->pixelformat;
++
++ if (fourcc == V4L2_PIX_FMT_JPEG) {
++ *size = PAGE_ALIGN((isp->mb1_size
++ - 640*480*2)/(*count)) - PAGE_SIZE;
++ /* *size = PAGE_ALIGN(2 * 1024 * 1024); */
++ } else if (fourcc == INTEL_PIX_FMT_RAW08
++ || fourcc == INTEL_PIX_FMT_RAW10
++ || fourcc == INTEL_PIX_FMT_RAW12) {
++ *size = (w * h * depth)/8;
++ } else {
++ *size = (w * h * depth)/8;
++ }
++
++ isp->frame_size = *size;
++ isp->num_frames = *count;
++
++ if (0 == *count)
++ *count = 3;
++
++ while (*size * *count > isp->mb1_size)
++ (*count)--;
++
++ dprintk(1, "count=%d, size=%d", *count, *size);
++ return 0;
++}
++
++static void free_buffer(struct videobuf_queue *vq, struct mrst_isp_buffer *buf)
++{
++ struct videobuf_buffer *vb = &buf->vb;
++
++ dprintk(1, "(vb=0x%p) baddr = 0x%08lx bsize = %d", vb,
++ vb->baddr, vb->bsize);
++
++ videobuf_dma_contig_free(vq, vb);
++
++ buf->vb.state = VIDEOBUF_NEEDS_INIT;
++ dprintk(1, "free_buffer: freed");
++}
++
++static int buffer_prepare(struct videobuf_queue *vq,
++ struct videobuf_buffer *vb, enum v4l2_field field)
++{
++ struct mrst_isp_fh *fh = vq->priv_data;
++ struct mrst_isp_device *isp = fh->dev;
++ struct mrst_isp_buffer *buf = container_of(vb, struct mrst_isp_buffer,
++ vb);
++ int ret;
++
++ if (vb->width != isp->bufwidth || vb->height != isp->bufheight
++ || vb->field != field) {
++ /* buf->fmt = isp->pixelformat; */
++ vb->width = isp->bufwidth;
++ vb->height = isp->bufheight;
++ vb->field = field;
++ vb->state = VIDEOBUF_NEEDS_INIT;
++ }
++
++ vb->size = isp->frame_size;
++
++ if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
++ ret = videobuf_iolock(vq, vb, NULL);
++ if (ret)
++ goto fail;
++ vb->state = VIDEOBUF_PREPARED;
++ }
++
++ return 0;
++
++fail:
++ printk(KERN_ERR "mrstisp: error calling videobuf_iolock");
++ free_buffer(vq, buf);
++ return ret;
++}
++
++static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
++{
++ struct mrst_isp_fh *fh = vq->priv_data;
++ struct mrst_isp_device *isp = fh->dev;
++ u32 bufbase;
++
++ vb->state = VIDEOBUF_QUEUED;
++ dprintk(1, "buffer %d in buffer querue", vb->i);
++ if (isp->stopflag) {
++ list_add_tail(&vb->queue, &isp->capture);
++ if (isp->active) {
++ /* dprintk(1, "AAAAAAAAAA in flag condition"); */
++ /* isp->active->state = VIDEOBUF_ACTIVE; */
++ /* mrst_isp_to_do_mblk_line = 1; */
++ bufbase = videobuf_to_dma_contig(vb);
++ mrst_isp_update_marvinvfaddr(isp, bufbase, 0);
++ /* mrst_isp_enable_interrupt(isp); */
++ } else {
++ isp->active = vb;
++ mrst_isp_enable_interrupt(isp);
++ /*
++ dprintk(1, "xxxxxxxxx in flag condition");
++ isp->active->state = VIDEOBUF_ACTIVE;
++ mrst_isp_to_do_mblk_line = 1;
++ bufbase = videobuf_to_dma_contig(isp->active);
++ mrst_isp_update_marvinvfaddr(isp, bufbase,
++ CI_ISP_CFG_UPDATE_FRAME_SYNC);
++ */
++ }
++ isp->stopflag = 0;
++ } else if (!isp->active) {
++ dprintk(1, "no active queue");
++ isp->active = vb;
++ isp->active->state = VIDEOBUF_ACTIVE;
++ mrst_isp_to_do_mblk_line = 1;
++ mrst_ci_capture(isp);
++ } else {
++ dprintk(1, "capture to active queue");
++ list_add_tail(&vb->queue, &isp->capture);
++ }
++
++ return;
++}
++
++static void buffer_release(struct videobuf_queue *vq,
++ struct videobuf_buffer *vb)
++{
++ struct mrst_isp_buffer *buf = container_of(vb,
++ struct mrst_isp_buffer, vb);
++ DBG_entering;
++ free_buffer(vq, buf);
++ DBG_leaving;
++}
++
++static struct videobuf_queue_ops mrst_isp_videobuf_qops = {
++ .buf_setup = buffer_setup,
++ .buf_prepare = buffer_prepare,
++ .buf_queue = buffer_queue,
++ .buf_release = buffer_release,
++};
++
++static int mrst_isp_open(struct file *file)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(vdev);
++ struct mrst_isp_fh *fh = NULL;
++ struct v4l2_format sensor_format;
++ int ret;
++
++ DBG_entering;
++
++ if (!isp) {
++ printk(KERN_ERR "null in mrst_isp_open\n");
++ return -ENODEV;
++ }
++
++ dprintk(2, "open = %d", isp->open);
++ mutex_lock(&isp->mutex);
++ if (isp->open == 0) {
++ if (isp->sensor_soc) {
++ dprintk(0, "cur senfor soc");
++ isp->sensor_curr = isp->sensor_soc;
++ } else {
++ dprintk(0, "cur sensor raw");
++ isp->sensor_curr = isp->sensor_raw;
++ }
++ }
++ ++isp->open;
++
++ ret = v4l2_subdev_call(isp->sensor_curr, video, g_fmt,
++ &sensor_format);
++ if (ret) {
++ printk(KERN_ERR "can't get current pix from sensor!\n");
++ ret = -EINVAL;
++ goto exit_unlock;
++ }
++
++ dprintk(1, "current sensor format: %d x %d",
++ sensor_format.fmt.pix.width,
++ sensor_format.fmt.pix.height);
++
++ fh = kzalloc(sizeof(*fh), GFP_KERNEL);
++ if (NULL == fh) {
++ printk(KERN_ERR "no mem for fh \n");
++ ret = -ENOMEM;
++ goto exit_unlock;
++ }
++
++ file->private_data = fh;
++ fh->dev = isp;
++
++ videobuf_queue_dma_contig_init(&fh->vb_q, &mrst_isp_videobuf_qops,
++ vdev->parent, &isp->lock,
++ V4L2_BUF_TYPE_VIDEO_CAPTURE,
++ V4L2_FIELD_NONE,
++ sizeof(struct mrst_isp_buffer), fh);
++
++exit_unlock:
++ mutex_unlock(&isp->mutex);
++ DBG_leaving;
++ return 0;
++}
++
++static int mrst_isp_close(struct file *file)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++ struct mrst_isp_fh *fh = file->private_data;
++ unsigned long flags;
++
++ DBG_entering;
++ mutex_lock(&isp->mutex);
++ --isp->open;
++ dprintk(2, "close = %d", isp->open);
++ if (isp->open == 0) {
++ if (isp->streaming == 1) {
++ videobuf_streamoff(&fh->vb_q);
++ isp->streaming = 0;
++ isp->buffer_required = 0;
++ isp->stopflag = 0;
++
++ spin_lock_irqsave(&isp->lock, flags);
++ INIT_LIST_HEAD(&isp->capture);
++ isp->active = NULL;
++ isp->next = NULL;
++ isp->sys_conf.isp_hal_enable = 0;
++ isp->sys_conf.jpg_review_enable = 0;
++ spin_unlock_irqrestore(&isp->lock, flags);
++
++ ci_isp_stop(CI_ISP_CFG_UPDATE_FRAME_SYNC);
++ v4l2_subdev_call(isp->sensor_curr, video, s_stream, 0);
++ isp->sensor_curr = NULL;
++ }
++ if (isp->sensor_soc)
++ v4l2_subdev_call(isp->sensor_soc, core, s_gpio, 1);
++ if (isp->sensor_raw)
++ v4l2_subdev_call(isp->sensor_raw, core, s_gpio, 1);
++ }
++
++ kfree(file->private_data);
++
++ mutex_unlock(&isp->mutex);
++
++ /*XXX zheng*/
++ if (isp->open == 0)
++ frame_cnt = 0;
++
++ DBG_leaving;
++ return 0;
++}
++
++static ssize_t mrst_isp_read(struct file *file, char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ return 0;
++}
++
++static void mrst_isp_videobuf_vm_open(struct vm_area_struct *vma)
++{
++ struct videobuf_mapping *map = vma->vm_private_data;
++
++ dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
++ map, map->count, vma->vm_start, vma->vm_end);
++
++ map->count++;
++}
++
++static void mrst_isp_videobuf_vm_close(struct vm_area_struct *vma)
++{
++ struct videobuf_mapping *map = vma->vm_private_data;
++ struct videobuf_queue *q = map->q;
++ int i;
++
++ dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
++ map, map->count, vma->vm_start, vma->vm_end);
++
++ map->count--;
++ if (0 == map->count) {
++ struct videobuf_dma_contig_memory *mem;
++
++ dprintk(2, "munmap %p q=%p\n", map, q);
++ mutex_lock(&q->vb_lock);
++
++ /* We need first to cancel streams, before unmapping */
++ if (q->streaming)
++ videobuf_queue_cancel(q);
++
++ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
++ if (NULL == q->bufs[i])
++ continue;
++
++ if (q->bufs[i]->map != map)
++ continue;
++
++ mem = q->bufs[i]->priv;
++ if (mem) {
++ /* This callback is called only if kernel has
++ allocated memory and this memory is mmapped.
++ In this case, memory should be freed,
++ in order to do memory unmap.
++ */
++
++ MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
++
++ /* vfree is not atomic - can't be
++ called with IRQ's disabled
++ */
++ dprintk(2, "buf[%d] freeing %p\n",
++ i, mem->vaddr);
++
++ /*
++ dma_free_coherent(q->dev, mem->size,
++ mem->vaddr, mem->dma_handle);
++ */
++ mem->vaddr = NULL;
++ }
++
++ q->bufs[i]->map = NULL;
++ q->bufs[i]->baddr = 0;
++ }
++
++ kfree(map);
++
++ mutex_unlock(&q->vb_lock);
++ }
++}
++
++static struct vm_operations_struct mrst_isp_videobuf_vm_ops = {
++ .open = mrst_isp_videobuf_vm_open,
++ .close = mrst_isp_videobuf_vm_close,
++};
++
++static int mrst_isp_mmap_mapper(struct videobuf_queue *q,
++ struct vm_area_struct *vma)
++{
++ struct videobuf_dma_contig_memory *mem;
++ struct videobuf_mapping *map;
++ unsigned int first;
++ int retval;
++ unsigned long size, offset = vma->vm_pgoff << PAGE_SHIFT;
++
++ struct mrst_isp_fh *fh = q->priv_data;
++ struct mrst_isp_device *isp = fh->dev;
++
++ DBG_entering;
++
++ if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED))
++ return -EINVAL;
++
++ /* look for first buffer to map */
++ for (first = 0; first < VIDEO_MAX_FRAME; first++) {
++ if (!q->bufs[first])
++ continue;
++
++ if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
++ continue;
++ if (q->bufs[first]->boff == offset) {
++ dprintk(1, "buff id %d is mapped", first);
++ break;
++ }
++ }
++ if (VIDEO_MAX_FRAME == first) {
++ eprintk("invalid user space offset [offset=0x%lx]", offset);
++ return -EINVAL;
++ }
++
++ /* create mapping + update buffer list */
++ map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
++ if (!map)
++ return -ENOMEM;
++
++ q->bufs[first]->map = map;
++ map->start = vma->vm_start;
++ map->end = vma->vm_end;
++ map->q = q;
++
++ q->bufs[first]->baddr = vma->vm_start;
++
++ mem = q->bufs[first]->priv;
++ BUG_ON(!mem);
++ MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
++
++ mem->size = PAGE_ALIGN(q->bufs[first]->bsize);
++ mem->dma_handle = isp->mb1 + (mem->size * first);
++ mem->vaddr = (void *)0x1;
++ /*
++ mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
++ &mem->dma_handle, GFP_KERNEL);
++ */
++ if (mem->size > isp->mb1_size) {
++ eprintk("to big size, can not be mmapped");
++ return -EINVAL;
++ }
++
++ /* Try to remap memory */
++
++ size = vma->vm_end - vma->vm_start;
++ size = (size < mem->size) ? size : mem->size;
++
++ dprintk(1, "vm_end - vm_start = %ld, mem-size = %ld", size, mem->size);
++
++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++ retval = remap_pfn_range(vma, vma->vm_start,
++ mem->dma_handle >> PAGE_SHIFT,
++ size, vma->vm_page_prot);
++ if (retval) {
++ eprintk("mmap: remap failed with error %d. ", retval);
++ goto error;
++ }
++
++ vma->vm_ops = &mrst_isp_videobuf_vm_ops;
++ vma->vm_flags |= VM_DONTEXPAND;
++ vma->vm_private_data = map;
++
++ dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
++ map, q, vma->vm_start, vma->vm_end,
++ (long int) q->bufs[first]->bsize,
++ vma->vm_pgoff, first);
++
++ mrst_isp_videobuf_vm_open(vma);
++
++ return 0;
++
++error:
++ kfree(map);
++ return -ENOMEM;
++}
++int mrst_isp_videobuf_mmap_mapper(struct videobuf_queue *q,
++ struct vm_area_struct *vma)
++{
++ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
++
++ mutex_lock(&q->vb_lock);
++ mrst_isp_mmap_mapper(q, vma);
++ /* retval = CALL(q, mmap_mapper, q, vma); */
++ q->is_mmapped = 1;
++ mutex_unlock(&q->vb_lock);
++
++ return 0;
++}
++static int mrst_isp_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ int ret;
++ int map_by_myself;
++ struct mrst_isp_fh *fh;
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
++ unsigned long size = vma->vm_end-vma->vm_start;
++ unsigned long page;
++
++ DBG_entering;
++
++ /* temporarily put here */
++ if (isp->open > 1) {
++ printk(KERN_ERR "ISP already opened...");
++ return -EINVAL;
++ }
++
++ fh = file->private_data;
++
++ if (!(vma->vm_flags & (VM_WRITE | VM_READ))
++ || !(vma->vm_flags & VM_SHARED)) {
++ printk(KERN_ERR "mrstisp: wrong vma flag");
++ return -EINVAL;
++ }
++
++ /* to check whether if it is ISP bar 0 map */
++ if (offset == isp->mb0_size + isp->mb1_size) {
++ dprintk(1, "---- map bar0 ----");
++ page = isp->mb0;
++ map_by_myself = 1;
++ } else if (offset == 0 && size == isp->mb1_size) {
++ dprintk(1, "---- map bar1 ----");
++ page = isp->mb1;
++ map_by_myself = 1;
++ } else if (isp->pixelformat == V4L2_PIX_FMT_JPEG
++ && isp->sys_conf.jpg_review_enable == 1
++ && offset == isp->sys_conf.jpg_review.offset) {
++ dprintk(1, "---- map jpeg review buffer----");
++ page = isp->mb1 + isp->sys_conf.jpg_review.offset;
++ map_by_myself = 1;
++ } else {
++ dprintk(1, "----map one certain buffer----");
++ map_by_myself = 0;
++ }
++
++ if (map_by_myself) {
++ vma->vm_flags |= VM_IO;
++ vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */
++
++ page = page >> PAGE_SHIFT;
++
++ if (remap_pfn_range(vma, vma->vm_start, page, size,
++ PAGE_SHARED)) {
++ printk(KERN_ERR "fail to put MMAP buffer to user space\n");
++ return -EAGAIN;
++ }
++
++ return 0;
++ }
++
++ if (size > isp->num_frames * PAGE_ALIGN(isp->frame_size)) {
++ eprintk("length is larger than num * size");
++ return -EINVAL;
++ }
++
++ ret = mrst_isp_videobuf_mmap_mapper(&fh->vb_q, vma);
++
++ dprintk(1, "vma start=0x%08lx, size=%ld, offset=%ld ret=%d",
++ (unsigned long)vma->vm_start,
++ (unsigned long)vma->vm_end-(unsigned long)vma->vm_start,
++ (unsigned long)offset, ret);
++
++ return ret;
++}
++
++static int mrst_isp_g_fmt_cap(struct file *file, void *priv,
++ struct v4l2_format *f)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++ int ret;
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++
++ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
++ f->fmt.pix.width = isp->bufwidth;
++ f->fmt.pix.height = isp->bufheight;
++ f->fmt.pix.pixelformat = isp->pixelformat;
++ f->fmt.pix.bytesperline = (f->fmt.pix.width * isp->depth) >> 3;
++ f->fmt.pix.sizeimage = f->fmt.pix.height
++ * f->fmt.pix.bytesperline;
++ ret = 0;
++ } else {
++ ret = -EINVAL;
++ }
++
++ dprintk(1, "get fmt %d x %d ", f->fmt.pix.width, f->fmt.pix.height);
++ DBG_leaving;
++ return ret;
++}
++
++static struct intel_fmt *fmt_by_fourcc(unsigned int fourcc)
++{
++ unsigned int i;
++
++ for (i = 0; i < NUM_FORMATS; i++)
++ if (fmts[i].fourcc == fourcc)
++ return fmts+i;
++ return NULL;
++}
++
++static int mrst_isp_try_fmt_cap(struct file *file, void *priv,
++ struct v4l2_format *f)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ struct intel_fmt *fmt;
++ int w, h;
++ int ret;
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++
++ mutex_lock(&isp->mutex);
++
++ fmt = fmt_by_fourcc(f->fmt.pix.pixelformat);
++ if (NULL == fmt && f->fmt.pix.pixelformat != V4L2_PIX_FMT_MPEG) {
++ printk(KERN_ERR "mrstisp: fmt not found\n");
++ ret = -EINVAL;
++ goto exit_unlock;
++ }
++
++ w = f->fmt.pix.width;
++ h = f->fmt.pix.height;
++
++ dprintk(1, "sensor name %s: before w = %d, h = %d",
++ isp->sensor_curr->name, w, h);
++
++ ret = v4l2_subdev_call(isp->sensor_curr, video, try_fmt, f);
++ if (ret)
++ goto exit_unlock;
++
++
++ w = f->fmt.pix.width;
++ h = f->fmt.pix.height;
++
++ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB565 ||
++ f->fmt.pix.pixelformat == V4L2_PIX_FMT_BGR32) {
++ if (w < INTEL_MIN_WIDTH)
++ w = INTEL_MIN_WIDTH;
++ if (w > INTEL_MAX_WIDTH)
++ w = INTEL_MAX_WIDTH;
++ if (h < INTEL_MIN_HEIGHT)
++ h = INTEL_MIN_HEIGHT;
++ if (h > INTEL_MAX_HEIGHT)
++ h = INTEL_MAX_HEIGHT;
++ f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
++ } else {
++ if (w < INTEL_MIN_WIDTH)
++ w = INTEL_MIN_WIDTH;
++ if (w > INTEL_MAX_WIDTH_MP)
++ w = INTEL_MAX_WIDTH_MP;
++ if (h < INTEL_MIN_HEIGHT)
++ h = INTEL_MIN_HEIGHT;
++ if (h > INTEL_MAX_HEIGHT_MP)
++ h = INTEL_MAX_HEIGHT_MP;
++ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
++ }
++
++ f->fmt.pix.width = w;
++ f->fmt.pix.height = h;
++
++ f->fmt.pix.field = V4L2_FIELD_NONE;
++ f->fmt.pix.bytesperline = (w * h)/8;
++ if (fmt)
++ f->fmt.pix.sizeimage = (w * h * fmt->depth)/8;
++ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG)
++ f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
++ f->fmt.pix.priv = 0;
++
++ dprintk(3, "after w = %d, h = %d", w, h);
++ ret = 0;
++
++exit_unlock:
++ mutex_unlock(&isp->mutex);
++
++ DBG_leaving;
++ return ret;
++}
++
++static int mrst_isp_s_fmt_cap(struct file *file, void *priv,
++ struct v4l2_format *f)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++ struct intel_fmt *fmt;
++ int ret;
++ unsigned int width_o, height_o;
++ unsigned short width_sensor, height_sensor;
++ unsigned int w, h;
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++
++ mipi_flag = 1;
++
++ w = f->fmt.pix.width;
++ h = f->fmt.pix.height;
++
++ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB565 ||
++ f->fmt.pix.pixelformat == V4L2_PIX_FMT_BGR32) {
++ if (w < INTEL_MIN_WIDTH)
++ w = INTEL_MIN_WIDTH;
++ if (w > INTEL_MAX_WIDTH)
++ w = INTEL_MAX_WIDTH;
++ if (h < INTEL_MIN_HEIGHT)
++ h = INTEL_MIN_HEIGHT;
++ if (h > INTEL_MAX_HEIGHT)
++ h = INTEL_MAX_HEIGHT;
++ f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
++ } else {
++ if (w < INTEL_MIN_WIDTH)
++ w = INTEL_MIN_WIDTH;
++ if (w > INTEL_MAX_WIDTH_MP)
++ w = INTEL_MAX_WIDTH_MP;
++ if (h < INTEL_MIN_HEIGHT)
++ h = INTEL_MIN_HEIGHT;
++ if (h > INTEL_MAX_HEIGHT_MP)
++ h = INTEL_MAX_HEIGHT_MP;
++ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
++ }
++
++ f->fmt.pix.width = w;
++ f->fmt.pix.height = h;
++
++ width_o = f->fmt.pix.width;
++ height_o = f->fmt.pix.height;
++
++ (void)ci_sensor_res2size(to_sensor_config(isp->sensor_curr)->res,
++ &width_sensor, &height_sensor);
++
++ ret = mrst_isp_try_fmt_cap(file, priv, f);
++ if (0 != ret) {
++ printk(KERN_ERR "mrstisp: set format failed\n");
++ return ret;
++ }
++
++ /* set fmt for only sensor */
++ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_MPEG) {
++ ret = v4l2_subdev_call(isp->sensor_curr, video, s_fmt, f);
++ dprintk(1, "------------set fmt only for sensor (%d x %d)",
++ f->fmt.pix.width, f->fmt.pix.height);
++ return ret;
++ }
++
++ if (isp->sys_conf.isp_hal_enable) {
++ /* set fmt for isp */
++ mutex_lock(&isp->mutex);
++ fmt = fmt_by_fourcc(f->fmt.pix.pixelformat);
++
++ isp->pixelformat = fmt->fourcc;
++ isp->depth = fmt->depth;
++
++ dprintk(1, "sensor (%d x %d)", width_sensor, height_sensor);
++ if (width_o < f->fmt.pix.width &&
++ height_o < f->fmt.pix.height) {
++ isp->bufwidth = width_o;
++ isp->bufheight = height_o;
++ } else if (width_sensor < f->fmt.pix.width &&
++ height_sensor < f->fmt.pix.height) {
++ isp->bufwidth = width_sensor;
++ isp->bufheight = height_sensor;
++ f->fmt.pix.width = width_sensor;
++ f->fmt.pix.height = height_sensor;
++ } else {
++ isp->bufwidth = f->fmt.pix.width;
++ isp->bufheight = f->fmt.pix.height;
++ }
++
++ /* FIXME
++ * check if buf res is larger than
++ * sensor real res(1304x980)
++ * if yes, down buf res to VGA
++ */
++ if (to_sensor_config(isp->sensor_curr)->res ==
++ SENSOR_RES_VGA_PLUS)
++ if (isp->bufwidth >= VGA_SIZE_H &&
++ isp->bufheight >= VGA_SIZE_V) {
++ isp->bufwidth = VGA_SIZE_H;
++ isp->bufheight = VGA_SIZE_V;
++ }
++
++ mutex_unlock(&isp->mutex);
++
++ dprintk(1, "----------set fmt only to isp: w %d, h%d, "
++ "fourcc: %lx", isp->bufwidth,
++ isp->bufheight, fmt->fourcc);
++ } else {
++
++ /* set fmt for both isp and sensor */
++ mutex_lock(&isp->mutex);
++ fmt = fmt_by_fourcc(f->fmt.pix.pixelformat);
++
++ isp->pixelformat = fmt->fourcc;
++ isp->depth = fmt->depth;
++ isp->bufwidth = width_o;
++ isp->bufheight = height_o;
++
++ mutex_unlock(&isp->mutex);
++
++ dprintk(1, "--------set fmt for isp : w%d, h%d, fourcc: %lx",
++ isp->bufwidth, isp->bufheight, fmt->fourcc);
++ dprintk(1, "--------set fmt for sesnro : w%d, h%d, fourcc: %lx",
++ f->fmt.pix.width, f->fmt.pix.height, fmt->fourcc);
++
++ ret = v4l2_subdev_call(isp->sensor_curr, video, s_fmt, f);
++ }
++
++ DBG_leaving;
++ return ret;
++}
++
++static int mrst_isp_enum_framesizes(struct file *file, void *priv,
++ struct v4l2_frmsizeenum *arg)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++ int ret;
++
++ DBG_entering;
++
++ WARN_ON(priv != file->private_data);
++
++ ret = v4l2_subdev_call(isp->sensor_curr, video, enum_framesizes, arg);
++
++ DBG_leaving;
++ return ret;
++}
++
++static int mrst_isp_enum_frameintervals(struct file *file, void *priv,
++ struct v4l2_frmivalenum *arg)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++ int ret;
++
++ DBG_entering;
++
++ WARN_ON(priv != file->private_data);
++
++ ret = v4l2_subdev_call(isp->sensor_curr, video, enum_frameintervals,
++ arg);
++ DBG_leaving;
++ return ret;
++}
++
++static int mrst_isp_queryctrl(struct file *file, void *priv,
++ struct v4l2_queryctrl *c)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(vdev);
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++
++ if (!v4l2_subdev_call(isp->sensor_curr, core, queryctrl, c))
++ return 0;
++ else if (!v4l2_subdev_call(isp->motor, core, queryctrl, c))
++ return 0;
++
++ /* No controls supported */
++ return -EINVAL;
++}
++
++static int mrst_isp_g_ctrl(struct file *file, void *priv,
++ struct v4l2_control *c)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++ int ret;
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++ if (c->id == V4L2_CID_FOCUS_ABSOLUTE) {
++ ret = v4l2_subdev_call(isp->motor, core, g_ctrl, c);
++ dprintk(2, "get focus from motor : %d", c->value);
++ return ret;
++ } else {
++ ret = v4l2_subdev_call(isp->sensor_curr, core, g_ctrl, c);
++ dprintk(2, "get other cotrol from senrsor : %d", c->value);
++ return ret;
++ }
++}
++
++static int mrst_isp_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ DBG_entering;
++
++ if (c->id == V4L2_CID_FOCUS_ABSOLUTE) {
++ dprintk(2, "setting focus %d to motor", c->value);
++ return v4l2_subdev_call(isp->motor, core, s_ctrl, c);
++ } else {
++ dprintk(2, "setting other ctrls, value = %d", c->value);
++ return v4l2_subdev_call(isp->sensor_curr, core, s_ctrl, c);
++ }
++}
++
++static int mrst_isp_index_to_camera(struct mrst_isp_device *isp, u32 index)
++{
++ int camera = MRST_CAMERA_NONE;
++
++ if (isp->sensor_soc && isp->sensor_raw) {
++ switch (index) {
++ case 0:
++ camera = isp->sensor_soc_index;
++ break;
++ case 1:
++ camera = isp->sensor_raw_index;
++ break;
++ }
++ } else if (isp->sensor_soc) {
++ switch (index) {
++ case 0:
++ camera = isp->sensor_soc_index;
++ break;
++ }
++ } else if (isp->sensor_raw) {
++ switch (index) {
++ case 0:
++ camera = isp->sensor_raw_index;
++ break;
++ }
++ }
++
++ return camera;
++}
++
++static int mrst_isp_enum_input(struct file *file, void *priv,
++ struct v4l2_input *i)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(vdev);
++ int camera;
++
++ DBG_entering;
++
++ WARN_ON(priv != file->private_data);
++
++ camera = mrst_isp_index_to_camera(isp, i->index);
++ if (MRST_CAMERA_NONE == camera)
++ return -EINVAL;
++
++ i->type = V4L2_INPUT_TYPE_CAMERA;
++ i->std = V4L2_STD_UNKNOWN;
++ strcpy(i->name, mrst_camera_table[camera].name);
++
++ DBG_leaving;
++ return 0;
++}
++static int mrst_isp_g_input(struct file *file, void *priv, unsigned int *i)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(vdev);
++
++ DBG_entering;
++
++ WARN_ON(priv != file->private_data);
++
++ if (isp->sensor_soc && isp->sensor_raw)
++ if (isp->sensor_curr == isp->sensor_soc)
++ *i = 0;
++ else
++ *i = 1;
++ else
++ *i = 0;
++
++ DBG_leaving;
++ return 0;
++}
++
++static int mrst_isp_s_input(struct file *file, void *priv, unsigned int i)
++{
++ struct video_device *vdev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(vdev);
++
++ int camera;
++
++ DBG_entering;
++
++ if (isp->streaming) {
++ printk(KERN_WARNING "VIDIOC_S_INPUT error: ISP is streaming\n");
++ return -EBUSY;
++ }
++
++ camera = mrst_isp_index_to_camera(isp, i);
++ if (MRST_CAMERA_NONE == camera)
++ return -EINVAL;
++
++ if (mrst_camera_table[camera].type == MRST_CAMERA_SOC)
++ isp->sensor_curr = isp->sensor_soc;
++ else
++ isp->sensor_curr = isp->sensor_raw;
++
++ dprintk(1, "set sensor %s as input", isp->sensor_curr->name);
++
++ DBG_leaving;
++ return 0;
++}
++
++static int mrst_isp_g_ext_ctrls(struct file *file,
++ void *fh,
++ struct v4l2_ext_controls *c)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ int ret = -EINVAL;
++
++ DBG_entering;
++
++ if (c->ctrl_class != V4L2_CTRL_CLASS_CAMERA) {
++ printk(KERN_ERR "Invalid control class\n");
++ return ret;
++ }
++
++ c->error_idx = 0;
++ if (isp->motor) {
++ ret = v4l2_subdev_call(isp->motor, core, g_ext_ctrls, c);
++ if (c->error_idx) {
++ printk(KERN_ERR "mrst: error call g_ext_ctrls\n");
++ return ret;
++ }
++ }
++
++ DBG_leaving;
++ return 0;
++}
++
++static int mrst_isp_s_ext_ctrls(struct file *file, void *fh,
++ struct v4l2_ext_controls *c)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ int ret = -EINVAL;
++
++ DBG_entering;
++
++ if (c->ctrl_class != V4L2_CTRL_CLASS_CAMERA) {
++ printk(KERN_INFO "Invalid control class\n");
++ return ret;
++ }
++
++ c->error_idx = 0;
++ if (isp->motor) {
++ ret = v4l2_subdev_call(isp->motor, core, s_ext_ctrls, c);
++ if (c->error_idx) {
++ printk(KERN_ERR "mrst: error call s_ext_ctrls\n");
++ return ret;
++ }
++ }
++
++ DBG_leaving;
++ return 0;
++}
++
++static int mrst_isp_s_std(struct file *filp, void *priv, v4l2_std_id *a)
++{
++ DBG_entering;
++ DBG_leaving;
++ return 0;
++}
++
++static int mrst_isp_querycap(struct file *file, void *priv,
++ struct v4l2_capability *cap)
++{
++ struct video_device *dev = video_devdata(file);
++
++ DBG_entering;
++
++ strlcpy(cap->driver, DRIVER_NAME, sizeof(cap->driver));
++ strlcpy(cap->card, dev->name, sizeof(cap->card));
++
++ cap->version = INTEL_VERSION(0, 5, 0);
++ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
++
++ DBG_leaving;
++
++ return 0;
++}
++
++static int mrst_isp_cropcap(struct file *file, void *priv,
++ struct v4l2_cropcap *cap)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++
++ if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ return -EINVAL;
++
++ cap->bounds.left = 0;
++ cap->bounds.top = 0;
++ cap->bounds.width = isp->bufwidth;
++ cap->bounds.height = isp->bufheight;
++ cap->defrect = cap->bounds;
++ cap->pixelaspect.numerator = 1;
++ cap->pixelaspect.denominator = 1;
++
++ DBG_leaving;
++
++ return 0;
++}
++
++static int mrst_isp_enum_fmt_cap(struct file *file, void *priv,
++ struct v4l2_fmtdesc *f)
++{
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ unsigned int index;
++
++ DBG_entering;
++
++ index = f->index;
++
++ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ return -EINVAL;
++ else {
++ if (isp->sensor_curr == isp->sensor_soc)
++ if (index >= 8)
++ return -EINVAL;
++ if (index >= sizeof(fmts) / sizeof(*fmts))
++ return -EINVAL;
++
++ f->index = index;
++ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++ strlcpy(f->description, fmts[index].name,
++ sizeof(f->description));
++ f->pixelformat = fmts[index].fourcc;
++ if (fmts[index].fourcc == V4L2_PIX_FMT_JPEG)
++ f->flags = V4L2_FMT_FLAG_COMPRESSED;
++ }
++
++ DBG_leaving;
++
++ return 0;
++
++}
++
++#define ALIGN4(x) ((((long)(x)) & 0x3) == 0)
++
++static int mrst_isp_reqbufs(struct file *file, void *priv,
++ struct v4l2_requestbuffers *req)
++{
++ int ret;
++ struct mrst_isp_fh *fh = file->private_data;
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++ if (req->count == 0)
++ return 0;
++
++ /*
++ * if (req->count > 3)
++ req->count = 3;
++ */
++
++ if (req->memory != V4L2_MEMORY_MMAP) {
++ eprintk("wrong memory type");
++ return -EINVAL;
++ }
++ ret = videobuf_reqbufs(&fh->vb_q, req);
++ if (ret)
++ eprintk("err calling videobuf_reqbufs ret = %d", ret);
++
++ if (!ret)
++ isp->buffer_required = 1;
++
++ DBG_leaving;
++ return ret;
++}
++
++static int mrst_isp_querybuf(struct file *file, void *priv,
++ struct v4l2_buffer *buf)
++{
++ int ret;
++ struct mrst_isp_fh *fh = file->private_data;
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++ ret = videobuf_querybuf(&fh->vb_q, buf);
++
++ DBG_leaving;
++ return ret;
++}
++
++static int mrst_isp_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
++{
++ int ret;
++ struct mrst_isp_fh *fh = file->private_data;
++
++ WARN_ON(priv != file->private_data);
++
++ DBG_entering;
++ ret = videobuf_qbuf(&fh->vb_q, buf);
++ /* identify which video buffer was q-ed */
++ if (ret == 0)
++ fh->qbuf_flag |= (1<<buf->index);
++ dprintk(1, "q-ed index = %d", buf->index);
++
++ DBG_leaving;
++
++ return ret;
++}
++
++static int mrst_isp_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
++{
++ int ret;
++ struct mrst_isp_fh *fh = file->private_data;
++
++ WARN_ON(priv != file->private_data);
++
++ /*XXX zheng*/
++ /*
++ if (frame_cnt == 0) {
++ printk(KERN_WARNING "timer start\n");
++ intel_timer_start();
++ }
++ */
++
++ DBG_entering;
++
++ if (b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ return -EINVAL;
++ if (b->memory != V4L2_MEMORY_MMAP)
++ return -EINVAL;
++ if (fh->qbuf_flag == 0) {
++ dprintk(1, "no buffer can be dq-ed\n");
++ return -EINVAL;
++ }
++
++ /*dprintk(3, "entering");*/
++ /* ret = videobuf_dqbuf(&fh->vb_q, b, file->f_flags & O_NONBLOCK); */
++ ret = videobuf_dqbuf(&fh->vb_q, b, 0);
++ /* identify which video buffer was dq-ed */
++ if (ret == 0)
++ fh->qbuf_flag &= ~(1<<b->index);
++
++ /*XXX zheng*/
++ ++frame_cnt;
++ /*
++ if (frame_cnt % 10 == 0)
++ printk(KERN_WARNING "%d frames takes %dms to go, fps = %d\n",
++ frame_cnt, intel_get_micro_sec(),
++ frame_cnt * 1000 / intel_get_micro_sec());
++ */
++
++ dprintk(1, "dq-ed index = %d", b->index);
++ DBG_leaving;
++ return ret;
++}
++
++static int mrst_isp_streamon(struct file *file, void *priv,
++ enum v4l2_buf_type type)
++{
++ struct mrst_isp_fh *fh = file->private_data;
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++ int ret;
++
++ DBG_entering;
++
++ if (!isp->buffer_required) {
++ eprintk("buffer is not required, can not stream on ");
++ return -EINVAL;
++ }
++
++ dprintk(2, "gamma2 = %d", isp->sys_conf.isp_cfg.flags.gamma2);
++ WARN_ON(priv != file->private_data);
++
++ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ return -EINVAL;
++
++ mutex_lock(&isp->mutex);
++
++ if (!to_sensor_config(isp->sensor_curr)->mipi_mode)
++ v4l2_subdev_call(isp->sensor_curr, video, s_stream, 1);
++
++ mrst_isp_dp_init(&isp->sys_conf, to_sensor_config(isp->sensor_curr));
++ mrst_isp_setup_viewfinder_path(isp,
++ to_sensor_config(isp->sensor_curr), -1);
++
++ ret = videobuf_streamon(&fh->vb_q);
++ isp->streaming = 1;
++
++ mutex_unlock(&isp->mutex);
++
++ dprintk(1, "isp->active = %p", isp->active);
++ DBG_leaving;
++ return ret;
++}
++
++static int mrst_isp_streamoff(struct file *file, void *priv,
++ enum v4l2_buf_type type)
++{
++ struct mrst_isp_fh *fh = file->private_data;
++ struct video_device *dev = video_devdata(file);
++ struct mrst_isp_device *isp = video_get_drvdata(dev);
++
++ unsigned long flags;
++ int ret;
++
++ DBG_entering;
++
++ WARN_ON(priv != file->private_data);
++
++ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
++ return -EINVAL;
++
++ mutex_lock(&isp->mutex);
++
++ ret = videobuf_streamoff(&fh->vb_q);
++ dprintk(1, "ret of videobuf_streamoff = %d", ret);
++ isp->streaming = 0;
++
++ spin_lock_irqsave(&isp->lock, flags);
++ INIT_LIST_HEAD(&isp->capture);
++ isp->active = NULL;
++ isp->next = NULL;
++ isp->stopflag = 0;
++ isp->sys_conf.isp_hal_enable = 0;
++ isp->sys_conf.jpg_review_enable = 0;
++ isp->sys_conf.isp_cfg.img_eff_cfg.mode = CI_ISP_IE_MODE_OFF;
++ isp->sys_conf.isp_cfg.jpeg_enc_ratio = 1;
++
++ spin_unlock_irqrestore(&isp->lock, flags);
++
++ v4l2_subdev_call(isp->sensor_curr, video, s_stream, 0);
++ ci_isp_stop(CI_ISP_CFG_UPDATE_FRAME_SYNC);
++
++ mutex_unlock(&isp->mutex);
++
++ DBG_leaving;
++ return ret;
++}
++
++static const struct v4l2_file_operations mrst_isp_fops = {
++ .owner = THIS_MODULE,
++ .open = mrst_isp_open,
++ .release = mrst_isp_close,
++ .read = mrst_isp_read,
++ .mmap = mrst_isp_mmap,
++ .ioctl = video_ioctl2,
++};
++
++static const struct v4l2_ioctl_ops mrst_isp_ioctl_ops = {
++ .vidioc_querycap = mrst_isp_querycap,
++ .vidioc_enum_fmt_vid_cap = mrst_isp_enum_fmt_cap,
++ .vidioc_g_fmt_vid_cap = mrst_isp_g_fmt_cap,
++ /* .vidioc_g_fmt_vid_out =
++ * mrst_isp_g_fmt_cap_for_sensor_hal, */
++ .vidioc_try_fmt_vid_cap = mrst_isp_try_fmt_cap,
++ .vidioc_s_fmt_vid_cap = mrst_isp_s_fmt_cap,
++ .vidioc_cropcap = mrst_isp_cropcap,
++ .vidioc_reqbufs = mrst_isp_reqbufs,
++ .vidioc_querybuf = mrst_isp_querybuf,
++ .vidioc_qbuf = mrst_isp_qbuf,
++ .vidioc_dqbuf = mrst_isp_dqbuf,
++ .vidioc_enum_input = mrst_isp_enum_input,
++ .vidioc_g_input = mrst_isp_g_input,
++ .vidioc_s_input = mrst_isp_s_input,
++ .vidioc_s_std = mrst_isp_s_std,
++ .vidioc_queryctrl = mrst_isp_queryctrl,
++ .vidioc_streamon = mrst_isp_streamon,
++ .vidioc_streamoff = mrst_isp_streamoff,
++ .vidioc_g_ctrl = mrst_isp_g_ctrl,
++ .vidioc_s_ctrl = mrst_isp_s_ctrl,
++ .vidioc_enum_framesizes = mrst_isp_enum_framesizes,
++ .vidioc_enum_frameintervals = mrst_isp_enum_frameintervals,
++ .vidioc_g_ext_ctrls = mrst_isp_g_ext_ctrls,
++ .vidioc_s_ext_ctrls = mrst_isp_s_ext_ctrls,
++ /* FIXME private ioctls */
++ .vidioc_default = mrst_isp_vidioc_default,
++};
++
++static struct video_device mrst_isp_vdev = {
++ .name = "mrst_isp",
++ .minor = -1,
++ .fops = &mrst_isp_fops,
++ .ioctl_ops = &mrst_isp_ioctl_ops,
++ .release = video_device_release_empty,
++};
++
++static int mrst_ci_sensor_probe(struct mrst_isp_device *isp)
++{
++ struct v4l2_subdev *sensor = NULL, *motor = NULL;
++ int i;
++ char *name;
++ u8 addr;
++
++ isp->adapter_sensor = i2c_get_adapter(MRST_I2C_BUS_SENSOR);
++ if (NULL == isp->adapter_sensor) {
++ printk(KERN_ERR "mrstisp: no sensor i2c adapter\n");
++ return -ENODEV;
++ }
++
++ dprintk(1, "got sensor i2c adapter: %s", isp->adapter_sensor->name);
++
++ gpio_request(GPIO_STDBY1_PIN, "Sensor Standby1");
++ gpio_request(GPIO_STDBY2_PIN, "Sensor Standby2");
++ gpio_request(GPIO_RESET_PIN, "Sensor Reset");
++ gpio_request(GPIO_SCLK_25, "Sensor clock");
++ gpio_request(95, "Camera Motor");
++
++ /* Enable sensor related GPIO in system */
++ gpio_direction_output(GPIO_STDBY1_PIN, 0);
++ gpio_direction_output(GPIO_STDBY2_PIN, 0);
++ gpio_direction_output(GPIO_RESET_PIN, 1);
++ gpio_direction_output(GPIO_SCLK_25, 0);
++ /* gpio_direction_output(GPIO_AF_PD, 1); */
++
++ /*
++ gpio_alt_func(GPIO_STDBY1_PIN, 0);
++ gpio_alt_func(GPIO_STDBY2_PIN, 0);
++ gpio_alt_func(GPIO_RESET_PIN, 0);
++ gpio_alt_func(GPIO_SCLK_25, 1);
++ */
++
++ for (i = 0; i < N_CAMERA; i++) {
++ name = mrst_camera_table[i].name;
++ addr = mrst_camera_table[i].sensor_addr;
++ if (mrst_camera_table[i].type == MRST_CAMERA_SOC) {
++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
++ sensor = v4l2_i2c_new_subdev(&isp->v4l2_dev,
++ isp->adapter_sensor,
++ name, name, addr);
++#else
++ sensor = v4l2_i2c_new_subdev(&isp->v4l2_dev,
++ isp->adapter_sensor,
++ name, name, addr, NULL);
++#endif
++
++ if (sensor == NULL) {
++ dprintk(2, "sensor %s not found", name);
++ continue;
++ }
++ isp->sensor_soc = sensor;
++ isp->sensor_soc_index = i;
++ dprintk(0, "soc camera sensor %s-%s successfully found",
++ name, sensor->name);
++ }
++
++ if (mrst_camera_table[i].type == MRST_CAMERA_RAW) {
++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
++ sensor = v4l2_i2c_new_subdev(&isp->v4l2_dev,
++ isp->adapter_sensor,
++ name, name, addr);
++#else
++ sensor = v4l2_i2c_new_subdev(&isp->v4l2_dev,
++ isp->adapter_sensor,
++ name, name, addr, NULL);
++#endif
++
++ if (sensor == NULL) {
++ dprintk(2, "sensor %s not found", name);
++ continue;
++ }
++ isp->sensor_raw = sensor;
++ isp->sensor_raw_index = i;
++ dprintk(0, "raw camera sensor %s successfully found",
++ name);
++ name = mrst_camera_table[i].motor_name;
++ addr = mrst_camera_table[i].motor_addr;
++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
++ motor = v4l2_i2c_new_subdev(&isp->v4l2_dev,
++ isp->adapter_sensor,
++ name, name, addr);
++#else
++ motor = v4l2_i2c_new_subdev(&isp->v4l2_dev,
++ isp->adapter_sensor,
++ name, name, addr, NULL);
++#endif
++
++ if (motor == NULL)
++ dprintk(2, "motor %s not found", name);
++ else {
++ isp->motor = motor;
++ dprintk(0, "motor %s successfully found", name);
++ }
++ }
++ }
++
++ if (!isp->sensor_soc && !isp->sensor_raw) {
++ dprintk(0, "no camera sensor device attached");
++ return -ENODEV;
++ } else {
++ if (isp->sensor_soc)
++ isp->sensor_curr = isp->sensor_soc;
++ else
++ isp->sensor_curr = isp->sensor_raw;
++ return 0;
++ }
++}
++
++static int mrst_ci_flash_probe(struct mrst_isp_device *isp)
++{
++ struct v4l2_subdev *flash = NULL;
++ char *name = "mrst_camera_flash";
++
++ gpio_request(45, "Camera Flash");
++ gpio_direction_output(45, 0);
++
++ isp->adapter_flash = i2c_get_adapter(MRST_I2C_BUS_FLASH);
++ if (NULL == isp->adapter_flash) {
++ dprintk(0, "no flash i2c adapter\n");
++ return -ENODEV;
++ }
++
++ dprintk(1, "got flash i2c adapter: %s", isp->adapter_flash->name);
++
++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
++ flash = v4l2_i2c_new_subdev(&isp->v4l2_dev,
++ isp->adapter_flash,
++ name, name, 0x53);
++#else
++ flash = v4l2_i2c_new_subdev(&isp->v4l2_dev,
++ isp->adapter_flash,
++ name, name, 0x53, NULL);
++#endif
++
++ if (flash == NULL) {
++ dprintk(0, "no flash IC found\n");
++ return -ENODEV;
++ }
++
++ dprintk(0, "flash IC found");
++ return 0;
++}
++
++#if IRQ
++static irqreturn_t mrst_isp_irq_handler(int this_irq, void *dev_id)
++{
++ struct isp_register *mrv_reg =
++ (struct isp_register *) MEM_MRV_REG_BASE;
++ struct mrst_isp_device *isp = dev_id;
++ struct videobuf_buffer *vb;
++ unsigned long flags;
++
++ u32 mi_mask = ci_isp_get_frame_end_irq_mask_isp();
++ u32 isp_mask = MRV_ISP_RIS_DATA_LOSS_MASK
++ | MRV_ISP_RIS_PIC_SIZE_ERR_MASK;
++ u32 jpe_status_mask = MRV_JPE_ALL_STAT_MASK;
++ u32 jpe_error_mask = MRV_JPE_ALL_ERR_MASK;
++ u32 mblk_line_mask = MRV_MI_MBLK_LINE_MASK;
++
++ u32 isp_irq;
++ u32 mi_irq;
++ u32 jpe_status_irq;
++ u32 jpe_error_irq;
++ u32 mipi_irq;
++ u32 mblk_line;
++ u32 bufbase;
++
++ isp_irq = REG_READ_EX(mrv_reg->isp_ris) & isp_mask;
++ mi_irq = REG_READ_EX(mrv_reg->mi_ris) & mi_mask;
++
++ mblk_line = REG_READ_EX(mrv_reg->mi_ris) & mblk_line_mask;
++
++ jpe_status_irq = REG_READ_EX(mrv_reg->jpe_status_ris) & jpe_status_mask;
++ jpe_error_irq = REG_READ_EX(mrv_reg->jpe_error_ris) & jpe_error_mask;
++
++ mipi_irq = REG_READ_EX(mrv_reg->mipi_ris) & 0x00f00000;
++
++ dprintk(3, "IRQ: mblk_line = %x, mi_irq = %x, jpe_status_irq = %x,"
++ " jpe_error_irq = %x, isp_irq = %x", mblk_line, mi_irq,
++ jpe_status_irq, jpe_error_irq, isp_irq);
++
++ if (!(isp_irq | mi_irq | jpe_status_irq | jpe_error_irq | mblk_line
++ | mipi_irq)) {
++ dprintk(2, "unknown interrupt");
++ return IRQ_HANDLED;
++ }
++
++ REG_SET_SLICE_EX(mrv_reg->isp_icr, MRV_ISP_ICR_ALL, ON);
++ REG_SET_SLICE_EX(mrv_reg->mi_icr, MRV_MI_ALLIRQS, ON);
++ REG_SET_SLICE_EX(mrv_reg->jpe_error_icr, MRV_JPE_ALL_ERR, ON);
++ REG_SET_SLICE_EX(mrv_reg->jpe_status_icr, MRV_JPE_ALL_STAT, ON);
++ REG_WRITE_EX(mrv_reg->mipi_icr, 0xffffffff);
++
++ if (isp_irq) {
++ /* Currently we don't reset hardware even error detect */
++ dprintk(3, "ISP error IRQ received %x", isp_irq);
++ isp_error_num++;
++ isp_error_flag |= isp_irq;
++ return IRQ_HANDLED;
++ }
++
++ if (mipi_irq) {
++ dprintk(3, "error in mipi_irq %x", mipi_irq);
++ mipi_error_num++;
++ mipi_error_flag |= mipi_irq;
++ return IRQ_HANDLED;
++ }
++
++ if (mblk_line && mrst_isp_to_do_mblk_line) {
++ REG_SET_SLICE(mrv_reg->mi_imsc, MRV_MI_MBLK_LINE, OFF);
++ dprintk(3, "enter mblk_line irq");
++
++ if (!(isp->active && !isp->next)) {
++ dprintk(3, "wrong isq status");
++ if (isp->active)
++ dprintk(2, "actie->i = %d", isp->active->i);
++ else
++ dprintk(2, "actie = NULL");
++ if (isp->next)
++ dprintk(2, "next->i = %d", isp->next->i);
++ else
++ dprintk(2, "next = NULL");
++ return IRQ_HANDLED;
++ }
++
++ spin_lock_irqsave(&isp->lock, flags);
++
++ if (!list_empty(&isp->capture)) {
++ isp->next = list_entry(isp->capture.next,
++ struct videobuf_buffer, queue);
++ isp->next->state = VIDEOBUF_ACTIVE;
++ bufbase = videobuf_to_dma_contig(isp->next);
++ mrst_isp_update_marvinvfaddr(isp, bufbase,
++ CI_ISP_CFG_UPDATE_FRAME_SYNC);
++ dprintk(1, "updating new addr, next = %d",
++ isp->next->i);
++ } else {
++ isp->stopflag = 1;
++ dprintk(0, "stop isp");
++ }
++
++ mrst_isp_to_do_mblk_line = 0;
++
++ spin_unlock_irqrestore(&isp->lock, flags);
++
++ /* return IRQ_HANDLED; */
++ }
++
++ if (mi_irq && isp->pixelformat != V4L2_PIX_FMT_JPEG &&
++ !jpe_status_irq) {
++ dprintk(1, "view finding case");
++
++ if (!isp->active) {
++ dprintk(0, "no active queue, You should not go here");
++ mrst_isp_to_do_mblk_line = 1;
++ REG_SET_SLICE(mrv_reg->mi_imsc, MRV_MI_MBLK_LINE, ON);
++ return IRQ_HANDLED;
++ }
++
++ spin_lock_irqsave(&isp->lock, flags);
++
++ /* update captured frame status */
++ vb = isp->active;
++ /* vb->size = ci_isp_mif_get_byte_cnt(); */
++ /* if this buffer has been dq-ed, set nothing to state*/
++ if (vb->state != VIDEOBUF_IDLE)
++ vb->state = VIDEOBUF_DONE;
++ vb->field_count++;
++
++ isp->active = NULL;
++ dprintk(1, "buf %d size = %lx", vb->i, vb->size);
++ do_gettimeofday(&vb->ts);
++ wake_up(&vb->done);
++
++ if (!isp->next) {
++ if (!list_empty(&isp->capture)) {
++ isp->active = list_entry(isp->capture.next,
++ struct videobuf_buffer, queue);
++ list_del_init(&isp->active->queue);
++ isp->active->state = VIDEOBUF_ACTIVE;
++ dprintk(3, "start next frame %d",
++ isp->active->i);
++ mrst_isp_to_do_mblk_line = 1;
++ REG_SET_SLICE(mrv_reg->mi_imsc,
++ MRV_MI_MBLK_LINE, ON);
++ } else {
++ mrst_isp_to_do_mblk_line = 1;
++ REG_SET_SLICE(mrv_reg->mi_imsc,
++ MRV_MI_MBLK_LINE, ON);
++ mrst_isp_disable_interrupt(isp);
++ dprintk(3, "no frame right now");
++ }
++ } else {
++ isp->active = isp->next;
++ list_del_init(&isp->next->queue);
++ isp->next = NULL;
++ dprintk(1, "active = next = %d, next = NULL",
++ isp->active->i);
++ mrst_isp_to_do_mblk_line = 1;
++ REG_SET_SLICE(mrv_reg->mi_imsc, MRV_MI_MBLK_LINE, ON);
++ }
++
++ spin_unlock_irqrestore(&isp->lock, flags);
++ return IRQ_HANDLED;
++ }
++
++ if (jpe_status_irq) {
++ dprintk(2, "jpeg capture case");
++
++ if (!isp->active)
++ return IRQ_HANDLED;
++
++ spin_lock_irqsave(&isp->lock, flags);
++
++ vb = isp->active;
++ vb->size = ci_isp_mif_get_byte_cnt();
++ vb->state = VIDEOBUF_DONE;
++ do_gettimeofday(&vb->ts);
++ vb->field_count++;
++ wake_up(&vb->done);
++ isp->active = NULL;
++
++ dprintk(2, "index =%d, bufsize = %lx", vb->i, vb->size);
++
++ spin_unlock_irqrestore(&isp->lock, flags);
++
++ return IRQ_HANDLED;
++ }
++
++ if (jpe_error_irq)
++ dprintk(2, "entered jpe_error_irq");
++
++ return IRQ_HANDLED;
++}
++#endif
++
++static void __devexit mrst_isp_pci_remove(struct pci_dev *pdev)
++{
++ struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
++ struct mrst_isp_device *isp = to_isp(v4l2_dev);
++
++ DBG_entering;
++
++ ci_isp_stop(CI_ISP_CFG_UPDATE_FRAME_SYNC);
++ mrst_isp_disable_interrupt(isp);
++
++#if IRQ
++ free_irq(pdev->irq, isp);
++#endif
++
++ if (isp->vdev) {
++ dprintk(2, "isp->vdev = %p", isp->vdev);
++ video_unregister_device(isp->vdev);
++ }
++
++ dma_release_declared_memory(&pdev->dev);
++
++ iounmap(isp->regs);
++
++ pci_release_regions(pdev);
++
++ pci_disable_device(pdev);
++
++ v4l2_device_unregister(&isp->v4l2_dev);
++
++ kfree(isp);
++
++ DBG_leaving;
++}
++
++static int __devinit mrst_isp_pci_probe(struct pci_dev *pdev,
++ const struct pci_device_id *pci_id)
++{
++ struct mrst_isp_device *isp;
++ unsigned int start = 0;
++ unsigned int len = 0;
++ int ret = 0;
++
++ DBG_entering;
++
++ /* alloc device struct */
++ isp = kzalloc(sizeof(struct mrst_isp_device), GFP_KERNEL);
++ if (NULL == isp) {
++ printk(KERN_ERR "mrstisp: fail to kzalloc mrst_isp_device\n");
++ ret = -ENOMEM;
++ goto exit;
++ }
++
++ /* register v4l2 device */
++ ret = v4l2_device_register(&pdev->dev, &isp->v4l2_dev);
++ if (ret) {
++ printk(KERN_ERR "mrstisp: fail to register v4l2 device\n");
++ goto exit_free_isp;
++ }
++
++ /* PCI operations */
++ ret = pci_enable_device(pdev);
++ if (ret) {
++ printk(KERN_ERR "mrstisp: can't enable isp\n");
++ goto exit_unregister_v4l2;
++ }
++
++ pci_set_master(pdev);
++
++ ret = pci_request_regions(pdev, "mrst isp");
++ if (ret) {
++ printk(KERN_ERR "mrstisp: can't request regions\n");
++ goto exit_disable_isp;
++ }
++
++ /* mem bar 0 */
++ start = isp->mb0 = pci_resource_start(pdev, 0);
++ len = isp->mb0_size = pci_resource_len(pdev, 0);
++
++ isp->regs = ioremap_nocache(start, len);
++ mrst_isp_regs = isp->regs;
++ if (isp->regs == NULL) {
++ printk(KERN_ERR "mrstisp: fail to ioremap isp registers\n");
++ goto exit_release_regions;
++ }
++
++ dprintk(1, "isp mb0 = %lx, mb0_size = %lx, regs = %p",
++ isp->mb0, isp->mb0_size, isp->regs);
++
++ /* mem bar 1 */
++ start = isp->mb1 = pci_resource_start(pdev, 1);
++ len = isp->mb1_size = pci_resource_len(pdev, 1);
++
++ dprintk(1, "isp mb1 = %lx, mb1_size = %lx", isp->mb1, isp->mb1_size);
++
++ ret = dma_declare_coherent_memory(&pdev->dev, start,
++ /* start, len - 640 * 480 * 2, */
++ start, len,
++ DMA_MEMORY_MAP);
++ /*
++ DMA_MEMORY_MAP
++ | DMA_MEMORY_EXCLUSIVE);
++ */
++ if (!ret) {
++ dprintk(0, "failed to declare dma memory");
++ ret = -ENXIO;
++ goto exit_iounmap;
++ }
++
++ /* init device struct */
++ INIT_LIST_HEAD(&isp->capture);
++ spin_lock_init(&isp->lock);
++ mutex_init(&isp->mutex);
++
++ pci_read_config_word(pdev, PCI_VENDOR_ID, &isp->vendorID);
++ pci_read_config_word(pdev, PCI_DEVICE_ID, &isp->deviceID);
++
++ mrst_isp_defcfg_all_load(&isp->sys_conf.isp_cfg);
++
++ isp->bufwidth = 640;
++ isp->bufheight = 480;
++ isp->depth = 12;
++ isp->pixelformat = V4L2_PIX_FMT_YVU420;
++ isp->streaming = 0;
++ isp->buffer_required = 0;
++
++
++ /* probe sensor */
++ ret = mrst_ci_sensor_probe(isp);
++ if (ret) {
++ dprintk(0, "failed to sensor probe\n");
++ goto exit_dma_release;
++ }
++
++ /* regiter video device */
++ isp->vdev = &mrst_isp_vdev;
++ isp->vdev->parent = &pdev->dev;
++ video_set_drvdata(isp->vdev, isp);
++
++ ret = video_register_device(isp->vdev, VFL_TYPE_GRABBER, -1);
++ if (ret) {
++ dprintk(0, "fail to register video deivice");
++ goto exit_dma_release;
++ }
++
++ dprintk(0, "registered dev/video%d", isp->vdev->num);
++ dprintk(0, "isp->vdev = %p", isp->vdev);
++
++#if IRQ
++ /* request irq */
++ ret = request_irq(pdev->irq, mrst_isp_irq_handler, IRQF_SHARED,
++ /* pci_name(pdev), isp); */
++ "mrst_camera_imaging", isp);
++ if (ret) {
++ dprintk(0, "fail to request irq");
++ goto exit_unregister_video;
++ }
++
++ mrst_isp_disable_interrupt(isp);
++#endif
++
++ /* probe flash */
++ mrst_ci_flash_probe(isp);
++
++ mrst_isp_to_do_mblk_line = 0;
++
++ dprintk(0, "mrstisp driver module successfully loaded");
++ return 0;
++
++exit_unregister_video:
++ video_unregister_device(isp->vdev);
++exit_dma_release:
++ dma_release_declared_memory(&pdev->dev);
++exit_iounmap:
++ iounmap(isp->regs);
++exit_release_regions:
++ pci_release_regions(pdev);
++exit_disable_isp:
++ pci_disable_device(pdev);
++exit_unregister_v4l2:
++ v4l2_device_unregister(&isp->v4l2_dev);
++exit_free_isp:
++ kfree(isp);
++exit:
++ return ret;
++}
++
++#ifdef CONFIG_PM
++static int mrst_isp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
++ struct mrst_isp_device *isp = to_isp(v4l2_dev);
++ int ret;
++
++ DBG_entering;
++
++ ci_isp_off();
++
++ ret = pci_save_state(pdev);
++ if (ret) {
++ printk(KERN_ERR "mrstisp: pci_save_state failed %d\n", ret);
++ return ret;
++ }
++
++ ret = pci_set_power_state(pdev, PCI_D3cold);
++ if (ret) {
++ printk(KERN_ERR "mrstisp: fail to set power state\n");
++ return ret;
++ }
++
++/*
++ ret = ci_sensor_suspend();
++ if (ret) {
++ printk(KERN_ERR "mrstisp: Fail to suspend sensor\n");
++ return ret;
++ }
++*/
++ if (isp->sensor_soc)
++ v4l2_subdev_call(isp->sensor_soc, core, s_gpio, 1);
++ if (isp->sensor_raw)
++ v4l2_subdev_call(isp->sensor_raw, core, s_gpio, 1);
++
++ DBG_leaving;
++ return 0;
++}
++
++static int mrst_isp_pci_resume(struct pci_dev *pdev)
++{
++ struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
++ struct mrst_isp_device *isp = to_isp(v4l2_dev);
++ int ret;
++
++ DBG_entering;
++
++ pci_set_power_state(pdev, PCI_D0);
++ pci_restore_state(pdev);
++
++ ret = pci_enable_device(pdev);
++ if (ret) {
++ printk(KERN_ERR "mrstisp: fail to enable device in resume\n");
++ return ret;
++ }
++
++/*
++ ret = ci_sensor_resume();
++ if (ret) {
++ printk(KERN_ERR "mrstisp: Fail to resume sensor\n");
++ return ret;
++ }
++*/
++ if (isp->sensor_soc)
++ v4l2_subdev_call(isp->sensor_soc, core, s_gpio, 0);
++ if (isp->sensor_raw)
++ v4l2_subdev_call(isp->sensor_raw, core, s_gpio, 0);
++
++ ci_isp_init();
++
++ DBG_leaving;
++ return 0;
++}
++#endif
++
++static struct pci_device_id mrst_isp_pci_tbl[] __devinitdata = {
++ { PCI_DEVICE(0x8086, 0x080B) },
++ {0,}
++};
++
++MODULE_DEVICE_TABLE(pci, mrst_isp_pci_tbl);
++
++static struct pci_driver mrst_isp_pci_driver = {
++ .name = "mrstisp",
++ .id_table = mrst_isp_pci_tbl,
++ .probe = mrst_isp_pci_probe,
++ .remove = mrst_isp_pci_remove,
++ #ifdef CONFIG_PM
++ .suspend = mrst_isp_pci_suspend,
++ .resume = mrst_isp_pci_resume,
++ #endif
++};
++
++static int __init mrst_isp_pci_init(void)
++{
++ int ret;
++
++ DBG_entering;
++
++ ret = pci_register_driver(&mrst_isp_pci_driver);
++ if (ret) {
++ printk(KERN_ERR "mrstisp: Unable to register driver\n");
++ return ret;
++ }
++
++ if (ret)
++ dprintk(1, "Unable to register flash driver");
++
++ DBG_leaving;
++ return 0;
++}
++
++static void __exit mrst_isp_pci_exit(void)
++{
++ DBG_entering;
++
++ pci_unregister_driver(&mrst_isp_pci_driver);
++
++ DBG_leaving;
++}
++
++module_init(mrst_isp_pci_init);
++/* late_initcall(mrst_isp_pci_init); */
++module_exit(mrst_isp_pci_exit);
++
++MODULE_DESCRIPTION("Intel Moorestown ISP driver");
++MODULE_AUTHOR("Xiaolin Zhang <xiaolin.zhang@intel.com>");
++MODULE_LICENSE("GPL");
++MODULE_SUPPORTED_DEVICE("video");
++
+diff --git a/drivers/media/video/mrstci/mrstisp/mrstisp_mif.c b/drivers/media/video/mrstci/mrstisp/mrstisp_mif.c
+new file mode 100644
+index 0000000..a05731a
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstisp/mrstisp_mif.c
+@@ -0,0 +1,763 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * Copyright (c) Silicon Image 2008 www.siliconimage.com
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include "mrstisp_stdinc.h"
++
++/*
++ * sets all main picture and self picture buffer offsets back to 0
++ */
++void ci_isp_mif_reset_offsets(enum ci_isp_conf_update_time update_time)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ REG_SET_SLICE(mrv_reg->mi_mp_y_offs_cnt_init,
++ MRV_MI_MP_Y_OFFS_CNT_INIT, 0);
++ REG_SET_SLICE(mrv_reg->mi_mp_cb_offs_cnt_init,
++ MRV_MI_MP_CB_OFFS_CNT_INIT, 0);
++ REG_SET_SLICE(mrv_reg->mi_mp_cr_offs_cnt_init,
++ MRV_MI_MP_CR_OFFS_CNT_INIT, 0);
++
++ REG_SET_SLICE(mrv_reg->mi_sp_y_offs_cnt_init,
++ MRV_MI_SP_Y_OFFS_CNT_INIT, 0);
++ REG_SET_SLICE(mrv_reg->mi_sp_cb_offs_cnt_init,
++ MRV_MI_SP_CB_OFFS_CNT_INIT, 0);
++ REG_SET_SLICE(mrv_reg->mi_sp_cr_offs_cnt_init,
++ MRV_MI_SP_CR_OFFS_CNT_INIT, 0);
++
++ REG_SET_SLICE(mrv_reg->mi_ctrl, MRV_MI_INIT_OFFSET_EN, ON);
++ REG_SET_SLICE(mrv_reg->mi_ctrl, MRV_MI_INIT_BASE_EN, ON);
++
++ switch (update_time) {
++ case CI_ISP_CFG_UPDATE_FRAME_SYNC:
++ break;
++ case CI_ISP_CFG_UPDATE_IMMEDIATE:
++ REG_SET_SLICE(mrv_reg->mi_init, MRV_MI_MI_CFG_UPD, ON);
++ break;
++ case CI_ISP_CFG_UPDATE_LATER:
++ break;
++ default:
++ break;
++ }
++}
++
++/*
++ * This function get the byte count from the last JPEG or raw data transfer
++ */
++u32 ci_isp_mif_get_byte_cnt(void)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++
++ return (u32) REG_GET_SLICE(mrv_reg->mi_byte_cnt, MRV_MI_BYTE_CNT);
++}
++
++/*
++ * Sets the desired self picture orientation, if possible.
++ */
++static int ci_isp_mif_set_self_pic_orientation(enum ci_isp_mif_sp_mode
++ mrv_mif_sp_mode,
++ int activate_self_path)
++{
++
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ u32 mi_ctrl = REG_READ(mrv_reg->mi_ctrl);
++
++ u32 output_format = REG_GET_SLICE(mi_ctrl, MRV_MI_SP_OUTPUT_FORMAT);
++
++ /* apply the desired self picture orientation, if possible */
++ switch (mrv_mif_sp_mode) {
++ case CI_ISP_MIF_SP_ORIGINAL:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_ROT_AND_FLIP, 0);
++ break;
++
++ case CI_ISP_MIF_SP_HORIZONTAL_FLIP:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_ROT_AND_FLIP,
++ MRV_MI_ROT_AND_FLIP_H_FLIP);
++ break;
++
++ case CI_ISP_MIF_SP_VERTICAL_FLIP:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_ROT_AND_FLIP,
++ MRV_MI_ROT_AND_FLIP_V_FLIP);
++ break;
++
++ case CI_ISP_MIF_SP_ROTATION_090_DEG:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_ROT_AND_FLIP,
++ MRV_MI_ROT_AND_FLIP_ROTATE);
++ break;
++
++ case CI_ISP_MIF_SP_ROTATION_180_DEG:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_ROT_AND_FLIP,
++ MRV_MI_ROT_AND_FLIP_H_FLIP |
++ MRV_MI_ROT_AND_FLIP_V_FLIP);
++ break;
++
++ case CI_ISP_MIF_SP_ROTATION_270_DEG:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_ROT_AND_FLIP,
++ MRV_MI_ROT_AND_FLIP_H_FLIP |
++ MRV_MI_ROT_AND_FLIP_V_FLIP |
++ MRV_MI_ROT_AND_FLIP_ROTATE);
++ break;
++
++ case CI_ISP_MIF_SP_ROT_090_V_FLIP:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_ROT_AND_FLIP,
++ MRV_MI_ROT_AND_FLIP_V_FLIP |
++ MRV_MI_ROT_AND_FLIP_ROTATE);
++ break;
++
++ case CI_ISP_MIF_SP_ROT_270_V_FLIP:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_ROT_AND_FLIP,
++ MRV_MI_ROT_AND_FLIP_H_FLIP |
++ MRV_MI_ROT_AND_FLIP_ROTATE);
++ break;
++
++ default:
++ eprintk("unknown value for mrv_mif_sp_mode");
++ return CI_STATUS_NOTSUPP;
++ }
++
++ if (REG_GET_SLICE(mi_ctrl, MRV_MI_ROT_AND_FLIP) &
++ MRV_MI_ROT_AND_FLIP_ROTATE) {
++ switch (output_format) {
++ case MRV_MI_SP_OUTPUT_FORMAT_RGB888:
++ case MRV_MI_SP_OUTPUT_FORMAT_RGB666:
++ case MRV_MI_SP_OUTPUT_FORMAT_RGB565:
++ /* rotation supported on this output modes */
++ break;
++ default:
++ eprintk("rotation is only allowed for RGB modes.");
++ return CI_STATUS_NOTSUPP;
++ }
++ }
++
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_ENABLE,
++ (activate_self_path) ? ENABLE : DISABLE);
++ REG_WRITE(mrv_reg->mi_ctrl, mi_ctrl);
++ REG_SET_SLICE(mrv_reg->mi_init, MRV_MI_MI_CFG_UPD, ON);
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Checks the main or self picture path buffer structure.
++ */
++static int ci_isp_mif_check_mi_path_conf(const struct ci_isp_mi_path_conf
++ *isp_mi_path_conf, int main_buffer)
++{
++ if (!isp_mi_path_conf) {
++ eprintk("isp_mi_path_conf is NULL");
++ return CI_STATUS_NULL_POINTER;
++ }
++
++ if (!isp_mi_path_conf->ybuffer.pucbuffer) {
++ eprintk("isp_mi_path_conf->ybuffer.pucbuffer is NULL");
++ return CI_STATUS_NULL_POINTER;
++ }
++
++ if (main_buffer) {
++ if ((((unsigned long)(isp_mi_path_conf->ybuffer.pucbuffer)
++ & ~(MRV_MI_MP_Y_BASE_AD_INIT_VALID_MASK)) != 0)
++ ||
++ ((isp_mi_path_conf->ybuffer.size
++ & ~(MRV_MI_MP_Y_SIZE_INIT_VALID_MASK)) != 0)
++ ||
++ ((isp_mi_path_conf->ybuffer.size
++ & (MRV_MI_MP_Y_SIZE_INIT_VALID_MASK)) == 0)
++ ||
++ ((isp_mi_path_conf->ybuffer.offs
++ & ~(MRV_MI_MP_Y_OFFS_CNT_INIT_VALID_MASK)) != 0)) {
++ return CI_STATUS_OUTOFRANGE;
++ }
++ } else {
++ if ((((unsigned long) isp_mi_path_conf->ybuffer.pucbuffer
++ & ~(MRV_MI_SP_Y_BASE_AD_INIT_VALID_MASK)) != 0)
++ ||
++ ((isp_mi_path_conf->ybuffer.size &
++ ~(MRV_MI_SP_Y_SIZE_INIT_VALID_MASK)) != 0)
++ ||
++ ((isp_mi_path_conf->ybuffer.size &
++ (MRV_MI_SP_Y_SIZE_INIT_VALID_MASK)) == 0)
++ ||
++ ((isp_mi_path_conf->ybuffer.offs &
++ ~(MRV_MI_SP_Y_OFFS_CNT_INIT_VALID_MASK)) !=
++ 0)
++ ||
++ ((isp_mi_path_conf->llength &
++ ~(MRV_MI_SP_Y_LLENGTH_VALID_MASK)) != 0)
++ ||
++ ((isp_mi_path_conf->
++ llength & (MRV_MI_SP_Y_LLENGTH_VALID_MASK)) == 0)) {
++ return CI_STATUS_OUTOFRANGE;
++ }
++ }
++
++ if (isp_mi_path_conf->cb_buffer.pucbuffer != 0) {
++ if (main_buffer) {
++ if ((((unsigned long)
++ isp_mi_path_conf->cb_buffer.pucbuffer
++ & ~(MRV_MI_MP_CB_BASE_AD_INIT_VALID_MASK)) !=
++ 0)
++ ||
++ ((isp_mi_path_conf->cb_buffer.size &
++ ~(MRV_MI_MP_CB_SIZE_INIT_VALID_MASK)) != 0)
++ ||
++ ((isp_mi_path_conf->cb_buffer.size &
++ (MRV_MI_MP_CB_SIZE_INIT_VALID_MASK)) == 0)
++ ||
++ ((isp_mi_path_conf->cb_buffer.offs &
++ ~(MRV_MI_MP_CB_OFFS_CNT_INIT_VALID_MASK)) !=
++ 0)) {
++ return CI_STATUS_OUTOFRANGE;
++ }
++ } else {
++ if ((((unsigned long)
++ isp_mi_path_conf->cb_buffer.pucbuffer
++ & ~(MRV_MI_SP_CB_BASE_AD_INIT_VALID_MASK)) !=
++ 0)
++ ||
++ ((isp_mi_path_conf->cb_buffer.size &
++ ~(MRV_MI_SP_CB_SIZE_INIT_VALID_MASK)) != 0)
++ ||
++ ((isp_mi_path_conf->cb_buffer.size &
++ (MRV_MI_SP_CB_SIZE_INIT_VALID_MASK)) == 0)
++ ||
++ ((isp_mi_path_conf->cb_buffer.offs &
++ ~(MRV_MI_SP_CB_OFFS_CNT_INIT_VALID_MASK)) !=
++ 0)) {
++ return CI_STATUS_OUTOFRANGE;
++ }
++ }
++ }
++
++ if (isp_mi_path_conf->cr_buffer.pucbuffer != 0) {
++ if (main_buffer) {
++ if ((((unsigned long)
++ isp_mi_path_conf->cr_buffer.pucbuffer
++ & ~(MRV_MI_MP_CR_BASE_AD_INIT_VALID_MASK)) !=
++ 0)
++ ||
++ ((isp_mi_path_conf->cr_buffer.size &
++ ~(MRV_MI_MP_CR_SIZE_INIT_VALID_MASK)) != 0)
++ ||
++ ((isp_mi_path_conf->cr_buffer.size &
++ (MRV_MI_MP_CR_SIZE_INIT_VALID_MASK)) == 0)
++ ||
++ ((isp_mi_path_conf->cr_buffer.offs &
++ ~(MRV_MI_MP_CR_OFFS_CNT_INIT_VALID_MASK)) !=
++ 0)){
++ return CI_STATUS_OUTOFRANGE;
++ }
++ } else {
++ if ((((unsigned long)
++ isp_mi_path_conf->cr_buffer.pucbuffer
++ & ~(MRV_MI_SP_CR_BASE_AD_INIT_VALID_MASK))
++ != 0)
++ ||
++ ((isp_mi_path_conf->cr_buffer.size &
++ ~(MRV_MI_SP_CR_SIZE_INIT_VALID_MASK)) != 0)
++ ||
++ ((isp_mi_path_conf->cr_buffer.size &
++ (MRV_MI_SP_CR_SIZE_INIT_VALID_MASK)) == 0)
++ ||
++ ((isp_mi_path_conf->cr_buffer.offs &
++ ~(MRV_MI_SP_CR_OFFS_CNT_INIT_VALID_MASK)) != 0)) {
++ return CI_STATUS_OUTOFRANGE;
++ }
++ }
++ }
++
++ return CI_STATUS_SUCCESS;
++}
++
++/*
++ * Configures the main picture path buffers of the MI.
++ */
++int ci_isp_mif_set_main_buffer(const struct ci_isp_mi_path_conf
++ *isp_mi_path_conf,
++ enum ci_isp_conf_update_time update_time)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ int error = CI_STATUS_FAILURE;
++
++ error = ci_isp_mif_check_mi_path_conf(isp_mi_path_conf, true);
++ if (error != CI_STATUS_SUCCESS)
++ return error;
++
++ /* set register values */
++ REG_SET_SLICE(mrv_reg->mi_mp_y_base_ad_init,
++ MRV_MI_MP_Y_BASE_AD_INIT,
++ (u32)(unsigned long)isp_mi_path_conf->ybuffer.pucbuffer);
++ REG_SET_SLICE(mrv_reg->mi_mp_y_size_init, MRV_MI_MP_Y_SIZE_INIT,
++ isp_mi_path_conf->ybuffer.size);
++ REG_SET_SLICE(mrv_reg->mi_mp_y_offs_cnt_init,
++ MRV_MI_MP_Y_OFFS_CNT_INIT,
++ isp_mi_path_conf->ybuffer.offs);
++
++ if (isp_mi_path_conf->cb_buffer.pucbuffer != 0) {
++ REG_SET_SLICE(mrv_reg->mi_mp_cb_base_ad_init,
++ MRV_MI_MP_CB_BASE_AD_INIT,
++ (u32)(unsigned long) isp_mi_path_conf->cb_buffer.
++ pucbuffer);
++ REG_SET_SLICE(mrv_reg->mi_mp_cb_size_init,
++ MRV_MI_MP_CB_SIZE_INIT,
++ isp_mi_path_conf->cb_buffer.size);
++ REG_SET_SLICE(mrv_reg->mi_mp_cb_offs_cnt_init,
++ MRV_MI_MP_CB_OFFS_CNT_INIT,
++ isp_mi_path_conf->cb_buffer.offs);
++ }
++
++ if (isp_mi_path_conf->cr_buffer.pucbuffer != 0) {
++ REG_SET_SLICE(mrv_reg->mi_mp_cr_base_ad_init,
++ MRV_MI_MP_CR_BASE_AD_INIT,
++ (u32)(unsigned long) isp_mi_path_conf->cr_buffer.
++ pucbuffer);
++ REG_SET_SLICE(mrv_reg->mi_mp_cr_size_init,
++ MRV_MI_MP_CR_SIZE_INIT,
++ isp_mi_path_conf->cr_buffer.size);
++ REG_SET_SLICE(mrv_reg->mi_mp_cr_offs_cnt_init,
++ MRV_MI_MP_CR_OFFS_CNT_INIT,
++ isp_mi_path_conf->cr_buffer.offs);
++ }
++
++ /*
++ * update base and offset registers during next immediate or
++ * automatic update request
++ */
++ REG_SET_SLICE(mrv_reg->mi_ctrl, MRV_MI_INIT_OFFSET_EN, ENABLE);
++ REG_SET_SLICE(mrv_reg->mi_ctrl, MRV_MI_INIT_BASE_EN, ENABLE);
++
++ switch (update_time) {
++ case CI_ISP_CFG_UPDATE_FRAME_SYNC:
++ /*
++ * frame synchronous update of shadow registers,
++ * update is done after the curr frame
++ */
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_GEN_CFG_UPD, ON);
++ break;
++ case CI_ISP_CFG_UPDATE_IMMEDIATE:
++ /*
++ * immediate update of shadow registers
++ * (will disturb an ongoing frame processing)
++ */
++ REG_SET_SLICE(mrv_reg->mi_init, MRV_MI_MI_CFG_UPD, ON);
++ break;
++ case CI_ISP_CFG_UPDATE_LATER:
++ /* no update from within this function */
++ break;
++ default:
++ break;
++ }
++
++ return error;
++}
++
++/*
++ * Configures the self picture path buffers of the MI.
++ *
++ */
++int ci_isp_mif_set_self_buffer(const struct ci_isp_mi_path_conf
++ *isp_mi_path_conf,
++ enum ci_isp_conf_update_time update_time)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ int error = CI_STATUS_FAILURE;
++
++ error = ci_isp_mif_check_mi_path_conf(isp_mi_path_conf, false);
++ if (error != CI_STATUS_SUCCESS)
++ return error;
++
++ /* set register values */
++ REG_SET_SLICE(mrv_reg->mi_sp_y_base_ad_init,
++ MRV_MI_SP_Y_BASE_AD_INIT,
++ (u32)(unsigned long)isp_mi_path_conf->ybuffer.pucbuffer);
++ REG_SET_SLICE(mrv_reg->mi_sp_y_size_init, MRV_MI_SP_Y_SIZE_INIT,
++ isp_mi_path_conf->ybuffer.size);
++ REG_SET_SLICE(mrv_reg->mi_sp_y_offs_cnt_init,
++ MRV_MI_SP_Y_OFFS_CNT_INIT,
++ isp_mi_path_conf->ybuffer.offs);
++
++ /*
++ * llength is counted in pixels and this value could be stored
++ * directly into the register
++ */
++ REG_SET_SLICE(mrv_reg->mi_sp_y_llength, MRV_MI_SP_Y_LLENGTH,
++ isp_mi_path_conf->llength);
++
++ if (isp_mi_path_conf->cb_buffer.pucbuffer) {
++ REG_SET_SLICE(mrv_reg->mi_sp_cb_base_ad_init,
++ MRV_MI_SP_CB_BASE_AD_INIT,
++ (u32) (unsigned long)isp_mi_path_conf->cb_buffer.
++ pucbuffer);
++ REG_SET_SLICE(mrv_reg->mi_sp_cb_size_init,
++ MRV_MI_SP_CB_SIZE_INIT,
++ isp_mi_path_conf->cb_buffer.size);
++ REG_SET_SLICE(mrv_reg->mi_sp_cb_offs_cnt_init,
++ MRV_MI_SP_CB_OFFS_CNT_INIT,
++ isp_mi_path_conf->cb_buffer.offs);
++ }
++
++ if (isp_mi_path_conf->cr_buffer.pucbuffer) {
++ REG_SET_SLICE(mrv_reg->mi_sp_cr_base_ad_init,
++ MRV_MI_SP_CR_BASE_AD_INIT,
++ (u32) (unsigned long)isp_mi_path_conf->cr_buffer.
++ pucbuffer);
++ REG_SET_SLICE(mrv_reg->mi_sp_cr_size_init,
++ MRV_MI_SP_CR_SIZE_INIT,
++ isp_mi_path_conf->cr_buffer.size);
++ REG_SET_SLICE(mrv_reg->mi_sp_cr_offs_cnt_init,
++ MRV_MI_SP_CR_OFFS_CNT_INIT,
++ isp_mi_path_conf->cr_buffer.offs);
++ }
++
++ if ((!isp_mi_path_conf->ypic_width)
++ || (!isp_mi_path_conf->ypic_height)) {
++ return CI_STATUS_FAILURE;
++ }
++
++ REG_SET_SLICE(mrv_reg->mi_sp_y_pic_width, MRV_MI_SP_Y_PIC_WIDTH,
++ isp_mi_path_conf->ypic_width);
++ REG_SET_SLICE(mrv_reg->mi_sp_y_pic_height, MRV_MI_SP_Y_PIC_HEIGHT,
++ isp_mi_path_conf->ypic_height);
++ REG_SET_SLICE(mrv_reg->mi_sp_y_pic_size, MRV_MI_SP_Y_PIC_SIZE,
++ isp_mi_path_conf->ypic_height *
++ isp_mi_path_conf->llength);
++
++ /*
++ * update base and offset registers during next immediate or
++ * automatic update request
++ */
++ REG_SET_SLICE(mrv_reg->mi_ctrl, MRV_MI_INIT_OFFSET_EN, ENABLE);
++ REG_SET_SLICE(mrv_reg->mi_ctrl, MRV_MI_INIT_BASE_EN, ENABLE);
++
++ switch (update_time) {
++ case CI_ISP_CFG_UPDATE_FRAME_SYNC:
++ REG_SET_SLICE(mrv_reg->isp_ctrl, MRV_ISP_ISP_GEN_CFG_UPD,
++ ON);
++ break;
++ case CI_ISP_CFG_UPDATE_IMMEDIATE:
++ REG_SET_SLICE(mrv_reg->mi_init, MRV_MI_MI_CFG_UPD, ON);
++ break;
++ case CI_ISP_CFG_UPDATE_LATER:
++ break;
++ default:
++ break;
++ }
++
++ return error;
++}
++
++/*
++ * Configures the DMA path of the MI.
++ *
++ */
++int ci_isp_mif_set_path_and_orientation(const struct ci_isp_mi_ctrl
++ *mrv_mi_ctrl)
++{
++ struct isp_register *mrv_reg = (struct isp_register *) MEM_MRV_REG_BASE;
++ int error = CI_STATUS_OUTOFRANGE;
++ u32 mi_ctrl = 0;
++
++ if (!mrv_mi_ctrl) {
++ eprintk("mrv_mi_ctrl is NULL");
++ return CI_STATUS_NULL_POINTER;
++ }
++
++ if ((mrv_mi_ctrl->irq_offs_init &
++ ~(MRV_MI_MP_Y_IRQ_OFFS_INIT_VALID_MASK)) != 0) {
++ eprintk("bad mrv_mi_ctrl->irq_offs_init value");
++ return error;
++ }
++
++ REG_SET_SLICE(mrv_reg->mi_mp_y_irq_offs_init,
++ MRV_MI_MP_Y_IRQ_OFFS_INIT, mrv_mi_ctrl->irq_offs_init);
++
++ /* main picture path */
++ switch (mrv_mi_ctrl->main_path) {
++ case CI_ISP_PATH_OFF:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_MP_ENABLE, OFF);
++ break;
++ case CI_ISP_PATH_ON:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_MP_ENABLE, ON);
++ break;
++ case CI_ISP_PATH_JPE:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_JPEG_ENABLE, ON);
++ break;
++ case CI_ISP_PATH_RAW8:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_RAW_ENABLE, ON);
++ break;
++ case CI_ISP_PATH_RAW816:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_RAW_ENABLE, ON);
++ REG_SET_SLICE(mi_ctrl, MRV_MI_MP_WRITE_FORMAT,
++ MRV_MI_MP_WRITE_FORMAT_INTERLEAVED);
++ break;
++ default:
++ eprintk("bad mrv_mi_ctrl->main_path value");
++ return error;
++ }
++
++ /* self picture path output format */
++ switch (mrv_mi_ctrl->mrv_mif_sp_out_form) {
++ case CI_ISP_MIF_COL_FORMAT_YCBCR_422:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_OUTPUT_FORMAT,
++ MRV_MI_SP_OUTPUT_FORMAT_YUV422);
++ break;
++ case CI_ISP_MIF_COL_FORMAT_YCBCR_444:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_OUTPUT_FORMAT,
++ MRV_MI_SP_OUTPUT_FORMAT_YUV444);
++ break;
++ case CI_ISP_MIF_COL_FORMAT_YCBCR_420:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_OUTPUT_FORMAT,
++ MRV_MI_SP_OUTPUT_FORMAT_YUV420);
++ break;
++ case CI_ISP_MIF_COL_FORMAT_YCBCR_400:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_OUTPUT_FORMAT,
++ MRV_MI_SP_OUTPUT_FORMAT_YUV400);
++ break;
++ case CI_ISP_MIF_COL_FORMAT_RGB_565:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_OUTPUT_FORMAT,
++ MRV_MI_SP_OUTPUT_FORMAT_RGB565);
++ break;
++ case CI_ISP_MIF_COL_FORMAT_RGB_888:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_OUTPUT_FORMAT,
++ MRV_MI_SP_OUTPUT_FORMAT_RGB888);
++ break;
++ case CI_ISP_MIF_COL_FORMAT_RGB_666:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_OUTPUT_FORMAT,
++ MRV_MI_SP_OUTPUT_FORMAT_RGB666);
++ break;
++
++ default:
++ eprintk("bad mrv_mi_ctrl->mrv_mif_sp_out_form value");
++ return error;
++ }
++
++ /* self picture path input format */
++ switch (mrv_mi_ctrl->mrv_mif_sp_in_form) {
++ case CI_ISP_MIF_COL_FORMAT_YCBCR_422:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_INPUT_FORMAT,
++ MRV_MI_SP_INPUT_FORMAT_YUV422);
++ break;
++ case CI_ISP_MIF_COL_FORMAT_YCBCR_444:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_INPUT_FORMAT,
++ MRV_MI_SP_INPUT_FORMAT_YUV444);
++ break;
++ case CI_ISP_MIF_COL_FORMAT_YCBCR_420:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_INPUT_FORMAT,
++ MRV_MI_SP_INPUT_FORMAT_YUV420);
++ break;
++ case CI_ISP_MIF_COL_FORMAT_YCBCR_400:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_INPUT_FORMAT,
++ MRV_MI_SP_INPUT_FORMAT_YUV400);
++ break;
++ case CI_ISP_MIF_COL_FORMAT_RGB_565:
++ case CI_ISP_MIF_COL_FORMAT_RGB_666:
++ case CI_ISP_MIF_COL_FORMAT_RGB_888:
++ default:
++ eprintk("bad mrv_mi_ctrl->mrv_mif_sp_in_form value");
++ return error;
++ }
++
++ error = CI_STATUS_SUCCESS;
++
++ /* self picture path write format */
++ switch (mrv_mi_ctrl->mrv_mif_sp_pic_form) {
++ case CI_ISP_MIF_PIC_FORM_PLANAR:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_WRITE_FORMAT,
++ MRV_MI_SP_WRITE_FORMAT_PLANAR);
++ break;
++ case CI_ISP_MIF_PIC_FORM_SEMI_PLANAR:
++ if ((mrv_mi_ctrl->mrv_mif_sp_out_form ==
++ CI_ISP_MIF_COL_FORMAT_YCBCR_422)
++ || (mrv_mi_ctrl->mrv_mif_sp_out_form ==
++ CI_ISP_MIF_COL_FORMAT_YCBCR_420)) {
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_WRITE_FORMAT,
++ MRV_MI_SP_WRITE_FORMAT_SEMIPLANAR);
++ } else {
++ error = CI_STATUS_NOTSUPP;
++ }
++ break;
++ case CI_ISP_MIF_PIC_FORM_INTERLEAVED:
++ if (mrv_mi_ctrl->mrv_mif_sp_out_form ==
++ CI_ISP_MIF_COL_FORMAT_YCBCR_422) {
++ REG_SET_SLICE(mi_ctrl, MRV_MI_SP_WRITE_FORMAT,
++ MRV_MI_SP_WRITE_FORMAT_INTERLEAVED);
++ } else {
++ error = CI_STATUS_NOTSUPP;
++ }
++ break;
++ default:
++ error = CI_STATUS_OUTOFRANGE;
++ break;
++
++ }
++
++ if (error != CI_STATUS_SUCCESS) {
++ eprintk("bad mrv_mi_ctrl->mrv_mif_sp_pic_form value");
++ return error;
++ }
++
++ if (mrv_mi_ctrl->main_path == CI_ISP_PATH_ON) {
++ /* for YCbCr mode only, permitted for raw mode */
++ /* main picture path write format */
++ switch (mrv_mi_ctrl->mrv_mif_mp_pic_form) {
++ case CI_ISP_MIF_PIC_FORM_PLANAR:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_MP_WRITE_FORMAT,
++ MRV_MI_MP_WRITE_FORMAT_PLANAR);
++ break;
++ case CI_ISP_MIF_PIC_FORM_SEMI_PLANAR:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_MP_WRITE_FORMAT,
++ MRV_MI_MP_WRITE_FORMAT_SEMIPLANAR);
++ break;
++ case CI_ISP_MIF_PIC_FORM_INTERLEAVED:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_MP_WRITE_FORMAT,
++ MRV_MI_MP_WRITE_FORMAT_INTERLEAVED);
++ break;
++ default:
++ error = CI_STATUS_OUTOFRANGE;
++ break;
++ }
++ }
++
++ if (error != CI_STATUS_SUCCESS) {
++ eprintk("bad mrv_mi_ctrl->mrv_mif_mp_pic_form value");
++ return error;
++ }
++
++ /* burst length for chrominance for write port */
++ /* setting burst mode to 16 bits
++ switch (mrv_mi_ctrl->burst_length_chrom) {
++ case CI_ISP_MIF_BURST_LENGTH_4:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_BURST_LEN_CHROM,
++ MRV_MI_BURST_LEN_CHROM_4);
++ break;
++ case CI_ISP_MIF_BURST_LENGTH_8:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_BURST_LEN_CHROM,
++ MRV_MI_BURST_LEN_CHROM_8);
++ break;
++ case CI_ISP_MIF_BURST_LENGTH_16:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_BURST_LEN_CHROM,
++ MRV_MI_BURST_LEN_CHROM_16);
++ break;
++ default:
++ error = CI_STATUS_OUTOFRANGE;
++ break;
++ }
++ */
++ REG_SET_SLICE(mi_ctrl, MRV_MI_BURST_LEN_CHROM,
++ MRV_MI_BURST_LEN_CHROM_16);
++
++ if (error != CI_STATUS_SUCCESS) {
++ eprintk("bad mrv_mi_ctrl->burst_length_chrom value");
++ return error;
++ }
++
++ /* burst length for luminance for write port */
++ /* setting burst mode to 16 bits
++ switch (mrv_mi_ctrl->burst_length_lum) {
++ case CI_ISP_MIF_BURST_LENGTH_4:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_BURST_LEN_LUM,
++ MRV_MI_BURST_LEN_LUM_4);
++ break;
++ case CI_ISP_MIF_BURST_LENGTH_8:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_BURST_LEN_LUM,
++ MRV_MI_BURST_LEN_LUM_8);
++ break;
++ case CI_ISP_MIF_BURST_LENGTH_16:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_BURST_LEN_LUM,
++ MRV_MI_BURST_LEN_LUM_16);
++ break;
++ default:
++ error = CI_STATUS_OUTOFRANGE;
++ break;
++ }
++ */
++ REG_SET_SLICE(mi_ctrl, MRV_MI_BURST_LEN_LUM,
++ MRV_MI_BURST_LEN_LUM_16);
++
++ if (error != CI_STATUS_SUCCESS) {
++ eprintk("bad mrv_mi_ctrl->burst_length_lum value");
++ return error;
++ }
++
++ /* enable updating of the shadow registers for main and self picture
++ * to their init values
++ */
++ switch (mrv_mi_ctrl->init_vals) {
++ case CI_ISP_MIF_NO_INIT_VALS:
++ break;
++ case CI_ISP_MIF_INIT_OFFS:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_INIT_OFFSET_EN, ENABLE);
++ break;
++ case CI_ISP_MIF_INIT_BASE:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_INIT_BASE_EN, ENABLE);
++ break;
++ case CI_ISP_MIF_INIT_OFFSAndBase:
++ REG_SET_SLICE(mi_ctrl, MRV_MI_INIT_OFFSET_EN, ENABLE);
++ REG_SET_SLICE(mi_ctrl, MRV_MI_INIT_BASE_EN, ENABLE);
++ break;
++ default:
++ error = CI_STATUS_OUTOFRANGE;
++ break;
++ }
++
++ if (error != CI_STATUS_SUCCESS) {
++ eprintk("bad mrv_mi_ctrl->init_vals value");
++ return error;
++ }
++
++ /* enable change of byte order for write port */
++ REG_SET_SLICE(mi_ctrl, MRV_MI_BYTE_SWAP,
++ (mrv_mi_ctrl->byte_swap_enable) ? ON : OFF);
++
++ /* enable or disable the last pixel signalization */
++ REG_SET_SLICE(mi_ctrl, MRV_MI_LAST_PIXEL_SIG_EN,
++ (mrv_mi_ctrl->last_pixel_enable) ? ON : OFF);
++
++ /* now write settings into register */
++ REG_WRITE(mrv_reg->mi_ctrl, mi_ctrl);
++
++ dprintk(2, "mi_ctrl = 0x%x", mi_ctrl);
++
++ /* self picture path operating mode */
++ if ((mrv_mi_ctrl->self_path == CI_ISP_PATH_ON) ||
++ (mrv_mi_ctrl->self_path == CI_ISP_PATH_OFF)) {
++
++ /* do not call if not supported */
++
++ /* support has been restricted to >= MI_V2 && <= MI_V3 in
++ * ci_isp_mif_set_self_pic_orientation, so we do the same here
++ */
++
++ error = ci_isp_mif_set_self_pic_orientation(
++ mrv_mi_ctrl->mrv_mif_sp_mode,
++ (int) (mrv_mi_ctrl->self_path
++ == CI_ISP_PATH_ON));
++ } else {
++ eprintk("bad mrv_mi_ctrl->self_path value");
++ error = CI_STATUS_OUTOFRANGE;
++ }
++
++ REG_SET_SLICE(mrv_reg->mi_init, MRV_MI_MI_CFG_UPD, ON);
++
++ return error;
++}
+--
+1.6.0.6
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-moorestown-camera-driver-10.0-3-3.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-moorestown-camera-driver-10.0-3-3.patch
new file mode 100644
index 0000000..cd4edb9
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-moorestown-camera-driver-10.0-3-3.patch
@@ -0,0 +1,8290 @@
+From 0d55b08388f12c7c22cae9c6c745995d051624ba Mon Sep 17 00:00:00 2001
+From: Zheng Ba <zheng.ba@intel.com>
+Date: Thu, 1 Apr 2010 16:29:43 +0800
+Subject: [PATCH 3/3] Moorestown Camera Imaging driver Beta 10.0
+
+Patch-mainline: 2.6.35?
+
+Changes from Beta 9.0:
+1. Fixed hsd sighting:
+ 3469638 3469639 3469710 3469822 (high)
+ 3469697 (medium)
+
+Changes from Beta 8.0:
+1. Fixed hsd sighting
+ 3469056 3469058 (critical)
+ 3469705 3469696 3469709 3469510 (medium)
+
+Changes from Beta 7.0:
+1. Fixed hsd sighting 3469681,3469682,3469683 (high)
+
+Changes from Beta 6.0:
+1. Fixed hsd sighting 3469668 (high)
+2. Fixed ov5630 v4l2 view-finding dark issue
+3. Enabled support for popular v4l2 applications (cheese, skype, ffmpeg)
+
+Changes from Beta 5.1:
+1. Fixed CRITICAL sighting 3469558 -- ciapp fails to launch with segment fault
+2. Fixed HIGH sighting 3479513 -- ov5630 AWB unstable
+3. Improved KMOT sensor 720p fps from 30 to 40
+
+Changes from Beta 5.0:
+Fixed a critical issue of camera driver not loading -- hsd 3469557
+
+Main changes from Beta 4.0:
+Fixed 4 HSD sightings: 3469392,3469099,3469470,3469500
+
+Main changes from Beta 3.0:
+Fixed 7 HSD sightings: 3469264,3469112,3469395,3469103,3469105,3469471,3469484
+
+Main changes from Beta 2.0:
+Fixed 6 HSD sightings: 3469047,3469315,3469317,3469101,3468409,3469391
+
+Main changes from Beta 1.1:
+1. Added interrupt mode for jpeg capture and KMOT viewfinding
+2. Fixed HSD sighting 3469228 and 3469147
+
+Main changes from Alpha2:
+Enabled MIPI interface in ISP driver and KMOT sensor s5k4e1.
+Enabled FIFO in ISP driver, which doubled the fps in view-finding mode.
+Enabled Subdev Framework in CI kernel driver.
+Enabled AF Continuous Mode.
+Enabled AE scene evaluation.
+
+Enabled the camera drivers in kernel:
+Device Drivers --> Multimedia support --> Video For Linux
+Device Drivers --> Mulitmedia support --> Video capture adapters -->
+--> Moorestown Langwell Camera Imaging Subsystem support.
+
+Kernel configs:
+1. camera driver depends on GPIO library and I2C driver.
+CONFIG_GENERIC_GPIO=y
+CONFIG_I2C=y
+CONFIG_GPIOLIB=y
+2. camera driver depends on videobuf-core and videobuf-dma-contig.
+VIDEOBUF_GEN=y
+VIDEOBUF_DMA_CONTIG=y
+3. enable multimedia support and video capture.
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_VIDEO_DEV=y
+CONFIG_VIDEO_V4L2_COMMON=y
+CONFIG_VIDEO_MEDIA=y
+CONFIG_VIDEO_V4L2=y
+4. camera drivers incluing ISP, 5630, 5630-motor, s5k4e1, s5k4e1-motor, 2650,
+9665, flash.
+CONFIG_VIDEO_MRSTCI=y
+CONFIG_VIDEO_MRST_ISP=y
+CONFIG_VIDEO_MRST_OV5630=y
+CONFIG_VIDEO_MRST_OV5630_MOTOR=y
+CONFIG_VIDEO_MRST_S5K4E1=y
+CONFIG_VIDEO_MRST_S5K4E1_MOTOR=y
+CONFIG_VIDEO_MRST_FLASH=y
+CONFIG_VIDEO_MRST_OV2650=y
+CONFIG_VIDEO_MRST_OV9665=y
+Signed-off-by: Zheng Ba <zheng.ba@intel.com>
+---
+ drivers/media/video/mrstci/mrstflash/Kconfig | 9 +
+ drivers/media/video/mrstci/mrstflash/Makefile | 3 +
+ drivers/media/video/mrstci/mrstflash/mrstflash.c | 150 +++
+ drivers/media/video/mrstci/mrstov2650/Kconfig | 9 +
+ drivers/media/video/mrstci/mrstov2650/Makefile | 3 +
+ drivers/media/video/mrstci/mrstov2650/mrstov2650.c | 1190 ++++++++++++++++++++
+ drivers/media/video/mrstci/mrstov2650/ov2650.h | 766 +++++++++++++
+ drivers/media/video/mrstci/mrstov5630/Kconfig | 9 +
+ drivers/media/video/mrstci/mrstov5630/Makefile | 4 +
+ drivers/media/video/mrstci/mrstov5630/ov5630.c | 1153 +++++++++++++++++++
+ drivers/media/video/mrstci/mrstov5630/ov5630.h | 672 +++++++++++
+ .../media/video/mrstci/mrstov5630_motor/Kconfig | 9 +
+ .../media/video/mrstci/mrstov5630_motor/Makefile | 3 +
+ .../mrstci/mrstov5630_motor/mrstov5630_motor.c | 428 +++++++
+ .../video/mrstci/mrstov5630_motor/ov5630_motor.h | 86 ++
+ drivers/media/video/mrstci/mrstov9665/Kconfig | 9 +
+ drivers/media/video/mrstci/mrstov9665/Makefile | 3 +
+ drivers/media/video/mrstci/mrstov9665/mrstov9665.c | 972 ++++++++++++++++
+ drivers/media/video/mrstci/mrstov9665/ov9665.h | 263 +++++
+ drivers/media/video/mrstci/mrsts5k4e1/Kconfig | 9 +
+ drivers/media/video/mrstci/mrsts5k4e1/Makefile | 3 +
+ drivers/media/video/mrstci/mrsts5k4e1/mrsts5k4e1.c | 1024 +++++++++++++++++
+ drivers/media/video/mrstci/mrsts5k4e1/mrsts5k4e1.h | 662 +++++++++++
+ .../media/video/mrstci/mrsts5k4e1_motor/Kconfig | 9 +
+ .../media/video/mrstci/mrsts5k4e1_motor/Makefile | 3 +
+ .../mrstci/mrsts5k4e1_motor/mrsts5k4e1_motor.c | 430 +++++++
+ .../mrstci/mrsts5k4e1_motor/mrsts5k4e1_motor.h | 102 ++
+ 27 files changed, 7983 insertions(+), 0 deletions(-)
+ create mode 100644 drivers/media/video/mrstci/mrstflash/Kconfig
+ create mode 100644 drivers/media/video/mrstci/mrstflash/Makefile
+ create mode 100644 drivers/media/video/mrstci/mrstflash/mrstflash.c
+ create mode 100644 drivers/media/video/mrstci/mrstov2650/Kconfig
+ create mode 100644 drivers/media/video/mrstci/mrstov2650/Makefile
+ create mode 100644 drivers/media/video/mrstci/mrstov2650/mrstov2650.c
+ create mode 100644 drivers/media/video/mrstci/mrstov2650/ov2650.h
+ create mode 100644 drivers/media/video/mrstci/mrstov5630/Kconfig
+ create mode 100644 drivers/media/video/mrstci/mrstov5630/Makefile
+ create mode 100644 drivers/media/video/mrstci/mrstov5630/ov5630.c
+ create mode 100644 drivers/media/video/mrstci/mrstov5630/ov5630.h
+ create mode 100644 drivers/media/video/mrstci/mrstov5630_motor/Kconfig
+ create mode 100644 drivers/media/video/mrstci/mrstov5630_motor/Makefile
+ create mode 100644 drivers/media/video/mrstci/mrstov5630_motor/mrstov5630_motor.c
+ create mode 100644 drivers/media/video/mrstci/mrstov5630_motor/ov5630_motor.h
+ create mode 100644 drivers/media/video/mrstci/mrstov9665/Kconfig
+ create mode 100644 drivers/media/video/mrstci/mrstov9665/Makefile
+ create mode 100644 drivers/media/video/mrstci/mrstov9665/mrstov9665.c
+ create mode 100644 drivers/media/video/mrstci/mrstov9665/ov9665.h
+ create mode 100755 drivers/media/video/mrstci/mrsts5k4e1/Kconfig
+ create mode 100644 drivers/media/video/mrstci/mrsts5k4e1/Makefile
+ create mode 100755 drivers/media/video/mrstci/mrsts5k4e1/mrsts5k4e1.c
+ create mode 100755 drivers/media/video/mrstci/mrsts5k4e1/mrsts5k4e1.h
+ create mode 100755 drivers/media/video/mrstci/mrsts5k4e1_motor/Kconfig
+ create mode 100644 drivers/media/video/mrstci/mrsts5k4e1_motor/Makefile
+ create mode 100644 drivers/media/video/mrstci/mrsts5k4e1_motor/mrsts5k4e1_motor.c
+ create mode 100644 drivers/media/video/mrstci/mrsts5k4e1_motor/mrsts5k4e1_motor.h
+
+diff --git a/drivers/media/video/mrstci/mrstflash/Kconfig b/drivers/media/video/mrstci/mrstflash/Kconfig
+new file mode 100644
+index 0000000..72099c5
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstflash/Kconfig
+@@ -0,0 +1,9 @@
++config VIDEO_MRST_FLASH
++ tristate "Moorestown flash"
++ depends on I2C && VIDEO_MRST_ISP
++
++ ---help---
++ Say Y here if your platform support moorestown flash.
++
++ To compile this driver as a module, choose M here: the
++ module will be called mrstov2650.ko.
+diff --git a/drivers/media/video/mrstci/mrstflash/Makefile b/drivers/media/video/mrstci/mrstflash/Makefile
+new file mode 100644
+index 0000000..068f638
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstflash/Makefile
+@@ -0,0 +1,3 @@
++obj-$(CONFIG_VIDEO_MRST_FLASH) += mrstflash.o
++
++EXTRA_CFLAGS += -I$(src)/../include
+diff --git a/drivers/media/video/mrstci/mrstflash/mrstflash.c b/drivers/media/video/mrstci/mrstflash/mrstflash.c
+new file mode 100644
+index 0000000..5611e6b
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstflash/mrstflash.c
+@@ -0,0 +1,150 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging camera flash.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/kmod.h>
++#include <linux/device.h>
++#include <linux/delay.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/i2c.h>
++#include <linux/gpio.h>
++#include <linux/videodev2.h>
++#include <media/v4l2-device.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-i2c-drv.h>
++
++static int debug;
++module_param(debug, bool, 0644);
++MODULE_PARM_DESC(debug, "Debug level (0-1)");
++
++MODULE_AUTHOR("Xiaolin Zhang <xiaolin.zhang@intel.com>");
++MODULE_DESCRIPTION("A low-level driver for mrst flash");
++MODULE_LICENSE("GPL");
++
++static int flash_g_chip_ident(struct v4l2_subdev *sd,
++ struct v4l2_dbg_chip_ident *chip)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++#define V4L2_IDENT_MRST_FLASH 8248
++ return v4l2_chip_ident_i2c_client(client, chip,
++ V4L2_IDENT_MRST_FLASH, 0);
++}
++
++static const struct v4l2_subdev_core_ops flash_core_ops = {
++ .g_chip_ident = flash_g_chip_ident,
++};
++static const struct v4l2_subdev_ops flash_ops = {
++ .core = &flash_core_ops,
++};
++
++static int flash_detect(struct i2c_client *client)
++{
++ struct i2c_adapter *adapter = client->adapter;
++ u8 pid;
++
++ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
++ return -ENODEV;
++
++ if (adapter->nr != 0)
++ return -ENODEV;
++
++ pid = i2c_smbus_read_byte_data(client, 0x10);
++ if (pid == 0x18) {
++ printk(KERN_ERR "camera flash device found\n");
++ v4l_dbg(1, debug, client, "found camera flash device");
++ } else {
++ printk(KERN_ERR "no camera flash device found\n");
++ return -ENODEV;
++ }
++
++ return 0;
++}
++
++static int flash_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ u8 pid, ver;
++ int ret = -1;
++ struct v4l2_subdev *sd;
++
++ v4l_info(client, "chip found @ 0x%x (%s)\n",
++ client->addr << 1, client->adapter->name);
++
++ sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
++ ret = flash_detect(client);
++ if (ret)
++ return -ENODEV;
++
++ v4l2_i2c_subdev_init(sd, client, &flash_ops);
++
++ ver = i2c_smbus_read_byte_data(client, 0x50);
++ v4l_dbg(1, debug, client, "detect:CST from device is 0x%x", ver);
++ pid = i2c_smbus_read_byte_data(client, 0x20);
++ v4l_dbg(1, debug, client, "detect:MFPC from device is 0x%x", pid);
++ pid = i2c_smbus_read_byte_data(client, 0xA0);
++ v4l_dbg(1, debug, client, "detect:TCC from device is 0x%x", pid);
++ pid = i2c_smbus_read_byte_data(client, 0xB0);
++ v4l_dbg(1, debug, client, "detect:FCC from device is 0x%x", pid);
++ pid = i2c_smbus_read_byte_data(client, 0xC0);
++ v4l_dbg(1, debug, client, "detect:FDC from device is 0x%x", pid);
++ i2c_smbus_write_byte_data(client, 0xc0, 0xff); /*set FST to 1000us*/
++ pid = i2c_smbus_read_byte_data(client, 0xc0);
++ v4l_dbg(1, debug, client, "FDC from device is 0x%x", pid);
++
++ v4l_dbg(1, debug, client,
++ "successfully load camera flash device driver");
++ return 0;
++}
++
++static int flash_remove(struct i2c_client *client)
++{
++ struct v4l2_subdev *sd = i2c_get_clientdata(client);
++
++ v4l2_device_unregister_subdev(sd);
++
++ return 0;
++}
++
++static const struct i2c_device_id flash_id[] = {
++ {"mrst_camera_flash", 0},
++ {}
++};
++
++MODULE_DEVICE_TABLE(i2c, flash_id);
++
++static struct v4l2_i2c_driver_data v4l2_i2c_data = {
++ .name = "mrst_camera_flash",
++ .probe = flash_probe,
++ .remove = flash_remove,
++ .id_table = flash_id,
++};
+diff --git a/drivers/media/video/mrstci/mrstov2650/Kconfig b/drivers/media/video/mrstci/mrstov2650/Kconfig
+new file mode 100644
+index 0000000..d39d894
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstov2650/Kconfig
+@@ -0,0 +1,9 @@
++config VIDEO_MRST_OV2650
++ tristate "Moorestown OV2650 SoC Sensor"
++ depends on I2C && VIDEO_MRST_ISP
++
++ ---help---
++ Say Y here if your platform support OV2650 SoC Sensor.
++
++ To compile this driver as a module, choose M here: the
++ module will be called mrstov2650.ko.
+diff --git a/drivers/media/video/mrstci/mrstov2650/Makefile b/drivers/media/video/mrstci/mrstov2650/Makefile
+new file mode 100644
+index 0000000..fb16d57
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstov2650/Makefile
+@@ -0,0 +1,3 @@
++obj-$(CONFIG_VIDEO_MRST_OV2650) += mrstov2650.o
++
++EXTRA_CFLAGS += -I$(src)/../include
+\ No newline at end of file
+diff --git a/drivers/media/video/mrstci/mrstov2650/mrstov2650.c b/drivers/media/video/mrstci/mrstov2650/mrstov2650.c
+new file mode 100644
+index 0000000..7f0d478
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstov2650/mrstov2650.c
+@@ -0,0 +1,1190 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/kmod.h>
++#include <linux/device.h>
++#include <linux/delay.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/i2c.h>
++#include <linux/gpio.h>
++#include <linux/videodev2.h>
++#include <media/v4l2-device.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-i2c-drv.h>
++
++#include "ci_sensor_common.h"
++#include "ov2650.h"
++
++static int mrstov2650_debug;
++module_param(mrstov2650_debug, int, 0644);
++MODULE_PARM_DESC(mrstov2650_debug, "Debug level (0-1)");
++
++#define dprintk(level, fmt, arg...) do { \
++ if (mrstov2650_debug >= level) \
++ printk(KERN_DEBUG "mrstisp@%s: " fmt "\n", \
++ __func__, ## arg); } \
++ while (0)
++
++#define eprintk(fmt, arg...) \
++ printk(KERN_ERR "mrstisp@%s: line %d: " fmt "\n", \
++ __func__, __LINE__, ## arg);
++
++#define DBG_entering dprintk(2, "entering");
++#define DBG_leaving dprintk(2, "leaving");
++#define DBG_line dprintk(2, " line: %d", __LINE__);
++
++static inline struct ci_sensor_config *to_sensor_config(struct v4l2_subdev *sd)
++{
++ return container_of(sd, struct ci_sensor_config, sd);
++}
++
++static struct ov2650_format_struct {
++ __u8 *desc;
++ __u32 pixelformat;
++ struct regval_list *regs;
++} ov2650_formats[] = {
++ {
++ .desc = "YUYV 4:2:2",
++ .pixelformat = SENSOR_MODE_BT601,
++ .regs = NULL,
++ },
++};
++#define N_OV2650_FMTS ARRAY_SIZE(ov2650_formats)
++
++static struct ov2650_res_struct {
++ __u8 *desc;
++ int res;
++ int width;
++ int height;
++ /* FIXME: correct the fps values.. */
++ int fps;
++ bool used;
++ struct regval_list *regs;
++} ov2650_res[] = {
++ {
++ .desc = "UXGA",
++ .res = SENSOR_RES_UXGA,
++ .width = 1600,
++ .height = 1200,
++ .fps = 15,
++ .used = 0,
++ .regs = ov2650_res_uxga,
++ },
++ {
++ .desc = "SXGA",
++ .res = SENSOR_RES_SXGA,
++ .width = 1280,
++ .height = 1024,
++ .fps = 15,
++ .used = 0,
++ .regs = ov2650_res_sxga,
++ },
++ {
++ .desc = "SVGA",
++ .res = SENSOR_RES_SVGA,
++ .width = 800,
++ .height = 600,
++ .fps = 15,
++ .used = 0,
++ .regs = ov2650_res_svga,
++ },
++ {
++ .desc = "VGA",
++ .res = SENSOR_RES_VGA,
++ .width = 640,
++ .height = 480,
++ .fps = 15,
++ .used = 0,
++ .regs = ov2650_res_vga_vario,
++ },
++ {
++ .desc = "QVGA",
++ .res = SENSOR_RES_QVGA,
++ .width = 320,
++ .height = 240,
++ .fps = 15,
++ .used = 0,
++ .regs = ov2650_res_qvga,
++ },
++};
++
++#define N_RES (ARRAY_SIZE(ov2650_res))
++
++/*
++ * I2C Read & Write stuff
++ */
++static int ov2650_read(struct i2c_client *c, u16 reg, u8 *value)
++{
++ int ret;
++ int i;
++ struct i2c_msg msg[2];
++ u8 msgbuf[2];
++ u8 ret_val = 0;
++ *value = 0;
++ /* Read needs two message to go */
++ memset(&msg, 0, sizeof(msg));
++ msgbuf[0] = 0;
++ msgbuf[1] = 0;
++ i = 0;
++ msgbuf[i++] = reg >> 8;
++ msgbuf[i++] = reg;
++ msg[0].addr = c->addr;
++ msg[0].buf = msgbuf;
++ msg[0].len = i;
++
++ msg[1].addr = c->addr;
++ msg[1].flags = I2C_M_RD;
++ msg[1].buf = &ret_val;
++ msg[1].len = 1;
++
++ ret = i2c_transfer(c->adapter, &msg[0], 2);
++ *value = ret_val;
++
++ ret = (ret == 2) ? 0 : -1;
++ return ret;
++}
++
++static int ov2650_write(struct i2c_client *c, u16 reg, u8 value)
++{
++ int ret, i;
++ struct i2c_msg msg;
++ u8 msgbuf[3];
++
++ /* Writing only needs one message */
++ memset(&msg, 0, sizeof(msg));
++ i = 0;
++ msgbuf[i++] = reg >> 8;
++ msgbuf[i++] = reg;
++ msgbuf[i++] = value;
++
++ msg.addr = c->addr;
++ msg.flags = 0;
++ msg.buf = msgbuf;
++ msg.len = i;
++
++ ret = i2c_transfer(c->adapter, &msg, 1);
++
++ /* If this is a reset register, wait for 1ms */
++ if (reg == OV2650_SYS && (value & 0x80))
++ msleep(3);
++
++ ret = (ret == 1) ? 0 : -1;
++ return ret;
++}
++
++static int ov2650_write_array(struct i2c_client *c, struct regval_list *vals)
++{
++ struct regval_list *p;
++ u8 read_val = 0;
++ int err_num = 0;
++ int i = 0;
++ p = vals;
++ while (p->reg_num != 0xffff) {
++ ov2650_write(c, p->reg_num, p->value);
++ ov2650_read(c, p->reg_num, &read_val);
++ if (read_val != p->value)
++ err_num++;
++ p++;
++ i++;
++ }
++ return 0;
++}
++
++static int ov2650_set_data_pin_in(struct i2c_client *client)
++{
++ int ret = 0;
++ u8 reg;
++
++ ret += ov2650_write(client, 0x30b0, 0x00);
++
++ ret += ov2650_read(client, 0x30b1, &reg);
++ reg &= 0xfc;
++ ret += ov2650_write(client, 0x30b1, reg);
++
++ return ret;
++}
++
++static int ov2650_set_data_pin_out(struct i2c_client *client)
++{
++ int ret = 0;
++ u8 reg;
++
++ ret += ov2650_write(client, 0x30b0, 0xff);
++
++ ret += ov2650_read(client, 0x30b1, &reg);
++ reg &= 0xfc;
++ reg |= 0x03;
++ ret += ov2650_write(client, 0x30b1, reg);
++
++ return ret;
++}
++/*
++ * Sensor specific helper function
++ */
++static int ov2650_standby(void)
++{
++ gpio_set_value(GPIO_STDBY_PIN, 1);
++ dprintk(1, "PM: standby called\n");
++ return 0;
++}
++
++static int ov2650_wakeup(void)
++{
++ gpio_set_value(GPIO_STDBY_PIN, 0);
++ dprintk(1, "PM: wakeup called\n");
++ return 0;
++}
++
++static int ov2650_s_power(struct v4l2_subdev *sd, u32 val)
++{
++ if (val == 1)
++ ov2650_standby();
++ if (val == 0)
++ ov2650_wakeup();
++ return 0;
++}
++
++static int ov2650_init(struct i2c_client *c)
++{
++ int ret;
++ struct v4l2_subdev *sd = i2c_get_clientdata(c);
++ struct ci_sensor_config *info = to_sensor_config(sd);
++
++ /* Fill the configuration structure */
++ /* Note this default configuration value */
++ info->mode = ov2650_formats[0].pixelformat;
++ info->res = ov2650_res[0].res;
++ info->type = SENSOR_TYPE_SOC;
++ info->bls = SENSOR_BLS_OFF;
++ info->gamma = SENSOR_GAMMA_ON;
++ info->cconv = SENSOR_CCONV_ON;
++ info->blc = SENSOR_BLC_AUTO;
++ info->agc = SENSOR_AGC_AUTO;
++ info->awb = SENSOR_AWB_AUTO;
++ info->aec = SENSOR_AEC_AUTO;
++ info->bus_width = SENSOR_BUSWIDTH_8BIT_ZZ;
++ info->ycseq = SENSOR_YCSEQ_YCBYCR;
++ info->conv422 = SENSOR_CONV422_COSITED;
++ info->bpat = SENSOR_BPAT_BGBGGRGR;/* GRGRBGBG; */
++ info->field_inv = SENSOR_FIELDINV_NOSWAP;
++ info->field_sel = SENSOR_FIELDSEL_BOTH;
++ info->hpol = SENSOR_HPOL_REFPOS;
++ info->vpol = SENSOR_VPOL_POS;
++ info->edge = SENSOR_EDGE_RISING;
++ info->flicker_freq = SENSOR_FLICKER_100;
++ info->cie_profile = 0;
++ memcpy(info->name, "ov2650", 7);
++
++ ret = ov2650_write(c, OV2650_SYS, 0x80);
++ /* Set registers into default config value */
++ ret += ov2650_write_array(c, ov2650_def_reg);
++
++ /* added by wen to stop sensor from streaming */
++ ov2650_write(c, 0x3086, 0x0f);
++ ov2650_set_data_pin_in(c);
++ ssleep(1);
++
++ return ret;
++}
++
++static int distance(struct ov2650_res_struct *res, u32 w, u32 h)
++{
++ int ret;
++ if (res->width < w || res->height < h)
++ return -1;
++
++ ret = ((res->width - w) + (res->height - h));
++ return ret;
++}
++
++static int ov2650_try_res(u32 *w, u32 *h)
++{
++ struct ov2650_res_struct *res_index, *p = NULL;
++ int dis, last_dis = ov2650_res->width + ov2650_res->height;
++
++ dprintk(1, "&&&&& before %dx%d", *w, *h);
++ for (res_index = ov2650_res;
++ res_index < ov2650_res + N_RES;
++ res_index++) {
++ if ((res_index->width <= *w) && (res_index->height <= *h))
++ break;
++ dis = distance(res_index, *w, *h);
++ if (dis < last_dis) {
++ last_dis = dis;
++ p = res_index;
++ }
++ }
++ if ((res_index->width < *w) || (res_index->height < *h)) {
++ if (res_index != ov2650_res)
++ res_index--;
++ }
++
++ /*
++ if (p == NULL) {
++ p = ov2650_res;
++ }
++
++ if ((w != NULL) && (h != NULL)) {
++ *w = p->width;
++ *h = p->height;
++ }
++ */
++ if (res_index == ov2650_res + N_RES)
++ res_index = ov2650_res + N_RES - 1;
++
++ *w = res_index->width;
++ *h = res_index->height;
++
++ dprintk(1, "&&&&& after %dx%d", *w, *h);
++ return 0;
++}
++
++static struct ov2650_res_struct *ov2650_to_res(u32 w, u32 h)
++{
++ struct ov2650_res_struct *res_index;
++
++ for (res_index = ov2650_res;
++ res_index < ov2650_res + N_RES;
++ res_index++)
++ if ((res_index->width == w) && (res_index->height == h))
++ break;
++
++ if (res_index >= ov2650_res + N_RES)
++ res_index--; /* Take the bigger one */
++
++ return res_index;
++}
++
++static int ov2650_try_fmt(struct v4l2_subdev *sd,
++ struct v4l2_format *fmt)
++{
++ DBG_entering;
++ dprintk(1, "&&&&& before %dx%d", fmt->fmt.pix.width,
++ fmt->fmt.pix.height);
++ return ov2650_try_res(&fmt->fmt.pix.width, &fmt->fmt.pix.height);
++ dprintk(1, "&&&&& after %dx%d", fmt->fmt.pix.width,
++ fmt->fmt.pix.height);
++ DBG_leaving;
++}
++
++static int ov2650_get_fmt(struct v4l2_subdev *sd,
++ struct v4l2_format *fmt)
++{
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ unsigned short width, height;
++ int index;
++
++ ci_sensor_res2size(info->res, &width, &height);
++
++ /* Marked the current sensor res as being "used" */
++ for (index = 0; index < N_RES; index++) {
++ if ((width == ov2650_res[index].width) &&
++ (height == ov2650_res[index].height)) {
++ ov2650_res[index].used = 1;
++ continue;
++ }
++ ov2650_res[index].used = 0;
++ }
++
++ fmt->fmt.pix.width = width;
++ fmt->fmt.pix.height = height;
++ return 0;
++}
++
++static int ov2650_set_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ int ret = 0;
++ struct ov2650_res_struct *res_index;
++ u32 width, height;
++ int index;
++
++ DBG_entering;
++
++ width = fmt->fmt.pix.width;
++ height = fmt->fmt.pix.height;
++
++ ret = ov2650_try_res(&width, &height);
++ res_index = ov2650_to_res(width, height);
++
++ ov2650_wakeup();
++
++ /* if ((info->res != res_index->res) && (res_index->regs)) { */
++ if (res_index->regs) {
++
++ dprintk(2, "changing res from to %dx%d", width, height);
++ ret = ov2650_write(client, OV2650_SYS, 0x80);
++ ret += ov2650_write_array(client, ov2650_def_reg);
++ ret += ov2650_write_array(client, res_index->regs);
++
++ /* add to debug
++ if(res_index->res == SENSOR_RES_VGA) {
++ ret += ov2650_write_array(c, ov2650_def_reg);
++ ret += ov2650_write_array(c, res_index->regs);
++ } else {
++ ret += ov2650_write_array(c, ov2650_res_vga_reverse);
++ ret += ov2650_write_array(c, res_index->regs);
++ }
++ */
++
++ /* Add delay here to get better image */
++ /*
++ if (res_index->res == SENSOR_RES_SXGA ||
++ res_index->res == SENSOR_RES_UXGA)
++ msleep(2000);
++ else
++ msleep(900);
++ */
++
++ /* Marked current sensor res as being "used" */
++ for (index = 0; index < N_RES; index++) {
++ if ((width == ov2650_res[index].width) &&
++ (height == ov2650_res[index].height)) {
++ ov2650_res[index].used = 1;
++ continue;
++ }
++ ov2650_res[index].used = 0;
++ }
++
++ for (index = 0; index < N_RES; index++)
++ dprintk(2, "index = %d, used = %d\n", index,
++ ov2650_res[index].used);
++
++ }
++
++ info->res = res_index->res;
++
++ /*
++ int i;
++ unsigned char value;
++ printk(KERN_WARNING "2650 reg dumping start:\n");
++ for(i = 0x3000; i <= 0x360B; i ++) {
++ ov2650_read(c, i, &value);
++ printk(KERN_WARNING "reg at offset %4x = %x\n", i, value);
++ }
++ printk(KERN_WARNING "2650 reg dumping finished:\n");
++ */
++
++ DBG_leaving;
++
++ return ret;
++}
++
++static int ov2650_q_hflip(struct v4l2_subdev *sd, __s32 *value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ int err;
++ unsigned char v;
++
++ err = ov2650_read(client, OV2650_TMC_6, &v);
++ *value = (v & 0x02) == 0x02;
++ return err;
++}
++
++static int ov2650_t_hflip(struct v4l2_subdev *sd, int value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ unsigned char v, v1 = 0;
++ int err;
++
++ value = value >= 1 ? 1 : 0;
++ err = ov2650_read(client, OV2650_TMC_6, &v);
++ if (value) {
++ v |= 0x02;
++ v1 |= 0x08;
++ info->bpat = SENSOR_BPAT_GRGRBGBG;/*BGBGGRGR;*/
++ } else {
++ v &= ~0x02;
++ v1 &= ~0x08;
++ info->bpat = SENSOR_BPAT_BGBGGRGR;
++ }
++ err += ov2650_write(client, OV2650_TMC_6, v);
++ err += ov2650_write(client, 0x3090, v1);
++ msleep(10); /* FIXME */
++
++ return err;
++}
++
++static int ov2650_q_vflip(struct v4l2_subdev *sd, __s32 *value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ int err;
++ unsigned char v;
++
++ err = ov2650_read(client, OV2650_TMC_6, &v);
++ *value = (v & 0x01) == 0x01;
++ return err;
++}
++
++
++static int ov2650_t_vflip(struct v4l2_subdev *sd, int value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ int err = 0;
++ unsigned char v;
++
++ value = value >= 1 ? 1 : 0;
++ err = ov2650_read(client, OV2650_TMC_6, &v);
++ if (value)
++ v |= 0x01;
++ else
++ v &= ~0x01;
++ err += ov2650_write(client, OV2650_TMC_6, v);
++ msleep(10); /* FIXME */
++
++ return err;
++}
++
++#if 0
++static int ov2650_t_awb(struct i2c_client *c, int value)
++{
++ unsigned char v;
++ int ret;
++ struct ci_sensor_config *info = i2c_get_clientdata(c);
++
++ value = value >= 1 ? 1 : 0;
++ ret = ov2650_read(c, OV2650_ISP_CTL_0, &v);
++ if (value & 0x01) {
++ v |= 0x30;
++ info->awb = SENSOR_AWB_AUTO;
++ } else {
++ v &= ~0x30;
++ info->awb = SENSOR_AWB_OFF;
++ }
++ ret += ov2650_write(c, OV2650_ISP_CTL_0, v);
++ msleep(10); /* FIXME */
++
++ return ret;
++}
++
++static int ov2650_q_awb(struct i2c_client *c, int *value)
++{
++ int ret;
++ unsigned char v;
++
++ ret = ov2650_read(c, OV2650_ISP_CTL_0, &v);
++ *value = (v & 0x30) == 0x30;
++ return ret;
++}
++
++static int ov2650_t_agc(struct i2c_client *c, int value)
++{
++ unsigned char v;
++ int ret;
++ struct ci_sensor_config *info = i2c_get_clientdata(c);
++
++ value = value >= 1 ? 1 : 0;
++ ret = ov2650_read(c, OV2650_ISP_CTL_0, &v);
++ if (value & 0x01) {
++ v |= 0x10;
++ info->agc = SENSOR_AGC_AUTO;
++ } else {
++ v &= ~0x10;
++ info->agc = SENSOR_AGC_OFF;
++ }
++ ret += ov2650_write(c, OV2650_ISP_CTL_0, v);
++ msleep(10); /* FIXME */
++
++ return ret;
++}
++
++static int ov2650_q_agc(struct i2c_client *c, int *value)
++{
++ int ret;
++ unsigned char v;
++
++ ret = ov2650_read(c, OV2650_ISP_CTL_0, &v);
++ *value = (v & 0x10) == 0x10;
++ return ret;
++}
++
++static int ov2650_t_blc(struct i2c_client *c, int value)
++{
++ unsigned char v;
++ int ret;
++
++ value = value >= 1 ? 1 : 0;
++
++ ret = ov2650_read(c, OV2650_BLCC, &v);
++ if (value & 0x01)
++ v |= 0x10;
++ else
++ v &= ~0x10;
++ ret += ov2650_write(c, OV2650_BLCC, v);
++ msleep(10); /* FIXME */
++
++ return ret;
++}
++
++static int ov2650_q_blc(struct i2c_client *c, int *value)
++{
++ int ret;
++ unsigned char v;
++
++ ret = ov2650_read(c, OV2650_BLCC, &v);
++ *value = (v & 0x10) == 0x10;
++ return ret;
++}
++#endif
++
++static struct ov2650_control {
++ struct v4l2_queryctrl qc;
++ int (*query)(struct v4l2_subdev *sd, __s32 *value);
++ int (*tweak)(struct v4l2_subdev *sd, int value);
++} ov2650_controls[] = {
++ {
++ .qc = {
++ .id = V4L2_CID_VFLIP,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Vertical flip",
++ .minimum = 0,
++ .maximum = 1,
++ .step = 1,
++ .default_value = 0,
++ },
++ .tweak = ov2650_t_vflip,
++ .query = ov2650_q_vflip,
++ },
++ {
++ .qc = {
++ .id = V4L2_CID_HFLIP,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Horizontal mirror",
++ .minimum = 0,
++ .maximum = 1,
++ .step = 1,
++ .default_value = 0,
++ },
++ .tweak = ov2650_t_hflip,
++ .query = ov2650_q_hflip,
++ },
++#if 0
++ {
++ .parm = {
++ .index = V4L2_CID_AUTO_WHITE_BALANCE,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Auto White Balance",
++ .min = 0,
++ .max = 1,
++ .step = 1,
++ .def_value = 1,
++ },
++ .tweak = ov2650_t_awb,
++ .query = ov2650_q_awb,
++ },
++ {
++ .parm = {
++ .index = V4L2_CID_AUTOGAIN,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Auto Gain Control",
++ .min = 0,
++ .max = 1,
++ .step = 1,
++ .def_value = 1,
++ },
++ .tweak = ov2650_t_agc,
++ .query = ov2650_q_agc,
++
++ },
++ {
++ .parm = {
++ .index = V4L2_CID_BLACK_LEVEL,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Black Level Control",
++ .min = 0,
++ .max = 1,
++ .step = 1,
++ .def_value = 1,
++ },
++ .tweak = ov2650_t_blc,
++ .query = ov2650_q_blc,
++
++ },
++#endif
++};
++#define N_CONTROLS (ARRAY_SIZE(ov2650_controls))
++
++static struct ov2650_control *ov2650_find_control(__u32 id)
++{
++ int i;
++
++ for (i = 0; i < N_CONTROLS; i++)
++ if (ov2650_controls[i].qc.id == id)
++ return ov2650_controls + i;
++ return NULL;
++}
++
++static int ov2650_queryctrl(struct v4l2_subdev *sd,
++ struct v4l2_queryctrl *qc)
++{
++ struct ov2650_control *octrl;
++ octrl = ov2650_find_control(qc->id);
++ if (NULL == octrl)
++ return -EINVAL;
++ *qc = octrl->qc;
++ return 0;
++}
++
++static int ov2650_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ struct ov2650_control *octrl = ov2650_find_control(ctrl->id);
++ int ret;
++
++ if (octrl == NULL)
++ return -EINVAL;
++ ret = octrl->query(sd, &ctrl->value);
++ if (ret >= 0)
++ return 0;
++ return ret;
++}
++
++static int ov2650_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ struct ov2650_control *octrl = ov2650_find_control(ctrl->id);
++ int ret;
++
++ if (octrl == NULL)
++ return -EINVAL;
++ ret = octrl->tweak(sd, ctrl->value);
++ if (ret >= 0)
++ return 0;
++ return ret;
++}
++#if 0
++static int ov2650_get_caps(struct i2c_client *c, struct ci_sensor_caps *caps)
++{
++ if (caps == NULL)
++ return -EIO;
++
++ caps->bus_width = SENSOR_BUSWIDTH_8BIT_ZZ;
++ caps->mode = SENSOR_MODE_BT601;
++ caps->field_inv = SENSOR_FIELDINV_NOSWAP;
++ caps->field_sel = SENSOR_FIELDSEL_BOTH;
++ caps->ycseq = SENSOR_YCSEQ_YCBYCR;
++ caps->conv422 = SENSOR_CONV422_COSITED;
++ caps->bpat = SENSOR_BPAT_BGBGGRGR;
++ caps->hpol = SENSOR_HPOL_REFPOS;
++ caps->vpol = SENSOR_VPOL_POS;
++ caps->edge = SENSOR_EDGE_RISING;
++ caps->bls = SENSOR_BLS_OFF;
++ caps->gamma = SENSOR_GAMMA_ON;
++ caps->cconv = SENSOR_CCONV_ON;
++ caps->res = SENSOR_RES_UXGA | SENSOR_RES_SXGA | SENSOR_RES_SVGA
++ | SENSOR_RES_VGA | SENSOR_RES_QVGA;
++ caps->blc = SENSOR_BLC_AUTO;
++ caps->agc = SENSOR_AGC_AUTO;
++ caps->awb = SENSOR_AWB_AUTO;
++ caps->aec = SENSOR_AEC_AUTO;
++ caps->cie_profile = 0;
++ caps->flicker_freq = SENSOR_FLICKER_100 | SENSOR_FLICKER_120;
++ caps->type = SENSOR_TYPE_SOC;
++ /* caps->name = "ov2650"; */
++ strcpy(caps->name, "ov2650");
++
++ return 0;
++}
++
++static int ov2650_get_config(struct i2c_client *c,
++ struct ci_sensor_config *config)
++{
++ struct ci_sensor_config *info = i2c_get_clientdata(c);
++
++ if (config == NULL) {
++ printk(KERN_WARNING "sensor_get_config: NULL pointer\n");
++ return -EIO;
++ }
++
++ memcpy(config, info, sizeof(struct ci_sensor_config));
++
++ return 0;
++}
++
++static int ov2650_setup(struct i2c_client *c,
++ const struct ci_sensor_config *config)
++{
++ int ret;
++ struct ov2650_res_struct *res_index;
++ struct ci_sensor_config *info = i2c_get_clientdata(c);
++ u16 width, high;
++
++ /* Soft reset camera first*/
++ ret = ov2650_write(c, OV2650_SYS, 0x80);
++
++ /* Set registers into default config value */
++ ret += ov2650_write_array(c, ov2650_def_reg);
++
++ /* set image resolution */
++ ci_sensor_res2size(config->res, &width, &high);
++ ret += ov2650_try_res(c, &width, &high);
++ res_index = ov2650_find_res(width, high);
++ if (res_index->regs)
++ ret += ov2650_write_array(c, res_index->regs);
++ if (!ret)
++ info->res = res_index->res;
++
++
++ if (config->blc != info->blc) {
++ ret += ov2650_t_blc(c, config->blc);
++ info->blc = config->blc;
++ }
++
++ if (config->agc != info->agc) {
++ ret += ov2650_t_agc(c, config->agc);
++ info->agc = config->agc;
++ }
++
++ if (config->awb != info->awb) {
++ ret += ov2650_t_awb(c, config->awb);
++ info->awb = config->awb;
++ }
++ /* Add some delay here to get a better image*/
++ if (res_index->res == SENSOR_RES_SXGA ||
++ res_index->res == SENSOR_RES_UXGA)
++ msleep(2000);
++ else
++ msleep(900);
++
++ return ret;
++}
++
++/*
++ * File operation functions
++ */
++
++
++
++static int ov2650_open(struct i2c_setting *c, void *priv)
++{
++ struct i2c_client *client = c->sensor_client;
++ /* Just wake up sensor */
++ if (ov2650_wakeup())
++ return -EIO;
++ ov2650_init(client);
++ /*Sleep sensor now*/
++ ov2650_write(client, 0x3086, 0x0f);
++
++ /* set data pin to input */
++ if (ov2650_set_data_pin_in(client))
++ return -EIO;
++
++ return 0;
++}
++
++static int ov2650_release(struct i2c_setting *c, void *priv)
++{
++ /* Just suspend the sensor */
++ ov2650_standby();
++ return 0;
++}
++
++static int ov2650_on(struct i2c_setting *c)
++{
++ int ret;
++
++ /* Software wake up sensor */
++ ret = ov2650_write(c->sensor_client, 0x3086, 0x00);
++
++ /* set data pin to output */
++ return ret + ov2650_set_data_pin_out(c->sensor_client);
++}
++
++static int ov2650_off(struct i2c_setting *c)
++{
++ int ret;
++
++ /* Software standby sensor */
++ ret = ov2650_write(c->sensor_client, 0x3086, 0x0f);
++
++ /* set data pin to input */
++ return ret + ov2650_set_data_pin_in(c->sensor_client);
++}
++
++static struct sensor_device ov2650 = {
++ .name = "OV2650",
++ .type = SENSOR_TYPE_SOC,
++ .minor = -1,
++ .open = ov2650_open,
++ .release = ov2650_release,
++ .on = ov2650_on,
++ .off = ov2650_off,
++ .querycap = ov2650_get_caps,
++ .get_config = ov2650_get_config,
++ .set_config = ov2650_setup,
++ .enum_parm = ov2650_queryctrl,
++ .get_parm = ov2650_g_ctrl,
++ .set_parm = ov2650_s_ctrl,
++ .try_res = ov2650_try_res,
++ .set_res = ov2650_set_res,
++ .suspend = ov2650_standby,
++ .resume = ov2650_wakeup,
++ .get_ls_corr_config = NULL,
++ .set_awb = NULL,
++ .set_aec = NULL,
++ .set_blc = NULL,
++ /* TBC */
++};
++#endif
++
++static int ov2650_s_stream(struct v4l2_subdev *sd, int enable)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++ DBG_entering;
++
++
++ if (enable) {
++ ov2650_write(client, 0x3086, 0x00);
++ ov2650_set_data_pin_out(client);
++ msleep(2000);
++ } else {
++ ov2650_write(client, 0x3086, 0x0f);
++ ov2650_set_data_pin_in(client);
++ }
++
++ DBG_leaving;
++ return 0;
++}
++
++static int ov2650_enum_framesizes(struct v4l2_subdev *sd,
++ struct v4l2_frmsizeenum *fsize)
++{
++ unsigned int index = fsize->index;
++
++ DBG_entering;
++
++ if (index >= N_RES)
++ return -EINVAL;
++
++ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
++ fsize->discrete.width = ov2650_res[index].width;
++ fsize->discrete.height = ov2650_res[index].height;
++ fsize->reserved[0] = ov2650_res[index].used;
++
++ DBG_leaving;
++
++ return 0;
++}
++
++static int ov2650_enum_frameintervals(struct v4l2_subdev *sd,
++ struct v4l2_frmivalenum *fival)
++{
++ unsigned int index = fival->index;
++
++ DBG_entering;
++
++ if (index >= N_RES)
++ return -EINVAL;
++
++ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
++ fival->discrete.numerator = 1;
++ fival->discrete.denominator = ov2650_res[index].fps;
++
++ DBG_leaving;
++
++ return 0;
++}
++static int ov2650_g_chip_ident(struct v4l2_subdev *sd,
++ struct v4l2_dbg_chip_ident *chip)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++#define V4L2_IDENT_OV2650 8244
++ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_OV2650, 0);
++}
++
++#ifdef CONFIG_VIDEO_ADV_DEBUG
++static int ov2650_g_register(struct v4l2_subdev *sd,
++ struct v4l2_dbg_register *reg)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ unsigned char val = 0;
++ int ret;
++
++ if (!v4l2_chip_match_i2c_client(client, &reg->match))
++ return -EINVAL;
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++ ret = ov2650_read(client, reg->reg & 0xffff, &val);
++ reg->val = val;
++ reg->size = 1;
++ return ret;
++}
++
++static int ov2650_s_register(struct v4l2_subdev *sd,
++ struct v4l2_dbg_register *reg)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++ if (!v4l2_chip_match_i2c_client(client, &reg->match))
++ return -EINVAL;
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++ ov2650_write(client, reg->reg & 0xffff, reg->val & 0xff);
++ return 0;
++}
++#endif
++
++static const struct v4l2_subdev_video_ops ov2650_video_ops = {
++ .try_fmt = ov2650_try_fmt,
++ .s_fmt = ov2650_set_fmt,
++ .g_fmt = ov2650_get_fmt,
++ .s_stream = ov2650_s_stream,
++ .enum_framesizes = ov2650_enum_framesizes,
++ .enum_frameintervals = ov2650_enum_frameintervals,
++};
++
++static const struct v4l2_subdev_core_ops ov2650_core_ops = {
++ .g_chip_ident = ov2650_g_chip_ident,
++ .queryctrl = ov2650_queryctrl,
++ .g_ctrl = ov2650_g_ctrl,
++ .s_ctrl = ov2650_s_ctrl,
++ .s_gpio = ov2650_s_power,
++ /*.g_ext_ctrls = ov2650_g_ext_ctrls,*/
++ /*.s_ext_ctrls = ov2650_s_ext_ctrls,*/
++#ifdef CONFIG_VIDEO_ADV_DEBUG
++ .g_register = ov2650_g_register,
++ .s_register = ov2650_s_register,
++#endif
++};
++
++static const struct v4l2_subdev_ops ov2650_ops = {
++ .core = &ov2650_core_ops,
++ .video = &ov2650_video_ops,
++};
++
++/*
++ * Basic i2c stuff
++ */
++#if 0
++static unsigned short normal_i2c[] = {I2C_OV2650 >> 1, I2C_CLIENT_END};
++I2C_CLIENT_INSMOD;
++
++static struct i2c_driver ov2650_driver;
++#endif
++static int ov2650_detect(struct i2c_client *client)
++{
++ struct i2c_adapter *adapter = client->adapter;
++ int adap_id = i2c_adapter_id(adapter);
++ u8 value;
++
++ printk(KERN_WARNING "Now start ov2650 detect\n");
++ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
++ return -ENODEV;
++
++ if (adap_id != 1)
++ return -ENODEV;
++
++ /* if (ov2650_wakeup()) */
++ /* return -ENODEV; */
++ ov2650_wakeup();
++
++ ov2650_read(client, OV2650_PID_L, &value);
++ if (value != 0x52)
++ return -ENODEV;
++
++ return 0;
++}
++
++static int ov2650_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct ci_sensor_config *info;
++ struct v4l2_subdev *sd;
++ int ret = -1;
++
++ DBG_entering;
++
++ printk(KERN_INFO "Init ov2650 sensor \n");
++
++ v4l_info(client, "chip found @ 0x%x (%s)\n",
++ client->addr << 1, client->adapter->name);
++ /*
++ * Setup sensor configuration structure
++ */
++ info = kzalloc(sizeof(struct ci_sensor_config), GFP_KERNEL);
++ if (!info)
++ return -ENOMEM;
++
++ ret = ov2650_detect(client);
++ if (ret) {
++ kfree(info);
++ return -ENODEV;
++ }
++
++ sd = &info->sd;
++ v4l2_i2c_subdev_init(sd, client, &ov2650_ops);
++
++ /*
++ * TODO: Need to check if this can be here.
++ * Turn into standby mode
++ */
++ /* ov2650_standby(); */
++ ret += ov2650_init(client);
++ ov2650_standby();
++
++ printk(KERN_INFO "Init ov2650 sensor success, ret = %d\n", ret);
++
++ DBG_leaving;
++ return 0;
++}
++
++static int ov2650_remove(struct i2c_client *client)
++{
++ struct v4l2_subdev *sd = i2c_get_clientdata(client);
++
++ v4l2_device_unregister_subdev(sd);
++ kfree(to_sensor_config(sd));
++ return 0;
++}
++
++static const struct i2c_device_id ov2650_id[] = {
++ {"ov2650", 0},
++ {}
++};
++
++MODULE_DEVICE_TABLE(i2c, ov2650_id);
++
++static struct v4l2_i2c_driver_data v4l2_i2c_data = {
++ .name = "ov2650",
++ .probe = ov2650_probe,
++ .remove = ov2650_remove,
++ /* .suspend = ov2650_suspend,
++ * .resume = ov2650_resume, */
++ .id_table = ov2650_id,
++};
++
++MODULE_AUTHOR("Xiaolin Zhang <xiaolin.zhang@intel.com>");
++MODULE_DESCRIPTION("A low-level driver for OmniVision 2650 sensors");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/media/video/mrstci/mrstov2650/ov2650.h b/drivers/media/video/mrstci/mrstov2650/ov2650.h
+new file mode 100644
+index 0000000..f5c0418
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstov2650/ov2650.h
+@@ -0,0 +1,766 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#define I2C_OV2650 0x60
++/* Should add to kernel source */
++#define I2C_DRIVERID_OV2650 1047
++/* GPIO pin on Moorestown */
++#define GPIO_SCLK_25 44
++#define GPIO_STB_PIN 47
++#define GPIO_STDBY_PIN 48
++#define GPIO_RESET_PIN 50
++
++/* System control register */
++#define OV2650_AGC 0x3000
++#define OV2650_AGCS 0x3001
++#define OV2650_AEC_H 0x3002
++#define OV2650_AEC_L 0x3003
++#define OV2650_AECL 0x3004
++#define OV2650_AECS_H 0x3008
++#define OV2650_AECS_L 0x3009
++#define OV2650_PID_H 0x300A
++#define OV2650_PID_L 0x300B
++#define OV2650_SCCB 0x300C
++#define OV2650_PCLK 0x300D
++#define OV2650_PLL_1 0x300E
++#define OV2650_PLL_2 0x300F
++#define OV2650_PLL_3 0x3010
++#define OV2650_CLK 0x3011
++#define OV2650_SYS 0x3012
++#define OV2650_AUTO_1 0x3013
++#define OV2650_AUTO_2 0x3014
++#define OV2650_AUTO_3 0x3015
++#define OV2650_AUTO_4 0x3016
++#define OV2650_AUTO_5 0x3017
++#define OV2650_WPT 0x3018
++#define OV2650_BPT 0x3019
++#define OV2650_VPT 0x301A
++#define OV2650_YAVG 0x301B
++#define OV2650_AECG_50 0x301C
++#define OV2650_AECG_60 0x301D
++#define OV2650_RZM_H 0x301E
++#define OV2650_RZM_L 0x301F
++#define OV2650_HS_H 0x3020
++#define OV2650_HS_L 0x3021
++#define OV2650_VS_H 0x3022
++#define OV2650_VS_L 0x3023
++#define OV2650_HW_H 0x3024
++#define OV2650_HW_L 0x3025
++#define OV2650_VH_H 0x3026
++#define OV2650_VH_L 0x3027
++#define OV2650_HTS_H 0x3028
++#define OV2650_HTS_L 0x3029
++#define OV2650_VTS_H 0x302A
++#define OV2650_VTS_L 0x302B
++#define OV2650_EXHTS 0x302C
++#define OV2650_EXVTS_H 0x302D
++#define OV2650_EXVTS_L 0x302E
++#define OV2650_WET_0 0x3030
++#define OV2650_WET_1 0x3031
++#define OV2650_WET_2 0x3032
++#define OV2650_WET_3 0x3033
++#define OV2650_AHS_H 0x3038
++#define OV2650_AHS_L 0x3039
++#define OV2650_AVS_H 0x303A
++#define OV2650_AVS_L 0x303B
++#define OV2650_AHW_H 0x303C
++#define OV2650_AHW_L 0x303D
++#define OV2650_AVH_H 0x303E
++#define OV2650_AVH_L 0x303F
++#define OV2650_HISTO_0 0x3040
++#define OV2650_HISTO_1 0x3041
++#define OV2650_HISTO_2 0x3042
++#define OV2650_HISTO_3 0x3043
++#define OV2650_HISTO_4 0x3044
++#define OV2650_BLC9A 0x3069
++#define OV2650_BLCC 0x306C
++#define OV2650_BLCD 0x306D
++#define OV2650_BLCF 0x306F
++#define OV2650_BD50_L 0x3070
++#define OV2650_BD50_H 0x3071
++#define OV2650_BD60_L 0x3072
++#define OV2650_BD60_H 0x3073
++#define OV2650_TMC_0 0x3076
++#define OV2650_TMC_1 0x3077
++#define OV2650_TMC_2 0x3078
++#define OV2650_TMC_4 0x307A
++#define OV2650_TMC_6 0x307C
++#define OV2650_TMC_8 0x307E
++#define OV2650_TMC_I2C 0x3084
++#define OV2650_TMC_10 0x3086
++#define OV2650_TMC_11 0x3087
++#define OV2650_ISP_XO_H 0x3088
++#define OV2650_ISP_XO_L 0x3089
++#define OV2650_ISP_YO_H 0x308A
++#define OV2650_ISP_YO_L 0x308B
++#define OV2650_TMC_12 0x308C
++#define OV2650_TMC_13 0x308D
++#define OV2650_EFUSE 0x308F
++#define OV2650_IO_CTL_0 0x30B0
++#define OV2650_IO_CRL_1 0x30B1
++#define OV2650_IO_CTL_2 0x30B2
++#define OV2650_LAEC 0x30F0
++#define OV2650_GRP_EOP 0x30FF
++
++/* SC registers */
++#define OV2650_SC_CTL_0 0x3100
++#define OV2650_SC_SYN_CTL_0 0x3104
++#define OV2650_SC_SYN_CTL_1 0x3105
++#define OV2650_SC_SYN_CTL_3 0x3107
++#define OV2650_SC_SYN_CTL_4 0x3108
++
++/* DSP control register */
++#define OV2650_ISP_CTL_0 0x3300
++#define OV2650_ISP_CTL_1 0x3301
++#define OV2650_ISP_CTL_2 0x3302
++#define OV2650_ISP_CTL_3 0x3303
++#define OV2650_ISP_CTL_4 0x3304
++#define OV2650_ISP_CTL_5 0x3305
++#define OV2650_ISP_CTL_6 0x3306
++#define OV2650_ISP_CTL_7 0x3307
++#define OV2650_ISP_CTL_8 0x3308
++#define OV2650_ISP_CTL_9 0x3309
++#define OV2650_ISP_CTL_A 0x330A
++#define OV2650_ISP_CTL_B 0x330B
++#define OV2650_ISP_CTL_10 0x3310
++#define OV2650_ISP_CTL_11 0x3311
++#define OV2650_ISP_CTL_12 0x3312
++#define OV2650_ISP_CTL_13 0x3313
++#define OV2650_ISP_CTL_14 0x3314
++#define OV2650_ISP_CTL_15 0x3315
++#define OV2650_ISP_CTL_16 0x3316
++#define OV2650_ISP_CTL_17 0x3317
++#define OV2650_ISP_CTL_18 0x3318
++#define OV2650_ISP_CTL_19 0x3319
++#define OV2650_ISP_CTL_1A 0x331A
++#define OV2650_ISP_CTL_1B 0x331B
++#define OV2650_ISP_CTL_1C 0x331C
++#define OV2650_ISP_CTL_1D 0x331D
++#define OV2650_ISP_CTL_1E 0x331E
++#define OV2650_ISP_CTL_20 0x3320
++#define OV2650_ISP_CTL_21 0x3321
++#define OV2650_ISP_CTL_22 0x3322
++#define OV2650_ISP_CTL_23 0x3323
++#define OV2650_ISP_CTL_24 0x3324
++#define OV2650_ISP_CTL_27 0x3327
++#define OV2650_ISP_CTL_28 0x3328
++#define OV2650_ISP_CTL_29 0x3329
++#define OV2650_ISP_CTL_2A 0x332A
++#define OV2650_ISP_CTL_2B 0x332B
++#define OV2650_ISP_CTL_2C 0x332C
++#define OV2650_ISP_CTL_2D 0x332D
++#define OV2650_ISP_CTL_2E 0x332E
++#define OV2650_ISP_CTL_2F 0x332F
++#define OV2650_ISP_CTL_30 0x3330
++#define OV2650_ISP_CTL_31 0x3331
++#define OV2650_ISP_CTL_32 0x3332
++#define OV2650_ISP_CTL_33 0x3333
++#define OV2650_ISP_CTL_34 0x3334
++#define OV2650_ISP_CTL_35 0x3335
++#define OV2650_ISP_CTL_36 0x3336
++#define OV2650_ISP_CTL_40 0x3340
++#define OV2650_ISP_CTL_41 0x3341
++#define OV2650_ISP_CTL_42 0x3342
++#define OV2650_ISP_CTL_43 0x3343
++#define OV2650_ISP_CTL_44 0x3344
++#define OV2650_ISP_CTL_45 0x3345
++#define OV2650_ISP_CTL_46 0x3346
++#define OV2650_ISP_CTL_47 0x3347
++#define OV2650_ISP_CTL_48 0x3348
++#define OV2650_ISP_CTL_49 0x3349
++#define OV2650_ISP_CTL_4A 0x334A
++#define OV2650_ISP_CTL_4B 0x334B
++#define OV2650_ISP_CTL_4C 0x334C
++#define OV2650_ISP_CTL_4D 0x334D
++#define OV2650_ISP_CTL_4E 0x334E
++#define OV2650_ISP_CTL_4F 0x334F
++#define OV2650_ISP_CTL_50 0x3350
++#define OV2650_ISP_CTL_51 0x3351
++#define OV2650_ISP_CTL_52 0x3352
++#define OV2650_ISP_CTL_53 0x3353
++#define OV2650_ISP_CTL_54 0x3354
++#define OV2650_ISP_CTL_55 0x3355
++#define OV2650_ISP_CTL_56 0x3356
++#define OV2650_ISP_CTL_57 0x3357
++#define OV2650_ISP_CTL_58 0x3358
++#define OV2650_ISP_CTL_59 0x3359
++#define OV2650_ISP_CTL_5A 0x335A
++#define OV2650_ISP_CTL_5B 0x335B
++#define OV2650_ISP_CTL_5C 0x335C
++#define OV2650_ISP_CTL_5D 0x335D
++#define OV2650_ISP_CTL_5E 0x335E
++#define OV2650_ISP_CTL_5F 0x335F
++#define OV2650_ISP_CTL_60 0x3360
++#define OV2650_ISP_CTL_61 0x3361
++#define OV2650_ISP_CTL_62 0x3362
++#define OV2650_ISP_CTL_63 0x3363
++#define OV2650_ISP_CTL_64 0x3364
++#define OV2650_ISP_CTL_65 0x3365
++#define OV2650_ISP_CTL_6A 0x336A
++#define OV2650_ISP_CTL_6B 0x336B
++#define OV2650_ISP_CTL_6C 0x336C
++#define OV2650_ISP_CTL_6E 0x336E
++#define OV2650_ISP_CTL_71 0x3371
++#define OV2650_ISP_CTL_72 0x3372
++#define OV2650_ISP_CTL_73 0x3373
++#define OV2650_ISP_CTL_74 0x3374
++#define OV2650_ISP_CTL_75 0x3375
++#define OV2650_ISP_CTL_76 0x3376
++#define OV2650_ISP_CTL_77 0x3377
++#define OV2650_ISP_CTL_78 0x3378
++#define OV2650_ISP_CTL_79 0x3379
++#define OV2650_ISP_CTL_7A 0x337A
++#define OV2650_ISP_CTL_7B 0x337B
++#define OV2650_ISP_CTL_7C 0x337C
++#define OV2650_ISP_CTL_80 0x3380
++#define OV2650_ISP_CTL_81 0x3381
++#define OV2650_ISP_CTL_82 0x3382
++#define OV2650_ISP_CTL_83 0x3383
++#define OV2650_ISP_CTL_84 0x3384
++#define OV2650_ISP_CTL_85 0x3385
++#define OV2650_ISP_CTL_86 0x3386
++#define OV2650_ISP_CTL_87 0x3387
++#define OV2650_ISP_CTL_88 0x3388
++#define OV2650_ISP_CTL_89 0x3389
++#define OV2650_ISP_CTL_8A 0x338A
++#define OV2650_ISP_CTL_8B 0x338B
++#define OV2650_ISP_CTL_8C 0x338C
++#define OV2650_ISP_CTL_8D 0x338D
++#define OV2650_ISP_CTL_8E 0x338E
++#define OV2650_ISP_CTL_90 0x3390
++#define OV2650_ISP_CTL_91 0x3391
++#define OV2650_ISP_CTL_92 0x3392
++#define OV2650_ISP_CTL_93 0x3393
++#define OV2650_ISP_CTL_94 0x3394
++#define OV2650_ISP_CTL_95 0x3395
++#define OV2650_ISP_CTL_96 0x3396
++#define OV2650_ISP_CTL_97 0x3397
++#define OV2650_ISP_CTL_98 0x3398
++#define OV2650_ISP_CTL_99 0x3399
++#define OV2650_ISP_CTL_9A 0x339A
++#define OV2650_ISP_CTL_A0 0x33A0
++#define OV2650_ISP_CTL_A1 0x33A1
++#define OV2650_ISP_CTL_A2 0x33A2
++#define OV2650_ISP_CTL_A3 0x33A3
++#define OV2650_ISP_CTL_A4 0x33A4
++#define OV2650_ISP_CTL_A5 0x33A5
++#define OV2650_ISP_CTL_A6 0x33A6
++#define OV2650_ISP_CTL_A7 0x33A7
++#define OV2650_ISP_CTL_A8 0x33A8
++#define OV2650_ISP_CTL_AA 0x33AA
++#define OV2650_ISP_CTL_AB 0x33AB
++#define OV2650_ISP_CTL_AC 0x33AC
++#define OV2650_ISP_CTL_AD 0x33AD
++#define OV2650_ISP_CTL_AE 0x33AE
++#define OV2650_ISP_CTL_AF 0x33AF
++#define OV2650_ISP_CTL_B0 0x33B0
++#define OV2650_ISP_CTL_B1 0x33B1
++#define OV2650_ISP_CTL_B2 0x33B2
++#define OV2650_ISP_CTL_B3 0x33B3
++#define OV2650_ISP_CTL_B4 0x33B4
++#define OV2650_ISP_CTL_B5 0x33B5
++#define OV2650_ISP_CTL_B6 0x33B6
++#define OV2650_ISP_CTL_B7 0x33B7
++#define OV2650_ISP_CTL_B8 0x33B8
++#define OV2650_ISP_CTL_B9 0x33B9
++
++/* Format register */
++#define OV2650_FMT_CTL_0 0x3400
++#define OV2650_FMT_CTL_1 0x3401
++#define OV2650_FMT_CTL_2 0x3402
++#define OV2650_FMT_CTL_3 0x3403
++#define OV2650_FMT_CTL_4 0x3404
++#define OV2650_FMT_CTL_5 0x3405
++#define OV2650_FMT_CTL_6 0x3406
++#define OV2650_FMT_CTL_7 0x3407
++#define OV2650_FMT_CTL_8 0x3408
++#define OV2650_DITHER_CTL 0x3409
++#define OV2650_DVP_CTL_0 0x3600
++#define OV2650_DVP_CTL_1 0x3601
++#define OV2650_DVP_CTL_6 0x3606
++#define OV2650_DVP_CTL_7 0x3607
++#define OV2650_DVP_CTL_9 0x3609
++#define OV2650_DVP_CTL_B 0x360B
++
++/* General definition for ov2650 */
++#define OV2650_OUTWND_MAX_H UXGA_SIZE_H
++#define OV2650_OUTWND_MAX_V UXGA_SIZE_V
++
++struct regval_list {
++ u16 reg_num;
++ u8 value;
++};
++
++/*
++ * Default register value
++ * 1600x1200 YUV
++ */
++static struct regval_list ov2650_def_reg[] = {
++ {0x3012, 0x80},
++ {0x308c, 0x80},
++ {0x308d, 0x0e},
++ {0x360b, 0x00},
++ {0x30b0, 0xff},
++ {0x30b1, 0xff},
++ {0x30b2, 0x27},
++
++ {0x300e, 0x34},
++ {0x300f, 0xa6},
++ {0x3010, 0x81},
++ {0x3082, 0x01},
++ {0x30f4, 0x01},
++ {0x3090, 0x33},
++ {0x3091, 0xc0},
++ {0x30ac, 0x42},
++
++ {0x30d1, 0x08},
++ {0x30a8, 0x56},
++ {0x3015, 0x03},
++ {0x3093, 0x00},
++ {0x307e, 0xe5},
++ {0x3079, 0x00},
++ {0x30aa, 0x42},
++ {0x3017, 0x40},
++ {0x30f3, 0x82},
++ {0x306a, 0x0c},
++ {0x306d, 0x00},
++ {0x336a, 0x3c},
++ {0x3076, 0x6a},
++ {0x30d9, 0x8c},
++ {0x3016, 0x82},
++ {0x3601, 0x30},
++ {0x304e, 0x88},
++ {0x30f1, 0x82},
++ {0x3011, 0x02},
++
++ {0x3013, 0xf7},
++ {0x301c, 0x13},
++ {0x301d, 0x17},
++ {0x3070, 0x3e},
++ {0x3072, 0x34},
++
++ {0x30af, 0x00},
++ {0x3048, 0x1f},
++ {0x3049, 0x4e},
++ {0x304a, 0x20},
++ {0x304f, 0x20},
++ {0x304b, 0x02},
++ {0x304c, 0x00},
++ {0x304d, 0x02},
++ {0x304f, 0x20},
++ {0x30a3, 0x10},
++ {0x3013, 0xf7},
++ {0x3014, 0x44},
++ {0x3071, 0x00},
++ {0x3070, 0x3e},
++ {0x3073, 0x00},
++ {0x3072, 0x34},
++ {0x301c, 0x12},
++ {0x301d, 0x16},
++ {0x304d, 0x42},
++ {0x304a, 0x40},
++ {0x304f, 0x40},
++ {0x3095, 0x07},
++ {0x3096, 0x16},
++ {0x3097, 0x1d},
++
++ {0x3020, 0x01},
++ {0x3021, 0x18},
++ {0x3022, 0x00},
++ {0x3023, 0x0a},
++ {0x3024, 0x06},
++ {0x3025, 0x58},
++ {0x3026, 0x04},
++ {0x3027, 0xbc},
++ {0x3088, 0x06},
++ {0x3089, 0x40},
++ {0x308a, 0x04},
++ {0x308b, 0xb0},
++ {0x3316, 0x64},
++ {0x3317, 0x4b},
++ {0x3318, 0x00},
++ {0x331a, 0x64},
++ {0x331b, 0x4b},
++ {0x331c, 0x00},
++ {0x3100, 0x00},
++
++ {0x3320, 0xfa},
++ {0x3321, 0x11},
++ {0x3322, 0x92},
++ {0x3323, 0x01},
++ {0x3324, 0x97},
++ {0x3325, 0x02},
++ {0x3326, 0xff},
++ {0x3327, 0x0c},
++ {0x3328, 0x10},
++ {0x3329, 0x10},
++ {0x332a, 0x58},
++ {0x332b, 0x50},
++ {0x332c, 0xbe},
++ {0x332d, 0xe1},
++ {0x332e, 0x43},
++ {0x332f, 0x36},
++ {0x3330, 0x4d},
++ {0x3331, 0x44},
++ {0x3332, 0xf8},
++ {0x3333, 0x0a},
++ {0x3334, 0xf0},
++ {0x3335, 0xf0},
++ {0x3336, 0xf0},
++ {0x3337, 0x40},
++ {0x3338, 0x40},
++ {0x3339, 0x40},
++ {0x333a, 0x00},
++ {0x333b, 0x00},
++
++ {0x3380, 0x28},
++ {0x3381, 0x48},
++ {0x3382, 0x10},
++ {0x3383, 0x23},
++ {0x3384, 0xc0},
++ {0x3385, 0xe5},
++ {0x3386, 0xc2},
++ {0x3387, 0xb3},
++ {0x3388, 0x0e},
++ {0x3389, 0x98},
++ {0x338a, 0x01},
++
++ {0x3340, 0x0e},
++ {0x3341, 0x1a},
++ {0x3342, 0x31},
++ {0x3343, 0x45},
++ {0x3344, 0x5a},
++ {0x3345, 0x69},
++ {0x3346, 0x75},
++ {0x3347, 0x7e},
++ {0x3348, 0x88},
++ {0x3349, 0x96},
++ {0x334a, 0xa3},
++ {0x334b, 0xaf},
++ {0x334c, 0xc4},
++ {0x334d, 0xd7},
++ {0x334e, 0xe8},
++ {0x334f, 0x20},
++
++ {0x3350, 0x32},
++ {0x3351, 0x25},
++ {0x3352, 0x80},
++ {0x3353, 0x1e},
++ {0x3354, 0x00},
++ {0x3355, 0x85},
++ {0x3356, 0x32},
++ {0x3357, 0x25},
++ {0x3358, 0x80},
++ {0x3359, 0x1b},
++ {0x335a, 0x00},
++ {0x335b, 0x85},
++ {0x335c, 0x32},
++ {0x335d, 0x25},
++ {0x335e, 0x80},
++ {0x335f, 0x1b},
++ {0x3360, 0x00},
++ {0x3361, 0x85},
++ {0x3363, 0x70},
++ {0x3364, 0x7f},
++ {0x3365, 0x00},
++ {0x3366, 0x00},
++
++ {0x3301, 0xff},
++ {0x338B, 0x11},
++ {0x338c, 0x10},
++ {0x338d, 0x40},
++
++ {0x3370, 0xd0},
++ {0x3371, 0x00},
++ {0x3372, 0x00},
++ {0x3373, 0x40},
++ {0x3374, 0x10},
++ {0x3375, 0x10},
++ {0x3376, 0x04},
++ {0x3377, 0x00},
++ {0x3378, 0x04},
++ {0x3379, 0x80},
++
++ {0x3069, 0x84},
++ {0x307c, 0x10},
++ {0x3087, 0x02},
++
++ {0x3300, 0xfc},
++ {0x3302, 0x01},
++ {0x3400, 0x00},
++ {0x3606, 0x20},
++ {0x3601, 0x30},
++ {0x30f3, 0x83},
++ {0x304e, 0x88},
++
++ {0x3086, 0x0f},
++ {0x3086, 0x00},
++
++ {0xffff, 0xff},
++};
++
++/* 800x600 */
++static struct regval_list ov2650_res_svga[] = {
++
++ {0x306f, 0x14},
++ {0x302a, 0x02},
++ {0x302b, 0x84},
++ {0x3012, 0x10},
++ {0x3011, 0x01},
++
++ {0x3070, 0x5d},
++ {0x3072, 0x4d},
++
++ {0x3014, 0x84},
++ {0x301c, 0x07},
++ {0x301d, 0x09},
++ {0x3070, 0x50},
++ {0x3071, 0x00},
++ {0x3072, 0x42},
++ {0x3073, 0x00},
++
++ {0x3020, 0x01},
++ {0x3021, 0x18},
++ {0x3022, 0x00},
++ {0x3023, 0x06},
++ {0x3024, 0x06},
++ {0x3025, 0x58},
++ {0x3026, 0x02},
++ {0x3027, 0x5e},
++ {0x3088, 0x03},
++ {0x3089, 0x20},
++ {0x308a, 0x02},
++ {0x308b, 0x58},
++ {0x3316, 0x64},
++ {0x3317, 0x25},
++ {0x3318, 0x80},
++ {0x3319, 0x08},
++ {0x331a, 0x64},
++ {0x331b, 0x4b},
++ {0x331c, 0x00},
++ {0x331d, 0x38},
++ {0x3100, 0x00},
++
++ {0x3302, 0x11},
++
++ {0x3011, 0x01},
++ {0x300f, 0xa6},
++ {0x300e, 0x36},
++ {0x3010, 0x81},
++ {0x302e, 0x00},
++ {0x302d, 0x00},
++ {0x302c, 0x00},
++ {0x302b, 0x84},
++ {0x3014, 0x84},
++ {0x301c, 0x07},
++ {0x301d, 0x09},
++ {0x3070, 0x50},
++ {0x3071, 0x00},
++ {0x3072, 0x42},
++ {0x3073, 0x00},
++
++ {0x3086, 0x0f},
++ {0x3086, 0x00},
++ {0xffff, 0xff},
++};
++
++/* 640x480 */
++static struct regval_list ov2650_res_vga_vario[] = {
++ {0x306f, 0x14},
++ {0x302a, 0x02},
++ {0x302b, 0x6a},
++ {0x3012, 0x10},
++ {0x3011, 0x01},
++
++ {0x3070, 0x5d},
++ {0x3072, 0x4d},
++
++ {0x301c, 0x05},
++ {0x301d, 0x06},
++
++ {0x3020, 0x01},
++ {0x3021, 0x18},
++ {0x3022, 0x00},
++ {0x3023, 0x06},
++ {0x3024, 0x06},
++ {0x3025, 0x58},
++ {0x3026, 0x02},
++ {0x3027, 0x61},
++ {0x3088, 0x02},
++ {0x3089, 0x80},
++ {0x308a, 0x01},
++ {0x308b, 0xe0},
++ {0x3316, 0x64},
++ {0x3317, 0x25},
++ {0x3318, 0x80},
++ {0x3319, 0x08},
++ {0x331a, 0x28},
++ {0x331b, 0x1e},
++ {0x331c, 0x00},
++ {0x331d, 0x38},
++ {0x3100, 0x00},
++
++ {0x3302, 0x11},
++ {0x3011, 0x00},
++
++ {0x3014, 0x84}, /* note this */
++ {0x3086, 0x0f},
++ {0x3086, 0x00},
++ {0xffff, 0xff},
++};
++
++/* 640x480 reverse */
++/*
++static struct regval_list ov2650_res_vga_reverse[] = {
++ {0x306f, 0x10},
++ {0x302a, 0x04},
++ {0x302b, 0xd4},
++ {0x3012, 0x00},
++ {0x3011, 0x02},
++
++ {0x3070, 0x3e},
++ {0x3072, 0x34},
++
++ {0x301c, 0x12},
++ {0x301d, 0x16},
++
++ {0x3020, 0x01},
++ {0x3021, 0x18},
++ {0x3022, 0x00},
++ {0x3023, 0x0a},
++ {0x3024, 0x06},
++ {0x3025, 0x58},
++ {0x3026, 0x04},
++ {0x3027, 0xbc},
++ {0x3088, 0x06},
++ {0x3089, 0x40},
++ {0x308a, 0x04},
++ {0x308b, 0xb0},
++ {0x3316, 0x64},
++ {0x3317, 0xb4},
++ {0x3318, 0x00},
++ {0x3319, 0x6c},
++ {0x331a, 0x64},
++ {0x331b, 0x4b},
++ {0x331c, 0x00},
++ {0x331d, 0x6c},
++ {0x3100, 0x00},
++
++ {0x3302, 0x01},
++ {0x3011, 0x02},
++
++ {0x3014, 0x44},
++ {0x3086, 0x0f},
++ {0x3086, 0x00},
++ {0xffff, 0xff},
++};
++
++*/
++/* 320x240 */
++static struct regval_list ov2650_res_qvga[] = {
++ {0x306f, 0x14},
++ {0x302a, 0x02},
++ {0x302b, 0x6a},
++
++ {0x3012, 0x10},
++ {0x3011, 0x01},
++
++ {0x3070, 0x5d},
++ {0x3072, 0x4d},
++ {0x301c, 0x05},
++ {0x301d, 0x06},
++
++ {0x3023, 0x06},
++ {0x3026, 0x02},
++ {0x3027, 0x61},
++ {0x3088, 0x01},
++ {0x3089, 0x40},
++ {0x308a, 0x00},
++ {0x308b, 0xf0},
++ {0x3316, 0x64},
++ {0x3317, 0x25},
++ {0x3318, 0x80},
++ {0x3319, 0x08},
++ {0x331a, 0x14},
++ {0x331b, 0x0f},
++ {0x331c, 0x00},
++ {0x331d, 0x38},
++ {0x3100, 0x00},
++
++ {0x3015, 0x02}, /* note this */
++ {0x3014, 0x84},
++ {0x3302, 0x11},
++ {0x3086, 0x0f},
++ {0x3086, 0x00},
++ {0xffff, 0xff},
++};
++
++static struct regval_list ov2650_res_uxga[] = {
++ /* Note this added by debug */
++ {0x3014, 0x84},
++ {0x301c, 0x13},
++ {0x301d, 0x17},
++ {0x3070, 0x40},
++ {0x3071, 0x00},
++ {0x3072, 0x36},
++ {0x3073, 0x00},
++
++ {0xffff, 0xff},
++};
++
++static struct regval_list ov2650_res_sxga[] = {
++ {0x3011, 0x02},
++
++ {0x3020, 0x01},
++ {0x3021, 0x18},
++ {0x3022, 0x00},
++ {0x3023, 0x0a},
++ {0x3024, 0x06},
++ {0x3025, 0x58},
++ {0x3026, 0x04},
++ {0x3027, 0xbc},
++ {0x3088, 0x05},
++ {0x3089, 0x00},
++ {0x308a, 0x04},
++ {0x308b, 0x00},
++ {0x3316, 0x64},
++ {0x3317, 0x4b},
++ {0x3318, 0x00},
++ {0x331a, 0x50},
++ {0x331b, 0x40},
++ {0x331c, 0x00},
++
++ {0x3302, 0x11},
++
++ {0x3014, 0x84},
++ {0x301c, 0x13},
++ {0x301d, 0x17},
++ {0x3070, 0x40},
++ {0x3071, 0x00},
++ {0x3072, 0x36},
++ {0x3073, 0x00},
++
++ {0x3086, 0x0f},
++ {0x3086, 0x00},
++ {0xffff, 0xff},
++};
+diff --git a/drivers/media/video/mrstci/mrstov5630/Kconfig b/drivers/media/video/mrstci/mrstov5630/Kconfig
+new file mode 100644
+index 0000000..a28ddc2
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstov5630/Kconfig
+@@ -0,0 +1,9 @@
++config VIDEO_MRST_OV5630
++ tristate "Moorestown OV5630 RAW Sensor"
++ depends on I2C && VIDEO_MRST_ISP
++
++ ---help---
++ Say Y here if your platform support OV5630 RAW Sensor.
++
++ To compile this driver as a module, choose M here: the
++ module will be called mrstov2650.ko.
+diff --git a/drivers/media/video/mrstci/mrstov5630/Makefile b/drivers/media/video/mrstci/mrstov5630/Makefile
+new file mode 100644
+index 0000000..c67abff
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstov5630/Makefile
+@@ -0,0 +1,4 @@
++mrstov5630-objs = ov5630.o
++obj-$(CONFIG_VIDEO_MRST_OV5630) += mrstov5630.o
++
++EXTRA_CFLAGS += -I$(src)/../include
+diff --git a/drivers/media/video/mrstci/mrstov5630/ov5630.c b/drivers/media/video/mrstci/mrstov5630/ov5630.c
+new file mode 100644
+index 0000000..6498153
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstov5630/ov5630.c
+@@ -0,0 +1,1153 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/kmod.h>
++#include <linux/device.h>
++#include <linux/delay.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/i2c.h>
++#include <linux/gpio.h>
++
++#include <media/v4l2-device.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-i2c-drv.h>
++
++#include "ci_sensor_common.h"
++#include "ov5630.h"
++
++static int mrstov5630_debug;
++module_param(mrstov5630_debug, int, 0644);
++MODULE_PARM_DESC(mrstov5630_debug, "Debug level (0-1)");
++
++#define dprintk(level, fmt, arg...) do { \
++ if (mrstov5630_debug >= level) \
++ printk(KERN_DEBUG "mrstisp@%s: " fmt "\n", \
++ __func__, ## arg); } \
++ while (0)
++
++#define eprintk(fmt, arg...) \
++ printk(KERN_ERR "mrstisp@%s: line %d: " fmt "\n", \
++ __func__, __LINE__, ## arg);
++
++#define DBG_entering dprintk(2, "entering");
++#define DBG_leaving dprintk(2, "leaving");
++#define DBG_line dprintk(2, " line: %d", __LINE__);
++
++static inline struct ci_sensor_config *to_sensor_config(struct v4l2_subdev *sd)
++{
++ return container_of(sd, struct ci_sensor_config, sd);
++}
++
++/* static int ov5630_set_res(struct i2c_client *c, const int w, const int h);
++ */
++static struct ov5630_format_struct {
++ __u8 *desc;
++ __u32 pixelformat;
++ struct regval_list *regs;
++} ov5630_formats[] = {
++ {
++ .desc = "Raw RGB Bayer",
++ .pixelformat = SENSOR_MODE_BAYER,
++ .regs = NULL,
++ },
++};
++#define N_OV5630_FMTS ARRAY_SIZE(ov5630_formats)
++
++static struct ov5630_res_struct {
++ __u8 *desc;
++ int res;
++ int width;
++ int height;
++ /* FIXME: correct the fps values.. */
++ int fps;
++ bool used;
++ struct regval_list *regs;
++} ov5630_res[] = {
++ {
++ .desc = "QSXGA_PLUS4",
++ .res = SENSOR_RES_QXGA_PLUS,
++ .width = 2592,
++ .height = 1944,
++ .fps = 15,
++ .used = 0,
++ .regs = ov5630_res_qsxga_plus4,
++ },
++ {
++ .desc = "1080P",
++ .res = SENSOR_RES_1080P,
++ .width = 1920,
++ .height = 1080,
++ .fps = 25,
++ .used = 0,
++ .regs = ov5630_res_1080p,
++ },
++ {
++ .desc = "XGA_PLUS",
++ .res = SENSOR_RES_XGA_PLUS,
++ .width = 1280,
++ .height = 960,
++ .fps = 30,
++ .used = 0,
++ .regs = ov5630_res_xga_plus,
++ },
++ {
++ .desc = "720p",
++ .res = SENSOR_RES_720P,
++ .width = 1280,
++ .height = 720,
++ .fps = 34,
++ .used = 0,
++ .regs = ov5630_res_720p,
++ },
++ {
++ .desc = "VGA",
++ .res = SENSOR_RES_VGA,
++ .width = 640,
++ .height = 480,
++ .fps = 39,
++ .used = 0,
++ .regs = ov5630_res_vga_ac04_bill,
++ },
++};
++
++#define N_RES (ARRAY_SIZE(ov5630_res))
++
++/*
++ * I2C Read & Write stuff
++ */
++static int ov5630_read(struct i2c_client *c, u32 reg, u32 *value)
++{
++ int ret;
++ int i;
++ struct i2c_msg msg[2];
++ u8 msgbuf[2];
++ u8 ret_val = 0;
++ *value = 0;
++ /* Read needs two message to go */
++ memset(&msg, 0, sizeof(msg));
++ msgbuf[0] = 0;
++ msgbuf[1] = 0;
++ i = 0;
++
++ msgbuf[i++] = ((u16)reg) >> 8;
++ msgbuf[i++] = ((u16)reg) & 0xff;
++ msg[0].addr = c->addr;
++ msg[0].buf = msgbuf;
++ msg[0].len = i;
++
++ msg[1].addr = c->addr;
++ msg[1].flags = I2C_M_RD;
++ msg[1].buf = &ret_val;
++ msg[1].len = 1;
++
++ ret = i2c_transfer(c->adapter, &msg[0], 2);
++ *value = ret_val;
++
++ ret = (ret == 2) ? 0 : -1;
++ return ret;
++}
++
++static int ov5630_write(struct i2c_client *c, u32 reg, u32 value)
++{
++ int ret, i;
++ struct i2c_msg msg;
++ u8 msgbuf[3];
++
++ /* Writing only needs one message */
++ memset(&msg, 0, sizeof(msg));
++ i = 0;
++ msgbuf[i++] = ((u16)reg) >> 8;
++ msgbuf[i++] = (u16)reg & 0xff;
++ msgbuf[i++] = (u8)value;
++
++ msg.addr = c->addr;
++ msg.flags = 0;
++ msg.buf = msgbuf;
++ msg.len = i;
++
++ ret = i2c_transfer(c->adapter, &msg, 1);
++
++ /* If this is a reset register, wait for 1ms */
++ if (reg == OV5630_SYS && (value & 0x80))
++ msleep(3);
++
++ ret = (ret == 1) ? 0 : -1;
++ return ret;
++}
++
++static int ov5630_write_array(struct i2c_client *c, struct regval_list *vals)
++{
++ struct regval_list *p;
++ u32 read_val = 0;
++ int err_num = 0;
++ int i = 0;
++ p = vals;
++ while (p->reg_num != 0xffff) {
++ ov5630_write(c, (u32)p->reg_num, (u32)p->value);
++ ov5630_read(c, (u32)p->reg_num, &read_val);
++ if (read_val != p->value)
++ err_num++;
++ p++;
++ i++;
++ }
++ return 0;
++}
++
++/*
++ * Sensor specific helper function
++ */
++static int ov5630_standby(void)
++{
++ gpio_set_value(GPIO_STDBY_PIN, 1);
++ /* ov5630_motor_standby(); */
++ dprintk(1, "PM: standby called\n");
++ return 0;
++}
++
++static int ov5630_wakeup(void)
++{
++ gpio_set_value(GPIO_STDBY_PIN, 0);
++ /* ov5630_motor_wakeup(); */
++ dprintk(1, "PM: wakeup called\n");
++ return 0;
++}
++
++static int ov5630_s_power(struct v4l2_subdev *sd, u32 val)
++{
++ if (val == 1)
++ ov5630_standby();
++ if (val == 0)
++ ov5630_wakeup();
++ return 0;
++}
++
++static int ov5630_set_img_ctrl(struct i2c_client *c,
++ const struct ci_sensor_config *config)
++{
++ int err = 0;
++ u32 reg_val = 0;
++ /* struct ci_sensor_config *info = i2c_get_clientdata(c); */
++
++ switch (config->blc) {
++ case SENSOR_BLC_OFF:
++ err |= ov5630_read(c, OV5630_ISP_CTL00, &reg_val);
++ err |= ov5630_write(c, OV5630_ISP_CTL00, reg_val & 0xFE);
++ break;
++ case SENSOR_BLC_AUTO:
++ err |= ov5630_read(c, OV5630_ISP_CTL00, &reg_val);
++ err |= ov5630_write(c, OV5630_ISP_CTL00, reg_val | 0x01);
++ break;
++ }
++
++ switch (config->agc) {
++ case SENSOR_AGC_AUTO:
++ err |= ov5630_read(c, OV5630_AUTO_1, &reg_val);
++ err |= ov5630_write(c, OV5630_AUTO_1, reg_val | 0x04);
++ break;
++ case SENSOR_AGC_OFF:
++ err |= ov5630_read(c, OV5630_AUTO_1, &reg_val);
++ err |= ov5630_write(c, OV5630_AUTO_1, reg_val & ~0x04);
++ break;
++ }
++
++ switch (config->awb) {
++ case SENSOR_AWB_AUTO:
++ err |= ov5630_read(c, OV5630_ISP_CTL00, &reg_val);
++ err |= ov5630_write(c, OV5630_ISP_CTL00, reg_val | 0x30);
++ break;
++ case SENSOR_AWB_OFF:
++ err |= ov5630_read(c, OV5630_ISP_CTL00, &reg_val);
++ err |= ov5630_write(c, OV5630_ISP_CTL00, reg_val & ~0x30);
++ break;
++ }
++
++ switch (config->aec) {
++ case SENSOR_AEC_AUTO:
++ err |= ov5630_read(c, OV5630_AUTO_1, &reg_val);
++ err |= ov5630_write(c, OV5630_AUTO_1, reg_val | 0xFB);
++ break;
++ case SENSOR_AEC_OFF:
++ err |= ov5630_read(c, OV5630_AUTO_1, &reg_val);
++ err |= ov5630_write(c, OV5630_AUTO_1, reg_val & 0xF6);
++ break;
++ }
++
++ return err;
++}
++
++static int ov5630_init(struct i2c_client *c)
++{
++ int ret;
++ struct v4l2_subdev *sd = i2c_get_clientdata(c);
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ char *name = "";
++
++ /* Fill the configuration structure */
++ /* Note this default configuration value */
++ info->mode = ov5630_formats[0].pixelformat;
++ info->res = ov5630_res[0].res;
++ info->type = SENSOR_TYPE_RAW;
++ info->bls = SENSOR_BLS_OFF;
++ info->gamma = SENSOR_GAMMA_OFF;
++ info->cconv = SENSOR_CCONV_OFF;
++ info->blc = SENSOR_BLC_AUTO;
++ info->agc = SENSOR_AGC_AUTO;
++ info->awb = SENSOR_AWB_AUTO;
++ info->aec = SENSOR_AEC_AUTO;
++ /* info->bus_width = SENSOR_BUSWIDTH_10BIT; */
++ info->bus_width = SENSOR_BUSWIDTH_10BIT_ZZ;
++ info->ycseq = SENSOR_YCSEQ_YCBYCR;
++ /* info->conv422 = SENSOR_CONV422_NOCOSITED; */
++ info->conv422 = SENSOR_CONV422_COSITED;
++ info->bpat = SENSOR_BPAT_BGBGGRGR;
++ info->field_inv = SENSOR_FIELDINV_NOSWAP;
++ info->field_sel = SENSOR_FIELDSEL_BOTH;
++ info->hpol = SENSOR_HPOL_REFPOS;
++ info->vpol = SENSOR_VPOL_NEG;
++ info->edge = SENSOR_EDGE_RISING;
++ info->flicker_freq = SENSOR_FLICKER_100;
++ info->cie_profile = SENSOR_CIEPROF_F11;
++ name = "ov5630";
++ memcpy(info->name, name, 7);
++
++ /* Reset sensor hardware, and implement the setting*/
++ ret = ov5630_write(c, (u32)OV5630_SYS, (u32)0x80);
++ ret += ov5630_write(c, (u32)OV5630_IMAGE_SYSTEM, (u32)0x00);
++
++ /* Set registers into default config value */
++ ret += ov5630_write_array(c, ov5630_def_reg);
++
++ /* Set MIPI interface */
++#ifdef OV5630_MIPI
++ ret += ov5630_write_array(c, ov5630_mipi);
++#endif
++
++ /* turn off AE AEB AGC */
++ ret += ov5630_set_img_ctrl(c, info);
++
++ /* streaming */
++ /* ret += ov5630_write(c, (u32)OV5630_IMAGE_SYSTEM, (u32)0x01); */
++ /* ret += ov5630_write(c, (u32)0x3096, (u32)0x50); */
++ /* /ssleep(1); */
++
++ /* Added by wen to stop sensor from streaming */
++ ov5630_write(c, (u32)OV5630_IMAGE_SYSTEM, (u32)0x00);
++ ov5630_write(c, 0x30b0, 0x00);
++ ov5630_write(c, 0x30b1, 0x00);
++ return ret;
++}
++
++static int distance(struct ov5630_res_struct *res, u32 w, u32 h)
++{
++ int ret;
++ if (res->width < w || res->height < h)
++ return -1;
++
++ ret = ((res->width - w) + (res->height - h));
++ return ret;
++}
++static int ov5630_try_res(u32 *w, u32 *h)
++{
++ struct ov5630_res_struct *res_index, *p = NULL;
++ int dis, last_dis = ov5630_res->width + ov5630_res->height;
++
++ DBG_entering;
++
++ for (res_index = ov5630_res;
++ res_index < ov5630_res + N_RES;
++ res_index++) {
++ if ((res_index->width < *w) || (res_index->height < *h))
++ break;
++ dis = distance(res_index, *w, *h);
++ if (dis < last_dis) {
++ last_dis = dis;
++ p = res_index;
++ }
++ }
++
++ if (p == NULL)
++ p = ov5630_res;
++ else if ((p->width < *w) || (p->height < *h)) {
++ if (p != ov5630_res)
++ p--;
++ }
++
++ if ((w != NULL) && (h != NULL)) {
++ *w = p->width;
++ *h = p->height;
++ }
++
++ DBG_leaving;
++ return 0;
++}
++
++static struct ov5630_res_struct *ov5630_to_res(u32 w, u32 h)
++{
++ struct ov5630_res_struct *res_index;
++
++ for (res_index = ov5630_res;
++ res_index < ov5630_res + N_RES;
++ res_index++)
++ if ((res_index->width == w) && (res_index->height == h))
++ break;
++
++ if (res_index >= ov5630_res + N_RES)
++ res_index--; /* Take the bigger one */
++
++ return res_index;
++}
++
++static int ov5630_try_fmt(struct v4l2_subdev *sd,
++ struct v4l2_format *fmt)
++{
++ DBG_entering;
++ return ov5630_try_res(&fmt->fmt.pix.width, &fmt->fmt.pix.height);
++ DBG_leaving;
++}
++
++static int ov5630_get_fmt(struct v4l2_subdev *sd,
++ struct v4l2_format *fmt)
++{
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ unsigned short width, height;
++ int index;
++
++ ci_sensor_res2size(info->res, &width, &height);
++
++ /* Marked the current sensor res as being "used" */
++ for (index = 0; index < N_RES; index++) {
++ if ((width == ov5630_res[index].width) &&
++ (height == ov5630_res[index].height)) {
++ ov5630_res[index].used = 1;
++ continue;
++ }
++ ov5630_res[index].used = 0;
++ }
++
++ fmt->fmt.pix.width = width;
++ fmt->fmt.pix.height = height;
++ return 0;
++}
++
++static int ov5630_set_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
++{
++ struct i2c_client *c = v4l2_get_subdevdata(sd);
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ int ret = 0;
++ struct ov5630_res_struct *res_index;
++ u32 width, height;
++ int index;
++
++ DBG_entering;
++
++ width = fmt->fmt.pix.width;
++ height = fmt->fmt.pix.height;
++
++ dprintk(1, "was told to set fmt (%d x %d) ", width, height);
++
++ ret = ov5630_try_res(&width, &height);
++
++ dprintk(1, "setting fmt (%d x %d) ", width, height);
++
++ res_index = ov5630_to_res(width, height);
++
++ ov5630_wakeup();
++
++ if (res_index->regs) {
++ /* Soft reset camera first*/
++ ret = ov5630_write(c, (u32)OV5630_SYS, (u32)0x80);
++
++ /* software sleep/standby */
++ ret += ov5630_write(c, (u32)OV5630_IMAGE_SYSTEM, (u32)0x00);
++
++ /* Set registers into default config value */
++ ret += ov5630_write_array(c, ov5630_def_reg);
++
++ /* set image resolution */
++ ret += ov5630_write_array(c, res_index->regs);
++
++ /* turn off AE AEB AGC */
++ ret += ov5630_set_img_ctrl(c, info);
++
++ /* Set MIPI interface */
++#ifdef OV5630_MIPI
++ ret += ov5630_write_array(c, ov5630_mipi);
++#endif
++
++ if (res_index->res == SENSOR_RES_VGA)
++ ret += ov5630_write(c, (u32)0x3015, (u32)0x03);
++
++ /* streaming */
++ ret = ov5630_write(c, (u32)OV5630_IMAGE_SYSTEM, (u32)0x01);
++ ret = ov5630_write(c, (u32)0x3096, (u32)0x50);
++
++ info->res = res_index->res;
++
++ /* Marked current sensor res as being "used" */
++ for (index = 0; index < N_RES; index++) {
++ if ((width == ov5630_res[index].width) &&
++ (height == ov5630_res[index].height)) {
++ ov5630_res[index].used = 1;
++ continue;
++ }
++ ov5630_res[index].used = 0;
++ }
++
++ for (index = 0; index < N_RES; index++)
++ dprintk(2, "index = %d, used = %d\n", index,
++ ov5630_res[index].used);
++ } else {
++ eprintk("no res for (%d x %d)", width, height);
++ }
++
++ DBG_leaving;
++ return ret;
++}
++
++static int ov5630_t_gain(struct v4l2_subdev *sd, int value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ u32 v;
++
++ DBG_entering;
++
++ dprintk(2, "writing gain %x to 0x3000", value);
++
++ ov5630_read(client, 0x3000, &v);
++ v = (v & 0x80) + value;
++ ov5630_write(client, 0x3000, v);
++
++ dprintk(2, "gain %x was writen to 0x3000", v);
++
++ DBG_leaving;
++ return 0;
++}
++
++static int ov5630_t_exposure(struct v4l2_subdev *sd, int value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ u32 v;
++ u32 reg_val;
++
++ DBG_entering;
++
++ ov5630_read(client, 0x3013, &v);
++ dprintk(2, "0x3013 = %x", v);
++ if (v & 0x05) {
++ /* turn off agc/aec */
++ v = v & 0xfa;
++ ov5630_write(client, 0x3013, v);
++ /* turn off awb */
++ ov5630_read(client, OV5630_ISP_CTL00, &reg_val);
++ ov5630_write(client, OV5630_ISP_CTL00, reg_val & ~0x30);
++ }
++ ov5630_read(client, 0x3014, &v);
++ dprintk(2, "0x3014 = %x", v);
++ ov5630_read(client, 0x3002, &v);
++ dprintk(2, "0x3002 = %x", v);
++ ov5630_read(client, 0x3003, &v);
++ dprintk(2, "0x3003 = %x", v);
++
++ dprintk(2, "writing exposure %x to 0x3002/3", value);
++
++ v = value >> 8;
++ ov5630_write(client, 0x3002, v);
++ dprintk(2, "exposure %x was writen to 0x3002", v);
++
++ v = value & 0xff;
++ ov5630_write(client, 0x3003, v);
++ dprintk(2, "exposure %x was writen to 0x3003", v);
++
++ DBG_leaving;
++ return 0;
++}
++
++static struct ov5630_control {
++ struct v4l2_queryctrl qc;
++ int (*query)(struct v4l2_subdev *sd, __s32 *value);
++ int (*tweak)(struct v4l2_subdev *sd, int value);
++} ov5630_controls[] = {
++ {
++ .qc = {
++ .id = V4L2_CID_GAIN,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "global gain",
++ .minimum = 0x0,
++ .maximum = 0xFF,
++ .step = 0x01,
++ .default_value = 0x00,
++ .flags = 0,
++ },
++ .tweak = ov5630_t_gain,
++/* .query = ov5630_q_gain, */
++ },
++ {
++ .qc = {
++ .id = V4L2_CID_EXPOSURE,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "exposure",
++ .minimum = 0x0,
++ .maximum = 0xFFFF,
++ .step = 0x01,
++ .default_value = 0x00,
++ .flags = 0,
++ },
++ .tweak = ov5630_t_exposure,
++/* .query = ov5630_q_exposure; */
++ },
++};
++#define N_CONTROLS (ARRAY_SIZE(ov5630_controls))
++
++/*
++static int ov5630_g_gain(struct v4l2_subdev *sd, int value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ unsigned char v;
++
++ DBG_entering;
++
++ ov5630_write(client, 0x3000, &v);
++ dprintk(2, "writing gain %x to 0x3000", value);
++
++ value
++ DBG_leaving;
++ return 0
++}
++*/
++
++static struct ov5630_control *ov5630_find_control(__u32 id)
++{
++ int i;
++
++ for (i = 0; i < N_CONTROLS; i++)
++ if (ov5630_controls[i].qc.id == id)
++ return ov5630_controls + i;
++ return NULL;
++}
++
++static int ov5630_queryctrl(struct v4l2_subdev *sd,
++ struct v4l2_queryctrl *qc)
++{
++ struct ov5630_control *ctrl = ov5630_find_control(qc->id);
++
++ if (ctrl == NULL)
++ return -EINVAL;
++ *qc = ctrl->qc;
++ return 0;
++}
++
++static int ov5630_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ /*
++ struct ov5630_control *octrl = ov5630_find_control(ctrl->id);
++
++ int ret;
++
++ if (octrl == NULL)
++ return -EINVAL;
++ ret = octrl->query(sd, &ctrl->value);
++ if (ret >= 0)
++ return 0;
++ return ret;
++ */
++ return 0;
++}
++
++static int ov5630_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ struct ov5630_control *octrl = ov5630_find_control(ctrl->id);
++ int ret;
++
++ if (octrl == NULL)
++ return -EINVAL;
++ ret = octrl->tweak(sd, ctrl->value);
++ if (ret >= 0)
++ return 0;
++ return ret;
++}
++
++#if 0
++static int ov5630_get_caps(struct i2c_client *c, struct ci_sensor_caps *caps)
++{
++ if (caps == NULL)
++ return -EIO;
++
++ caps->bus_width = SENSOR_BUSWIDTH_10BIT;
++ caps->mode = SENSOR_MODE_BAYER;
++ caps->field_inv = SENSOR_FIELDINV_NOSWAP;
++ caps->field_sel = SENSOR_FIELDSEL_BOTH;
++ caps->ycseq = SENSOR_YCSEQ_YCBYCR;
++ caps->conv422 = SENSOR_CONV422_NOCOSITED;
++ caps->bpat = SENSOR_BPAT_BGBGGRGR;
++ caps->hpol = SENSOR_HPOL_REFPOS;
++ caps->vpol = SENSOR_VPOL_NEG;
++ caps->edge = SENSOR_EDGE_RISING;
++ caps->bls = SENSOR_BLS_OFF;
++ caps->gamma = SENSOR_GAMMA_OFF;
++ caps->cconv = SENSOR_CCONV_OFF;
++ caps->res = SENSOR_RES_QXGA_PLUS | SENSOR_RES_1080P |
++ SENSOR_RES_XGA_PLUS | SENSOR_RES_720P | SENSOR_RES_VGA;
++ caps->blc = SENSOR_BLC_OFF;
++ caps->agc = SENSOR_AGC_OFF;
++ caps->awb = SENSOR_AWB_OFF;
++ caps->aec = SENSOR_AEC_OFF;
++ caps->cie_profile = SENSOR_CIEPROF_D65 | SENSOR_CIEPROF_D75 |
++ SENSOR_CIEPROF_F11 | SENSOR_CIEPROF_F12 | SENSOR_CIEPROF_A |
++ SENSOR_CIEPROF_F2;
++ caps->flicker_freq = SENSOR_FLICKER_100 | SENSOR_FLICKER_120;
++ caps->type = SENSOR_TYPE_RAW;
++ /* caps->name = "ov5630"; */
++ strcpy(caps->name, "ov5630");
++
++ return 0;
++}
++
++static int ov5630_get_config(struct i2c_client *c,
++ struct ci_sensor_config *config)
++{
++ struct ci_sensor_config *info = i2c_get_clientdata(c);
++
++ if (config == NULL) {
++ printk(KERN_WARNING "sensor_get_config: NULL pointer\n");
++ return -EIO;
++ }
++
++ memcpy(config, info, sizeof(struct ci_sensor_config));
++
++ return 0;
++}
++
++static int ov5630_setup(struct i2c_client *c,
++ const struct ci_sensor_config *config)
++{
++ int ret;
++ u16 width, high;
++ struct ov5630_res_struct *res_index;
++ struct ci_sensor_config *info = i2c_get_clientdata(c);
++
++ /* Soft reset camera first*/
++ ret = ov5630_write(c, (u32)OV5630_SYS, (u32)0x80);
++
++ /* software sleep/standby */
++ ret = ov5630_write(c, (u32)OV5630_IMAGE_SYSTEM, (u32)0x00);
++
++ /* Set registers into default config value */
++ ret = ov5630_write_array(c, ov5630_def_reg);
++
++ /* set image resolution */
++ ci_sensor_res2size(config->res, &width, &high);
++ ret += ov5630_try_res(&width, &high);
++ res_index = ov5630_find_res(width, high);
++ if (res_index->regs)
++ ret += ov5630_write_array(c, res_index->regs);
++ if (!ret)
++ info->res = res_index->res;
++
++ ret += ov5630_set_img_ctrl(c, config);
++
++ /* Set MIPI interface */
++#ifdef OV5630_MIPI
++ ret += ov5630_write_array(c, ov5630_mipi);
++#endif
++
++ /* streaming */
++ ret += ov5630_write(c, (u32)OV5630_IMAGE_SYSTEM, (u32)0x01);
++ ret += ov5630_write(c, (u32)0x3096, (u32)0x50);
++
++ /*Note here for the time delay */
++ /* ssleep(1); */
++ msleep(500);
++ return ret;
++}
++
++/*
++ * File operation functions
++ */
++static int ov5630_dvp_enable(struct i2c_client *client)
++{
++ int ret;
++
++ u8 reg;
++
++ ret = ov5630_read(client, 0x3506, &reg);
++ reg &= 0xdf;
++ reg |= 0x20;
++ ret += ov5630_write(client, 0x3506, reg);
++
++ return ret;
++}
++
++static int ov5630_dvp_disable(struct i2c_client *client)
++{
++ int ret;
++
++ u8 reg;
++
++ ret = ov5630_read(client, 0x3506, &reg);
++ reg &= 0xdf;
++ ret += ov5630_write(client, 0x3506, reg);
++
++ return ret;
++}
++
++static int ov5630_open(struct i2c_setting *c, void *priv)
++{
++ /* Just wake up sensor */
++ if (ov5630_wakeup())
++ return -EIO;
++ ov5630_init(c->sensor_client);
++ /* ov5630_motor_init(c->motor_client); */
++ ov5630_write(c->sensor_client, (u32)OV5630_IMAGE_SYSTEM, (u32)0x00);
++
++ /* disable dvp_en */
++ ov5630_dvp_disable(c->sensor_client);
++
++ return 0;
++}
++
++static int ov5630_release(struct i2c_setting *c, void *priv)
++{
++ /* Just suspend the sensor */
++ if (ov5630_standby())
++ return -EIO;
++ return 0;
++}
++
++static int ov5630_on(struct i2c_setting *c)
++{
++ int ret;
++
++ /* Software wake up sensor */
++ ret = ov5630_write(c->sensor_client,
++ (u32)OV5630_IMAGE_SYSTEM, (u32)0x01);
++
++ /* enable dvp_en */
++ return ret + ov5630_dvp_enable(c->sensor_client);
++}
++
++static int ov5630_off(struct i2c_setting *c)
++{
++ int ret;
++
++ /* Software standby sensor */
++ ret = ov5630_write(c->sensor_client,
++ (u32)OV5630_IMAGE_SYSTEM, (u32)0x00);
++ /* disable dvp_en */
++ return ret + ov5630_dvp_disable(c->sensor_client);
++}
++
++static struct sensor_device ov5630 = {
++ .name = "ov5630",
++ .type = SENSOR_TYPE_RAW,
++ .minor = -1,
++ .open = ov5630_open,
++ .release = ov5630_release,
++ .on = ov5630_on,
++ .off = ov5630_off,
++ .querycap = ov5630_get_caps,
++ .get_config = ov5630_get_config,
++ .set_config = ov5630_setup,
++ .enum_parm = ov5630_queryctrl,
++ .get_parm = ov5630_g_ctrl,
++ .set_parm = ov5630_s_ctrl,
++ .try_res = ov5630_try_res,
++ .set_res = ov5630_set_res,
++ .get_ls_corr_config = NULL,
++ .mdi_get_focus = ov5630_motor_get_focus,
++ .mdi_set_focus = ov5630_motor_set_focus,
++ .mdi_max_step = ov5630_motor_max_step,
++ .mdi_calibrate = NULL,
++ .read = ov5630_read,
++ .write = ov5630_write,
++ .suspend = ov5630_standby,
++ .resume = ov5630_wakeup,
++ /* TBC */
++};
++#endif
++
++static int ov5630_s_stream(struct v4l2_subdev *sd, int enable)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ DBG_entering;
++
++ if (enable) {
++ ov5630_write(client, (u32)OV5630_IMAGE_SYSTEM, (u32)0x01);
++ ov5630_write(client, 0x30b0, 0xff);
++ ov5630_write(client, 0x30b1, 0xff);
++ msleep(500);
++ } else {
++ ov5630_write(client, (u32)OV5630_IMAGE_SYSTEM, (u32)0x00);
++ ov5630_write(client, 0x30b0, 0x00);
++ ov5630_write(client, 0x30b1, 0x00);
++ }
++
++ DBG_leaving;
++ return 0;
++}
++
++static int ov5630_enum_framesizes(struct v4l2_subdev *sd,
++ struct v4l2_frmsizeenum *fsize)
++{
++ unsigned int index = fsize->index;
++
++ DBG_entering;
++
++ if (index >= N_RES)
++ return -EINVAL;
++
++ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
++ fsize->discrete.width = ov5630_res[index].width;
++ fsize->discrete.height = ov5630_res[index].height;
++ fsize->reserved[0] = ov5630_res[index].used;
++
++ DBG_leaving;
++
++ return 0;
++}
++
++static int ov5630_enum_frameintervals(struct v4l2_subdev *sd,
++ struct v4l2_frmivalenum *fival)
++{
++ unsigned int index = fival->index;
++
++ DBG_entering;
++
++ if (index >= N_RES)
++ return -EINVAL;
++
++ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
++ fival->discrete.numerator = 1;
++ fival->discrete.denominator = ov5630_res[index].fps;
++
++ DBG_leaving;
++
++ return 0;
++}
++
++static int ov5630_g_chip_ident(struct v4l2_subdev *sd,
++ struct v4l2_dbg_chip_ident *chip)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++#define V4L2_IDENT_OV5630 8245
++ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_OV5630, 0);
++}
++
++#ifdef CONFIG_VIDEO_ADV_DEBUG
++static int ov5630_g_register(struct v4l2_subdev *sd,
++ struct v4l2_dbg_register *reg)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ unsigned char val = 0;
++ int ret;
++
++ if (!v4l2_chip_match_i2c_client(client, &reg->match))
++ return -EINVAL;
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++ ret = ov5630_read(client, reg->reg & 0xffff, &val);
++ reg->val = val;
++ reg->size = 1;
++ return ret;
++}
++
++static int ov5630_s_register(struct v4l2_subdev *sd,
++ struct v4l2_dbg_register *reg)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++ if (!v4l2_chip_match_i2c_client(client, &reg->match))
++ return -EINVAL;
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++ ov5630_write(client, reg->reg & 0xffff, reg->val & 0xff);
++ return 0;
++}
++#endif
++
++static const struct v4l2_subdev_video_ops ov5630_video_ops = {
++ .try_fmt = ov5630_try_fmt,
++ .s_fmt = ov5630_set_fmt,
++ .g_fmt = ov5630_get_fmt,
++ .s_stream = ov5630_s_stream,
++ .enum_framesizes = ov5630_enum_framesizes,
++ .enum_frameintervals = ov5630_enum_frameintervals,
++};
++
++static const struct v4l2_subdev_core_ops ov5630_core_ops = {
++ .g_chip_ident = ov5630_g_chip_ident,
++ .queryctrl = ov5630_queryctrl,
++ .g_ctrl = ov5630_g_ctrl,
++ .s_ctrl = ov5630_s_ctrl,
++ .s_gpio = ov5630_s_power,
++ /*.g_ext_ctrls = ov5630_g_ext_ctrls,*/
++ /*.s_ext_ctrls = ov5630_s_ext_ctrls,*/
++#ifdef CONFIG_VIDEO_ADV_DEBUG
++ .g_register = ov5630_g_register,
++ .s_register = ov5630_s_register,
++#endif
++};
++
++static const struct v4l2_subdev_ops ov5630_ops = {
++ .core = &ov5630_core_ops,
++ .video = &ov5630_video_ops,
++};
++
++/*
++ * Basic i2c stuff
++ */
++/*
++static unsigned short normal_i2c[] = {I2C_OV5630 >> 1,
++ I2C_CLIENT_END};
++I2C_CLIENT_INSMOD;
++
++static struct i2c_driver ov5630_driver;
++*/
++static int ov5630_detect(struct i2c_client *client)
++{
++ struct i2c_adapter *adapter = client->adapter;
++ int adap_id = i2c_adapter_id(adapter);
++ u32 value;
++
++ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
++ eprintk("error i2c check func");
++ return -ENODEV;
++ }
++
++ if (adap_id != 1) {
++ eprintk("adap_id != 1");
++ return -ENODEV;
++ }
++
++ /* if (ov5630_wakeup()) */
++ /* return -ENODEV; */
++ ov5630_wakeup();
++
++ ov5630_read(client, (u32)OV5630_PID_H, &value);
++ if ((u8)value != 0x56) {
++ dprintk(1, "PID != 0x56, but %x", value);
++ dprintk(2, "client->addr = %x", client->addr);
++ return -ENODEV;
++ }
++
++ printk(KERN_INFO "Init ov5630 sensor success\n");
++ return 0;
++}
++
++static int ov5630_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct ci_sensor_config *info;
++ struct v4l2_subdev *sd;
++ int ret = -1;
++/* struct i2c_client *motor; */
++
++ DBG_entering;
++ v4l_info(client, "chip found @ 0x%x (%s)\n",
++ client->addr << 1, client->adapter->name);
++ /*
++ * Setup sensor configuration structure
++ */
++ info = kzalloc(sizeof(struct ci_sensor_config), GFP_KERNEL);
++ if (!info) {
++ eprintk("fail to malloc for ci_sensor_config");
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ ret = ov5630_detect(client);
++ if (ret) {
++ dprintk(1, "error ov5630_detect");
++ goto out_free;
++ }
++
++ sd = &info->sd;
++ v4l2_i2c_subdev_init(sd, client, &ov5630_ops);
++
++ /*
++ * Initialization OV5630
++ * then turn into standby mode
++ */
++ /* ret = ov5630_standby(); */
++ ret = ov5630_init(client);
++ if (ret) {
++ eprintk("error calling ov5630_init");
++ goto out_free;
++ }
++ ov5630_standby();
++
++ ret = 0;
++ goto out;
++
++out_free:
++ kfree(info);
++ DBG_leaving;
++out:
++ return ret;
++}
++
++/*
++ * XXX: Need to be checked
++ */
++static int ov5630_remove(struct i2c_client *client)
++{
++ struct v4l2_subdev *sd = i2c_get_clientdata(client);
++
++ DBG_entering;
++
++ v4l2_device_unregister_subdev(sd);
++ kfree(to_sensor_config(sd));
++
++ DBG_leaving;
++ return 0;
++}
++
++static const struct i2c_device_id ov5630_id[] = {
++ {"ov5630", 0},
++ {}
++};
++
++MODULE_DEVICE_TABLE(i2c, ov5630_id);
++
++static struct v4l2_i2c_driver_data v4l2_i2c_data = {
++ .name = "ov5630",
++ .probe = ov5630_probe,
++ .remove = ov5630_remove,
++ /* .suspend = ov5630_suspend,
++ * .resume = ov5630_resume, */
++ .id_table = ov5630_id,
++};
++
++MODULE_AUTHOR("Xiaolin Zhang <xiaolin.zhang@intel.com>");
++MODULE_DESCRIPTION("A low-level driver for OmniVision 5630 sensors");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/media/video/mrstci/mrstov5630/ov5630.h b/drivers/media/video/mrstci/mrstov5630/ov5630.h
+new file mode 100644
+index 0000000..3da0ecd
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstov5630/ov5630.h
+@@ -0,0 +1,672 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#define I2C_OV5630 0x6C
++/* Should add to kernel source */
++#define I2C_DRIVERID_OV5630 1046
++/* GPIO pin on Moorestown */
++#define GPIO_SCLK_25 44
++#define GPIO_STB_PIN 47
++#define GPIO_STDBY_PIN 49
++#define GPIO_RESET_PIN 50
++
++/* System control register */
++#define OV5630_AGC 0x3000
++#define OV5630_AGCS 0x3001
++#define OV5630_AEC_H 0x3002
++#define OV5630_AEC_L 0x3003
++#define OV5630_LAEC_H 0x3004
++#define OV5630_LAEC_L 0x3005
++#define OV5630_AECS_H 0x3008
++#define OV5630_AECS_L 0x3009
++#define OV5630_PID_H 0x300A
++#define OV5630_PID_L 0x300B
++#define OV5630_SCCB_ID 0x300C
++#define OV5630_PLL_1 0x300E
++#define OV5630_PLL_2 0x300F
++#define OV5630_PLL_3 0x3010
++#define OV5630_PLL_4 0x3011
++#define OV5630_SYS 0x3012
++#define OV5630_AUTO_1 0x3013
++#define OV5630_AUTO_2 0x3014
++#define OV5630_AUTO_3 0x3015
++#define OV5630_AUTO_4 0x3016
++#define OV5630_AUTO_5 0x3017
++#define OV5630_WPT 0x3018
++#define OV5630_BPT 0x3019
++#define OV5630_VPT 0x301A
++#define OV5630_YAVG 0x301B
++#define OV5630_AECG_50 0x301C
++#define OV5630_AECG_60 0x301D
++#define OV5630_ADDVS_H 0x301E
++#define OV5630_ADDVS_L 0x301F
++#define OV5630_FRAME_LENGTH_LINES_H 0x3020
++#define OV5630_FRAME_LENGTH_LINES_L 0x3021
++#define OV5630_LINE_LENGTH_PCK_H 0x3022
++#define OV5630_LINE_LENGTH_PCK_L 0x3023
++#define OV5630_X_ADDR_START_H 0x3024
++#define OV5630_X_ADDR_START_L 0x3025
++#define OV5630_Y_ADDR_START_H 0x3026
++#define OV5630_Y_ADDR_START_L 0x3027
++#define OV5630_X_ADDR_END_H 0x3028
++#define OV5630_X_ADDR_END_L 0x3029
++#define OV5630_Y_ADDR_END_H 0x302A
++#define OV5630_Y_ADDR_END_L 0x302B
++#define OV5630_X_OUTPUT_SIZE_H 0x302C
++#define OV5630_X_OUTPUT_SIZE_L 0x302D
++#define OV5630_Y_OUTPUT_SIZE_H 0x302E
++#define OV5630_Y_OUTPUT_SIZE_L 0x302F
++#define OV5630_FRAME_CNT 0x3030
++#define OV5630_DATR_LMO_0 0x3038
++#define OV5630_DATR_LMO_1 0x3039
++#define OV5630_DATR_LMO_2 0x303A
++#define OV5630_DATR_D56 0x303D
++#define OV5630_DATR_EF 0x303E
++#define OV5630_R_SIGMA_0 0x3048
++#define OV5630_R_SIGMA_1 0x3049
++#define OV5630_R_SIGMA_2 0x304A
++#define OV5630_R_SIGMA_3 0x304B
++#define OV5630_R_SIGMA_4 0x304C
++#define OV5630_R_SIGMA_5 0x304D
++#define OV5630_D56COM 0x304E
++#define OV5630_5060TH 0x3050
++#define OV5630_LMO_TH1 0x3058
++#define OV5630_LMO_TH2 0x3059
++#define OV5630_LMO_K 0x305A
++#define OV5630_BD50ST_H 0x305C
++#define OV5630_BD50ST_L 0x305D
++#define OV5630_BD60ST_H 0x305E
++#define OV5630_BD60ST_L 0x305F
++#define OV5630_HSYNST 0x306D
++#define OV5630_HSYNED 0x306E
++#define OV5630_HSYNED_HSYNST 0x306F
++#define OV5630_TMC_RWIN0 0x3070
++#define OV5630_IO_CTRL0 0x30B0
++#define OV5630_IO_CTRL1 0x30B1
++#define OV5630_IO_CTRL2 0x30B2
++#define OV5630_DSIO_0 0x30B3
++#define OV5630_DSIO_1 0x30B4
++#define OV5630_DSIO_2 0x30B5
++#define OV5630_TMC_10 0x30B6
++#define OV5630_TMC_12 0x30B7
++#define OV5630_TMC_14 0x30B9
++#define OV5630_TMC_COM4 0x30BA
++#define OV5630_TMC_REG6C 0x30BB
++#define OV5630_TMC_REG6E 0x30BC
++#define OV5630_R_CLK_S 0x30BD
++#define OV5630_R_CLK_A 0x30BE
++#define OV5630_R_CLK_A1 0x30BF
++#define OV5630_FRS_0 0x30E0
++#define OV5630_FRS_1 0x30E1
++#define OV5630_FRS_2 0x30E2
++#define OV5630_FRS_3 0x30E3
++#define OV5630_FRS_FECNT 0x30E4
++#define OV5630_FRS_FECNT_0 0x30E5
++#define OV5630_FRS_FECNT_1 0x30E6
++#define OV5630_FRS_RFRM 0x30E7
++#define OV5630_FRS_RSTRB 0x30E8
++#define OV5630_SA1TMC 0x30E9
++#define OV5630_TMC_MISC0 0x30EA
++#define OV5630_TMC_MISC1 0x30EB
++#define OV5630_FLEX_TXP 0x30F0
++#define OV5630_FLEX_FLT 0x30F1
++#define OV5630_FLEX_TXT 0x30F2
++#define OV5630_FLEX_HBK 0x30F3
++#define OV5630_FLEX_HSG 0x30F4
++#define OV5630_FLEX_SA1SFT 0x30F5
++#define OV5630_RVSOPT 0x30F6
++#define OV5630_AUTO 0x30F7
++#define OV5630_IMAGE_TRANSFORM 0x30F8
++#define OV5630_IMAGE_LUM 0x30F9
++#define OV5630_IMAGE_SYSTEM 0x30FA
++#define OV5630_GROUP_WR 0x30FF
++
++/* CIF control register */
++#define OV5630_CIF_CTRL2 0x3202
++
++/* ISP control register */
++#define OV5630_ISP_CTL00 0x3300
++#define OV5630_ISP_CTL01 0x3301
++#define OV5630_ISP_CTL02 0x3302
++#define OV5630_ISP_03 0x3303
++#define OV5630_ISP_DIG_GAIN_MAN 0x3304
++#define OV5630_ISP_BIAS_MAN 0x3305
++#define OV5630_ISP_06 0x3306
++#define OV5630_ISP_STABLE_RANGE 0x3307
++#define OV5630_ISP_R_GAIN_MAN_1 0x3308
++#define OV5630_ISP_R_GAIN_MAN_0 0x3309
++#define OV5630_ISP_G_GAIN_MAN_1 0x330A
++#define OV5630_ISP_G_GAIN_MAN_0 0x330B
++#define OV5630_ISP_B_GAIN_MAN_1 0x330C
++#define OV5630_ISP_B_GAIN_MAN_0 0x330D
++#define OV5630_ISP_STABLE_RANGEW 0x330E
++#define OV5630_ISP_AWB_FRAME_CNT 0x330F
++#define OV5630_ISP_11 0x3311
++#define OV5630_ISP_12 0x3312
++#define OV5630_ISP_13 0x3313
++#define OV5630_ISP_HSIZE_IN_1 0x3314
++#define OV5630_ISP_HSIZE_IN_0 0x3315
++#define OV5630_ISP_VSIZE_IN_1 0x3316
++#define OV5630_ISP_VSIZE_IN_0 0x3317
++#define OV5630_ISP_18 0x3318
++#define OV5630_ISP_19 0x3319
++#define OV5630_ISP_EVEN_MAN0 0x331A
++#define OV5630_ISP_EVEN_MAN1 0x331B
++#define OV5630_ISP_EVEN_MAN2 0x331C
++#define OV5630_ISP_EVEN_MAN3 0x331D
++#define OV5630_ISP_1E 0x331E
++#define OV5630_ISP_1F 0x331F
++#define OV5630_ISP_BLC_LMT_OPTION 0x3320
++#define OV5630_ISP_BLC_THRE 0x3321
++#define OV5630_ISP_22 0x3322
++#define OV5630_ISP_23 0x3323
++#define OV5630_ISP_BLC_MAN0_1 0x3324
++#define OV5630_ISP_BLC_MAN0_0 0x3325
++#define OV5630_ISP_BLC_MAN1_1 0x3326
++#define OV5630_ISP_BLC_MAN1_0 0x3327
++#define OV5630_ISP_BLC_MAN2_1 0x3328
++#define OV5630_ISP_BLC_MAN2_0 0x3329
++#define OV5630_ISP_BLC_MAN3_1 0x332A
++#define OV5630_ISP_BLC_MAN3_0 0x332B
++#define OV5630_ISP_BLC_MAN4_1 0x332C
++#define OV5630_ISP_BLC_MAN4_0 0x332D
++#define OV5630_ISP_BLC_MAN5_1 0x332E
++#define OV5630_ISP_BLC_MAN5_0 0x332F
++#define OV5630_ISP_BLC_MAN6_1 0x3330
++#define OV5630_ISP_BLC_MAN6_0 0x3331
++#define OV5630_ISP_BLC_MAN7_1 0x3332
++#define OV5630_ISP_BLC_MAN7_0 0x3333
++#define OV5630_ISP_CD 0x33CD
++#define OV5630_ISP_FF 0x33FF
++
++/* clipping control register */
++#define OV5630_CLIP_CTRL0 0x3400
++#define OV5630_CLIP_CTRL1 0x3401
++#define OV5630_CLIP_CTRL2 0x3402
++#define OV5630_CLIP_CTRL3 0x3403
++#define OV5630_CLIP_CTRL4 0x3404
++#define OV5630_CLIP_CTRL5 0x3405
++#define OV5630_CLIP_CTRL6 0x3406
++#define OV5630_CLIP_CTRL7 0x3407
++
++/* DVP control register */
++#define OV5630_DVP_CTRL00 0x3500
++#define OV5630_DVP_CTRL01 0x3501
++#define OV5630_DVP_CTRL02 0x3502
++#define OV5630_DVP_CTRL03 0x3503
++#define OV5630_DVP_CTRL04 0x3504
++#define OV5630_DVP_CTRL05 0x3505
++#define OV5630_DVP_CTRL06 0x3506
++#define OV5630_DVP_CTRL07 0x3507
++#define OV5630_DVP_CTRL08 0x3508
++#define OV5630_DVP_CTRL09 0x3509
++#define OV5630_DVP_CTRL0A 0x350A
++#define OV5630_DVP_CTRL0B 0x350B
++#define OV5630_DVP_CTRL0C 0x350C
++#define OV5630_DVP_CTRL0D 0x350D
++#define OV5630_DVP_CTRL0E 0x350E
++#define OV5630_DVP_CTRL0F 0x350F
++#define OV5630_DVP_CTRL10 0x3510
++#define OV5630_DVP_CTRL11 0x3511
++#define OV5630_DVP_CTRL12 0x3512
++#define OV5630_DVP_CTRL13 0x3513
++#define OV5630_DVP_CTRL14 0x3514
++#define OV5630_DVP_CTRL15 0x3515
++#define OV5630_DVP_CTRL16 0x3516
++#define OV5630_DVP_CTRL17 0x3517
++#define OV5630_DVP_CTRL18 0x3518
++#define OV5630_DVP_CTRL19 0x3519
++#define OV5630_DVP_CTRL1A 0x351A
++#define OV5630_DVP_CTRL1B 0x351B
++#define OV5630_DVP_CTRL1C 0x351C
++#define OV5630_DVP_CTRL1D 0x351D
++#define OV5630_DVP_CTRL1E 0x351E
++#define OV5630_DVP_CTRL1F 0x351F
++
++/* MIPI control register */
++#define OV5630_MIPI_CTRL00 0x3600
++#define OV5630_MIPI_CTRL01 0x3601
++#define OV5630_MIPI_CTRL02 0x3602
++#define OV5630_MIPI_CTRL03 0x3603
++#define OV5630_MIPI_CTRL04 0x3604
++#define OV5630_MIPI_CTRL05 0x3605
++#define OV5630_MIPI_CTRL06 0x3606
++#define OV5630_MIPI_CTRL07 0x3607
++#define OV5630_MIPI_CTRL08 0x3608
++#define OV5630_MIPI_CTRL09 0x3609
++#define OV5630_MIPI_CTRL0A 0x360A
++#define OV5630_MIPI_CTRL0B 0x360B
++#define OV5630_MIPI_CTRL0C 0x360C
++#define OV5630_MIPI_CTRL0D 0x360D
++#define OV5630_MIPI_CTRL0E 0x360E
++#define OV5630_MIPI_CTRL0F 0x360F
++#define OV5630_MIPI_CTRL10 0x3610
++#define OV5630_MIPI_CTRL11 0x3611
++#define OV5630_MIPI_CTRL12 0x3612
++#define OV5630_MIPI_CTRL13 0x3613
++#define OV5630_MIPI_CTRL14 0x3614
++#define OV5630_MIPI_CTRL15 0x3615
++#define OV5630_MIPI_CTRL16 0x3616
++#define OV5630_MIPI_CTRL17 0x3617
++#define OV5630_MIPI_CTRL18 0x3618
++#define OV5630_MIPI_CTRL19 0x3619
++#define OV5630_MIPI_CTRL1A 0x361A
++#define OV5630_MIPI_CTRL1B 0x361B
++#define OV5630_MIPI_CTRL1C 0x361C
++#define OV5630_MIPI_CTRL1D 0x361D
++#define OV5630_MIPI_CTRL1E 0x361E
++#define OV5630_MIPI_CTRL1F 0x361F
++#define OV5630_MIPI_CTRL20 0x3620
++#define OV5630_MIPI_CTRL21 0x3621
++#define OV5630_MIPI_CTRL22 0x3622
++#define OV5630_MIPI_CTRL23 0x3623
++#define OV5630_MIPI_CTRL24 0x3624
++#define OV5630_MIPI_CTRL25 0x3625
++#define OV5630_MIPI_CTRL26 0x3626
++#define OV5630_MIPI_CTRL27 0x3627
++#define OV5630_MIPI_CTRL28 0x3628
++#define OV5630_MIPI_CTRL29 0x3629
++#define OV5630_MIPI_CTRL2A 0x362A
++#define OV5630_MIPI_CTRL2B 0x362B
++#define OV5630_MIPI_CTRL2C 0x362C
++#define OV5630_MIPI_CTRL2D 0x362D
++#define OV5630_MIPI_CTRL2E 0x362E
++#define OV5630_MIPI_CTRL2F 0x362F
++#define OV5630_MIPI_CTRL30 0x3630
++#define OV5630_MIPI_CTRL31 0x3631
++#define OV5630_MIPI_CTRL32 0x3632
++#define OV5630_MIPI_CTRL33 0x3633
++#define OV5630_MIPI_CTRL34 0x3634
++#define OV5630_MIPI_CTRL35 0x3635
++#define OV5630_MIPI_CTRL36 0x3636
++#define OV5630_MIPI_CTRL37 0x3637
++#define OV5630_MIPI_CTRL38 0x3638
++#define OV5630_MIPI_CTRL39 0x3639
++#define OV5630_MIPI_CTRL3A 0x363A
++#define OV5630_MIPI_CTRL3B 0x363B
++#define OV5630_MIPI_CTRL3C 0x363C
++#define OV5630_MIPI_CTRL3D 0x363D
++#define OV5630_MIPI_CTRL3E 0x363E
++#define OV5630_MIPI_CTRL3F 0x363F
++#define OV5630_MIPI_RO61 0x3661
++#define OV5630_MIPI_RO62 0x3662
++#define OV5630_MIPI_RO63 0x3663
++#define OV5630_MIPI_RO64 0x3664
++#define OV5630_MIPI_RO65 0x3665
++#define OV5630_MIPI_RO66 0x3666
++
++/* General definition for ov5630 */
++#define OV5630_OUTWND_MAX_H QSXXGA_PLUS4_SIZE_H
++#define OV5630_OUTWND_MAX_V QSXGA_PLUS4_SIZE_V
++
++struct regval_list {
++ u16 reg_num;
++ u8 value;
++};
++
++/*
++ * Default register value
++ * 5Mega Pixel, 2592x1944
++ */
++static struct regval_list ov5630_def_reg[] = {
++ {0x300f, 0x00}, /*00*/
++ {0x30b2, 0x32},
++ {0x3084, 0x44},
++ {0x3016, 0x01},
++ {0x308a, 0x25},
++
++ {0x3013, 0xff},
++ {0x3015, 0x03},
++ {0x30bf, 0x02},
++
++ {0x3065, 0x50},
++ {0x3068, 0x08},
++ {0x30ac, 0x05},
++ {0x309e, 0x24},
++ {0x3091, 0x04},
++
++ {0x3075, 0x22},
++ {0x3076, 0x23},
++ {0x3077, 0x24},
++ {0x3078, 0x25},
++
++ {0x30b5, 0x0c},
++ {0x3090, 0x67},
++
++ {0x30f9, 0x11},
++ {0x3311, 0x80},
++ {0x3312, 0x1f},
++
++ {0x3103, 0x10},
++ {0x305c, 0x01},
++ {0x305d, 0x29},
++ {0x305e, 0x00},
++ {0x305f, 0xf7},
++ {0x308d, 0x0b},
++ {0x30ad, 0x20},
++ {0x3072, 0x0d},
++ {0x308b, 0x82},
++ {0x3317, 0x9c},
++ {0x3318, 0x22},
++ {0x3025, 0x20},
++ {0x3027, 0x08},
++ {0x3029, 0x3f},
++ {0x302b, 0xa3},
++ {0x3319, 0x22},
++ {0x30a1, 0xc4},
++ {0x306a, 0x05},
++ {0x3315, 0x22},
++ {0x30ae, 0x25},
++ {0x3304, 0x40},
++ {0x3099, 0x49},
++
++ {0x300e, 0xb1/*b0*/}, /* Note this PLL setting*/
++ {0x300f, 0x10}, /*00*/
++ {0x3010, 0x07}, /*change from 0f according to SV */
++ {0x3011, 0x40},
++ {0x30af, 0x10},
++ {0x304a, 0x00},
++ {0x304d, 0x00},
++
++ {0x304e, 0x22},
++ {0x304d, 0xa0},
++ {0x3058, 0x00},
++ {0x3059, 0xff},
++ {0x305a, 0x00},
++
++ {0x30e9, 0x04},
++ {0x3084, 0x44},
++ {0x3090, 0x67},
++ {0x30e9, 0x04},
++
++ {0x30b5, 0x1c},
++ {0x331f, 0x22},
++ {0x30ae, 0x15},
++ {0x3304, 0x4c},
++
++ {0x3300, 0xfb},
++ {0x3071, 0x34},
++ {0x30e7, 0x01},
++ {0x3302, 0x60},
++ {0x331e, 0x05},
++ {0x3321, 0x04},
++
++ /* Mark end */
++ {0xffff, 0xff},
++
++};
++
++/* MIPI register are removed by Wen */
++
++/* 2592x1944 */
++static struct regval_list ov5630_res_qsxga_plus4[] = {
++ {0x3020, 0x07},
++ {0x3021, 0xbc},
++ {0x3022, 0x0c/*0a*/},
++ {0x3023, 0xa0/*00*/},
++ {0x305c, 0x01},
++ {0x305d, 0x29},
++ {0x305e, 0x00},
++ {0x305f, 0xf7},
++
++ /* 30fps , 96 MHZ*/
++ /* {0x300f, 0x10}, */
++ {0x300f, 0x10},
++ {0x300e, 0xb1},
++ /* mipi */
++#ifdef MIPI
++ {0x30b0, 0x00},
++ {0x30b1, 0xfc},
++ {0x3603, 0x50},
++ {0x3601, 0x0F},
++ /* lan2 bit 10*/
++ {0x3010, 0x07},
++ {0x30fa, 0x01},
++ /* {0x 30f8 09 */
++ {0x3096, 0x50},
++ /* end mipi*/
++#else
++ /* parrral */
++ {0x30fa, 0x01},
++#endif
++ /* end post*/
++ {0xffff, 0xff},
++};
++
++/* 1920x1080 */
++static struct regval_list ov5630_res_1080p[] = {
++ /*res start*/
++ {0x3020, 0x04},
++ {0x3021, 0x5c},
++ {0x3022, 0x0b/*0a*/},
++ {0x3023, 0x32/*00*/},
++ {0x305c, 0x01},
++ {0x305d, 0x2c},
++ {0x3024, 0x01},
++ {0x3025, 0x6e/*70*/},
++ {0x3026, 0x01},
++ {0x3027, 0xb8},
++ {0x3028, 0x08},
++ {0x3029, 0xef},
++ {0x302a, 0x05},
++ {0x302b, 0xf3},
++ {0x302c, 0x07},
++ {0x302d, 0x80},
++ {0x302e, 0x04},
++ {0x302f, 0x38},
++ {0x3314, 0x07},
++ {0x3315, 0x82/*80*/},
++ {0x3316, 0x04},
++ {0x3317, 0x3c},
++
++ /* 30fps , 96 MHZ*/
++ {0x300f, 0x10}, /* 00 */
++ {0x300e, 0xb1},
++
++ /* mipi */
++#ifdef MIPI
++ {0x30b0, 0x00},
++ {0x30b1, 0xfc},
++ {0x3603, 0x50},
++ {0x3601, 0x0F},
++ /* lan2 bit 10*/
++ {0x3010, 0x07},
++ {0x30fa, 0x01},
++ /* {0x 30f8 09 */
++ {0x3096, 0x50},
++ /* end mipi*/
++#else
++ /* parrral */
++ {0x30fa, 0x01},
++#endif
++ /* end post*/
++ {0xffff, 0xff},
++};
++
++/* 1280x960 V1F2_H1F2 */
++static struct regval_list ov5630_res_xga_plus[] = {
++ {0x3020, 0x03},
++ {0x3021, 0xe4},
++ {0x3022, 0x0c/*07*/},
++ {0x3023, 0x8c/*76*/},
++ {0x305c, 0x00},
++ {0x305d, 0xb1},
++ {0x3024, 0x00},
++ {0x3025, 0x30},
++ {0x3026, 0x00},
++ {0x3027, 0x10/*14*/},
++ {0x3028, 0x0a},
++ {0x3029, 0x2f},
++ {0x302a, 0x07},
++ {0x302b, 0xa7/*a7*/},
++ {0x302c, 0x05},
++ {0x302d, 0x00},
++ {0x302e, 0x03},
++ {0x302f, 0xc0},
++
++ {0x30f8, 0x05},
++ {0x30f9, 0x13},
++ {0x3314, 0x05},
++ {0x3315, 0x02/*00*/},
++ {0x3316, 0x03},
++ {0x3317, 0xc4},
++
++ {0x300f, 0x10}, /* 00 */
++ {0x300e, 0xb1},
++
++#ifdef MIPI
++ {0x30b0, 0x00},
++ {0x30b1, 0xfc},
++ {0x3603, 0x50},
++ {0x3601, 0x0F},
++ /* lan2 bit 10*/
++ {0x3010, 0x07},
++ {0x30fa, 0x01},
++ /* {0x 30f8 09 */
++ {0x3096, 0x50},
++ /* end mipi*/
++#else
++ /* parrral */
++ {0x30fa, 0x01},
++#endif
++
++ {0xffff, 0xff},
++};
++
++/* 1280x720, V1F2 & H1F2 */
++static struct regval_list ov5630_res_720p[] = {
++ {0x3020, 0x02},
++ {0x3021, 0xf4},
++ {0x3022, 0x07},
++ {0x3023, 0x80},
++ {0x305c, 0x00},
++ {0x305d, 0xff},
++ {0x305e, 0x00},
++ {0x305f, 0xd4},
++
++ /* Crop then downscale */
++ {0x3024, 0x00},
++ {0x3025, 0x2c},
++ {0x3026, 0x00},
++ {0x3027, 0xf0},
++ {0x3028, 0x0a},
++ {0x3029, 0x2f},
++ {0x302a, 0x08},
++ {0x302b, 0x97},
++
++ {0x30f8, 0x05},
++
++ {0x302c, 0x05},
++ {0x302d, 0x00},
++ {0x302e, 0x02},
++ {0x302f, 0xd0},
++
++ {0x30f9, 0x13},
++ {0x3314, 0x05},
++ {0x3315, 0x04},
++ {0x3316, 0x02},
++ {0x3317, 0xd4},
++
++ /* Add this to test setting from OVT */
++ {0x300f, 0x10}, /*00*/
++ {0x300e, 0xb0},
++
++#ifdef MIPI
++ {0x30b0, 0x00},
++ {0x30b1, 0xfc},
++ {0x3603, 0x50},
++ {0x3601, 0x0F},
++ /* lan2 bit 10*/
++ {0x3010, 0x07},
++ {0x30fa, 0x01},
++ /* {0x 30f8 09 */
++ {0x3096, 0x50},
++ /* end mipi*/
++#else
++ /* parrral */
++ {0x30fa, 0x01},
++#endif
++
++ {0xffff, 0xff},
++};
++
++/*VGA 40fps now*/
++static struct regval_list ov5630_res_vga_ac04_bill[] = {
++ /* res setting*/
++ {0x3020, 0x02},
++ {0x3021, 0x04},
++ {0x3022, 0x08},
++ {0x3023, 0x48},
++ {0x305c, 0x00},
++ {0x305d, 0x5e},
++ {0x3024, 0x00},
++ {0x3025, 0x2c},/*2c*/
++ {0x3026, 0x00},
++ {0x3027, 0x14},
++ {0x3028, 0x0a},
++ {0x3029, 0x2f},
++ {0x302a, 0x07},
++ {0x302b, 0xa3},
++ {0x302c, 0x02},
++ {0x302d, 0x80},
++ {0x302e, 0x01},
++ {0x302f, 0xe0},
++
++ {0x30b3, 0x09},
++ {0x3301, 0xc1},
++ {0x3313, 0xf1},
++ {0x3314, 0x05},
++ {0x3315, 0x04},/*04*/
++ {0x3316, 0x01},
++ {0x3317, 0xe4},
++ {0x3318, 0x20},
++
++ {0x300f, 0x10/*00*/},
++ {0x30f8, 0x09},
++
++ {0x300f, 0x11},
++ {0x300e, 0xb2},
++
++ {0x3015, 0x02},
++ /* mipi */
++#ifdef MIPI
++ {0x30b0, 0x00},
++ {0x30b1, 0xfc},
++ {0x3603, 0x50},
++ {0x3601, 0x0F},
++ /* lan2 bit 10*/
++ {0x3010, 0x07},
++ {0x30fa, 0x01},
++ /* {0x 30f8 09 */
++ {0x3096, 0x50},
++ /* end mipi*/
++#else
++
++ /* parrral */
++ {0x30fa, 0x01},
++ {0x30f8, 0x09},
++ {0x3096, 0x50},
++#endif
++
++ {0xffff, 0xff},
++};
+diff --git a/drivers/media/video/mrstci/mrstov5630_motor/Kconfig b/drivers/media/video/mrstci/mrstov5630_motor/Kconfig
+new file mode 100644
+index 0000000..b6dcf62
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstov5630_motor/Kconfig
+@@ -0,0 +1,9 @@
++config VIDEO_MRST_OV5630_MOTOR
++ tristate "Moorestown OV5630 motor"
++ depends on I2C && VIDEO_MRST_ISP && VIDEO_MRST_OV5630
++
++ ---help---
++ Say Y here if your platform support OV5630 motor
++
++ To compile this driver as a module, choose M here: the
++ module will be called mrstov2650.ko.
+diff --git a/drivers/media/video/mrstci/mrstov5630_motor/Makefile b/drivers/media/video/mrstci/mrstov5630_motor/Makefile
+new file mode 100644
+index 0000000..056b2a6
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstov5630_motor/Makefile
+@@ -0,0 +1,3 @@
++obj-$(CONFIG_VIDEO_MRST_OV2650) += mrstov5630_motor.o
++
++EXTRA_CFLAGS += -I$(src)/../include
+diff --git a/drivers/media/video/mrstci/mrstov5630_motor/mrstov5630_motor.c b/drivers/media/video/mrstci/mrstov5630_motor/mrstov5630_motor.c
+new file mode 100644
+index 0000000..1bb7274
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstov5630_motor/mrstov5630_motor.c
+@@ -0,0 +1,428 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/kmod.h>
++#include <linux/device.h>
++#include <linux/delay.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/i2c.h>
++#include <linux/gpio.h>
++
++#include <media/v4l2-device.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-i2c-drv.h>
++
++#include "ov5630_motor.h"
++
++/* #define OSPM */
++#include <asm/ipc_defs.h>
++#define PMIC_WRITE1(ipcbuf, reg1, val1) \
++ do { \
++ memset(&ipcbuf, 0, sizeof(struct ipc_pmic_reg_data)); \
++ ipcbuf.ioc = 0; \
++ ipcbuf.num_entries = 1; \
++ ipcbuf.pmic_reg_data[0].register_address = reg1; \
++ ipcbuf.pmic_reg_data[0].value = val1; \
++ if (ipc_pmic_register_write(&ipcbuf, 1) != 0) { \
++ return -1; \
++ } \
++ } while (0);
++
++static int mrstov5630_motor_debug;
++module_param(mrstov5630_motor_debug, int, 0644);
++MODULE_PARM_DESC(mrstov5630_motor_debug, "Debug level (0-1)");
++
++#define dprintk(level, fmt, arg...) do { \
++ if (mrstov5630_motor_debug >= level) \
++ printk(KERN_DEBUG "mrstisp@%s: " fmt "\n", \
++ __func__, ## arg); } \
++ while (0)
++
++#define eprintk(fmt, arg...) \
++ printk(KERN_ERR "mrstisp@%s: line %d: " fmt "\n", \
++ __func__, __LINE__, ## arg);
++
++#define DBG_entering dprintk(2, "entering");
++#define DBG_leaving dprintk(2, "leaving");
++#define DBG_line dprintk(2, " line: %d", __LINE__);
++
++static inline struct ov5630_motor *to_motor_config(struct v4l2_subdev *sd)
++{
++ return container_of(sd, struct ov5630_motor, sd);
++}
++
++static int motor_read(struct i2c_client *c, u16 *reg)
++{
++ int ret;
++ struct i2c_msg msg;
++ u8 msgbuf[2];
++
++ /* Read needs two message to go */
++ msgbuf[0] = 0;
++ msgbuf[1] = 0;
++
++ memset(&msg, 0, sizeof(msg));
++ msg.addr = c->addr;
++ msg.buf = msgbuf;
++ msg.len = 2;
++ msg.flags = I2C_M_RD;
++
++ ret = i2c_transfer(c->adapter, &msg, 1);
++
++ *reg = (msgbuf[0] << 8 | msgbuf[1]);
++
++ ret = (ret == 1) ? 0 : -1;
++ return ret;
++}
++
++static int motor_write(struct i2c_client *c, u16 reg)
++{
++ int ret;
++ struct i2c_msg msg;
++ u8 msgbuf[2];
++
++ /* Writing only needs one message */
++ memset(&msg, 0, sizeof(msg));
++ msgbuf[0] = reg >> 8;
++ msgbuf[1] = reg;
++
++ msg.addr = c->addr;
++ msg.flags = 0;
++ msg.buf = msgbuf;
++ msg.len = 2;
++
++ ret = i2c_transfer(c->adapter, &msg, 1);
++
++ ret = (ret == 1) ? 0 : -1;
++ return ret;
++}
++
++static int ov5630_motor_goto_position(struct i2c_client *c,
++ unsigned short code,
++ struct ov5630_motor *config)
++{
++ int max_code, min_code;
++ u8 cmdh, cmdl;
++ u16 cmd, val = 0;
++
++ max_code = config->macro_code;
++ min_code = config->infin_code;
++
++ if (code > max_code)
++ code = max_code;
++ if (code < min_code)
++ code = min_code;
++
++ cmdh = (MOTOR_DAC_CODE_H(code));
++ cmdl = (MOTOR_DAC_CODE_L(code) | MOTOR_DAC_CTRL_MODE_2(SUB_MODE_4));
++ cmd = cmdh << 8 | cmdl;
++
++ motor_write(c, cmd);
++ /*Delay more than full-scale transition time 8.8ms*/
++ msleep(8);
++ motor_read(c, &val);
++
++ return (cmd == val ? 0 : -1);
++}
++
++int ov5630_motor_wakeup(void)
++{
++ return gpio_direction_output(GPIO_AF_PD, 1);
++}
++
++int ov5630_motor_standby(void)
++{
++ return gpio_direction_output(GPIO_AF_PD, 0);
++}
++
++int ov5630_motor_init(struct i2c_client *client, struct ov5630_motor *config)
++{
++ int ret;
++ int infin_cur, macro_cur;
++#ifdef OSPM
++ /* Power on motor */
++ struct ipc_pmic_reg_data ipcbuf;
++
++ PMIC_WRITE1(ipcbuf, 0x50, 0x27);
++ printk(KERN_WARNING "Power on Vcc33 for motor\n");
++#endif
++
++ infin_cur = MAX(MOTOR_INFIN_CUR, MOTOR_DAC_MIN_CUR);
++ macro_cur = MIN(MOTOR_MACRO_CUR, MOTOR_DAC_MAX_CUR);
++
++ config->infin_cur = infin_cur;
++ config->macro_cur = macro_cur;
++
++ config->infin_code = (int)((infin_cur * MOTOR_DAC_MAX_CODE)
++ / MOTOR_DAC_MAX_CUR);
++ config->macro_code = (int)((macro_cur * MOTOR_DAC_MAX_CODE)
++ / MOTOR_DAC_MAX_CUR);
++
++ config->max_step = ((config->macro_code - config->infin_code)
++ >> MOTOR_STEP_SHIFT) + 1;
++ /* Note here, maybe macro_code */
++ ret = ov5630_motor_goto_position(client, config->infin_code, config);
++ if (!ret)
++ config->cur_code = config->infin_code;
++ else
++ printk(KERN_ERR "Error while initializing motor\n");
++
++ return ret;
++}
++
++int ov5630_motor_set_focus(struct i2c_client *c, int step,
++ struct ov5630_motor *config)
++{
++ int s_code, ret;
++ int max_step = config->max_step;
++ unsigned int val = step;
++
++ DBG_entering;
++ dprintk(1, "setting setp %d", step);
++ if (val > max_step)
++ val = max_step;
++
++ s_code = (val << MOTOR_STEP_SHIFT);
++ s_code += config->infin_code;
++
++ ret = ov5630_motor_goto_position(c, s_code, config);
++ if (!ret)
++ config->cur_code = s_code;
++
++ DBG_leaving;
++ return ret;
++}
++
++static int ov5630_motor_s_ctrl(struct v4l2_subdev *sd,
++ struct v4l2_control *ctrl)
++{
++ struct i2c_client *c = v4l2_get_subdevdata(sd);
++ struct ov5630_motor *config = to_motor_config(sd);
++ int ret;
++
++ DBG_entering;
++ ret = ov5630_motor_set_focus(c, ctrl->value, config);
++ if (ret) {
++ eprintk("error call ov5630_motor_set_focue");
++ return ret;
++ }
++ DBG_leaving;
++ return 0;
++}
++int ov5630_motor_get_focus(struct i2c_client *c, unsigned int *step,
++ struct ov5630_motor *config)
++{
++ int ret_step;
++
++ ret_step = ((config->cur_code - config->infin_code)
++ >> MOTOR_STEP_SHIFT);
++
++ if (ret_step <= config->max_step)
++ *step = ret_step;
++ else
++ *step = config->max_step;
++
++ return 0;
++}
++
++static int ov5630_motor_g_ctrl(struct v4l2_subdev *sd,
++ struct v4l2_control *ctrl)
++{
++ struct i2c_client *c = v4l2_get_subdevdata(sd);
++ struct ov5630_motor *config = to_motor_config(sd);
++ int ret;
++
++ DBG_entering;
++ dprintk(2, "c = %p, config = %p, ctrl = %p", c, config, ctrl);
++ ret = ov5630_motor_get_focus(c, &ctrl->value, config);
++ if (ret) {
++ eprintk("error call ov5630_motor_get_focue");
++ return ret;
++ }
++ DBG_leaving;
++ return 0;
++}
++int ov5630_motor_max_step(struct i2c_client *c, unsigned int *max_code,
++ struct ov5630_motor *config)
++{
++ if (config->max_step != 0)
++ *max_code = config->max_step;
++ return 0;
++}
++
++static int ov5630_motor_queryctrl(struct v4l2_subdev *sd,
++ struct v4l2_queryctrl *qc)
++{
++ struct ov5630_motor *config = to_motor_config(sd);
++
++ DBG_entering;
++
++ if (qc->id != V4L2_CID_FOCUS_ABSOLUTE)
++ return -EINVAL;
++
++ dprintk(1, "got focus range of %d", config->max_step);
++ if (config->max_step != 0)
++ qc->maximum = config->max_step;
++ DBG_leaving;
++ return 0;
++}
++static const struct v4l2_subdev_core_ops ov5630_motor_core_ops = {
++ /*
++ .queryctrl = ov5630_queryctrl,
++ .g_ctrl = ov5630_g_ctrl,
++ */
++ .g_ctrl = ov5630_motor_g_ctrl,
++ .s_ctrl = ov5630_motor_s_ctrl,
++ .queryctrl = ov5630_motor_queryctrl,
++};
++
++static const struct v4l2_subdev_ops ov5630_motor_ops = {
++ .core = &ov5630_motor_core_ops,
++};
++
++static int ov5630_motor_detect(struct i2c_client *client)
++{
++ struct i2c_adapter *adapter = client->adapter;
++ int adap_id = i2c_adapter_id(adapter);
++
++ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
++ eprintk("error i2c check func");
++ return -ENODEV;
++ }
++
++ if (adap_id != 1) {
++ eprintk("adap_id != 1");
++ return -ENODEV;
++ }
++
++ /* if (ov5630_motor_wakeup()) */
++ /* return -ENODEV; */
++ ov5630_motor_wakeup();
++ ssleep(1);
++
++ /*
++ ov5630_motor_read(client, (u32)OV5630_PID_H, &value);
++ if ((u8)value != 0x56) {
++ eprintk("PID != 0x56, but %x", value);
++ dprintk(2, "client->addr = %x", client->addr);
++ return -ENODEV;
++ }
++ */
++
++ return 0;
++}
++
++static int ov5630_motor_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct ov5630_motor *info;
++ struct v4l2_subdev *sd;
++ int ret = -1;
++/* struct i2c_client *motor; */
++
++ DBG_entering;
++ v4l_info(client, "chip found @ 0x%x (%s)\n",
++ client->addr << 1, client->adapter->name);
++ /*
++ * Setup sensor configuration structure
++ */
++ info = kzalloc(sizeof(struct ov5630_motor), GFP_KERNEL);
++ if (!info) {
++ eprintk("fail to malloc for ci_motor");
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ ret = ov5630_motor_detect(client);
++ if (ret) {
++ eprintk("error ov5630_motor_detect");
++ goto out_free;
++ }
++
++ sd = &info->sd;
++ v4l2_i2c_subdev_init(sd, client, &ov5630_motor_ops);
++
++ /*
++ * Initialization OV5630
++ * then turn into standby mode
++ */
++ /* ret = ov5630_motor_standby(); */
++ ret = ov5630_motor_init(client, info);
++ if (ret) {
++ eprintk("error calling ov5630_motor_init");
++ goto out_free;
++ }
++
++ ret = 0;
++ goto out;
++
++out_free:
++ kfree(info);
++ DBG_leaving;
++out:
++ return ret;
++}
++
++/*
++ * XXX: Need to be checked
++ */
++static int ov5630_motor_remove(struct i2c_client *client)
++{
++ struct v4l2_subdev *sd = i2c_get_clientdata(client);
++
++ DBG_entering;
++
++ v4l2_device_unregister_subdev(sd);
++ kfree(to_motor_config(sd));
++
++ DBG_leaving;
++ return 0;
++}
++
++static const struct i2c_device_id ov5630_motor_id[] = {
++ {"ov5630_motor", 0},
++ {}
++};
++MODULE_DEVICE_TABLE(i2c, ov5630_motor_id);
++
++static struct v4l2_i2c_driver_data v4l2_i2c_data = {
++ .name = "ov5630_motor",
++ .probe = ov5630_motor_probe,
++ .remove = ov5630_motor_remove,
++ /* .suspend = ov5630_suspend,
++ * .resume = ov5630_resume, */
++ .id_table = ov5630_motor_id,
++};
++MODULE_AUTHOR("Xiaolin Zhang <xiaolin.zhang@intel.com>");
++MODULE_DESCRIPTION("A low-level driver for OmniVision 5630 sensors");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/media/video/mrstci/mrstov5630_motor/ov5630_motor.h b/drivers/media/video/mrstci/mrstov5630_motor/ov5630_motor.h
+new file mode 100644
+index 0000000..302c218
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstov5630_motor/ov5630_motor.h
+@@ -0,0 +1,86 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include <media/v4l2-subdev.h>
++
++/* VCM start current (mA) */
++#define MOTOR_INFIN_CUR 15
++/* VCM max current for Macro (mA) */
++#define MOTOR_MACRO_CUR 90
++/* DAC output max current (mA) */
++#define MOTOR_DAC_MAX_CUR 100
++/* DAC output min current (mA) */
++#define MOTOR_DAC_MIN_CUR 3
++
++#define MOTOR_DAC_BIT_RES 10
++#define MOTOR_DAC_MAX_CODE ((1 << MOTOR_DAC_BIT_RES) - 1)
++
++#define MOTOR_STEP_SHIFT 4
++
++#define MAX(x, y) ((x) > (y) ? (x) : (y))
++#define MIN(x, y) ((x) < (y) ? (x) : (y))
++
++/* DAC register related define */
++#define MOTOR_POWER_DOWN (1 << 7)
++#define PD_ENABLE (1 << 7)
++#define PD_DISABLE (0)
++
++#define MOTOR_DAC_CODE_H(x) ((x >> 4) & 0x3f)
++#define MOTOR_DAC_CODE_L(x) ((x << 4) & 0xf0)
++
++#define MOTOR_DAC_CTRL_MODE_0 0x00
++#define MOTOR_DAC_CTRL_MODE_1(x) (x & 0x07)
++#define MOTOR_DAC_CTRL_MODE_2(x) ((x & 0x07) | 0x08)
++
++#define SUB_MODE_1 0x01
++#define SUB_MODE_2 0x02
++#define SUB_MODE_3 0x03
++#define SUB_MODE_4 0x04
++#define SUB_MODE_5 0x05
++#define SUB_MODE_6 0x06
++#define SUB_MODE_7 0x07
++
++#define OV5630_MOTOR_ADDR (0x18 >> 1)
++#define POWER_EN_PIN 7
++#define GPIO_AF_PD 95
++
++struct ov5630_motor{
++ unsigned int infin_cur;
++ unsigned int infin_code;
++ unsigned int macro_cur;
++ unsigned int macro_code;
++ unsigned int max_step;
++ unsigned int cur_code;
++ struct v4l2_subdev sd;
++};
++
++extern int ov5630_motor_init(struct i2c_client *client, struct ov5630_motor
++ *config);
++extern int ov5630_motor_standby(void);
++extern int ov5630_motor_wakeup(void);
++extern int ov5630_motor_set_focus(struct i2c_client *c, int step,
++ struct ov5630_motor *config);
++extern int ov5630_motor_get_focus(struct i2c_client *c, unsigned int *step,
++ struct ov5630_motor *config);
++extern int ov5630_motor_max_step(struct i2c_client *c, unsigned int *max_code,
++ struct ov5630_motor *config);
+diff --git a/drivers/media/video/mrstci/mrstov9665/Kconfig b/drivers/media/video/mrstci/mrstov9665/Kconfig
+new file mode 100644
+index 0000000..ba9b692
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstov9665/Kconfig
+@@ -0,0 +1,9 @@
++config VIDEO_MRST_OV9665
++ tristate "Moorestown OV9665 SoC Sensor"
++ depends on I2C && VIDEO_MRST_ISP
++
++ ---help---
++ Say Y here if your platform supports OV9665 SoC Sensor.
++
++ To compile this driver as a module, choose M here: the
++ module will be called mrstov9665.ko.
+diff --git a/drivers/media/video/mrstci/mrstov9665/Makefile b/drivers/media/video/mrstci/mrstov9665/Makefile
+new file mode 100644
+index 0000000..871b6bf
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstov9665/Makefile
+@@ -0,0 +1,3 @@
++obj-$(CONFIG_VIDEO_MRST_OV9665) += mrstov9665.o
++
++EXTRA_CFLAGS += -I$(src)/../include
+diff --git a/drivers/media/video/mrstci/mrstov9665/mrstov9665.c b/drivers/media/video/mrstci/mrstov9665/mrstov9665.c
+new file mode 100644
+index 0000000..04e553a
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstov9665/mrstov9665.c
+@@ -0,0 +1,972 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/kmod.h>
++#include <linux/device.h>
++#include <linux/delay.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/i2c.h>
++#include <linux/gpio.h>
++#include <linux/videodev2.h>
++
++#include <media/v4l2-device.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-i2c-drv.h>
++
++#include "ci_sensor_common.h"
++#include "ov9665.h"
++
++static int mrstov9665_debug;
++module_param(mrstov9665_debug, int, 0644);
++MODULE_PARM_DESC(mrstov9665_debug, "Debug level (0-1)");
++
++#define dprintk(level, fmt, arg...) do { \
++ if (mrstov9665_debug >= level) \
++ printk(KERN_DEBUG "mrstisp@%s: " fmt "\n", \
++ __func__, ## arg); } \
++ while (0)
++
++#define eprintk(fmt, arg...) \
++ printk(KERN_ERR "mrstisp@%s: line %d: " fmt "\n", \
++ __func__, __LINE__, ## arg);
++
++#define DBG_entering dprintk(2, "entering");
++#define DBG_leaving dprintk(2, "leaving");
++#define DBG_line dprintk(2, " line: %d", __LINE__);
++
++static inline struct ci_sensor_config *to_sensor_config(struct v4l2_subdev *sd)
++{
++ return container_of(sd, struct ci_sensor_config, sd);
++}
++
++static struct ov9665_format_struct {
++ __u8 *desc;
++ __u32 pixelformat;
++ struct regval_list *regs;
++} ov9665_formats[] = {
++ {
++ .desc = "YUYV 4:2:2",
++ .pixelformat = SENSOR_MODE_BT601,
++ .regs = NULL,
++ },
++};
++#define N_OV9665_FMTS ARRAY_SIZE(ov9665_formats)
++
++static struct ov9665_res_struct {
++ __u8 *desc;
++ int res;
++ int width;
++ int height;
++ /* FIXME: correct the fps values.. */
++ int fps;
++ bool used;
++ struct regval_list *regs;
++} ov9665_res[] = {
++ {
++ .desc = "SXGA",
++ .res = SENSOR_RES_SXGA,
++ .width = 1280,
++ .height = 1024,
++ .fps = 15,
++ .used = 0,
++ .regs = ov9665_res_sxga,
++ },
++ {
++ .desc = "VGA",
++ .res = SENSOR_RES_VGA,
++ .width = 640,
++ .height = 480,
++ .fps = 15,
++ .used = 0,
++ .regs = ov9665_res_vga,
++ },
++};
++#define N_RES (ARRAY_SIZE(ov9665_res))
++
++/*
++ * I2C Read & Write stuff
++ */
++static int ov9665_read(struct i2c_client *c, unsigned char reg,
++ unsigned char *value)
++{
++ int ret;
++
++ ret = i2c_smbus_read_byte_data(c, reg);
++ if (ret >= 0) {
++ *value = (unsigned char) ret;
++ ret = 0;
++ }
++ return ret;
++}
++
++static int ov9665_write(struct i2c_client *c, unsigned char reg,
++ unsigned char value)
++{
++ int ret = i2c_smbus_write_byte_data(c, reg, value);
++ if (reg == 0x12 && (value & 0x80))
++ msleep(2); /* Wait for reset to run */
++ return ret;
++}
++
++/*
++ * Write a list of register settings; ff/ff stops the process.
++ */
++static int ov9665_write_array(struct i2c_client *c, struct regval_list *vals)
++{
++ struct regval_list *p;
++ u8 read_val = 0;
++ int err_num = 0;
++ int i = 0;
++ p = vals;
++ while (p->reg_num != 0xff) {
++ ov9665_write(c, p->reg_num, p->value);
++ ov9665_read(c, p->reg_num, &read_val);
++ if (read_val != p->value)
++ err_num++;
++ p++;
++ i++;
++ }
++
++ return 0;
++}
++
++static int ov9665_set_data_pin_in(struct i2c_client *client)
++{
++ int ret = 0;
++
++ ret += ov9665_write(client, 0xd5, 0x00);
++ ret += ov9665_write(client, 0xd6, 0x00);
++
++ return ret;
++}
++
++static int ov9665_set_data_pin_out(struct i2c_client *client)
++{
++ int ret = 0;
++
++ ret += ov9665_write(client, 0xd5, 0xff);
++ ret += ov9665_write(client, 0xd6, 0xff);
++
++ return ret;
++}
++/*
++ * Sensor specific helper function
++ */
++static int ov9665_standby(void)
++{
++ /* Pull the pin to high to hardware standby */
++ gpio_set_value(GPIO_STDBY_PIN, 1);
++ dprintk(1, "PM: standby called\n");
++ return 0;
++}
++
++static int ov9665_wakeup(void)
++{
++ /* Pull the pin to low*/
++ gpio_set_value(GPIO_STDBY_PIN, 0);
++ dprintk(1, "PM: wakeup called\n");
++ msleep(10);
++ return 0;
++}
++
++static int ov9665_s_power(struct v4l2_subdev *sd, u32 val)
++{
++ if (val == 1)
++ ov9665_standby();
++ if (val == 0)
++ ov9665_wakeup();
++ return 0;
++}
++
++static int ov9665_init(struct i2c_client *c)
++{
++ int ret;
++ struct v4l2_subdev *sd = i2c_get_clientdata(c);
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ u8 reg = 0;
++
++ /* Fill the configuration structure */
++ /* Note this default configuration value */
++ info->mode = ov9665_formats[0].pixelformat;
++ info->res = ov9665_res[0].res;
++ info->type = SENSOR_TYPE_SOC;
++ info->bls = SENSOR_BLS_OFF;
++ info->gamma = SENSOR_GAMMA_ON;
++ info->cconv = SENSOR_CCONV_ON;
++ info->blc = SENSOR_BLC_AUTO;
++ info->agc = SENSOR_AGC_AUTO;
++ info->awb = SENSOR_AWB_AUTO;
++ info->aec = SENSOR_AEC_AUTO;
++ info->bus_width = SENSOR_BUSWIDTH_8BIT_ZZ;
++ info->ycseq = SENSOR_YCSEQ_YCBYCR;
++ info->conv422 = SENSOR_CONV422_COSITED;
++ info->bpat = SENSOR_BPAT_GRGRBGBG;
++ info->field_inv = SENSOR_FIELDINV_NOSWAP;
++ info->field_sel = SENSOR_FIELDSEL_BOTH;
++ info->hpol = SENSOR_HPOL_REFPOS;
++ info->vpol = SENSOR_VPOL_POS;
++ info->edge = SENSOR_EDGE_FALLING;
++ info->flicker_freq = SENSOR_FLICKER_100;
++ info->cie_profile = 0;
++ memcpy(info->name, "ov9665", 7);
++
++ ret = ov9665_write(c, 0x12, 0x80);
++ /* Set registers into default config value */
++ ret += ov9665_write_array(c, ov9665_def_reg);
++
++ ov9665_read(c, 0x09, &reg);
++ reg = reg | 0x10;
++ ov9665_write(c, 0x09, reg);
++ ov9665_set_data_pin_in(c);
++ ssleep(1);
++
++ return ret;
++}
++
++static int distance(struct ov9665_res_struct *res, u32 w, u32 h)
++{
++ int ret;
++ if (res->width < w || res->height < h)
++ return -1;
++
++ ret = ((res->width - w) + (res->height - h));
++ return ret;
++}
++static int ov9665_try_res(u32 *w, u32 *h)
++{
++ struct ov9665_res_struct *res_index, *p = NULL;
++ int dis, last_dis = ov9665_res->width + ov9665_res->height;
++
++ dprintk(1, "&&&&& before %dx%d", *w, *h);
++ for (res_index = ov9665_res;
++ res_index < ov9665_res + N_RES;
++ res_index++) {
++ if ((res_index->width <= *w) && (res_index->height <= *h))
++ break;
++ dis = distance(res_index, *w, *h);
++ if (dis < last_dis) {
++ last_dis = dis;
++ p = res_index;
++ }
++ }
++ if ((res_index->width < *w) || (res_index->height < *h)) {
++ if (res_index != ov9665_res)
++ res_index--;
++ }
++
++ /*
++ if (p == NULL) {
++ p = ov2650_res;
++ }
++
++ if ((w != NULL) && (h != NULL)) {
++ *w = p->width;
++ *h = p->height;
++ }
++ */
++ if (res_index == ov9665_res + N_RES)
++ res_index = ov9665_res + N_RES - 1;
++
++ *w = res_index->width;
++ *h = res_index->height;
++
++ dprintk(1, "&&&&& after %dx%d", *w, *h);
++ return 0;
++}
++
++static struct ov9665_res_struct *ov9665_to_res(u32 w, u32 h)
++{
++ struct ov9665_res_struct *res_index;
++
++ for (res_index = ov9665_res;
++ res_index < ov9665_res + N_RES;
++ res_index++)
++ if ((res_index->width == w) && (res_index->height == h))
++ break;
++
++ if (res_index >= ov9665_res + N_RES)
++ res_index--; /* Take the bigger one */
++
++ return res_index;
++}
++
++static int ov9665_try_fmt(struct v4l2_subdev *sd,
++ struct v4l2_format *fmt)
++{
++ DBG_entering;
++ return ov9665_try_res(&fmt->fmt.pix.width, &fmt->fmt.pix.height);
++ DBG_leaving;
++}
++
++static int ov9665_get_fmt(struct v4l2_subdev *sd,
++ struct v4l2_format *fmt)
++{
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ unsigned short width, height;
++ int index;
++
++ ci_sensor_res2size(info->res, &width, &height);
++
++ /* Marked the current sensor res as being "used" */
++ for (index = 0; index < N_RES; index++) {
++ if ((width == ov9665_res[index].width) &&
++ (height == ov9665_res[index].height)) {
++ ov9665_res[index].used = 1;
++ continue;
++ }
++ ov9665_res[index].used = 0;
++ }
++
++ fmt->fmt.pix.width = width;
++ fmt->fmt.pix.height = height;
++ return 0;
++}
++
++static int ov9665_set_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
++{
++ struct i2c_client *c = v4l2_get_subdevdata(sd);
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ int ret = 0;
++ struct ov9665_res_struct *res_index;
++ u32 width, height;
++ int index;
++
++ DBG_entering;
++
++ width = fmt->fmt.pix.width;
++ height = fmt->fmt.pix.height;
++
++ ret = ov9665_try_res(&width, &height);
++ res_index = ov9665_to_res(width, height);
++
++ ov9665_wakeup();
++ /* if ((info->res != res_index->res) && (res_index->regs)) { */
++ if ( res_index->regs) {
++ ret = ov9665_write(c, 0x12, 0x80);
++ ret += ov9665_write_array(c, ov9665_def_reg);
++ ret += ov9665_write_array(c, res_index->regs);
++ /* Add delay here to get better image */
++
++ for (index = 0; index < N_RES; index++) {
++ if ((width == ov9665_res[index].width) &&
++ (height == ov9665_res[index].height)) {
++ ov9665_res[index].used = 1;
++ continue;
++ }
++ ov9665_res[index].used = 0;
++ }
++
++ for (index = 0; index < N_RES; index++)
++ dprintk(2, "index = %d, used = %d\n", index,
++ ov9665_res[index].used);
++
++ }
++ info->res = res_index->res;
++
++ DBG_leaving;
++ return ret;
++}
++
++static int ov9665_q_hflip(struct v4l2_subdev *sd, __s32 *value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ int ret;
++ unsigned char v = 0;
++
++ ret = ov9665_read(client, 0x04, &v);
++ *value = ((v & 0x80) == 0x80);
++ return ret;
++}
++
++static int ov9665_t_hflip(struct v4l2_subdev *sd, int value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ unsigned char v = 0;
++ int ret;
++
++ value = value >= 1 ? 1 : 0;
++ ret = ov9665_read(client, 0x33, &v);
++ if (value)
++ v |= 0x08;
++ else
++ v &= ~0x08;
++ ret += ov9665_write(client, 0x33, v);
++
++ ret += ov9665_read(client, 0x04, &v);
++ if (value)
++ v |= 0x80;
++ else
++ v &= ~0x80;
++ ret += ov9665_write(client, 0x04, v);
++ msleep(10); /* FIXME */
++ return ret;
++}
++
++static int ov9665_q_vflip(struct v4l2_subdev *sd, __s32 *value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ int ret;
++ unsigned char v = 0;
++
++ ret = ov9665_read(client, 0x04, &v);
++ *value = ((v & 0x40) == 0x40);
++ return ret;
++}
++
++static int ov9665_t_vflip(struct v4l2_subdev *sd, int value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ unsigned char v = 0;
++ int ret;
++
++ value = value >= 1 ? 1 : 0;
++ ret = ov9665_read(client, 0x04, &v);
++ if (value)
++ v |= 0x40;
++ else
++ v &= ~0x40;
++ ret += ov9665_write(client, 0x04, v);
++ msleep(10); /* FIXME */
++ return ret;
++}
++
++static struct ov9665_control {
++ struct v4l2_queryctrl qc;
++ int (*query)(struct v4l2_subdev *sd, __s32 *value);
++ int (*tweak)(struct v4l2_subdev *sd, int value);
++} ov9665_controls[] = {
++ {
++ .qc = {
++ .id = V4L2_CID_VFLIP,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Vertical flip",
++ .minimum = 0,
++ .maximum = 1,
++ .step = 1,
++ .default_value = 0,
++ },
++ .tweak = ov9665_t_vflip,
++ .query = ov9665_q_vflip,
++ },
++ {
++ .qc = {
++ .id = V4L2_CID_HFLIP,
++ .type = V4L2_CTRL_TYPE_BOOLEAN,
++ .name = "Horizontal mirror",
++ .minimum = 0,
++ .maximum = 1,
++ .step = 1,
++ .default_value = 0,
++ },
++ .tweak = ov9665_t_hflip,
++ .query = ov9665_q_hflip,
++ },
++};
++#define N_CONTROLS (ARRAY_SIZE(ov9665_controls))
++
++static struct ov9665_control *ov9665_find_control(__u32 id)
++{
++ int i;
++
++ for (i = 0; i < N_CONTROLS; i++)
++ if (ov9665_controls[i].qc.id == id)
++ return ov9665_controls + i;
++ return NULL;
++}
++
++static int ov9665_queryctrl(struct v4l2_subdev *sd,
++ struct v4l2_queryctrl *qc)
++{
++ struct ov9665_control *ctrl = ov9665_find_control(qc->id);
++
++ if (ctrl == NULL)
++ return -EINVAL;
++ *qc = ctrl->qc;
++ return 0;
++}
++
++static int ov9665_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ struct ov9665_control *octrl = ov9665_find_control(ctrl->id);
++ int ret;
++
++ if (octrl == NULL)
++ return -EINVAL;
++ ret = octrl->query(sd, &ctrl->value);
++ if (ret >= 0)
++ return 0;
++ return ret;
++}
++
++static int ov9665_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ struct ov9665_control *octrl = ov9665_find_control(ctrl->id);
++ int ret;
++
++ if (octrl == NULL)
++ return -EINVAL;
++ ret = octrl->tweak(sd, ctrl->value);
++ if (ret >= 0)
++ return 0;
++ return ret;
++}
++
++#if 0
++static int ov9665_get_caps(struct i2c_client *c, struct ci_sensor_caps *caps)
++{
++ if (caps == NULL)
++ return -EIO;
++
++ caps->bus_width = SENSOR_BUSWIDTH_8BIT_ZZ;
++ caps->mode = SENSOR_MODE_BT601;
++ caps->field_inv = SENSOR_FIELDINV_NOSWAP;
++ caps->field_sel = SENSOR_FIELDSEL_BOTH;
++ caps->ycseq = SENSOR_YCSEQ_YCBYCR;
++ caps->conv422 = SENSOR_CONV422_COSITED;
++ caps->bpat = SENSOR_BPAT_GRGRBGBG;
++ caps->hpol = SENSOR_HPOL_REFPOS;
++ caps->vpol = SENSOR_VPOL_POS;
++ caps->edge = SENSOR_EDGE_FALLING;
++ caps->bls = SENSOR_BLS_OFF;
++ caps->gamma = SENSOR_GAMMA_ON;
++ caps->cconv = SENSOR_CCONV_ON;
++ caps->res = SENSOR_RES_SXGA | SENSOR_RES_VGA;
++ caps->blc = SENSOR_BLC_AUTO;
++ caps->agc = SENSOR_AGC_AUTO;
++ caps->awb = SENSOR_AWB_AUTO;
++ caps->aec = SENSOR_AEC_AUTO;
++ caps->cie_profile = 0;
++ caps->flicker_freq = SENSOR_FLICKER_100 | SENSOR_FLICKER_120;
++ caps->type = SENSOR_TYPE_SOC;
++ /* caps->name = "ov9665"; */
++ strcpy(caps->name, "ov9665");
++
++ return 0;
++}
++
++static int ov9665_get_config(struct i2c_client *c,
++ struct ci_sensor_config *config)
++{
++ struct ci_sensor_config *info = i2c_get_clientdata(c);
++
++ if (config == NULL) {
++ printk(KERN_WARNING "sensor_get_config: NULL pointer\n");
++ return -EIO;
++ }
++
++ memset(config, 0, sizeof(struct ci_sensor_config *));
++ memcpy(config, info, sizeof(struct ci_sensor_config));
++
++ return 0;
++}
++
++static int ov9665_setup(struct i2c_client *c,
++ const struct ci_sensor_config *config)
++{
++ int ret;
++ struct ov9665_res_struct *res_index;
++ struct ci_sensor_config *info = i2c_get_clientdata(c);
++ u16 width, high;
++
++ /* Soft reset camera first*/
++ ret = ov9665_write(c, 0x12, 0x80);
++
++ /* Set registers into default config value */
++ ret += ov9665_write_array(c, ov9665_def_reg);
++
++ /* set image resolution */
++ ci_sensor_res2size(config->res, &width, &high);
++ ret += ov9665_try_res(c, &width, &high);
++ res_index = ov9665_find_res(width, high);
++ if (res_index->regs)
++ ret += ov9665_write_array(c, res_index->regs);
++ if (!ret)
++ info->res = res_index->res;
++
++ /* Add some delay here to get a better image*/
++ ssleep(1);
++
++ return ret;
++}
++
++static int ov9665_set_data_pin_in(struct i2c_client *client)
++{
++ int ret = 0;
++
++ ret += ov9665_write(client, 0xd5, 0x00);
++ ret += ov9665_write(client, 0xd6, 0x00);
++
++ return ret;
++}
++
++static int ov9665_set_data_pin_out(struct i2c_client *client)
++{
++ int ret = 0;
++
++ ret += ov9665_write(client, 0xd5, 0xff);
++ ret += ov9665_write(client, 0xd6, 0xff);
++
++ return ret;
++}
++/*
++ * File operation functions
++ */
++static int ov9665_open(struct i2c_setting *c, void *priv)
++{
++ struct i2c_client *client = c->sensor_client;
++ int ret = 0;
++ u8 reg = 0;
++ /* Just wake up sensor */
++ if (ov9665_wakeup())
++ return -EIO;
++
++ ov9665_init(client);
++ ret = ov9665_read(client, 0x09, &reg);
++ reg = reg | 0x10;
++ ret += ov9665_write(client, 0x09, reg);
++
++ if (ov9665_set_data_pin_in(client))
++ return EIO;
++/*
++ if (ov9665_standby())
++ return EIO;
++*/
++ return ret;
++}
++
++static int ov9665_release(struct i2c_setting *c, void *priv)
++{
++ /* Just suspend the sensor */
++ if (ov9665_standby())
++ return EIO;
++ return 0;
++}
++
++static int ov9665_on(struct i2c_setting *c)
++{
++ struct i2c_client *client = c->sensor_client;
++ int ret = 0;
++ u8 reg = 0;
++
++ ret = ov9665_read(client, 0x09, &reg);
++ reg = reg & ~0x10;
++ ret = ov9665_write(client, 0x09, reg);
++
++ if (ov9665_set_data_pin_out(client))
++ return EIO;
++
++ return ret;
++}
++
++static int ov9665_off(struct i2c_setting *c)
++{
++ struct i2c_client *client = c->sensor_client;
++ int ret = 0;
++ u8 reg = 0;
++/*
++ ret = ov9665_read(client, 0x09, &reg);
++ reg = reg | 0x10;
++ ret += ov9665_write(client, 0x09, reg);
++*/
++ if (ov9665_set_data_pin_in(client))
++ return EIO;
++
++ return ret;
++}
++
++static struct sensor_device ov9665 = {
++ .name = "OV9665",
++ .type = SENSOR_TYPE_SOC,
++ .minor = -1,
++ .open = ov9665_open,
++ .release = ov9665_release,
++ .on = ov9665_on,
++ .off = ov9665_off,
++ .querycap = ov9665_get_caps,
++ .get_config = ov9665_get_config,
++ .set_config = ov9665_setup,
++ .enum_parm = ov9665_queryctrl,
++ .get_parm = ov9665_g_ctrl,
++ .set_parm = ov9665_s_ctrl,
++ .try_res = ov9665_try_res,
++ .set_res = ov9665_set_res,
++ .suspend = ov9665_standby,
++ .resume = ov9665_wakeup,
++ .get_ls_corr_config = NULL,
++ .set_awb = NULL,
++ .set_aec = NULL,
++ .set_blc = NULL,
++ /* TBC */
++};
++#endif
++
++static int ov9665_s_stream(struct v4l2_subdev *sd, int enable)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ u8 reg = 0;
++
++ DBG_entering;
++ if (enable) {
++ ov9665_read(client, 0x09, &reg);
++ reg = reg & ~0x10;
++ ov9665_write(client, 0x09, reg);
++ ov9665_set_data_pin_out(client);
++ ssleep(1);
++
++ } else {
++ ov9665_read(client, 0x09, &reg);
++ reg = reg | 0x10;
++ ov9665_write(client, 0x09, reg);
++ ov9665_set_data_pin_in(client);
++ }
++
++ DBG_leaving;
++ return 0;
++}
++
++static int ov9665_enum_framesizes(struct v4l2_subdev *sd,
++ struct v4l2_frmsizeenum *fsize)
++{
++ unsigned int index = fsize->index;
++
++ DBG_entering;
++
++ if (index >= N_RES)
++ return -EINVAL;
++
++ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
++ fsize->discrete.width = ov9665_res[index].width;
++ fsize->discrete.height = ov9665_res[index].height;
++ fsize->reserved[0] = ov9665_res[index].used;
++
++ DBG_leaving;
++
++ return 0;
++}
++
++static int ov9665_enum_frameintervals(struct v4l2_subdev *sd,
++ struct v4l2_frmivalenum *fival)
++{
++ unsigned int index = fival->index;
++
++ DBG_entering;
++
++ if (index >= N_RES)
++ return -EINVAL;
++
++ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
++ fival->discrete.numerator = 1;
++ fival->discrete.denominator = ov9665_res[index].fps;
++
++ DBG_leaving;
++
++ return 0;
++}
++
++static int ov9665_g_chip_ident(struct v4l2_subdev *sd,
++ struct v4l2_dbg_chip_ident *chip)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++#define V4L2_IDENT_OV9665 8246
++ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_OV9665, 0);
++}
++
++#ifdef CONFIG_VIDEO_ADV_DEBUG
++static int ov9665_g_register(struct v4l2_subdev *sd,
++ struct v4l2_dbg_register *reg)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ unsigned char val = 0;
++ int ret;
++
++ if (!v4l2_chip_match_i2c_client(client, &reg->match))
++ return -EINVAL;
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++ ret = ov9665_read(client, reg->reg & 0xffff, &val);
++ reg->val = val;
++ reg->size = 1;
++ return ret;
++}
++
++static int ov9665_s_register(struct v4l2_subdev *sd,
++ struct v4l2_dbg_register *reg)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++ if (!v4l2_chip_match_i2c_client(client, &reg->match))
++ return -EINVAL;
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++ ov9665_write(client, reg->reg & 0xffff, reg->val & 0xff);
++ return 0;
++}
++#endif
++
++static const struct v4l2_subdev_video_ops ov9665_video_ops = {
++ .try_fmt = ov9665_try_fmt,
++ .s_fmt = ov9665_set_fmt,
++ .g_fmt = ov9665_get_fmt,
++ .s_stream = ov9665_s_stream,
++ .enum_framesizes = ov9665_enum_framesizes,
++ .enum_frameintervals = ov9665_enum_frameintervals,
++};
++
++static const struct v4l2_subdev_core_ops ov9665_core_ops = {
++ .g_chip_ident = ov9665_g_chip_ident,
++ .queryctrl = ov9665_queryctrl,
++ .g_ctrl = ov9665_g_ctrl,
++ .s_ctrl = ov9665_s_ctrl,
++ .s_gpio = ov9665_s_power,
++ /*.g_ext_ctrls = ov9665_g_ext_ctrls,*/
++ /*.s_ext_ctrls = ov9665_s_ext_ctrls,*/
++#ifdef CONFIG_VIDEO_ADV_DEBUG
++ .g_register = ov9665_g_register,
++ .s_register = ov9665_s_register,
++#endif
++};
++
++static const struct v4l2_subdev_ops ov9665_ops = {
++ .core = &ov9665_core_ops,
++ .video = &ov9665_video_ops,
++};
++/*
++ * Basic i2c stuff
++ */
++/*
++static unsigned short normal_i2c[] = {0x30, I2C_CLIENT_END};
++I2C_CLIENT_INSMOD;
++
++static struct i2c_driver ov9665_driver;
++*/
++static int ov9665_detect(struct i2c_client *client)
++{
++ struct i2c_adapter *adapter = client->adapter;
++ int adap_id = i2c_adapter_id(adapter);
++ u8 config = 0;
++
++ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
++ return -ENODEV;
++
++ if (adap_id != 1)
++ return -ENODEV;
++
++ ov9665_wakeup();
++
++ ov9665_read(client, 0x0a, &config);
++ if (config != 0x96)
++ return -ENODEV;
++
++ ov9665_read(client, 0x0b, &config);
++ if (config != 0x63)
++ return -ENODEV;
++
++ return 0;
++}
++
++static int ov9665_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct ci_sensor_config *info;
++ struct v4l2_subdev *sd;
++ int ret = -1;
++
++ DBG_entering;
++ /*
++ * Setup sensor configuration structure
++ */
++ info = kzalloc(sizeof(struct ci_sensor_config), GFP_KERNEL);
++ if (!info)
++ return -ENOMEM;
++
++ ret = ov9665_detect(client);
++ if (ret) {
++ kfree(info);
++ return -ENODEV;
++ }
++
++ sd = &info->sd;
++ v4l2_i2c_subdev_init(sd, client, &ov9665_ops);
++
++ /*
++ * Initialization OV9665
++ * then turn into standby mode
++ */
++ /* ret = ov9665_standby(); */
++ ret = ov9665_init(client);
++ if (ret) {
++ eprintk("error init ov9665");
++ goto err_1;
++ }
++
++ ov9665_standby();
++ printk(KERN_INFO "Init ov9665 sensor success\n");
++ DBG_leaving;
++ return 0;
++
++err_1:
++ kfree(info);
++ return ret;
++}
++
++/*
++ * XXX: Need to be checked
++ */
++static int ov9665_remove(struct i2c_client *client)
++{
++ struct v4l2_subdev *sd = i2c_get_clientdata(client);
++
++ v4l2_device_unregister_subdev(sd);
++ kfree(to_sensor_config(sd));
++
++ return 0;
++}
++
++static const struct i2c_device_id ov9665_id[] = {
++ {"ov9665", 0},
++ {}
++};
++
++MODULE_DEVICE_TABLE(i2c, ov9665_id);
++
++static struct v4l2_i2c_driver_data v4l2_i2c_data = {
++ .name = "ov9665",
++ .probe = ov9665_probe,
++ .remove = ov9665_remove,
++ .id_table = ov9665_id,
++};
++
++MODULE_AUTHOR("Xiaolin Zhang <xiaolin.zhang@intel.com>");
++MODULE_DESCRIPTION("A low-level driver for OmniVision 9665 sensors");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/media/video/mrstci/mrstov9665/ov9665.h b/drivers/media/video/mrstci/mrstov9665/ov9665.h
+new file mode 100644
+index 0000000..6fc9d12
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrstov9665/ov9665.h
+@@ -0,0 +1,263 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#define I2C_OV9665 0x60
++/* Should add to kernel source */
++#define I2C_DRIVERID_OV9665 1047
++/* GPIO pin on Moorestown */
++#define GPIO_SCLK_25 44
++#define GPIO_STB_PIN 47
++#define GPIO_STDBY_PIN 48
++#define GPIO_RESET_PIN 50
++
++struct regval_list {
++ u8 reg_num;
++ u8 value;
++};
++
++/*
++ * Default register value
++ * 1280x1024 YUV
++ */
++static struct regval_list ov9665_def_reg[] = {
++ {0x3E, 0x80},
++ {0x12, 0x80},
++
++ {0xd5, 0xff},
++ {0xd6, 0x3f},
++
++ {0x3d, 0x3c},
++ {0x11, 0x81},
++ {0x2a, 0x00},
++ {0x2b, 0x00},
++
++ {0x3a, 0xf1},
++ {0x3b, 0x00},
++ {0x3c, 0x58},
++ {0x3e, 0x50},
++ {0x71, 0x00},
++
++ {0x15, 0x00},
++ {0x6a, 0x24},
++ {0x85, 0xe7},
++
++ {0x63, 0x01},
++
++ {0x17, 0x0c},
++ {0x18, 0x5c},
++ {0x19, 0x01},
++ {0x1a, 0x82},
++ {0x03, 0x03},
++ {0x2b, 0x00},
++
++ {0x36, 0xb4},
++ {0x65, 0x10},
++ {0x70, 0x02},
++ {0x71, 0x9f},
++ {0x64, 0x24},
++
++ {0x43, 0x00},
++ {0x5D, 0x55},
++ {0x5E, 0x57},
++ {0x5F, 0x21},
++
++ {0x24, 0x3e},
++ {0x25, 0x38},
++ {0x26, 0x72},
++
++ {0x14, 0x68},
++ {0x0C, 0x3a}, /* Auto detect for 50/60 */
++ {0x4F, 0x9E},
++ {0x50, 0x84},
++ {0x5A, 0x67},
++
++ {0x7d, 0x30},
++ {0x7e, 0x00},
++ {0x82, 0x03},
++ {0x7f, 0x00},
++ {0x83, 0x07},
++ {0x80, 0x03},
++ {0x81, 0x04},
++
++ {0x96, 0xf0},
++ {0x97, 0x00},
++ {0x92, 0x33},
++ {0x94, 0x5a},
++ {0x93, 0x3a},
++ {0x95, 0x48},
++ {0x91, 0xfc},
++ {0x90, 0xff},
++ {0x8e, 0x4e},
++ {0x8f, 0x4e},
++ {0x8d, 0x13},
++ {0x8c, 0x0c},
++ {0x8b, 0x0c},
++ {0x86, 0x9e},
++ {0x87, 0x11},
++ {0x88, 0x22},
++ {0x89, 0x05},
++ {0x8a, 0x03},
++
++ {0x9b, 0x0e},
++ {0x9c, 0x1c},
++ {0x9d, 0x34},
++ {0x9e, 0x5a},
++ {0x9f, 0x68},
++ {0xa0, 0x76},
++ {0xa1, 0x82},
++ {0xa2, 0x8e},
++ {0xa3, 0x98},
++ {0xa4, 0xa0},
++ {0xa5, 0xb0},
++ {0xa6, 0xbe},
++ {0xa7, 0xd2},
++ {0xa8, 0xe2},
++ {0xa9, 0xee},
++ {0xaa, 0x18},
++
++ {0xAB, 0xe7},
++ {0xb0, 0x43},
++ {0xac, 0x04},
++ {0x84, 0x40},
++
++ {0xad, 0x84},
++ {0xd9, 0x24},
++ {0xda, 0x00},
++ {0xae, 0x10},
++
++ {0xab, 0xe7},
++ {0xb9, 0xa0},
++ {0xba, 0x80},
++ {0xbb, 0xa0},
++ {0xbc, 0x80},
++
++ {0xbd, 0x08},
++ {0xbe, 0x19},
++ {0xbf, 0x02},
++ {0xc0, 0x08},
++ {0xc1, 0x2a},
++ {0xc2, 0x34},
++ {0xc3, 0x2d},
++ {0xc4, 0x2d},
++ {0xc5, 0x00},
++ {0xc6, 0x98},
++ {0xc7, 0x18},
++ {0x69, 0x48},
++
++ {0x74, 0xc0},
++
++ {0x7c, 0x18},
++ {0x65, 0x11},
++ {0x66, 0x00},
++ {0x41, 0xa0},
++ {0x5b, 0x28},
++ {0x60, 0x84},
++ {0x05, 0x07},
++ {0x03, 0x03},
++ {0xd2, 0x8c},
++
++ {0xc7, 0x90},
++ {0xc8, 0x06},
++ {0xcb, 0x40},
++ {0xcc, 0x40},
++ {0xcf, 0x00},
++ {0xd0, 0x20},
++ {0xd1, 0x00},
++ {0xc7, 0x18},
++
++ {0x0d, 0x82},
++ {0x0d, 0x80},
++
++ {0x09, 0x01},
++
++ {0xff, 0xff},
++};
++
++/* 1280x1024 */
++static struct regval_list ov9665_res_sxga[] = {
++ {0x0c, 0xbc}, /* note this */
++ {0xff, 0xff},
++};
++
++/* 640x480 */
++static struct regval_list ov9665_res_vga[] = {
++ /* Fclk/4 */
++ {0x11, 0x80},
++ {0x63, 0x00},
++
++ {0x12, 0x40}, /*VGA format*/
++ {0x14, 0x30}, /*4x*/
++ {0x0c, 0xbc},
++ {0x4d, 0x09},
++ {0x5c, 0x80}, /* Full average AEC */
++
++ /* Windows setting */
++ {0x17, 0x0c},
++ {0x18, 0x5c},
++ {0x19, 0x02},
++ {0x1a, 0x3f},
++ {0x03, 0x03},
++ {0x32, 0xad},
++
++ /* 50/60Hz AEC */
++ {0x5a, 0x23},
++ {0x2b, 0x00},
++
++ {0x64, 0xa4},
++ /*
++ {0x4F, 0x4f},
++ {0x50, 0x42},
++ */
++ {0x4F, 0x9e},
++ {0x50, 0x84},
++ {0x97, 0x0a},
++ {0xad, 0x82},
++ {0xd9, 0x11},
++
++ /* Scale window */
++ {0xb9, 0x50},
++ {0xba, 0x3c},
++ {0xbb, 0x50},
++ {0xbc, 0x3c},
++
++ {0xad, 0x80},
++ {0xd9, 0x00},
++ {0xac, 0x0f},
++ {0x84, 0x86},
++
++ /*This is for Color Matrix*/
++ {0xbd, 0x05},
++ {0xbe, 0x16},
++ {0xbf, 0x05},
++ {0xc0, 0x07},
++ {0xc1, 0x18},
++ {0xc2, 0x1f},
++ {0xc3, 0x2b},
++ {0xc4, 0x2b},
++ {0xc5, 0x00},
++
++ {0x0d, 0x92},
++ {0x0d, 0x90},
++
++ {0xff, 0xff},
++};
+diff --git a/drivers/media/video/mrstci/mrsts5k4e1/Kconfig b/drivers/media/video/mrstci/mrsts5k4e1/Kconfig
+new file mode 100755
+index 0000000..7dee787
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrsts5k4e1/Kconfig
+@@ -0,0 +1,9 @@
++config VIDEO_MRST_S5K4E1
++ tristate "Moorestown s5k4e1 RAW Sensor"
++ depends on I2C && VIDEO_MRST_ISP
++
++ ---help---
++ Say Y here if your platform support s5k4e1 RAW Sensor.
++
++ To compile this driver as a module, choose M here: the
++ module will be called mrstov2650.ko.
+diff --git a/drivers/media/video/mrstci/mrsts5k4e1/Makefile b/drivers/media/video/mrstci/mrsts5k4e1/Makefile
+new file mode 100644
+index 0000000..8733fa8
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrsts5k4e1/Makefile
+@@ -0,0 +1,3 @@
++obj-$(CONFIG_VIDEO_MRST_S5K4E1) += mrsts5k4e1.o
++
++EXTRA_CFLAGS += -I$(src)/../include
+diff --git a/drivers/media/video/mrstci/mrsts5k4e1/mrsts5k4e1.c b/drivers/media/video/mrstci/mrsts5k4e1/mrsts5k4e1.c
+new file mode 100755
+index 0000000..f644531
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrsts5k4e1/mrsts5k4e1.c
+@@ -0,0 +1,1024 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/kmod.h>
++#include <linux/device.h>
++#include <linux/delay.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/i2c.h>
++#include <linux/gpio.h>
++
++#include <media/v4l2-device.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-i2c-drv.h>
++
++#include "ci_sensor_common.h"
++#include "mrsts5k4e1.h"
++/* #include "priv.h" */
++/* extern const struct DumpRegs regs_d[]; */
++
++static int s5k4e1_debug;
++module_param(s5k4e1_debug, int, 0644);
++MODULE_PARM_DESC(s5k4e1_debug, "Debug level (0-1)");
++
++#define dprintk(level, fmt, arg...) \
++ do { \
++ if (s5k4e1_debug >= level) \
++ printk(KERN_DEBUG "mrstisp@%s: " fmt "\n", \
++ __func__, ## arg);\
++ } while (0)
++
++#define eprintk(fmt, arg...) \
++ printk(KERN_ERR "mrstisp@%s:" fmt "\n", \
++ __func__, ## arg);
++
++#define DBG_entering dprintk(1, "entering");
++#define DBG_leaving dprintk(1, "leaving");
++#define DBG_line dprintk(1, " line: %d", __LINE__);
++
++static inline struct ci_sensor_config *to_sensor_config(struct v4l2_subdev *sd)
++{
++ return container_of(sd, struct ci_sensor_config, sd);
++}
++
++static struct s5k4e1_format_struct {
++ __u8 *desc;
++ __u32 pixelformat;
++ struct regval_list *regs;
++} s5k4e1_formats[] = {
++ {
++ .desc = "Raw RGB Bayer",
++ .pixelformat = SENSOR_MODE_MIPI,
++ .regs = NULL,
++ },
++};
++#define N_S5K4E1_FMTS ARRAY_SIZE(s5k4e1_formats)
++
++static struct s5k4e1_res_struct {
++ __u8 *desc;
++ int res;
++ int width;
++ int height;
++ /* FIXME: correct the fps values.. */
++ int fps;
++ bool used;
++ struct regval_list *regs;
++} s5k4e1_res[] = {
++ {
++ .desc = "QSXGA_PLUS4",
++ .res = SENSOR_RES_QXGA_PLUS,
++ .width = 2592,
++ .height = 1944,
++ .fps = 15,
++ .used = 0,
++ .regs = s5k4e1_res_qsxga_plus4,
++ },
++ {
++ .desc = "1080P",
++ .res = SENSOR_RES_1080P,
++ .width = 1920,
++ .height = 1080,
++ .fps = 25,
++ .used = 0,
++ .regs = s5k4e1_res_1080p,
++ },
++ {
++ .desc = "VGA_PLUS",
++ .res = SENSOR_RES_VGA_PLUS,
++ .width = 1304,
++ .height = 980,
++ .fps = 30,
++ .used = 0,
++ .regs = s5k4e1_res_vga_ac04_bill,
++ },
++ {
++ .desc = "720p",
++ .res = SENSOR_RES_720P,
++ .width = 1280,
++ .height = 720,
++ .fps = 30,
++ .used = 0,
++ .regs = s5k4e1_res_720p,
++ },
++ {
++ .desc = "VGA",
++ .res = SENSOR_RES_VGA,
++ .width = 640,
++ .height = 480,
++ .used = 0,
++ .fps = 40,
++ .regs = s5k4e1_res_vga_ac04_bill,
++ },
++};
++
++#define N_RES (ARRAY_SIZE(s5k4e1_res))
++
++/*
++ * I2C Read & Write stuff
++ */
++static int s5k4e1_read(struct i2c_client *c, u32 reg, u32 *value)
++{
++ int ret;
++ int i;
++ struct i2c_msg msg[2];
++ u8 msgbuf[2];
++ u8 ret_val = 0;
++ *value = 0;
++ /* Read needs two message to go */
++ memset(&msg, 0, sizeof(msg));
++ msgbuf[0] = 0;
++ msgbuf[1] = 0;
++ i = 0;
++
++ msgbuf[i++] = ((u16)reg) >> 8;
++ msgbuf[i++] = ((u16)reg) & 0xff;
++ msg[0].addr = c->addr;
++ msg[0].buf = msgbuf;
++ msg[0].len = i;
++
++ msg[1].addr = c->addr;
++ msg[1].flags = I2C_M_RD;
++ msg[1].buf = &ret_val;
++ msg[1].len = 1;
++
++ ret = i2c_transfer(c->adapter, &msg[0], 2);
++ *value = ret_val;
++
++ ret = (ret == 2) ? 0 : -1;
++ dprintk(2, "reg:0x%8x, value:0x%8x - %s", reg, *value,
++ (ret ? "failed" : "succesfully"));
++ return ret;
++}
++
++static int s5k4e1_write(struct i2c_client *c, u32 reg, u32 value)
++{
++ int ret, i;
++ struct i2c_msg msg;
++ u8 msgbuf[3];
++
++ /* Writing only needs one message */
++ memset(&msg, 0, sizeof(msg));
++ i = 0;
++ msgbuf[i++] = ((u16)reg) >> 8;
++ msgbuf[i++] = (u16)reg & 0xff;
++ msgbuf[i++] = (u8)value;
++
++ msg.addr = c->addr;
++ msg.flags = 0;
++ msg.buf = msgbuf;
++ msg.len = i;
++
++ ret = i2c_transfer(c->adapter, &msg, 1);
++
++ /* If this is a reset register, wait for 1ms */
++ if (reg == 0x0103 && (value & 0x01))
++ /*Note here, check if this is needed */
++ msleep(4);
++
++ ret = (ret == 1) ? 0 : -1;
++ dprintk(2, "reg:0x%8x, value:0x%8x - %s", reg, value,
++ (ret ? "failed" : "successfully"));
++ return ret;
++}
++
++static int s5k4e1_write_array(struct i2c_client *c, struct regval_list *vals)
++{
++ struct regval_list *p;
++ u32 read_val = 0;
++ int err_num = 0;
++ int i = 0;
++
++ DBG_entering;
++
++ p = vals;
++ while (p->reg_num != 0xffff) {
++ s5k4e1_write(c, (u32)p->reg_num, (u32)p->value);
++ s5k4e1_read(c, (u32)p->reg_num, &read_val);
++ /* msleep(100);*/
++ if (read_val != p->value) {
++ eprintk("0x%x write error:should be 0x%x, but 0x%x",
++ p->reg_num, p->value, read_val);
++ err_num++;
++ }
++ p++;
++ i++;
++ }
++ dprintk(1, "sucessfully wrote %d registers, err is %d", i,
++ err_num);
++ return 0;
++}
++
++/*
++ * Sensor specific helper function
++ */
++static int s5k4e1_standby(void)
++{
++ gpio_set_value(GPIO_STDBY_PIN, 1);
++ dprintk(1, "PM: standby called\n");
++ return 0;
++}
++
++static int s5k4e1_wakeup(void)
++{
++ gpio_set_value(GPIO_STDBY_PIN, 0);
++ dprintk(1, "PM: wakeup called\n");
++ return 0;
++}
++
++static int s5k4e1_s_power(struct v4l2_subdev *sd, u32 val)
++{
++ if (val == 1)
++ s5k4e1_standby();
++ if (val == 0)
++ s5k4e1_wakeup();
++ return 0;
++}
++
++static int s5k4e1_set_img_ctrl(struct i2c_client *c,
++ const struct ci_sensor_config *config)
++{
++ int err = 0;
++
++ DBG_entering;
++
++ switch (config->blc) {
++ /* only SENSOR_BLC_AUTO supported */
++ case SENSOR_BLC_AUTO:
++ break;
++ default:
++ dprintk(1, "BLC not supported,\
++ set to BLC_AUTO by default.");
++ }
++
++ switch (config->bls) {
++ /* only SENSOR_BLS_OFF supported */
++ case SENSOR_BLS_OFF:
++ break;
++ default:
++ dprintk(1, "Black level not supported,\
++ set to BLS_OFF by default.");
++ }
++
++ switch (config->agc) {
++ /* only SENSOR_AGC_OFF supported */
++ case SENSOR_AGC_OFF:
++ break;
++ default:
++ dprintk(1, "AGC not supported,\
++ set to AGC_OFF by default.");
++ }
++
++ switch (config->awb) {
++ /* only SENSOR_AWB_OFF supported */
++ case SENSOR_AWB_OFF:
++ break;
++ default:
++ dprintk(1, "AWB not supported,\
++ set to AWB_OFF by default.");
++ }
++
++ switch (config->aec) {
++ /* only SENSOR_AEC_OFF supported */
++ case SENSOR_AEC_OFF:
++ break;
++ default:
++ dprintk(1, "AEC not supported,\
++ set to AEC_OFF by default.");
++ }
++
++ DBG_leaving;
++
++ return err;
++}
++static int s5k4e1_init(struct i2c_client *c)
++{
++ int ret = 0;
++ struct v4l2_subdev *sd = i2c_get_clientdata(c);
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ char *name = "";
++
++ DBG_entering;
++
++ /* Fill the configuration structure */
++ /* Note this default configuration value */
++ info->mode = s5k4e1_formats[0].pixelformat;
++ info->res = s5k4e1_res[0].res;
++ info->type = SENSOR_TYPE_RAW;
++ info->bls = SENSOR_BLS_OFF;
++ info->gamma = SENSOR_GAMMA_OFF;
++ info->cconv = SENSOR_CCONV_OFF;
++ info->blc = SENSOR_BLC_AUTO;
++ info->agc = SENSOR_AGC_OFF;
++ info->awb = SENSOR_AWB_OFF;
++ info->aec = SENSOR_AEC_OFF;
++ /*info->bus_width = SENSOR_BUSWIDTH_10BIT_ZZ;*/
++ info->bus_width = SENSOR_BUSWIDTH_12BIT;
++ info->ycseq = SENSOR_YCSEQ_YCBYCR;
++ info->conv422 = SENSOR_CONV422_COSITED;
++ /*info->conv422 = SENSOR_CONV422_NOCOSITED;*/
++ info->bpat = SENSOR_BPAT_GRGRBGBG;
++ info->field_inv = SENSOR_FIELDINV_NOSWAP;
++ info->field_sel = SENSOR_FIELDSEL_BOTH;
++ info->hpol = SENSOR_HPOL_REFPOS;
++ info->vpol = SENSOR_VPOL_NEG;
++ info->edge = SENSOR_EDGE_RISING;
++ info->flicker_freq = SENSOR_FLICKER_100;
++ info->cie_profile = SENSOR_CIEPROF_F11;
++ info->mipi_mode = SENSOR_MIPI_MODE_RAW_10;
++ name = "s5k4e1";
++ memcpy(info->name, name, 7);
++
++ /* Reset sensor hardware, and implement the setting*/
++ ret += s5k4e1_write(c, 0x0100, (u32)0x00);
++ /*TODO: See if we can ignore this*/
++ ret = s5k4e1_write(c, 0x0103, (u32)0x01);
++
++ /* sw reset -- delay 3.1ms */
++ msleep(4);
++
++ /* Set registers into default config value */
++ /* ret += s5k4e1_write_array(c, s5k4e1_def_reg); */
++
++ /* Set MIPI interface */
++#ifdef S5K4E1_MIPI
++ ret += s5k4e1_write_array(c, s5k4e1_mipi);
++#endif
++
++ ret += s5k4e1_set_img_ctrl(c, info); /*FIXME*/
++
++ /* streaming */
++ /* ret += s5k4e1_write(c, 0x0100, (u32)0x01); */
++ ret += s5k4e1_write(c, 0x0100, (u32)0x00);
++
++ msleep(1);
++
++ DBG_leaving;
++
++ return ret;
++}
++
++static int distance(struct s5k4e1_res_struct *res, u32 w, u32 h)
++{
++ int ret;
++
++ DBG_entering;
++
++ if (res->width < w || res->height < h)
++ return -1;
++
++ ret = ((res->width - w) + (res->height - h));
++
++ DBG_leaving;
++
++ return ret;
++}
++
++static int s5k4e1_try_res(u32 *w, u32 *h)
++{
++ struct s5k4e1_res_struct *res_index, *p = NULL;
++ int dis, last_dis = s5k4e1_res->width + s5k4e1_res->height;
++
++ DBG_entering;
++
++ for (res_index = s5k4e1_res;
++ res_index < s5k4e1_res + N_RES;
++ res_index++) {
++ if ((res_index->width < *w) || (res_index->height < *h))
++ break;
++ dis = distance(res_index, *w, *h);
++ if (dis < last_dis) {
++ last_dis = dis;
++ p = res_index;
++ }
++ }
++
++ if (p == NULL)
++ p = s5k4e1_res;
++ else if ((p->width < *w) || (p->height < *h)) {
++ if (p != s5k4e1_res)
++ p--;
++ }
++
++ if ((w != NULL) && (h != NULL)) {
++ *w = p->width;
++ *h = p->height;
++ }
++
++ DBG_leaving;
++ return 0;
++}
++
++static struct s5k4e1_res_struct *s5k4e1_to_res(u32 w, u32 h)
++{
++ struct s5k4e1_res_struct *res_index;
++
++ DBG_entering;
++
++ for (res_index = s5k4e1_res;
++ res_index < s5k4e1_res + N_RES;
++ res_index++)
++ if ((res_index->width == w) && (res_index->height == h))
++ break;
++
++ if (res_index >= s5k4e1_res + N_RES)
++ res_index--; /* Take the bigger one */
++
++ DBG_leaving;
++
++ return res_index;
++}
++
++static int s5k4e1_try_fmt(struct v4l2_subdev *sd,
++ struct v4l2_format *fmt)
++{
++ DBG_entering;
++ return s5k4e1_try_res(&fmt->fmt.pix.width, &fmt->fmt.pix.height);
++ DBG_leaving;
++}
++
++static int s5k4e1_get_fmt(struct v4l2_subdev *sd,
++ struct v4l2_format *fmt)
++{
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ unsigned short width, height;
++ int index;
++
++ ci_sensor_res2size(info->res, &width, &height);
++
++ /* Marked the current sensor res as being "used" */
++ for (index = 0; index < N_RES; index++) {
++ if ((width == s5k4e1_res[index].width) &&
++ (height == s5k4e1_res[index].height)) {
++ s5k4e1_res[index].used = 1;
++ continue;
++ }
++ s5k4e1_res[index].used = 0;
++ }
++
++ fmt->fmt.pix.width = width;
++ fmt->fmt.pix.height = height;
++ return 0;
++
++}
++
++#if 0
++/* chuanxiao add, to dump regs */
++static int s5k4e1_dump_regs(struct i2c_client *c)
++{
++ /*struct i2c_client *c = v4l2_get_subdevdata(sd);*/
++ const struct DumpRegs *p = regs_d;
++ u32 value;
++ u32 value1, value2, value3, value4;
++ while (p->ulFlags != eTableEnd) {
++ if (p->ulFlags & eFourBytes) {
++ s5k4e1_read(c, (u32)p->ulAddr, &value1);
++ s5k4e1_read(c, (u32)p->ulAddr+1, &value2);
++ s5k4e1_read(c, (u32)p->ulAddr+2, &value3);
++ s5k4e1_read(c, (u32)p->ulAddr+3, &value4);
++ value = value1<<24 | value2<<16 | value3<<8 | value4;
++ } else if (p->ulFlags & eTwoBytes) {
++ s5k4e1_read(c, (u32)p->ulAddr, &value1);
++ s5k4e1_read(c, (u32)p->ulAddr+1, &value2);
++ value = value1<<8 | value2;
++ } else
++ s5k4e1_read(c, (u32)p->ulAddr, &value);
++ /*
++ if (value == p->ulDefaultValue)
++ dprintk(0, "%s\t @ 0x%x = 0x%lx (= default value)\n",
++ p->pszName, p->ulAddr, value);
++ else
++ dprintk(0, "%s\t @ 0x%x = 0x%lx (default was 0x%lx)\n",
++ p->pszName, p->ulAddr, value, p->ulDefaultValue);
++ */
++ dprintk(0, "%-30s @ 0x%04X = 0x%08X", p->pszName,
++ p->ulAddr, value);
++ p++;
++ }
++ return 0;
++}
++#endif
++
++static int s5k4e1_set_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
++{
++ struct i2c_client *c = v4l2_get_subdevdata(sd);
++ struct ci_sensor_config *info = to_sensor_config(sd);
++ int ret = 0;
++ struct s5k4e1_res_struct *res_index;
++ u32 width, height;
++ int index;
++
++ DBG_entering;
++
++ width = fmt->fmt.pix.width;
++ height = fmt->fmt.pix.height;
++
++ dprintk(1, "was told to set fmt (%d x %d) ", width, height);
++ ret = s5k4e1_try_res(&width, &height);
++
++ res_index = s5k4e1_to_res(width, height);
++
++ s5k4e1_wakeup();
++ DBG_line;
++ if (res_index->regs) {
++ /* software sleep/standby */
++ ret += s5k4e1_write(c, 0x0100, (u32)0x00);
++
++ /* Soft reset camera first*/
++ /*TODO: See if we can ignore this*/
++ ret = s5k4e1_write(c, 0x0103, (u32)0xff);
++
++ /* Set registers into default config value */
++ /* ret += s5k4e1_write_array(c, s5k4e1_def_reg);*/
++
++ /* set image resolution */
++ ret += s5k4e1_write_array(c, res_index->regs);
++
++ ret += s5k4e1_set_img_ctrl(c, info);
++
++ /* XXX setup with unknow meaning ... */
++ /* ret += s5k4e1_write(c, 0x30b0, 0xfe); */
++
++ /* Set MIPI interface */
++#ifdef S5K4E1_MIPI
++ ret += s5k4e1_write_array(c, s5k4e1_mipi);
++#endif
++
++ /* streaming */
++ ret = s5k4e1_write(c, 0x0100, (u32)0x01);
++ msleep(1);
++
++ info->res = res_index->res;
++
++ /* Marked current sensor res as being "used" */
++ for (index = 0; index < N_RES; index++) {
++ if ((width == s5k4e1_res[index].width) &&
++ (height == s5k4e1_res[index].height)) {
++ s5k4e1_res[index].used = 1;
++ continue;
++ }
++ s5k4e1_res[index].used = 0;
++ }
++
++ for (index = 0; index < N_RES; index++)
++ dprintk(2, "index = %d, used = %d\n", index,
++ s5k4e1_res[index].used);
++
++ DBG_line;
++ } else {
++ eprintk("no res for (%d x %d)", width, height);
++ }
++
++ DBG_leaving;
++ return ret;
++}
++
++static int s5k4e1_t_gain(struct v4l2_subdev *sd, int value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++ DBG_entering;
++
++ s5k4e1_write(client, 0x0104, 1); /*hold*/
++
++ /* analog gain */
++ s5k4e1_write(client, 0x0204, value >> 8);
++
++ s5k4e1_write(client, 0x0205, value & 0xff);
++
++ s5k4e1_write(client, 0x0104, 0); /*unhold*/
++
++ dprintk(1, "gain %x was writen to 0x0204/5", value);
++
++ DBG_leaving;
++ return 0;
++}
++
++static int s5k4e1_t_exposure(struct v4l2_subdev *sd, int value)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++ DBG_entering;
++
++ s5k4e1_write(client, 0x0104, 1); /*hold*/
++
++ /* fine integration time */
++ s5k4e1_write(client, 0x0200, value >> 24);
++
++ s5k4e1_write(client, 0x0201, (value >> 16) & 0xff);
++
++ /* coarse integration time */
++ s5k4e1_write(client, 0x0202, (value & 0xff00) >> 8);
++
++ s5k4e1_write(client, 0x0203, value & 0xff);
++
++ s5k4e1_write(client, 0x0104, 0); /*unhold*/
++
++ dprintk(1, "exposure %x was writen to 0x0200/1/2/3", value);
++
++ DBG_leaving;
++ return 0;
++}
++
++static struct s5k4e1_control {
++ struct v4l2_queryctrl qc;
++ int (*query)(struct v4l2_subdev *sd, __s32 *value);
++ int (*tweak)(struct v4l2_subdev *sd, int value);
++} s5k4e1_controls[] = {
++ {
++ .qc = {
++ .id = V4L2_CID_GAIN,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "global gain",
++ .minimum = 0x0,
++ .maximum = 0xFFFF,
++ .step = 0x01,
++ .default_value = 0x00,
++ .flags = 0,
++ },
++ .tweak = s5k4e1_t_gain,
++ },
++ {
++ .qc = {
++ .id = V4L2_CID_EXPOSURE,
++ .type = V4L2_CTRL_TYPE_INTEGER,
++ .name = "exposure",
++ .minimum = 0x0,
++ .maximum = 0xFFFF,
++ .step = 0x01,
++ .default_value = 0x00,
++ .flags = 0,
++ },
++ .tweak = s5k4e1_t_exposure,
++ },
++};
++#define N_CONTROLS (ARRAY_SIZE(s5k4e1_controls))
++
++static struct s5k4e1_control *s5k4e1_find_control(__u32 id)
++{
++ int i;
++
++ DBG_entering;
++ for (i = 0; i < N_CONTROLS; i++)
++ if (s5k4e1_controls[i].qc.id == id)
++ return s5k4e1_controls + i;
++ DBG_leaving;
++ return NULL;
++}
++
++static int s5k4e1_queryctrl(struct v4l2_subdev *sd,
++ struct v4l2_queryctrl *qc)
++{
++ struct s5k4e1_control *ctrl = s5k4e1_find_control(qc->id);
++
++ DBG_entering;
++ if (ctrl == NULL)
++ return -EINVAL;
++ *qc = ctrl->qc;
++
++ DBG_leaving;
++ return 0;
++}
++
++static int s5k4e1_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++/*
++ struct s5k4e1_control *octrl = s5k4e1_find_control(parm->index);
++ int ret;
++
++ if (octrl == NULL)
++ return -EINVAL;
++ ret = octrl->query(client, &parm->value);
++ if (ret >= 0)
++ return 0;
++*/
++ return 0;
++}
++
++static int s5k4e1_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
++{
++ struct s5k4e1_control *octrl = s5k4e1_find_control(ctrl->id);
++ int ret;
++
++ DBG_entering;
++
++ if (octrl == NULL)
++ return -EINVAL;
++ ret = octrl->tweak(sd, ctrl->value);
++ if (ret >= 0)
++ return 0;
++
++ DBG_leaving;
++ return ret;
++}
++
++static int s5k4e1_s_stream(struct v4l2_subdev *sd, int enable)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ DBG_entering;
++
++ if (enable) {
++ s5k4e1_write(client, (u32)0x0100, 0x01);
++ /*chuanxiao add, dump s5k4e1 regs*/
++ /* s5k4e1_dump_regs(client); */
++ } else
++ s5k4e1_write(client, (u32)0x0100, 0x00);
++
++ /*msleep(1);*/
++
++ DBG_leaving;
++ return 0;
++}
++
++static int s5k4e1_enum_framesizes(struct v4l2_subdev *sd,
++ struct v4l2_frmsizeenum *fsize)
++{
++ unsigned int index = fsize->index;
++
++ DBG_entering;
++
++ if (index >= N_RES)
++ return -EINVAL;
++
++ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
++ fsize->discrete.width = s5k4e1_res[index].width;
++ fsize->discrete.height = s5k4e1_res[index].height;
++ fsize->reserved[0] = s5k4e1_res[index].used;
++
++ DBG_leaving;
++
++ return 0;
++}
++
++static int s5k4e1_enum_frameintervals(struct v4l2_subdev *sd,
++ struct v4l2_frmivalenum *fival)
++{
++ unsigned int index = fival->index;
++
++ DBG_entering;
++
++ if (index >= N_RES)
++ return -EINVAL;
++
++ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
++ fival->discrete.numerator = 1;
++ fival->discrete.denominator = s5k4e1_res[index].fps;
++
++ DBG_leaving;
++
++ return 0;
++}
++
++static int s5k4e1_g_chip_ident(struct v4l2_subdev *sd,
++ struct v4l2_dbg_chip_ident *chip)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++ DBG_entering;
++
++#define V4L2_IDENT_S5K4E1 8250
++ DBG_leaving;
++
++ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_S5K4E1, 0);
++}
++
++#ifdef CONFIG_VIDEO_ADV_DEBUG
++static int s5k4e1_g_register(struct v4l2_subdev *sd,
++ struct v4l2_dbg_register *reg)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ unsigned char val = 0;
++ int ret;
++
++ if (!v4l2_chip_match_i2c_client(client, &reg->match))
++ return -EINVAL;
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++ ret = s5k4e1_read(client, reg->reg & 0xffff, &val);
++ reg->val = val;
++ reg->size = 1;
++ return ret;
++}
++
++static int s5k4e1_s_register(struct v4l2_subdev *sd,
++ struct v4l2_dbg_register *reg)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++ if (!v4l2_chip_match_i2c_client(client, &reg->match))
++ return -EINVAL;
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++ s5k4e1_write(client, reg->reg & 0xffff, reg->val & 0xff);
++ return 0;
++}
++#endif
++
++static const struct v4l2_subdev_video_ops s5k4e1_video_ops = {
++ .try_fmt = s5k4e1_try_fmt,
++ .s_fmt = s5k4e1_set_fmt,
++ .g_fmt = s5k4e1_get_fmt,
++ .s_stream = s5k4e1_s_stream,
++ .enum_framesizes = s5k4e1_enum_framesizes,
++ .enum_frameintervals = s5k4e1_enum_frameintervals,
++};
++
++static const struct v4l2_subdev_core_ops s5k4e1_core_ops = {
++ .g_chip_ident = s5k4e1_g_chip_ident,
++ .queryctrl = s5k4e1_queryctrl,
++ .g_ctrl = s5k4e1_g_ctrl,
++ .s_ctrl = s5k4e1_s_ctrl,
++ .s_gpio = s5k4e1_s_power,
++ /*.g_ext_ctrls = s5k4e1_g_ext_ctrls,*/
++ /*.s_ext_ctrls = s5k4e1_s_ext_ctrls,*/
++#ifdef CONFIG_VIDEO_ADV_DEBUG
++ .g_register = s5k4e1_g_register,
++ .s_register = s5k4e1_s_register,
++#endif
++};
++
++static const struct v4l2_subdev_ops s5k4e1_ops = {
++ .core = &s5k4e1_core_ops,
++ .video = &s5k4e1_video_ops,
++};
++
++/*
++ * Basic i2c stuff
++ */
++/*
++static unsigned short normal_i2c[] = {0x36, I2C_CLIENT_END};
++I2C_CLIENT_INSMOD;
++
++static struct i2c_driver i2c_driver_s5k4e1_sensor;
++*/
++static int s5k4e1_detect(struct i2c_client *client)
++{
++ struct i2c_adapter *adapter = client->adapter;
++ int adap_id = i2c_adapter_id(adapter);
++ u32 value;
++
++ DBG_entering;
++
++ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
++ eprintk("error i2c check func");
++ return -ENODEV;
++ }
++
++ if (adap_id != 1) {
++ eprintk("adap_id != 1");
++ return -ENODEV;
++ }
++
++ if (s5k4e1_wakeup()) {
++ eprintk("sensor wakeup failed");
++ return -EIO;
++ }
++
++ s5k4e1_read(client, 0x0003, &value);
++ dprintk(1, "Read from 0x0003: %x", value);
++ if ((value != 0x09))
++ return -ENODEV;
++
++ s5k4e1_read(client, 0x0000, &value);
++ dprintk(1, "Read from 0x0000: %x", value);
++ if ((value != 0x4e) && (value != 0x10))
++ return -ENODEV;
++
++ s5k4e1_read(client, 0x0001, &value);
++ dprintk(1, "Read from 0x0001: %x", value);
++ if ((value != 0x4e) && (value != 0x10))
++ return -ENODEV;
++
++ /*TODO EVT3 detect*/
++ s5k4e1_read(client, 0x0002, &value);
++ dprintk(1, "Read from 0x0002: %x", value);
++ if (value == 0x0010) {
++ dprintk(1, "EVT3 module not supported!");
++ return -ENODEV;
++ }
++
++ DBG_leaving;
++ return 0;
++}
++
++static int s5k4e1_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct ci_sensor_config *info;
++ struct v4l2_subdev *sd;
++ int ret = -1;
++
++ DBG_entering;
++
++ v4l_info(client, "chip found @ 0x%x (%s)\n",
++ client->addr << 1, client->adapter->name);
++
++ /*
++ * Setup sensor configuration structure
++ */
++ info = kzalloc(sizeof(struct ci_sensor_config), GFP_KERNEL);
++ if (!info) {
++ dprintk(0, "fail to malloc for ci_sensor_config");
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ ret = s5k4e1_detect(client);
++ if (ret) {
++ dprintk(0, "error s5k4e1_detect");
++ goto out_free;
++ }
++
++ sd = &info->sd;
++ v4l2_i2c_subdev_init(sd, client, &s5k4e1_ops);
++
++ /*
++ * Initialization S5K4E1
++ * then turn into standby mode
++ */
++ ret = s5k4e1_init(client);
++ if (ret) {
++ dprintk(0, "error calling s5k4e1_init");
++ goto out_free;
++ }
++
++ s5k4e1_standby();
++ dprintk(0, "Init s5k4e1 sensor successfully");
++
++ ret = 0;
++ goto out;
++
++out_free:
++ kfree(info);
++ DBG_leaving;
++out:
++
++ DBG_leaving;
++ return ret;
++}
++
++
++static int s5k4e1_remove(struct i2c_client *client)
++{
++ struct v4l2_subdev *sd = i2c_get_clientdata(client);
++
++ DBG_entering;
++
++ v4l2_device_unregister_subdev(sd);
++ kfree(to_sensor_config(sd));
++
++ DBG_leaving;
++ return 0;
++}
++
++/**
++ * i2c_driver for s5k4e1_sensor
++ */
++static const struct i2c_device_id s5k4e1_id[] = {
++ {"s5k4e1", 0},
++ {}
++};
++
++MODULE_DEVICE_TABLE(i2c, s5k4e1_id);
++
++static struct v4l2_i2c_driver_data v4l2_i2c_data = {
++ .name = "s5k4e1",
++ .probe = s5k4e1_probe,
++ .remove = s5k4e1_remove,
++ /* .suspend = s5k4e1_suspend,
++ * .resume = s5k4e1_resume, */
++ .id_table = s5k4e1_id,
++};
++
++MODULE_AUTHOR("Xiaolin Zhang <xiaolin.zhang@intel.com>");
++MODULE_DESCRIPTION("A low-level driver for Samsung S5K4E1 sensors");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/media/video/mrstci/mrsts5k4e1/mrsts5k4e1.h b/drivers/media/video/mrstci/mrsts5k4e1/mrsts5k4e1.h
+new file mode 100755
+index 0000000..d722035
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrsts5k4e1/mrsts5k4e1.h
+@@ -0,0 +1,662 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#define I2C_S5K4E1 0x6C
++/* Should add to kernel source */
++#define I2C_DRIVERID_S5K4E1 1046
++/* GPIO pin on Moorestown */
++#define GPIO_SCLK_25 44
++#define GPIO_STB_PIN 47
++#define GPIO_STDBY_PIN 49
++#define GPIO_RESET_PIN 50
++
++struct regval_list {
++ u16 reg_num;
++ u8 value;
++};
++
++/*
++ * Default register value
++ * 5Mega Pixel, 2592x1944
++ */
++/* MIPI register are removed by Wen */
++
++/* 2592x1944 */
++static struct regval_list s5k4e1_res_qsxga_plus4[] = {
++ /* Reset for operation */
++ {0x0100, 0x00}, /* stream off */
++ {0x0103, 0x01}, /* software reset */
++
++/*
++ * Analog Setting
++ * This register is for FACTORY ONLY.
++ * If you change it without prior notification,
++ * You are RESPONSIBLE for the FAILURE that will happen in the future.
++ */
++
++/* CDS timing setting ... */
++ {0x3000, 0x04}, /* ct_ld_start (default = 07h) */
++ {0x3001, 0x02}, /* ct_sl_start (default = 05h) */
++ {0x3002, 0x0C}, /* ct_rx_start (default = 21h) */
++ {0x3003, 0x0E}, /* ct_cds_start (default = 23h) */
++ {0x3004, 0x2C}, /* ct_smp_width (default = 60h) */
++ {0x3005, 0x0D}, /* ct_az_width (default = 28h) */
++ {0x3006, 0x39}, /* ct_s1r_width (default = 88h) */
++ {0x3007, 0x02}, /* ct_tx_start (default = 06h) */
++ {0x3008, 0x3C}, /* ct_tx_width 1.5us (default = 7Ch) */
++ {0x3009, 0x3C}, /* ct_stx_width 1.5us (default = 7Ch) */
++ {0x300A, 0x28}, /* ct_dtx_width 1us (default = 3Eh) */
++ {0x300B, 0x15}, /* ct_rmp_rst_start (default = 44h) */
++ {0x300C, 0x15}, /* ct_rmp_sig_start (default = 48h) */
++ {0x300D, 0x02}, /* ct_rmp_lat (default = 02h) */
++ {0x300E, 0xA9}, /* D-Shut en[7], CLP On[5], LD high[4] */
++
++/* CDS option setting ... */
++ {0x3010, 0x00}, /* smp_en[2]=0(00) 1(04) row_id[1:0] = 00 */
++ {0x3011, 0x7A}, /* RST_MX (288), SIG_MX (1024+352) */
++ {0x3012, 0x30}, /* SIG offset1 48 code */
++ {0x3013, 0xA0}, /* RST offset1 160 code */
++ {0x3014, 0x00}, /* SIG offset2 */
++ {0x3015, 0x00}, /* RST offset2 */
++ {0x3016, 0x02}, /* ADC_SAT (510mV) */
++ {0x3017, 0x94}, /* RMP_INIT[3:0](RMP_REG) 1.8V MS[6:4]=1 */
++ {0x3018, 0x78}, /* rmp option - ramp connect[MSB] +RMP INIT DAC MIN */
++ {0x301D, 0xD4}, /* CLP level (default = 0Fh) */
++
++ {0x3021, 0x02}, /* inrush ctrl[1] off */
++ {0x3022, 0x44}, /* pump ring oscillator set [7:4]=CP, [3:0]=NCP */
++ {0x3024, 0x40}, /* pix voltage 2.8V (default = 88h) */
++ {0x3027, 0x08}, /* ntg voltage (default = 04h) */
++
++/* Pixel option setting ... */
++ {0x301C, 0x05}, /* Pixel Bias [3:0] (default = 03h) */
++ {0x30D8, 0x3F}, /* All tx off 2f, on 3f */
++
++/* ADLC setting ... */
++ {0x3070, 0x5F}, /* [6]L-ADLC BPR, [4]ch sel, [3]L-ADLC, [2]F-ADLC */
++ {0x3071, 0x00}, /* F&L-adlc max 127 (default = 11h, max 255) */
++ {0x3080, 0x04}, /* F-ADLC filter A (default = 10h) */
++ {0x3081, 0x38}, /* F-ADLC filter B (default = 20h) */
++
++/* Integration setting ... */
++ {0x0202, 0x03}, /* coarse integration time */
++ {0x0203, 0xCF},
++ {0x0204, 0x00}, /* analog gain[msb] 0100 x8 0080 x4 */
++ {0x0205, 0x80}, /* analog gain[lsb] 0040 x2 0020 x1 */
++
++/* Frame Length */
++ {0x0340, 0x07}, /* Capture 07B4(1960[# of row]+12[V-blank]) */
++ {0x0341, 0xA4}, /* Preview 03E0(980[# of row]+12[V-blank]) */
++
++/* Line Length */
++ {0x0342, 0x0A}, /* 2738 */
++ {0x0343, 0xB2}, /* (Same as sensor default) */
++
++/* embedded 2-line OFF setting ... */
++/* 2608 x 1960 */
++ {0x3084, 0x15}, /* SYNC Mode */
++
++/* (3) MIPI 2-lane Serial(TST = 0000b or TST = 0010b), 30 fps */
++
++ {0x30A9, 0x01},
++ {0x0387, 0x01},
++
++ {0x30BD, 0x00}, /* SEL_CCP[0] */
++ {0x30B2, 0x08}, /* PLL P = 8 */
++ {0x30B3, 0x00}, /* PLL M[8] = 0 */
++ {0x30B5, 0x01}, /* PLL S = 0 */
++ {0x30BE, 0x1A}, /* M_PCLKDIV_AUTO[4], M_DIV_PCLK[3:0] */
++
++ {0x30BF, 0xAB},
++ {0x30C0, 0x00}, /* video_offset[7:4] 3240%12 */
++ {0x30C1, 0x01}, /* pack video enable [0] */
++ {0x30C8, 0x0C}, /* video_data_length 3260 = 2608 * 1.25 */
++ {0x30C9, 0xA8},
++ {0x30E2, 0x02}, /* num lanes[1:0] = 2 */
++ {0x30EE, 0x02}, /* DPHY enable [1] */
++ {0x30F1, 0x70}, /* DPHY BANDCTRL 800MHz=80.6MHz */
++ {0x3111, 0x86}, /* Embedded data off [5] */
++
++ {0x034C, 0x0A},
++ {0x034D, 0x20},
++ {0x044E, 0x07},
++ {0x034F, 0x98},
++
++ {0x0344, 0x00},
++ {0x0345, 0x08},
++ {0x0346, 0x00},
++ {0x0347, 0x08},
++ {0x0348, 0x0A},
++ {0x0349, 0x27},
++ {0x034A, 0x07},
++ {0x034B, 0x9F},
++
++ /* This is to set FRAME_NUM > 0 */
++ {0x30d9, 0x00},
++
++ /* Add this setting according to Bill's test */
++ {0x0305, 0x05},
++ {0x0306, 0x00},
++ {0x0307, 0x3c},
++ {0x30b5, 0x02},
++
++ {0x020E, 0x01}, /* Gr Digital Gain */
++ {0x020F, 0x00},
++ {0x0210, 0x01}, /* Red Digital Gain */
++ {0x0211, 0x00},
++ {0x0212, 0x01}, /* Blue Digital Gain */
++ {0x0213, 0x00},
++ {0x0214, 0x01}, /* Gb Digital Gain */
++ {0x0215, 0x00},
++ {0x0204, 0x00},
++ {0x0205, 0x80},
++
++#if 1
++ /*Apply Bill's setting*/
++ {0x30E2, 0x02},
++ {0x0305, 0x05},
++ {0x0306, 0x00},
++ {0x0307, 0x50}, /* vcc_out = 80 */
++ {0x30B5, 0x01}, /* pll_s = 1 */
++ {0x30B4, 0x50},
++
++ {0x30B2, 0x05},
++
++ {0x30BE, 0x1A}, /* DIV_M_PCLK = 5 */
++
++ {0x0100, 0x01}, /* stream on */
++ {0xffff, 0xff},
++#endif
++};
++
++/* 1920x1080 */
++static struct regval_list s5k4e1_res_1080p[] = {
++/* Reset for operation ... */
++ {0x0100, 0x00}, /* stream off */
++ {0x0103, 0x01}, /* software reset */
++
++/*
++ * Analog Setting
++ * This register is for FACTORY ONLY.
++ * If you change it without prior notification,
++ * You are RESPONSIBLE for the FAILURE that will happen in the future.
++ */
++
++/* CDS timing setting ... */
++ {0x3000, 0x04}, /* ct_ld_start (default = 07h) */
++ {0x3001, 0x02}, /* ct_sl_start (default = 05h) */
++ {0x3002, 0x0C}, /* ct_rx_start (default = 21h) */
++ {0x3003, 0x0E}, /* ct_cds_start (default = 23h) */
++ {0x3004, 0x2C}, /* ct_smp_width (default = 60h) */
++ {0x3005, 0x0D}, /* ct_az_width (default = 28h) */
++ {0x3006, 0x39}, /* ct_s1r_width (default = 88h) */
++ {0x3007, 0x02}, /* ct_tx_start (default = 06h) */
++ {0x3008, 0x3C}, /* ct_tx_width 1.5us (default = 7Ch) */
++ {0x300A, 0x28}, /* ct_dtx_width 1us (default = 3Eh) */
++ {0x300B, 0x15}, /* ct_rmp_rst_start (default = 44h) */
++ {0x300C, 0x15}, /* ct_rmp_sig_start (default = 48h) */
++ {0x300D, 0x02}, /* ct_rmp_lat (default = 02h) */
++ {0x300E, 0xA9}, /* D-Shut en[7], CLP On[5], LD high[4] */
++
++/* CDS option setting ... */
++ {0x3010, 0x00}, /* smp_en[2]=0(00) 1(04) row_id[1:0] = 00 */
++ {0x3011, 0x7A}, /* RST_MX (288), SIG_MX (1024+352) */
++ {0x3012, 0x30}, /* SIG offset1 48 code */
++ {0x3013, 0xA0}, /* RST offset1 160 code */
++ {0x3014, 0x00}, /* SIG offset2 */
++ {0x3015, 0x00}, /* RST offset2 */
++ {0x3016, 0x0A}, /* ADC_SAT (510mV) */
++ {0x3017, 0x94}, /* RMP_INIT[3:0](RMP_REG) 1.8V MS[6:4]=1 */
++ {0x3018, 0x78}, /* rmp option - ramp connect[MSB] +RMP INIT DAC MIN */
++
++ {0x301D, 0xD4}, /* CLP level (default = 0Fh) */
++
++ {0x3021, 0x02}, /* inrush ctrl[1] off */
++ {0x3022, 0x41}, /* pump ring oscillator set [7:4]=CP, [3:0]=NCP */
++ {0x3024, 0x08}, /* pix voltage 2.8V (default = 88h) */
++ {0x3027, 0x08}, /* ntg voltage (default = 04h) */
++
++/* Pixel option setting ... */
++ {0x301C, 0x05}, /* Pixel Bias [3:0] (default = 03h) */
++ {0x30D8, 0x3F}, /* All tx off 2f, on 3f */
++
++/* ADLC setting ... */
++ {0x3070, 0x5F}, /* [6]L-ADLC BPR, [4]ch sel, [3]L-ADLC, [2]F-ADLC */
++ {0x3071, 0x00}, /* F&L-adlc max 127 (default = 11h, max 255) */
++ {0x3080, 0x04}, /* F-ADLC filter A (default = 10h) */
++ {0x3081, 0x38}, /* F-ADLC filter B (default = 20h) */
++
++/* Integration setting ... */
++ {0x0202, 0x03}, /* coarse integration time */
++ {0x0203, 0xCD},
++ {0x0204, 0x00}, /* analog gain[msb] 0100 x8 0080 x4 */
++ {0x0205, 0x80}, /* analog gain[lsb] 0040 x2 0020 x1 */
++
++/* Frame Length */
++ {0x0340, 0x04}, /*Capture 07B4(1960[# of row]+12[V-blank]) */
++ {0x0341, 0x44}, /*Preview 03E0(980[# of row]+12[V-blank]) */
++
++/* Line Length */
++ {0x0342, 0x0A}, /* 2738 */
++ {0x0343, 0xB2}, /*(Same as sensor default) */
++
++/* embedded 2-line OFF setting ... */
++/* 1920 x 1080 */
++ {0x3084, 0x15}, /* SYNC Mode */
++
++/* PLL & MIPI setting ... */
++/* input clock 25MHz */
++
++/* (3) MIPI 2-lane Serial(TST = 0000b or TST = 0010b), 30 fps */
++ {0x30BD, 0x00}, /* SEL_CCP[0] */
++ {0x30B2, 0x08}, /* PLL P = 8 */
++ {0x30B3, 0x00}, /* PLL M[8] = 0 */
++ {0x30B4, 0x78}, /* PLL M = 129 */
++ {0x30B5, 0x00}, /* PLL S = 0 */
++ {0x30BE, 0x1A}, /* M_PCLKDIV_AUTO[4], M_DIV_PCLK[3:0] */
++
++ {0x30BF, 0xAB},
++ {0x30C0, 0x00}, /* video_offset[7:4] 2400%12 */
++ {0x30C1, 0x01}, /* pack video enable [0] */
++ {0x30C8, 0x09}, /* video_data_length 2400 = 1920 * 1.25 */
++ {0x30C9, 0x60},
++ {0x30E2, 0x02}, /* num lanes[1:0] = 2 */
++ {0x30EE, 0x02}, /* DPHY enable [1] */
++ {0x30F1, 0x70}, /* DPHY BANDCTRL 800MHz=80.6MHz */
++ {0x3111, 0x86}, /* Embedded data off [5] */
++
++ {0x30b4, 0x20},
++ {0x30b5, 0x01},
++
++ {0x30A9, 0x01},
++ {0x0387, 0x01},
++ {0x0344, 0x01}, /*x_addr_start 344 */
++ {0x0345, 0x58},
++ {0x0348, 0x08}, /*x_addr_end 2263 */
++ {0x0349, 0xD7},
++ {0x0346, 0x01}, /*y_addr_start 440 */
++ {0x0347, 0xB8},
++ {0x034A, 0x05}, /*y_addr_end 1519 */
++ {0x034B, 0xEF},
++
++ {0x034C, 0x07}, /*x_output_size 1920 */
++ {0x034D, 0x80},
++ {0x034E, 0x04}, /*y_output_size 1080 */
++ {0x034F, 0x38},
++
++ {0x30d9, 0x00},
++
++ {0x020E, 0x01}, /*Gr Digital Gain */
++ {0x020F, 0x00},
++ {0x0210, 0x01}, /*Red Digital Gain */
++ {0x0211, 0x00},
++ {0x0212, 0x01}, /*Blue Digital Gain */
++ {0x0213, 0x00},
++ {0x0214, 0x01}, /*Gb Digital Gain */
++ {0x0215, 0x00},
++ {0x0204, 0x00},
++ {0x0205, 0x80},
++
++
++ /*Apply Bill's setting*/
++ {0x30E2, 0x02},
++ {0x0305, 0x05},
++ {0x0306, 0x00},
++ {0x0307, 0x50}, /*vcc_out = 80 */
++ {0x30B5, 0x01}, /*pll_s = 1 */
++ {0x30B4, 0x50},
++
++ {0x30B2, 0x05},
++
++ {0x30BE, 0x1A}, /*DIV_M_PCLK = 5 */
++
++ {0x0383, 0x01},
++
++ {0x0100, 0x01}, /* stream on */
++ {0xffff, 0xff},
++
++};
++
++/* 1280x720, V1F2 & H1F2 */
++static struct regval_list s5k4e1_res_720p[] = {
++ {0x0100, 0x00}, /* stream off */
++ {0x0103, 0x01}, /* software reset */
++
++/* CDS timing setting ... */
++ {0x3000, 0x04},
++ {0x3001, 0x02},
++ {0x3002, 0x0C},
++ {0x3003, 0x0E},
++ {0x3004, 0x2C},
++ {0x3005, 0x0D},
++ {0x3006, 0x39},
++ {0x3007, 0x02},
++ {0x3008, 0x3C},
++ {0x3009, 0x3C},
++ {0x300A, 0x28},
++ {0x300B, 0x15},
++ {0x300C, 0x15},
++ {0x300D, 0x02},
++ {0x300E, 0xAB},
++
++/* CDS option setting ... */
++ {0x3010, 0x00},
++ {0x3011, 0x7A},
++ {0x3012, 0x30},
++ {0x3013, 0x90},
++ {0x3014, 0x00},
++ {0x3015, 0x00},
++ {0x3016, 0x0A},
++ {0x3017, 0x84},
++ {0x3018, 0x78},
++ {0x301D, 0xD4},
++
++ {0x3021, 0x02},
++ {0x3022, 0x41},
++ {0x3024, 0x08},
++ {0x3027, 0x08},
++
++/* Pixel option setting ... */
++ {0x301C, 0x05}, /* Pixel Bias [3:0] (default = 03h) */
++ {0x30D8, 0x3F}, /* All tx off 2f, on 3f */
++
++/* ADLC setting ... */
++ {0x3070, 0x5F},
++ {0x3071, 0x00},
++ {0x3080, 0x04},
++ {0x3081, 0x38},
++
++/* Integration setting ... */
++ {0x0202, 0x03},
++ {0x0203, 0xD8},
++ {0x0204, 0x00},
++ {0x0205, 0x80},
++
++/*Frame Length*/
++ {0x0340, 0x02},
++ {0x0341, 0xDC},
++
++/* Line Length */
++ {0x0342, 0x0A}, /*2738 */
++ {0x0343, 0xB2},
++
++/* Average Sub-sampling */
++ {0x0387, 0x03},
++ {0x30a9, 0x02},
++
++/* embedded 2-line OFF setting ... */
++/* 1280 x 720 */
++ {0x3084, 0x15},
++
++/* PLL & MIPI setting ... */
++
++/* (3) MIPI 2-lane Serial(TST = 0000b or TST = 0010b), 60 fps */
++ {0x30BD, 0x00},
++ {0x30B2, 0x08},
++ {0x30B3, 0x00},
++ {0x30B4, 0x78},
++ {0x30B5, 0x00},
++ {0x30BE, 0x1A},
++
++ {0x30BF, 0xAB},
++ {0x30C0, 0x40},
++ {0x30C1, 0x01},
++ {0x30C8, 0x06},
++ {0x30C9, 0x40},
++
++ {0x30E2, 0x02},
++
++ {0x30b4, 0x20},
++ {0x30b5, 0x01},
++
++ {0x30EE, 0x02},
++ {0x30F1, 0x70},
++ {0x3111, 0x86},
++
++/* MIPI Size Setting ... */
++/* 1304 x 980 */
++ {0x0344, 0x00},
++ {0x0345, 0x18},
++ {0x0348, 0x0A},
++ {0x0349, 0x17},
++ {0x0346, 0x01},
++ {0x0347, 0x04},
++ {0x034A, 0x06},
++ {0x034B, 0xA3},
++
++ {0x0380, 0x00},
++ {0x0381, 0x01},
++ {0x0382, 0x00},
++ {0x0383, 0x01},
++ {0x0384, 0x00},
++ {0x0385, 0x01},
++ {0x0386, 0x00},
++ {0x0387, 0x03},
++
++ {0x034C, 0x05}, /* x_output_size = 1280 */
++ {0x034D, 0x00},
++ {0x034E, 0x02}, /* y_output_size = 720 */
++ {0x034F, 0xD0},
++
++ {0x30d9, 0x00},
++
++ {0x020E, 0x01},
++ {0x020F, 0x00},
++ {0x0210, 0x01},
++ {0x0211, 0x00},
++ {0x0212, 0x01},
++ {0x0213, 0x00},
++ {0x0214, 0x01},
++ {0x0215, 0x00},
++ {0x0204, 0x01},
++ {0x0205, 0x00},
++
++ /*Apply Bill's setting*/
++ {0x30E2, 0x02},
++ {0x0305, 0x05},
++ {0x0306, 0x00},
++ {0x0307, 0x50}, /*vcc_out = 80 */
++ {0x30B5, 0x01}, /*pll_s = 1 */
++ {0x30B4, 0x50},
++
++ {0x30B2, 0x05},
++
++ {0x30BE, 0x15}, /*DIV_M_PCLK = 5 */
++
++ {0x0100, 0x01}, /* stream on */
++ {0xffff, 0xff},
++};
++
++/*VGA*/
++static struct regval_list s5k4e1_res_vga_ac04_bill[] = {
++ {0x0100, 0x00}, /* stream off */
++ {0x0103, 0x01}, /* software reset */
++
++ {0x3000, 0x04},
++ {0x3001, 0x02},
++ {0x3002, 0x0C},
++ {0x3003, 0x0E},
++ {0x3004, 0x2C},
++ {0x3005, 0x0D},
++ {0x3006, 0x39},
++ {0x3007, 0x02},
++ {0x3008, 0x3C},
++ {0x3009, 0x3C},
++ {0x300A, 0x28},
++ {0x300B, 0x15},
++ {0x300C, 0x15},
++ {0x300D, 0x02},
++ {0x300E, 0xA8},
++
++ {0x3010, 0x00},
++ {0x3011, 0x7A},
++ {0x3012, 0x30},
++ {0x3013, 0xA0},
++ {0x3014, 0x00},
++ {0x3015, 0x00},
++ {0x3016, 0x0A},
++ {0x3017, 0x94},
++ {0x3018, 0x78},
++
++ {0x301D, 0xD4},
++
++ {0x3021, 0x02},
++ {0x3022, 0x41},
++ {0x3024, 0x08},
++ {0x3027, 0x08},
++
++ {0x301C, 0x05},
++ {0x30D8, 0x3F},
++
++ {0x3070, 0x5F},
++ {0x3071, 0x00},
++ {0x3080, 0x04},
++ {0x3081, 0x38},
++
++ {0x0202, 0x03},
++ {0x0203, 0xD4},
++ {0x0204, 0x00},
++ {0x0205, 0x20},
++
++ {0x0340, 0x03},
++ {0x0341, 0xE0},
++
++ {0x0342, 0x0A},
++ {0x0343, 0xB2},
++
++ {0x0344, 0x00},
++ {0x0345, 0x18},
++ {0x0348, 0x0A},
++ {0x0349, 0x17},
++ {0x0346, 0x00},
++ {0x0347, 0x14},
++ {0x034A, 0x07},
++ {0x034B, 0x93},
++
++ {0x034C, 0x02},
++ {0x034D, 0x80},
++ {0x034E, 0x01},
++ {0x034F, 0xE0},
++
++ {0x0380, 0x00},
++ {0x0381, 0x01},
++ {0x0382, 0x00},
++ {0x0383, 0x07},
++ {0x0384, 0x00},
++ {0x0385, 0x01},
++ {0x0386, 0x00},
++ {0x0387, 0x07},
++
++ {0x3084, 0x15},
++
++ {0x30BD, 0x00},
++
++
++ {0x30b3, 0x00},
++ {0x30b4, 0x57},
++ {0x30b5, 0x01},
++ {0x30f1, 0x70},
++
++ {0x30BE, 0x1A},
++
++ {0x30BF, 0xAB},
++ {0x30C0, 0x80},
++ {0x30C1, 0x01},
++ {0x30C8, 0x03},
++ {0x30C9, 0x20},
++
++ {0x30b2, 0x06},
++ {0x30E2, 0x02},
++
++ {0x30EE, 0x02},
++
++ {0x3111, 0x86},
++
++ {0x30d9, 0x00},
++
++ {0x020E, 0x01},
++ {0x020F, 0x00},
++ {0x0210, 0x01},
++ {0x0211, 0x00},
++ {0x0212, 0x01},
++ {0x0213, 0x00},
++ {0x0214, 0x01},
++ {0x0215, 0x00},
++ {0x0204, 0x01},
++ {0x0205, 0x00},
++
++#if 1
++ /* Apply Bill's setting */
++ {0x30E2, 0x02},
++ {0x0305, 0x05},
++ {0x0306, 0x00},
++ {0x0307, 0x50},
++ {0x30B5, 0x01},
++ {0x30B4, 0x50},
++
++ {0x30B2, 0x05},
++
++ {0x30BE, 0x15},
++
++ /* {0x0100, 0x01}, */
++ /* {0xffff, 0xff}, */
++#endif
++
++#if 1
++ /* 1304x980 */
++ {0x3013, 0x90},
++ {0x3017, 0x84},
++ {0x30A9, 0x02},
++ {0x300E, 0xAB},
++
++ {0x0387, 0x03},
++ {0x0344, 0x00}, /* x_addr_start = 0 */
++ {0x0345, 0x00},
++ {0x0348, 0x0A}, /* x_addr_end = 2607 */
++ {0x0349, 0x2F},
++ {0x0346, 0x00}, /* y_addr_start = 0 */
++ {0x0347, 0x00},
++ {0x034A, 0x07}, /* y_addr_end = 1959 */
++ {0x034B, 0xA7},
++ {0x0380, 0x00},
++ {0x0381, 0x01},
++ {0x0382, 0x00},
++ {0x0383, 0x01},
++ {0x0384, 0x00},
++ {0x0385, 0x01},
++ {0x0386, 0x00},
++ {0x0387, 0x03},
++ {0x034c, 0x05}, /* x_output_size = 1304 */
++ {0x034d, 0x18},
++ {0x034e, 0x03}, /* y_output_size = 980 */
++ {0x034f, 0xd4},
++ {0x30BF, 0xAB},
++ {0x30c0, 0xa0},
++ {0x30C8, 0x06}, /* x_output_size * 1.25 */
++ {0x30c9, 0x5e},
++
++ {0x0100, 0x01},
++ {0xffff, 0xff},
++
++#endif
++};
+diff --git a/drivers/media/video/mrstci/mrsts5k4e1_motor/Kconfig b/drivers/media/video/mrstci/mrsts5k4e1_motor/Kconfig
+new file mode 100755
+index 0000000..27cb730
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrsts5k4e1_motor/Kconfig
+@@ -0,0 +1,9 @@
++config VIDEO_MRST_S5K4E1_MOTOR
++ tristate "Moorestown s5k4e1 motor"
++ depends on I2C && VIDEO_MRST_ISP && VIDEO_MRST_S5K4E1
++
++ ---help---
++ Say Y here if your platform support s5k4e1 motor.
++
++ To compile this driver as a module, choose M here: the
++ module will be called mrstov2650.ko.
+diff --git a/drivers/media/video/mrstci/mrsts5k4e1_motor/Makefile b/drivers/media/video/mrstci/mrsts5k4e1_motor/Makefile
+new file mode 100644
+index 0000000..68c9fbc
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrsts5k4e1_motor/Makefile
+@@ -0,0 +1,3 @@
++obj-$(CONFIG_VIDEO_MRST_S5K4E1_MOTOR) += mrsts5k4e1_motor.o
++
++EXTRA_CFLAGS += -I$(src)/../include
+diff --git a/drivers/media/video/mrstci/mrsts5k4e1_motor/mrsts5k4e1_motor.c b/drivers/media/video/mrstci/mrsts5k4e1_motor/mrsts5k4e1_motor.c
+new file mode 100644
+index 0000000..cd2813b
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrsts5k4e1_motor/mrsts5k4e1_motor.c
+@@ -0,0 +1,430 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/kmod.h>
++#include <linux/device.h>
++#include <linux/delay.h>
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/i2c.h>
++#include <linux/gpio.h>
++
++#include <media/v4l2-device.h>
++#include <media/v4l2-chip-ident.h>
++#include <media/v4l2-i2c-drv.h>
++
++#include "mrsts5k4e1_motor.h"
++
++static int s5k4e1_motor_debug;
++module_param(s5k4e1_motor_debug, int, 0644);
++MODULE_PARM_DESC(s5k4e1_motor_debug, "Debug level (0-1)");
++
++#define dprintk(level, fmt, arg...) \
++ do { \
++ if (s5k4e1_motor_debug >= level) \
++ printk(KERN_DEBUG "mrstisp@%s: " fmt "\n", __func__, ## arg); \
++ } while (0)
++
++#define eprintk(fmt, arg...) \
++ printk(KERN_ERR "mrstisp@%s: line %d: " fmt "\n", \
++ __func__, __LINE__, ## arg);
++
++#define DBG_entering dprintk(1, "entering");
++#define DBG_leaving dprintk(1, "leaving");
++#define DBG_line dprintk(1, " line: %d", __LINE__);
++
++static inline struct s5k4e1_motor *to_motor_config(struct v4l2_subdev *sd)
++{
++ return container_of(sd, struct s5k4e1_motor, sd);
++}
++
++/*static struct s5k4e1_motor *config; */
++static int motor_read(struct i2c_client *c, u32 *reg)
++{
++ int ret;
++ struct i2c_msg msg;
++ u8 msgbuf[3];
++
++ msgbuf[0] = 0;
++ msgbuf[1] = 0;
++ msgbuf[2] = 0;
++
++ memset(&msg, 0, sizeof(msg));
++ msg.addr = c->addr;
++ msg.buf = msgbuf;
++ msg.len = 3;
++ msg.flags = I2C_M_RD;
++
++ ret = i2c_transfer(c->adapter, &msg, 1);
++
++ *reg = (msgbuf[0] << 16 | msgbuf[1] << 8 | msgbuf[2]);
++
++ ret = (ret == 1) ? 0 : -1;
++ return ret;
++}
++
++static int motor_write(struct i2c_client *c, u32 reg)
++{
++ int ret;
++ struct i2c_msg msg;
++ u8 msgbuf[3];
++
++ memset(&msg, 0, sizeof(msg));
++ msgbuf[0] = (reg & 0x00FFFFFFFF) >> 16;
++ msgbuf[1] = (reg & 0x0000FFFF) >> 8 ;
++ msgbuf[2] = reg;
++
++ msg.addr = c->addr;
++ msg.flags = 0;
++ msg.buf = msgbuf;
++ msg.len = 3;
++
++ ret = i2c_transfer(c->adapter, &msg, 1);
++
++ ret = (ret == 1) ? 0 : -1;
++ return ret;
++}
++
++static int s5k4e1_motor_goto_position(struct i2c_client *c,
++ unsigned short code,
++ struct s5k4e1_motor *config,
++ unsigned int step)
++{
++ int max_code, min_code;
++ int timeout = 25; /*TODO: check the timeout time */
++ u8 cmdh, cmdl, finished;
++ u32 cmd = 0, val = 0;
++
++ max_code = config->macro_code;
++ min_code = config->infin_code;
++
++ if (code > max_code)
++ code = max_code;
++ if (code < min_code)
++ code = min_code;
++
++ cmdh = MOTOR_DAC_CTRL_MODE_1 | (code >> 8); /* PS EN x x M W TD9 TD8*/
++ cmdl = code; /* TD7 ~ TD0 */
++ cmd |= (cmdh << 16) | (cmdl << 8);
++
++ dprintk(1, "cmdh: %x, cmdl: %x, cmd: %x", cmdh, cmdl, cmd);
++ dprintk(1, "DAC code: %x", code);
++
++ motor_write(c, cmd);
++ finished = 0;
++ while ((!finished) && timeout--) {
++ msleep(1);
++ motor_read(c, &val);
++ cmdh = val >> 16;
++ cmdl = val >> 8;
++
++ dprintk(1, "cmdh & MOTOR_F = %x", cmdh & MOTOR_F);
++ finished = cmdh & MOTOR_F;
++ finished = (finished) ? 0 : 1;
++ };
++
++ if (finished) {
++ dprintk(1, "Moving from code %x to code %x takes %d ms.",
++ config->cur_code, code, 25-timeout);
++ return 0;
++ } else {
++ eprintk("Unable to move motor to step %d, TIMEOUT!!", step);
++ return -1;
++ }
++
++}
++
++int s5k4e1_motor_wakeup(struct i2c_client *client)
++{
++ /* hardware wakeup: set PS = 1 */
++ return motor_write(client, 0xC00000);
++}
++
++int s5k4e1_motor_standby(struct i2c_client *client)
++{
++ /* hardware standby: set PS = 0 */
++ return motor_write(client, 0x400000);
++}
++
++int s5k4e1_motor_init(struct i2c_client *client, struct s5k4e1_motor *config)
++{
++
++ int ret;
++ int infin_cur, macro_cur;
++ int step_res, step_time;
++ int val;
++
++ DBG_entering;
++ infin_cur = MAX(MOTOR_INFIN_CUR, MOTOR_DAC_MIN_CUR);
++ macro_cur = MIN(MOTOR_MACRO_CUR, MOTOR_DAC_MAX_CUR);
++ step_res = 1 << MOTOR_STEP_SHIFT;
++ step_time = MOTOR_STEP_TIME;
++
++ /*config->motor = client;*/
++ config->infin_cur = infin_cur;
++ config->macro_cur = macro_cur;
++
++ config->infin_code = MOTOR_INFIN_CODE;
++ config->macro_code = MOTOR_MACRO_CODE;
++
++ config->max_step = ((config->macro_code - config->infin_code)
++ >> MOTOR_STEP_SHIFT) + 1;
++ config->step_res = step_res;
++ config->step_time = step_time;
++
++ dprintk(1, "max_step: %d, step_res: %d, step_time: %d",
++ config->max_step, step_res, step_time);
++
++ /* Set motor step time and resolution */
++ val = (MOTOR_DAC_CTRL_MODE_0 << 16) | (step_res << 8) | step_time;
++ ret = motor_write(client, val);
++
++ /* Note here, maybe macro_code */
++ ret |= s5k4e1_motor_goto_position(client, config->infin_code,
++ config, 0);
++ if (!ret) {
++ config->cur_code = config->infin_code;
++ dprintk(1, "Motor initialization success!");
++ } else
++ eprintk("Error while initializing motor!!!");
++
++ return ret;
++}
++
++int s5k4e1_motor_set_focus(struct i2c_client *c,
++ unsigned int step,
++ struct s5k4e1_motor *config)
++{
++ int s_code, ret;
++ int max_step = config->max_step;
++ unsigned int val = step;
++
++ if (val > max_step)
++ val = max_step;
++
++ s_code = (val << MOTOR_STEP_SHIFT);
++ s_code += config->infin_code;
++
++ ret = s5k4e1_motor_goto_position(c, s_code, config, step);
++ if (!ret)
++ config->cur_code = s_code;
++
++ return ret;
++}
++
++static int s5k4e1_motor_g_ctrl(struct v4l2_subdev *sd,
++ struct v4l2_control *ctrl)
++{
++ struct i2c_client *c = v4l2_get_subdevdata(sd);
++ struct s5k4e1_motor *config = to_motor_config(sd);
++ int ret;
++
++ DBG_entering;
++ ret = s5k4e1_motor_get_focus(c, &ctrl->value, config);
++ if (ret) {
++ eprintk("error call s5k4e1_motor_get_focue");
++ return ret;
++ }
++ DBG_leaving;
++ return 0;
++}
++
++static int s5k4e1_motor_s_ctrl(struct v4l2_subdev *sd,
++ struct v4l2_control *ctrl)
++{
++ struct i2c_client *c = v4l2_get_subdevdata(sd);
++ struct s5k4e1_motor *config = to_motor_config(sd);
++ int ret;
++
++ DBG_entering;
++ ret = s5k4e1_motor_set_focus(c, ctrl->value, config);
++ if (ret) {
++ eprintk("error call s5k4e1_motor_set_focue");
++ return ret;
++ }
++ DBG_leaving;
++ return 0;
++}
++
++int s5k4e1_motor_get_focus(struct i2c_client *c,
++ unsigned int *step,
++ struct s5k4e1_motor *config)
++{
++ int ret_step;
++
++ ret_step = ((config->cur_code - config->infin_code)
++ >> MOTOR_STEP_SHIFT);
++
++ if (ret_step <= config->max_step)
++ *step = ret_step;
++ else
++ *step = config->max_step;
++
++ return 0;
++}
++
++int s5k4e1_motor_max_step(struct i2c_client *c,
++ unsigned int *max_code,
++ struct s5k4e1_motor *config)
++{
++ if (config->max_step != 0)
++ *max_code = config->max_step;
++ return 0;
++
++}
++
++static int s5k4e1_motor_queryctrl(struct v4l2_subdev *sd,
++ struct v4l2_queryctrl *qc)
++{
++ struct s5k4e1_motor *config = to_motor_config(sd);
++
++ DBG_entering;
++ dprintk(1, "got focus range of %d", config->max_step);
++ if (config->max_step != 0)
++ qc->maximum = config->max_step;
++ DBG_leaving;
++ return 0;
++}
++
++static const struct v4l2_subdev_core_ops s5k4e1_motor_core_ops = {
++ .g_ctrl = s5k4e1_motor_g_ctrl,
++ .s_ctrl = s5k4e1_motor_s_ctrl,
++ .queryctrl = s5k4e1_motor_queryctrl,
++};
++
++static const struct v4l2_subdev_ops s5k4e1_motor_ops = {
++ .core = &s5k4e1_motor_core_ops,
++};
++
++static int s5k4e1_motor_detect(struct i2c_client *client)
++{
++ struct i2c_adapter *adapter = client->adapter;
++ int adap_id = i2c_adapter_id(adapter);
++
++ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
++ eprintk("error i2c check func");
++ return -ENODEV;
++ }
++
++ if (adap_id != 1) {
++ eprintk("adap_id != 1");
++ return -ENODEV;
++ }
++
++ if (s5k4e1_motor_wakeup(client))
++ eprintk("unable to wakeup s5k4e1 motor.");
++
++ return 0;
++}
++
++static int s5k4e1_motor_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ struct s5k4e1_motor *info;
++ struct v4l2_subdev *sd;
++ int ret = -1;
++/* struct i2c_client *motor; */
++
++ DBG_entering;
++ v4l_info(client, "chip found @ 0x%x (%s)\n",
++ client->addr << 1, client->adapter->name);
++ /*
++ * Setup sensor configuration structure
++ */
++ info = kzalloc(sizeof(struct s5k4e1_motor), GFP_KERNEL);
++ if (!info) {
++ eprintk("fail to malloc for ci_motor");
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ ret = s5k4e1_motor_detect(client);
++ if (ret) {
++ eprintk("error s5k4e1_motor_detect");
++ goto out_free;
++ }
++
++ sd = &info->sd;
++ v4l2_i2c_subdev_init(sd, client, &s5k4e1_motor_ops);
++
++ /*
++ * Initialization S5K4E1
++ * then turn into standby mode
++ */
++ ret = s5k4e1_motor_init(client, info);
++ if (ret) {
++ eprintk("error calling s5k4e1_motor_init");
++ goto out_free;
++ }
++
++ ret = 0;
++ goto out;
++
++out_free:
++ kfree(info);
++ DBG_leaving;
++out:
++ return ret;
++}
++
++/*
++ * XXX: Need to be checked
++ */
++static int s5k4e1_motor_remove(struct i2c_client *client)
++{
++ struct v4l2_subdev *sd = i2c_get_clientdata(client);
++
++ DBG_entering;
++
++ v4l2_device_unregister_subdev(sd);
++ kfree(to_motor_config(sd));
++
++ DBG_leaving;
++ return 0;
++}
++
++static const struct i2c_device_id s5k4e1_motor_id[] = {
++ {"s5k4e1_motor", 0},
++ {}
++};
++MODULE_DEVICE_TABLE(i2c, s5k4e1_motor_id);
++
++static struct v4l2_i2c_driver_data v4l2_i2c_data = {
++ .name = "s5k4e1_motor",
++ .probe = s5k4e1_motor_probe,
++ .remove = s5k4e1_motor_remove,
++ /* .suspend = ov5630_suspend,
++ * .resume = ov5630_resume, */
++ .id_table = s5k4e1_motor_id,
++};
++MODULE_AUTHOR("Xiaolin Zhang <xiaolin.zhang@intel.com>");
++MODULE_DESCRIPTION("A low-level driver for Samsung S5K4E1 sensor motor");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/media/video/mrstci/mrsts5k4e1_motor/mrsts5k4e1_motor.h b/drivers/media/video/mrstci/mrsts5k4e1_motor/mrsts5k4e1_motor.h
+new file mode 100644
+index 0000000..04f9436
+--- /dev/null
++++ b/drivers/media/video/mrstci/mrsts5k4e1_motor/mrsts5k4e1_motor.h
+@@ -0,0 +1,102 @@
++/*
++ * Support for Moorestown Langwell Camera Imaging ISP subsystem.
++ *
++ * Copyright (c) 2009 Intel Corporation. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version
++ * 2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
++ * 02110-1301, USA.
++ *
++ *
++ * Xiaolin Zhang <xiaolin.zhang@intel.com>
++ */
++
++#include <media/v4l2-subdev.h>
++
++/* DAC output max current (mA) */
++#define MOTOR_DAC_MAX_CUR 125
++/* DAC output min current (mA) */
++#define MOTOR_DAC_MIN_CUR 1
++/* DAC max code (Hex) */
++#define MOTOR_DAC_CODE_MAX 0x3FF
++/* DAC min code (Hex) */
++#define MOTOR_DAC_CODE_MIN 0x0
++
++/* VCM start code (Hex) */
++#define MOTOR_INFIN_CODE 0x120
++/* VCM stop code (Hex) */
++#define MOTOR_MACRO_CODE 0x205
++
++#define MOTOR_STEP_SHIFT 4 /* Step res = 2^4 = 10H */
++#define MOTOR_STEP_TIME 20 /* Step time = 50us x 20d = 1ms */
++
++/* VCM start current (mA) */
++#define MOTOR_INFIN_CUR ((MOTOR_DAC_MAX_CUR / MOTOR_DAC_CODE_MAX) \
++ * MOTOR_INFIN_CODE + 1)
++/* VCM max current for Macro (mA) */
++#define MOTOR_MACRO_CUR ((MOTOR_DAC_MAX_CUR / MOTOR_DAC_CODE_MAX) \
++ * MOTOR_MACRO_CODE + 1)
++
++
++#define MOTOR_DAC_BIT_RES 10
++#define MOTOR_DAC_MAX_CODE ((1 << MOTOR_DAC_BIT_RES) - 1)
++
++#define MOTOR_STEP_SHIFT 4
++
++#define MAX(x, y) ((x) > (y) ? (x) : (y))
++#define MIN(x, y) ((x) < (y) ? (x) : (y))
++
++/* DAC register related define */
++#define MOTOR_PS (1 << 7) /* power save */
++#define MOTOR_EN (1 << 6) /* out pin status*/
++#define MOTOR_M (1 << 3) /* mode select */
++#define MOTOR_W (1 << 2) /* register address */
++#define MOTOR_F (1 << 4) /* finish flag */
++
++#define MOTOR_DAC_CODE_L(x) (x & 0xff)
++#define MOTOR_DAC_CODE_H(x) ((x >> 8) & 0xf3)
++
++/* Step mode setting */
++#define MOTOR_DAC_CTRL_MODE_0 0xCC
++/* DAC code setting */
++#define MOTOR_DAC_CTRL_MODE_1 0xC8
++
++#define S5K4E1_MOTOR_ADDR (0x18 >> 1)
++/*#define POWER_EN_PIN 7*/
++#define GPIO_AF_PD 95
++
++#define DEBUG 0
++
++struct s5k4e1_motor{
++ /*struct i2c_client *motor;*/
++ unsigned int infin_cur;
++ unsigned int infin_code;
++ unsigned int macro_cur;
++ unsigned int macro_code;
++ unsigned int max_step;
++ unsigned int cur_code;
++ unsigned int step_res;
++ unsigned int step_time;
++ struct v4l2_subdev sd;
++};
++
++extern int s5k4e1_motor_init(struct i2c_client *client,
++ struct s5k4e1_motor *config);
++extern int s5k4e1_motor_standby(struct i2c_client *client);
++extern int s5k4e1_motor_wakeup(struct i2c_client *client);
++extern int s5k4e1_motor_set_focus(struct i2c_client *c, unsigned int step,
++ struct s5k4e1_motor *config);
++extern int s5k4e1_motor_get_focus(struct i2c_client *c, unsigned int *step,
++ struct s5k4e1_motor *config);
++extern int s5k4e1_motor_max_step(struct i2c_client *c, unsigned int *max_code,
++ struct s5k4e1_motor *config);
+--
+1.6.0.6
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-omap-rx-51-enable-tsc2005.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-omap-rx-51-enable-tsc2005.patch
new file mode 100644
index 0000000..49374c9
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-omap-rx-51-enable-tsc2005.patch
@@ -0,0 +1,122 @@
+From fa1d43818de208bdc2fd789777c538ab4aa7956a Mon Sep 17 00:00:00 2001
+From: Aaro Koskinen <Aaro.Koskinen@nokia.com>
+Date: Fri, 12 Mar 2010 16:54:34 +0000
+Subject: [PATCH 8/10] omap: rx-51: enable tsc2005
+
+Patch-mainline: 2.6.35
+Discussions: http://www.mail-archive.com/linux-omap@vger.kernel.org/msg26749.html
+
+Enable TSC2005 touchscreen driver on the RX-51 board.
+
+Signed-off-by: Aaro Koskinen <aaro.koskinen@nokia.com>
+---
+ arch/arm/configs/rx51_defconfig | 1
+ arch/arm/mach-omap2/board-rx51-peripherals.c | 46 +++++++++++++++++++++++++--
+ 2 files changed, 45 insertions(+), 2 deletions(-)
+
+Index: linux-2.6.33-master/arch/arm/configs/rx51_defconfig
+===================================================================
+--- linux-2.6.33-master.orig/arch/arm/configs/rx51_defconfig 2010-04-19 17:28:20.000000000 +0300
++++ linux-2.6.33-master/arch/arm/configs/rx51_defconfig 2010-04-19 17:28:28.000000000 +0300
+@@ -801,6 +801,7 @@
+ # CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+ # CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+ # CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
++CONFIG_TOUCHSCREEN_TSC2005=m
+ # CONFIG_TOUCHSCREEN_TSC2007 is not set
+ CONFIG_INPUT_MISC=y
+ # CONFIG_INPUT_ATI_REMOTE is not set
+Index: linux-2.6.33-master/arch/arm/mach-omap2/board-rx51-peripherals.c
+===================================================================
+--- linux-2.6.33-master.orig/arch/arm/mach-omap2/board-rx51-peripherals.c 2010-04-19 17:28:20.000000000 +0300
++++ linux-2.6.33-master/arch/arm/mach-omap2/board-rx51-peripherals.c 2010-04-19 17:28:28.000000000 +0300
+@@ -14,6 +14,7 @@
+ #include <linux/input.h>
+ #include <linux/input/matrix_keypad.h>
+ #include <linux/spi/spi.h>
++#include <linux/spi/tsc2005.h>
+ #include <linux/spi/wl12xx.h>
+ #include <linux/i2c.h>
+ #include <linux/i2c/twl.h>
+@@ -42,6 +43,9 @@
+ #define RX51_WL1251_POWER_GPIO 87
+ #define RX51_WL1251_IRQ_GPIO 42
+
++#define RX51_TSC2005_RESET_GPIO 104
++#define RX51_TSC2005_IRQ_GPIO 100
++
+ /* list all spi devices here */
+ enum {
+ RX51_SPI_WL1251,
+@@ -50,6 +54,7 @@
+ };
+
+ static struct wl12xx_platform_data wl1251_pdata;
++static struct tsc2005_platform_data tsc2005_pdata;
+
+ static struct omap2_mcspi_device_config wl1251_mcspi_config = {
+ .turbo_mode = 0,
+@@ -87,10 +92,10 @@
+ .modalias = "tsc2005",
+ .bus_num = 1,
+ .chip_select = 0,
+- /* .irq = OMAP_GPIO_IRQ(RX51_TSC2005_IRQ_GPIO),*/
++ .irq = OMAP_GPIO_IRQ(RX51_TSC2005_IRQ_GPIO),
+ .max_speed_hz = 6000000,
+ .controller_data = &tsc2005_mcspi_config,
+- /* .platform_data = &tsc2005_config,*/
++ .platform_data = &tsc2005_pdata,
+ },
+ };
+
+@@ -708,6 +713,42 @@
+
+ #endif
+
++static struct tsc2005_platform_data tsc2005_pdata = {
++ .ts_pressure_max = 2048,
++ .ts_pressure_fudge = 2,
++ .ts_x_max = 4096,
++ .ts_x_fudge = 4,
++ .ts_y_max = 4096,
++ .ts_y_fudge = 7,
++ .ts_x_plate_ohm = 280,
++ .esd_timeout_ms = 8000,
++};
++
++static void rx51_tsc2005_set_reset(bool enable)
++{
++ gpio_set_value(RX51_TSC2005_RESET_GPIO, enable);
++}
++
++static void __init rx51_init_tsc2005(void)
++{
++ int r;
++
++ r = gpio_request(RX51_TSC2005_IRQ_GPIO, "tsc2005 IRQ");
++ if (r >= 0)
++ gpio_direction_input(RX51_TSC2005_IRQ_GPIO);
++ else
++ printk(KERN_ERR "unable to get %s GPIO\n", "tsc2005 IRQ");
++
++ r = gpio_request(RX51_TSC2005_RESET_GPIO, "tsc2005 reset");
++ if (r >= 0) {
++ gpio_direction_output(RX51_TSC2005_RESET_GPIO, 1);
++ tsc2005_pdata.set_reset = rx51_tsc2005_set_reset;
++ } else {
++ printk(KERN_ERR "unable to get %s GPIO\n", "tsc2005 reset");
++ tsc2005_pdata.esd_timeout_ms = 0;
++ }
++}
++
+ #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
+
+ static struct omap_smc91x_platform_data board_smc91x_data = {
+@@ -792,6 +833,7 @@
+ board_smc91x_init();
+ rx51_add_gpio_keys();
+ rx51_init_wl1251();
++ rx51_init_tsc2005();
+ spi_register_board_info(rx51_peripherals_spi_board_info,
+ ARRAY_SIZE(rx51_peripherals_spi_board_info));
+ }
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-phylib-Add-module-table-to-all-existing-phy-drivers.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-phylib-Add-module-table-to-all-existing-phy-drivers.patch
new file mode 100644
index 0000000..8066f56
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-phylib-Add-module-table-to-all-existing-phy-drivers.patch
@@ -0,0 +1,247 @@
+From 4e4f10f6498bc5038c0a110b5f21682fcb5578d7 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw2@infradead.org>
+Date: Fri, 2 Apr 2010 01:05:56 +0000
+Subject: [PATCH] phylib: Add module table to all existing phy drivers
+Patch-mainline: 2.6.35
+Git-commit: 8626d3b4328061f5b82b11ae1d6918a0c3602f42
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6.git
+
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Yin Kangkai <kangkai.yin@intel.com>
+---
+ drivers/net/phy/bcm63xx.c | 8 ++++++++
+ drivers/net/phy/broadcom.c | 17 +++++++++++++++++
+ drivers/net/phy/cicada.c | 8 ++++++++
+ drivers/net/phy/davicom.c | 9 +++++++++
+ drivers/net/phy/et1011c.c | 7 +++++++
+ drivers/net/phy/icplus.c | 7 +++++++
+ drivers/net/phy/lxt.c | 8 ++++++++
+ drivers/net/phy/marvell.c | 13 +++++++++++++
+ drivers/net/phy/national.c | 7 +++++++
+ drivers/net/phy/qsemi.c | 7 +++++++
+ drivers/net/phy/realtek.c | 7 +++++++
+ drivers/net/phy/smsc.c | 11 +++++++++++
+ drivers/net/phy/ste10Xp.c | 8 ++++++++
+ drivers/net/phy/vitesse.c | 8 ++++++++
+ 14 files changed, 125 insertions(+)
+
+--- a/drivers/net/phy/bcm63xx.c
++++ b/drivers/net/phy/bcm63xx.c
+@@ -130,3 +130,11 @@ static void __exit bcm63xx_phy_exit(void
+
+ module_init(bcm63xx_phy_init);
+ module_exit(bcm63xx_phy_exit);
++
++static struct mdio_device_id bcm63xx_tbl[] = {
++ { 0x00406000, 0xfffffc00 },
++ { 0x002bdc00, 0xfffffc00 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, bcm63xx_tbl);
+--- a/drivers/net/phy/broadcom.c
++++ b/drivers/net/phy/broadcom.c
+@@ -21,6 +21,7 @@
+ #define PHY_ID_BCM50610 0x0143bd60
+ #define PHY_ID_BCM50610M 0x0143bd70
+ #define PHY_ID_BCM57780 0x03625d90
++#define PHY_ID_BCMAC131 0x0143bc70
+
+ #define BRCM_PHY_MODEL(phydev) \
+ ((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask)
+@@ -911,3 +912,19 @@ static void __exit broadcom_exit(void)
+
+ module_init(broadcom_init);
+ module_exit(broadcom_exit);
++
++static struct mdio_device_id broadcom_tbl[] = {
++ { 0x00206070, 0xfffffff0 },
++ { 0x002060e0, 0xfffffff0 },
++ { 0x002060c0, 0xfffffff0 },
++ { 0x002060b0, 0xfffffff0 },
++ { 0x0143bca0, 0xfffffff0 },
++ { 0x0143bcb0, 0xfffffff0 },
++ { PHY_ID_BCM50610, 0xfffffff0 },
++ { PHY_ID_BCM50610M, 0xfffffff0 },
++ { PHY_ID_BCM57780, 0xfffffff0 },
++ { PHY_ID_BCMAC131, 0xfffffff0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, broadcom_tbl);
+--- a/drivers/net/phy/cicada.c
++++ b/drivers/net/phy/cicada.c
+@@ -159,3 +159,11 @@ static void __exit cicada_exit(void)
+
+ module_init(cicada_init);
+ module_exit(cicada_exit);
++
++static struct mdio_device_id cicada_tbl[] = {
++ { 0x000fc410, 0x000ffff0 },
++ { 0x000fc440, 0x000fffc0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, cicada_tbl);
+--- a/drivers/net/phy/davicom.c
++++ b/drivers/net/phy/davicom.c
+@@ -219,3 +219,12 @@ static void __exit davicom_exit(void)
+
+ module_init(davicom_init);
+ module_exit(davicom_exit);
++
++static struct mdio_device_id davicom_tbl[] = {
++ { 0x0181b880, 0x0ffffff0 },
++ { 0x0181b8a0, 0x0ffffff0 },
++ { 0x00181b80, 0x0ffffff0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, davicom_tbl);
+--- a/drivers/net/phy/et1011c.c
++++ b/drivers/net/phy/et1011c.c
+@@ -111,3 +111,10 @@ static void __exit et1011c_exit(void)
+
+ module_init(et1011c_init);
+ module_exit(et1011c_exit);
++
++static struct mdio_device_id et1011c_tbl[] = {
++ { 0x0282f014, 0xfffffff0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, et1011c_tbl);
+--- a/drivers/net/phy/icplus.c
++++ b/drivers/net/phy/icplus.c
+@@ -132,3 +132,10 @@ static void __exit ip175c_exit(void)
+
+ module_init(ip175c_init);
+ module_exit(ip175c_exit);
++
++static struct mdio_device_id icplus_tbl[] = {
++ { 0x02430d80, 0x0ffffff0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, icplus_tbl);
+--- a/drivers/net/phy/lxt.c
++++ b/drivers/net/phy/lxt.c
+@@ -174,3 +174,11 @@ static void __exit lxt_exit(void)
+
+ module_init(lxt_init);
+ module_exit(lxt_exit);
++
++static struct mdio_device_id lxt_tbl[] = {
++ { 0x78100000, 0xfffffff0 },
++ { 0x001378e0, 0xfffffff0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, lxt_tbl);
+--- a/drivers/net/phy/marvell.c
++++ b/drivers/net/phy/marvell.c
+@@ -611,3 +611,16 @@ static void __exit marvell_exit(void)
+
+ module_init(marvell_init);
+ module_exit(marvell_exit);
++
++static struct mdio_device_id marvell_tbl[] = {
++ { 0x01410c60, 0xfffffff0 },
++ { 0x01410c90, 0xfffffff0 },
++ { 0x01410cc0, 0xfffffff0 },
++ { 0x01410e10, 0xfffffff0 },
++ { 0x01410cb0, 0xfffffff0 },
++ { 0x01410cd0, 0xfffffff0 },
++ { 0x01410e30, 0xfffffff0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, marvell_tbl);
+--- a/drivers/net/phy/national.c
++++ b/drivers/net/phy/national.c
+@@ -153,3 +153,10 @@ MODULE_LICENSE("GPL");
+
+ module_init(ns_init);
+ module_exit(ns_exit);
++
++static struct mdio_device_id ns_tbl[] = {
++ { DP83865_PHY_ID, 0xfffffff0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, ns_tbl);
+--- a/drivers/net/phy/qsemi.c
++++ b/drivers/net/phy/qsemi.c
+@@ -138,3 +138,10 @@ static void __exit qs6612_exit(void)
+
+ module_init(qs6612_init);
+ module_exit(qs6612_exit);
++
++static struct mdio_device_id qs6612_tbl[] = {
++ { 0x00181440, 0xfffffff0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, qs6612_tbl);
+--- a/drivers/net/phy/realtek.c
++++ b/drivers/net/phy/realtek.c
+@@ -78,3 +78,10 @@ static void __exit realtek_exit(void)
+
+ module_init(realtek_init);
+ module_exit(realtek_exit);
++
++static struct mdio_device_id realtek_tbl[] = {
++ { 0x001cc912, 0x001fffff },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, realtek_tbl);
+--- a/drivers/net/phy/smsc.c
++++ b/drivers/net/phy/smsc.c
+@@ -236,3 +236,14 @@ MODULE_LICENSE("GPL");
+
+ module_init(smsc_init);
+ module_exit(smsc_exit);
++
++static struct mdio_device_id smsc_tbl[] = {
++ { 0x0007c0a0, 0xfffffff0 },
++ { 0x0007c0b0, 0xfffffff0 },
++ { 0x0007c0c0, 0xfffffff0 },
++ { 0x0007c0d0, 0xfffffff0 },
++ { 0x0007c0f0, 0xfffffff0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, smsc_tbl);
+--- a/drivers/net/phy/ste10Xp.c
++++ b/drivers/net/phy/ste10Xp.c
+@@ -132,6 +132,14 @@ static void __exit ste10Xp_exit(void)
+ module_init(ste10Xp_init);
+ module_exit(ste10Xp_exit);
+
++static struct mdio_device_id ste10Xp_tbl[] = {
++ { STE101P_PHY_ID, 0xfffffff0 },
++ { STE100P_PHY_ID, 0xffffffff },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, ste10Xp_tbl);
++
+ MODULE_DESCRIPTION("STMicroelectronics STe10Xp PHY driver");
+ MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+ MODULE_LICENSE("GPL");
+--- a/drivers/net/phy/vitesse.c
++++ b/drivers/net/phy/vitesse.c
+@@ -191,3 +191,11 @@ static void __exit vsc82xx_exit(void)
+
+ module_init(vsc82xx_init);
+ module_exit(vsc82xx_exit);
++
++static struct mdio_device_id vitesse_tbl[] = {
++ { PHY_ID_VSC8244, 0x000fffc0 },
++ { PHY_ID_VSC8221, 0x000ffff0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, vitesse_tbl);
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-phylib-Support-phy-module-autoloading.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-phylib-Support-phy-module-autoloading.patch
new file mode 100644
index 0000000..fae435e
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/linux-2.6.35-phylib-Support-phy-module-autoloading.patch
@@ -0,0 +1,150 @@
+From 8626d3b4328061f5b82b11ae1d6918a0c3602f42 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw2@infradead.org>
+Date: Fri, 2 Apr 2010 01:05:27 +0000
+Subject: [PATCH] phylib: Support phy module autoloading
+Patch-mainline: 2.6.35
+Git-commit: 8626d3b4328061f5b82b11ae1d6918a0c3602f42
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6.git
+
+We don't use the normal hotplug mechanism because it doesn't work. It will
+load the module some time after the device appears, but that's not good
+enough for us -- we need the driver loaded _immediately_ because otherwise
+the NIC driver may just abort and then the phy 'device' goes away.
+
+[bwh: s/phy/mdio/ in module alias, kerneldoc for struct mdio_device_id]
+
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Acked-by: Andy Fleming <afleming@freescale.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/phy/phy_device.c | 12 ++++++++++++
+ include/linux/mod_devicetable.h | 26 ++++++++++++++++++++++++++
+ include/linux/phy.h | 1 +
+ scripts/mod/file2alias.c | 26 ++++++++++++++++++++++++++
+ 4 files changed, 65 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index db17945..1a99bb2 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -149,6 +149,7 @@ EXPORT_SYMBOL(phy_scan_fixups);
+ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
+ {
+ struct phy_device *dev;
++
+ /* We allocate the device, and initialize the
+ * default values */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+@@ -179,6 +180,17 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
+ mutex_init(&dev->lock);
+ INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
+
++ /* Request the appropriate module unconditionally; don't
++ bother trying to do so only if it isn't already loaded,
++ because that gets complicated. A hotplug event would have
++ done an unconditional modprobe anyway.
++ We don't do normal hotplug because it won't work for MDIO
++ -- because it relies on the device staying around for long
++ enough for the driver to get loaded. With MDIO, the NIC
++ driver will get bored and give up as soon as it finds that
++ there's no driver _already_ loaded. */
++ request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id));
++
+ return dev;
+ }
+ EXPORT_SYMBOL(phy_device_create);
+diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
+index f58e9d8..55f1f9c 100644
+--- a/include/linux/mod_devicetable.h
++++ b/include/linux/mod_devicetable.h
+@@ -474,4 +474,30 @@ struct platform_device_id {
+ __attribute__((aligned(sizeof(kernel_ulong_t))));
+ };
+
++#define MDIO_MODULE_PREFIX "mdio:"
++
++#define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d"
++#define MDIO_ID_ARGS(_id) \
++ (_id)>>31, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \
++ ((_id)>>27) & 1, ((_id)>>26) & 1, ((_id)>>25) & 1, ((_id)>>24) & 1, \
++ ((_id)>>23) & 1, ((_id)>>22) & 1, ((_id)>>21) & 1, ((_id)>>20) & 1, \
++ ((_id)>>19) & 1, ((_id)>>18) & 1, ((_id)>>17) & 1, ((_id)>>16) & 1, \
++ ((_id)>>15) & 1, ((_id)>>14) & 1, ((_id)>>13) & 1, ((_id)>>12) & 1, \
++ ((_id)>>11) & 1, ((_id)>>10) & 1, ((_id)>>9) & 1, ((_id)>>8) & 1, \
++ ((_id)>>7) & 1, ((_id)>>6) & 1, ((_id)>>5) & 1, ((_id)>>4) & 1, \
++ ((_id)>>3) & 1, ((_id)>>2) & 1, ((_id)>>1) & 1, (_id) & 1
++
++/**
++ * struct mdio_device_id - identifies PHY devices on an MDIO/MII bus
++ * @phy_id: The result of
++ * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&PHYSID2)) & @phy_id_mask
++ * for this PHY type
++ * @phy_id_mask: Defines the significant bits of @phy_id. A value of 0
++ * is used to terminate an array of struct mdio_device_id.
++ */
++struct mdio_device_id {
++ __u32 phy_id;
++ __u32 phy_id_mask;
++};
++
+ #endif /* LINUX_MOD_DEVICETABLE_H */
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index d9bce4b..987e111 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -24,6 +24,7 @@
+ #include <linux/mii.h>
+ #include <linux/timer.h>
+ #include <linux/workqueue.h>
++#include <linux/mod_devicetable.h>
+
+ #include <asm/atomic.h>
+
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index 220213e..36a60a8 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -796,6 +796,28 @@ static int do_platform_entry(const char *filename,
+ return 1;
+ }
+
++static int do_mdio_entry(const char *filename,
++ struct mdio_device_id *id, char *alias)
++{
++ int i;
++
++ alias += sprintf(alias, MDIO_MODULE_PREFIX);
++
++ for (i = 0; i < 32; i++) {
++ if (!((id->phy_id_mask >> (31-i)) & 1))
++ *(alias++) = '?';
++ else if ((id->phy_id >> (31-i)) & 1)
++ *(alias++) = '1';
++ else
++ *(alias++) = '0';
++ }
++
++ /* Terminate the string */
++ *alias = 0;
++
++ return 1;
++}
++
+ /* Ignore any prefix, eg. some architectures prepend _ */
+ static inline int sym_is(const char *symbol, const char *name)
+ {
+@@ -943,6 +965,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
+ do_table(symval, sym->st_size,
+ sizeof(struct platform_device_id), "platform",
+ do_platform_entry, mod);
++ else if (sym_is(symname, "__mod_mdio_device_table"))
++ do_table(symval, sym->st_size,
++ sizeof(struct mdio_device_id), "mdio",
++ do_mdio_entry, mod);
+ free(zeros);
+ }
+
+--
+1.6.5
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/rtl8192_carrier_off.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/rtl8192_carrier_off.patch
new file mode 100644
index 0000000..9148056
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/rtl8192_carrier_off.patch
@@ -0,0 +1,52 @@
+Toggling the link carrier is a non sense and is the grossest locking I can
+think of. Moreover, it's giving a completely inaccurate status to userspace
+who could for example decide to turn the interface down on carrier off
+detection.
+
+Signed-off-by: Samuel Ortiz <sameo@linux.intel.com>
+---
+ drivers/staging/rtl8192e/ieee80211/ieee80211_softmac_wx.c | 2 --
+ drivers/staging/rtl8192e/r8192E_core.c | 1 -
+ 2 files changed, 3 deletions(-)
+
+Index: b/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac_wx.c
+===================================================================
+--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac_wx.c 2010-01-15 19:31:39.000000000 +0100
++++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac_wx.c 2010-01-15 19:32:07.000000000 +0100
+@@ -326,7 +326,6 @@ void ieee80211_wx_sync_scan_wq(struct ie
+ int b40M = 0;
+ static int count = 0;
+ chan = ieee->current_network.channel;
+- netif_carrier_off(ieee->dev);
+
+ if (ieee->data_hard_stop)
+ ieee->data_hard_stop(ieee->dev);
+@@ -372,7 +371,6 @@ void ieee80211_wx_sync_scan_wq(struct ie
+ if(ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER)
+ ieee80211_start_send_beacons(ieee);
+
+- netif_carrier_on(ieee->dev);
+ count = 0;
+ up(&ieee->wx_sem);
+
+Index: b/drivers/staging/rtl8192e/r8192E_core.c
+===================================================================
+--- a/drivers/staging/rtl8192e/r8192E_core.c 2010-01-15 20:03:06.000000000 +0100
++++ b/drivers/staging/rtl8192e/r8192E_core.c 2010-01-15 20:03:11.000000000 +0100
+@@ -4046,7 +4046,6 @@ RESET_START:
+ del_timer_sync(&ieee->associate_timer);
+ cancel_delayed_work(&ieee->associate_retry_wq);
+ ieee80211_stop_scan(ieee);
+- netif_carrier_off(dev);
+ up(&ieee->wx_sem);
+ }
+ else{
+
+--
+Intel Open Source Technology Centre
+http://oss.intel.com/
+_______________________________________________
+Moblin-kernel mailing list
+Moblin-kernel@linux.intel.com
+http://linux.intel.com/mailman/listinfo/moblin-kernel
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/rtl8192_no_WAP_unassoc.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/rtl8192_no_WAP_unassoc.patch
new file mode 100644
index 0000000..436945b
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/rtl8192_no_WAP_unassoc.patch
@@ -0,0 +1,40 @@
+If we're not associated, we should not send wireless events to let userspace
+know that we just left an ESSID, simply because we havent yet joined it.
+If we keep on doing that, wpa_supplicant could receive such events while
+actually trying to join an ESSID, and thus decide to stop trying. This leads
+to a lot of connection failures as this driver seems to be sending GIWAP
+events quite a lot.
+
+Signed-off-by: Samuel Ortiz <sameo@linux.intel.com>
+---
+ drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+Index: b/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c
+===================================================================
+--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c 2010-01-15 16:57:48.000000000 +0100
++++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c 2010-01-15 19:31:56.000000000 +0100
+@@ -2726,11 +2726,12 @@ void ieee80211_disassociate(struct ieee8
+ if(IS_DOT11D_ENABLE(ieee))
+ Dot11d_Reset(ieee);
+ #endif
+- ieee->state = IEEE80211_NOLINK;
+ ieee->is_set_key = false;
+ ieee->link_change(ieee->dev);
+ //HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
+- notify_wx_assoc_event(ieee);
++ if (ieee->state == IEEE80211_LINKED)
++ notify_wx_assoc_event(ieee);
++ ieee->state = IEEE80211_NOLINK;
+
+ }
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
+
+--
+Intel Open Source Technology Centre
+http://oss.intel.com/
+_______________________________________________
+Moblin-kernel mailing list
+Moblin-kernel@linux.intel.com
+http://linux.intel.com/mailman/listinfo/moblin-kernel
+
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.33.2/rtl8192_no_autoconnect.patch b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/rtl8192_no_autoconnect.patch
new file mode 100644
index 0000000..9119535
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.33.2/rtl8192_no_autoconnect.patch
@@ -0,0 +1,41 @@
+Getting a probe response after sending a probe request to a specific SSID
+doesnt mean we're trying to associate with this SSID.
+wpa_supplicant should be the only one deciding when to join an SSID, not the
+kernel.
+
+Signed-off-by: Samuel Ortiz <sameo@linux.intel.com>
+---
+ drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+Index: b/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c
+===================================================================
+--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c 2010-01-15 16:56:47.000000000 +0100
++++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c 2010-01-15 16:57:03.000000000 +0100
+@@ -2716,8 +2716,6 @@ static inline void ieee80211_process_pro
+ #endif
+ memcpy(target, &network, sizeof(*target));
+ list_add_tail(&target->list, &ieee->network_list);
+- if(ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE)
+- ieee80211_softmac_new_net(ieee,&network);
+ } else {
+ IEEE80211_DEBUG_SCAN("Updating '%s' (" MAC_FMT ") via %s.\n",
+ escape_essid(target->ssid,
+@@ -2744,8 +2742,6 @@ static inline void ieee80211_process_pro
+ //YJ,add,080819,for hidden ap,end
+
+ update_network(target, &network);
+- if(renew && (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE))
+- ieee80211_softmac_new_net(ieee,&network);
+ }
+
+ spin_unlock_irqrestore(&ieee->lock, flags);
+
+--
+Intel Open Source Technology Centre
+http://oss.intel.com/
+_______________________________________________
+Moblin-kernel mailing list
+Moblin-kernel@linux.intel.com
+http://linux.intel.com/mailman/listinfo/moblin-kernel
+
diff --git a/meta-moblin/packages/linux/linux-moblin_2.6.33.2.bb b/meta-moblin/packages/linux/linux-moblin_2.6.33.2.bb
new file mode 100644
index 0000000..d0128e3
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin_2.6.33.2.bb
@@ -0,0 +1,108 @@
+require linux-moblin.inc
+
+PR = "r0"
+
+DEFAULT_PREFERENCE = "-1"
+DEFAULT_PREFERENCE_netbook = "1"
+
+SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${PV}.tar.bz2 \
+ file://linux-2.6.29-dont-wait-for-mouse.patch;patch=1 \
+ file://linux-2.6.29-kms-after-sata.patch;patch=1 \
+ file://linux-2.6.29-kms-edid-cache.patch;patch=1 \
+ file://linux-2.6.29-kms-run-async.patch;patch=1 \
+ file://linux-2.6.29-silence-acer-message.patch;patch=1 \
+ file://linux-2.6.29-sreadahead.patch;patch=1 \
+ file://linux-2.6.31-silence-wacom.patch;patch=1 \
+# file://linux-2.6.33-ahci-alpm-accounting.patch;patch=1 \
+# file://linux-2.6.33-ahci-fix-oops-on-dummy-port.patch;patch=1 \
+# file://linux-2.6.33-i2c-workaround-for-aava-koski-touchscreen.patch;patch=1 \
+ file://linux-2.6.33-rc8-timberdale.patch;patch=1 \
+ file://linux-2.6.33-rt2860-1-2.patch;patch=1 \
+ file://linux-2.6.33-rt2860-2-2.patch;patch=1 \
+ file://linux-2.6.33-timberdale-audio-fix.patch;patch=1 \
+ file://linux-2.6.33-vfs-tracepoints.patch;patch=1 \
+ file://linux-2.6.34-cando-dual-touch-driver.patch;patch=1 \
+ file://linux-2.6.34-CVE-tipc-Fix-oops-on-send-prior-to-entering-networked-mode.patch;patch=1 \
+ file://linux-2.6.34-cypress-touch-driver.patch;patch=1 \
+ file://linux-2.6.34-drm-i915-Ignore-LVDS-EDID-when-it-is-unavailabe-or-invalid.patch;patch=1 \
+# file://linux-2.6.34-enable-hid-dg-contact-count-stantum-and-cando-touch-drivers.patch;patch=1 \
+ file://linux-2.6.34-fix-marvell-firmware-path.patch;patch=1 \
+# file://linux-2.6.34-hack-to-fix-aava-camera-sensor-issue.patch;patch=1 \
+ file://linux-2.6.34-input-synaptics-clickpad-support.patch;patch=1 \
+# # file://linux-2.6.34-moorestown-aava-specific-changes-no-audio.patch;patch=1 \
+# # file://linux-2.6.34-moorestown-analog-accelerometer-driver.patch;patch=1 \
+# file://linux-2.6.34-moorestown-audio-driver-6.0-1-8.patch;patch=1 \
+# file://linux-2.6.34-moorestown-audio-driver-6.0-2-8.patch;patch=1 \
+# file://linux-2.6.34-moorestown-audio-driver-6.0-3-8.patch;patch=1 \
+# file://linux-2.6.34-moorestown-audio-driver-6.0-4-8.patch;patch=1 \
+# file://linux-2.6.34-moorestown-audio-driver-6.0-5-8.patch;patch=1 \
+# file://linux-2.6.34-moorestown-audio-driver-6.0-6-8.patch;patch=1 \
+# file://linux-2.6.34-moorestown-audio-driver-6.0-7-8.patch;patch=1 \
+# file://linux-2.6.34-moorestown-audio-driver-6.0-8-8.patch;patch=1 \
+# # file://linux-2.6.34-moorestown-ericsson-mbm-driver.patch;patch=1 \
+# # file://linux-2.6.34-moorestown-fix-hw-qh-prefetch-bug.patch;patch=1 \
+# # file://linux-2.6.34-moorestown-gpe-fix-for-sensor.patch;patch=1 \
+# # file://linux-2.6.34-moorestown-graphics-changes-for-aava-koski-dv1-hardware.patch;patch=1 \
+# # file://linux-2.6.34-moorestown-gtm501l-driver-1.2.patch;patch=1 \
+# # file://linux-2.6.34-moorestown-ifxgps-driver.patch;patch=1 \
+# file://linux-2.6.34-moorestown-img-graphics-driver-5.3.0.0007.patch;patch=1 \
+# # file://linux-2.6.34-moorestown-ipc-host-driver.patch;patch=1 \
+# file://linux-2.6.34-moorestown-keypad-driver.patch;patch=1 \
+# file://linux-2.6.34-moorestown-langwell-dma-driver-3.0.patch;patch=1 \
+# file://linux-2.6.34-moorestown-mmc-driver-1.0.patch;patch=1 \
+# file://linux-2.6.34-moorestown-nand-driver-2.0.patch;patch=1 \
+# # file://linux-2.6.34-moorestown-only-enable-mrst-pciquirks-on-mrst.patch;patch=1 \
+# file://linux-2.6.34-moorestown-platform-enabling.patch;patch=1 \
+# file://linux-2.6.34-moorestown-pmic-battery-driver.patch;patch=1 \
+# # file://linux-2.6.34-moorestown-rar-handler-driver-3.1.patch;patch=1 \
+# file://linux-2.6.34-moorestown-sensor-driver-1.1.patch;patch=1 \
+# file://linux-2.6.34-moorestown-spi-slave-controller-driver-1.1.patch;patch=1 \
+# file://linux-2.6.34-moorestown-thermal-emc1403-driver.patch;patch=1 \
+# file://linux-2.6.34-moorestown-touchscreen-driver.patch;patch=1 \
+# file://linux-2.6.34-moorestown-usb-otg-and-still-image-driver.patch;patch=1 \
+ file://linux-2.6.34-multi-touch-input-driver-for-event-devices.patch;patch=1 \
+ file://linux-2.6.34-ondemand-fix-1-7.patch;patch=1 \
+ file://linux-2.6.34-ondemand-fix-2-7.patch;patch=1 \
+ file://linux-2.6.34-ondemand-fix-3-7.patch;patch=1 \
+ file://linux-2.6.34-ondemand-fix-4-7.patch;patch=1 \
+ file://linux-2.6.34-ondemand-fix-5-7.patch;patch=1 \
+ file://linux-2.6.34-ondemand-fix-6-7.patch;patch=1 \
+ file://linux-2.6.34-ondemand-fix-7-7.patch;patch=1 \
+ file://linux-2.6.34-pch-can.patch;patch=1 \
+ file://linux-2.6.34-pch-dma.patch;patch=1 \
+ file://linux-2.6.34-pch-gbe.patch;patch=1 \
+ file://linux-2.6.34-pch-gpio.patch;patch=1 \
+ file://linux-2.6.34-pch-i2c.patch;patch=1 \
+ file://linux-2.6.34-pch-ieee1588.patch;patch=1 \
+ file://linux-2.6.34-pch-pcieqos.patch;patch=1 \
+ file://linux-2.6.34-pch-spi.patch;patch=1 \
+ file://linux-2.6.34-pch-uart.patch;patch=1 \
+ file://linux-2.6.34-pch-usbdev.patch;patch=1 \
+ file://linux-2.6.34-rt2860-no-debug.patch;patch=1 \
+# file://linux-2.6.34-stantum-and-mosart-multitouch-drivers.patch;patch=1 \
+# file://linux-2.6.34-USB-gadget-introduce-g_nokia-gadget-driver.patch;patch=1 \
+ file://linux-2.6.34-USB-otg-add-notifier-support.patch;patch=1 \
+ file://linux-2.6.35-input-touchscreen-introduce-tsc2005-driver.patch;patch=1 \
+# file://linux-2.6.35-moorestown-camera-driver-10.0-1-3.patch;patch=1 \
+# file://linux-2.6.35-moorestown-camera-driver-10.0-2-3.patch;patch=1 \
+# file://linux-2.6.35-moorestown-camera-driver-10.0-3-3.patch;patch=1 \
+ file://linux-2.6.35-OMAP-DSS2-Add-ACX565AKM-Panel-Driver.patch;patch=1 \
+ file://linux-2.6.35-OMAP-DSS2-Add-Kconfig-option-for-DPI-display-type.patch;patch=1 \
+ file://linux-2.6.35-OMAP-DSS2-Use-vdds_sdi-regulator-supply-in-SDI.patch;patch=1 \
+ file://linux-2.6.35-OMAP-RX51-Add-LCD-Panel-support.patch;patch=1 \
+ file://linux-2.6.35-OMAP-RX51-Add-Touch-Controller-in-SPI-board-info.patch;patch=1 \
+ file://linux-2.6.35-OMAP-RX51-Add-vdds_sdi-supply-voltage-for-SDI.patch;patch=1 \
+ file://linux-2.6.35-omap-rx-51-enable-tsc2005.patch;patch=1 \
+ file://linux-2.6.35-phylib-Add-module-table-to-all-existing-phy-drivers.patch;patch=1 \
+ file://linux-2.6.35-phylib-Support-phy-module-autoloading.patch;patch=1 \
+ file://linux-2.6-build-nonintconfig.patch;patch=1 \
+ file://linux-2.6-driver-level-usb-autosuspend.patch;patch=1 \
+ file://linux-2.6-usb-bt-autosuspend.patch;patch=1 \
+ file://linux-2.6-usb-uvc-autosuspend.patch;patch=1 \
+ file://rtl8192_carrier_off.patch;patch=1 \
+ file://rtl8192_no_autoconnect.patch;patch=1 \
+ file://rtl8192_no_WAP_unassoc.patch;patch=1 \
+# file://defconfig-menlow \
+ file://defconfig-netbook"
+
+S = "${WORKDIR}/linux-${PV}"
OpenPOWER on IntegriCloud